text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
## 데이터 살펴보기 ``` from tensorflow.keras.datasets import fashion_mnist # 데이터를 다운받습니다. (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() import matplotlib.pyplot as plt import numpy as np np.random.seed(777) class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] sample_size = 9 # 0 ~ 59999의 범위에서 무작위로 3개의 정수를 뽑습니다. random_idx = np.random.randint(60000, size=sample_size) # 0 ~ 1 범위로 만듭니다. x_train = np.reshape(x_train / 255, (-1, 28, 28, 1)) x_test = np.reshape(x_test / 255, (-1, 28, 28, 1)) from tensorflow.keras.utils import to_categorical # 각 데이터의 레이블을 범주형 형태로 변경합니다. y_train = to_categorical(y_train) y_test = to_categorical(y_test) # 검증 데이터셋을 만듭니다. from sklearn.model_selection import train_test_split # 훈련/테스트 데이터를 0.7/0.3의 비율로 분리합니다. x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.3, random_state = 777) print('Fashion-MNIST ready~') ``` ## 모델 구성하기 ``` from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten # 이전과 다르게 리스트 형태로 층을 구성해볼 수 있습니다. model = Sequential([ # 항상 모델의 첫 번째 층은 입력의 형태를 명시해주어야 합니다. Conv2D(filters = 16, kernel_size = 3, strides = (1, 1), padding = 'same', activation = 'relu', input_shape = (28, 28, 1)), MaxPool2D(pool_size = (2, 2), strides = 2, padding = 'same'), Conv2D(filters = 32, kernel_size = 3, strides = (1, 1), padding = 'same', activation = 'relu'), MaxPool2D(pool_size = (2, 2), strides = 2, padding = 'same'), Conv2D(filters = 64, kernel_size = 3, strides = (1, 1), padding = 'same', activation = 'relu'), MaxPool2D(pool_size = (2, 2), strides = 2, padding = 'same'), Flatten(), # Dense 층에 입력하기 위해 데이터를 펼쳐줍니다. 
Dense(64, activation = 'relu'), Dense(10, activation = 'softmax') # 10개의 출력을 가지는 신경망 ]) ``` ## 모델 학습하기 ``` model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['acc']) model.fit(x_train, y_train, epochs = 30, batch_size = 32, validation_data = (x_val, y_val)) ``` ## 모델 구조 확인하기(5.2.4절) ``` model.summary() # 모델의 구조를 확인합니다. ``` ### plot_model 함수를 사용합니다 ``` # 각주의 해결 방법을 써도 통하지 않는 다면, 다음 코드의 주석을 풀어 실행시키세요. # import os # os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin' from tensorflow.keras.utils import plot_model plot_model(model, './model.png', show_shapes=True) ```
github_jupyter
``` from __future__ import print_function, division import numpy as np import pandas as pd import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler from torch.utils.data import DataLoader, Dataset from torchvision import transforms, models import torch.nn.functional as F from tqdm.notebook import tqdm from PIL import Image import matplotlib.pyplot as plt import time import os TRAIN_IMG_PATH = "../input/actorsdataset/train/Train/" TEST_IMG_PATH = "../input/actorsdataset/test/Test/" LABELS_CSV_PATH = "../input/actorsdataset/train.csv" SAMPLE_SUB_PATH = "../input/actorsdataset/test.csv" # Creating dict for one hot encoding agedict = {'YOUNG':0, 'MIDDLE':1, 'OLD':2} revdict = {0:'YOUNG', 1:'MIDDLE', 2:'OLD'} df = pd.read_csv(LABELS_CSV_PATH) df['Class'] = df['Class'].map(agedict) # 90-10 split for train, test cut = int(len(df)*0.9) train, test = df[:cut], df[cut:].reset_index(drop=True) class actorsDataset(Dataset): def __init__(self, img_dir, labels, transform=None): self.labels = labels self.dir = img_dir self.transform = transform def __len__(self): return(len(self.labels)) def __getitem__(self, i): img = os.path.join(self.dir, self.labels.ID[i]) image = Image.open(img) label = self.labels['Class'][i] if self.transform: image = self.transform(image) return [image, label] data_transform = transforms.Compose([ transforms.RandomResizedCrop(128), transforms.ToTensor() ]) train_df = actorsDataset(TRAIN_IMG_PATH, train, data_transform) test_df = actorsDataset(TRAIN_IMG_PATH, test, data_transform) datasets = {'train':train_df, 'val':test_df} ``` #### DataLoader **While training a model, we typically want to pass samples in “minibatches”, reshuffle the data at every epoch to reduce model overfitting, and use Python’s multiprocessing to speed up data retrieval.** DataLoader is an iterable that abstracts this complexity for us in an easy API. 
``` trainloader = DataLoader(train_df, batch_size=32, shuffle=True) testloader = DataLoader(test_df, batch_size=32, shuffle=True) dataloader = {'train':trainloader, 'val':testloader} def train_model(model, criterion, optimizer, scheduler, num_epochs=25): since = time.time() # A state_dict() is simply a python ordered dictionary object that maps each parameter to its parameter tensor (torch.Tensor object). # The keys of this ordered dictionary are the names of the parameters, which can be used to access the respective parameter tensors. best_model = model.state_dict() best_accu = 0.0 for epoch in tqdm(range(num_epochs)): print('Epoch {}/{}'.format(epoch, num_epochs)) print('-*-'*10) since_epoch = time.time() for phase in ['train','val']: run_loss = 0.0 corr = 0 for data in tqdm(dataloader[phase]): inputs, labels = data inputs = inputs.to(device) labels = labels.to(device) # Sets the gradients of all optimized torch.Tensors to zero. optimizer.zero_grad() # Context-manager that sets gradient calculation to on or off with torch.set_grad_enabled(phase=='train'): outputs = model(inputs) _,preds = torch.max(outputs,1) loss = criterion(outputs, labels) if phase=='train': loss.backward() optimizer.step() run_loss += loss.item() * inputs.size(0) corr += torch.sum(preds==labels.data) if phase=='train': # If not called the learning rate won’t be changed # stays at the initial value. 
scheduler.step() model.train(True) else: model.train(False) epoch_loss = run_loss / len(datasets[phase]) epoch_accu = corr.double() / len(datasets[phase]) time_epoch = time.time() - since_epoch print('{} Loss: {:.4f} Acc: {:.4f} in {:.0f}m {:.0f}s'.format(phase, epoch_loss, epoch_accu, time_epoch // 60, time_epoch % 60)) if phase=='val' and epoch_accu > best_accu: best_accu = epoch_accu best_model = model.state_dict() print() time_elaps = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format(time_elaps//60, time_elaps%60)) print('Best Val Acc: {:4f}'.format(best_accu)) return model ``` ### optimizer.zero_grad() **we need to set the gradients to zero before starting to do backpropagation because PyTorch accumulates the gradients on subsequent backward passes** ##### Default Behavior is Useful for RNNs **Because of this, when you start your training loop, ideally you should zero out the gradients so that you do the parameter update correctly. Else the gradient would point in some other direction than the intended direction towards the minimum** ### loss.backward() || optimizer.step() When you call loss.backward(), all it does is compute gradient of loss w.r.t all the parameters in loss that have requires_grad = True and store them in parameter.grad attribute for every parameter. 
optimizer.step() updates all the parameters based on parameter.grad ``` class actorCNN(nn.Module): def __init__(self): super(actorCNN, self).__init__() self.model = models.resnet18(pretrained=True) for params in self.model.parameters(): params.requires_grad=False self.model.fc = nn.Linear(512, 512) self.l = nn.ReLU(inplace=True) self.fc2 = nn.Linear(512, 3) self.classifier = nn.Sequential(self.model, self.l, self.fc2) def forward(self, x): return self.classifier(x) num_epochs = 25 num_classes = 3 batch_size = 128 learning_rate = 0.002 device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu") model = actorCNN().to(device) criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(), lr = learning_rate) exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1) model_ft = train_model(model, criterion, optimizer, exp_lr_scheduler) submission_df = pd.read_csv(SAMPLE_SUB_PATH) output_df = pd.DataFrame(index=submission_df.index, columns = submission_df.keys()) output_df['ID'] = submission_df['ID'] output_df['Class'] = [0]*len(submission_df) submission_df = actorsDataset(TEST_IMG_PATH, output_df, data_transform) sub_loader = DataLoader(submission_df, batch_size=1, shuffle=False) def test_sub(model): prediction = [] model.train(False) for data in sub_loader: inputs, labels = data inputs = inputs.to(device) labels = labels.to(device) outputs = model(inputs) _, pred = torch.max(outputs.data, 1) prediction.append(int(pred)) return prediction output_df['Class'] = test_sub(model_ft) output_df['Class'] = output_df['Class'].map(revdict) output_df.to_csv('submission.csv', index=False) ```
github_jupyter
## pandas Series ``` import pandas as pd import numpy as np from numpy.random import randn np.random.seed(0) np.random.random(5) labels = ['a', 'b', 'c'] my_data = [10, 20, 30] dic_data = {'a':100, 'b':200, 'c':300} arr = np.array(my_data) pd.Series(labels) pd.Series(my_data) pd.Series(dic_data) pd.Series(data=my_data, index=labels) ser1 = pd.Series([1,2,3,4], ['USA','Japan','China','Swis']) # Series(data, index) ser1 ser2 = pd.Series([1,2,4,6], ['USA','Italy','China','BD']) ser2 ser1 + ser2 ``` ## Pandas DataFrame ``` df = pd.DataFrame(data=randn(5,4), index=['A','B','C','D','E'], columns=['C1','C2','C3','C4'], dtype="float64") df df['C1'] type(df['C1']) df[['C1','C3']] df['new'] = df['C2'] + df['C4'] df df.drop(labels='new', axis=1) df df.drop("E") df ``` ## show only row data ``` df.loc[['C']] df.loc['C'] ``` ## show index based row ``` df.iloc[2] df.loc['C','C3'] df.loc[['B','C'], ['C3','C4']] booldf = df>0 booldf df[booldf] df[df>0] df['C3']>0 df[df['C3']>0] df[df['C3']>0][['C1','C4']] dfseries = df['C3']>0 dfdata = df[dfseries] dfcolumn = ['C1','C4'] dfdata[dfcolumn] ``` ## multiple condition in dataframe ``` df[(df['C2']>0) & (df['C4']>.5)] df['states'] = "DD GG HH UU OI".split() df df.set_index('states') ``` ## multiple index levels ``` outside = ['G1','G1','G1','G2','G2','G2'] inside = [1,2,3,1,2,3] hier_index = list(zip(outside,inside)) hier_index hier_index = pd.MultiIndex.from_tuples(hier_index) hier_index df1 = pd.DataFrame(randn(6,2),hier_index,['A','B']) df1 df1.loc['G1'] df1.loc['G1'].loc[3] df1.index.names df1.index.names = ['Groups','Num'] df1 df1.loc['G1'] df1.loc['G1'].loc[3] df1.loc['G1'].loc[3]['B'] df1 df1.xs('G1') df1.xs(key=1,level='Num') ``` ## pandas missing data ``` data = {'A':[1,np.nan,np.nan],'B':[4,np.nan,6],'C':[7,8,9]} data dd = pd.DataFrame(data) dd dd.dropna() dd.dropna(axis=1) dd.dropna(thresh=2) dd.fillna(value='FILL') dd['B'].fillna(value=dd['B'].mean()) ``` ## pandas groupby ``` datas = {'Company':['G','M','F','G','F','M'], 
'Person':['Sa','Fs','Yt','Gh','Jk','Po'], 'Sales':[111,222,666,333,444,888]} datas dataCompany = pd.DataFrame(datas) dataCompany byCompany = dataCompany.groupby('Company') byCompany byCompany.sum() byCompany.mean() byCompany.std() byCompany.sum().loc['F'] dataCompany.groupby('Company').min() dataCompany.groupby('Company').describe() dataCompany.groupby('Company').describe().transpose() ``` ## dataframe concatenation ``` df_1 = pd.DataFrame({'A':['A10','A11','A12','A13'], 'B':['B10','B11','B12','B13'], 'C':['C10','C11','C12','C13'], 'D':['D10','D11','D12','D13']}, index=[0,1,2,3]) df_1 df_2 = pd.DataFrame({'A':['A20','A21','A22','A23'], 'B':['B20','B21','B22','B23'], 'C':['C20','C21','C22','C23'], 'D':['D20','D21','D22','D23']}, index=[4,5,6,7]) df_2 df_3 = pd.DataFrame({'A':['A30','A31','A32','A33'], 'B':['B30','B31','B32','B33'], 'C':['C30','C31','C32','C33'], 'D':['D30','D31','D32','D33']}, index=[8,9,10,11]) df_3 pd.concat([df_1,df_2,df_3]) c_data = pd.concat([df_1,df_2,df_3], axis=1) c_data.fillna(value='x') ``` ## merging dataframe ``` df_left = pd.DataFrame({'A':['A10','A11','A12','A13'], 'B':['B10','B11','B12','B13'], 'C':['C10','C11','C12','C13'], 'key':['key10','key11','key12','key13']}) df_right = pd.DataFrame({'AA':['A20','A21','A22','A23'], 'BB':['B20','B21','B22','B23'], 'CC':['C20','C21','C22','C23'], 'key':['key10','key11','key12','key13']}) pd.merge(df_left,df_right,how='inner',on='key') df_left = pd.DataFrame({'A':['A10','A11','A12','A13'], 'B':['B10','B11','B12','B13'], 'C':['C10','C11','C12','C13'], 'key1':['key10','key11','key12','key13'], 'key2':['key10','key11','key10','key10']}) df_right = pd.DataFrame({'AA':['A20','A21','A22','A23'], 'BB':['B20','B21','B22','B23'], 'CC':['C20','C21','C22','C23'], 'key1':['key10','key11','key11','key13'], 'key2':['key10','key10','key11','key11']}) pd.merge(df_left,df_right,on=['key1','key2']) pd.merge(df_left,df_right,how='outer',on=['key1','key2']) pd.merge(df_left,df_right,how='left',on=['key1','key2']) 
pd.merge(df_left,df_right,how='right',on=['key1','key2']) ``` ## join dataframe ``` df_left = pd.DataFrame({'A':['A10','A11','A12','A13'], 'B':['B10','B11','B12','B13'], 'C':['C10','C11','C12','C13']}, index=['key10','key11','key12','key12']) df_right = pd.DataFrame({'AA':['A20','A21','A22','A23'], 'BB':['B20','B21','B22','B25'], 'CC':['C20','C21','C22','C24']}, index=['key10','key11','key10','key11']) df_left.join(df_right) df_left.join(df_right, how='inner') ```
github_jupyter
![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/named_entity_recognition_(NER)/NER_aspect_airline_ATIS.ipynb) Named entities are phrases that contain the names of persons, organizations, locations, times and quantities. Example: <br> <br> #Content ATIS dataset provides large number of messages and their associated intents that can be used in training a classifier. Within a chatbot, intent refers to the goal the customer has in mind when typing in a question or comment. While entity refers to the modifier the customer uses to describe their issue, the intent is what they really mean. For example, a user says, ‘I need new shoes.’ The intent behind the message is to browse the footwear on offer. Understanding the intent of the customer is key to implementing a successful chatbot experience for end-user. 
https://www.kaggle.com/hassanamin/atis-airlinetravelinformationsystem <br> <br> |Tags predicted by this model | |------| | O| | I-depart_time.end_time| | B-arrive_date.date_relative| | I-fromloc.state_name| | B-depart_date.date_relative| | B-fromloc.state_code| | B-meal_description| | B-depart_time.time_relative| | I-fare_amount| | I-fromloc.city_name| | B-booking_class| | I-arrive_time.end_time| | B-return_date.today_relative| | B-fromloc.state_name| | B-round_trip| | B-depart_date.today_relative| | I-return_date.day_number| | I-depart_time.start_time| | B-period_of_day| | B-arrive_date.day_number| | B-flight_stop| | B-depart_date.day_name| | I-stoploc.city_name| | I-return_date.today_relative| | B-class_type| | B-stoploc.state_code| | B-economy| | B-depart_time.end_time| | B-return_date.date_relative| | I-fromloc.airport_name| | B-arrive_date.month_name| | I-flight_mod| | B-toloc.airport_code| | I-depart_time.end_time| | B-airline_code| | B-flight_mod| | B-cost_relative| | B-state_name| | B-fromloc.city_name| | B-depart_time.period_of_day| | I-city_name| | B-depart_time.period_mod| | B-city_name| | B-meal| | B-return_date.day_number| | I-airline_name| | I-restriction_code| | B-airline_name| | B-restriction_code| | B-flight| | B-transport_type| | B-time_relative| | B-arrive_time.time_relative| | B-fromloc.airport_code| | B-time| | I-toloc.city_name| | B-toloc.state_name| | B-meal_code| | I-arrive_date.day_number| | B-depart_time.start_time| | B-month_name| | B-fromloc.airport_name| | B-flight_number| | B-days_code| | I-meal_description| | B-fare_basis_code| | I-cost_relative| | I-time| | B-return_time.period_of_day| | I-depart_time.time| | B-depart_date.day_number| | I-economy| | B-arrive_time.start_time| | B-return_date.day_name| | B-return_time.period_mod| | B-airport_code| | B-stoploc.airport_code| | B-flight_time| | I-transport_type| | B-depart_date.month_name| | I-toloc.airport_name| | B-today_relative| | I-arrive_time.period_of_day| | B-day_name| | 
B-toloc.city_name| | B-connect| | I-round_trip| | B-depart_time.time| | B-airport_name| | B-arrive_time.period_of_day| | B-stoploc.airport_name| | I-class_type| | B-aircraft_code| | I-return_date.date_relative| | B-toloc.country_name| | I-flight_number| | B-state_code| | B-or| | I-depart_date.today_relative| | B-toloc.airport_name| | I-arrive_time.time| | I-flight_time| | I-state_name| | I-airport_name| | I-depart_time.period_of_day| | B-arrive_time.time| | B-depart_date.year| | I-flight_stop| | I-toloc.state_name| | B-arrive_date.day_name| | B-compartment| | I-depart_date.day_number| | I-meal_code| | B-arrive_time.end_time| | I-today_relative| | I-arrive_time.start_time| | B-toloc.state_code| | B-day_number| | I-arrive_time.time_relative| | I-fare_basis_code| | I-depart_time.time_relative| | B-return_date.month_name| | B-stoploc.city_name| | B-arrive_time.period_mod| | B-fare_amount| | B-mod| | B-arrive_date.today_relative| ``` import os ! apt-get update -qq > /dev/null # Install java ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"] !pip install nlu==1.1.1.rc1 pyspark==2.4.7 > /dev/null ! wget http://ckl-it.de/wp-content/uploads/2021/01/atis_intents.csv ``` # NLU makes NER easy. You just need to load the NER model via ner.load() and predict on some dataset. It could be a pandas dataframe with a column named text or just an array of strings. ``` import nlu import pandas as pd df = pd.read_csv("atis_intents.csv") df.columns = ["flight","text"] ner_df = nlu.load('en.ner.aspect.airline',).predict(df["text"],output_level='chunk') ner_df ``` ## Lets explore our data which the predicted NER tags and visalize them! 
We specify [1:] so we don't see the count for the O-tag which is the most common, since most words in a sentence are not named entities and thus not part of a chunk ``` ner_df['entities'].value_counts()[0:50].plot.bar(title='Occurence of Named Entities in dataset', figsize=(20,14)) ``` ## Most occurring `fromloc.city_name` tagged entities ``` ner_type_to_viz = 'fromloc.city_name' ner_df[ner_df.entities_confidence == ner_type_to_viz]['entities'].value_counts().plot.bar(title='Most often occuring fromloc.city_name labeled entities in the dataset') ``` ## Most occurring `flight_time` tagged entities ``` ner_type_to_viz = 'flight_time' ner_df[ner_df.entities_confidence == ner_type_to_viz]['entities'].value_counts().plot.bar(title='Most often occuring ORG labeled entities in the dataset') ```
github_jupyter
# Ajustar dados esparsos a uma dada função > Empregar o método dos mínimos quadrados para ajustar dados esparsos a uma função desejada é uma prática usual em diversas áreas. Quer aprender a fazer isso com poucas linhas de código em Python? - toc: false - badges: true - comments: true - author: Felipe N. Schuch - image: images/curve-fitting.png - categories: [SciPy,NumPy,Matplotlib] Empregar o método dos mínimos quadrados para ajustar dados esparsos a uma função desejada é uma prática usual em diversas áreas, usualmente empregada para previsão de tendências ou como uma estimativa para uma faixa não coberta pelos dados coletados. O primeiro passo é importar Numpy para nos servir como estrutura de dados e Matplotlib para produção dos gráficos: ``` import numpy as np import matplotlib.pyplot as plt #hide # Aqui definimos o estilo das figuras plt.style.use(['seaborn-darkgrid']) ``` O segundo passo é ter acesso aos dados esparsos aos quais queremos ajustar uma função. Para fins didáticos, eles serão produzidos de maneira artificial como uma função cosseno \\( \text{datay}(x) = 2,9\cos( 1,5 x) \\), sendo \\( -6 \le x \le 6 \\), em adição a números aleatórios. Note que a semente (seed) foi fixada para garantir a reprodutibilidade da solução. ``` datax = np.linspace(-6, 6, num=60) datay = 2.9 * np.cos(1.5 * datax) np.random.seed(67) datay += np.random.normal(size=datax.size) ``` Podemos graficar os nossos dados para conferência: ``` plt.scatter(datax, datay, label='dados') plt.xlabel('x') plt.ylabel('y') plt.legend(title='@aprenda.py') plt.show() ``` Usaremos a função [scipy.optimize.curve_fit](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html), baseada no método dos mínimos quadrados não linear, assumindo que `ydata = f(xdata, *params) + eps`. 
Portanto, para fazer o ajuste para a função que queremos aqui \\( \text{fun}(x) = a\cos(bx) \\), usamos o seguinte código: ``` from scipy.optimize import curve_fit def fun(x, a, b): ''' Note que aqui podemos definir tanta complexidade quanto necessário para o problema em estudo, como: multiplas variáveis, laços "for", testes lógicos, entre outros. Entretanto, para o nosso exemplo, a linha a seguir é suficiente. ''' return a * np.cos(b * x) (a, b), c = curve_fit(fun, datax, datay) #Imprimir os parâmetros de ajuste obtidos print(f'a: {a}') print(f'b: {b}') sigma = np.sqrt(np.diag(c)) print(f'desvio padrão: {sigma}') ``` E assim obtivemos valores para as constantes de ajuste `a`, `b` e também para o desvio padrão de cada uma delas. Com isso, podemos aumentar a resolução de `datax` para `linex`, uma vez que agora temos uma função contínua que define nossos dados esparsos. Por fim, graficamos os resultados: ``` linex = np.linspace(datax[0],datax[-1], num=datax.size*4) plt.figure() plt.plot(linex, fun(linex, a, b), label='fun(x)', color='C1') plt.scatter(datax, datay, label='dados') plt.fill_between(linex, fun(linex, a-sigma[0], b-sigma[1]), fun(linex, a+sigma[0], b+sigma[1]), color='C1', alpha=.25, label='Desvio Padrão') plt.xlabel('x') plt.ylabel('y') plt.legend(title='@aprenda.py') plt.show() ```
github_jupyter
## Age-structured SIR model for India with social distancing In example-4 we ran the age-structured SIR model for India with the parameter $\beta$ fitted to case data. We can now examine the effect of interventions, **for an idealised best-case**. We assume that lockdown **instantaneously** and **completely** removes all social contacts from the spheres of workplace, schools, and others. The contact matrix, then, is time-dependent, whose form we take to be $$ C_{ij}(t) = C_{ij} + u(t)(C^W_{ij} + C^S_{ij} + C^O_{ij}) $$ where $u(t)$ is a control function given by $$ u(t) = \frac{1}{2}\left[\tanh\left(\frac{t-t_{\text{on}}}{t_w}\right)-\tanh\left(\frac{t-t_{\text{off}}}{t_w}\right)\right] $$ This function has three parameters: the time at which the social distancing is implemented, $t_{\text on}$, the time at which it is lifted $t_{\text {off}}$ and the **lag** between implementation and compliance $t_w$. In the best-case scenario, we assume that this lag is less than day, or in other words, the lockdown acts **instantaneously**. The function $u(t)$ is one in window $t_{\text on} \leq t \leq t_{\text{off}}$ and zero outside. We can multiply this by a scale factor $0\leq \epsilon \leq 1$ as a measure of the efficacy of the social distancing. The value $\epsilon =1$ reflects complete compliance; the value $\epsilon=0$ reflects zero compliance. In the best-scenario, we assume $\epsilon =1$ so there is no scale factor in the control function. We now numerically integrate as before, imposing controls of 21 days duration and lifting it afterwards. We also explore various other scenarios, with several periods of lockdown separated by periods of relaxation. The goal is to reduce the number of infectives. In the language of optimal control, our cost function is the number of infectives. We find that the 21 day lockdown will not reduce the number of infectives to a point where explicit contact tracing will be feasible. 
We take the number 10 to represent a point where social contact tracing may be feasible. This is dependent on region and the effectiveness with which such measures can be implemented. We find a longer lockdowns are needed to bring the number of infectives down to that level. ``` %%capture ## compile PyRoss for this notebook import os owd = os.getcwd() os.chdir('../') %run setup.py install os.chdir(owd) %matplotlib inline import numpy as np import pyross import pandas as pd import matplotlib.pyplot as plt from scipy.io import loadmat MM = np.array((0,0,.2,.2,.2,.2,.2,.2,.4,.4,1.3,1.3,3.6,3.6,8,8)) ## mortality per 100 ## population and age classes M=16 ## number of age classes my_data = np.genfromtxt('data/age_structures/India-2019.csv', delimiter=',', skip_header=1) aM, aF = my_data[:, 1], my_data[:, 2] Ni=aM+aF; Ni=Ni[0:M]; N=np.sum(Ni) # contact matrices my_data = pd.read_excel('data/contact_matrices_152_countries/MUestimates_home_1.xlsx', sheet_name='India',index_col=None) CH = np.array(my_data) my_data = pd.read_excel('data/contact_matrices_152_countries/MUestimates_work_1.xlsx', sheet_name='India',index_col=None) CW = np.array(my_data) my_data = pd.read_excel('data/contact_matrices_152_countries/MUestimates_school_1.xlsx', sheet_name='India',index_col=None) CS = np.array(my_data) my_data = pd.read_excel('data/contact_matrices_152_countries/MUestimates_other_locations_1.xlsx', sheet_name='India',index_col=None) CO = np.array(my_data) my_data = pd.read_excel('data/contact_matrices_152_countries/MUestimates_all_locations_1.xlsx', sheet_name='India',index_col=None) CA = np.array(my_data) C=CH+CW+CS+CO ``` ### One closure ``` beta = 0.01646692 # contact rate parameter gIa = 1./7 # recovery rate of asymptomatic infectives gIs = 1./7 # recovery rate of symptomatic infectives alpha = 0. 
# asymptomatic fraction fsa = 1 # suppresion of contact by symptomatics # initial conditions Is_0 = np.zeros((M)); Is_0[6:13]=3; Is_0[2:6]=1 Ia_0 = np.zeros((M)) R_0 = np.zeros((M)) S_0 = Ni - (Ia_0 + Is_0 + R_0) parameters = {'alpha':alpha,'beta':beta, 'gIa':gIa,'gIs':gIs,'fsa':fsa} model = pyross.models.SIR(parameters, M, Ni) # the contact matrix is time-dependent def contactMatrix(t): if t<21: xx = C elif 21<=t<42: xx = CH else: xx = C return xx # start simulation Tf=98.5; Nf=2000 data=model.simulate(S_0, Ia_0, Is_0, contactMatrix, Tf, Nf) IC = np.zeros((Nf)) for i in range(M): IC += data['X'][:,2*M+i] t = data['t'] fig = plt.figure(num=None, figsize=(28, 8), dpi=80, facecolor='w', edgecolor='k') plt.rcParams.update({'font.size': 26}) plt.plot(t, IC, '-', lw=4, color='#A60628', label='forecast', alpha=0.8) plt.xticks(np.arange(0, 200, 14),('4 Mar','18 Mar','1 Apr','15 Apr','29 Apr','13 May','27 May','10Jun')); t1=int(Nf/14) plt.fill_between(t[3*t1:6*t1], 0, 6000, color="#A60628", alpha=0.2) my_data = np.genfromtxt('data/covid-cases/india.txt', delimiter='', skip_header=6) day, cases = my_data[:,0], my_data[:,3] - my_data[:,1] plt.plot(cases, 'o-', lw=4, color='#348ABD', ms=16, label='data', alpha=0.5) plt.legend(fontsize=26, loc='upper left'); plt.grid() plt.autoscale(enable=True, axis='x', tight=True) plt.ylabel('Infected individuals'); plt.ylim(0, 6000); plt.xlim(0, 98); plt.savefig('/Users/rsingh/Desktop/4a.png', format='png', dpi=200) m1 = .01*MM*(data['X'][0,0:M]-data['X'][-1,0:M]) np.sum(m1) ``` ### Two closures ``` def contactMatrix(t): if t<21: xx = C elif 21<=t<42: xx = CH elif 42<=t<47: xx = C elif 47<=t<75: xx = CH else: xx = C return xx # start simulation Tf=98; Nf=2000 data=model.simulate(S_0, Ia_0, Is_0, contactMatrix, Tf, Nf) IC = np.zeros((Nf)) SC = np.zeros((Nf)) for i in range(M): IC += data['X'][:,2*M+i] SC += data['X'][:,0*M+i] fig=plt.figure(num=None, figsize=(28, 8), dpi=80, facecolor='w', edgecolor='k') plt.rcParams.update({'font.size': 
26}) plt.plot(t, IC, '-', lw=4, color='#A60628', label='forecast', alpha=0.8) plt.xticks(np.arange(0, 200, 14),('4 Mar','18 Mar','1 Apr','15 Apr','29 Apr','13 May','27 May','10Jun')); t1=int(Nf/(14)) plt.fill_between(t[3*t1:6*t1], 0, 600, color="#A60628", alpha=0.2) plt.fill_between(t[6*t1+108:10*t1+108], 0, 600, color="#A60628", alpha=0.2) my_data = np.genfromtxt('data/covid-cases/india.txt', delimiter='', skip_header=6) day, cases = my_data[:,0], my_data[:,3] - my_data[:,1] plt.plot(cases, 'o-', lw=4, color='#348ABD', ms=16, label='data', alpha=0.5) plt.legend(fontsize=26); plt.grid() plt.autoscale(enable=True, axis='x', tight=True) plt.ylabel('Infected individuals'); plt.xlim(0, 98); plt.ylim(0, 600); plt.savefig('/Users/rsingh/Desktop/4b.png', format='png', dpi=200) m1 = .01*MM*(data['X'][0,0:M]-data['X'][-1,0:M]) np.sum(m1) SC[-1]-SC[0] cases ``` ## Three closures ``` def contactMatrix(t): if t<21: xx = C elif 21<=t<42: xx = CH elif 42<=t<47: xx = C elif 47<=t<75: xx = CH elif 75<=t<80: xx = C else: xx = CH return xx # start simulation Tf=98; Nf=2000; data=model.simulate(S_0, Ia_0, Is_0, contactMatrix, Tf, Nf) IC = np.zeros((Nf)) SC = np.zeros((Nf)) for i in range(M): IC += data['X'][:,2*M+i] SC += data['X'][:,0*M+i] fig=plt.figure(num=None, figsize=(28, 8), dpi=80, facecolor='w', edgecolor='k') plt.rcParams.update({'font.size': 26}) plt.plot(t, IC, '-', lw=4, color='#A60628', label='forecast', alpha=0.8) plt.xticks(np.arange(0, 200, 14),('4 Mar','18 Mar','1 Apr','15 Apr','29 Apr','13 May','27 May','10Jun')); t1=int(Nf/(14)) plt.fill_between(t[3*t1:6*t1], 0, 600, color="#A60628", alpha=0.2) plt.fill_between(t[6*t1+108:10*t1+108], 0, 600, color="#A60628", alpha=0.2) plt.fill_between(t[10*t1+213:14*t1+410], 0, 600, color="#A60628", alpha=0.2) my_data = np.genfromtxt('data/covid-cases/india.txt', delimiter='', skip_header=6) day, cases = my_data[:,0], my_data[:,3] - my_data[:,1] plt.plot(cases, 'o-', lw=4, color='#348ABD', ms=16, label='data', alpha=0.5) 
plt.legend(fontsize=26, loc='upper left'); plt.grid() plt.autoscale(enable=True, axis='x', tight=True) plt.ylabel('Infected individuals'); plt.xlim(0, 98); plt.ylim(0, 600); plt.savefig('/Users/rsingh/Desktop/4b.png', format='png', dpi=200) ``` ### One long closure ``` def contactMatrix(t): if t<21: xx = C elif 21<=t<70: xx = CH else: xx = CH return xx # start simulation Tf=84; Nf=2000; data=model.simulate(S_0, Ia_0, Is_0, contactMatrix, Tf, Nf) IC = np.zeros((Nf)) SC = np.zeros((Nf)) for i in range(M): IC += data['X'][:,2*M+i] SC += data['X'][:,0*M+i] t = data['t']; t1=int(Nf/(12)) fig = plt.figure(num=None, figsize=(28, 8), dpi=80, facecolor='w', edgecolor='k') plt.rcParams.update({'font.size': 26}) plt.plot(t[0:10*t1], IC[0:10*t1], '-', lw=4, color='#A60628', label='forecast', alpha=0.6) plt.plot(t[10*t1:], IC[10*t1:], ':', lw=4, color='dimgrey', alpha=0.8) plt.xticks(np.arange(0, 200, 14),('4 Mar','18 Mar','1 Apr','15 Apr','29 Apr','13 May','27 May','10Jun')); plt.fill_between(t[3*t1+2:10*t1+5], 0, 600, color="#A60628", alpha=0.2) #plt.fill_between(t[10*t1+10:14*t1+70], 0, 600, color="#A60628", alpha=0.2) my_data = np.genfromtxt('data/covid-cases/india.txt', delimiter='', skip_header=6) day, cases = my_data[:,0], my_data[:,3]- my_data[:,1] plt.plot(cases, 'o-', lw=4, color='#348ABD', ms=16, label='data', alpha=0.5) plt.legend(fontsize=26, loc='upper left'); plt.grid() plt.autoscale(enable=True, axis='x', tight=True) plt.ylabel('Infected individuals'); plt.ylim(0, 600); plt.xlim(0, 98); plt.savefig('/Users/rsingh/Desktop/4d.png', format='png', dpi=200) #IC[-1] ```
github_jupyter
# Tutorial 2 - Two tanks system ## Background information This tutorial shows how to use _numerous_ to create a system of multiple components. The tutorial is aimed at demonstrating the usability and scalability of the model architecture for systems with multiple physical components connected between each other by means of connectors. The tutorial is built based on a relatively simple system, i.e. two tanks are placed on top of each other and connected by a valve. The implementation using _numerous_ might therefore seem tedious and over-complicated, given the limited number of components and thus equations to solve. However, as already pointed out, we wish to illustrate how systems can be created, and how the _numerous_ model architecture would be advantageous for systems with a high degree of complexity. The model implementation is developed by instantiating _items_ of different classes, and connecting them by means of _connectors_. The tutorial can be run by importing only the extra packages pandas, numpy and plotly. ## System governing equations A system of two tanks and a valve is shown in the figure below. H indicates the height of a certain mass M of liquid in each tank. The liquid density is indicated by $\rho$, while the cross-sectional area of each tank is indicated by the parameter $A_0$. The mass flow rate through the valve is determined by a valve characteristic parameter $\mathrm{C_v}$. ![Screenshot.png](sketch_tanks.png) At time 0, the top tank (tank 1) is filled with a certain amount of liquid, determining the height $\mathrm{H_1}$, while the bottom tank (tank 2) is completely empty. If the valve is opened at time 0, a mass flow rate starts flowing through the valve between tank 1 and tank 2. After a given time (that we indicate as $t_\mathrm{filling}$) the top tank will be empty, while the bottom tank will contain the entire mass of liquid. The mathematical model describing this system is illustrated below. 
For the sake of simplicity, we assume that the mass flow rate through the valve is proportional to the pressure drop accross the valve by the relation: $$\begin{equation} \dot{\mathrm{m}} = \mathrm{C_v} \sqrt{\mathrm{\Delta p}} \end{equation} $$ Moreover, the conservation of mass in the two tanks is expressed by the equations (with the assumption of incompressible fluid): $$\begin{equation} \rho A_{0,1} \mathrm{\dfrac{dH_1}{dt}} = - \dot{m} \end{equation} $$ $$\begin{equation} \rho A_{0,2} \mathrm{\dfrac{dH_2}{dt}} = + \dot{m} \end{equation} $$ The pressure drop accross the valve can be expressed by the equation: $$\begin{equation} \Delta p = \rho g H_1 \end{equation} $$ By substituting this latter expression in the previous equations, the differential system of equations describing the system becomes: $$\begin{equation} \rho A_{0,1} \mathrm{\dfrac{dH_1}{dt}} = - \mathrm{C_v} (t)\sqrt{\rho g \mathrm{H_1}} \end{equation} $$ $$\begin{equation} \rho A_{0,2} \mathrm{\dfrac{dH_2}{dt}} = + \mathrm{C_v}(t)\sqrt{\rho g \mathrm{H_1}} \end{equation} $$ ## Analytical solution The system of differential equations above can be quite complex to solve analytically given the time dependence of $\mathrm{C_v}$. Therefore, a numerical solver can be used to solve the system, and we will show in this tutorial how to create a model and to solve it using _numerous_. However, in order to prove the correct implementation of the solver, we will compare the results of the numerical solution against the analytical solution for the simplyfing case of having a characteristic valve parameter $\mathrm{C_v}$ independent from time. If we equal the parameter to a constant value over time $\mathrm{C_v}(t) = \mathrm{C_v}$, it is possible to solve the system analytically. 
In fact, the integration of the previous equations leads to the solution: $$\begin{equation} \mathrm{H_1(t)} = \Bigg[\sqrt{\mathrm{H_{1,0}}} - \dfrac{\mathrm{C_v}}{2 A_{0,1}} \sqrt{\dfrac{g}{\rho}} \cdot t \Bigg]^{2} \end{equation} $$ $$\begin{equation} \mathrm{H_2(t)} = \mathrm{H_{2,0}} + \dfrac{\mathrm{C_v}}{A_{0,2}} \sqrt{ \dfrac{g}{\rho}} \sqrt{\mathrm{H_{1,0}}} \cdot t - \dfrac{\mathrm{C_v^{2}}}{4 A_{0,1} A_{0,2}} \dfrac{g}{\rho} \cdot t^2 \end{equation} $$ The solution can also be expressed as function of the total mass in the tanks, which is related to the liquid height by the volume as: $$\begin{equation} \mathrm{M} = \rho \mathrm{H} \mathrm{A_0} \end{equation} $$ The analytical solution would thus be expressed as: $$\begin{equation} \mathrm{M_1(t)} = \Bigg[\sqrt{\rho \mathrm{H_{1,0} A_{0,1}}} - \dfrac{\mathrm{C_v}}{2} \sqrt{\dfrac{g}{A_{0,1}}} \cdot t \Bigg]^{2} \end{equation} $$ $$\begin{equation} \mathrm{M_2(t)} = \mathrm{\rho H_{2,0}} A_{0,2} + \mathrm{C_v} \sqrt{ \dfrac{g}{A_{0,1}}} \sqrt{\mathrm{\rho \mathrm{H_{1,0} A_{0,1}}}} \cdot t - \dfrac{\mathrm{C_v^{2}}}{4} \dfrac{g}{A_{0,1}} \cdot t^2 \end{equation} $$ ## Input data The input data used for the analytical solution and the simulation are reported below: * liquid density, $\rho$ = 1000 kg/m$^3$ * cross-sectional area tank 1, $A_{0,1}$ = 0.05 m$^2$ * cross-sectional area tank 2, $A_{0,2}$ = 0.1 m$^2$ * initial conditions: * initial height tank 1 $H_{0,1}$ = 0.5 m * initial height tank 2 $H_{0,2}$ = 0.0 m The implementation of the model with a time-dependent $\mathrm{C_v}$ was carried out by using the following formulation: * $\mathrm{C_v} = \mathrm{C_{v,0}} \cdot \big[sin{(\omega \cdot t)}+1\big] $ and imposing: * $\mathrm{C_{v,0}} = 0.1 \; \; \; (\mathrm{kg}/\mathrm{s})/\mathrm{Pa}$ * $\omega \; \; \; = 1.5 \; \; \;\mathrm{rad/s}$ A constant value of the valve parameter $\mathrm{C_v}$ was instead obtained by imposing $\omega = 0$, so that: * $\mathrm{C_v} = \mathrm{C_{v,0}}$ at any time step, and 
the solution could be compared against the analytical formulation derived above. This is translated into the followinf python code: ### Definition of input data - code: ``` import numpy as np ## Input data # Define liquid and geometry for the case in analysis rho_water = 1000 # Water density [kg/m3] A0 = [0.05, 0.1] # Tanks cross-sectional area [m2] Cv_0 = 0.1 # [(kg s)/Pa] Amplitude of valve characteristic parameter g = 9.81 # [m/s2] gravitational accelleration ## Initial conditions H0 = [0.5, 0] # Initial condition - liquid height [m] # Estimate the initial value of total mass M [kg] in the two tanks M0 = rho_water * np.multiply(A0, H0) # Caclulate what is the time (analytical solution) to entirely fill the tank (simple case - analytical sol) t_filling = np.sqrt(A0[0]/g) * np.sqrt(M0[0]) *2* 1/Cv_0 # Define start and stop time for the analysis and simulation t_start_sim = 0 t_stop_sim = 10 ``` ### Analytical soltion: Python code First of all, we define a function which is able to return the analytical solution for a given time span, given the input parameters. The implementation below is based on the analytical solution derived above. 
``` import pandas as pd N_t = 11 # Number of time steps at which the analytical solution is evaluated time_range = [t_start_sim ,t_stop_sim] # Time span in which the solution is plotted def analytical_solution(N_t=N_t, time_range=time_range, g=9.81, A0=[0.05, 0.1], Cv_0=0.1, M0=[25, 0], H0=[0.5, 0] , rho_water=1000): time_vector = np.linspace(start = time_range[0], stop = time_range[1], num = N_t) # Create a dictionary with time, evolution of liquid heights and mass in the two tanks for the analytical solution: result_analytical = {'t': [], 'M_1': [], 'M_2': [], 'H_1': [], 'H_2': []} # Calculate what is the time after which the first tank is empty and the second tank is full t_filling = np.sqrt(A0[0]/g) * np.sqrt(M0[0]) *2* 1/Cv_0 for i in range(N_t): if time_vector[i] < t_filling: M_1_analytical = (np.sqrt(M0[0]) - Cv_0 / 2 * np.sqrt(g / A0[0]) * time_vector[i] ) ** 2 M_2_analytical = np.sqrt(M0[1]) + Cv_0 * np.sqrt(g / A0[0]) * np.sqrt( M0[0]) * time_vector[i] - Cv_0 ** 2 * g / A0[0] / 4 * (time_vector[i] ** 2) H_1_analytical = (np.sqrt(H0[0]) - Cv_0 / 2 /A0[0] * np.sqrt(g/ rho_water) * time_vector[i] ) ** 2 H_2_analytical = np.sqrt(H0[1]) + Cv_0 /A0[1] * np.sqrt(g/ rho_water) * np.sqrt( H0[0]) * time_vector[i] - Cv_0 ** 2 * g / A0[0] / 4 /A0[1]/rho_water* (time_vector[i] ** 2) else: M_2_analytical = M0[0] + M0[1] M_1_analytical = 0 H_2_analytical = (M0[0] + M0[1])/rho_water/A0[1] H_1_analytical = 0 result_analytical['t'].append(time_vector[i]) result_analytical['M_1'].append(M_1_analytical) result_analytical['M_2'].append(M_2_analytical) result_analytical['H_1'].append(H_1_analytical) result_analytical['H_2'].append(H_2_analytical) return result_analytical ``` We can run the equation to get the analytical soluton for the input data defined above, and we can create a table (using pandas data frame) containing the analytical solution at each evaluated time. The code is shown below. 
``` result_analytical = analytical_solution(g=g, A0=A0, M0=M0, rho_water = rho_water, N_t=N_t, time_range=time_range, Cv_0=Cv_0) data = {'Time, s': result_analytical['t'], 'H1, m':result_analytical['H_1'], 'H2, m':result_analytical['H_2'], 'M1, kg':result_analytical['M_1'],'M2, kg':result_analytical['M_2']} pd.DataFrame(data) ``` ## Implementation using _numerous_ ### Preliminary steps The first step for the implementation is to include all the relevant _numerous_ modules. For this tutorial we need: * _Item_ for defining item objects * _Model_ for defining model objects * _ConnectorTwoWay_ for defining a special connectors object * _Subsystems_ for defining the subsystem object * _Simulation_ for defining the simulation * _Equation_ and equation decorator for objects of equation class Moreover, we will need HistoryDataFrame to store results. ``` # We include all the relevant modules from numerous: from numerous.engine.system import Item from numerous.engine.model import Model from numerous.engine.system import Subsystem from numerous.engine.simulation import Simulation from numerous.engine.system import ConnectorTwoWay from numerous.engine import VariableType, VariableDescription, OverloadAction from numerous.multiphysics import Equation from numerous.multiphysics import EquationBase ``` ### Define Tank Equation The first item that we will model is the tank. Before creating the tank item, we need to define the equation to apply, and we thus create a Tank_Equation item using Equation class. We need to define all the parameters and constants of the equation, by using the method ` Equation.add_parameter ` . _g_ (gravitational accelleration) is the only variable defined as a constant, as its value cannot be modified. Note that the line of code ``` python super().__init__(tag='for info only') ``` in `__init__` is a necessary line to create any class using _numerous_ in the current release. 
The only _state_ variable in the tank equation is given by the tank height, which is determined by the differential equation presented in the description above and reported in the equation definition. ``` class Tank_Equation(EquationBase): def __init__(self, tag="tank_equation", H=0.5, rho=1000, A0=0.05, g=9.81): super().__init__(tag='tank_equation') self.add_state('H', H) # [m] Liquid height in the tank self.add_parameter('rho', rho) # [kg/m3] Liquid density self.add_parameter('A0', A0) # [m2] Tank cross-sectional area self.add_constant('g', g) # [m/s2] Gravitational acceleration self.add_parameter('mdot', 0) # [kg/s] Mass flow rate @Equation() def eval(self, scope): # Differential equation for mass conservation in a general tank with a mass flow rate entering or leaving the tan scope.H_dot = scope.mdot / scope.rho / scope.A0 ``` ### Define Valve Equation The second item that we will model is the valve. Before creating the valve item, we need to define the equation to apply, and we thus create a `Valve_Equation` item using `EquationBase` class. Please, note the use of the **global variable** time, which is defined as ` scope.globals.time ` in _numerous_ , and thus it has not to be specified as the other parameters and constants. Note that in the `Valve_Equation` no state is defined, since the valve characteristic parameter is a function of time, but we have an explicit formulation for it. 
``` # We define the equation (using EquationBase class) determining the mass flow rate across the valve class Valve_Equation(EquationBase): def __init__(self, Cv_0=0.1, rho=1000, g=9.81, omega=1): super().__init__(tag='valve_equation') self.add_parameter('omega', omega) # [rad/sec] Angular frequency of valve characteristic parameter self.add_parameter('Cv_0', Cv_0) # [(kg/s)/Pa] Amplitude of valve characteristic parameter self.add_parameter('mdot1', 0) # [kg/s] Mass flow rate in one side of the valve self.add_parameter('mdot2', 0) # [kg/s] Mass flow rate in the other side of the valve self.add_parameter('H1', 0) # [m] Liquid height in the tank 1 connected to the valve (top tank) self.add_parameter('H2', 0) # [m] Liquid height in the tank 2 connected to the valve (bottom tank) self.add_parameter('rho', rho) # [kg/m3] Liquid density self.add_constant('g', g) # [m/s2] Gravitational acceleration self.add_parameter('Cv', Cv_0) # [(kg/s)/Pa] Valve characteristic parameter @Equation() def eval(self,scope,global_variables): scope.Cv = scope.Cv_0 * (np.sin(scope.omega * global_variables.time)+1) #[(kg/s)/Pa] deltaP = scope.rho * scope.g * (scope.H1) #[Pa] mdot = np.sign(deltaP) * np.sqrt(np.absolute(deltaP)) * scope.Cv #[kg/s] # The valve will be associated with two mass flow rates (one leaving and one entering the component), #which - for conservation of mass - have the same magnitude and opposite sign scope.mdot1 = -mdot #[kg/s] scope.mdot2 = mdot #[kg/s] ``` ### Define Tank as Item We define the `Tank` class as an `Item` class. We then create a namespace 'v1' to contain the variables for the `Tank_Equation`. The equation is associated to the namespace using the `add_equations` method, as shown in the code below. 
``` class Tank(Item): def __init__(self, tag="tank", H=0.5, rho=1000, A0=0.05, g=9.81): super(Tank, self).__init__(tag) v1 = self.create_namespace('v1') v1.add_equations([Tank_Equation(H=H, rho=rho, A0=A0, g=g)]) ``` ### Define Valve as ConnectorTwoWay Once that we have defined the equation describing the mass flow rate flowing through the valve, we need to create the Valve as a class `ConnectorTwoWay` and to assign an equation to it. `ConnectorTwoWay` is a special case of a `Connector` class, and the reader is referred to _numerous_ documentation for an exhuastive explanation. The peculiarity of this connector is the possibility of defining two sides, i.e. variables can be binded to the connectors by specifying to different items as sides. In the code lines ``` python super().__init__(tag, side1_name='side1', side2_name='side2') ``` we have to specify the names of the two sides. The steps that we have to take are the following (refer to the numbering in the code comments # to see which lines of codes belong to the different steps) 1. We create a namespace 'v1' to contain the variables for the valve equation. This is done using the Item method `Item.create_namespace `. The namespace is then associated to an equation using the `add_equations` method. 2. We create variables at each side of the connector item, and we associated them to the same namespace containing the valve equation. The variables must be created because when we first instantiate the ConnectorTwoWay object no information on side1 and side2 is passed. 3. The binding between the ConnectorTwoWay and the two items at each side is done, using the variables previosuly created in the name space. In this particular example: * the value of v1.H1 and v1.H2 (liquid heights of the tanks connected to the valve, stored inside the valve object) must point to the respective tank heights in the two side objects. This implies that the value of H is determined by the tank equation and not by the valve equation. 
* the value of the mass flow rate entering or leaving each tank (for example the value self.side1.v1.mdot stored inside the side1 object (tank 1)) must point to the mass flow rate flowing through the valve (in this case determined by the valve equation) ``` # Define the valve as a connector item - connecting two tanks class Valve(ConnectorTwoWay): def __init__(self, tag="valve", Cv_0=0.1, rho=1000, g=9.81, omega=0): super().__init__(tag, side1_name='side1', side2_name='side2') #1 Create a namespace for mass flow rate equation and add the valve equation v1 = self.create_namespace('v1') v1.add_equations([Valve_Equation(Cv_0=Cv_0, rho=rho, g=g, omega=omega)]) #2 Create variables H and mdot in side 1 and 2 #(side 1 represents a connection with one tank, with related liquid height H_1) #(side 2 represents a connection with the second tank, with related liquid height H_2) self.side1.v1.create_variable(name='H') self.side1.v1.create_variable(name='mdot') self.side2.v1.create_variable(name='H') self.side2.v1.create_variable(name='mdot') # Map variables between binding and internal variables for side 1 and 2 # This is needed to update the values of the variables in the binding according to the equations of the items v1.H1 = self.side1.v1.H v1.H2 = self.side2.v1.H self.side1.v1.mdot = v1.mdot1 self.side2.v1.mdot = v1.mdot2 ``` ### Create the sub-system of components After defining all the classes for the items that will constitute the system, we are ready for the system assembly. We create a special class of `Subsystem`, inside which we: (refer to the numbering in the code comments # to see which lines of code belong to the different steps) 1. create the gravitational acceleration constant and assign a value to it 2. create two instances of the class Tank called Tank_1 (top tank) and Tank_2 (bottom tank) 3. create one instance of the class Valve called Valve_1 4. bind Tank_1 and Tank_2 by assigning each of them to the two sides of Valve_1. 
we use the `ConnectorTwoWay.bind` method for this. 5. register the instantiated items in the Two_Tanks class The inputs needed to the subsystem are: * H0, which is a vector containing the initial state of the system (initial liquid height of Tank_1 and Tank_2) * Cv_0 and omega, which are amplitude and angular frequency of the valve characteristic parameter We assume that geometry (A0) and liquid (rho) are given by the input data as fixed values ``` # Define the subsystem composed by two tanks and one valve connecting them class Two_Tanks(Subsystem): def __init__(self, tag, H0, Cv_0, omega): super().__init__(tag) #1. Gravitational acceleration g = 9.81 #2. Instances of Tank class Tank_1 = Tank('tank_1', H=H0[0], rho=rho_water, A0=A0[0], g=g) Tank_2 = Tank('tank_2', H=H0[1], rho=rho_water, A0=A0[1], g=g) #3. Valve_1 is one instance of valve class Valve_1 = Valve('valve_1', Cv_0=Cv_0, rho=rho_water, g=g, omega=omega) #4. Binding Valve_1.bind(side1=Tank_1, side2=Tank_2) #5. Register all the instantiated items in the sub-system self.register_items([Tank_1, Tank_2, Valve_1]) ``` ### Create the system model and simulation Finally we are ready to define the model and the simulation of the implemented system, and we do it by creating a function named `t_1_item_model`. The inputs to the function are given by: * H0, Cv_0, omega, which represent initial conditions of the tanks, and valve characteristic * t_start_sim and t_stop_sim determining the time span in which the system will be simulated The steps are the following: 1. First a model object m1 is instantiated based on the `Two_Tanks` subsystem with given inputs 2. A simulation s1 is connected to the model object, and some solver settings are chosen. The parameter 'num' is used to specify the number of steps at which the solution is evaluated between t_start_sim and t_stop_sim 3. 
The `simulation.solve` method is called, and the solution is returned as output of the function ``` def t_1_item_model(H0, Cv_0, omega, t_start_sim, t_stop_sim): # 1. Instantiate the model m1 = Model(Two_Tanks('subsystem', H0, Cv_0, omega)) # 2. Setting up the simulation: s1 = Simulation(m1, t_start=t_start_sim, t_stop=t_stop_sim, num=5000) # 3. Call the solve method sol = s1.solve() return s1, sol ``` ## Running the simulations - Case 1: Constant Cv We impose a Cv to be constant in time by setting the angular frequency $\omega$ to be equal to 0 (i.e. the sine term vanishes, so $\mathrm{C_v} = \mathrm{C_{v,0}}$ at each time step). To obtain the solution of the system for Case 1, we simply need to call the `t_1_item_model` function with the inputs defined by the input data, and omega = 0. The progress bar is shown below. ``` omega = 0 # Collect the historical data at each time step s1, sol = t_1_item_model(H0, Cv_0,omega, t_start_sim, t_stop_sim) ``` ### Plotting: comparison vs. analytical solution To plot the solution, we add the following code which uses `plotly`. We shall not go into detail on the code, but simply include it here as it's used for the purpose of illustrating the results of this tutorial only. We can plot the solution by accessing the `Model` object `historian`, which contains a log of all variables as a `Pandas` dataframe. The `Simulation` object, `s1`, contains the `model` object and the time logged variables are accessible through `s1.model.historian`. 
``` # Plot the comparison # Nb: the package plotly is needed for the plotting from plotly import __version__ from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot import plotly.graph_objs as go fig = go.Figure() fig.add_trace(go.Scatter( x=hdf.df.index.total_seconds(), y=hdf.df['subsystem.tank_1.v1.H'], name='Tank 1 - numerical', mode='lines',line_color='rgba(102,51,255,1)')) fig.add_trace(go.Scatter( x=result_analytical['t'], y=result_analytical['H_1'], name='Tank 1 - analytical', mode='markers', marker_color='rgba(102,51,255,1)')) fig.add_trace(go.Scatter( x=hdf.df.index.total_seconds(), y=hdf.df['subsystem.tank_2.v1.H'], name='Tank 2 - numerical', mode='lines',line_color='rgba(152, 0, 0, .8)')) fig.add_trace(go.Scatter( x=result_analytical['t'], y=result_analytical['H_2'], name='Tank 2 - analytical', mode='markers',marker_color='rgba(152, 0, 0, .8)')) fig.update_layout(title='Liquid height in tanks - analytical vs. numerical solution', yaxis_zeroline=False, xaxis_zeroline=False,xaxis_title="time, sec", yaxis_title="liquid height H, m",) fig.show() fig = go.Figure() fig.add_trace(go.Scatter( x=hdf.df.index.total_seconds(), y= hdf.df['subsystem.tank_1.v1.H']*rho_water*A0[0], name='Tank 1 - numerical', mode='lines',line_color='rgba(102,51,255,1)')) fig.add_trace(go.Scatter( x=result_analytical['t'], y=result_analytical['M_1'], name='Tank 1 - analytical', mode='markers', marker_color='rgba(102,51,255,1)')) fig.add_trace(go.Scatter( x=hdf.df.index.total_seconds(), y=hdf.df['subsystem.tank_2.v1.H']*rho_water*A0[1], name='Tank 2 - numerical', mode='lines',line_color='rgba(152, 0, 0, .8)')) fig.add_trace(go.Scatter( x=result_analytical['t'], y=result_analytical['M_2'], name='Tank 2 - analytical', mode='markers',marker_color='rgba(152, 0, 0, .8)')) fig.update_layout(title='Liquid mass in tanks - analytical vs. 
numerical solution', yaxis_zeroline=False, xaxis_zeroline=False,xaxis_title="time, sec", yaxis_title="liquid mass M, kg",) fig.show() ``` ## Running the simulations - Case 2: Time dependent Cv We assign now a value to the parameter $\omega$, so that the valve has a characteristic parameter that is dependent from time, with a sinusoidal behaviour. To obtain the solution of the system for Case 2 the procedure is analogous to the previous case. ``` omega_2 = 1.5 # rad/s # Solve the model and collect hystorical data hdf_2 = SimpleHistoryDataFrame() s2, sol_2 = t_1_item_model(H0, Cv_0,omega_2, hdf_2, t_start_sim, t_stop_sim) ``` ### Plotting the results ``` # Plot the results # Nb: the package plotly is needed for the plotting from plotly import __version__ from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot import plotly.graph_objs as go fig = go.Figure() fig.add_trace(go.Scatter( x=hdf_2.df.index.total_seconds(), y=hdf_2.df['subsystem.tank_1.v1.H'], name='Tank 1', mode='lines',line_color='rgba(102,51,255,1)')) fig.add_trace(go.Scatter( x=hdf_2.df.index.total_seconds(), y=hdf_2.df['subsystem.tank_2.v1.H'], name='Tank 2', mode='lines',line_color='rgba(152, 0, 0, .8)')) fig.update_layout(title='Liquid height in tanks', yaxis_zeroline=False, xaxis_zeroline=False,xaxis_title="time, sec", yaxis_title="liquid height H, m",) fig.show() fig = go.Figure() fig.add_trace(go.Scatter( x=hdf_2.df.index.total_seconds(), y= hdf_2.df['subsystem.tank_1.v1.H']*rho_water*A0[0], name='Tank 1', mode='lines',line_color='rgba(102,51,255,1)')) fig.add_trace(go.Scatter( x=hdf_2.df.index.total_seconds(), y=hdf_2.df['subsystem.tank_2.v1.H']*rho_water*A0[1], name='Tank 2', mode='lines',line_color='rgba(152, 0, 0, .8)')) fig.update_layout(title='Liquid mass in tanks', yaxis_zeroline=False, xaxis_zeroline=False,xaxis_title="time, sec", yaxis_title="liquid mass M, kg",) fig.show() fig = go.Figure() fig.add_trace(go.Scatter( x=hdf_2.df.index.total_seconds(), y= 
hdf_2.df['subsystem.valve_1.v1.Cv'], name='Cv', mode='lines',line_color='rgba(41, 241, 195, 1)')) fig.update_layout(title='Valve characteristc parameter', yaxis_zeroline=False, xaxis_zeroline=False,xaxis_title="time, sec", yaxis_title="Cv, (kg/s)/Pa",) fig.show() ```
github_jupyter
``` import numpy as np import tensorflow as tf import matplotlib.pyplot as plt import cv2 as cv %matplotlib inline import os pastas = [] for p in sorted(os.listdir("DatasetBBOX")): if len(os.listdir("DatasetBBOX/"+p)) > 0: pastas.append(p) template = [0 for i in range(len(pastas))] dic = {} for i,item in enumerate(pastas): copy = template.copy() copy[i] = 1 dic[item] = copy print(dic) print(pastas) def process_img(img,coef=0.5): img = cv.bilateralFilter(img,9,75,75) gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) #new_size = (int(gray.shape[0]*coef),int(gray.shape[1]*coef)) #gray = cv.resize(gray,new_size,cv.INTER_CUBIC) return gray.reshape((gray.shape[0],gray.shape[1],1)) from tqdm.notebook import tqdm, trange dataset = [] classes = [] for pasta in pastas: num = len(os.listdir("DatasetBBOX/"+pasta)) for i in trange(0,num): img = cv.imread("DatasetBBOX/"+pasta+"/img"+str(i)+".png") processed_img = process_img(img) dataset.append(processed_img) classes.append(dic[pasta]) dataset = np.array(dataset) classes = np.array(classes) print(dataset.shape,classes.shape) plt.imshow(dataset[0].reshape(dataset[0].shape[0],dataset[0].shape[1])) os.listdir("BBOXS") import pickle from sklearn.preprocessing import MinMaxScaler bboxs = [] scaler = MinMaxScaler() coef = 0.5 for file in sorted(os.listdir("BBOXS")): with open("BBOXS/"+file,"rb") as f: print(file) conj = pickle.load(f) #for i in range(len(conj)): #n = conj[i] #conj[i] = ([int(n[0]*coef),int(n[1]*coef),int(n[2]*coef),int(n[3]*coef)]) #conj = scaler.fit_transform(conj) bboxs.extend(conj) bboxs = np.array(bboxs) print(bboxs.shape) print(bboxs[0:10]) ``` 1. calcular centro: - x+w/2 - y+h/2 (2,3) 2. subtrai x,y do centro: (2,3) - (3,5) = (-1,-2) 3. multiplica pela escala (0.5): (-1,-2) * 0.5 = (-0.5,-1) 4. 
soma o centro: (-0.5,-1)+(3,5) = (2.5,4) ``` i = 0 res = dataset[i].reshape((dataset[i].shape[0],dataset[i].shape[1])).copy() bbox = bboxs[i] cv.rectangle(res,pt1=(bbox[0],bbox[1]),pt2=(bbox[2],bbox[3]),color=(0),thickness=2) print(bbox) plt.imshow(res) i = 50 res = dataset[i].reshape((dataset[i].shape[0],dataset[i].shape[1])).copy() res = cv.resize(res,(res.shape[0]//2,res.shape[1]//2),cv.INTER_CUBIC) bbox = bboxs[i].copy() centerx = bbox[0]/2 centery = bbox[1]/2 print(bbox,centerx,centery) bbox[0] -= centerx bbox[1] -= centery bbox[0] *= 0.5 bbox[1] *= 0.5 bbox[0] += centerx bbox[1] += centery bbox[2] -= centerx bbox[3] -= centery bbox[2] *= 0.5 bbox[3] *= 0.5 bbox[2] += centerx bbox[3] += centery print(bbox,"fim") cv.rectangle(res,pt1=(bbox[0],bbox[1]),pt2=(int(bbox[2]),int(bbox[3])),color=(0),thickness=2) plt.imshow(res) from sklearn.model_selection import train_test_split bbox_class_indexs = [i for i in range(len(dataset))] X_train, X_test, y_train, y_test = train_test_split(dataset, bbox_class_indexs, test_size=0.3) print(X_train.shape,X_test.shape) y_train_bbox = np.array([bboxs[i] for i in y_train]) y_train_class = np.array([classes[i] for i in y_train]) print(y_train_class.shape,y_train_bbox.shape) y_test_bbox = np.array([bboxs[i] for i in y_test]) y_test_class = np.array([classes[i] for i in y_test]) print(y_test_class.shape,y_test_bbox.shape) SHAPE_CNN = (dataset[0].shape[0],dataset[0].shape[1],1) OUTPUT_SHAPE = 2 LR = 0.00094 def feature_extractor(inputs): x = tf.keras.layers.Conv2D(8,kernel_size=3,activation='relu',input_shape=(SHAPE_CNN[0],SHAPE_CNN[1],1),padding='same')(inputs) x = tf.keras.layers.MaxPool2D((2,2))(x) x = tf.keras.layers.Conv2D(16,kernel_size=3,activation='relu',padding='same')(x) x = tf.keras.layers.MaxPool2D((2,2))(x) x = tf.keras.layers.Conv2D(32,kernel_size=3,activation='relu',padding='same')(x) x = tf.keras.layers.MaxPool2D((2,2))(x) x = tf.keras.layers.Conv2D(64,kernel_size=3,activation='relu',padding='same')(x) x = 
tf.keras.layers.MaxPool2D((2,2))(x) x = tf.keras.layers.Conv2D(128,kernel_size=3,activation='relu',padding='same')(x) x = tf.keras.layers.MaxPool2D((2,2))(x) return x def hidden_layers(inputs): x = tf.keras.layers.Flatten()(inputs) x = tf.keras.layers.Dense(16,activation='relu')(x) x = tf.keras.layers.Dense(32,activation='relu')(x) x = tf.keras.layers.Dense(32,activation='relu')(x) x = tf.keras.layers.Dense(64,activation='relu')(x) return x def classifier(inputs): classification = tf.keras.layers.Dense(OUTPUT_SHAPE,activation='sigmoid',name='classification')(inputs) return classification def bounding_box_regression(inputs): bounding_box_regression_output = tf.keras.layers.Dense(units = '4', name = 'bounding_box')(inputs) return bounding_box_regression_output def final_model(inputs): cnn_layer = feature_extractor(inputs) dense_layers = hidden_layers(cnn_layer) classification_layer = classifier(dense_layers) bbox_regression = bounding_box_regression(dense_layers) model = tf.keras.Model(inputs=inputs, outputs=[classification_layer,bbox_regression]) return model def define_and_compile_model(inputs): from tensorflow.keras.optimizers import SGD from tensorflow.keras.optimizers import Adam model = final_model(inputs) model.compile( optimizer=Adam(learning_rate=LR), loss = {'classification' : 'binary_crossentropy', 'bounding_box' : 'mse' }, metrics = {'classification' : 'accuracy', 'bounding_box' : 'mse' } ) return model inputs = tf.keras.layers.Input(shape=(SHAPE_CNN[0],SHAPE_CNN[1],1)) model = define_and_compile_model(inputs) model.summary() model.fit(X_train,(y_train_class,y_train_bbox),epochs=10) predict,bbox = model.predict(dataset[0].reshape((1,dataset[0].shape[0],dataset[0].shape[1],1))) print(predict,bbox) bbox_norm = scaler.inverse_transform(bbox).astype(int)[0] print(bbox_norm) res = dataset[0].reshape(dataset[0].shape[0],dataset[0].shape[1]).copy() cv.rectangle(res,pt1=(bbox_norm[1],bbox_norm[0]),pt2=(bbox_norm[3],bbox_norm[2]),color=(255),thickness=2) 
plt.imshow(res) tf.keras.models.save_model(model,"Models/model_loc1.h5") ```
github_jupyter
# Curvature Both convexity and the curvature distribution are computed from the same surface. For a molecular dynamics simulation, the creation of that surface is computationally expensive, relatively speaking. It is likely best to compute convexity and the curvature distribution together. Our experience was that creating the mesh surface accounted for roughly half of the computational time, with the calculation of the curvature distribution taking up most of the other half. In any case, we recommend running a test of just a few frames to get the timing information and determine if you need to run the calculation in parallel or not. Finally, this code relies heavily on the PyTim package. This is the package that can compute the Willard-Chandler surface and turn a collection of atoms into an analyzable mesh surface. However, the PyTim package only seems to work on Linux and Mac operating systems. We're looking into re-writing the PyTim package to interface with Windows systems but that is not implemented at this time. There is, of course, the possibility that there's a required dependency for Windows that I have not properly adjusted for, so you can try investigating that option as well. ## File Size The other issue to be considered with this analysis is the amount of data you generate. For our relatively small micelles, the typical mesh had 1-3,000 points and this code will compute curvature at every point. This means that for a decent sized trajectory, you are computing and writing to file several million to tens of millions of data points for txt files in the Gb file size ranges. There are a couple of options you may wish to explore to help reduce the size of these files. First, one of our biggest discoveries about the curvature distribution was that for the typical MD simulation where stoichiometry is set, the constant volume of your object keeps the curvature distribution relatively static. 
You can almost certainly get away with radically reduced sampling in time. Computing the curvature every 100 ps is probably a good high accuracy target, and every 500 ps-1 ns is probably a perfectly fine sampling rate. We have only tested these on reverse micelles, specifically a system with only one, pre-built reverse micelle, so we cannot guarantee this works for every system. However, it should work for most systems, especially if you meet the constant volume condition. Second, a major issue with the curvature distributions is the creation of outliers. We do not have a real, curved surface and instead have small sections of straight lines approximating a curved surface. The curvature will only ever be estimated and even the best algorithm will occasionally produce erroneously large values. Removing these outliers will at least slightly reduce the amount of data involved. We are working on producing a function to do this, but in the meantime, that work falls on you. Third, the most likely thing you'll do with this data is take a histogram of the values at each time point. If that is what you intend to do with it, then you can take a histogram before writing to file and write the histogram to file instead of the raw values. For even a large number of bins, you should be reducing the number of data entries by at least an order of magnitude this way. ``` # Prereq Packages import numpy as np import MDAnalysis as mda from scipy import spatial import pytim import pyvista as pv from pytim.datafiles import * ``` ## The Willard-Chandler Surface The interface needed to create a Willard-Chandler surface of the micelle is relatively simple. We are demonstrating its use with a trajectory loaded via MDAnalysis. Check out PyTim's GitHub page for example usage in other interfaces using the link below. https://github.com/Marcello-Sega/pytim We specifically modelled using the GROMACS package and those are the file types we're showing here. 
See MDAnalysis documentation for other acceptable file types. https://userguide.mdanalysis.org/stable/examples/quickstart.html ``` # Importing your trajectory as a universe object # I prefer to set file names here for easy changing and reading Path = 'Path/To/Files/' Top = 'Your_Topology.gro' #"lists atoms, residues, and their connectivity" Traj = 'Your_Trajectory.xtc' #'contains a list of coordinates in the order defined in the topology' # creating a universe object # equivalently, loading the trajectory into MDAnalysis u = mda.Universe(Path+Top,Path+Traj) # now we need to select the atoms that will define our surface # MDAnalysis has a rich atom selection language # Here is the selection used for our micelles # Selection includes the water interior as well as the hydrophilic head group # of AOT up to the ester oxygens Core_Selection = '(resname AOT and (type O or type S or name C1 or name H1 or name C2 or name H2 or name H3 or name C3 or name C12)) or resname SOL' # Now to actually create a selection Core = u.select_atoms(Core_Selection) ``` ## Radii Dictionary The Willard-Chandler surface defines a continuous surface "as the isodensity surface of a Gaussian kernel density estimate". As such, it needs to know what Gaussian estimate of electron density to use for each type of atom. PyTim has default values set up for the CHARMM27, Amber03, and G43a1, but you will need to build your own dictionary for other force fields. The main purpose is just to define where the atoms you're interested in are, so small changes or errors in the Gaussian electron density should not impact your shape significantly. A simple copy of one of the pre-built dictionaries with a similar atom type mapping should be sufficient. PyTim may have a function to create this type of map for an arbitrary force field and we will look into adapting that into a usable function in the future. 
Reference: Sega, M.; Hantal, G.; Fabian, B.; Jedlovszky, P.; Pytim: A Python Package for the Interfacial Analysis of Molecular Simulations. *J. Comp. Chem.*,**2018**, *39*. 2118-2125. Willard Chandler Documentation: https://marcello-sega.github.io/pytim/WillardChandler.html ``` ''' Creating the Willard-Chandler surface The first argument is your universe object "group" is the selection of atoms you're interested in Alpha will control the precision that the Gaussian density functions are computed out to. 3 should be more than adequate. The mesh value will control the grid spacing of points the volume is sampled on. A smaller number produces a smoother surface with more points on it at the cost of dramatically increased computational time (~O(N^2)) 1.1 seems perfectly adequate for shape analyses. We used 0.5 to produce cleaner surfaces on the example micelle images in Figure 4 of our paper, but it took long enough that we do not recommend this for analyzing a full trajectory "fast" gives the option to use a faster version with truncated Gaussians. We did not experiment much with this option and opted not to use it. Feel free to set it to your preference. ''' WC = pytim.WillardChandler(u,group=Core,alpha=3.0, mesh=1.1,fast=False, radii_dict=pytim_data.vdwradii(CHARMM27_TOP)) ``` ## Mesh I/O Format The actual creation of the Willard-Chandler surface is very easy and convenient with PyTim. The only catch is the output. We are using PyVista as a powerful and convenient package to handle manipulating, analyzing and visualizing the mesh. However, PyVista requires that the output be in a specific format. A typical mesh output usually contains 2 arrays. The first is just a list of the x, y, and z coordinates of every point. The second is a list of how those points connect to create the surface. Usually, that second list is of a form like: >[1,4,3]\ [1,6,9,2] etc. Where each number is the index of the point from the list of xyz coordinates. 
PyTim creates a Delaunay triangulated mesh where every face on the surface is made up of only 3 points. However, in general a mesh can have faces of any number of vertices. So PyTim spits out a nice, concise array with only 3 columns, but PyVista requires a format that is more general to accommodate any number of vertices in a face. They do this by creating a flattened (1D) array where the first number denotes the number of vertices for that face, then the index number of each vertex in that face, then the number of vertices for the next face, and so on. This is all a long way to say that this is why the code looks a bit more complicated than it needs to be. We're just adjusting formats from PyTim to PyVista styles. ``` # adjusting formats # getting vertex xyz coord array and face connectivity array verts = WC.triangulated_surface[0] faces = WC.triangulated_surface[1] # creating blank list of 3s to match PyVista formatting threes = 3*np.ones((faces.shape[0],1),dtype=int) # converting faces = np.concatenate((threes,faces),axis=1) # creating a PyVista PolyData object to handle the mesh Poly = pv.PolyData(verts,faces) ''' Computing Curvature PyVista has a function to compute the curvature values by default This will compute the curvature at every vertex in your mesh, which can be an immense amount of data. While simple, this will be one of the most computationally expensive portions of the code. The data I write to file in the python files includes all points in case you want to do a more nuanced method of aggregating the data. However, you could dramatically reduce the file size by taking the histogram of the curvature values and only writing those results to file (for micelles, we had 1-3,000 points while a good histogram only had 100 bins for 200 items written to file) ''' mean_curv = Poly.curvature(curv_type='mean') G_curv = Poly.curvature(curv_type='Gaussian') ```
github_jupyter
##### Copyright 2020 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Hello, many worlds <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/quantum/tutorials/hello_many_worlds"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/quantum/blob/master/docs/tutorials/hello_many_worlds.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/quantum/blob/master/docs/tutorials/hello_many_worlds.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/quantum/docs/tutorials/hello_many_worlds.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This tutorial shows how a classical neural network can learn to correct qubit calibration errors. It introduces <a target="_blank" href="https://github.com/quantumlib/Cirq" class="external">Cirq</a>, a Python framework to create, edit, and invoke Noisy Intermediate Scale Quantum (NISQ) circuits, and demonstrates how Cirq interfaces with TensorFlow Quantum. 
## Setup ``` !pip install tensorflow==2.3.1 ``` Install TensorFlow Quantum: ``` !pip install tensorflow-quantum ``` Now import TensorFlow and the module dependencies: ``` import tensorflow as tf import tensorflow_quantum as tfq import cirq import sympy import numpy as np # visualization tools %matplotlib inline import matplotlib.pyplot as plt from cirq.contrib.svg import SVGCircuit ``` ## 1. The Basics ### 1.1 Cirq and parameterized quantum circuits Before exploring TensorFlow Quantum (TFQ), let's look at some <a target="_blank" href="https://github.com/quantumlib/Cirq" class="external">Cirq</a> basics. Cirq is a Python library for quantum computing from Google. You use it to define circuits, including static and parameterized gates. Cirq uses <a target="_blank" href="https://www.sympy.org" class="external">SymPy</a> symbols to represent free parameters. ``` a, b = sympy.symbols('a b') ``` The following code creates a two-qubit circuit using your parameters: ``` # Create two qubits q0, q1 = cirq.GridQubit.rect(1, 2) # Create a circuit on these qubits using the parameters you created above. circuit = cirq.Circuit( cirq.rx(a).on(q0), cirq.ry(b).on(q1), cirq.CNOT(control=q0, target=q1)) SVGCircuit(circuit) ``` To evaluate circuits, you can use the `cirq.Simulator` interface. You replace free parameters in a circuit with specific numbers by passing in a `cirq.ParamResolver` object. The following code calculates the raw state vector output of your parameterized circuit: ``` # Calculate a state vector with a=0.5 and b=-0.5. resolver = cirq.ParamResolver({a: 0.5, b: -0.5}) output_state_vector = cirq.Simulator().simulate(circuit, resolver).final_state_vector output_state_vector ``` State vectors are not directly accessible outside of simulation (notice the complex numbers in the output above). To be physically realistic, you must specify a measurement, which converts a state vector into a real number that classical computers can understand. 
Cirq specifies measurements using combinations of the <a target="_blank" href="https://en.wikipedia.org/wiki/Pauli_matrices" class="external">Pauli operators</a> $\hat{X}$, $\hat{Y}$, and $\hat{Z}$. As illustration, the following code measures $\hat{Z}_0$ and $\frac{1}{2}\hat{Z}_0 + \hat{X}_1$ on the state vector you just simulated: ``` z0 = cirq.Z(q0) qubit_map={q0: 0, q1: 1} z0.expectation_from_state_vector(output_state_vector, qubit_map).real z0x1 = 0.5 * z0 + cirq.X(q1) z0x1.expectation_from_state_vector(output_state_vector, qubit_map).real ``` ### 1.2 Quantum circuits as tensors TensorFlow Quantum (TFQ) provides `tfq.convert_to_tensor`, a function that converts Cirq objects into tensors. This allows you to send Cirq objects to our <a target="_blank" href="https://www.tensorflow.org/quantum/api_docs/python/tfq/layers">quantum layers</a> and <a target="_blank" href="https://www.tensorflow.org/quantum/api_docs/python/tfq/get_expectation_op">quantum ops</a>. The function can be called on lists or arrays of Cirq Circuits and Cirq Paulis: ``` # Rank 1 tensor containing 1 circuit. circuit_tensor = tfq.convert_to_tensor([circuit]) print(circuit_tensor.shape) print(circuit_tensor.dtype) ``` This encodes the Cirq objects as `tf.string` tensors that `tfq` operations decode as needed. ``` # Rank 1 tensor containing 2 Pauli operators. pauli_tensor = tfq.convert_to_tensor([z0, z0x1]) pauli_tensor.shape ``` ### 1.3 Batching circuit simulation TFQ provides methods for computing expectation values, samples, and state vectors. For now, let's focus on *expectation values*. The highest-level interface for calculating expectation values is the `tfq.layers.Expectation` layer, which is a `tf.keras.Layer`. In its simplest form, this layer is equivalent to simulating a parameterized circuit over many `cirq.ParamResolvers`; however, TFQ allows batching following TensorFlow semantics, and circuits are simulated using efficient C++ code. 
Create a batch of values to substitute for our `a` and `b` parameters: ``` batch_vals = np.array(np.random.uniform(0, 2 * np.pi, (5, 2)), dtype=np.float32) ``` Batching circuit execution over parameter values in Cirq requires a loop: ``` cirq_results = [] cirq_simulator = cirq.Simulator() for vals in batch_vals: resolver = cirq.ParamResolver({a: vals[0], b: vals[1]}) final_state_vector = cirq_simulator.simulate(circuit, resolver).final_state_vector cirq_results.append( [z0.expectation_from_state_vector(final_state_vector, { q0: 0, q1: 1 }).real]) print('cirq batch results: \n {}'.format(np.array(cirq_results))) ``` The same operation is simplified in TFQ: ``` tfq.layers.Expectation()(circuit, symbol_names=[a, b], symbol_values=batch_vals, operators=z0) ``` ## 2. Hybrid quantum-classical optimization Now that you've seen the basics, let's use TensorFlow Quantum to construct a *hybrid quantum-classical neural net*. You will train a classical neural net to control a single qubit. The control will be optimized to correctly prepare the qubit in the `0` or `1` state, overcoming a simulated systematic calibration error. This figure shows the architecture: <img src="./images/nn_control1.png" width="1000"> Even without a neural network this is a straightforward problem to solve, but the theme is similar to the real quantum control problems you might solve using TFQ. It demonstrates an end-to-end example of a quantum-classical computation using the `tfq.layers.ControlledPQC` (Parametrized Quantum Circuit) layer inside of a `tf.keras.Model`. For the implementation of this tutorial, this architecture is split into 3 parts: - The *input circuit* or *datapoint circuit*: The first three $R$ gates. - The *controlled circuit*: The other three $R$ gates. - The *controller*: The classical neural-network setting the parameters of the controlled circuit. ### 2.1 The controlled circuit definition Define a learnable single bit rotation, as indicated in the figure above. 
This will correspond to our controlled circuit. ``` # Parameters that the classical NN will feed values into. control_params = sympy.symbols('theta_1 theta_2 theta_3') # Create the parameterized circuit. qubit = cirq.GridQubit(0, 0) model_circuit = cirq.Circuit( cirq.rz(control_params[0])(qubit), cirq.ry(control_params[1])(qubit), cirq.rx(control_params[2])(qubit)) SVGCircuit(model_circuit) ``` ### 2.2 The controller Now define controller network: ``` # The classical neural network layers. controller = tf.keras.Sequential([ tf.keras.layers.Dense(10, activation='elu'), tf.keras.layers.Dense(3) ]) ``` Given a batch of commands, the controller outputs a batch of control signals for the controlled circuit. The controller is randomly initialized so these outputs are not useful, yet. ``` controller(tf.constant([[0.0],[1.0]])).numpy() ``` ### 2.3 Connect the controller to the circuit Use `tfq` to connect the controller to the controlled circuit, as a single `keras.Model`. See the [Keras Functional API guide](https://www.tensorflow.org/guide/keras/functional) for more about this style of model definition. First define the inputs to the model: ``` # This input is the simulated miscalibration that the model will learn to correct. circuits_input = tf.keras.Input(shape=(), # The circuit-tensor has dtype `tf.string` dtype=tf.string, name='circuits_input') # Commands will be either `0` or `1`, specifying the state to set the qubit to. commands_input = tf.keras.Input(shape=(1,), dtype=tf.dtypes.float32, name='commands_input') ``` Next apply operations to those inputs, to define the computation. ``` dense_2 = controller(commands_input) # TFQ layer for classically controlled circuits. expectation_layer = tfq.layers.ControlledPQC(model_circuit, # Observe Z operators = cirq.Z(qubit)) expectation = expectation_layer([circuits_input, dense_2]) ``` Now package this computation as a `tf.keras.Model`: ``` # The full Keras model is built from our layers. 
model = tf.keras.Model(inputs=[circuits_input, commands_input], outputs=expectation) ``` The network architecture is indicated by the plot of the model below. Compare this model plot to the architecture diagram to verify correctness. Note: May require a system install of the `graphviz` package. ``` tf.keras.utils.plot_model(model, show_shapes=True, dpi=70) ``` This model takes two inputs: The commands for the controller, and the input-circuit whose output the controller is attempting to correct. ### 2.4 The dataset The model attempts to output the correct measurement value of $\hat{Z}$ for each command. The commands and correct values are defined below. ``` # The command input values to the classical NN. commands = np.array([[0], [1]], dtype=np.float32) # The desired Z expectation value at output of quantum circuit. expected_outputs = np.array([[1], [-1]], dtype=np.float32) ``` This is not the entire training dataset for this task. Each datapoint in the dataset also needs an input circuit. ### 2.4 Input circuit definition The input-circuit below defines the random miscalibration the model will learn to correct. ``` random_rotations = np.random.uniform(0, 2 * np.pi, 3) noisy_preparation = cirq.Circuit( cirq.rx(random_rotations[0])(qubit), cirq.ry(random_rotations[1])(qubit), cirq.rz(random_rotations[2])(qubit) ) datapoint_circuits = tfq.convert_to_tensor([ noisy_preparation ] * 2) # Make two copies of this circuit ``` There are two copies of the circuit, one for each datapoint. ``` datapoint_circuits.shape ``` ### 2.5 Training With the inputs defined you can test-run the `tfq` model. ``` model([datapoint_circuits, commands]).numpy() ``` Now run a standard training process to adjust these values towards the `expected_outputs`. 
``` optimizer = tf.keras.optimizers.Adam(learning_rate=0.05) loss = tf.keras.losses.MeanSquaredError() model.compile(optimizer=optimizer, loss=loss) history = model.fit(x=[datapoint_circuits, commands], y=expected_outputs, epochs=30, verbose=0) plt.plot(history.history['loss']) plt.title("Learning to Control a Qubit") plt.xlabel("Iterations") plt.ylabel("Error in Control") plt.show() ``` From this plot you can see that the neural network has learned to overcome the systematic miscalibration. ### 2.6 Verify outputs Now use the trained model to correct the qubit calibration errors. With Cirq: ``` def check_error(command_values, desired_values): """Based on the value in `command_value` see how well you could prepare the full circuit to have `desired_value` when taking expectation w.r.t. Z.""" params_to_prepare_output = controller(command_values).numpy() full_circuit = noisy_preparation + model_circuit # Test how well you can prepare a state to get the expectation # value in `desired_values` for index in [0, 1]: state = cirq_simulator.simulate( full_circuit, {s:v for (s,v) in zip(control_params, params_to_prepare_output[index])} ).final_state_vector expt = cirq.Z(qubit).expectation_from_state_vector(state, {qubit: 0}).real print(f'For a desired output (expectation) of {desired_values[index]} with' f' noisy preparation, the controller\nnetwork found the following ' f'values for theta: {params_to_prepare_output[index]}\nWhich gives an' f' actual expectation of: {expt}\n') check_error(commands, expected_outputs) ``` The value of the loss function during training provides a rough idea of how well the model is learning. The lower the loss, the closer the expectation values in the above cell are to `desired_values`. 
If you aren't as concerned with the parameter values, you can always check the outputs from above using `tfq`: ``` model([datapoint_circuits, commands]) ``` ## 3 Learning to prepare eigenstates of different operators The choice of the $\pm \hat{Z}$ eigenstates corresponding to 1 and 0 was arbitrary. You could have just as easily wanted 1 to correspond to the $+ \hat{Z}$ eigenstate and 0 to correspond to the $-\hat{X}$ eigenstate. One way to accomplish this is by specifying a different measurement operator for each command, as indicated in the figure below: <img src="./images/nn_control2.png" width="1000"> This requires use of <code>tfq.layers.Expectation</code>. Now your input has grown to include three objects: circuit, command, and operator. The output is still the expectation value. ### 3.1 New model definition Let's take a look at the model to accomplish this task: ``` # Define inputs. commands_input = tf.keras.layers.Input(shape=(1), dtype=tf.dtypes.float32, name='commands_input') circuits_input = tf.keras.Input(shape=(), # The circuit-tensor has dtype `tf.string` dtype=tf.dtypes.string, name='circuits_input') operators_input = tf.keras.Input(shape=(1,), dtype=tf.dtypes.string, name='operators_input') ``` Here is the controller network: ``` # Define classical NN. controller = tf.keras.Sequential([ tf.keras.layers.Dense(10, activation='elu'), tf.keras.layers.Dense(3) ]) ``` Combine the circuit and the controller into a single `keras.Model` using `tfq`: ``` dense_2 = controller(commands_input) # Since you aren't using a PQC or ControlledPQC you must append # your model circuit onto the datapoint circuit tensor manually. full_circuit = tfq.layers.AddCircuit()(circuits_input, append=model_circuit) expectation_output = tfq.layers.Expectation()(full_circuit, symbol_names=control_params, symbol_values=dense_2, operators=operators_input) # Construct your Keras model. 
two_axis_control_model = tf.keras.Model( inputs=[circuits_input, commands_input, operators_input], outputs=[expectation_output]) ``` ### 3.2 The dataset Now you will also include the operators you wish to measure for each datapoint you supply for `model_circuit`: ``` # The operators to measure, for each command. operator_data = tfq.convert_to_tensor([[cirq.X(qubit)], [cirq.Z(qubit)]]) # The command input values to the classical NN. commands = np.array([[0], [1]], dtype=np.float32) # The desired expectation value at output of quantum circuit. expected_outputs = np.array([[1], [-1]], dtype=np.float32) ``` ### 3.3 Training Now that you have your new inputs and outputs you can train once again using keras. ``` optimizer = tf.keras.optimizers.Adam(learning_rate=0.05) loss = tf.keras.losses.MeanSquaredError() two_axis_control_model.compile(optimizer=optimizer, loss=loss) history = two_axis_control_model.fit( x=[datapoint_circuits, commands, operator_data], y=expected_outputs, epochs=30, verbose=1) plt.plot(history.history['loss']) plt.title("Learning to Control a Qubit") plt.xlabel("Iterations") plt.ylabel("Error in Control") plt.show() ``` The loss function has dropped to zero. The `controller` is available as a stand-alone model. Call the controller, and check its response to each command signal. It would take some work to correctly compare these outputs to the contents of `random_rotations`. ``` controller.predict(np.array([0,1])) ``` Success: See if you can adapt the `check_error` function from your first model to work with this new model architecture.
github_jupyter
# Breast cancer analysis using fastai tabular application ``` from fastai.tabular.all import *; df = pd.read_csv('breast_cancer_dataset.csv') headers = list(df.columns) headers.pop() dls = TabularDataLoaders.from_csv('breast_cancer_dataset.csv', y_names="label", cont_names = headers, procs = [Categorify, FillMissing, Normalize]) ``` The last part is the list of pre-processors we apply to our data: - `Categorify` is going to take every categorical variable and make a map from integer to unique categories, then replace the values by the corresponding index. - `FillMissing` will fill the missing values in the continuous variables by the median of existing values (you can choose a specific value if you prefer) - `Normalize` will normalize the continuous variables (subtract the mean and divide by the std) ``` splits = RandomSplitter(valid_pct=0.3)(range_of(df)) ``` **Note**: Since the labels are encoded (as 0 and 1), we explicitly pass `y_block = CategoryBlock` in the constructor so that `fastai` does not presume we are doing regression. ``` to = TabularPandas(df, procs=[Categorify, FillMissing, Normalize], cont_names = headers, y_names='label', splits=splits, y_block = CategoryBlock) ``` Once we build our `TabularPandas` object, our data is completely preprocessed as seen below: ``` to.xs.iloc[:2] ``` Now we can build our `DataLoaders` again: ``` dls = to.dataloaders(bs=32) dls.show_batch() learn = tabular_learner(dls, metrics=accuracy) ``` We can train that model with the `fit_one_cycle` method. ``` import time start_time = time.time_ns() learn.fit_one_cycle(8) training_time = time.time_ns() - start_time ``` We can then have a look at some predictions: ``` learn.show_results() ``` Or use the predict method on a row: ``` row, clas, probs = learn.predict(df.iloc[10]) row.show() clas, probs ``` To get prediction on a new dataframe, you can use the `test_dl` method of the `DataLoaders`. That dataframe does not need to have the dependent variable in its column. 
``` test_df = df.copy() test_df.drop(['label'], axis=1, inplace=True) dl = learn.dls.test_dl(test_df) ``` Use `Learner.get_preds` to get the predictions and then compare them with the actual labels. ``` pred = learn.get_preds(dl=dl) pred = pd.DataFrame(pred[0]).iloc[:,0].apply(lambda x:0 if x >= 0.5 else 1) pred = pred == df['label'] Overall_accuracy = np.count_nonzero(pred==True)*100/len(df) print("Overall accuracy: {}, Training time: {}\n".format(round(Overall_accuracy,2), round(training_time/1000000000,2))) ```
github_jupyter
## Generating metre spaced subnodes for a route In this notebook we define a function which generates a list of metre spaced points given a list of nodes along with their osmid's for a given route. Given these metre spaced points we can use LIDAR data to detect any short steep ascents for a given route, which may be staircases or ramps which are too steep for a wheelchair user. ``` import osmnx as ox import networkx as nx import pandas as pd from vincenty import vincenty import numpy as np def getSubnodes(route_map,route): """ Get roughly metre-spaced subnodes along a route Parameters: route_map (graph): graph of walking map. route (list): list of nodes. Returns: subnodes (list): list of dictionaries detailing lat,lon and osmid of the containing edge for each subnode """ subnodes = [] for i in range(len(route)-1): # break up each edge into straigt line segments if "geometry" in route_map.edges[(route[i],route[i+1],0)]: _edge = route_map.edges[(route[i],route[i+1],0)] _edge_geometry = _edge["geometry"] _joints = [(coord[1],coord[0]) for coord in list(_edge_geometry.coords)] #_joints = [{"lat":coord[1],"lon":coord[0],"edge_osmid":_edge["osmid"]} for coord in list(_edge_geometry.coords)] else: _edge = route_map.edges[(route[i],route[i+1],0)] _joints = [(route_map.nodes[route[i]]['y'],route_map.nodes[route[i]]['x']), (route_map.nodes[route[i+1]]['y'],route_map.nodes[route[i+1]]['x'])] if len(_joints) < 2: raise TypeError("No joints found!") for j in range(len(_joints)-1): # for each straight line segment generate metre-spaced subnodes _dist = 1000*vincenty(_joints[j],_joints[j+1]) _n = int(_dist) if _n > 1: _edge_subnodes = [{"lat":subnode[0],"lon":subnode[1],"node_osmids":(route[i],route[i+1])} for subnode in np.linspace(_joints[j],_joints[j+1],_n+1)] else: _edge_subnodes = [{"lat":subnode[0],"lon":subnode[1],"node_osmids":(route[i],route[i+1])} for subnode in [_joints[j],_joints[j+1]]] # avoid adding duplicate subnodes at the endpoints subnodes += 
list(_edge_subnodes[:-1]) # add the final node separately subnodes.append({"lat":route_map.nodes[route[-1]]['y'],"lon":route_map.nodes[route[-1]]['x'],"node_osmids":(route[i],route[i+1])}) return subnodes ```
github_jupyter
## Training This script executes a training experiment on Azure ML. Once the data is prepared, you can train a model and see the results on Azure. #### There are several steps to follow: * Configure the workspace * Create an experiment * Create or attach a compute cluster * Upload the data to Azure * Create an estimator * Submit the work to the remote cluster * Register the model Import Azure Machine Learning Python SDK and other modules. ``` import datetime as dt import math import os import urllib.request import warnings import azureml.core import azureml.dataprep as dprep import matplotlib.pyplot as plt import numpy as np import pandas as pd from azureml.core import Experiment, Workspace from azureml.core.compute import AmlCompute, ComputeTarget from azureml.core.environment import Environment from azureml.train.estimator import Estimator from IPython.display import Image, display from sklearn.preprocessing import MinMaxScaler from statsmodels.tsa.statespace.sarimax import SARIMAX get_ipython().run_line_magic("matplotlib", "inline") pd.options.display.float_format = "{:,.2f}".format np.set_printoptions(precision=2) warnings.filterwarnings("ignore") # specify to ignore warning messages ``` ### Configure the workspace Set up your Azure Machine Learning services workspace and configure your notebook library. Make sure that you have the correct version of Azure ML SDK. If that's not the case, you can run: * `!pip install --upgrade azureml-sdk[automl,notebooks,explain]` * `!pip install --upgrade azuremlftk` Then configure your workspace and write the configuration to a [config.json](https://github.com/MicrosoftDocs/azure-docs/blob/master/articles/machine-learning/service/how-to-configure-environment.md#create-a-workspace-configuration-file) file or read your config.json file to get your workspace. As a second option, one can copy the config file from the Azure workspace in an `.azureml` folder. 
#### In an Azure workspace you will find: * Experiment results * Trained models * Compute targets * Deployment containers * Snapshots * Environments * and more For more information about the AML services workspace set up, see this [notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb). ``` print("This notebook was created using version 1.14.0 of the Azure ML SDK") print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK") # # Configure the workspace, if no config file has been downloaded. # # Give your subscription ID, your resource group, your workspace_name and your workspace_region # subscription_id = os.getenv("SUBSCRIPTION_ID", default="d0b8947b-5a39-4d74-944c-48c45b1ccdf3") # resource_group = os.getenv("RESOURCE_GROUP", default="aml") # workspace_name = os.getenv("WORKSPACE_NAME", default="timeseries") # workspace_region = os.getenv("WORKSPACE_REGION", default="centralus") # try: # ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name) # # write the details of the workspace to a configuration file to the notebook library # ws.write_config() # print("Workspace configuration succeeded") # except: # print("Workspace not accessible. Change your parameters or create a new workspace below") # Or take the configuration of the existing config.json file ws = Workspace.from_config() print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep="\n") ``` ### Create an environment We'll create an Azure Machine Learning environment which will help us specify requirements for our model training. This will help us ensure that we use the same versions of libraries such as statsmodels across training and serving. If the environment already exists, then the environment will be overwritten. 
``` my_azureml_env = Environment.from_conda_specification( name="my_azureml_env", file_path="./energydemandforecasting/azureml-env.yml" ) my_azureml_env.register(workspace=ws) ``` ### Create an experiment We’ll create an Azure Machine Learning experiment which will help keep track of the specific data used, as well as the model training job logs. If the experiment already exists on the selected workspace, the run will be added to the existing experiment. If not, the experiment will be added to the workspace. ``` experiment_name = "energydemandforecasting" exp = Experiment(workspace=ws, name=experiment_name) ``` ### Create or attach an existing compute cluster * For training an ARIMA model, a CPU cluster is enough. * Note the min_nodes parameter is 0, meaning by default this will have no machines in the cluster and it will automatically scale up and down, so you won't pay for the cluster when you’re not using it. * You can also enforce policies to control your costs. ``` # choose a name for your cluster compute_name = os.environ.get("AML_COMPUTE_CLUSTER_NAME", "cpucluster") compute_min_nodes = os.environ.get("AML_COMPUTE_CLUSTER_MIN_NODES", 0) compute_max_nodes = os.environ.get("AML_COMPUTE_CLUSTER_MAX_NODES", 4) # This example uses CPU VM. For using GPU VM, set SKU to STANDARD_NC6 vm_size = os.environ.get("AML_COMPUTE_CLUSTER_SKU", "STANDARD_D2_V2") if compute_name in ws.compute_targets: compute_target = ws.compute_targets[compute_name] if compute_target and type(compute_target) is AmlCompute: print("found compute target. just use it. " + compute_name) else: print("creating a new compute target...") provisioning_config = AmlCompute.provisioning_configuration( vm_size=vm_size, min_nodes=compute_min_nodes, max_nodes=compute_max_nodes ) # create the cluster compute_target = ComputeTarget.create(ws, compute_name, provisioning_config) # can poll for a minimum number of nodes and for a specific timeout. 
# if no min node count is provided, it will use the scale settings for the cluster compute_target.wait_for_completion( show_output=True, min_node_count=None, timeout_in_minutes=20 ) # For a more detailed view of current AmlCompute status, use 'get_status()' print(compute_target.get_status().serialize()) ``` ### Upload data to a datastore * Firstly, you can download GEFCom2014 dataset and save the files into a `data` directory locally, which can be done by executing the commented lines in the cell. The data in this example is taken from the GEFCom2014 forecasting competition<sup>1</sup>. It consists of 3 years of hourly electricity load and temperature values between 2012 and 2014. * Then, the data is uploaded to the default blob data storage attached to your workspace. The energy file is uploaded into a directory named energy_data at the root of the datastore. The upload of data must be run only the first time. If you run it again, it will skip the uploading of files already present on the datastore. <sup>1</sup>Tao Hong, Pierre Pinson, Shu Fan, Hamidreza Zareipour, Alberto Troccoli and Rob J. Hyndman, "Probabilistic energy forecasting: Global Energy Forecasting Competition 2014 and beyond", International Journal of Forecasting, vol.32, no.3, pp 896-913, July-September, 2016. 
``` # data = pd.read_csv("./data/energy.csv") # # Preview the first 5 lines of the loaded data # data.head() # save the files into a data directory locally data_folder = "./data" # data_folder = os.path.join(os.getcwd(), 'data') os.makedirs(data_folder, exist_ok=True) # import shutil # from common.utils import extract_data, download_file # if not os.path.exists(os.path.join(data_folder, 'energy.csv')): # # Download and move the zip file # download_file("https://mlftsfwp.blob.core.windows.net/mlftsfwp/GEFCom2014.zip") # shutil.move("GEFCom2014.zip", os.path.join(data_folder,"GEFCom2014.zip")) # # If not done already, extract zipped data and save as csv # extract_data(data_folder) # # get the default datastore ds = ws.get_default_datastore() print(ds.name, ds.datastore_type, ds.account_name, ds.container_name, sep="\n") # upload the data ds.upload( src_dir=data_folder, target_path="energy_data", overwrite=True, show_progress=True ) ds = ws.get_default_datastore() print(ds.datastore_type, ds.account_name, ds.container_name) ``` ### Create an estimator The following parameters will be given to the Estimator: * source directory: the directory which will be uploaded to Azure and contains the script `train.py`. * entry_script: the script that will be executed (train.py). * script_params: the parameters that will be given to the entry script. * compute_target: the compute cluster that was created above. * conda_dependencies_file: a conda environment yaml specifying the packages in your conda environment, that the script needs. For more information to define an estimator, see [here](https://docs.microsoft.com/de-ch/python/api/azureml-train-core/azureml.train.estimator.estimator?view=azure-ml-py). 
``` script_params = { "--data-folder": ds.path("energy_data").as_mount(), "--filename": "energy.csv", } script_folder = os.path.join(os.getcwd(), "energydemandforecasting") est = Estimator( source_directory=script_folder, script_params=script_params, compute_target=compute_target, entry_script="train.py", conda_dependencies_file="azureml-env.yml", ) ``` ### Submit the job to the cluster ``` run = exp.submit(config=est) # specify show_output to True for a verbose log run.wait_for_completion(show_output=False) ``` ### Register model As a last step, we register the model in the workspace, which saves it under 'Models' on Azure, so that you and other collaborators can later query, examine, and deploy this model. `outputs` is a directory in your Azure experiment in which the trained model is automatically saved while running the experiment. By registering the model, it is now available on your workspace. ``` # see files associated with that run print(run.get_file_names()) # register model model = run.register_model(model_name="arimamodel", model_path="outputs/arimamodel.pkl") ```
github_jupyter
# Week 2: Tackle Overfitting with Data Augmentation Welcome to this assignment! As in the previous week, you will be using the famous `cats vs dogs` dataset to train a model that can classify images of dogs from images of cats. For this, you will create your own Convolutional Neural Network in Tensorflow and leverage Keras' image preprocessing utilities, more so this time around since Keras provides excellent support for augmenting image data. You will also need to create the helper functions to move the images around the filesystem as you did last week, so if you need to refresh your memory with the `os` module be sure to take a look at the [docs](https://docs.python.org/3/library/os.html). Let's get started! ``` import os import zipfile import random import shutil import tensorflow as tf from tensorflow.keras.preprocessing.image import ImageDataGenerator from shutil import copyfile import matplotlib.pyplot as plt ``` Download the dataset from its original source by running the cell below. Note that the `zip` file that contains the images is unzipped under the `/tmp` directory. ``` # If the URL doesn't work, visit https://www.microsoft.com/en-us/download/confirmation.aspx?id=54765 # And right click on the 'Download Manually' link to get a new URL to the dataset # Note: This is a very large dataset and will take some time to download !wget --no-check-certificate \ "https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip" \ -O "/tmp/cats-and-dogs.zip" local_zip = '/tmp/cats-and-dogs.zip' zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp') zip_ref.close() ``` Now the images are stored within the `/tmp/PetImages` directory. There is a subdirectory for each class, so one for dogs and one for cats. 
``` source_path = '/tmp/PetImages' source_path_dogs = os.path.join(source_path, 'Dog') source_path_cats = os.path.join(source_path, 'Cat') # os.listdir returns a list containing all files under the given path print(f"There are {len(os.listdir(source_path_dogs))} images of dogs.") print(f"There are {len(os.listdir(source_path_cats))} images of cats.") ``` **Expected Output:** ``` There are 12501 images of dogs. There are 12501 images of cats. ``` You will need a directory for cats-v-dogs, and subdirectories for training and testing. These in turn will need subdirectories for 'cats' and 'dogs'. To accomplish this, complete the `create_train_test_dirs` below: ``` # Define root directory root_dir = '/tmp/cats-v-dogs' # Empty directory to prevent FileExistsError if the function is run several times if os.path.exists(root_dir): shutil.rmtree(root_dir) # GRADED FUNCTION: create_train_test_dirs def create_train_test_dirs(root_path): ### START CODE HERE # HINT: # Use os.makedirs to create your directories with intermediate subdirectories pass ### END CODE HERE try: create_train_test_dirs(root_path=root_dir) except FileExistsError: print("You should not be seeing this since the upper directory is removed beforehand") # Test your create_train_test_dirs function for rootdir, dirs, files in os.walk(root_dir): for subdir in dirs: print(os.path.join(rootdir, subdir)) ``` **Expected Output (directory order might vary):** ``` txt /tmp/cats-v-dogs/training /tmp/cats-v-dogs/testing /tmp/cats-v-dogs/training/cats /tmp/cats-v-dogs/training/dogs /tmp/cats-v-dogs/testing/cats /tmp/cats-v-dogs/testing/dogs ``` Code the `split_data` function which takes in the following arguments: - SOURCE: directory containing the files - TRAINING: directory that a portion of the files will be copied to (will be used for training) - TESTING: directory that a portion of the files will be copied to (will be used for testing) - SPLIT SIZE: to determine the portion The files should be randomized, so that the 
training set is a random sample of the files, and the test set is made up of the remaining files. For example, if `SOURCE` is `PetImages/Cat`, and `SPLIT` SIZE is .9 then 90% of the images in `PetImages/Cat` will be copied to the `TRAINING` dir and 10% of the images will be copied to the `TESTING` dir. All images should be checked before the copy, so if they have a zero file length, they will be omitted from the copying process. If this is the case then your function should print out a message such as `"filename is zero length, so ignoring."`. **You should perform this check before the split so that only non-zero images are considered when doing the actual split.** Hints: - `os.listdir(DIRECTORY)` returns a list with the contents of that directory. - `os.path.getsize(PATH)` returns the size of the file - `copyfile(source, destination)` copies a file from source to destination - `random.sample(list, len(list))` shuffles a list ``` # GRADED FUNCTION: split_data def split_data(SOURCE, TRAINING, TESTING, SPLIT_SIZE): ### START CODE HERE pass ### END CODE HERE # Test your split_data function # Define paths CAT_SOURCE_DIR = "/tmp/PetImages/Cat/" DOG_SOURCE_DIR = "/tmp/PetImages/Dog/" TRAINING_DIR = "/tmp/cats-v-dogs/training/" TESTING_DIR = "/tmp/cats-v-dogs/testing/" TRAINING_CATS_DIR = os.path.join(TRAINING_DIR, "cats/") TESTING_CATS_DIR = os.path.join(TESTING_DIR, "cats/") TRAINING_DOGS_DIR = os.path.join(TRAINING_DIR, "dogs/") TESTING_DOGS_DIR = os.path.join(TESTING_DIR, "dogs/") # Empty directories in case you run this cell multiple times if len(os.listdir(TRAINING_CATS_DIR)) > 0: for file in os.scandir(TRAINING_CATS_DIR): os.remove(file.path) if len(os.listdir(TRAINING_DOGS_DIR)) > 0: for file in os.scandir(TRAINING_DOGS_DIR): os.remove(file.path) if len(os.listdir(TESTING_CATS_DIR)) > 0: for file in os.scandir(TESTING_CATS_DIR): os.remove(file.path) if len(os.listdir(TESTING_DOGS_DIR)) > 0: for file in os.scandir(TESTING_DOGS_DIR): os.remove(file.path) # Define 
proportion of images used for training split_size = .9 # Run the function # NOTE: Messages about zero length images should be printed out split_data(CAT_SOURCE_DIR, TRAINING_CATS_DIR, TESTING_CATS_DIR, split_size) split_data(DOG_SOURCE_DIR, TRAINING_DOGS_DIR, TESTING_DOGS_DIR, split_size) # Check that the number of images matches the expected output print(f"\n\nThere are {len(os.listdir(TRAINING_CATS_DIR))} images of cats for training") print(f"There are {len(os.listdir(TRAINING_DOGS_DIR))} images of dogs for training") print(f"There are {len(os.listdir(TESTING_CATS_DIR))} images of cats for testing") print(f"There are {len(os.listdir(TESTING_DOGS_DIR))} images of dogs for testing") ``` **Expected Output:** ``` 666.jpg is zero length, so ignoring. 11702.jpg is zero length, so ignoring. ``` ``` There are 11250 images of cats for training There are 11250 images of dogs for training There are 1250 images of cats for testing There are 1250 images of dogs for testing ``` Now that you have successfully organized the data in a way that can be easily fed to Keras' `ImageDataGenerator`, it is time for you to code the generators that will yield batches of images, both for training and validation. For this, complete the `train_val_generators` function below. Something important to note is that the images in this dataset come in a variety of resolutions. Luckily, the `flow_from_directory` method allows you to standardize this by defining a tuple called `target_size` that will be used to convert each image to this target resolution. **For this exercise use a `target_size` of (150, 150)**. **Note:** So far, you have seen the term `testing` being used a lot for referring to a subset of images within the dataset. In this exercise, all of the `testing` data is actually being used as `validation` data. This is not very important within the context of the task at hand but it is worth mentioning to avoid confusion. 
``` # GRADED FUNCTION: train_val_generators def train_val_generators(TRAINING_DIR, VALIDATION_DIR): ### START CODE HERE # Instantiate the ImageDataGenerator class (don't forget to set the arguments to augment the images) train_datagen = ImageDataGenerator(rescale=None, rotation_range=None, width_shift_range=None, height_shift_range=None, shear_range=None, zoom_range=None, horizontal_flip=None, fill_mode=None) # Pass in the appropriate arguments to the flow_from_directory method train_generator = train_datagen.flow_from_directory(directory=None, batch_size=None, class_mode=None, target_size=(None, None)) # Instantiate the ImageDataGenerator class (don't forget to set the rescale argument) validation_datagen = None # Pass in the appropriate arguments to the flow_from_directory method validation_generator = validation_datagen.flow_from_directory(directory=None, batch_size=None, class_mode=None, target_size=(None, None)) ### END CODE HERE return train_generator, validation_generator # Test your generators train_generator, validation_generator = train_val_generators(TRAINING_DIR, TESTING_DIR) ``` **Expected Output:** ``` Found 22498 images belonging to 2 classes. Found 2500 images belonging to 2 classes. ``` One last step before training is to define the architecture of the model that will be trained. Complete the `create_model` function below which should return a Keras' `Sequential` model. Aside from defining the architecture of the model, you should also compile it so make sure to use a `loss` function that is compatible with the `class_mode` you defined in the previous exercise, which should also be compatible with the output of your network. You can tell if they aren't compatible if you get an error during training. 
**Note that you should use at least 3 convolution layers to achieve the desired performance.** ``` # GRADED FUNCTION: create_model def create_model(): # DEFINE A KERAS MODEL TO CLASSIFY CATS V DOGS # USE AT LEAST 3 CONVOLUTION LAYERS ### START CODE HERE model = tf.keras.models.Sequential([ None, ]) model.compile(optimizer=None, loss=None, metrics=['accuracy']) ### END CODE HERE return model ``` Now it is time to train your model! Note: You can ignore the `UserWarning: Possibly corrupt EXIF data.` warnings. ``` # Get the untrained model model = create_model() # Train the model # Note that this may take some time. history = model.fit(train_generator, epochs=15, verbose=1, validation_data=validation_generator) ``` Once training has finished, you can run the following cell to check the training and validation accuracy achieved at the end of each epoch. **To pass this assignment, your model should achieve a training and validation accuracy of at least 80% and the final testing accuracy should be either higher than the training one or have a 5% difference at maximum**. If your model didn't achieve these thresholds, try training again with a different model architecture, remember to use at least 3 convolutional layers or try tweaking the image augmentation process. You might wonder why the training threshold to pass this assignment is significantly lower compared to last week's assignment. Image augmentation does help with overfitting but usually this comes at the expense of requiring more training time. To keep the training time reasonable, the same number of epochs as in the previous assignment are kept. However, as an optional exercise you are encouraged to try training for more epochs and to achieve really good training and validation accuracies. 
``` #----------------------------------------------------------- # Retrieve a list of list results on training and test data # sets for each training epoch #----------------------------------------------------------- acc=history.history['accuracy'] val_acc=history.history['val_accuracy'] loss=history.history['loss'] val_loss=history.history['val_loss'] epochs=range(len(acc)) # Get number of epochs #------------------------------------------------ # Plot training and validation accuracy per epoch #------------------------------------------------ plt.plot(epochs, acc, 'r', "Training Accuracy") plt.plot(epochs, val_acc, 'b', "Validation Accuracy") plt.title('Training and validation accuracy') plt.show() print("") #------------------------------------------------ # Plot training and validation loss per epoch #------------------------------------------------ plt.plot(epochs, loss, 'r', "Training Loss") plt.plot(epochs, val_loss, 'b', "Validation Loss") plt.show() ``` You will probably notice that the model is overfitting, which means that it is doing a great job at classifying the images in the training set but struggles with new data. This is perfectly fine and you will learn how to mitigate this issue in the upcoming week. Before closing the assignment, be sure to also download the `history.pkl` file which contains the information of the training history of your model. You can download this file by running the cell below: ``` def download_history(): import pickle from google.colab import files with open('history_augmented.pkl', 'wb') as f: pickle.dump(history.history, f) files.download('history_augmented.pkl') download_history() ``` You will also need to submit this notebook for grading. To download it, click on the `File` tab in the upper left corner of the screen then click on `Download` -> `Download .ipynb`. You can name it anything you want as long as it is a valid `.ipynb` (jupyter notebook) file. 
**Congratulations on finishing this week's assignment!** You have successfully implemented a convolutional neural network that classifies images of cats and dogs, along with the helper functions needed to pre-process the images! **Keep it up!**
github_jupyter
``` import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from lxml import etree import pickle # Make function to compare two series together: def ppm_matrix(series1, series2): ''' GOAL - create a matrix of pairwise ppm differences INPUT - 2 pandas Series, with index OUTPUT - dataframe with indexes containing pairwise ppm comparisons format: rows are series1 labels, columns are series2 labels ''' # do series1-series2 using numpy's broadcasting # This is faster than using a loop diff_matrix = abs(series1.values[:,np.newaxis] - series2.values) # Get max of pairwise comparisons max_matrix = np.maximum(series1.values[:,np.newaxis], series2.values) return (diff_matrix / max_matrix)*10**6 def pairwise_difference(series1, series2): ''' GOAL - create matrix of pairwise differences INPUT - 2 pandas series, with index OUTPUT - dataframe, with indexes, format: columns are series1 labels, rows are series2 labels ''' diff_matrix = abs(series1.values[:, np.newaxis] - series2.values) output = pd.DataFrame(diff_matrix, index=series1.index, columns=series2.index, ) return output s1 = pd.Series({'A': 1, 'B':2}, dtype='float64') s2 = pd.Series({'X':1, 'Y':2, 'Z':3}, dtype='float64') ppm_output = ppm_matrix(s1, s2) print ppm_output true_ppm_output = pd.DataFrame({'X':[0.0, (1/2.0)*10**6], 'Y':[(1/2.0)*10**6, 0.0], 'Z':[(2/3.0)*10**6, (1/3.0)*10**6]}, index=['A', 'B'], dtype='float64') rt_output = pairwise_difference(s1,s2) true_rt_output = pd.DataFrame({'X':[0.0, 1.0], 'Y':[1.0, 0.0], 'Z':[2.0, 1.0]}, index=['A', 'B'], dtype='float64') print 'ppm should be this:\n', true_ppm_output print '\n\n Is it?\n', ppm_output assert (ppm_output.all() == true_ppm_output.as_matrix().all()).all() assert (rt_output.all() == true_rt_output.all()).all() ``` <h2> Let's import a couple datasets and take them for a spin</h2> ``` ### import two datasets def reindex_xcms_by_mzrt(df): df.index = (df.loc[:,'mz'].astype('str') + ':' + df.loc[:, 'rt'].astype('str')) 
return df # alzheimers local_path = '/home/irockafe/Dropbox (MIT)/Alm_Lab/'\ 'projects' alzheimers_path = local_path + '/revo_healthcare/data/processed/MTBLS72/positive_mode/'\ 'mtbls_no_retcor_bw2.csv' ## Import the data and remove extraneous columns df_alzheimers = pd.read_csv(alzheimers_path, index_col=0) df_alzheimers = reindex_xcms_by_mzrt(df_alzheimers) # malaria malaria_path = local_path + ('/revo_healthcare/data/processed/MTBLS315/'+ 'uhplc_pos/xcms_result_4.csv') df_malaria = pd.read_csv(malaria_path, index_col=0) df_malaria = reindex_xcms_by_mzrt(df_malaria) ppm_alz_v_malaria = ppm_matrix(df_malaria['mz'], df_alzheimers['mz']) rt_alz_v_malaria = pairwise_difference(df_malaria['rt'], df_alzheimers['rt']) ``` <h2> Looks like There aren't too many ppm m/z overlaps </h2> ``` sns.heatmap(np.log10(ppm_alz_v_malaria)) plt.title('Log10 ppm difference') plt.show() # How many for differences at 30ppm? ppm_window = 30 within_ppm = (ppm_alz_v_malaria[ppm_alz_v_malaria < 30] .dropna(axis=0, how='all') .dropna(axis=1, how='all') ) print 'shape', ppm_alz_v_malaria.shape print ('ppm within {ppm} ppm: '.format(ppm=ppm_window) + '{num}'.format(num=(ppm_alz_v_malaria < 30).sum().sum())) # Get indexes print 'shape of htose within 30ppm:, ', within_ppm.shape # How many m/z from one dataset could be m/z isomers from # other dataset? 
print ('\n\nMass matches between datasets (isomers and 1:1 matches)', (within_ppm < 30).sum().sum()) print '\nAlzheimers "isomers" in other dataset that are match >1 feature in other set', ((within_ppm < 30).sum(axis=0)>1).sum() print 'Alzheimers total', df_alzheimers['rt'].shape print '\n\nMalaria "isomers in other dataset that match >1 feature in other set', ((within_ppm < 30).sum(axis=1) > 1).sum() print 'Malaria total', df_malaria['rt'].shape # Show distribution of # of isomers per feature in both malaria and fever datasets print (within_ppm < 30).sum(axis=0).hist(bins=30) plt.title('Alzheimers isomers in malaria dataset') plt.show() (within_ppm < 30).sum(axis=1).hist(bins=30) plt.title('Malaria isomers in alzheimers dataset') plt.show() ``` <h2> So, about 1/4 of the mass-matches have potential isomers in the other dataset...? </h2> Notice how there are more matches to the malaria set, which has more peaks. Makes sense - more peaks either means more actual molecules, or more adducts that could be mistakenly matched as molecules <h2> Get masses of all hmdb serum metabolites </h2> parse the xml file ``` local = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/' xml_file = local + 'revo_healthcare/data/external/toy_database.xml' xml_file = local + 'revo_healthcare/data/external/serum_metabolites.xml' #xml_tree = etree.iterparse(xml_file, tag='metabolite') # # namespace - at the top of file. fucks with every tag. # very annoying, so name all tags ns + tag ns = '{http://www.hmdb.ca}' nsmap = {None : ns} # If you're within a metabolite tag count = 0 seen_mass = 0 d = {} for event, element in etree.iterparse(xml_file, tag=ns+'metabolite'): tree = etree.ElementTree(element) # Aggregate info into a dictionary of # {HMDB_ID: iso_mass} accession = [] # Get accession number and masses for each metabolite # Could be multiple accessions. 
Grab all of them, # sort to make unique identifier for elem in tree.iter(): if elem.tag == ns+'accession': accession.append(elem.text) # If you just saw a 'mono_mass' entry, # get the mass value and reset, saying you # havent seen 'mono_mass' in the text of next metabolite if (elem.tag == ns+'value') & (seen_mass == 1): mass = float(elem.text) seen_mass = 0 if elem.text == 'mono_mass': seen_mass = 1 elem.clear() # sort accession numbers and join with '_' accession_key = '_'.join(sorted(accession)) # add to dictionary if mass: d[accession_key] = mass # reset mass - only add feature if mass listed mass = None # reset accession numbers accession = [] element.clear() count += 1 if count % 1000 == 0: print('Made it through ' + str(count) + ' metabolites') #pickle.dump(d, open('serumdb_dict.p', 'wb')) print 'Number of metabolites: %s' % len(d.keys()) serumdb_masses = pd.Series(d, dtype='float32') serumdb_ppm_matrix = ppm_matrix(serumdb_masses, serumdb_masses)*10**6 #df = pd.DataFrame(serumdb_ppm_matrix, index=serumdb_masses.index, # columns=serumdb_masses.index)*10**6 # Forget about using a dataframe - uses too much memory ``` <h2> So, we've got 6,315,000 pairs of molecules that could be isomers at 1 ppm </h2> That's about 10% of possible pairs from 25,000 molecules ``` top_ppm = 30 pairs = np.full((top_ppm), np.nan) print(pairs) for i in range(1,top_ppm): # div by two, b/c half matrix is redundant # subtract length of diagonal of matrix, too num = ((serumdb_ppm_matrix < i).sum() / 2) - serumdb_ppm_matrix.shape[0] pairs[i] = num plt.scatter(x=range(1,30), y=pairs[1:]) plt.title('Number of pairs of molecules that could overlap in human serum database\n') plt.show() ``` <h2> Looks like there are more isomers than 1:1 pairings, by a lot </h2> Less than 6000 of ``` # how to plot the number of overlaps per molecule? 
num_below_1ppm = (serumdb_ppm_matrix < 1).sum(axis=1) - 1 plt.hist((serumdb_ppm_matrix < 1).sum(axis=1) - 1 ) plt.title('Pairs of overlapping mz at ppm 1') plt.show() num_below_1ppm ```
github_jupyter
# Curso de introducción al análisis y modelado de datos con Python <img src="../images/cacheme.png" alt="logo" style="width: 150px;"/> <img src="../images/aeropython_logo.png" alt="logo" style="width: 115px;"/> --- # Scikit-Learn: Introducción y Problema de Clasificación. En los últimos tiempos se habla mucho de _machine learning_, _deep learning_, _reinforcement learning_, muchas más cosas que contienen la palabra _learning_ y, por supuesto, _Big Data_. Todo ello motivado por los avances en capacidad de cálculo de los últimos años, y la popularización de lenguajes de alto nivel, que han permitido entrar de lleno en la fiebre de hacer que las máquinas aprendan. En esta clase veremos una breve introducción al machine learning, y aprenderemos a utilizar el paquete `scikit-learn` de Python, con el objetivo de crear modelos predictivos a partir de nuestros datos de una manera rápida y sencilla. En concreto, veremos cómo resolver el problema de clasificación. ## ¿En qué consiste el machine learning? El machine learning es una rama de la inteligencia artificial, cuyo objetivo es desarrollar técnicas para enseñar a las máquinas a llevar a cabo ciertas tareas, mostrándoles previamente algunos ejemplos y cómo o cómo no llevar a cabo la tarea de forma exitosa. Por lo tanto, se busca crear programas capaces de generalizar comportamientos a partir de una información suministrada en forma de ejemplos, aprendiendo de ellos, y sin que hayan sido programados a mano punto a punto y detalle a detalle para realizar exitosamente esa tarea. Los diferentes algoritmos de aprendizaje automático, se agrupan en dos grandes grupos: * **Aprendizaje supervisado**, cuando tengo datos _etiquetados_, es decir: conozco la variable a predecir de un cierto número de observaciones. Pasándole esta información al algoritmo, este será capaz de predecir dicha variable cuando reciba observaciones nuevas. 
Por lo tanto, se produce una función que establece una correspondencia entre las entradas y las salidas deseadas del sistema. Además, dentro de este grupo, tendremos dos tipos de problemas dependiendo de la naturaleza de la variable a predecir: - **Clasificación**, si la variable a predecir es discreta o categórica (sí/no, color de ojos, etc) - **Regresión**, si la variable a predecir es continua. * **Aprendizaje no supervisado**, cuando no tenemos datos _etiquetados_ y por tanto no tengo ninguna información _a priori_ sobre las categorías de esos ejemplos. Por lo tanto, en este caso, los algoritmos deben ser capaces de descubrir patrones en los datos y agruparlos. Si bien, tendremos que manualmente inspeccionar el resultado después y ver qué sentido podemos darle a esos grupos. Dentro de este grupo podemos distinguir: - **Clustering**, agrupamiento automático de objetos similares en sets. - **Reducción de la dimensionalidad**, reducir el número de variables aleatorias a considerar. En función de la naturaleza de nuestro problema, `scikit-learn` proporciona una gran variedad de algoritmos que podemos elegir. ![Machine Learning map](../images/ml_map.png) ## ¿Qué es scikit-learn? `scikit-learn` es una librería que nos proporciona un conjunto de algoritmos de machine learning, que incluyen regresión, clasificación, reducción de la dimensionalidad y clustering. Se articula sobre la librería `NumPy` y `SciPy` y nos permite enfrentarnos a la resolución de estos problemas a través de una API limpia y bien hecha. En ese sentido, se trabaja igual que con SciPy, es decir, se importan explícitamente los módulos que se necesitan de la librería. Hay que indicar que no está especialmente diseñada para datasets super grandes, pero hay cada vez más mejoras en ese área. ``` # preserve from IPython.display import HTML HTML('<iframe src=http://scikit-learn.org/stable/#" width="700" height="400"></iframe>') ``` #### ¿Cómo se trabaja con scikit-learn? 
El proceso para usar `scikit-learn` es el siguiente: 1. Separar los datos en matriz de características `features` y variable a predecir `target` 2. Seleccionar el modelo `estimator`. 3. Elegir los hiperparámetros 4. Ajustar o entrenar el modelo (`model.fit`) 5. Predecir con datos nuevos (`model.predict`) ## Ejemplos ``` # importamos las librerías que usamos de forma habitual ``` ## Problema de Regresión En primer lugar vamos a resolver un problema muy sencillo de regresión, que consiste en ajustar una recta a unos datos. Esto difícilmente se puede llamar _machine learning_, pero nos servirá para ver cómo es la forma de trabajar con `scikit-learn`. Como partida, fabricamos unos datos distribuidos a lo largo de una recta con un poco de ruido y los pintamos para ver el resultado. A continuación, importamos el estimador de Regresión Lineal y creamos nuestro modelo. <div class="alert alert-info">Tenemos que hacer este `reshape` para transformar nuestro vector en una matriz de columnas. Rara vez tendremos que repetir este paso, puesto que en la práctica siempre tendremos varias variables.</div> El siguiente paso es ajustar nuestro modelo. Y una vez hecho esto, ya podemos calcular predicciones para los mismos datos Ahora, lo que vamos a hacer es calcular un tipo de error asocido a esta predicción, usando el módulo `sklearn.metrics`: Y ahora predecimos con datos nuevos y vemos el resultado ### Problema de Clasificación En `scikit-learn` tenemos disponibles muchos datasets clásicos de ejemplo que podemos utilizar para practicar. Uno de ellos es el dataset MNIST, que consiste en imágenes escaneadas de números escritos a mano por funcionarios de los EEUU, y que pueden ser de 10 posibles clases diferentes. Para cargarlo, importamos la función correspondiente de `sklearn.datasets`: ``` # importamos los datasets ``` Un dataset es un objeto parecido a un diccionario que almacena los datos y algunos metadatos asociados. 
``` # cargamos el dataset the digits que es con el que vamos a trabajar. ``` Los datos de las muestras, están almacenados en `.data`, que siempre es un array 2D de `n_samples` por `n_features`. ``` # vemos el contenido de los datos # vemos cuantas muestras y características tenemos ``` Tenemos 1797 muestras, y cada una está caracterizada por 64 valores. En este caso, cada muestra original consiste en una imagen de (8,8), es decir, 64 características, a la que se puede acceder por índice, por ejemplo: ``` # acceso a una muestra ``` Por otro lado, en el caso de problemas supervisados, se almacenan en `.target` una o más variables de respuesta, que en nuestro ejemplo consisten en un número para cada muestra, y que correponde con el dígito que estamos intentando aprender. ``` # vemos los targets # vemos cuantos targets tenemos (mismos que muestras) ``` Además, podemos ver cuáles son los posibles valores que toman estos targets. Por último, podemos extraer información global sobre el dataset de la siguiente forma: Ya tenemos los datos separados en matriz de características y vector de predicción. En este caso, tendremos 64 = 8x8 características (un valor numérico por cada pixel de la imagen) y una variable a predecir que será el número en sí y que irá de 0 a 8. Vamos a visualizar una de las imágenes como ejemplo para hacernos una idea. ``` # elegimos por ejemplo los datos asociados a la muestra 42 # vemos qué número sabemos que almacena esta muestra # hacemos un reshape a la muestra para poder represetnarla #(sabemos que viene en un array 1d, pero se corresponde con uno 2d de 8x8) # pintamos el resultado # (si todo ha ido bien, deberíamos ver el valor indicado por label_num_ej) ``` Ten en cuenta que nosotros sabemos qué número es cada imagen porque somos humanos y podemos leerlas. El ordenador lo sabe porque están etiquetadas, pero ¿qué pasa si viene una imagen nueva? 
El objetivo por lo tanto es, dada una imagen, predecir qué dígito representa, y como hemos indicado a la hora de explicar el proceso a seguir, el siguiente paso es construir un modelo de clasificación. Cada algoritmo está expuesto desde scikit-learn a través de un objeto `"Estimador"`. Por ejemplo, en este caso vamos a elegir un modelo de regresión logística: ``` # importamos el modelo # creamos la instancia del modelo ``` Una vez importado y creado, lo que hacemos es ajustar nuestro modelo con él, usando `fit`. ``` # ajustamos el modelo ``` Y tras ajustar el modelo, vamos a calcular sus predicciones para los mismos datos de entrenamiento, usando `predict`. Por último, vamos a comparar esas predicciones con los datos reales, para ver qué tal ha sido el ajuste. Para ello usamos `sklearn.metrics` para medir la eficacia del algoritmo. ¡Parece que hemos acertado prácticamente todas! Más tarde volveremos sobre este porcentaje de éxito, que bien podría ser engañoso. De momento, representemos otra medida de éxito que es la matriz de confusión, y que nos indica el número de observaciones Cij, que sabemos que tendrían que ir en el grupo i, pero que se ha predicho que están en el grupo j. ¡Y ya está! Lo básico de `scikit-learn` está aquí. Lo próximo será usar diferentes tipos de modelos y examinar con rigor su rendimiento para poder seleccionar el que mejor funcione para nuestros datos. --- Hemos aprendido: * En qué consiste el machine learning. * Cómo empezar a usar la librería scikit-learn * Resolver un problema de clasificación y otro de regresión. ###### Juan Luis Cano, Alejandro Sáez, Mabel Delgado --- _Las siguientes celdas contienen configuración del Notebook_ _Para visualizar y utilizar los enlaces a Twitter el notebook debe ejecutarse como [seguro](http://ipython.org/ipython-doc/dev/notebook/security.html)_ File > Trusted Notebook ``` # esta celda da el estilo al notebook ```
github_jupyter
``` %matplotlib inline ``` # A demo of K-Means clustering on the handwritten digits data In this example we compare the various initialization strategies for K-means in terms of runtime and quality of the results. As the ground truth is known here, we also apply different cluster quality metrics to judge the goodness of fit of the cluster labels to the ground truth. Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for definitions and discussions of the metrics): =========== ======================================================== Shorthand full name =========== ======================================================== homo homogeneity score compl completeness score v-meas V measure ARI adjusted Rand index AMI adjusted mutual information silhouette silhouette coefficient =========== ======================================================== ``` print(__doc__) from time import time import numpy as np import matplotlib.pyplot as plt from sklearn import metrics from sklearn.cluster import KMeans from sklearn.datasets import load_digits from sklearn.decomposition import PCA from sklearn.preprocessing import scale np.random.seed(42) digits = load_digits() data = scale(digits.data) n_samples, n_features = data.shape n_digits = len(np.unique(digits.target)) labels = digits.target sample_size = 300 print("n_digits: %d, \t n_samples %d, \t n_features %d" % (n_digits, n_samples, n_features)) print(79 * '_') print('% 9s' % 'init' ' time inertia homo compl v-meas ARI AMI silhouette') def bench_k_means(estimator, name, data): t0 = time() estimator.fit(data) print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f' % (name, (time() - t0), estimator.inertia_, metrics.homogeneity_score(labels, estimator.labels_), metrics.completeness_score(labels, estimator.labels_), metrics.v_measure_score(labels, estimator.labels_), metrics.adjusted_rand_score(labels, estimator.labels_), metrics.adjusted_mutual_info_score(labels, estimator.labels_), metrics.silhouette_score(data, 
estimator.labels_, metric='euclidean', sample_size=sample_size))) bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10), name="k-means++", data=data) bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10), name="random", data=data) # in this case the seeding of the centers is deterministic, hence we run the # kmeans algorithm only once with n_init=1 pca = PCA(n_components=n_digits).fit(data) bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1), name="PCA-based", data=data) print(79 * '_') ``` Visualize the results on PCA-reduced data ``` reduced_data = PCA(n_components=2).fit_transform(data) kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10) kmeans.fit(reduced_data) # Step size of the mesh. Decrease to increase the quality of the VQ. h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max]. # Plot the decision boundary. For that, we will assign a color to each x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1 y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # Obtain labels for each point in mesh. Use last trained model. Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure(1) plt.clf() plt.imshow(Z, interpolation='nearest', extent=(xx.min(), xx.max(), yy.min(), yy.max()), cmap=plt.cm.Paired, aspect='auto', origin='lower') plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2) # Plot the centroids as a white X centroids = kmeans.cluster_centers_ plt.scatter(centroids[:, 0], centroids[:, 1], marker='x', s=169, linewidths=3, color='w', zorder=10) plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n' 'Centroids are marked with white cross') plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.xticks(()) plt.yticks(()) plt.show() ```
github_jupyter
``` # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Feedback or issues? For any feedback or questions, please open an [issue](https://github.com/googleapis/python-aiplatform/issues). # AI Platform (Unified) SDK: AutoML Text Sentiment Example To use this Jupyter notebook, copy the notebook to an AI Platform(Unified) Notebooks instance with Tensorflow installed and open it. You can run each step, or cell, and see its results. To run a cell, use Shift+Enter. Jupyter automatically displays the return value of the last line in each cell. For more information about running notebooks in AI Platform(Unified) Notebook, see the [AI Platform(Unified) Notebook guide](https://cloud.google.com/ai-platform-unified/docs/general/notebooks). This notebook demonstrate how to create an AutoML Text Sentiment Model, with an AI Platform (Unified) Text Dataset, and how to serve the model for online prediction. Note: you may incur charges for training, prediction, storage or usage of other GCP products in connection with testing this SDK ### Install AI Platform (Unified) SDK After the SDK installation the kernel will be automatically restarted. 
``` !pip3 uninstall -y google-cloud-aiplatform !pip3 install google-cloud-aiplatform import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) import sys if "google.colab" in sys.modules: from google.colab import auth auth.authenticate_user() ``` ### Enter Your Project and GCS Bucket Enter your Project Id in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. ``` MY_PROJECT = "YOUR PROJECT ID" MY_STAGING_BUCKET = "gs://YOUR BUCKET" # bucket should be in same region as ucaip ``` ## Dataset To create a sentiment analysis model, we will use the open dataset from FigureEight that analyzes Twitter mentions of the allergy medicine Claritin. Please reference [AutoML Documentation](https://cloud.google.com/natural-language/automl/docs/quickstart#model_objectives) for more information. ``` # Text Classification IMPORT_FILE = "gs://cloud-samples-data/language/claritin.csv" SENTIMENT_MAX = 4 ``` ## Initialize AI Platform (Unified) SDK Initialize the *client* for AI Platform (Unified). ``` from google.cloud import aiplatform aiplatform.init(project=MY_PROJECT, staging_bucket=MY_STAGING_BUCKET) ``` # Create Managed Text Dataset from CSV ``` ds = aiplatform.TextDataset.create( display_name="text-sentiment", gcs_source=[IMPORT_FILE], import_schema_uri=aiplatform.schema.dataset.ioformat.text.sentiment, ) ds.resource_name ``` # Launch Training Job and get Model ``` job = aiplatform.AutoMLTextTrainingJob( display_name="text-sentiment", prediction_type="sentiment", sentiment_max=SENTIMENT_MAX, ) # This will take around an hour to run model = job.run( dataset=ds, training_fraction_split=0.6, validation_fraction_split=0.2, test_fraction_split=0.2, model_display_name="text-sentiment", ) ``` # Deploy Model ``` endpoint = model.deploy() ``` # Predict on Endpoint ``` instances_list = [{"content": "Claritin is the absolute best"}] prediction = endpoint.predict(instances) prediction ```
github_jupyter
``` import torch import numpy as np import torch.nn as nn import torch.nn.functional as F import gym import matplotlib.pyplot as plt import random import argparse from collections import OrderedDict from copy import copy import scipy import scipy.linalg from Utility import data_collecter import os os.environ['KMP_DUPLICATE_LIB_OK'] = "TRUE" from Utility import DerivativeLiftFunc, data_collecter,RBFLiftFunc import lqr # physics engine import pybullet as pb import pybullet_data # Franka simulator from franka_env import FrankaEnv from scipy.io import loadmat, savemat Methods = ["KoopmanDerivative","KoopmanRBF",\ "KNonlinear","KNonlinearRNN","KoopmanU",\ "KoopmanNonlinearA","KoopmanNonlinear",\ ] method_index = 1 # suffix = "CartPole1_26" # env_name = "CartPole-v1" # suffix = "Pendulum1_26" # env_name = "Pendulum-v1" # suffix = "DampingPendulum1_26" # env_name = "DampingPendulum" # suffix = "MountainCarContinuous1_26" # env_name = "MountainCarContinuous-v0" env_name = "Franka" Data = np.load("Prediction_Results/"+"Kd_"+env_name+"_KoopmanRBF"+".npz") Kd = Data["Kd"] center = Data["Center"] Data_collecter = data_collecter(env_name) Nstate = Data_collecter.Nstates udim = Data_collecter.udim Nrbf = 50 LiftFunc = RBFLiftFunc(env_name,Nstate,udim,Nrbf,Data_collecter.observation_space,center=center) NKoopman = LiftFunc.NKoopman simFranka = False # whether to open Graphical interface to animate Franka motion np.set_printoptions(precision=3) nStates = 10 accuracy_invKin = 0.000001 def quat_to_euler(q): """ Function that converts quaternions to Euler angles Inputs ------ q : quaternions Outputs ------- phi, theta, psi : Euler angles """ pi = math.pi q0, q1, q2, q3 = q[3], q[0], q[1], q[2] test = np.dot(q0,q2) - np.dot(q1,q3) if (test > 0.4999): phi = math.atan2(q1*q2 - q0*q3, q1*q3 + q0*q2) theta = pi/2 psi= 0 elif (test < -0.4999): phi = math.atan2(- q1*q2 + q0*q3, - q1*q3 - q0*q2) theta = -pi/2 psi = 0 else: sqx = q1*q1 sqy = q2*q2 sqz = q3*q3 psi = math.atan2(2*q2*q1+2*q0*q3 
, 1 - 2*sqy - 2*sqz) # Yaw theta = math.asin(2*test) # Pitch phi = math.atan2(2*q2*q3+2*q0*q1 , 1 - 2*sqx - 2*sqy) # Roll return phi, theta, psi def Psi_x(x): return x def Run_Franka(Steps, state_desired, LQR_gains, x0 = None, y0 = None, z0 = None): """ Function that simulates forward the dynamics of Franka using LQR control, given a desired trajectory and LQR gains Inputs ------ Steps : Number of steps to propagate discrete dynamics into the future state_desired: desired trajectory to track (used for the applied control) LQR_gains: LQR gains used together with the current state and desired trajectory to compute control response Outputs ------- state_traj : state trajectory of the controlled system control_traj : control trajectory that was applied to the system """ state = env.reset() # Set state to desired state to help initiation for i, jnt in enumerate(state_desired[0,3:9]): pb.resetJointState(env.robot, i, jnt) # set initial y,z states to given y0, z0 if x0: JointAngles_Fig8 = accurateCalculateInverseKinematics(env.robot, env.ee_id, [x0, y0, z0], accuracy_invKin, 10000) for i, jnt in enumerate(JointAngles_Fig8[0:6]): pb.resetJointState(env.robot, i, jnt) state = env.get_state() # Initialize states and controls trajectories state_traj, control_traj = np.empty((Steps+1,17)), np.empty((Steps, 7)) state_traj[:], control_traj[:] = np.NaN, np.NaN # Simulate dynamics forward for t in range(Steps): state = np.append(state[0:3], state[7:]) state = LiftFunc.Psi_s(state).reshape(NKoopman,1) state_traj[t, :] = state[:17].reshape(-1) control = - np.dot(LQR_gains, (state - LiftFunc.Psi_s(state_desired[t+1,:]).reshape(NKoopman,1))) control_traj[t, :] = control.reshape(-1) state = env.step(control) # time.sleep(1.0/6000.) # time.sleep(1.0/600000.) 
state = np.append(state[0:3], state[7:]) state = LiftFunc.Psi_s(state).reshape(NKoopman,1) state_traj[t+1, :] = state[:17].reshape(-1) return state_traj, control_traj def accurateCalculateInverseKinematics(kukaId, endEffectorId, targetPos, threshold, maxIter): """ Calculates the joint poses given the End Effector location using inverse kinematics Note: It changes the Franka configuration during the optimization to the desired configuration Input: kukaId : Object that represents the Franka system endEffectorId : targetPos : threshold : accuracy threshold maxIter : maximum iterations to fine tune solution Output: jointPoses: The angles of the 7 joints of the Franka """ numJoints = 7 closeEnough = False iter = 0 dist2 = 1e30 while (not closeEnough and iter < maxIter): jointPoses = pb.calculateInverseKinematics(kukaId, endEffectorId, targetPos) for i in range(numJoints): pb.resetJointState(kukaId, i, jointPoses[i]) ls = pb.getLinkState(kukaId, endEffectorId) newPos = ls[4] diff = [targetPos[0] - newPos[0], targetPos[1] - newPos[1], targetPos[2] - newPos[2]] dist2 = (diff[0] * diff[0] + diff[1] * diff[1] + diff[2] * diff[2]) closeEnough = (dist2 < threshold) iter = iter + 1 return jointPoses[:7] def runLQRonFranka(steps, desired_target, LQR_gains, x0 = None, y0 = None, z0 = None, method = False): """ Executes a simulation of specified number of steps applying LQR on Franka with the input LQR gains Inputs ------ steps : number of steps to forward simulate the system desired_target : desired_trajectory to track LQR_gains : LQR gains used to compute control method : method to print on pdf label; if none is specified, no pdf is created """ state_traj, controls_traj = Run_Franka(steps, desired_target, LQR_gains, x0, y0, z0) error = np.linalg.norm(state_traj[:,:nStates] - desired_target[:,:nStates]) if method: fig = plt.plot(state_traj[:,1], state_traj[:,2], 'b-', linewidth = 1, markersize = 1) plt.plot(desired_target[:,1], desired_target[:,2], 'k--', linewidth = 1) 
plt.axis('equal') plt.title(method +': Error = {0:.2f}'.format(error), fontdict=None, loc='center', pad=None) return state_traj, controls_traj, error def desiredStates_from_EndEffector(xyzEndEffector): """ This function takes in the x,y,z coordinates of the end effector (EE) of Franka and returns the closest (locally, using Inverse Kinematics) desired configuration to achieve that EE position . Inputs ------ x_d, y_d, z_d: End effector coordinates Outputs desired_states: x, y, z, JointAngles, Joint Velocities """ x_d, y_d, z_d = xyzEndEffector[0], xyzEndEffector[1], xyzEndEffector[2] jointAngles = np.asarray(accurateCalculateInverseKinematics(env.robot, env.ee_id, [x_d, y_d, z_d], accuracy_invKin, 10000)) state_des = np.concatenate((xyzEndEffector, jointAngles, np.zeros(7))) return state_des # initialize Franka object env = FrankaEnv(render = simFranka) center = np.array([0.0,0.6]) radius = 0.3 theta_ = np.pi/10.0 eradius = np.tan(2*theta_)*radius*np.cos(theta_)-radius*np.sin(theta_) Star_points = np.zeros((11,2)) for i in range(5): theta = 2*np.pi/5*(i+0.25) Star_points[2*i,0] = np.cos(theta)*radius+center[0] Star_points[2*i,1] = np.sin(theta)*radius+center[1] beta = 2*np.pi/5*(i+0.75) Star_points[2*i+1,0] = np.cos(beta)*eradius+center[0] Star_points[2*i+1,1] = np.sin(beta)*eradius+center[1] Star_points[-1,:] = Star_points[0,:] T = 6 *10 # time horizon t = 0.02*np.linspace(0, T*5, T*50+1) # time steps refs = np.zeros((len(t),2)) Steps = len(t)-1 each_num = int((len(t)-10)/9.5) for i in range(10): refs[(each_num+1)*i,:] = Star_points[i,:] if i!= 9: num = each_num else: num = len(t)-(each_num+1)*i-1 for j in range(num): t_ = (j+1)/(each_num+1) refs[(each_num+1)*i+j+1,:] = t_*Star_points[i+1,:] + (1-t_)*Star_points[i,:] # print(each_num,each_num*10.5) # print(t.shape) # plt.plot(Star_points[:,0],Star_points[:,1]) x = 0.3*np.ones((len(t),1)) z = refs[:,1].reshape(-1,1) y = refs[:,0].reshape(-1,1) plt.plot(y,z) plt.axis('equal') plt.xlabel('y (m)', Fontsize = 14) 
plt.ylabel('z (m)', Fontsize = 14) # Translate desired y-z coordinates to desired joint angles of the Franka robotic arm JointAngles_Fig8 = np.empty((len(t),7)) JointAngles_Fig8[:] = np.NaN for i in range(len(t)): JointAngles_Fig8[i,:] = accurateCalculateInverseKinematics(env.robot, env.ee_id, [x[i], y[i], z[i]], accuracy_invKin, 10000) states_des = np.concatenate( (x, y, z, JointAngles_Fig8, np.zeros((len(y), 7))), axis = 1) # states_des = np.concatenate((x,y,z), axis = 1) ##LQR import lqr import time Ad = Kd[:NKoopman,:NKoopman] Bd = Kd[:NKoopman,NKoopman:] Ad = np.matrix(Ad) Bd = np.matrix(Bd) Q = np.zeros((NKoopman,NKoopman)) Q[:10,:10] = np.eye(10) R = 0.1*np.eye(7) Q = np.matrix(Q) R = np.matrix(R) Kopt = lqr.lqr_regulator_k(Ad,Bd,Q,R) print(Kopt.shape) np.random.seed(0) a = [0.01,0.021,-0.015,-0.012,0.093,0.058,0.014,-0.086,-0.096,0.056] b = [0.043,0.009,0.029,0.078,-0.023,0.006,0.085,-0.083,0.067,0.074] for i in range(10): y0, z0 = y[0,0] + a[i], z[0,0] + b[i] state_traj_RBF, controls_traj_RBF, error_RBF = runLQRonFranka(Steps, states_des, Kopt, x[0,0], y0, z0) savemat('FrankaControlResults/RBFFrankaFigStar_SimData'+str(i)+'.mat', {'desired_states':states_des,'states_RBF': state_traj_RBF, 'u_RBF' : controls_traj_RBF, 'error_RBF':error_RBF}) # save variables to Matlab file trajs = loadmat('FrankaControlResults/RBFFrankaFigStar_SimData8.mat') state_traj_RBF = trajs['states_RBF'] desired_traj = trajs['desired_states'] plt.plot(desired_traj[:,1],desired_traj[:,2],label='Desired') plt.plot(state_traj_RBF[:,1],state_traj_RBF[:,2],label='RBF') print("error error_RBF:{}".format(trajs['error_RBF'])) plt.legend() plt.show() env.reset() T = 6 *10 # time horizon t = 1.6 + 0.02*np.linspace(0, T*5, T*50+1) # time steps Steps = len(t)-1 a = 0.3 # scaling parameter of Fig. 
8 area x = 0.3*np.ones((len(t),1)) z = np.expand_dims(0.59 + 2* a * np.sin(t) * np.cos(t) / (1+np.sin(t)**2), axis = 1) y = np.expand_dims(a * np.cos(t) / (1+np.sin(t)**2), axis = 1) plt.plot(y,z) plt.axis('equal') plt.xlabel('y (m)', Fontsize = 14) plt.ylabel('z (m)', Fontsize = 14) # Translate desired y-z coordinates to desired joint angles of the Franka robotic arm JointAngles_Fig8 = np.empty((len(t),7)) JointAngles_Fig8[:] = np.NaN for i in range(len(t)): JointAngles_Fig8[i,:] = accurateCalculateInverseKinematics(env.robot, env.ee_id, [x[i], y[i], z[i]], accuracy_invKin, 10000) states_des = np.concatenate( (x, y, z, JointAngles_Fig8, np.zeros((len(y), 7))), axis = 1) # states_des = np.concatenate((x,y,z), axis = 1) np.random.seed(0) a = [0.01,0.021,-0.015,-0.012,0.093,0.058,0.014,-0.086,-0.096,0.056] b = [0.043,0.009,0.029,0.078,-0.023,0.006,0.085,-0.083,0.067,0.074] for i in range(10): y0, z0 = y[0,0] + a[i], z[0,0] + b[i] state_traj_RBF, controls_traj_RBF, error_RBF = runLQRonFranka(Steps, states_des, Kopt, x[0,0], y0, z0) savemat('FrankaControlResults/RBFFrankaFig8_SimData'+str(i)+'.mat', {'desired_states':states_des,'states_RBF': state_traj_RBF, 'u_RBF' : controls_traj_RBF, 'error_RBF':error_RBF}) # save variables to Matlab file trajs = loadmat('FrankaControlResults/RBFFrankaFig8_SimData8.mat') state_traj_RBF = trajs['states_RBF'] desired_traj = trajs['desired_states'] plt.plot(desired_traj[:,1],desired_traj[:,2],label='Desired') plt.plot(state_traj_RBF[:,1],state_traj_RBF[:,2],label='RBF') print("error error_RBF:{}".format(trajs['error_RBF'])) plt.legend() plt.show() ```
github_jupyter
``` """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. 5. Restart the runtime (Runtime -> Restart Runtime) for any upgraded packages to take effect """ # If you're using Google Colab and not running locally, run this cell. ## Install dependencies !pip install wget !apt-get install sox libsndfile1 ffmpeg !pip install unidecode !pip install matplotlib>=3.3.2 ## Install NeMo !python -m pip install git+https://github.com/NVIDIA/NeMo.git@main#egg=nemo_toolkit[all] ## Grab the config we'll use in this example !mkdir configs !wget -P configs/ https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/asr/conf/config.yaml """ Remember to restart the runtime for the kernel to pick up any upgraded packages (e.g. matplotlib)! Alternatively, you can uncomment the exit() below to crash and restart the kernel, in the case that you want to use the "Run All Cells" (or similar) option. """ # exit() ``` # Introduction to End-To-End Automatic Speech Recognition This notebook contains a basic tutorial of Automatic Speech Recognition (ASR) concepts, introduced with code snippets using the [NeMo framework](https://github.com/NVIDIA/NeMo). We will first introduce the basics of the main concepts behind speech recognition, then explore concrete examples of what the data looks like and walk through putting together a simple end-to-end ASR pipeline. We assume that you are familiar with general machine learning concepts and can follow Python code, and we'll be using the [AN4 dataset from CMU](http://www.speech.cs.cmu.edu/databases/an4/) (with processing using `sox`). 
## Conceptual Overview: What is ASR? ASR, or **Automatic Speech Recognition**, refers to the problem of getting a program to automatically transcribe spoken language (speech-to-text). Our goal is usually to have a model that minimizes the **Word Error Rate (WER)** metric when transcribing speech input. In other words, given some audio file (e.g. a WAV file) containing speech, how do we transform this into the corresponding text with as few errors as possible? Traditional speech recognition takes a generative approach, modeling the full pipeline of how speech sounds are produced in order to evaluate a speech sample. We would start from a **language model** that encapsulates the most likely orderings of words that are generated (e.g. an n-gram model), to a **pronunciation model** for each word in that ordering (e.g. a pronunciation table), to an **acoustic model** that translates those pronunciations to audio waveforms (e.g. a Gaussian Mixture Model). Then, if we receive some spoken input, our goal would be to find the most likely sequence of text that would result in the given audio according to our generative pipeline of models. Overall, with traditional speech recognition, we try to model `Pr(audio|transcript)*Pr(transcript)`, and take the argmax of this over possible transcripts. Over time, neural nets advanced to the point where each component of the traditional speech recognition model could be replaced by a neural model that had better performance and that had a greater potential for generalization. For example, we could replace an n-gram model with a neural language model, and replace a pronunciation table with a neural pronunciation model, and so on. However, each of these neural models need to be trained individually on different tasks, and errors in any model in the pipeline could throw off the whole prediction. 
Thus, we can see the appeal of **end-to-end ASR architectures**: discriminative models that simply take an audio input and give a textual output, and in which all components of the architecture are trained together towards the same goal. The model's encoder would be akin to an acoustic model for extracting speech features, which can then be directly piped to a decoder which outputs text. If desired, we could integrate a language model that would improve our predictions, as well. And the entire end-to-end ASR model can be trained at once--a much easier pipeline to handle! ### End-To-End ASR With an end-to-end model, we want to directly learn `Pr(transcript|audio)` in order to predict the transcripts from the original audio. Since we are dealing with sequential information--audio data over time that corresponds to a sequence of letters--RNNs are the obvious choice. But now we have a pressing problem to deal with: since our input sequence (number of audio timesteps) is not the same length as our desired output (transcript length), how do we match each time step from the audio data to the correct output characters? Earlier speech recognition approaches relied on **temporally-aligned data**, in which each segment of time in an audio file was matched up to a corresponding speech sound such as a phoneme or word. However, if we would like to have the flexibility to predict letter-by-letter to prevent OOV (out of vocabulary) issues, then each time step in the data would have to be labeled with the letter sound that the speaker is making at that point in the audio file. With that information, it seems like we should simply be able to try to predict the correct letter for each time step and then collapse the repeated letters (e.g. the prediction output `LLLAAAAPPTOOOPPPP` would become `LAPTOP`). 
It turns out that this idea has some problems: not only does alignment make the dataset incredibly labor-intensive to label, but also, what do we do with words like "book" that contain consecutive repeated letters? Simply squashing repeated letters together would not work in that case! ![Alignment example](https://raw.githubusercontent.com/NVIDIA/NeMo/master/examples/asr/notebooks/images/alignment_example.png) Modern end-to-end approaches get around this using methods that don't require manual alignment at all, so that the input-output pairs are really just the raw audio and the transcript--no extra data or labeling required. Let's briefly go over two popular approaches that allow us to do this, Connectionist Temporal Classification (CTC) and sequence-to-sequence models with attention. #### Connectionist Temporal Classification (CTC) In normal speech recognition prediction output, we would expect to have characters such as the letters from A through Z, numbers 0 through 9, spaces ("\_"), and so on. CTC introduces a new intermediate output token called the **blank token** ("-") that is useful for getting around the alignment issue. With CTC, we still predict one token per time segment of speech, but we use the blank token to figure out where we can and can't collapse the predictions. The appearance of a blank token helps separate repeating letters that should not be collapsed. For instance, with an audio snippet segmented into `T=11` time steps, we could get predictions that look like `BOO-OOO--KK`, which would then collapse to `"BO-O-K"`, and then we would remove the blank tokens to get our final output, `BOOK`. Now, we can predict one output token per time step, then collapse and clean to get sensible output without any fear of ambiguity from repeating letters! A simple way of getting predictions like this would be to apply a bidirectional RNN to the audio input, apply softmax over each time step's output, and then take the token with the highest probability. 
The method of always taking the best token at each time step is called **greedy decoding, or max decoding**. To calculate our loss for backprop, we would like to know the log probability of the model producing the correct transcript, `log(Pr(transcript|audio))`. We can get the log probability of a single intermediate output sequence (e.g. `BOO-OOO--KK`) by summing over the log probabilities we get from each token's softmax value, but note that the resulting sum is different from the log probability of the transcript itself (`BOOK`). This is because there are multiple possible output sequences of the same length that can be collapsed to get the same transcript (e.g. `BBO--OO-KKK` also results in `BOOK`), and so we need to **marginalize over every valid sequence of length `T` that collapses to the transcript**. Therefore, to get our transcript's log probability given our audio input, we must sum the log probabilities of every sequence of length `T` that collapses to the transcript (e.g. `log(Pr(output: "BOOK"|audio)) = log(Pr(BOO-OOO--KK|audio)) + log(Pr(BBO--OO-KKK|audio)) + ...`). In practice, we can use a dynamic programming approach to calculate this, accumulating our log probabilities over different "paths" through the softmax outputs at each time step. If you would like a more in-depth explanation of how CTC works, or how we can improve our results by using a modified beam search algorithm, feel free to check out the Further Reading section at the end of this notebook for more resources. #### Sequence-to-Sequence with Attention One problem with CTC is that predictions at different time steps are conditionally independent, which is an issue because the words in a continuous utterance tend to be related to each other in some sensible way. With this conditional independence assumption, we can't learn a language model that can represent such dependencies, though we can add a language model on top of the CTC output to mitigate this to some degree. 
A popular alternative is to use a sequence-to-sequence model with attention. A typical seq2seq model for ASR consists of some sort of **bidirectional RNN encoder** that consumes the audio sequence timestep-by-timestep, and where the outputs are then passed to an **attention-based decoder**. Each prediction from the decoder is based on attending to some parts of the entire encoded input, as well as the previously outputted tokens. The outputs of the decoder can be anything from word pieces to phonemes to letters, and since predictions are not directly tied to time steps of the input, we can just continue producing tokens one-by-one until an end token is given (or we reach a specified max output length). This way, we do not need to deal with audio alignment, and our predicted transcript is just the sequence of outputs given by our decoder. Now that we have an idea of what some popular end-to-end ASR models look like, let's take a look at the audio data we'll be working with for our example. ## Taking a Look at Our Data (AN4) The AN4 dataset, also known as the Alphanumeric dataset, was collected and published by Carnegie Mellon University. It consists of recordings of people spelling out addresses, names, telephone numbers, etc., one letter or number at a time, as well as their corresponding transcripts. We choose to use AN4 for this tutorial because it is relatively small, with 948 training and 130 test utterances, and so it trains quickly. Before we get started, let's download and prepare the dataset. The utterances are available as `.sph` files, so we will need to convert them to `.wav` for later processing. If you are not using Google Colab, please make sure you have [Sox](http://sox.sourceforge.net/) installed for this step--see the "Downloads" section of the linked Sox homepage. (If you are using Google Colab, Sox should have already been installed in the setup cell at the beginning.) ``` # This is where the an4/ directory will be placed. 
# Change this if you don't want the data to be extracted in the current directory. data_dir = '.' import glob import os import subprocess import tarfile import wget # Download the dataset. This will take a few moments... print("******") if not os.path.exists(data_dir + '/an4_sphere.tar.gz'): an4_url = 'http://www.speech.cs.cmu.edu/databases/an4/an4_sphere.tar.gz' an4_path = wget.download(an4_url, data_dir) print(f"Dataset downloaded at: {an4_path}") else: print("Tarfile already exists.") an4_path = data_dir + '/an4_sphere.tar.gz' if not os.path.exists(data_dir + '/an4/'): # Untar and convert .sph to .wav (using sox) tar = tarfile.open(an4_path) tar.extractall(path=data_dir) print("Converting .sph to .wav...") sph_list = glob.glob(data_dir + '/an4/**/*.sph', recursive=True) for sph_path in sph_list: wav_path = sph_path[:-4] + '.wav' cmd = ["sox", sph_path, wav_path] subprocess.run(cmd) print("Finished conversion.\n******") ``` You should now have a folder called `an4` that contains `etc/an4_train.transcription`, `etc/an4_test.transcription`, audio files in `wav/an4_clstk` and `wav/an4test_clstk`, along with some other files we will not need. Now we can load and take a look at the data. As an example, file `cen2-mgah-b.wav` is a 2.6 second-long audio recording of a man saying the letters "G L E N N" one-by-one. To confirm this, we can listen to the file: ``` import librosa import IPython.display as ipd # Load and listen to the audio file example_file = data_dir + '/an4/wav/an4_clstk/mgah/cen2-mgah-b.wav' audio, sample_rate = librosa.load(example_file) ipd.Audio(example_file, rate=sample_rate) ``` In an ASR task, if this WAV file was our input, then "G L E N N" would be our desired output. Let's plot the waveform, which is simply a line plot of the sequence of values that we read from the file. 
This is a format of viewing audio that you are likely to be familiar with seeing in many audio editors and visualizers: ``` %matplotlib inline import librosa.display import matplotlib.pyplot as plt # Plot our example audio file's waveform plt.rcParams['figure.figsize'] = (15,7) plt.title('Waveform of Audio Example') plt.ylabel('Amplitude') _ = librosa.display.waveplot(audio) ``` We can see the activity in the waveform that corresponds to each letter in the audio, as our speaker here enunciates quite clearly! You can kind of tell that each spoken letter has a different "shape," and it's interesting to note that last two blobs look relatively similar, which is expected because they are both the letter "N." ### Spectrograms and Mel Spectrograms However, since audio information is more useful in the context of frequencies of sound over time, we can get a better representation than this raw sequence of 57,330 values. We can apply a [Fourier Transform](https://en.wikipedia.org/wiki/Fourier_transform) on our audio signal to get something more useful: a **spectrogram**, which is a representation of the energy levels (i.e. amplitude, or "loudness") of each frequency (i.e. pitch) of the signal over the duration of the file. A spectrogram (which can be viewed as a heat map) is a good way of seeing how the *strengths of various frequencies in the audio vary over time*, and is obtained by breaking up the signal into smaller, usually overlapping chunks and performing a Short-Time Fourier Transform (STFT) on each. Let's examine what the spectrogram of our sample looks like. 
``` import numpy as np # Get spectrogram using Librosa's Short-Time Fourier Transform (stft) spec = np.abs(librosa.stft(audio)) spec_db = librosa.amplitude_to_db(spec, ref=np.max) # Decibels # Use log scale to view frequencies librosa.display.specshow(spec_db, y_axis='log', x_axis='time') plt.colorbar() plt.title('Audio Spectrogram'); ``` Again, we are able to see each letter being pronounced, and that the last two blobs that correspond to the "N"s are pretty similar-looking. But how do we interpret these shapes and colors? Just as in the waveform plot before, we see time passing on the x-axis (all 2.6s of audio). But now, the y-axis represents different frequencies (on a log scale), and *the color on the plot shows the strength of a frequency at a particular point in time*. We're still not done yet, as we can make one more potentially useful tweak: using the **Mel Spectrogram** instead of the normal spectrogram. This is simply a change in the frequency scale that we use from linear (or logarithmic) to the mel scale, which is "a perceptual scale of pitches judged by listeners to be equal in distance from one another" (from [Wikipedia](https://en.wikipedia.org/wiki/Mel_scale)). In other words, it's a transformation of the frequencies to be more aligned to what humans perceive; a change of +1000Hz from 2000Hz->3000Hz sounds like a larger difference to us than 9000Hz->10000Hz does, so the mel scale normalizes this such that equal distances sound like equal differences to the human ear. Intuitively, we use the mel spectrogram because in this case we are processing and transcribing human speech, such that transforming the scale to better match what we hear is a useful procedure. 
``` # Plot the mel spectrogram of our sample mel_spec = librosa.feature.melspectrogram(audio, sr=sample_rate) mel_spec_db = librosa.power_to_db(mel_spec, ref=np.max) librosa.display.specshow( mel_spec_db, x_axis='time', y_axis='mel') plt.colorbar() plt.title('Mel Spectrogram'); ``` ## Convolutional ASR Models Let's take a look at the model that we will be building, and how we specify its parameters. ### The Jasper Model We will be training a small [Jasper (Just Another SPeech Recognizer) model](https://arxiv.org/abs/1904.03288) from scratch (i.e. initialized randomly). In brief, Jasper architectures consist of a repeated block structure that utilizes 1D convolutions. In a Jasper_KxR model, `R` sub-blocks (consisting of a 1D convolution, batch norm, ReLU, and dropout) are grouped into a single block, which is then repeated `K` times. We also have one extra block at the beginning and a few more at the end that are invariant of `K` and `R`, and we use CTC loss. ### The QuartzNet Model QuartzNet is a better variant of Jasper, with the key difference that it uses time-channel separable 1D convolutions. This allows it to dramatically reduce the number of weights while keeping similar accuracy. Jasper/QuartzNet models look like this (a QuartzNet model is pictured): ![QuartzNet with CTC](https://developer.nvidia.com/blog/wp-content/uploads/2020/05/quartznet-model-architecture-1-625x742.png) # Using NeMo for Automatic Speech Recognition Now that we have an idea of what ASR is and what the audio data looks like, we can start using NeMo to do some ASR! We'll be using the **Neural Modules (NeMo) toolkit** for this part, so if you haven't already, you should download and install NeMo and its dependencies. To do so, just follow the directions on the [GitHub page](https://github.com/NVIDIA/NeMo), or in the [documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/index.html). 
NeMo lets us easily hook together the components (modules) of our model, such as the data layer, intermediate layers, and various losses, without worrying too much about implementation details of individual parts or connections between modules. NeMo also comes with complete models which only require your data and hyperparameters for training. ``` # NeMo's "core" package import nemo # NeMo's ASR collection - this collection contains complete ASR models and # building blocks (modules) for ASR import nemo.collections.asr as nemo_asr ``` ## Using an Out-of-the-Box Model NeMo's ASR collection comes with many building blocks and even complete models that we can use for training and evaluation. Moreover, several models come with pre-trained weights. Let's instantiate a complete QuartzNet15x5 model. ``` # This line will download pre-trained QuartzNet15x5 model from NVIDIA's NGC cloud and instantiate it for you quartznet = nemo_asr.models.EncDecCTCModel.from_pretrained(model_name="QuartzNet15x5Base-En") ``` Next, we'll simply add paths to files we want to transcribe into the list and pass it to our model. Note that it will work for relatively short (<25 seconds) files. ``` files = ['./an4/wav/an4_clstk/mgah/cen2-mgah-b.wav'] for fname, transcription in zip(files, quartznet.transcribe(paths2audio_files=files)): print(f"Audio in {fname} was recognized as: {transcription}") ``` That was easy! But there are plenty of scenarios where you would want to fine-tune the model on your own data or even train from scratch. For example, this out-of-the-box model will obviously not work for Spanish and would likely perform poorly for telephone audio. So if you have collected your own data, you certainly should attempt to fine-tune or train on it! ## Training from Scratch To train from scratch, you need to prepare your training data in the right format and specify your model's architecture. 
### Creating Data Manifests The first thing we need to do now is to create manifests for our training and evaluation data, which will contain the metadata of our audio files. NeMo data sets take in a standardized manifest format where each line corresponds to one sample of audio, such that the number of lines in a manifest is equal to the number of samples that are represented by that manifest. A line must contain the path to an audio file, the corresponding transcript (or path to a transcript file), and the duration of the audio sample. Here's an example of what one line in a NeMo-compatible manifest might look like: ``` {"audio_filepath": "path/to/audio.wav", "duration": 3.45, "text": "this is a nemo tutorial"} ``` We can build our training and evaluation manifests using `an4/etc/an4_train.transcription` and `an4/etc/an4_test.transcription`, which have lines containing transcripts and their corresponding audio file IDs: ``` ... <s> P I T T S B U R G H </s> (cen5-fash-b) <s> TWO SIX EIGHT FOUR FOUR ONE EIGHT </s> (cen7-fash-b) ... ``` ``` # --- Building Manifest Files --- # import json # Function to build a manifest def build_manifest(transcripts_path, manifest_path, wav_path): with open(transcripts_path, 'r') as fin: with open(manifest_path, 'w') as fout: for line in fin: # Lines look like this: # <s> transcript </s> (fileID) transcript = line[: line.find('(')-1].lower() transcript = transcript.replace('<s>', '').replace('</s>', '') transcript = transcript.strip() file_id = line[line.find('(')+1 : -2] # e.g. 
"cen4-fash-b" audio_path = os.path.join( data_dir, wav_path, file_id[file_id.find('-')+1 : file_id.rfind('-')], file_id + '.wav') duration = librosa.core.get_duration(filename=audio_path) # Write the metadata to the manifest metadata = { "audio_filepath": audio_path, "duration": duration, "text": transcript } json.dump(metadata, fout) fout.write('\n') # Building Manifests print("******") train_transcripts = data_dir + '/an4/etc/an4_train.transcription' train_manifest = data_dir + '/an4/train_manifest.json' if not os.path.isfile(train_manifest): build_manifest(train_transcripts, train_manifest, 'an4/wav/an4_clstk') print("Training manifest created.") test_transcripts = data_dir + '/an4/etc/an4_test.transcription' test_manifest = data_dir + '/an4/test_manifest.json' if not os.path.isfile(test_manifest): build_manifest(test_transcripts, test_manifest, 'an4/wav/an4test_clstk') print("Test manifest created.") print("***Done***") ``` ### Specifying Our Model with a YAML Config File For this tutorial, we'll build a *Jasper_4x1 model*, with `K=4` blocks of single (`R=1`) sub-blocks and a *greedy CTC decoder*, using the configuration found in `./configs/config.yaml`. If we open up this config file, we find model section which describes architecture of our model. A model contains an entry labeled `encoder`, with a field called `jasper` that contains a list with multiple entries. Each of the members in this list specifies one block in our model, and looks something like this: ``` - filters: 128 repeat: 1 kernel: [11] stride: [2] dilation: [1] dropout: 0.2 residual: false separable: true se: true se_context_size: -1 ``` The first member of the list corresponds to the first block in the Jasper architecture diagram, which appears regardless of `K` and `R`. Next, we have four entries that correspond to the `K=4` blocks, and each has `repeat: 1` since we are using `R=1`. 
These are followed by two more entries for the blocks that appear at the end of our Jasper model before the CTC loss. There are also some entries at the top of the file that specify how we will handle training (`train_ds`) and validation (`validation_ds`) data. Using a YAML config such as this is helpful for getting a quick and human-readable overview of what your architecture looks like, and allows you to swap out model and run configurations easily without needing to change your code. ``` # --- Config Information ---# from ruamel.yaml import YAML config_path = './configs/config.yaml' yaml = YAML(typ='safe') with open(config_path) as f: params = yaml.load(f) print(params) ``` ### Training with PyTorch Lightning NeMo models and modules can be used in any PyTorch code where torch.nn.Module is expected. However, NeMo's models are based on [PytorchLightning's](https://github.com/PyTorchLightning/pytorch-lightning) LightningModule and we recommend you use PytorchLightning for training and fine-tuning as it makes using mixed precision and distributed training very easy. So to start, let's create a Trainer instance for training on GPU for 50 epochs: ``` import pytorch_lightning as pl trainer = pl.Trainer(gpus=1, max_epochs=50) ``` Next, we instantiate an ASR model based on our ``config.yaml`` file from the previous section. Note that this is a stage during which we also tell the model where our training and validation manifests are. ``` from omegaconf import DictConfig params['model']['train_ds']['manifest_filepath'] = train_manifest params['model']['validation_ds']['manifest_filepath'] = test_manifest first_asr_model = nemo_asr.models.EncDecCTCModel(cfg=DictConfig(params['model']), trainer=trainer) ``` With that, we can start training with just one line! ``` # Start training!!! trainer.fit(first_asr_model) ``` There we go! We've put together a full training pipeline for the model and trained it for 50 epochs. 
If you'd like to save this model checkpoint for loading later (e.g. for fine-tuning, or for continuing training), you can simply call `first_asr_model.save_to(<checkpoint_path>)`. Then, to restore your weights, you can rebuild the model using the config (let's say you call it `first_asr_model_continued` this time) and call `first_asr_model_continued.restore_from(<checkpoint_path>)`. ### After Training: Monitoring Progress and Changing Hyperparameters We can now start Tensorboard to see how training went. Recall that WER stands for Word Error Rate and so the lower it is, the better. ``` try: from google import colab COLAB_ENV = True except (ImportError, ModuleNotFoundError): COLAB_ENV = False # Load the TensorBoard notebook extension if COLAB_ENV: %load_ext tensorboard %tensorboard --logdir lightning_logs/ else: print("To use tensorboard, please use this notebook in a Google Colab environment.") ``` We could improve this model by playing with hyperparameters. We can look at the current hyperparameters with the following: ``` print(params['model']['optim']) ``` Let's say we wanted to change the learning rate. To do so, we can create a `new_opt` dict and set our desired learning rate, then call `<model>.setup_optimization()` with the new optimization parameters. ``` import copy new_opt = copy.deepcopy(params['model']['optim']) new_opt['lr'] = 0.001 first_asr_model.setup_optimization(optim_config=DictConfig(new_opt)) # And then you can invoke trainer.fit(first_asr_model) ``` ## Inference Let's have a quick look at how one could run inference with NeMo's ASR model. First, ``EncDecCTCModel`` and its subclasses contain a handy ``transcribe`` method which can be used to simply obtain audio files' transcriptions. It also has batch_size argument to improve performance. 
``` print(first_asr_model.transcribe(paths2audio_files=['./an4/wav/an4_clstk/mgah/cen2-mgah-b.wav', './an4/wav/an4_clstk/fmjd/cen7-fmjd-b.wav', './an4/wav/an4_clstk/fmjd/cen8-fmjd-b.wav', './an4/wav/an4_clstk/fkai/cen8-fkai-b.wav'], batch_size=4)) ``` Below is an example of a simple inference loop in pure PyTorch. It also shows how one can compute Word Error Rate (WER) metric between predictions and references. ``` # Bigger batch-size = bigger throughput params['model']['validation_ds']['batch_size'] = 16 # Setup the test data loader and make sure the model is on GPU first_asr_model.setup_test_data(test_data_config=params['model']['validation_ds']) first_asr_model.cuda() # We will be computing Word Error Rate (WER) metric between our hypothesis and predictions. # WER is computed as numerator/denominator. # We'll gather all the test batches' numerators and denominators. wer_nums = [] wer_denoms = [] # Loop over all test batches. # Iterating over the model's `test_dataloader` will give us: # (audio_signal, audio_signal_length, transcript_tokens, transcript_length) # See the AudioToCharDataset for more details. for test_batch in first_asr_model.test_dataloader(): test_batch = [x.cuda() for x in test_batch] targets = test_batch[2] targets_lengths = test_batch[3] log_probs, encoded_len, greedy_predictions = first_asr_model( input_signal=test_batch[0], input_signal_length=test_batch[1] ) # Notice the model has a helper object to compute WER wer_num, wer_denom = first_asr_model._wer(greedy_predictions, targets, targets_lengths) wer_nums.append(wer_num.detach().cpu().numpy()) wer_denoms.append(wer_denom.detach().cpu().numpy()) # We need to sum all numerators and denominators first. Then divide. print(f"WER = {sum(wer_nums)/sum(wer_denoms)}") ``` This WER is not particularly impressive and could be significantly improved. You could train longer (try 100 epochs) to get a better number. Check out the next section on how to improve it further. 
## Model Improvements You already have all you need to create your own ASR model in NeMo, but there are a few more tricks that you can employ if you so desire. In this section, we'll briefly cover a few possibilities for improving an ASR model. ### Data Augmentation There exist several ASR data augmentation methods that can increase the size of our training set. For example, we can perform augmentation on the spectrograms by zeroing out specific frequency segments ("frequency masking") or time segments ("time masking") as described by [SpecAugment](https://arxiv.org/abs/1904.08779), or zero out rectangles on the spectrogram as in [Cutout](https://arxiv.org/pdf/1708.04552.pdf). In NeMo, we can do all three of these by simply adding in a `SpectrogramAugmentation` neural module. (As of now, it does not perform the time warping from the SpecAugment paper.) Our toy model does not do spectrogram augmentation. But the real one we got from cloud does: ``` print(quartznet._cfg['spec_augment']) ``` If you want to enable SpecAugment in your model, make sure your .yaml config file contains 'model/spec_augment' section which looks like the one above. ### Transfer learning Transfer learning is an important machine learning technique that uses a model’s knowledge of one task to make it perform better on another. Fine-tuning is one of the techniques to perform transfer learning. It is an essential part of the recipe for many state-of-the-art results where a base model is first pretrained on a task with abundant training data and then fine-tuned on different tasks of interest where the training data is less abundant or even scarce. In ASR you might want to do fine-tuning in multiple scenarios, for example, when you want to improve your model's performance on a particular domain (medical, financial, etc.) or on accented speech. You can even transfer learn from one language to another! Check out [this paper](https://arxiv.org/abs/2005.04290) for examples. 
Transfer learning with NeMo is simple. Let's demonstrate how the model we got from the cloud could be fine-tuned on AN4 data. (NOTE: this is a toy example). And, while we are at it, we will change the model's vocabulary, just to demonstrate how it's done. ``` # Check what kind of vocabulary/alphabet the model has right now print(quartznet.decoder.vocabulary) # Let's add "!" symbol there. Note that you can (and should!) change the vocabulary # entirely when fine-tuning using a different language. quartznet.change_vocabulary( new_vocabulary=[ ' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", "!" ] ) ``` After this, our decoder has completely changed, but our encoder (which is where most of the weights are) remained intact. Let's fine-tune this model for 2 epochs on the AN4 dataset. We will also use the smaller learning rate from `new_opt` (see the "After Training" section). ``` # Use the smaller learning rate we set before quartznet.setup_optimization(optim_config=DictConfig(new_opt)) # Point to the data we'll use for fine-tuning as the training set quartznet.setup_training_data(train_data_config=params['model']['train_ds']) # Point to the new validation data for fine-tuning quartznet.setup_validation_data(val_data_config=params['model']['validation_ds']) # And now we can create a PyTorch Lightning trainer and call `fit` again. trainer = pl.Trainer(gpus=[1], max_epochs=2) trainer.fit(quartznet) ``` ### Fast Training Last but not least, we could simply speed up training our model! If you have the resources, you can speed up training by splitting the workload across multiple GPUs. Otherwise (or in addition), there's always mixed precision training, which allows you to increase your batch size. You can use [PyTorch Lightning's Trainer object](https://pytorch-lightning.readthedocs.io/en/latest/trainer.html) to handle mixed-precision and distributed training for you. 
Below are some examples of flags you would pass to the `Trainer` to use these features: ```python # Mixed precision: trainer = pl.Trainer(amp_level='O1', precision=16) # Trainer with a distributed backend: trainer = pl.Trainer(gpus=2, num_nodes=2, distributed_backend='ddp') # Of course, you can combine these flags as well. ``` Finally, have a look at [example scripts in NeMo repository](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/speech_to_text.py) which can handle mixed precision and distributed training using command-line arguments. ## Under the Hood NeMo is open-source and we do all our model development in the open, so you can inspect our code if you wish. In particular, ``nemo_asr.model.EncDecCTCModel`` is an encoder-decoder model which is constructed using several ``Neural Modules`` taken from ``nemo_asr.modules.`` Here is what its forward pass looks like: ```python def forward(self, input_signal, input_signal_length): processed_signal, processed_signal_len = self.preprocessor( input_signal=input_signal, length=input_signal_length, ) # Spec augment is not applied during evaluation/testing if self.spec_augmentation is not None and self.training: processed_signal = self.spec_augmentation(input_spec=processed_signal) encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_len) log_probs = self.decoder(encoder_output=encoded) greedy_predictions = log_probs.argmax(dim=-1, keepdim=False) return log_probs, encoded_len, greedy_predictions ``` Here: * ``self.preprocessor`` is an instance of ``nemo_asr.modules.AudioToMelSpectrogramPreprocessor``, which is a neural module that takes audio signal and converts it into a Mel-Spectrogram * ``self.spec_augmentation`` - is a neural module of type ```nemo_asr.modules.SpectrogramAugmentation``, which implements data augmentation. 
* ``self.encoder`` - is a convolutional Jasper/QuartzNet-like encoder of type ``nemo_asr.modules.ConvASREncoder`` * ``self.decoder`` - is a ``nemo_asr.modules.ConvASRDecoder`` which simply projects into the target alphabet (vocabulary). Also, ``EncDecCTCModel`` uses the audio dataset class ``nemo_asr.data.AudioToCharDataset`` and CTC loss implemented in ``nemo_asr.losses.CTCLoss``. You can use these and other neural modules (or create new ones yourself!) to construct new ASR models. # Further Reading/Watching: That's all for now! If you'd like to learn more about the topics covered in this tutorial, here are some resources that may interest you: - [Stanford Lecture on ASR](https://www.youtube.com/watch?v=3MjIkWxXigM) - ["An Intuitive Explanation of Connectionist Temporal Classification"](https://towardsdatascience.com/intuitively-understanding-connectionist-temporal-classification-3797e43a86c) - [Explanation of CTC with Prefix Beam Search](https://medium.com/corti-ai/ctc-networks-and-language-models-prefix-beam-search-explained-c11d1ee23306) - [Listen Attend and Spell Paper (seq2seq ASR model)](https://arxiv.org/abs/1508.01211) - [Explanation of the mel spectrogram in more depth](https://towardsdatascience.com/getting-to-know-the-mel-spectrogram-31bca3e2d9d0) - [Jasper Paper](https://arxiv.org/abs/1904.03288) - [QuartzNet paper](https://arxiv.org/abs/1910.10261) - [SpecAugment Paper](https://arxiv.org/abs/1904.08779) - [Explanation and visualization of SpecAugment](https://towardsdatascience.com/state-of-the-art-audio-data-augmentation-with-google-brains-specaugment-and-pytorch-d3d1a3ce291e) - [Cutout Paper](https://arxiv.org/pdf/1708.04552.pdf) - [Transfer Learning Blogpost](https://developer.nvidia.com/blog/jump-start-training-for-speech-recognition-models-with-nemo/)
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. # AML Pipeline with AdlaStep This notebook is used to demonstrate the use of AdlaStep in AML Pipeline. ## AML and Pipeline SDK-specific imports ``` import os import azureml.core from azureml.core.compute import ComputeTarget, DatabricksCompute from azureml.exceptions import ComputeTargetException from azureml.core import Workspace, Run, Experiment from azureml.pipeline.core import Pipeline, PipelineData from azureml.pipeline.steps import AdlaStep from azureml.core.datastore import Datastore from azureml.data.data_reference import DataReference from azureml.core import attach_legacy_compute_target # Check core SDK version number print("SDK version:", azureml.core.VERSION) ``` ## Initialize Workspace Initialize a workspace object from persisted configuration. Make sure the config file is present at .\config.json ``` ws = Workspace.from_config() print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n') script_folder = '.' 
experiment_name = "adla_101_experiment" ws._initialize_folder(experiment_name=experiment_name, directory=script_folder) ``` ## Register Datastore ``` workspace = ws.name datastore_name='MyAdlsDatastore' subscription_id=os.getenv("ADL_SUBSCRIPTION_62", "<my-subscription-id>") # subscription id of ADLS account resource_group=os.getenv("ADL_RESOURCE_GROUP_62", "<my-resource-group>") # resource group of ADLS account store_name=os.getenv("ADL_STORENAME_62", "<my-datastore-name>") # ADLS account name tenant_id=os.getenv("ADL_TENANT_62", "<my-tenant-id>") # tenant id of service principal client_id=os.getenv("ADL_CLIENTID_62", "<my-client-id>") # client id of service principal client_secret=os.getenv("ADL_CLIENT_62_SECRET", "<my-client-secret>") # the secret of service principal try: adls_datastore = Datastore.get(ws, datastore_name) print("found datastore with name: %s" % datastore_name) except: adls_datastore = Datastore.register_azure_data_lake( workspace=ws, datastore_name=datastore_name, subscription_id=subscription_id, # subscription id of ADLS account resource_group=resource_group, # resource group of ADLS account store_name=store_name, # ADLS account name tenant_id=tenant_id, # tenant id of service principal client_id=client_id, # client id of service principal client_secret=client_secret) # the secret of service principal print("registered datastore with name: %s" % datastore_name) ``` ## Create DataReferences and PipelineData In the code cell below, replace datastorename with your default datastore name. Copy the file `testdata.txt` (located in the pipeline folder that this notebook is in) to the path on the datastore. 
``` datastorename = "MyAdlsDatastore" adls_datastore = Datastore(workspace=ws, name=datastorename) script_input = DataReference( datastore=adls_datastore, data_reference_name="script_input", path_on_datastore="testdata/testdata.txt") script_output = PipelineData("script_output", datastore=adls_datastore) print("Created Pipeline Data") ``` ## Setup Data Lake Account ADLA can only use data that is located in the default data store associated with that ADLA account. Through Azure portal, check the name of the default data store corresponding to the ADLA account you are using below. Replace the value associated with `adla_compute_name` in the code cell below accordingly. ``` adla_compute_name = 'testadl' # Replace this with your default compute from azureml.core.compute import ComputeTarget, AdlaCompute def get_or_create_adla_compute(workspace, compute_name): try: return AdlaCompute(workspace, compute_name) except ComputeTargetException as e: if 'ComputeTargetNotFound' in e.message: print('adla compute not found, creating...') provisioning_config = AdlaCompute.provisioning_configuration() adla_compute = ComputeTarget.create(workspace, compute_name, provisioning_config) adla_compute.wait_for_completion() return adla_compute else: raise e adla_compute = get_or_create_adla_compute(ws, adla_compute_name) # CLI: # Create: az ml computetarget setup adla -n <name> # BYOC: az ml computetarget attach adla -n <name> -i <resource-id> ``` Once the above code cell completes, run the below to check your ADLA compute status: ``` print("ADLA compute state:{}".format(adla_compute.provisioning_state)) print("ADLA compute state:{}".format(adla_compute.provisioning_errors)) print("Using ADLA compute:{}".format(adla_compute.cluster_resource_id)) ``` ## Create an AdlaStep **AdlaStep** is used to run U-SQL script using Azure Data Lake Analytics. 
- **name:** Name of module - **script_name:** name of U-SQL script - **inputs:** List of input port bindings - **outputs:** List of output port bindings - **adla_compute:** the ADLA compute to use for this job - **params:** Dictionary of name-value pairs to pass to U-SQL job *(optional)* - **degree_of_parallelism:** the degree of parallelism to use for this job *(optional)* - **priority:** the priority value to use for the current job *(optional)* - **runtime_version:** the runtime version of the Data Lake Analytics engine *(optional)* - **root_folder:** folder that contains the script, assemblies etc. *(optional)* - **hash_paths:** list of paths to hash to detect a change (script file is always hashed) *(optional)* ### Remarks You can use `@@name@@` syntax in your script to refer to inputs, outputs, and params. * if `name` is the name of an input or output port binding, any occurences of `@@name@@` in the script are replaced with actual data path of corresponding port binding. * if `name` matches any key in `params` dict, any occurences of `@@name@@` will be replaced with corresponding value in dict. #### Sample script ``` @resourcereader = EXTRACT query string FROM "@@script_input@@" USING Extractors.Csv(); OUTPUT @resourcereader TO "@@script_output@@" USING Outputters.Csv(); ``` ``` adla_step = AdlaStep( name='adla_script_step', script_name='test_adla_script.usql', inputs=[script_input], outputs=[script_output], compute_target=adla_compute) ``` ## Build and Submit the Experiment ``` pipeline = Pipeline( description="adla_102", workspace=ws, steps=[adla_step], default_source_directory=script_folder) pipeline_run = Experiment(workspace, experiment_name).submit(pipeline) pipeline_run.wait_for_completion() ``` ### View Run Details ``` from azureml.widgets import RunDetails RunDetails(pipeline_run).show() ``` ### Examine the run You can cycle through the node_run objects and examine job logs, stdout, and stderr of each of the steps. 
``` step_runs = pipeline_run.get_children() for step_run in step_runs: status = step_run.get_status() print('node', step_run.name, 'status:', status) if status == "Failed": joblog = step_run.get_job_log() print('job log:', joblog) stdout_log = step_run.get_stdout_log() print('stdout log:', stdout_log) stderr_log = step_run.get_stderr_log() print('stderr log:', stderr_log) with open("logs-" + step_run.name + ".txt", "w") as f: f.write(joblog) print("Job log written to logs-"+ step_run.name + ".txt") if status == "Finished": stdout_log = step_run.get_stdout_log() print('stdout log:', stdout_log) ```
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Transformer model for language understanding <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/text/transformer"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/text/transformer.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/transformer.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/text/transformer.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This tutorial trains a <a href="https://arxiv.org/abs/1706.03762" class="external">Transformer model</a> to translate Portuguese to English. This is an advanced example that assumes knowledge of [text generation](text_generation.ipynb) and [attention](nmt_with_attention.ipynb). 
* It makes no assumptions about the temporal/spatial relationships across the data. This is ideal for processing a set of objects (for example, [StarCraft units](https://deepmind.com/blog/alphastar-mastering-real-time-strategy-game-starcraft-ii/#block-8)).
Use [TFDS](https://www.tensorflow.org/datasets) to load the [Portuguese-English translation dataset](https://github.com/neulab/word-embeddings-for-nmt) from the [TED Talks Open Translation Project](https://www.ted.com/participate/translate).
def encode(lang1, lang2):
    """Tokenize a (Portuguese, English) example and bracket each token-id
    sequence with its tokenizer's start/end ids (vocab_size, vocab_size + 1)."""
    pt_start, pt_end = tokenizer_pt.vocab_size, tokenizer_pt.vocab_size + 1
    en_start, en_end = tokenizer_en.vocab_size, tokenizer_en.vocab_size + 1

    pt_ids = [pt_start] + tokenizer_pt.encode(lang1.numpy()) + [pt_end]
    en_ids = [en_start] + tokenizer_en.encode(lang2.numpy()) + [en_end]
    return pt_ids, en_ids
def get_angles(pos, i, d_model):
    """Positional-encoding angles: pos / 10000^(2*floor(i/2)/d_model).

    `pos` and `i` broadcast against each other (typically a column of
    positions against a row of dimension indices), so the result is a
    (positions, dims) table of angles in radians.
    """
    exponent = (2 * (i // 2)) / np.float32(d_model)
    inv_freq = 1 / np.power(10000, exponent)  # one rate per dimension pair
    return pos * inv_freq
def create_look_ahead_mask(size):
    """Return a (size, size) float mask with 1s strictly above the diagonal.

    Position i may only attend to positions <= i; the 1s mark the future
    entries that must be blocked.
    """
    # Upper triangle including the diagonal, then remove the diagonal itself.
    upper = tf.linalg.band_part(tf.ones((size, size)), 0, -1)
    return upper - tf.eye(size)  # (seq_len, seq_len)
def scaled_dot_product_attention(q, k, v, mask):
    """Compute attention weights and the attended values.

    q, k, v must have matching leading dimensions, and k, v must share their
    penultimate dimension (seq_len_k == seq_len_v). The mask (padding or
    look-ahead) must be broadcastable to (..., seq_len_q, seq_len_k).

    Args:
      q: query, shape == (..., seq_len_q, depth)
      k: key, shape == (..., seq_len_k, depth)
      v: value, shape == (..., seq_len_v, depth_v)
      mask: float tensor broadcastable to (..., seq_len_q, seq_len_k),
        or None. Defaults to None.

    Returns:
      output, attention_weights
    """
    # Raw similarity score between every query and every key.
    scores = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)

    # Scale by sqrt(depth) so large depths don't saturate the softmax.
    depth = tf.cast(tf.shape(k)[-1], tf.float32)
    logits = scores / tf.math.sqrt(depth)

    # Masked positions get a huge negative logit -> ~0 weight after softmax.
    if mask is not None:
        logits += (mask * -1e9)

    # Normalize over the key axis so each query's weights sum to 1.
    attention_weights = tf.nn.softmax(logits, axis=-1)  # (..., seq_len_q, seq_len_k)

    output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)
    return output, attention_weights
class MultiHeadAttention(tf.keras.layers.Layer):
  """Multi-head attention: linear projections, per-head scaled dot-product
  attention, head concatenation, and a final linear output layer."""

  def __init__(self, d_model, num_heads):
    super(MultiHeadAttention, self).__init__()
    self.num_heads = num_heads
    self.d_model = d_model

    # The model dimension must split evenly across heads.
    assert d_model % self.num_heads == 0
    self.depth = d_model // self.num_heads  # per-head dimensionality

    # Q/K/V projections plus the final output projection.
    self.wq = tf.keras.layers.Dense(d_model)
    self.wk = tf.keras.layers.Dense(d_model)
    self.wv = tf.keras.layers.Dense(d_model)
    self.dense = tf.keras.layers.Dense(d_model)

  def split_heads(self, x, batch_size):
    """Reshape (batch, seq, d_model) -> (batch, num_heads, seq, depth)."""
    x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
    return tf.transpose(x, perm=[0, 2, 1, 3])

  def call(self, v, k, q, mask):
    batch_size = tf.shape(q)[0]

    # Project, then split each projection into heads.
    q = self.split_heads(self.wq(q), batch_size)  # (batch, heads, seq_q, depth)
    k = self.split_heads(self.wk(k), batch_size)  # (batch, heads, seq_k, depth)
    v = self.split_heads(self.wv(v), batch_size)  # (batch, heads, seq_v, depth)

    # scaled_attention: (batch, heads, seq_q, depth)
    # attention_weights: (batch, heads, seq_q, seq_k)
    scaled_attention, attention_weights = scaled_dot_product_attention(
        q, k, v, mask)

    # Undo the head split: (batch, seq_q, heads, depth) -> (batch, seq_q, d_model).
    scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])
    concat_attention = tf.reshape(scaled_attention,
                                  (batch_size, -1, self.d_model))

    output = self.dense(concat_attention)  # (batch, seq_q, d_model)
    return output, attention_weights
def point_wise_feed_forward_network(d_model, dff):
  """Two dense layers applied identically at every position:
  (batch, seq, d_model) -> (batch, seq, dff) -> (batch, seq, d_model)."""
  hidden = tf.keras.layers.Dense(dff, activation='relu')  # expansion layer
  projection = tf.keras.layers.Dense(d_model)             # back to model dim
  return tf.keras.Sequential([hidden, projection])
class EncoderLayer(tf.keras.layers.Layer):
  """One encoder block: self-attention then a feed-forward network, each
  followed by dropout, a residual connection, and layer normalization."""

  def __init__(self, d_model, num_heads, dff, rate=0.1):
    super(EncoderLayer, self).__init__()
    self.mha = MultiHeadAttention(d_model, num_heads)
    self.ffn = point_wise_feed_forward_network(d_model, dff)

    self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
    self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)

    self.dropout1 = tf.keras.layers.Dropout(rate)
    self.dropout2 = tf.keras.layers.Dropout(rate)

  def call(self, x, training, mask):
    # Self-attention sub-layer with residual + layer norm.
    attn_output, _ = self.mha(x, x, x, mask)  # (batch, input_seq_len, d_model)
    out1 = self.layernorm1(x + self.dropout1(attn_output, training=training))

    # Feed-forward sub-layer with residual + layer norm.
    ffn_output = self.ffn(out1)  # (batch, input_seq_len, d_model)
    out2 = self.layernorm2(out1 + self.dropout2(ffn_output, training=training))

    return out2  # (batch, input_seq_len, d_model)
class DecoderLayer(tf.keras.layers.Layer):
  """One decoder block: masked self-attention, encoder-decoder attention,
  and a feed-forward network — each followed by dropout, a residual
  connection, and layer normalization."""

  def __init__(self, d_model, num_heads, dff, rate=0.1):
    super(DecoderLayer, self).__init__()
    self.mha1 = MultiHeadAttention(d_model, num_heads)  # masked self-attention
    self.mha2 = MultiHeadAttention(d_model, num_heads)  # attends to encoder output
    self.ffn = point_wise_feed_forward_network(d_model, dff)

    self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
    self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
    self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)

    self.dropout1 = tf.keras.layers.Dropout(rate)
    self.dropout2 = tf.keras.layers.Dropout(rate)
    self.dropout3 = tf.keras.layers.Dropout(rate)

  def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
    # enc_output.shape == (batch, input_seq_len, d_model)

    # Sub-layer 1: masked self-attention over the decoder input.
    attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask)
    out1 = self.layernorm1(self.dropout1(attn1, training=training) + x)

    # Sub-layer 2: queries from out1 attend over the encoder output.
    attn2, attn_weights_block2 = self.mha2(
        enc_output, enc_output, out1, padding_mask)
    out2 = self.layernorm2(self.dropout2(attn2, training=training) + out1)

    # Sub-layer 3: position-wise feed-forward network.
    ffn_output = self.dropout3(self.ffn(out2), training=training)
    out3 = self.layernorm3(ffn_output + out2)  # (batch, target_seq_len, d_model)

    return out3, attn_weights_block1, attn_weights_block2
class Encoder(tf.keras.layers.Layer):
  """Token embedding + positional encoding followed by a stack of
  `num_layers` EncoderLayers."""

  def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
               maximum_position_encoding, rate=0.1):
    super(Encoder, self).__init__()
    self.d_model = d_model
    self.num_layers = num_layers

    self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
    self.pos_encoding = positional_encoding(maximum_position_encoding,
                                            self.d_model)
    self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate)
                       for _ in range(num_layers)]
    self.dropout = tf.keras.layers.Dropout(rate)

  def call(self, x, training, mask):
    seq_len = tf.shape(x)[1]

    # Embed tokens, rescale by sqrt(d_model), add positional information.
    x = self.embedding(x)  # (batch, input_seq_len, d_model)
    x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
    x += self.pos_encoding[:, :seq_len, :]
    x = self.dropout(x, training=training)

    # Run the stack of encoder layers.
    for layer in self.enc_layers:
      x = layer(x, training, mask)

    return x  # (batch, input_seq_len, d_model)
class Decoder(tf.keras.layers.Layer):
  """Target embedding + positional encoding followed by a stack of
  `num_layers` DecoderLayers.

  Returns the decoder output together with a dict holding both attention
  blocks' weights for every layer."""

  def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size,
               maximum_position_encoding, rate=0.1):
    super(Decoder, self).__init__()
    self.d_model = d_model
    self.num_layers = num_layers

    self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
    self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)
    self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
                       for _ in range(num_layers)]
    self.dropout = tf.keras.layers.Dropout(rate)

  def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
    seq_len = tf.shape(x)[1]
    attention_weights = {}

    # Embed targets, rescale by sqrt(d_model), add positional information.
    x = self.embedding(x)  # (batch, target_seq_len, d_model)
    x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
    x += self.pos_encoding[:, :seq_len, :]
    x = self.dropout(x, training=training)

    # Run the stack, collecting each layer's attention maps.
    for idx, layer in enumerate(self.dec_layers, start=1):
      x, block1, block2 = layer(x, enc_output, training,
                                look_ahead_mask, padding_mask)
      attention_weights['decoder_layer{}_block1'.format(idx)] = block1
      attention_weights['decoder_layer{}_block2'.format(idx)] = block2

    # x.shape == (batch, target_seq_len, d_model)
    return x, attention_weights
class Transformer(tf.keras.Model):
  """Full encoder-decoder Transformer with a final linear projection onto
  the target vocabulary."""

  def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
               target_vocab_size, pe_input, pe_target, rate=0.1):
    super(Transformer, self).__init__()
    self.encoder = Encoder(num_layers, d_model, num_heads, dff,
                           input_vocab_size, pe_input, rate)
    self.decoder = Decoder(num_layers, d_model, num_heads, dff,
                           target_vocab_size, pe_target, rate)
    self.final_layer = tf.keras.layers.Dense(target_vocab_size)

  def call(self, inp, tar, training, enc_padding_mask, look_ahead_mask,
           dec_padding_mask):
    # Encode the source sentence: (batch, inp_seq_len, d_model).
    enc_output = self.encoder(inp, training, enc_padding_mask)

    # Decode conditioned on the encoder output: (batch, tar_seq_len, d_model).
    dec_output, attention_weights = self.decoder(
        tar, enc_output, training, look_ahead_mask, dec_padding_mask)

    # Vocabulary logits: (batch, tar_seq_len, target_vocab_size).
    final_output = self.final_layer(dec_output)
    return final_output, attention_weights
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
  """Learning-rate schedule from the Transformer paper:

  lrate = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)

  i.e. a linear warmup for `warmup_steps` steps, then inverse-sqrt decay.
  """

  def __init__(self, d_model, warmup_steps=4000):
    super(CustomSchedule, self).__init__()
    self.d_model = tf.cast(d_model, tf.float32)
    self.warmup_steps = warmup_steps

  def __call__(self, step):
    decay = tf.math.rsqrt(step)                   # step^-0.5
    warmup = step * (self.warmup_steps ** -1.5)   # linear ramp-up
    return tf.math.rsqrt(self.d_model) * tf.math.minimum(decay, warmup)
def loss_function(real, pred):
  """Masked sparse categorical cross-entropy.

  Positions where `real` is the pad id (0) contribute nothing; the result
  is the mean loss over non-padding tokens only.
  """
  non_pad = tf.math.logical_not(tf.math.equal(real, 0))
  per_token = loss_object(real, pred)

  weights = tf.cast(non_pad, dtype=per_token.dtype)
  masked = per_token * weights  # zero out padding positions

  return tf.reduce_sum(masked) / tf.reduce_sum(weights)
if ckpt_manager.latest_checkpoint: ckpt.restore(ckpt_manager.latest_checkpoint) print ('Latest checkpoint restored!!') ``` The target is divided into `tar_inp` and `tar_real`. `tar_inp` is passed as an input to the decoder. `tar_real` is that same input shifted by 1: At each location in `tar_inp`, `tar_real` contains the next token that should be predicted. For example, `sentence` = "SOS A lion in the jungle is sleeping EOS" `tar_inp` = "SOS A lion in the jungle is sleeping" `tar_real` = "A lion in the jungle is sleeping EOS" The transformer is an auto-regressive model: it makes predictions one part at a time, and uses its output so far to decide what to do next. During training this example uses teacher-forcing (like in the [text generation tutorial](./text_generation.ipynb)). Teacher forcing is passing the true output to the next time step regardless of what the model predicts at the current time step. As the transformer predicts each word, *self-attention* allows it to look at the previous words in the input sequence to better predict the next word. To prevent the model from peeking at the expected output the model uses a look-ahead mask. ``` EPOCHS = 20 # The @tf.function trace-compiles train_step into a TF graph for faster # execution. The function specializes to the precise shape of the argument # tensors. To avoid re-tracing due to the variable sequence lengths or variable # batch sizes (the last batch is smaller), use input_signature to specify # more generic shapes.
train_step_signature = [ tf.TensorSpec(shape=(None, None), dtype=tf.int64), tf.TensorSpec(shape=(None, None), dtype=tf.int64), ] @tf.function(input_signature=train_step_signature) def train_step(inp, tar): tar_inp = tar[:, :-1] tar_real = tar[:, 1:] enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp) with tf.GradientTape() as tape: predictions, _ = transformer(inp, tar_inp, True, enc_padding_mask, combined_mask, dec_padding_mask) loss = loss_function(tar_real, predictions) gradients = tape.gradient(loss, transformer.trainable_variables) optimizer.apply_gradients(zip(gradients, transformer.trainable_variables)) train_loss(loss) train_accuracy(accuracy_function(tar_real, predictions)) ``` Portuguese is used as the input language and English is the target language. ``` for epoch in range(EPOCHS): start = time.time() train_loss.reset_states() train_accuracy.reset_states() # inp -> portuguese, tar -> english for (batch, (inp, tar)) in enumerate(train_dataset): train_step(inp, tar) if batch % 50 == 0: print ('Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'.format( epoch + 1, batch, train_loss.result(), train_accuracy.result())) if (epoch + 1) % 5 == 0: ckpt_save_path = ckpt_manager.save() print ('Saving checkpoint for epoch {} at {}'.format(epoch+1, ckpt_save_path)) print ('Epoch {} Loss {:.4f} Accuracy {:.4f}'.format(epoch + 1, train_loss.result(), train_accuracy.result())) print ('Time taken for 1 epoch: {} secs\n'.format(time.time() - start)) ``` ## Evaluate The following steps are used for evaluation: * Encode the input sentence using the Portuguese tokenizer (`tokenizer_pt`). Moreover, add the start and end token so the input is equivalent to what the model is trained with. This is the encoder input. * The decoder input is the `start token == tokenizer_en.vocab_size`. * Calculate the padding masks and the look ahead masks. * The `decoder` then outputs the predictions by looking at the `encoder output` and its own output (self-attention). 
* Select the last word and calculate the argmax of that. * Concatentate the predicted word to the decoder input as pass it to the decoder. * In this approach, the decoder predicts the next word based on the previous words it predicted. Note: The model used here has less capacity to keep the example relatively faster so the predictions maybe less right. To reproduce the results in the paper, use the entire dataset and base transformer model or transformer XL, by changing the hyperparameters above. ``` def evaluate(inp_sentence): start_token = [tokenizer_pt.vocab_size] end_token = [tokenizer_pt.vocab_size + 1] # inp sentence is portuguese, hence adding the start and end token inp_sentence = start_token + tokenizer_pt.encode(inp_sentence) + end_token encoder_input = tf.expand_dims(inp_sentence, 0) # as the target is english, the first word to the transformer should be the # english start token. decoder_input = [tokenizer_en.vocab_size] output = tf.expand_dims(decoder_input, 0) for i in range(MAX_LENGTH): enc_padding_mask, combined_mask, dec_padding_mask = create_masks( encoder_input, output) # predictions.shape == (batch_size, seq_len, vocab_size) predictions, attention_weights = transformer(encoder_input, output, False, enc_padding_mask, combined_mask, dec_padding_mask) # select the last word from the seq_len dimension predictions = predictions[: ,-1:, :] # (batch_size, 1, vocab_size) predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32) # return the result if the predicted_id is equal to the end token if predicted_id == tokenizer_en.vocab_size+1: return tf.squeeze(output, axis=0), attention_weights # concatentate the predicted_id to the output which is given to the decoder # as its input. 
output = tf.concat([output, predicted_id], axis=-1) return tf.squeeze(output, axis=0), attention_weights def plot_attention_weights(attention, sentence, result, layer): fig = plt.figure(figsize=(16, 8)) sentence = tokenizer_pt.encode(sentence) attention = tf.squeeze(attention[layer], axis=0) for head in range(attention.shape[0]): ax = fig.add_subplot(2, 4, head+1) # plot the attention weights ax.matshow(attention[head][:-1, :], cmap='viridis') fontdict = {'fontsize': 10} ax.set_xticks(range(len(sentence)+2)) ax.set_yticks(range(len(result))) ax.set_ylim(len(result)-1.5, -0.5) ax.set_xticklabels( ['<start>']+[tokenizer_pt.decode([i]) for i in sentence]+['<end>'], fontdict=fontdict, rotation=90) ax.set_yticklabels([tokenizer_en.decode([i]) for i in result if i < tokenizer_en.vocab_size], fontdict=fontdict) ax.set_xlabel('Head {}'.format(head+1)) plt.tight_layout() plt.show() def translate(sentence, plot=''): result, attention_weights = evaluate(sentence) predicted_sentence = tokenizer_en.decode([i for i in result if i < tokenizer_en.vocab_size]) print('Input: {}'.format(sentence)) print('Predicted translation: {}'.format(predicted_sentence)) if plot: plot_attention_weights(attention_weights, sentence, result, plot) translate("este é um problema que temos que resolver.") print ("Real translation: this is a problem we have to solve .") translate("os meus vizinhos ouviram sobre esta ideia.") print ("Real translation: and my neighboring homes heard about this idea .") translate("vou então muito rapidamente partilhar convosco algumas histórias de algumas coisas mágicas que aconteceram.") print ("Real translation: so i 'll just share with you some stories very quickly of some magical things that have happened .") ``` You can pass different layers and attention blocks of the decoder to the `plot` parameter. 
``` translate("este é o primeiro livro que eu fiz.", plot='decoder_layer4_block2') print ("Real translation: this is the first book i've ever done.") ``` ## Summary In this tutorial, you learned about positional encoding, multi-head attention, the importance of masking and how to create a transformer. Try using a different dataset to train the transformer. You can also create the base transformer or transformer XL by changing the hyperparameters above. You can also use the layers defined here to create [BERT](https://arxiv.org/abs/1810.04805) and train state of the art models. Furthermore, you can implement beam search to get better predictions.
github_jupyter
![license_header_logo](../../../images/license_header_logo.png) > **Copyright (c) 2021 CertifAI Sdn. Bhd.**<br> <br> This program is part of OSRFramework. You can redistribute it and/or modify <br>it under the terms of the GNU Affero General Public License as published by <br>the Free Software Foundation, either version 3 of the License, or <br>(at your option) any later version. <br> <br>This program is distributed in the hope that it will be useful <br>but WITHOUT ANY WARRANTY; without even the implied warranty of <br>MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the <br>GNU Affero General Public License for more details. <br> <br>You should have received a copy of the GNU Affero General Public License <br>along with this program. If not, see <http://www.gnu.org/licenses/>. <br> # Introduction This notebook is to introduce the basic of data extraction from various source. The first stage of NLP project is to extract the required textual data. The data is usually unstructured and is stored in a varying number of sources. This article illustrates how we can extract text based data from the most common sources. # Notebook Content * [Extract Table From A Webpage](#Extract-Table-From-A-Webpage) * [Extract Tweets](#Extract-Tweets) * [Extract Text From A HTML Webpage](#Extract-Text-From-A-HTML-Webpage) * [Read A Word Document](#Read-A-Word-Document) * [Read A PDF Document](#Read-A-PDF-Document) * [Read Text From A Csv File](#Read-Text-From-A-Csv-File) * [Read Text From An Excel Spreadsheet](#Read-Text-From-An-Excel-Spreadsheet) * [Read Outlook Emails](#Read-Outlook-Emails) * [Extract RSS Feeds](#Extract-RSS-Feeds) ## Extract Table From A Webpage Often the facts and figures are represented in a table in a HTML webpage. If we want to extract a HTML table from a web page then we can use Pandas library. The method reads HTML tables into a `list` of `DataFrame` objects. 
``` import pandas as pd # Pass in the url to extract the tables url = "https://en.wikipedia.org/wiki/Artificial_intelligence" table_lists = pd.read_html(url) table_lists ``` ## Extract Tweets Twitter tweets can be extracted and fed into a NLP model to get a wider public view. We can use the tweepy library to extract the tweets for our target keywords. Let’s assume we are interested in a company or certain Twitter members, we can use the Tweepy library to extract the required tweets. The first stage is to generate the required tokens and secret security information: * Navigate to https://apps.twitter.com/ and ‘Create New App’ * Choose Create your Twitter Application, fill in the details and you will get your token from ‘Keys and Access Tokens’ tab. Lastly use the following code to extract the tweets. Let’s assume you want to extract last 5 tweets about FinTechExplained and MachineLearning: ### Import the Classes ``` import tweepy from tweepy.streaming import StreamListener from tweepy import OAuthHandler from tweepy import Stream ``` ### Get the API ``` # Enter the details below # auth = tweepy.auth.OAuthHandler(enter_key_consumer, enter_secret_consumer) # auth.set_access_token(token, secret) # api = tweepy.API(auth) ``` ### Get Tweets and Print Them ``` def get_tweets(api, keywords, count): return api.search(q=keywords, result_type='recent', lang='en', count=count) # tweets = get_tweets(api, ['FinTechExplained','MachineLearning'], 5) # #Print Output # for tweet in tweets: # print(tweet.text) ``` The tweet object has a number of additional properties including place, friends count, followers count, screen name and so on. We can also extract tweets from a specific user. 
## Extract Text From A HTML Webpage For HTML scarping, use BeautifulSoap library ### Step 1: Install BeautifulSoap ### Step 2: Use the required classes ``` from urllib.request import urlopen from bs4 import BeautifulSoup ``` ### Step 3: Pass Url and Parse HTML ``` url = "https://en.wikipedia.org/wiki/Natural_language_processing" all_html = BeautifulSoup(urlopen(url), 'html.parser') ``` Use find() method to get the text of the required tag ``` text = all_html.find_all('p') text ``` ## Read A Word Document We can use the docx libary to read and extract text from the word documents ``` from docx import Document ``` ### Open File and Extract Text ``` all_text = [] doc = Document("../../../resources/day_02/NLP.docx") for paragrah in doc.paragraphs: all_text.append(paragrah.text) print("\n".join(all_text)) ``` ## Read A PDF Document PyPDF2 library can work with PDF documents ``` from PyPDF2 import PdfFileReader ``` ### Extract theText from the First Page ``` reader = PdfFileReader(open("../../../resources/day_02/ELMo.pdf", 'rb')) print(reader.getPage(0).extractText()) #0 is first page ``` ## Read Text From A Csv File Pandas is a great library to use if you want to read text from a csv file. pandas.read_csv() can read a comma-separated values (csv) file into DataFrame. We can also optionally iterate or break the file into chunks. ``` dataframe = pd.read_csv("../../../resources/day_02/financial.csv", sep=',') dataframe ``` ## Read Text From An Excel Spreadsheet Pandas can be used to read text from an excel spreadsheet. The key is to import the Excel sheets as dataframes. ``` dataframe = pd.read_excel("../../../resources/day_02/person.xlsx") dataframe ``` ## Read Outlook Emails There are a lot of useful information that is sent via Email messages. We can use Python to read text from the emails. Win32 is a great API for that. 
### Use the api to get the contents of an email ``` import win32com.client # my_outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI") # folder = outlook.GetDefaultFolder(6) #index # for item in folder.Items: # print(item.body) ``` ## Extract RSS Feeds feedparser is a fantastic library to extract the RSS feeds. ### Use the feedparser to extract the keys ``` import feedparser feed = feedparser.parse("https://www.feedotter.com/blog/find-an-rss-feed-url/") for entry in feed.entries: print(entry.keys()) feed ``` # Contributors **Author** <br>Chee Lam # References 1. [NLP: Python Data Extraction From Social Media, Emails, Documents, Webpages, RSS & Images](https://medium.com/fintechexplained/nlp-python-data-extraction-from-social-media-emails-images-documents-web-pages-58d2f148f5f4)
github_jupyter
# Software Engineering for Data Scientists ## *Manipulating Data with Python* ## CSE 599 B1 ## Today's Objectives #### 1. Opening & Navigating the IPython Notebook #### 2. Simple Math in the IPython Notebook #### 3. Loading data with ``pandas`` #### 4. Cleaning and Manipulating data with ``pandas`` #### 5. Visualizing data with ``pandas`` ## 1. Opening and Navigating the IPython Notebook We will start today with the interactive environment that we will be using often through the course: the [IPython/Jupyter Notebook](http://ipython.org). We will walk through the following steps together: 1. Download [miniconda]() (be sure to get Version 3.5) and install it on your system (hopefully you have done this before coming to class) ``` ``` 2. Use the ``conda`` command-line tool to update your package listing and install the IPython notebook: Update ``conda``'s listing of packages for your system: ``` $ conda update conda ``` Install IPython notebook and all its requirements ``` $ conda install ipython-notebook ``` 3. Navigate to the directory containing the course material. For example: ``` $ cd ~/courses/CSE599/ ``` You should see a number of files in the directory, including these: ``` $ ls ... Breakout-Simple-Math.ipynb CSE599_Lecture_2.ipynb ... ``` 4. Type ``ipython notebook`` in the terminal to start the notebook ``` $ ipython notebook ``` If everything has worked correctly, it should automatically launch your default browser ``` ``` 5. Click on ``CSE599_Lecture_2.ipynb`` to open the notebook containing the content for this lecture. With that, you're set up to use the IPython notebook! ## 2. Simple Math in the IPython Notebook Now that we have the IPython notebook up and running, we're going to do a short breakout exploring some of the mathematical functionality that Python offers. Please open [Breakout-Simple-Math.ipynb](Breakout-Simple-Math.ipynb), find a partner, and make your way through that notebook, typing and executing code along the way. ## 3. 
Loading data with ``pandas`` With this simple Python computation experience under our belt, we can now move to doing some more interesting analysis. ### Python's Data Science Ecosystem In addition to Python's built-in modules like the ``math`` module we explored above, there are also many often-used third-party modules that are core tools for doing data science with Python. Some of the most important ones are: #### [``numpy``](http://numpy.org/): Numerical Python Numpy is short for "Numerical Python", and contains tools for efficient manipulation of arrays of data. If you have used other computational tools like IDL or MatLab, Numpy should feel very familiar. #### [``scipy``](http://scipy.org/): Scientific Python Scipy is short for "Scientific Python", and contains a wide range of functionality for accomplishing common scientific tasks, such as optimization/minimization, numerical integration, interpolation, and much more. We will not look closely at Scipy today, but we will use its functionality later in the course. #### [``pandas``](http://pandas.pydata.org/): Labeled Data Manipulation in Python Pandas is short for "Panel Data", and contains tools for doing more advanced manipulation of labeled data in Python, in particular with a columnar data structure called a *Data Frame*. If you've used the [R](http://rstats.org) statistical language (and in particular the so-called "Hadley Stack"), much of the functionality in Pandas should feel very familiar. #### [``matplotlib``](http://matplotlib.org): Visualization in Python Matplotlib started out as a Matlab plotting clone in Python, and has grown from there in the 15 years since its creation. It is the most popular data visualization tool currently in the Python data world (though other recent packages are starting to encroach on its monopoly). ### Installing Pandas & friends Because the above packages are not included in Python itself, you need to install them separately. 
While it is possible to install these from source (compiling the C and/or Fortran code that does the heavy lifting under the hood) it is much easier to use a package manager like ``conda``. All it takes is to run ``` $ conda install numpy scipy pandas matplotlib ``` and (so long as your conda setup is working) the packages will be downloaded and installed on your system. ### Loading Data with Pandas ``` import numpy numpy.__path__ import pandas df = pandas.DataFrame() ``` Because we'll use it so much, we often import under a shortened name using the ``import ... as ...`` pattern: ``` import pandas as pd df = pd.DataFrame() ``` Now we can use the ``read_csv`` command to read the comma-separated-value data: ``` data = pd.read_csv('2015_trip_data.csv') ``` *Note: strings in Python can be defined either with double quotes or single quotes* ### Viewing Pandas Dataframes The ``head()`` and ``tail()`` methods show us the first and last rows of the data ``` data.head() data.tail() ``` The ``shape`` attribute shows us the number of elements: ``` data.shape ``` The ``columns`` attribute gives us the column names ``` data.columns ``` The ``index`` attribute gives us the index names ``` data.index ``` The ``dtypes`` attribute gives the data types of each column: ``` data.dtypes ``` ## 4. Manipulating data with ``pandas`` Here we'll cover some key features of manipulating data with pandas Access columns by name using square-bracket indexing: ``` data["trip_id"] ``` Mathematical operations on columns happen *element-wise*: ``` data['tripduration'] / 60 ``` Columns can be created (or overwritten) with the assignment operator. 
Let's create a *tripminutes* column with the number of minutes for each trip ``` data['tripminutes'] = data['tripduration'] / 60 data.head() ``` ### Working with Times One trick to know when working with columns of times is that Pandas ``DateTimeIndex`` provides a nice interface for working with columns of times: ``` times = pd.DatetimeIndex(data['starttime']) ``` With it, we can extract, the hour of the day, the day of the week, the month, and a wide range of other views of the time: ``` times times.dayofweek times.month ``` *Note: math functionality can be applied to columns using the NumPy package: for example:* ``` import numpy as np np.exp(data['tripminutes']) ``` ### Simple Grouping of Data The real power of Pandas comes in its tools for grouping and aggregating data. Here we'll look at *value counts* and the basics of *group-by* operations. #### Value Counts Pandas includes an array of useful functionality for manipulating and analyzing tabular data. We'll take a look at two of these here. The ``pandas.value_counts`` returns statistics on the unique values within each column. We can use it, for example, to break down rides by gender: ``` pd.value_counts(data['gender']) pd.value_counts(data['birthyear']) ``` Or to break down rides by age: ``` pd.value_counts(data['birthyear']).sort_index() pd.value_counts(2015 - data['birthyear']).sort_index() ``` What else might we break down rides by? ``` pd.value_counts(times.dayofweek) ``` *We can sort by the index rather than the counts if we wish:* ``` pd.value_counts(times.dayofweek, sort=False) pd.value_counts(times.month) pd.value_counts(times.month, sort=False) ``` ### Group-by Operation One of the killer features of the Pandas dataframe is the ability to do group-by operations. 
You can visualize the group-by like this (image borrowed from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do)) ``` from IPython.display import Image Image('split_apply_combine.png') ``` Let's break take this in smaller steps. First, let's look at the data by hour across all days in the year. ``` pd.value_counts(times.hour) ``` groupby allows us to look at the number of values for each column and each value. ``` data.groupby(times.hour).count() ``` Now, let's find the average length of a ride as a function of time of day: ``` data.groupby(times.hour)['tripminutes'].mean() ``` You can specify a groupby using the names of table columns and compute other functions, such as the mean. ``` data.groupby(['gender'])['tripminutes'].mean() ``` The simplest version of a groupby looks like this, and you can use almost any aggregation function you wish (mean, median, sum, minimum, maximum, standard deviation, count, etc.) ``` <data object>.groupby(<grouping values>).<aggregate>() ``` You can even group by multiple values: for example we can look at the trip duration by time of day and by gender: ``` grouped = data.groupby([times.hour, 'gender'])['tripminutes'].mean() grouped ``` The ``unstack()`` operation can help make sense of this type of multiply-grouped data. What this technically does is split a multiple-valued index into an index plus columns: ``` grouped.unstack() ``` ## 5. Visualizing data with ``pandas`` Of course, looking at tables of data is not very intuitive. Fortunately Pandas has many useful plotting functions built-in, all of which make use of the ``matplotlib`` library to generate plots. 
Whenever you do plotting in the IPython notebook, you will want to first run this *magic command* which configures the notebook to work well with plots: ``` %matplotlib inline ``` Now we can simply call the ``plot()`` method of any series or dataframe to get a reasonable view of the data: ``` data.groupby([times.hour, 'usertype'])['tripminutes'].mean().unstack().plot() ``` ### Adjusting the Plot Style The default formatting is not very nice; I often make use of the [Seaborn](http://stanford.edu/~mwaskom/software/seaborn/) library for better plotting defaults. You should do this in bash ``` $ conda install seaborn ``` Then this in python ``` import seaborn seaborn.set() data.groupby([times.hour, 'usertype'])['tripminutes'].mean().unstack().plot() ``` ### Other plot types Pandas supports a range of other plotting types; you can find these by using the <TAB> autocomplete on the ``plot`` method: ``` data.plot.hist() ``` For example, we can create a histogram of trip durations: ``` data['tripminutes'].plot.hist(bins=100) ``` If you'd like to adjust the x and y limits of the plot, you can use the ``set_xlim()`` and ``set_ylim()`` method of the resulting object: ``` plot = data['tripminutes'].plot.hist(bins=500) plot.set_xlim(0, 50) ``` ## Breakout: Exploring the Data 1. Make a plot of the total number of rides as a function of month of the year (You'll need to extract the month, use a ``groupby``, and find the appropriate aggregation to count the number in each group). 2. Split this plot by gender. Do you see any seasonal ridership patterns by gender? 3. Split this plot by user type. Do you see any seasonal ridership patterns by usertype? 4. Repeat the above three steps, counting the number of rides by time of day rather than by month. 5. Are there any other interesting insights you can discover in the data using these tools?
### Looking Forward to Homework In the homework this week, you will have a chance to apply some of these patterns to a brand new (but closely related) dataset.
github_jupyter
# Perceptron * Lets now look at the famous perceptron algorithm * this is seen as the precursor to neural networks * This is a **linear binary classifier** # History * invented in 1957 by Rosenblatt * originally was designed for image recognition * Lead to a great deal of excitement for AI * Famous for not being able to solve XOR * Caused significant decline in interest # Theory - Limitations of our problem * Setup: * Perceptrons only handle Binary classification * Instead of using targets = {0, 1}, we will use targets = {-1, +1} * This is very convenient and can be seen in training # Prediction * prediction with a perceptron is very simple! It is just like any other linear classifier * We simply take the input x, comput the dot product of it with the weights, and add the bias b $$w^Tx+b$$ * if $w^Tx+b = 0$ we fall directly on the line/hyperplane * if $w^Tx+b > 0$ we predict +1 * if $w^Tx+b < 0$ we predict -1 * in other words, our prediction is: $$sign(w^Tx+b)$$ # Perceptron Training * Training with the perceptron is the interesting part * It is an iterative procedure, meaning we go through a for loop a certain number of times, and at each iteration, called an "epoch", the classification rate should go up on average as we converge to the final solution * Lets look at the pseudocode ![perceptron%20pseudo%20code.png](attachment:perceptron%20pseudo%20code.png) * the first thing we need to do is randomly initialize the weights * it is common to make w gaussian distributed, and to set b = 0 to start * we won't consider the bias right now, since we will soon see that it can be absorbed into w (weights) * Then we loop through the maximum number of iterations- we won't necessarily go through each iteration, since if we reach the point where we classify every correctly, we can just break out of the loop * inside of the loop the first thing we do is retrieve all of the currently misclassified examples * of course this means we need to a do a prediction * we pick a 
misclassified sample at random, and update w (eta is a known as a learning rate) # How does this process help get the optimal weights? * How does: $$w = w + \eta*yx$$ * help move w in the right direction? * Well first we need to understand the geometry behind planes and lines * recall that a vector that is perpendicular to a line, can define the line * we call this the normal vector * n = (a,b) ![normal%20vector.png](attachment:normal%20vector.png) ## Lines * we refer to this normal vector as w ![normal%20vector%202.png](attachment:normal%20vector%202.png) * the bias term tells us where we intersect the x2 (aka vertical/y) axis. * however, we will ignore it for now, since the bias term can also be absorbed into w * How? By assuming we have another column of x that is always equal to 1 * Initial model: $$y = w_0+w_1x_1+w_2x_2, x_0 = 1$$ * New model: $$y = w_0x_0+w_1x_1+w_2x_2, x_0 = 1$$ * Hence, any model where we do not consider bias explicitly can be be assumed to contain a bias term anyway ## Training - case 1 * So currently, we have some w pointing in some direction which is not yet correct * The line (the classifying line) and its corresponding w are both shown in black ![perceptron%20training.png](attachment:perceptron%20training.png) * We find an x which is not classified correctly, which is shown in red * suppose w and x are both on the same side of the line * then the dot product is greater than 0, because the angle is less than 90 degrees * that means it should be classified as -1, but is mistakenly being classified as +1 * so we update w, which is equivalent to subtracting x from w, since y (the target) is minus 1 * w = w + (y*x) = w + (-1*x) = w - x * the result is that this shifts the line, so that it is now facing a direction where it is either classifying x correctly, or the line is at least closer to x, so that the next time we move w maybe it will classify x correctly * in this particular picture, the new line in green classifies x correctly ## 
Training - case 2 * now lets consider the case where x is on the other side of the line from w ![perceptron%20training%202.png](attachment:perceptron%20training%202.png) * so the black vector w, and the black line, correspond to our incorrect setting of w * so this means that the angle between x and w will be greater than 90 degrees, so the dot product will be negative * which means we predict -1, but the target is plus 1 * when we update, this is equivalent to adding x to w, since y is now plus 1 * notice how this shifts the line so that we will either be able to classify x correctly, or we will be closer to being able to do so * in this example, shown in green, we are now classifying x correctly # Summary * we have introduced an iterative algorithm that can train a linear binary classifier * we have seen how the update rule fixes w, so that it better predicts current misclassified samples and we have seen geometrically how it works --- # Perceptron in Code ``` import numpy as np import matplotlib.pyplot as plt from datetime import datetime import pandas as pd %matplotlib inline ``` We are going to start by writing our own import data function, which creates linearly seperable data that we can plot. We use a uniformly distributed variable, it starts out distributed from 0 to 1, but then we multiply by 2 and subtract by 1, so it is uniformly distributed between -1 and +1. ``` def get_data(): w = np.array([-0.5, 0.5]) # data will be in 2 dimensions, so weights should be in two dimensions b = 0.1 X = np.random.random((300, 2))*2 - 1 Y = np.sign(X.dot(w) + b) return X, Y ``` Define Perceptron Class. 
``` class Perceptron: def fit(self, X, Y, learning_rate=1.0, epochs=1000): D = X.shape[1] # get dimensionality, shape of X self.w = np.random.randn(D) # get w which is of size D self.b = 0 N = len(Y) # get length of Y costs = [] # create cost array, starts empty for epoch in range(epochs): # loop through epochs Yhat = self.predict(X) # get predictions incorrect = np.nonzero(Y != Yhat)[0] # get any samples index where prediction does not match target if len(incorrect) == 0: # if no incorrect predictions, then we break break i = np.random.choice(incorrect) # grab a random sample from the incorrect predictions self.w += learning_rate*Y[i]*X[i] # use update rule self.b += learning_rate*Y[i] # equivalent to treating x as 1 c = len(incorrect) / float(N) # get incorrect rate costs.append(c) # append that to the costs # print("final w: ", self.w, "final b:", self.b, "epochs:", (epochs + 1), "/", epochs) plt.plot(costs) plt.show() def predict(self, X): return np.sign(X.dot(self.w) + self.b) def score(self, X, Y): P = self.predict(X) return np.mean(P == Y) if __name__ == '__main__': X, Y = get_data() plt.scatter(X[:,0], X[:,1], c=Y, s=100, alpha=0.5) plt.show() Ntrain = len(Y)//2 Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain] Xtest, Ytest = X[Ntrain:], Y[Ntrain:] model = Perceptron() # set timer to see how long it takes knn to fit the training data t0 = datetime.now() model.fit(Xtrain, Ytrain) print ("Training Time: ", (datetime.now() - t0)) # now get training accuracy and time this as well t0 = datetime.now() print ("Train accuracy:", model.score(Xtrain, Ytrain)) print("Time to compute train accuracy:", (datetime.now() - t0), "Train size:", len(Ytrain)) # now print test accuracy t0 = datetime.now() print ("Test accuracy:", model.score(Xtest, Ytest)) print("Time to compute test accuracy:", (datetime.now() - t0), "Test size:", len(Ytest)) print('-----------------------------------------------------------------------') ``` --- # Perceptron for MNIST or XOR ``` def get_simple_xor(): X 
= np.array([[0,0], [0,1],[1,0],[1,1]]) Y = np.array([0,1,1,0]) return X, Y ``` ## First lets do MNIST ``` def get_mnist(limit=None): print("Reading in and transforming data...") df = pd.read_csv('data/train.csv') data = df.as_matrix() np.random.shuffle(data) X = data[:, 1:] / 255.0 # data is from 0..255 Y = data[:, 0] if limit is not None: X, Y = X[:limit], Y[:limit] return X, Y X, Y = get_mnist() idx = np.logical_or(Y == 0, Y == 1) X = X[idx] Y = Y[idx] Y[Y == 0] = -1 # perceptron uses -1 and +1 for targets, so swapping out 0 for -1 model = Perceptron() t0 = datetime.now() model.fit(X, Y, learning_rate=10e-3) print ("MNIST train accuracy: ", model.score(X, Y)) print('------------------------------------------') print('XOR results:') X, Y = get_simple_xor() model.fit(X,Y) t0 = datetime.now() print ("XOR accuracy:", model.score(X ,Y)) ``` # Disadvantage of Perceptron Clearly we can see from the above plot that while the perceptron was able to perfectly learn the MNIST problem, not matter how hard it tried it would never be able to solve the XOR problem. This is because it is a linear classifier! --- # Perceptron Loss Function * Lets now take a deeper look at perceptron training * Note that this is not necessary to understand how the perceptron works - that can be understood fully by the geometry of the problem * This is going to make use of gradient descent! 
# Recall * In Machine Learning we often like to formulate a loss function, and then minimize this loss * Deep learning, logistic regression, k-means clustering, and gaussian mixture models all do this * sometimes this is done ad hoc * it is entirely plausible that you could invent an algorithm to solve a problem, and only discover later that it actually optimizes a certain loss function, just out of coincidence # Loss Function * The loss function for the perceptron is defined as follows: ![perceptron%20loss%20function.png](attachment:perceptron%20loss%20function.png) * above $\hat{y}$ is the prediction * so the 1 function here acts as a true or false checker * if the argument is true, then it returns 1 * if the argument is false then we get 0 * In other words, the loss function only increases for any misclassified samples * If it is classified correctly, then the contribution to the loss for that sample is zero * Remember, $y_i$ is either +1 or -1, and for a sample to be misclassified, that means that $w^Tx_i$ must be of the opposite sign. Hence we have: $$y_i*w^Tx_i = (sign)*(opposite \; sign) = negative$$ * and then the outer negative (outside of the sum) makes the entire thing positive # Loss Function Geometry * another fact is that the further $x_i$ is away from the hyperplane, defined by w, the larger the loss is! * But why...? And why is this a desirable property?
![loss%20geometry.png](attachment:loss%20geometry.png) * imagine that $x_i$ is very close to the hyperplane, but is still classified wrong * that means that the angle between $x_i$ and w is almost 90 degrees * this then brings the dot product closer to 0 * as a result, it is still classified wrong, but it is not very wrong — just a small nudge of w could make it right * Now let's compare that to an $x_i$ that is nearly parallel to w, and is classified as wrong ![loss%20geometry%202.png](attachment:loss%20geometry%202.png) * That means that it is as far away from the hyperplane as it could be * and the angle between $x_i$ and w is almost 0 * so this hyperplane is not even close to being able to classify this point correctly, and a small nudge to change w won't work * the data point $x_i$ is in a very wrong place — in fact it is in the most wrong place that it could possibly be * hence, $w^Tx_i$ is almost $|w||x_i|$, the maximum value! * therefore, when $x_i$ is very wrong it leads to a bigger loss! # Gradient Descent ![gradient%20descent.png](attachment:gradient%20descent.png) * By performing gradient descent we take small steps in the direction of the negative gradient, $-dL/dw$ * so we can see that the training algorithm that we implemented earlier is just **stochastic gradient descent**, given the loss function above
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import pandas as pd import os import time import sys import sklearn from tensorflow import keras import tensorflow as tf %matplotlib inline fashion_mnist = keras.datasets.fashion_mnist (x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data() print(x_train_all.shape, y_train_all.shape) # 划分训练集和验证集 x_train, x_valid = x_train_all[:55000], x_train_all[55000:] y_train, y_valid = y_train_all[:55000], y_train_all[55000:] from sklearn.preprocessing import StandardScaler scaler = StandardScaler() # 归一化 x_train_scaled = scaler.fit_transform( x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28) # 展开 x_valid_scaled = scaler.fit_transform( x_valid.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28) # 展开 x_test_scaled = scaler.fit_transform( x_test.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28) # 展开 print(x_test.astype(np.float32).reshape(-1, 1).shape, x_test_scaled.shape) print(np.max(x_train_scaled), np.min(x_train_scaled)) # 模型1 # model = keras.models.Sequential() # model.add(keras.layers.Flatten(input_shape=[28, 28])) # model.add(keras.layers.Dense(300, activation="relu")) # model.add(keras.layers.Dense(100, activation="relu")) # model.add(keras.layers.Dense(10, activation="softmax")) # 模型2 model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28, 28])) model.add(keras.layers.Dense(100)) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.Activation("relu")) # model.add(keras.layers.Dropout(rate=0.5)) model.add(keras.layers.Dense(10, activation="softmax")) # model.compile(loss = "sparse_categorical_crossentropy", optimizer = "adam", metrics=["accuracy"]) print("x train:", x_train.shape, " x train scaled:", x_train_scaled.shape) # 回调函数试验 logdir = ".\callbacks" if not os.path.exists(logdir): os.mkdir(logdir) output_model_file = os.path.join(logdir, "fashion_mnist_model.h5") # 回调函数 callbacks = [ keras.callbacks.TensorBoard(logdir), 
keras.callbacks.ModelCheckpoint(output_model_file, save_best_only=True), keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3), ] history = model.fit(x_train, y_train, epochs=15, validation_data=(x_valid, y_valid), callbacks=callbacks) def plot_learning_curves(history): pd.DataFrame(history.history).plot(figsize = (8, 5)) plt.grid(True) plt.gca().set_ylim(0, 1) plt.show() plot_learning_curves(history) model.evaluate(x_test, y_test) # 回调函数试验 logdir = ".\callbacks" if not os.path.exists(logdir): os.mkdir(logdir) output_model_file = os.path.join(logdir, "fashion_mnist_model.h5") # 回调函数 callbacks = [ keras.callbacks.TensorBoard(logdir), keras.callbacks.ModelCheckpoint(output_model_file, save_best_only=True), keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3), ] history = model.fit(x_train, y_train, epochs=15, validation_data=(x_valid, y_valid), callbacks=callbacks) ```
github_jupyter
``` import os import numpy as np import matplotlib.pyplot as plt import cv2 import tensorflow as tf print(np.__version__) print(tf.__version__) print(cv2.__version__) ## The fully convolutional encoder-decoder network and then creating a new loss function with the VGG_19 loss from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, Conv2DTranspose, UpSampling2D, add from tensorflow.keras.models import Model from tensorflow.keras import regularizers import tensorflow as tf vgg_19 = tf.keras.applications.VGG19(include_top=False,weights='imagenet',input_shape=(512,512,3),pooling=None) vgg_19.summary() new_vgg = Model(inputs= vgg_19.input,outputs=vgg_19.get_layer('block5_conv4').output) new_vgg.summary() from tensorflow.keras.layers import BatchNormalization import tensorflow.keras as keras ## The fully convolutional encoder-decoder network and then creating a new loss function with the VGG_19 loss from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, Conv2DTranspose, UpSampling2D, add from tensorflow.keras.models import Model from tensorflow.keras import regularizers import tensorflow as tf def build_model(): input_img = Input(shape=(256, 256, 3)) l1 = Conv2D(64, (3, 3), padding='same', activation='relu', bias_initializer='zeros', activity_regularizer=regularizers.l1(10e-10),kernel_initializer=tf.compat.v1.keras.initializers.glorot_normal(seed=None,dtype=tf.dtypes.float32))(input_img) l2 = Conv2D(64, (3, 3), padding='same', activation='relu', bias_initializer='zeros', activity_regularizer=regularizers.l1(10e-10),kernel_initializer=tf.compat.v1.keras.initializers.glorot_normal(seed=None,dtype=tf.dtypes.float32))(l1) print('This is the l2\'s shape {}'.format(l2.shape)) l3 = MaxPooling2D(padding='same')(l2) print(l3.shape) #l3 = BatchNormalization(axis=-1, momentum=0.9, epsilon=0.001)(l3) l3 = Dropout(0.3)(l3) l4 = Conv2D(128, (3, 3), padding='same', activation='relu', bias_initializer='zeros', 
activity_regularizer=regularizers.l1(10e-10),kernel_initializer=tf.compat.v1.keras.initializers.glorot_normal(seed=None,dtype=tf.dtypes.float32))(l3) l5 = Conv2D(128, (3, 3), padding='same', activation='relu', bias_initializer='zeros', activity_regularizer=regularizers.l1(10e-10),kernel_initializer=tf.compat.v1.keras.initializers.glorot_normal(seed=None,dtype=tf.dtypes.float32))(l4) print('This is L5 shape {}'.format(l5.shape)) l6 = MaxPooling2D(padding='same')(l5) l6 = Dropout(0.3)(l6) #l6 = BatchNormalization(axis=-1, momentum=0.9, epsilon=0.001)(l6) l7 = Conv2D(256, (3, 3), padding='same', activation='relu', bias_initializer='zeros', activity_regularizer=regularizers.l1(10e-10),kernel_initializer=tf.compat.v1.keras.initializers.glorot_normal(seed=None,dtype=tf.dtypes.float32))(l6) print('This is L7 shape {}'.format(l7.shape)) #l8 = UpSampling2D()(l7) l8 = Conv2DTranspose(128,(1,1),padding='same',strides=(2,2),activation='relu',bias_initializer='zeros',activity_regularizer=regularizers.l1(10e-10), kernel_initializer=tf.compat.v1.keras.initializers.glorot_normal(seed=None,dtype=tf.dtypes.float32),data_format='channels_last')(l7) # increasing the stide to 2,2 enables upsampling of the images print('This is L8 shape {}'.format(l8.shape)) #l8 = BatchNormalization(axis=-1, momentum=0.9, epsilon=0.001)(l8) l8 = Dropout(0.3)(l8) l9 = Conv2D(128, (3, 3), padding='same', activation='relu',bias_initializer='zeros', activity_regularizer=regularizers.l1(10e-10),kernel_initializer=tf.compat.v1.keras.initializers.glorot_normal(seed=None,dtype=tf.dtypes.float32))(l8) l10 = Conv2D(128, (3, 3), padding='same', activation='relu',bias_initializer='zeros', activity_regularizer=regularizers.l1(10e-10),kernel_initializer=tf.compat.v1.keras.initializers.glorot_normal(seed=None,dtype=tf.dtypes.float32))(l9) #l10 = BatchNormalization(axis=-1, momentum=0.9, epsilon=0.001)(l10) l10 = Dropout(0.3)(l10) l11 = add([l5, l10]) print('This is the l11 shape {}'.format(l11.shape)) #l12 = 
UpSampling2D()(l11) l12 = Conv2DTranspose(128,(2,2),padding='same',strides=(2,2),activation='relu',bias_initializer='zeros',activity_regularizer=regularizers.l1(10e-10), kernel_initializer=tf.compat.v1.keras.initializers.glorot_normal(seed=None,dtype=tf.dtypes.float32),data_format='channels_last')(l11) print('This is the l12\'s shape {}'.format(l12.shape)) l13 = Conv2D(64, (3, 3), padding='same', activation='relu',bias_initializer='zeros', activity_regularizer=regularizers.l1(10e-10),kernel_initializer=tf.compat.v1.keras.initializers.glorot_normal(seed=None,dtype=tf.dtypes.float32))(l12) l14 = Conv2D(64, (3, 3), padding='same', activation='relu', activity_regularizer=regularizers.l1(10e-10),kernel_initializer=tf.compat.v1.keras.initializers.glorot_normal(seed=None,dtype=tf.dtypes.float32))(l13) #l14 = BatchNormalization(axis=-1, momentum=0.9, epsilon=0.001)(l14) l14 = Dropout(0.3)(l14) l15 = add([l14, l2]) l16 = Conv2DTranspose(64,(2,2),padding='same',strides=(2,2),activation='relu',bias_initializer='zeros',activity_regularizer=regularizers.l1(10e-10), kernel_initializer=tf.compat.v1.keras.initializers.glorot_normal(seed=None,dtype=tf.dtypes.float32),data_format='channels_last')(l15) decoded = Conv2D(3, (3, 3), padding='same', activation='relu', bias_initializer='zeros', activity_regularizer=regularizers.l1(10e-10),kernel_initializer=tf.compat.v1.keras.initializers.glorot_normal(seed=None,dtype=tf.dtypes.float32))(l16) print('This is the decoder\'s shape {}'.format(decoded.shape)) model = Model(input_img, decoded) #model.compile(optimizer=keras.optimizers.Adam(1e-4), loss='mean_squared_error') #model.compile(optimizer='adam', loss='mean_squared_error') model.compile(optimizer=keras.optimizers.Adam(1e-4), loss=vgg_19_loss) return model ```
github_jupyter
# Exploring Oxford Nanopore DRS sequencing and alignment errors: ERCC spike-ins Code for exploring the error rates observed in spike-in data from the ONT DRS datasets published in the paper Native long-read RNA sequencing of the Arabidopsis thaliana transcriptome. For this we're focussing on the ONT DRS reads, aligned to the spike-in sequences with minimap2, generated from adding the ERCC spike-ins to each of the four biological replicates of WT col-0 tissue. ONT DRS data are supposed to be about ~85-90% accurate now, but what exactly doesthis means. Does is mean that out of 100 sequenced bases, 85 will be correct? Does this include mismatched, insertions and deletions? ``` import os, sys, pysam, numpy, json, matplotlib, re %matplotlib inline import matplotlib.pyplot as plt from matplotlib.ticker import FormatStrFormatter # Datasets import os, re bam_files = {"col0_rep1":{"path":"01_WT_Col0_2916", "bamfile":"201901_col0_2916.bam"}, "col0_rep2a":{"path":"02a_WT_Col0_2917", "bamfile":"201903_col0_2917_exp2.bam"}, "col0_rep2b":{"path":"02b_WT_Col0_2917", "bamfile":"201901_col0_2917.bam"}, "col0_rep3":{"path":"03_WT_Col0_2918", "bamfile":"201901_col0_2918.bam"}, "col0_rep4":{"path":"04_WT_Col0_2919", "bamfile":"201901_col0_2919.bam"}, "col0_5padapter_rep1":{"path":"01_5p_WT_Col0_2916", "bamfile":"201902_col0_2916_5adapter_exp2.bam"}, "col0_5padapter_rep2":{"path":"03_5p_WT_Col0_2918", "bamfile":"201902_col0_2918_5adapter.bam"}} bamfiles=[] for keystr in bam_files.keys(): fullpath = os.path.join("../../datasets/ONT_DRS/", bam_files[keystr]["path"], "aligned_data/ERCC92/", bam_files[keystr]["bamfile"]) bamfiles.append(fullpath) def getBestAlignments(bamfile): """Get the best alignment of each read - where best == longest""" best_alns={} rej_alns={} spikein_counts={} antisense_counts={} for readaln in bamfile.fetch(): if not readaln.is_unmapped and readaln.seq is not None: isbest=True if readaln.query_name not in best_alns.keys(): best_alns[readaln.query_name] = readaln 
elif readaln.alen > best_alns[readaln.query_name].alen: best_alns[readaln.query_name] = readaln else: isbest=False rej_alns[readaln.query_name] = readaln if isbest: thisref = bamfile.get_reference_name(readaln.reference_id) try: spikein_counts[thisref]+=1 except KeyError: spikein_counts[thisref]=1 if readaln.is_reverse: try: antisense_counts[thisref]+=1 except KeyError: antisense_counts[thisref]=1 return(best_alns, rej_alns, spikein_counts, antisense_counts) # get the reads for our datasets alignments={} spikein_counts={} antisense_counts={} for bamfile in bamfiles: thisbam = pysam.AlignmentFile(bamfile, "rb") alns = getBestAlignments(thisbam) filename = os.path.basename(bamfile) print("{}:\n\tBest Alignments:\t{:>4d}\n\tRejected Alignments:\t{:>4d}".format(filename, len(alns[0].keys()), len(alns[1].keys()))) alignments[os.path.basename(bamfile)]=alns[0] for refname in alns[2]: try: spikein_counts[refname]+=alns[2][refname] except KeyError: spikein_counts[refname]=alns[2][refname] for refname in alns[3]: try: antisense_counts[refname]+=alns[3][refname] except KeyError: antisense_counts[refname]=alns[3][refname] print("\nDetected {} spike-ins with {} reads ({} antisense):\n".format(len(spikein_counts.keys()), sum(spikein_counts.values()), sum(antisense_counts.values()))) for refname in sorted(spikein_counts, key=spikein_counts.get, reverse=True): asc=0 if refname in antisense_counts: asc=antisense_counts[refname] print("{}: {:>5d} ({:>5d})".format(refname, spikein_counts[refname], asc)) def countBaseInstances(thisstr, updatedic): """ for a string count the a, t, g,& c's and update the dictionary""" bases = ["A","T","G","C"] thiscount = {"A":0,"T":0,"G":0,"C":0} for base in bases: updatedic[base]+=thisstr.count(base) thiscount[base]+=thisstr.count(base) # sanity check if numpy.array(list(thiscount.values())).sum()!=len(thisstr): print(thiscount) raise ValueError("bases counted from string {} do not match string length {}".format(thisstr, len(thisstr))) 
return(updatedic) def parseCStag(cstag, readseq): """Pases and extracts the identity and substitution information storred in a bam alignmnents cs flag See https://github.com/lh3/minimap2""" r = re.compile(":[0-9]+|\*[a-z][a-z]|[=\+\-][A-Za-z]+") csarr = numpy.array(r.findall(cstag)) cs_stats = {"identity":0, "insertion":0, "deletion":0, "substitution":0, "refbases":"" } cs_bpstats = {"insertion":{"A":0, "T":0, "C":0, "G":0}, "deletion":{"A":0, "T":0, "C":0, "G":0}, "substitution":{"A":[], "T":[], "C":[], "G":[]}, "identity":{"A":0, "T":0, "C":0, "G":0}} pos_in_read = 0 for block in csarr: if block.startswith(":"): ilen = int(block.split(":")[1]) cs_stats["identity"]+=ilen bases = readseq[pos_in_read:pos_in_read+ilen] pos_in_read+=ilen cs_stats["refbases"] = "{}{}".format(cs_stats["refbases"], bases) cs_bpstats["identity"] = countBaseInstances(bases, cs_bpstats["identity"]) elif block.startswith("*"): bases = block.split("*")[1].upper() if len(bases)==2: cs_stats["substitution"]+=1 cs_bpstats["substitution"][bases[0]].append(bases[1]) cs_stats["refbases"] = "{}{}".format(cs_stats["refbases"], bases[0]) pos_in_read+=1 else: print(block) elif block.startswith("+"): bases = block.split("+")[1].upper() cs_stats["insertion"]+=len(bases) cs_bpstats["insertion"] = countBaseInstances(bases, cs_bpstats["insertion"]) pos_in_read+=len(bases) elif block.startswith("-"): bases = block.split("-")[1].upper() cs_stats["deletion"]+=len(bases) cs_bpstats["deletion"] = countBaseInstances(bases, cs_bpstats["deletion"]) cs_stats["refbases"] = "{}{}".format(cs_stats["refbases"], bases) else: print(block) for key in cs_bpstats["substitution"].keys(): cs_bpstats["substitution"][key] = numpy.array(cs_bpstats["substitution"][key]) return(cs_stats, cs_bpstats) def getGlobalAlignmentStats(reads): """Get a summary of the alignment stats for the reads based on the cs tags""" stats = {"matches":[], "insertion":[], "deletion":[], "skip":[], "softclip":[], "hardclip":[], "padding":[], 
"seqmatch":[], "seqmismatch":[], "back":[], "EditDist":[], "nbases":[], "nalignedbases":[] } stats["refbases"]={"A":0, "T":0, "C":0, "G":0} stats["identity"]=[] stats["substitution"]=[] stats["bp_stats"]={"insertion":{"A":0, "T":0, "C":0, "G":0}, "deletion":{"A":0, "T":0, "C":0, "G":0}, "substitution":{"A":[], "T":[], "C":[], "G":[]}, "identity":{"A":0, "T":0, "C":0, "G":0}} for read in reads: try: # get cs tag info try: cs_stats, bp_stats = parseCStag(read.get_tag('cs'), read.seq) except: print(read) print(read.seq) raise # basic info stats["nbases"].append(read.query_length) stats["nalignedbases"].append(read.query_alignment_length) # sam cigar information read_cigar_stats = read.get_cigar_stats()[0] stats["matches"].append(read_cigar_stats[0]) stats["insertion"].append(read_cigar_stats[1]) stats["deletion"].append(read_cigar_stats[2]) stats["skip"].append(read_cigar_stats[3]) stats["softclip"].append(read_cigar_stats[4]) stats["hardclip"].append(read_cigar_stats[5]) stats["padding"].append(read_cigar_stats[6]) stats["seqmatch"].append(read_cigar_stats[7]) stats["seqmismatch"].append(read_cigar_stats[8]) stats["back"].append(read_cigar_stats[9]) stats["EditDist"].append(read_cigar_stats[10]) # sanity checks: if cs_stats["insertion"]!=read_cigar_stats[1] or cs_stats["deletion"]!=read_cigar_stats[2] or (cs_stats["identity"]+cs_stats["substitution"])!=read_cigar_stats[0]: print(read.query_name) print("cs stats\n", cs_stats) print("cigar stats\n", read_cigar_stats) raise ValueError("cs flag information does not tally with sam cigar string information") else: stats["refbases"] = countBaseInstances(cs_stats["refbases"], stats["refbases"]) stats["identity"].append(cs_stats["identity"]) stats["substitution"].append(cs_stats["substitution"]) for key in bp_stats.keys(): for base in bp_stats[key]: if key=="substitution": stats["bp_stats"][key][base] = numpy.append(stats["bp_stats"][key][base],bp_stats[key][base]) else: stats["bp_stats"][key][base]+=bp_stats[key][base] 
except KeyError: raise for key in stats.keys(): if key!="bp_stats" and key!="refbases": stats[key] = numpy.array(stats[key]) return(stats) # get the stats for our datasets stats={} for bamfile in alignments.keys(): thisstats = getGlobalAlignmentStats(alignments[bamfile].values()) stats[bamfile] = thisstats def getCS(datadic): """Get the combined stats accross all datasets""" def getICS(datadic, statkey, datasets): retarr=None for dataset in datasets: if retarr is None: retarr = datadic[dataset][statkey] else: retarr = numpy.append(retarr, datadic[dataset][statkey]) return(retarr) datasets = sorted(list(datadic.keys())) ret_stats = {} refbases = None bpstats = None for statkey in datadic[datasets[0]].keys(): if statkey=="refbases": for dataset in datasets: if refbases is None: refbases = datadic[dataset]["refbases"] else: for base in refbases.keys(): refbases[base] = refbases[base]+datadic[dataset]["refbases"][base] elif statkey=="bp_stats": for dataset in datasets: if bpstats is None: bpstats = datadic[dataset]["bp_stats"] else: for akey in bpstats.keys(): if akey == "substitution": for base in bpstats["substitution"].keys(): bpstats["substitution"][base] = numpy.append(bpstats["substitution"][base], datadic[dataset]["bp_stats"]["substitution"][base]) else: for base in bpstats[akey].keys(): bpstats[akey][base] = bpstats[akey][base] + datadic[dataset]["bp_stats"][akey][base] else: ret_stats[statkey] = getICS(datadic, statkey, datasets) ret_stats["refbases"]=refbases ret_stats["bp_stats"]=bpstats return(ret_stats) aln_stats = getCS(stats) ``` # Plotting some of the global alignment accuracy distributions ``` # Some plots. 
labsize=8 bins=300 fig = plt.figure(figsize=(12,4), dpi=150) fig.subplots_adjust(wspace=0.02) ax1=plt.subplot(131) ax1.tick_params(axis='both', which='major', labelsize=labsize) ax1.yaxis.set_major_formatter(FormatStrFormatter('%i')) ax1.xaxis.set_major_formatter(FormatStrFormatter('%.2f')) x = plt.hist(aln_stats["nalignedbases"]/aln_stats["nbases"], bins=bins, alpha=0.8, label="aligned") plt.xlim((0.6,0.99)) plt.xlabel("fraction of bases in read") plt.legend(loc=2, fontsize=labsize) plt.ylabel("count") ax2=plt.subplot(132, sharey=ax1) ax2.tick_params(axis='x', which='major', labelsize=labsize) ax2.tick_params(axis='y', which='both', left=True, right=True, labelleft=False) ax2.xaxis.set_major_formatter(FormatStrFormatter('%.2f')) y = plt.hist(aln_stats["identity"]/aln_stats["nalignedbases"], bins=bins, alpha=0.8, label="identity match") plt.xlim((0.75,0.99)) plt.xlabel("fraction of aligned bases in read") plt.legend(loc=2, fontsize=labsize) #plt.ylabel("count") ax3=plt.subplot(133, sharey=ax1) ax3.tick_params(axis='x', which='major', labelsize=labsize) ax3.tick_params(axis='y', which='both', left=True, right=False, labelleft=False) ax3.xaxis.set_major_formatter(FormatStrFormatter('%.2f')) x = plt.hist(aln_stats["insertion"]/aln_stats["nalignedbases"], bins=bins, label="insertions", color='black') y = plt.hist(aln_stats["substitution"]/aln_stats["nalignedbases"], bins=x[1], alpha=0.8, label="substitutions", color='orange') z = plt.hist(aln_stats["deletion"]/aln_stats["nalignedbases"], bins=x[1], alpha=0.8, label="deletions", color='steelblue') plt.xlim((0.0,0.25)) plt.xlabel("fraction of aligned bases in read") plt.legend(loc=1, fontsize=labsize) plt.xlabel("fraction of aligned bases in read") plt.savefig("../figures/Figure_01B.png", dpi=300, transparent=True, format='png') plt.savefig("../figures/Figure_01B.svg", format="svg") # get a few interesting basic numbers such as the number of bases & the number of errors print("Number of sequenced bases: 
{:>15d}".format(aln_stats["nbases"].sum())) print("Number of aligned bases: {:>15d}".format(aln_stats["nalignedbases"].sum())) print("Number of errors bases: {:>15d}".format(aln_stats["nalignedbases"].sum()-aln_stats["identity"].sum())) # So lets actually get a number for the accuracy of our data.... mean_alnfrac = (aln_stats["nalignedbases"]/aln_stats["nbases"]).mean() stddev_alnfrac = (aln_stats["nalignedbases"]/aln_stats["nbases"]).std() print("Mean alignment fraction: {:.2f} +{:.4f} -{:.4f}".format(mean_alnfrac*100, 100*2*stddev_alnfrac, 100*2*stddev_alnfrac)) mean_identity = (aln_stats["identity"]/aln_stats["nalignedbases"]).mean() stddev_identity = (aln_stats["identity"]/aln_stats["nalignedbases"]).std() print("Mean identity: {:.2f} +{:.4f} -{:.4f}".format(mean_identity*100, 100*2*stddev_identity, 100*2*stddev_identity)) mean_insertion = (aln_stats["insertion"]/aln_stats["nalignedbases"]).mean() stddev_insertion = (aln_stats["insertion"]/aln_stats["nalignedbases"]).std() print("Mean insertions: {:.2f} +{:.4f} -{:.4f}".format(mean_insertion*100, 100*2*stddev_insertion, 100*2*stddev_insertion)) mean_deletion = (aln_stats["deletion"]/aln_stats["nalignedbases"]).mean() stddev_deletion = (aln_stats["deletion"]/aln_stats["nalignedbases"]).std() print("Mean deletion: {:.2f} +{:.4f} -{:.4f}".format(mean_deletion*100, 100*2*stddev_deletion, 100*2*stddev_deletion)) mean_substitution = (aln_stats["substitution"]/aln_stats["nalignedbases"]).mean() stddev_substitution = (aln_stats["substitution"]/aln_stats["nalignedbases"]).std() print("Mean substitution: {:.2f} +{:.4f} -{:.4f}".format(mean_substitution*100, 100*2*stddev_substitution, 100*2*stddev_substitution)) ``` # Enrichment of error types by base Now lets look to see if the base distributions for each error category are random or match the sequence distributions We'l start by looking at the frations of insertions, substitutions and deletions by base (See http://onlinestatbook.com/2/estimation/proportion_ci.html). 
``` print("Fractions of each base in the reference sequence underlying each read:") proportions={"refbases":{}, "bp_stats":{}} for base in aln_stats["refbases"]: proportion = aln_stats["refbases"][base]/sum(aln_stats["refbases"].values()) SE = numpy.sqrt((proportion*(1-proportion))/sum(aln_stats["refbases"].values())) CI = (1.96*SE) + (0.5/sum(aln_stats["refbases"].values())) proportions["refbases"][base] = {"proportion": proportion, "SE": SE, "95CI": CI} print("{}: {:.2f} +/-{:.2f} (95% CI)".format(base, proportion*100, CI*100)) for key in aln_stats["bp_stats"]: proportions["bp_stats"][key] = {} if key!="substitution": print("{} fractions relative to all {}s by (reference) base:".format(key, key)) for base in aln_stats["bp_stats"][key].keys(): proportion = aln_stats["bp_stats"][key][base]/sum(aln_stats["bp_stats"][key].values()) SE = numpy.sqrt((proportion*(1-proportion))/sum(aln_stats["bp_stats"][key].values())) CI = (1.96*SE) + (0.5/sum(aln_stats["bp_stats"][key].values())) proportions["bp_stats"][key][base] = {"proportion": proportion, "SE": SE, "95CI": CI} print("{}: {:.2f} +/-{:.2f} (95% CI)".format(base, proportion*100, CI*100)) print("Substitution fractions relative to all substitutions by reference base:") for base in aln_stats["bp_stats"]["substitution"].keys(): proportion = len(aln_stats["bp_stats"]["substitution"][base])/aln_stats["substitution"].sum() SE = numpy.sqrt((proportion*(1-proportion))/aln_stats["substitution"].sum()) CI = (1.96*SE) + (0.5/aln_stats["substitution"].sum()) proportions["bp_stats"]["substitution"][base] = {"proportion": proportion, "SE": SE, "95CI": CI, "breakdown":{}} print("{}({:.2f} +/-{:.2f} 95% CI):".format(base, proportion*100, CI*100)) baseto_unique, baseto_counts = numpy.unique(aln_stats["bp_stats"]["substitution"][base], return_counts=True) baseto_dict = dict(zip(baseto_unique, baseto_counts)) proportions["bp_stats"]["substitution"][base]["breakdown"] = {} print("\tSubstitution fractions relative to all substitutions of 
reference base {}, by target base:".format(base)) for baseto in baseto_dict: proportion = baseto_dict[baseto]/baseto_counts.sum() SE = numpy.sqrt((proportion*(1-proportion))/baseto_counts.sum()) CI = (1.96*SE) + (0.5/baseto_counts.sum()) proportions["bp_stats"]["substitution"][base]["breakdown"][baseto] = {"proportion": proportion, "SE": SE, "95CI": CI} print("\t{}: {:.2f} +/-{:.2f} (95% CI)".format(baseto, proportion*100, CI*100)) # Plot these proportions for each type of error. Here I'm going to use lines to highlight # the different behaviours of the different errors... bases = ["A","T","G","C"] cols=['black', 'steelblue', 'gold', 'blueviolet', 'olivedrab'] markers = ['.','p','*','D','X'] markersize=10 fig = plt.figure(figsize=(10,6), dpi=150) plotprops = [] ploterrors = [] for base in bases: plotprops.append(proportions['refbases'][base]["proportion"]) ploterrors.append(proportions['refbases'][base]["95CI"]) refline = plt.plot(bases, plotprops, linestyle='--', zorder=1, color=cols[0]) refpoints = plt.errorbar(bases, plotprops, ploterrors, fmt="o", marker=markers[0], markersize=markersize, label="Reference proportions", zorder=2, color=cols[0]) i=1 for key in proportions['bp_stats'].keys(): plotprops = [] ploterrors = [] for base in bases: plotprops.append(proportions['bp_stats'][key][base]["proportion"]) ploterrors.append(proportions['bp_stats'][key][base]["95CI"]) thisline = plt.plot(bases, plotprops, linestyle='--', zorder=1, color=cols[i]) thispoints = plt.errorbar(bases, plotprops, ploterrors, fmt="o", marker=markers[i], markersize=markersize, label=key.capitalize(), zorder=2, color=cols[i]) i+=1 plt.ylim((0.1,0.35)) plt.legend(title="Set:", loc=3, fontsize=10) plt.xlabel("Base Pair") plt.ylabel("Proportion of base in the set") plt.savefig("../supplementary/figures/Figure_02A.png", dpi=300, transparent=True, format='png') plt.savefig("../supplementary/figures/Figure_02A.svg", format="svg") fig = plt.figure(figsize=(10,4), dpi=150) x=141 p=None for base in 
bases: if p is None: p = plt.subplot(x) ax = plt.gca() plt.ylabel("Proportion of target base in the set") else: ax = plt.subplot(x, sharey=p) plt.setp(ax.get_yticklabels(), visible=False) ax.tick_params(axis='both', which='major', labelsize=10) plotbases = [] plotprops = [] ploterrors = [] for baseto in bases: if baseto in proportions['bp_stats']["substitution"][base]["breakdown"].keys(): plotbases.append("{}->{}".format(base, baseto)) plotprops.append(proportions['bp_stats']["substitution"][base]["breakdown"][baseto]["proportion"]) ploterrors.append(proportions['bp_stats']["substitution"][base]["breakdown"][baseto]["95CI"]) plt.bar(plotbases, plotprops, label="{} substitutions".format(base.capitalize())) plt.ylim((0.05,0.85)) x+=1 plt.legend(loc=1, fontsize=8) plt.savefig("../supplementary/figures/Figure_02B.png", dpi=300, transparent=True, format='png') plt.savefig("../supplementary/figures/Figure_02B.svg", format="svg") # Plot these proportions for each type of error. Here I'm going to using multiplebar plots because # Geoff thinks it will look better. It won't. ... bases = ["A","T","G","C"] plotprops = [] ploterrors = [] for base in bases: plotprops.append(proportions['refbases'][base]["proportion"]) ploterrors.append(proportions['refbases'][base]["95CI"]) plotvals = [plotprops] barlabels = ["Reference"] barorder=['identity', 'insertion', 'deletion', 'substitution'] for key in barorder: plotprops = [] ploterrors = [] for base in bases: plotprops.append(proportions['bp_stats'][key][base]["proportion"]) ploterrors.append(proportions['bp_stats'][key][base]["95CI"]) plotvals.append(plotprops) barlabels.append(key) fig = plt.figure(figsize=(10,6), dpi=150) plt.set_cmap('jet') def subcategorybar(X, vals, labels, colors, width=0.8): n = len(vals) _X = numpy.arange(len(X)) for i in range(n): plt.bar(_X - width/2. 
+ i/float(n)*width, vals[i], width=width/float(n), align="edge", label=labels[i], color=colors[i]) plt.xticks(_X, X) cols=['black', 'steelblue', 'gold', 'blueviolet', 'olivedrab'] subcategorybar(bases, plotvals, barlabels, colors=cols) plt.legend(title="Set:", loc=(0.6,0.7)) plt.ylim((0,0.35)) plt.xlabel("Base Pair") plt.ylabel("Proportion of base in the set") fig = plt.figure(figsize=(10,4), dpi=150) x=141 p=None for base in bases: if p is None: p = plt.subplot(x) ax = plt.gca() plt.ylabel("Proportion of target base in the set") else: ax = plt.subplot(x, sharey=p) plt.setp(ax.get_yticklabels(), visible=False) ax.tick_params(axis='both', which='major', labelsize=10) plotbases = [] plotprops = [] ploterrors = [] for baseto in bases: if baseto in proportions['bp_stats']["substitution"][base]["breakdown"].keys(): plotbases.append("{}->{}".format(base, baseto)) plotprops.append(proportions['bp_stats']["substitution"][base]["breakdown"][baseto]["proportion"]) ploterrors.append(proportions['bp_stats']["substitution"][base]["breakdown"][baseto]["95CI"]) plt.bar(plotbases, plotprops, label="{} substitutions".format(base.capitalize())) plt.ylim((0.05,0.85)) x+=1 plt.legend(loc=1, fontsize=8) # Plot these proportions for each type of error. If we're going to use bar plots, stacked # bar plots by set with the relative base fractions might look better. It won't, but ... 
bases = ["A","T","G","C"] plotprops = [] ploterrors = [] for base in bases: plotprops.append(proportions['refbases'][base]["proportion"]) ploterrors.append(proportions['refbases'][base]["95CI"]) plotvals = [plotprops] pltorder = ['reference'] barorder=['identity', 'insertion', 'deletion', 'substitution'] for key in barorder: plotprops = [] ploterrors = [] for base in bases: plotprops.append(proportions['bp_stats'][key][base]["proportion"]) ploterrors.append(proportions['bp_stats'][key][base]["95CI"]) plotvals.append(plotprops) pltorder.append(key) plotvals = numpy.array(plotvals) fig = plt.figure(figsize=(10,4), dpi=150) i=0 bottom=numpy.zeros(len(plotvals[:,0])) cols=['black', 'darkorange', 'slateblue', 'darkred'] while i<len(bases): plt.bar(pltorder, plotvals[:,i], bottom=bottom, label=bases[i], color=cols[i]) j=0 while j<len(pltorder): plt.text(j, bottom[j]+(plotvals[j,i]/2), bases[i], color="white", fontweight='bold',) j+=1 bottom=bottom+plotvals[:,i] i+=1 plt.ylim((0,1)) plt.ylabel("Proportion of base in the set") fig = plt.figure(figsize=(10,4), dpi=150) x=141 p=None for base in bases: if p is None: p = plt.subplot(x) ax = plt.gca() plt.ylabel("Proportion of target base in the set") else: ax = plt.subplot(x, sharey=p) plt.setp(ax.get_yticklabels(), visible=False) ax.tick_params(axis='both', which='major', labelsize=10) plotbases = [] plotprops = [] ploterrors = [] for baseto in bases: if baseto in proportions['bp_stats']["substitution"][base]["breakdown"].keys(): plotbases.append("{}->{}".format(base, baseto)) plotprops.append(proportions['bp_stats']["substitution"][base]["breakdown"][baseto]["proportion"]) ploterrors.append(proportions['bp_stats']["substitution"][base]["breakdown"][baseto]["95CI"]) plt.bar(plotbases, plotprops, label="{} substitutions".format(base.capitalize())) plt.ylim((0.05,0.85)) x+=1 plt.legend(loc=1, fontsize=8) # OK lets use the g-test to see if these are geninely significantly different proportions from scipy.stats import chisquare 
ref_props = [] for base in sorted(list(aln_stats["refbases"].keys())): ref_props.append(proportions['refbases'][base]["proportion"]) ref_props = numpy.array(ref_props) for key in aln_stats["bp_stats"]: key_counts=[] for base in sorted(list(aln_stats["refbases"].keys())): if key!="substitution": key_counts.append(aln_stats["bp_stats"][key][base]) else: key_counts.append(len(aln_stats["bp_stats"]["substitution"][base])) key_counts = numpy.array(key_counts) print("pvalue for {} proportions different from the reference: {:.2e}".format(key, chisquare(key_counts, ref_props*key_counts.sum()).pvalue)) ```
github_jupyter
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAZAAAACrCAYAAABIdcoWAAAACXBIWXMAABcSAAAXEgFnn9JSAAAAB3RJTUUH4goGCgAhuX3C3AAAAAZiS0dEAP8A/wD/oL2nkwAAWmVJREFUeNrtfXd8VFXa/zehig0V7IqFYlfEQieZ+5w76SAIgkhP5j7nTkJCU0RKAoi99977uvbeaFJDGoiou+u77/5293Xb+66o29T8/rhnkjuTmWTm3kkySe7z+RwRZuaW55zn+Z6nHsAjjzzyyCOPPPLII4888sgjjzzyyCOPPPLII4888sijrkdkdsO08Y1/9wc8nnjkkUceedQCafO7gWQuiCeBjFPCP6z3+OORRx555FEUypQA8REgfg3Ee0D8OoivhS5HhH2vYK7HK4888sgjjyLIxz4Q7wXxLhBXqT+3QuOXIMxJHoM88sgjjzyKTkI+BOI6BRz2UQXi3RByPTRjOoj7gWQPj2EeeeSRR12aVGyD5MkguS8KeNhHNYi/gJCbQbwUmjEMxAehcJ7HRo888sijLkeXiZD1cSuIa1sAkHCLhHgLhHGJx0SPPPLIo65K2Xw0iLfHCR72sR6Ch3sM9MgjjzzqqkSyFMSVDgDkMYxd2s1joEceeeRRV6T84kNA8mUH4FEJwdMAAFrQ42MklZd7PGgL/np89sijOEifEf93E6nVEOwH8baEAUTITTizC2ZiTZ7c+veob/iPx9vWoAe6KH896gLkXxD7s2y9O3yFhyMjcDL04NnQi63hN88EmSdBk0c2e+2MiN0acW8Qr00geB4auyFkWdcG9OJjoPNwCDkFgpdAl7dD8MPQ5NPQ5Ssg+S6IPwHxxxDyHQj5CjT5HIR8AsT3Q5PrQHIRhJwDIQugG6Pg44EYlN0zDnTp3Os9p6QHdHMgdNYg5BzoshxC3gPBj4Hk8xDydZD8EMTrQfwhhHwTQr4MTT4DIR8FyTuh8SoIWQIhL4cu/aDABRgnj01p4M6Y7em/2CZn+9w2uyRF+RH5YFlF6SDjAJBxKPTgCdBMH4SUIL4Dgn+pFPc+EH+uiv0+ixh7bWMriF8E8Y0QUkIzMkHBYyFkXwijD3KKrbiFkCeAeJOtaDDODCxZB9/sgzvlOvXZUpIDd8OaEz4CIjgMxCZ0fgJC1ql52KOy0WpBXKPSnKtj8LNKjdB3atTv6tQ19qi5+xxC7gDxSxB8MwTPhy4nwC/Ph1ZyDIgPB/EhEGYfkNEDf4+h8HJl6vM61+wGMg6CCB4HIf0gXgGd37Kt4z2KP7URvK2KwduqZnj7mU1uPoHgxyG4AoILoZs6/DwEmaX9QEZf6PJgaGZv6Ebs+F5r8VcrOhS6MaBNRpY8CXklPTuMbJ61sEeb8UY3ToIuj0oxDtQDl+oRVoB5LEiOAvFcEN8B4nfVQv9CLfY6JRAJKnmuVcLzmVW7wV9A8McgvhdCFoNYgHi+UoSJWB97IOQaa7EXdh7gyLIphHGX9wDJM0FyipqTHSD+SvGy1kG8KNFRpeY8NH+fg/hLtSY+BfErIHkPiK8CyWkQ7IOQ50HwiRhrHpiSOyk7f0XgMPjMERAsQfysesd9StHXtAF/q9U8hkB7n5rfPSD+CIKfA8mbQFwMISeAeDR0eQY0Pgq+4u6tyifB89U8f9UG4/fQpZb6whmqVePRbcib/wIZT6WQf9dmqvtKu0PIHJC8EcQvKYtht22n1RaCU+Uo9kFcAyo+u9NayiTzQPwgiN9XvKpNELzbClxqbbvrShB/BOJfgvhREN8KIUtBnA0yT8G0pentx0+z8f/zzENAshjEzykgbCvASFRGamzWy24ln++C+HkQPwDidRByDohHQQ/2wxXL0pIIIAaId7bRu+6B4IqUd41OHaZ4I69xmC3qZNSBjPtTwEy37byWr+kG4iK1GLdH9JzqCKMGxA9C8CGYO7Tjg0WGTblpPBLEv1Dz0pHmJJarbKfaIHwC
zTypXfkrSntBM6Ryl+7swPy1u892qLEamtm7gwJINUhu7BChtRHFPUH8dBvOdTsDCKlzNIbNS4Nf9oPOAZDcqXY0HVmApncaa+OHekAz+0HIO5WLqCMDR6yxFRnm0W3G0zMmA9OuUgHyku4gOUxtmPZ0Qt7WQuMKZJg9OyiA7ALxl9CDp6W+Z4DPUOuokwOI3eKgor7QeBaIP1a+1o6uoN6GME7uFFbHsCu6Q8jxEPJjBeq7OunYgtzSvm3OZ804BkIuhGiI4e3qlAAi5DXILevegQFkN4gXWy7GFD48TvCENnRfpYAForOmTK6aVo5rtOW4rkODh73gkXi1EtTqTgweFoDogZ5tyl+NT1fB8bpOzttaEC+GXpTWgQGkCkK+n9JyK7gHBF/dxuupjQFkmELvrEXdIMwyFXjrXC4Rwe+DgiMb3rkjVQFnFNoX5GNdADhC49M25S/JUyH4g07qDmyqZDSjJMnKsq0BxBq6eWrKZmCReThIPt/GMttGAKLZXFYZ3B+6fNR2KFNnG1Ug3gvBq5EbPKxx0RsdA0TGFh0KIZ/qQuDR+gBiL07NDJyhali6Cn/rQGx0CgDRDKt9RfHxKeg5KD5FuUJ3dV4LRA+O6cTBwsixF8RvQDAhd66VIpqyFZsqvSSztDc0eXMn9sfHsho/DrOQk02XZCshN0+DJjd3MXCugeDZnQJASD6Uuq5nnuqgXq0DAIgwQy84Q+W1dyXhqbZSXo1l0AqtIKJenHqLb4Lqr2TVRXQt8CCugsavtX6GTMkxqrajqovxdxc0vrxzAAi/Ad08NqVkN7QpJX6kHeJprQwgOocU0wKILqeYIna55uvQ5/dXlljqZVzpZl4nz7SKDfLCfK5V+esP9oaQ67ogOO8C8U4I89JOAiCbQSlYlT5+Ua92Cgm0EoAIBRxncxp0XqraILR1IVNVQ0v18BHre22gqHgz9ODQlAmwX5ITUnJHtFH6n73PVahnU6VSBqGis522Z4n8fnWU37udxxoQP9Sq/NVkRhvIQFUUfu1qhr8t8TZZ/N0G4vxOAiC1EDKFXAj1Ie9Otmp1s6vzWCB+7gXia1p5V1sFMmogjGoIuRUi+An0knehz38N/tKX4V/4PLIWPo2shc80jOyFzyOr7EX4S1+DXvI2RPGHEOan1vWMmmYa/SXLpbUFGk9s4NMZk9t/ERLf2krvXKXM6s9BvBWCX1UtUG4BcQWIF0FIE4KLQHI2hJwJIQtB0gRxGYS8Uq2hctUR+XoQ3wzi2yHkgyB+SlXGv6W6+m5TaaP2Zpq7I5oNVkXx0d+WdLaG6gUyFh6qWqhUt0p8wYon7oHgT1QbkXtAfAOIl4O4FEJKCJ5n8ZZnQ8iA1SpFLoTgpSBeoebiWvW7W0B8FwQ/Ao2fhZCvgPg9EG9USnuPjb/RGmU2TVAQ7O8kAFID4ntBxiEpUZZ+6ZiQh+fudrJuWwFAdAZyOB2CF7begxs1IKMWovhjZJU9j5wr70fu1bcjd/mNyFt1LfLLy5FfsRIFFauQH2UUVKxEfkU58latRd6K65G77FZkL70b2YuehD7/TQhzK8horZ5b1SDeBo3nW/q7vn1bVGusq3Tq5GeiWS1PboRu5ELI86DzAOjBQ+EPODsj5awr0pBv9MDYQG/kzD8UGveHbh4PTZ4KkmeAeChIXgyfkQldXgKN50HIZRDydmjyWZD8SCm9r1TDuc+tJpryulbk7y2tsImqsZ5fvgFhLILOY6DLM6DLY+APHgj/bGd9vUZemo48syfI7IOs4GHI5KOhmwOgyUEgPgvEw0ByJIh11UQzCCErQPJ+kPwlSG62NfLb17AGdOnrJABSBeKN8BUfjzmDU8MIOX1Jd8u11l4ZdskEkMyi0ARf2kqWRxV0roIoexm5y29C3qo1yF+1GvnlFckbq9Ygb9Va5F59D/ylb7Wim6sKJNc18G7ygrZdeJPLAQocYu2okrp7qbUK83g6so0DQNwdk6+I/gzrbWZ4VHLh
4puwMA2ZMh262Q1C9oAme0Jwb4y56gCQcSx0eTE0nmRVgsvxSectAFDgIofNOFvi74vw8zAI7gXRTDv1FjfJDvk7qRjQgmnQZDp0sztI9gDJXiDZG+MWHgw/DwTJDGjGDAhZBiGHdBIAUcdFyMyU8WJZma07OjaA2PPcfYGRreLvFeY2+Be8iLzyay0LIpmgEXWUo6BiJXJXrYV/wXMgc2MrNLmrhs4rcPlV3ZrwsVVJ3ccfHJrEzI0qEG+GkGVYtzGtiSsnVXzGsZSqaIWzK4S8NbkyIN+DkBkpm0raHGIlk7/tCyB7oHF5yqxtIZe2cfuSVgAQfaYCEuNMkNySRCVbDZI74C99GbnX3IT8ipWWe6rVwSMCSFavQN6qNche/DhE8QdJ7hBcDSEXYdR9aZhzOtrEtzp7Vij17+4kWYp7QPI5kHlxk114VyWNzwLxB0kEjzXIMPsCALof1LX56x5Adiv35WcOlG81iDekBh9kDxA/6aLAc5/LmrwkurDIPEr19kmSYjVqoZd8gJyr77QUeUVbA0eUUbEKeSvXIefKhyGCnybJ9VOlXHNWbm/utLQ2EsLTklTQ+TmIH4BW1tjN1t4CviuSzt1AXJKEg7Uq1eFkJdBX9vB4mxQA2Q0hr4RgTTVxdVJ89wWyi89odz6QPB3E7zjckNwJYQyHkAtc6OwkAAgFgKz53SHk4qT60vUFLyJv1dr2B42YYw38pb9QmVtJCoxKy5TTW7HtScHc0O5lXRLcV7UgfhxCWi1b9NEdq/dXa/FXyEMh5MtJSML4DEI2Bsf8YzzwcA8gn4NkPvJL0kDyKHVufLUDEFoUZ6Cp9chv5kM4XlvjFC9F+wFIqCCO+OIk+dKrQHIbshY/ibw2iXO4t0iylz4AIbckCUQqoRujW33hzRzfDUImw4f8ITQ+19NokYJtnA0hv3Bfc6DqU+bM9niabADR56dB4wNAfJMDS7EKgt9r3zUW6GEd1+xI724D/gftCyCh+oWs0gNVf6sa97EAcxNylt6L/NXLUx88bCCSe82t0EveT4I1UmXVNJinWLHuJO/mR8wLAX5BUgJvGhd52iwqfytcugerQfwexgVU175yj7etASDWta5wrLt87XTuz8XTAX/wUAj5vINnr4XgW2y8bCcAmX9LSFjWJSEQWw3d/AQ5y26zajU6CniExUaug172UlLcQkI+gJmLuuPCea3kO+V7knA+wls4aHl6mOL0CMCY9ORk60n2eNsWACKHqcPsEvcWCJ4DoPWacDZHGYUDHOrdfSCbl6NdAMSv3LKZclRS8tz14CbkLr+5nYEgCfUkq1YjZ/ETSUgk2NPQMkFP8uLMlEeD+FX3nYYD1iJcN9FTaOH8HZOEthJvgIqO9pjZBgBCRQerCn4nXZwfbD8eyGkOEgCqIGQlSpZ2a38LJPuq7iD5oPvCQLkNOde0r+WRs7Ji5JJHrk4fv2ZpUoAka+EzrvPUBX8OYVjxhelzkyl8I0G8wV17bvk2AKBXnqfMGoLnpSH+rnCdfSXkao+/bQEgQ0MW+VqH13oN2SXHtem7p/lDAPKog3W2G8Q3hrnH2w1ArONot7oOmOdcfXcbFQbGtjxGLwp8/89//eHoK266MC2//LrkBNcXPuMiQyIUD3kNVHxQmIJyv3OZ4TI7aF9Dd9WunnEV3T34lksLtBp68UCPv20AICH+kvQ5vN5miHaoSp8QOADCsftKj+BlGwNIdglARm8Ivs21vz/7ygfbvb5Dv2bZkx/XmPX19f/5+eeft2LYfCMtv3xNUgoQcxY9Cc1wuRs1k9fjJIt7QfBqFztkq9qcUuxMhFQhP58A4i3uUrn5BY+RbejCAoDSa7tDyO0O45USy3e0tfvKSffdKhBvgMYD2hdAAEA33RahVUEvexl5ye5jlfBYm1v+7KR6G11+7eOE7JXXJs26yVrwCwjH2VlWVpbggUnaHR+h/L3Vzt1XfA90PqghDuaRnb/5qoGk87oPnWeFxRk9
an0Asa55u4ONVQ2I74bgg9vkvS+eHrKY7nSQfVVjJc8Yh4SdjNq2AHKmegHjNleZVyL4CfJWrmuHtiSNI3vF6tFXPVamcOPnEID8898/fnvQlHVFyE0SuOWtWAe9+D13SkWWwF/SDTluDqI6EBDyeNW5082CCWBoTpqnxSJ4awn2Qpfp0fuQOc8LnrcHgFDQ5ywozRtA3HZxkB7npDu0cmtAbEbhZRtbIJp5rEP/W+PIXXqv6mvVLuCRVlBxLdLysiPBI0R1X3/zFkYsLE1enciyOyDMbS6skE3IMvsnQfDOctnocivI1DwNFtMCud3dwVbyHQDAzJM8XrY1gExa3MNhj7svIMzWbxEQitfo5miHVu5GCDm2/QAktPsludK5D92oQVbZL5C/un2D5ucEjR1f/P6++vr6H+uj05+zVj1djLxVSaxYX/yYi2LLvSDD2j2Qi7Re3ZzosPdPaLwDXZ7uabBo8lF6EIgfdyGMuyHk9ci7AilxWFFXApCQS0fjWx14VvaAVHfeVp23S0LvfpVDK/d1+Iy+7WuBXLaolzLZHMY9zG1Jcw05T9ldecfr256rj4MOnrzuquTUh5RXoGD1Sojgew7jD9YEa5enuxM8ucxl7OoFZHNfT4NFC6AHT3JZX7MXOl8BP3u8bA8LBAA06XdgoVdDyM1tY+GW9ALxo866RsjbY/CyDQBELw6Z6HkugoTVyF78eHtXjXefsPrKP/5t/w3xAMgLm/f6oC1bnpYsEMlZfoOL1N46CGn1j9EKnQLI4y5rFB70tFdM3p7vsKLZlh4tL0BumcfL9gCQ4r6ANv84CLnVkRsrq/j0NlhjQ0D8tqMNqF/mR7WS2tQCEfI6560vguuRt+L69gSPtILyteeVPTg5Vuwjgn6ur6+vv/utHbdAuzp5Ljf/gucdurKqIPgZRwtv8uSQ+/FDFxlYldBleZjJ75FtdyjHudhcVUHwduhyoMfI9rJA6gHBBzs8oXM3BFvdeVuzrYnG2Y5l128c2H4WCBA67+NFxzfzL3guCVlXq+G8RmM1MlYu/PePP9bVJ0D/+s+PvwGyMtMKypPTWj532a0Q5qcOJ2wDNONEF0FeNy02KqFzoae9YvI210VmYhUEvw2dT/AY2Y4uLAAQJqdkd14t0B3ESxyssVoQPwwAeG9RewIIXwzijc5uZG6xGiW6ApDVyCu/EvkVpiMQyV23ChMvurneAe348vdPI+PK0qSdcOgve8XhpG1tPL87wYCdNu9wELtpMb4TQvo97RWFMiangYwZLuJL1SB+GkL285jZzgCi8RiHem4HiE9tlfctLwc0eQg0+awD62gvNCMvpnXUJgASHBZq3uVMQPwl76FgzQpX7qf88nUYW+p7bmPNKPivWZlQcDtvzSpMnfFYWmDID32Kz32nvr7+h0RBZMFD712fvLTepfeDpFN3x3JnOxh5rssMrB0Q8mxPe0Uhn+wO4gUuLJAaEN8FIQ/2mNnOAEJmPxC/4miDRTwbADBzUPLfOcs8wcH6qoLgPcgtPaAZXrYBgBD3Um3b6xyZ5zmLH3bdLNF39fKP9+y7sr6+vn7SygdXIWtFvAq7HJcsugnzhn6FwOBvUXjqX323zry9vr7+60QA5Lt//PsJ5K5akLSsLGFucJiN9ZCjyleNc126sLbDV3i8p72i8raXashX5xxAjOuQOa+Xx8x2BhBL393msAX/Pa23xuQUBxlidRD8QLNxyzYBECEPBvFLjoK/giuTUPi3Fumj6R///vf/1NfX13/52z9uwuDCmXEp84KVqzFTvI/AoO8QGPwtAoP3Y9oR67/45uuvE7VCim562oe8VdcnIaV3BbIWOIknVakzkAc7yOCY7TKFdzvGBw70tFdU3h4A4rtcZLjVQPByTCpK95jZjgASqrEScoLDWotXQIHk9olraF/CTrrv7oPgS6FLtJ8FkhEI9VCqdnCjaojid1Cw2pX7CplLr9r42W8/tCvzB9/d9TSyV7R83Qnz
b0VgyN8RGLxfAci3CAz+Nyb2Lvrp55++TxBDfsSQwCTkuw2oV5Qj75pbHWZEVYLk8MSDvGapy4O/tnmaKyZvDwTJR10UitZCY6/5VapYIBkzD3YIIJtBUaq93dIJmT0cNk/c3uJms00sEB+fA+KvHFSe1yJrySNu3VdpBaum//TTjy9EZEetRfYqs1krJG9NOeadvxeBQd/bwONbBAZ/j6mHb/jTX//8r0StkMnXvTgc2Svdd+sdv2YFyNzmYPL2QuN8YFKiu+RyV92Thdzoaa5mLHQhX3ABIDUQpuExMgUAxDcv5JZ8wlE6LyXxmOeGdvOc7cB7UA3ix0DG4e0PIJp5mbMArFGLnBWuztfoNXHtyj/+bf870Wo0PvvvPz0HfXl0cMq9bjkun/QCjFP/GQEe1ig85V98w+JnEgWQv3//z29w1LRL3bd8r1gJf+nrDqyQPSCeD83oltBi1OVtrtq4C6nSFL02G1F4eygEv+mixqYKQs71+JsiFggACPMSq4VQwimzd0HwgUmZxyMnh+Ifdznqviv4yhYbsLYNgMhrHCCg1bokz3nQOb2g4kYMmHNacwodGHpWt/EVNzRxEY2/6noUnvX7qOBhjX/h8iPudZDV+4+739z5dEzgSsSNlbX4YVDC54XUQchbIcyeCQqdGxdLFYR8w9NcsQCE+0Lwx64O6tLlTI+RKQQgucaBENKJ22gzMucfiblJKunRi3qCeL2D99wCMrMBAEsua2cAEXyfA/dHNfSSd51mLaXll1+LY2dkxVHo93ecOm9qWG1I3rUrMFN/X8U+YgHIfsw+4Y9O6kKe3bKxClmlq13Xg+QuuxVk1DlI+fwFiHsnmMXxsgsFVw3ilzzNFZO3h0OT210I4k4QX+4xMoUAxLrPQw703j5oxuikvWumHOXw5Ne3oQVVm/nydgeQVxPfvRo18Je97Lho8HRDfvvDv75tSZn//NNP9W9u2PEpRi9Z3PD7ifNvxbxzv44InDcZaYEh/7roukvzEwGP//z0YzU0lKZNnf6U+6r0VWshZE3iylx+BCH7JBbodXXUqlXo5lEs3h6heFvluMaGeIrHyBQDEGInrvvG7rz6jGS86yJHAX0hH43z+m2Sxrsp8d2rUYPsRU87UqxZK8pLHnyn/Meffv5bfEr9569zKp5ZgbxV5chfVYErCl6xpe02N74fdu0ljycCIN/+47uvMOXge1F43m+R77az8Kq1Dpq3VYF4JzQ+MKEzs4nfc3VWRbwLsusCSK0L/m6HkBM9RqYYgOiBgRBcmeC8VoOSlHCSW9wTxA87SvfX2Mr/zQu0I4BkNGQB1CR+E6MWWYsfcqRYhy8wq3/zx+cTUewf1329CP5rrkbBNWtReN5v4gSQ/UNW+n8XZ3PF+vr6+vpvvv3L73DZYS/CGPhvTJ/wMnLXOU9Rzlu1BmSuT5i3gr9AQXFixYQkP3Z32BHf13YKwmwvV5RDAJH9XNbYbAVxQafmbUcDEH0GQObhIPmEA9fv59DMM5IQWxusPAeJK/a8kn5x8rJNKtH3OUvhveo+J4p13h2vP+okNjFyyRNFmHb50zEzr6JmYw3c95s//255vPf47V9//xdMO2IDAoO/Q2DIXzF++bXIr3DY42vVGoiSDxxM4FcYMyexgiXLitzluE6B+I62VRKmBBk3gIzr2mCsgc88xvmzyn4Q0k2bmC0gzmk73gZPBPHyNuLtDSC+Aj6zV4cCEAC4ZHYaiBc7yF6sA0nVubDezXs6Ue41IPliK9+jjQAkZ+ndToLL1zz18dVOAOR/fveH9zDjzG0IDPohfgA59b8f2fzSC/He48tvvv4O04/8siG+Mmf0NuRdu9IxgOgl7zvY4XwJYSZWjU5yizsAkTe0rTVgPKnSKGvbYFQiSw5x/qxmf5D80gV/N4Ok3oa8PQfEG1SAuLV5+xkE34bsBGN2qQAgltxkOwhiV0Hjd1y9o8/oDo0XOij+3QvBV3QOAMldelfCBz6NX736
/77/54v1DglzTipEYNBPCQDIX1a+dsd/x3v97fuq63HF0X9p+H3R6X/GlNkPIW/NqjYEkK8g+JQE/fTbXSi4Omi8uo0B5FG4O/wqsSp7dwByFIh/5eL+m6Cxrw15ezaIP2oj3tZA8M3I6qAAIuTxDt1I2xOW0QZZDYS6GzznIPX+M5DZP7UARCRcUGMBSPZV9ySafdXn0usW1bugv33390pM6PlSWmDIP+IDkUHf9Vt48Wv//M+/auO5/ti1U8tQeOqPtt/vx5yRlRi/bJ0LF1biAKKXDGhTABHSA5BYlC3dA4iQHoCkIoBY93vYUWq2n2c5zsYi41gHcbVaaPwsLqpIx/SLUwBAQv3jrclKPIiefeX9CSrVtYdMXz7RDYD89NNPP137wl1fY+6Av8VphezH9KM2fP7HX38Vl4WTlxZEYPCPYdcwBn2Py2Y84swCCX7iKAZCxnEJAshWVy4sIa/3ACQmb/uD2J0LS0jhAUiKAgjJmQ67897tPIDuqPvHZyAuhOC0uGMvbePCkh85SOOtRdaSxxJUqtf1L/YPr3dJf/z2z7W9g2dvaqkOpGHMOv5/X6x85zfxZGJh8iH3ITD4X5FWDArP/Q0KVqxJMAtrLcjc5AhANHlUgkpuoysA0eWtHoDE5G0/l2etbIEusz0ASVEA8ZnHOrzXy9CCiSVn9J8WesdHHKz/avgC57RBoD7hGMgzib+MUQN/2XPxK9NrV2Lq9Of7X3nRxfVJoMW/uP49FJ7653gAJM0Y8m+Mw+qffv6pxXReTD18AwKDv29yHePUf2J6zuvIvX55YgAiHVh3/Bn8RQcnuFDcpfFqfLcHIDF528/lWStboXGeByApCiCWDnzFgat5M8hMvCr90kUHOOr8QfwCdD46FQHkFgfZANXwz381oVYm84Z+3Tt49sP1yaHfH1Z2wR/itEL2Y94pf39m++sFzRW919fX1+Pyfv8T85pFp/0Fk8puQd7qONN6V17noJVJNYTcACH7YPLkRLKwPoC7QsIHPQCJydt+SuCrHN9fyAkegKQggIQOYhJyjoNNQh1IzkXBVYmClZPuuzUQvBqTjPTUAxAhSxz1oxfFH8cFIHlrVuHyS59H4LS/IjD4/zDnpLX7//n9fW4R5Izy3Cubuptijn9j5jE3Nne9U67JPAuBwfXNXmf2qO3IvW55nL2w7nTUC0szfgHNOCAxn6p8x10hoXzcA5CYvD0CQlbDTSU6yUkegKSwBaIF+zk40qIWgu+EHue7Nx5odYcDa2c7SE52wMs2OQ9Ed5TKK2Ql8le1HBeYcNX1mHfhnlD1eFpg8H8w+4Tf49KD78Okg6f8119///N/fvrP/kQB5E+/+8PnmHPif8cdCwkM+n595earYl2vt3mW0SSA3sQKOeMbTJnzIPJbskIqVsG/+AkH3XhrIeQD0GVihVmafMlVM0Uhn/UAJCZvD4fGW+GmF5aQl3kAkqIAogVDluaLCabVVkHwRuhG/Gm1Y2Z1g3AUr/wQenBgagKILk8G8ReJA4hRh5yrb0d+efPKdOrlT0RR8vuV9fATphxaO+n+4k3f/OmbbxOuC8lCWVpgyI9xWiHfnbd2/NuxrtVdnvEaAoN/aCktGLPHbWo5oF6xEtmlTs6Q2A0hy6GbPRJcKG7auVdD4194ABKTt27bue+E1obdeD0ASdwCEQwILnJwRsiXEMbIFq8f6mun8SjVmSDRzfpzDnnZ2gByLqDLviAnAmLUwF/2IgoqYldq56xehcLzf91C76rvMPek/zt24ai3rGDEzz/HCyBPvfvSx5hzwj/iLSzsaZ759Z7ff/lq5HX++vs/1XeTp/0pDmtmPwKn/RWTSm5vwW23HMSfOjqRkOR0aCUJCoB5u6sDpYhf9wAkJm8PhZBuDpTaBeKZHoCkMICMWwlowXMcbML2QKjuvM2l1qZdEop/LHTQfbcamrSanM0dmmIAUl4OCO4D4nsdCbQwtyN/dXQAyVu7CtMuexrGwHh7
V/3rxKvGLkzEAvnrd//7b0w7Yo/VuyqeyvRT/rbslVu+ibzOveufNRAY/H/xPefAHzBr3MZmemSVI2/FDY4UjuBq6HJU4kqDK1wcaVsFwR8CQEKB+y7jwuKDofGLriw8wYVtxl8PQBIHEMuFdRSIX0xQbqtBcnNc188t7Q3i+x0o9FoI49QWQar9XFgl3UAy6CATy1I+OVff3fRc9IoK5KxZiaKz/4DAoP1xtx4pGvjnW959eFbcR9D+Y//3mNL3zZZdTzZrZ0rfjVt/Xf2BPaX3sgfL3kBg8N/jfs7AoP2YcPV10UGkYiVyFj3hqE0z8YfQZeKdPoVc7HD+GoN0bUlkPK6etzrOUdWOFshBEPIJFwBSC+L5bWyBfJIAb6s9AAGQVdoDJMsdzPM++M3T48i+GgjiNx20j3/fCXa0HYBYuyyf40nTy15Dwerwtuc51y3H9LjP7QgbB8iz6n79x//6Ii4A+WH/D7is77tRazdi14X8Bz6s+eHb73+ur6+v/+d//vWrAUszf53QcxoDf8AM8UHUupCC1SuhF3/g0JX0HHzyaAcAMs9ly/HtuDDQvc2UnJB5EFwEwfPiGHNB8jEXFotLAJF9QHyPi/vXgvgqHDO3jXhrHgnB0+PmrZALlQu76wLIo6tDVsil6gCwRNN5F1ub8RnNWTiao5owIYsBNB6/kZIA4pengvh1RzcS5ibkLr+hMZheUYHxS9dh3nm/SRQ8VAPEv/dfNPzTn37+6ceWAOS7f/5Qj2n9/iv+TKzGewxfe2nV67UfTTirIvf+eIsSw8cZf7aC6WFWSDlyr7kNJLc6cyXJmxwJnU/muix22w4fH9FG4OEkE2qhi/dzByA+2Qsar3XhIqwB8Wpkco+U5K0veAyIf9mlASTU1kkzh4D4/cQ3frL57rw+ozuIFyS+yZNfwR8Y4IKXbQQg1s3ucnyzrEXPNCjSvDWrMG3iCyga8jdHAGKNHzC9/5stZmFNPfwaBAb/x8V9/u7qt1fkvRp+6FTFKvhLX3ReL6BOGkt4oRjnuWy3sQPEg5CqpMnFLiwslxaI0V0FP3e7AJBbQNwnNXkbPEFVYndtF1bj/Z931K7Gz6fGjDOT7AOSzyboHqsByddAskdqA8iIeSEf3eUOzDe1cw5+hNwV1yO/vALjl12LOSN2OXFfRbia/onLDtux6BfXL/rHv//5ZX19/X8amir+/NN+FA2ck9ZS4V9rj3nD9oWfg778BghHDRR3gXgjBJ/maKHkFB/pKBU7rFbBGOcBSBSauCANQs52cf9qED8KYRzuAUgKA8jF00N6sMSB/O6AYMt/NTNKppSfj3awfvaAjIUQCVaft5sF4jMOA8nNjoUka4nVsfbSwD0oOu1vSVLS32Peyd9Ax5pbPni05smtr+589NNffHXu6oK3UTTw23YFj1Bh4WWzHm5ob5K1+FHnQUn5hnOpOxIuLZCdIDnNA5CYAdDxLgHkFZA8xgOQDmGBDHYW75K3NRNjnuygWLsWJEfhHImOASDWDa9zGCysgjC3I3fFWszyfYzAwB+SqKj3IzD4H5h38h8x96SvMfek36Fo4P52B49QYeEM/T3kr6pAfvkaCPmpwwmrheCgUpaJzVljW/5NLjJqKkFyYYPJ7QFIJID4HOTv25Mj1oPkyR6AdAAAsWJJTg6Cewl6RHfejLJQAD3RJJAqFZM+0SUv2whAGoJIxSc6O2CqocHiOzAG/SXhoHaHHYO+w7zzv8QlS65DzoJfgAynqZ67oZce6nKxPO6ytuJOzwKJydvzXWQqWe4IXZ6F+g0egKQygIT6VWlOAt68CcQjmlzzovIeEAkngFSD+GZQYa+OASDWQlI3NW92IaxVmDHyDzAGdREAGfwtjFO/w4TAUy7qBOpA8g6L98UuFIGxzGUq79PQi3p7ABLVvXsSiF91wdvPQVwAYXgA0hEskAxjoAOX0x6QnIWMhWlhljwZ2Q4SMHZCsMr71juQCwsAdGOwMted3Th7
di3mnPdnBAZ1BQvkWxSe8X/Imelm5/8FNHm+a8Hzycku4yCvIUsO8AAkmkIxDwHxEy6EcQ90vhJ+9gAk1QFEXgagHhD8boKbwloQ3wHiA4B6oN9EpU/5TgfusPXwB8917VJucwDxG0AGd4cmlzlPW5S7kD9lLwpP/79ODyBFp/0dE/O+cqG0a0HyKQjzAOBCd4JHxnkuj17dAJIjPACJKYx3uxBGa56ffxM4Y7IHICltgZQDOUYaBCe65qpA/CkyFvQFqxOpR83oBeIPHTRPfCNJvGwHC8QKGp4L4g8cKyNfsBKTs3/T6QHkipG/B8ldrgBEM9yfVpcxGxDyBAi5w52Sa8OusR0FQDJmhwKrS10E0ndByL04+7JeqcdbD0Ca0OBCwCcvdhAP/gLCaNyEkRxpnVyYaO80cxkAYPKCDgogFogsddVozTd/J6ZoXyMw5O+dL+4xaD9mX/AnCKPKBYBUgfhZCD4yKYKn8xEQ/JyLTKxaCF4Lv9E75TKxUsEC0TlfFXs6j4PoTK7dEh6AtD6AWPrveBC/kaAC3gOSq2wAMt/BpqMOFDyv41ogocLCguKDlB+wyp0lkvVrFA3+e6cKms+66BvohVXQXFkfO0E8L2mCpwd7gXiNi0ysahC/jSx5OIbM8gCkKX9PcHSWQ+QJdgBSir8egMR6jt4gvinBOEg1SG4CAIye30t1Oa9KaFMpeGMSedlOFsiwUAKAORQk97qyRIh3YXLWbzqN5THrwm+QNdt9K3Ih3wVNtVwaBUlqtKfL2S6ywazzSHRzrOfCiiWQ8n2X874DeaWHeS6sDgAg1rPMdmBB7IUoPgM6D3BkwWjyGgCN57V3WBeWboaCs05O6gofmlmJCRO+cNl7qv3H3KF/RtacZJxjsQeaMdri84zkKQOS41Q++i7Hi0bIOz0AiYyrlof4e63rc0yEnO0BSEcBEDnUQf1PrVVHIi90sJn7An55bse3QBoze4Bs2QuC73V5boDlzsqZvhtzz/lLh0zXnX3Bn5TbqtI1eOh8MwBgQlZylQGZxzruqmwP9urFJ1nKpdADkHCAznC9mRLyJWSV9fYApAMASMbV3R10Kq4C8Tsgw0hwzVZDyPUYUdEDPLiTAEgjEp8A4tdcHuyzC5qshH9ODS4f97sGxZzawLEfgSHf4jLfbyECbmMeIYH7EFnF6syP+uQrBJJ3uH5G4kcaL1jvAUgDVaSBeKtLOdgO4ktSh7cegESlyzJCum+lg/muVG3hE/ndbhCvggh0S9q6SBkAsayRUyF4s2sQIbkLwqhCweS9KDzrf2EM3J+y4FF41v9i/KWfg4xkHQG6HbrMTZqPM2y3ZIYWTYGrdNPGStgcAMCkfA9Awvlb7rLiv9o6OKy0b2rw1gOQ6KSUuE9e4DCu6ODwKFPHlAokkZcpAiChHjGZhYNAsso9iKjhD1ThijH/D4EhqQces4Z9g6x5NS7rPCLdF9cDy9JaVSH4l/RyIYD28Uv45XFh89/lLRAAZA512To/dNJcaRP58gAktVxYjR6YbUnTebHB5j1oST6TJ6UskMyikABdrLrOVieFeb7incidvgczLv4jCs/8X6uP1qD2iXMEBn+LOef8BRPzvoIvuDOJC6Qagl9H/oK+YbvZZFOol5aQN7g4Qc/eo6sCgg9ofyWXKllYxYAuD4GQr7rMdrPO0ham1u4g4gFIbAp1DRCywsWBYvFapffCLw/qvAACAJpSfLoUELwpaaismZXQgpUomLIXUzN+i7nn/BXGwO/apinjoP0wTvkOc877C6boXyNrTi00szKJi6PGahNingkAyCpufaXgLz4RWlIW/G6QLMdwnyWQmtG1AQQA8hakg2SR62wsS3Z2guQEz4WV6hZIcCiIv2pFAKkEyfnJ31CmGoCEPZx5IUjuTK5pJ3eBjF3ImlOLS8Z/iVkXfANj0H4Yp35nBbSTWdMxcD944HeYO/QvmDj+y8baDpls03QHBFuNrsYuaH2FMGG82tHy
zUnaNVVD8MPIDPT3XFgha4FPB/G7SVojWyC4EZlDZ0h4AJI6AKKX9ITgDUnzukRvBT8q6fOfsgAiQgFF42QXhyjFByh6YRUm5n+JWRd+k7RYybxz/oqpvt8iZ/ruJKTlNl84psnxGLsGyGtjF4VunAuSyTS7P4DPHIGM0nQMzunaAAIAJG9NIm93Q+O7kVHWt8l9hgU8AGlvAPFxOjRe3YpurPcwZkrPLmaBqE2TxgNA8oVWRGfLxeUr3gnN3IX8qZ9hUvavMH307zHr/D9h9tA/Y855f8Hcc/+KueeEjznn/QWzz/8TZoz4Iy7L/C0mFnyFrNk18BXvhC9YmYS03JbcE/PRHlQwF9DNA1V76Zrk+WnlPmh8NzKLhsHP/YGjot87UWpJSWpyISjhQ3laB0AKGro0nOuytUm0mFMtNKMMWmAQRODgpPC2Jf5qwWM8AGkJQIoBksLFGmxerjS+0brPvC4EIBYyhwS8PwRfDSHrWjlboTFe4iveCV+xtdD0wmpkza5F9qy6sKEXVjd+N7jTim3IXa36fJairQJxY4l5shdG/NkjF7usTI+xsLgOgp8B8RJoRg6Iz8CEwAHJsZwCh0GXgyHkSAg5HoLngfgZF0CYfAuk/yUhV9aSJCuVKmVpbQbx3SAugmaMhe7yaNMQZS/pDp2Phi7PhpCZEHIyBF9lHbnrAUgLcZBTVWv2ZOu33fAHx7WO/Kc6gADAVcqvnxfoBpK5ELI2ibteBzEU+2jzZ7CsMN0Uth1eu8ZIoctrWwnUq9U8b1Gt/1+F4Kch5M0guQCanAHiyRBcgEz2Q5PZIL4ExFNAPBMaF0GXi0ByLUjeA5LPgPhl1T/ofaXUQkV7bqzb5ANIqM12RkVPCPlOK6z3KnXNKhBvtCqb+WUIfgRCrobGDDKng4xJIM5DJvtBMg/Ek0A8DRrPBrGELq8GyRtB8iEQv6AsjbeUItwE4h2Kt1UegLTksjQPAvE9Sfa0WHxvtQ1kRwCQRjeD9WdW0UkQ/GKrWyKpOTZBSAs8kl0omDCpQqhxVxyodrRtOR9VCY7WfJbWiYGE+KsVXdTKKZ6pzNuuAyDWs5Ukmae7QfIma1MyuYsDiN1He8eINOiyRO106jo5mFSplM7nIfgMpBKFXGeacY5qoVHdBUG9lQBE8dfH6aCkK5aOMroGgGSUh+K9o5RFmCz+fQHiMZ4FYie/YX+B80HGGqW8ajqlABFXQZhL4QseAQCYNA9AeeqASKh2Q5MT22hX2nUAJESZJQeC+L4uCNBdywLJL+2jzkhKzsZT8Fbo8w7yACSS7CmrYl4PZPIw5SvuTMJTCyE/gSYJeaXdLSVtImXJv7g7SF7ZBZVc6wKIuELFmub3U7UhtR6AdFIAsZ7vpiRtwupA8hZQsEdSj3ToFADSoLSUQvXzcSB+uxMJTjWEXIecOVZb7qklSGny2woYSa71AKSV+DteHgiSr3chkO56AEKSkuRN+QyCJyJ3XmuCXQcHkAY3Cs9IUoO/9o5zbAHxo8hUleUA4A+gQ1CutC0sebWaj2oPQJLMXzL7Qsgnughvux6ADMnuloRu17us9kbyrFZdk50CQLLMQyD4/g4aA6lSef7bIXg1KJgBf6lVMZrJ6HBkb+Ko8xSIhiQHD0CSyd9ssz8EL4eQezo5kHQtAMkoDD3jHS7lphrEj8EXOMwDkObo4iBA8ixVWNfa/fSTHRzfC8G7QHIVdHka/IFeDe+VMwEdliYvsFsiJ4P4SZfnW3gAEkaj1RrhHiA5RqVQ7/YApJNYIJYbywfi36rNpbMheGWrL8UODSBPA8goS4MmlzlQUFXw8Ueq0KktXV87QbISgh+BzlnIReNCLC9HpyItzKXFILndA5BW4G9O2RFWoWSHduF6ABJNbtzSuKLW1Ssd3gIZu/hgtRiqEgIPIXdgbMlhyOKTQTwVQq5TO+XXQfyJApY6NUKV7zXKNIw1Qt+ptf3uUxC/B+IXIfg2VSV9XNg7tFaGREpY
I+XAAUco87z4MBUbeUvxprYDpvxW2eY6NM+V0OXgduNviHx8EYS8X1XY13VQ11Ykf/eC+BboXcwC6SjUYQGkoDRk6hWqRZZYdaaQa5AdDO9OOWpWN5AxALocDo1zQDwbGl+l2pY/oPolvaoAIXK8DsHPQMj7oPH1ELwIZE4FsQaSZ8Mf6Ns0sybQNRcd8UAIOVO1bdip5q82BRVZjXINfWYpDv5S/dsHIH5GpVsuhMaXYRwf0n5xp7JI/o4A8QK1admnrPNUA5NqNed71Pzvg+AvofF2EL8JwQ9bMUE24eORGMvdPQDxAKQ1fIUbHbzAdghJ4a4jPYYJaHYHmX1A8hAIeRiE7A/io5oMjftDl4dBNw+GzzgAedwt6vU6m5sqEfIviFR0ByGzaACI50CXLyhl8rlS2tVtqMR2hyky4i8gZCUEvwQhbwHxfJAxHoLPBckTofHREHwY/NwH2cHGeW7vpAc7f32BbiA+HJmFZ1vBdv5I8fazNrL8qmxWhB2E94H4cwheD8FPQMjVIJ4HLUDwBwfDx8dB4yOhy0PhN3oh/8nWUHrz1XN86WD8P5Cc6AFIAy+zQfwrh7z8GmQ82Y7gwZc4FITnoKuqbo/ajy6eHv73y67sCd0cDuJFEObLELxddRgIxal2qvTGymbSHCsjRuh3O9TYDuJt1mmX8nkIeSuIF0M3p0HIcdCMgRg6uU+n5G/W/H7QZT5IXg8hP1S8sPO3MkH+7ozJX5LvQ8jHIOS1ENKELieA5IXILDwOh0/vFpdLrnUUnid3qcZLaqdTSEH8tMOjHa2tWoHmLYJUp3HFR0A3zobgLAg5F0KWgvgqCF4Jweugy9tAfJeKL90A4jUQvNICBWlAyGnQZS50OQY+eRb0wFEYfmfLu8f6hv90fCovBzCp6b9Puj4dGfJ46MbFEHIihGQIuQDEV4O4Arq8AULeAeK7QHwzhLwOxBUQfA2ELFPzcSn8UgfJi+EzByGr8NC4numJTsRfjzog6eZFjs4YsHa1VhA7o9zjY5emLjr/rdGVNSoAe/LlUSpS7txuIL7KQeFgDci4w2OgRx555FGXdWsU9QbxItX2I5Fsir3Qi8/2GOiRRx551NVJn38QBM+B4KdUD/3aZrJ3aiHkM9bvgh7vPPLII4+6LAlbzyWtrCfIHAnBS0D8PkRDvr4dQPbBxxke4zzyyCOPPGpK/kB3EB8JCuSrs5k/s1WJvwKND/eY5JFHHnnkUctE5mgQP6vcV4ysxekeUzzyyCOPPIpNwyJahAhjKCgwwGOMRx555JFH8VFk25B2q3j0yCOPPPLII4888sgjjzzyyCOPPPLII4888sgjjzzyyCOPPPLII4888sgjjzzyyCOPPPLII4888sgjjzzyyCOPPPLIo65OTc4KL/d4kiyanETeXlLUtnND7XgEqZCxFmv7zk2s55pYlHpykxXr36Unl6lOog3naFJhAmt3xGK7gkgHcU9o3BsFxX3gk/0xorgPdNkbxD1BsvH85Eu5aZsSjxop1zbhUzgNxN1B3Aua0Ru5Cw9BpuyHUcUHQOfeINkLgntCM7tDmOkYvUwtGrYvoL4QcjmEXA0hKyBkcas9++VzAF2eCyGvU/daBeJRKSBEk23vfyMygke5npt8TgNxDwjuBTJ6I2dBX4yVhyPDPAAiNDfSmhtdpuOot6PNzbkQcpV6trUgKVJ2XWYGj0OmHINMOQo+ORJZ8pgOI1O6TIPGPWKOsso4N0KBdGjcM8o1eoLM9u/RR9wXxHZZD7aiTPWEkFdDyDXqXlcnMCEzrD+z53WDj88DsQnip0C8HcT7QPyV1Y5dVoL4RRAvhJ8vxpiiXh5CJLAY/JwN4rUgfhfEdSD+So0vIbgWJD+G4GdB8haQLIYmc6Bz//CJ5mPUEcB7QLwbxK+22jMPuwHQZB6If63uVQNhzm5/AOEbbe//W+iBgYnPR8A+N8fCz5NBfDsEr1fX/hWIfwUh90FwFUi+D+InQPI6CC6C
JjOgGwdFPFee6jK9G8Sfg7gsddejLATx10q+d4N4agfahR8H4hug8SNRxk0QZQc2b2EuUNcxpkLjJ6Nc4xmQvDgFdMax6nC+0Fr/ZSvK1AEQ/Knqlr4bxJ/Gaa4rZmbN6gXBK0D8qXrgWhBXRZzfUaXO9NijXux6kGG1Y88wPZCIpIJSBdB8GogfUaCxWymZaLytVnyvUzz+CsR5ERN9NARvsv3+pVZ7/guvAzSZA+K96l47IMwZKQAg62x8+xLCONXFbjYHxC8phV8Xx9zsVkK2DcTnRDxXttp07VLfLUlhAJml1tgu9S6XdhwA4YEgflvxOHJsBrGl/DPKW1LQ76j5jLzGr6AZ2SkCIJ/a1uMLrQwgn9gO//sk0Ye9X4FDlU1odivl8bn6c0+EcNWA+FkMD/bw0CIG5c8/Chq/H3GYVpVNEe1V4zPbTiO0qPdAyLEegLQCgDz5DKCbY5XCsa/pajUPe2PMTZ2am08g5KAODCCzlVzXqM3g5A4EIKeC+I2Yx2QLbrlDq24OaOaY7X3QjCwPQOLfhQVtu5FdIK5UCD8Tunk6xs3rDzIGQ8jJIP5lxJnne6DLFR5SxCCN71VKJ8SvnSD5SxDPgSaHQ5ingYyz4ZejQZwDMqZCMwwQr4Lgx0BFZ3oA0goAQvJAaPyLCGDfCcEPQeNLocnzIcwhoMB58JvjQJwPMqZDM4IQfC0E3wGt6JgODCAZEPI2CHmTFeMyR3RgAKkM2wBofCsyzAOi/vbaBuU8O+L3lR6AOAEQX/BICLnVxrzdEOb1OD07vZkXWxamFAXXggInWR/Wx3HT+vCvlZc7fOvyFv6eIGXMtrn2XFwr9D5CDlM+Zrvlkbivz+6v72wAMjli43vG5HgWu3MAaZybCcpFGLrOJgiZ43JunAGIEx5EI/vv8ppJbLlqVgy+mm0rb3Hri2YBpArEr4PkiwoEqiDkW9DNE1sA0Idtv38Zgl9OHEAi9JibZKJoc+cYQBzMU8IA4psXYmShzaKoAfELGDvHQm9fIPpv8BhsPn3FdBW1n362xcgl853ujJr/PLskccEOTe7Wpc6eKTvBjWSv/NACuMU2Ibsh+KaG7+Q4TKhIBEAogQXtDyYPQCbmR1EMDhSFVph8AGmcm5ds1kctBM9Pwty0ACD1wNtJBnk9gZTqTPXd6hgJNtHO5nEqb80pVI0t0Oz2TjIskGqQfAAkp4B4R4OsEY/EqBi8yQoeD+I31XfrQNKEkCttMhUbQKgNM06JD0+6BRLr+R1ZIKefkgbiu23CuBeCx+PSEiAjhvCGguWZgREg+YXtt89j+Jxu4YDDo6FxEBpLaFwIjY/BuZN7KFfY/WoRvAmNHwbJaSDTyuoKBfabTLzKWJ2yLB3E00B8PwS/odxtr4H4NuhyYsP3Cy5rqrg0NtTzBKHxFIt5Rj9osgiCHwbxW9bikg9CyMsdL5zsol4q2ypkpX0JCliKLqPMjakZJ4DU2+fhVAgugy6fhuA3LX7Jl0HydmhmPiYstPiexy0ByHbowcuRFTwFgudDyCcVv95S2SwMv+wf9XGuCQEVHwld6hCyGELeDI2fVevgbcX3x0FswDf7wJi7YrcuLL9xuFIyobn5DONm9kzC3DQPICEeTFjcG8K4CLo5G0JWQOPHQPyK4sHbIH4ZJFeAAqfH5MGwABAsC4HIQAi5VB0V/ZYaz0DIWyDkYhAXIIOPiQCTodC4RMmCATLOjClvWcW9QTwLxA83rB8hXwXxTdBlY5LH+Chx+IsDB0HjMpvM6eEAaFwAIStA5uUQwW4OAeQx+ORRKhnAcqsLXgSd02P8XleKeReIt4LkOAi5rEUAuWykXY+MguBV0OVLit9vg+TzILkawmz8Yh5HtzhPv11dxzzXen9+Uc39GyB+EiRvBPESkNzWIoCE5skf7KNcc49EzNMN0G3W9YRJSQIQ4iNB/LxNGDdD8Fkt/u7i6aFUuk9sL/cWdOPECGviShVbqQHxNgh5NwRvjZLh
FQoqv4ts82QAwOLLbYssYGfWWGvSuS5GtkwtiD+GLi+M8c471fPUQfDb0OQNILk3IoEgdK06CPkuqOgIB7uHUxV/rEUu5LsQ8pAwN1lrAcjlo60/x6zsBTIqIFQKbnR+7QbxR8jiizGqQm0SymMByC61y6uLwa8aEG+FxhpyS9OaPruRoVJHa9V3q5uZw50Q5vBWiYEQj7IBSC2EfK6JC7NVLBAAeuBwFZSva4YHoWD+XghZFPN+h85MB8k16nrVUeYjlD1WDYoILFtB9L1Rg+jh8palPo8mb6HMtLcgzDOjbrQy5x8F4t+o++yGMG9UvBLQ+M3GZBJZAt0xgDyBzPm9QXxbw9oS8jX4zejXE7LM5j15E1rxERByebMAEpKpzOCJEPy0uk91zHkjfh55fHzjxlvJ1LGXKWCZ2QO6vBWCP49yndC81UT8+wsRa8luVeUoF15tM/P0OoR5WtR5cgQgggcr9Aw99C+hyxMSMK+ett3wI5AcFrFIl6gHb1TIIeUdnm20q8GcFLwBY+YfGtXtQWY+RIPiCl2vTi1w+7VqQLwRxOdjfHb4dRoFfJctZ7/Gljq7O2LiqiHk47ikLLFMM585WmX4hJ7nMZA8MAnBruYBJKQEfYt6QsiHbDGYKhvPa2zAvksJ3B4IVfjmv6w5AKmKMod25WItfH/x6U38r355vgKQ0O/22Ebo79WN95Efwhc4IekA4jNn2tbKbhBfm6RAZMsAMq20p+39Y/HAtv7kr6AFqAnACaM7iNdEyE+1bV72hM+NnBEhmy2n8ZJ5OQTvtclb6HqVtlT/0Lt+CDIGNZFbC0C+sin7Z5T34XPbe+4ByfmuAMQXPBRCjlHXtdZFdvD4Jjt/wYeC+KGGewt+UIHKipgAEuI7BY+B4LdtfKuOSAO2r99aCLkewhzSIFOhuOrEYHeQfEwB/y4bD/dEZPzVNgsgjfM0UwFRrHmqaXTX8fsg4+Qm8+QQQIbZdslVIH4Wujw6zkk8FMQP2B5uM4SkZgBkl1JEb4N4HYiDIC6B4PvVIq1sFGi51hK2sfZdw0kK7EJM2gEyVoDM8SCpQZiTQHyPbRHsBvEjOPu+dFxxUTMAIndbyp2XgTgAYqlcWXXhAVYenZgykQXKUgotpnuhx8gMaQ0XlpALbbyvhuBtIF4CwQWWopPTQfywDRyqQLweZJv/qAAidyhXyQ0QvAhkSBDfELGLqoMunwEA8HQ7748G8bUgXgSSJojngHgqNPMSaLJAZZ+9G34tOSPpACLklWHKS8ir2gxALD6Ug/hKkCwBcSEET4fGk0BmAXw8BWQ8YlNSVSD5WpTYx0XqXlW2zddbEHwdiEtBRiEEl4L4Ggto5MiEAETIsyJ0w1aQsQTCzAdJgpCTofEjNj5+BuI7ASBM3sIBJLSZsgPP5yD+AsTzkGm6AZAjECxKB8kQTz4DcSBMUZ61BhDGiSDe0sA3YU5X79tyDITkQzalXwPBH4I4AMG5IM4FyYBypdfZ+HYXyOwTwdvCCCthj4qXToPGkyCMmZZ+lDdEZLy+EGUtnwfiDTZA2wIyFkbM0+O2edoL4tuazJMzAJHDbbvkKkuRGvG5azQ+EMS32x5sK4TMbQZAtkPwbOjcF4J7I5O7wR/ohizZCySLbDuHXSDegGx5cMN1xsg0aHy5ze2wHRoHcM794W6S+npAyJdtk/M5hHFahPButy28jzCu6ASQeRB8gZ4QwXTogXRosq9Sio1BVmrGlRCdt5NtPtk6kLwRmumscl83EwMQX6AfiF+1zWslfDwSZHaP4MUhqqp7ty2dMdgQ64oeRGeQ0Rua7I1sozsy56XDF+gO3cyIsFTqMNYMtx6yZ6WBjJ7Qze7wze+GTG7q5tLkJBvfaiHkCuSWdk8ygFTYBOUzaGbT6HGudDI38QGIZvSELnuASrphnNmUBxmlh4H4A9szbkFOyCoH4JfdIeSSsNR7wQ9A8JHwc29o3B1jZ6XBL7uDuCeE7A2f7B43gIww
06FxqU12t0Djybh4afizzl7UzXLx2nipR1iMTQGkSiWU1EHIRyHkFRB8MnzcG6MNOAYQLdhPfXZ9Q7Guxq8DAK4KW/M5Nl1TjXGyX1wA4gucHrYhI7kJY40jkV3SyBN9QRp0HqJKHULX2gjioTa+Hwzix22f74GQRRCyEWRmZgMU7AaSA8LALhJAhpvp0HiJTU99Co0nYPii8NjPtLLuEPJ92zztgR44LhkxkNG2XXI1iO+DFjw4TiHsrSyJRoAgntAMgHwK4swm1+FjgOFreoLketvDbwUZoxp2D8LsDSEftF3rI4wKHhC2uwilZ2rSZ9slfAad58UAkBoQv4asYHrUtEnivDALgngZfIHu8WcQ8XTbveqg8Upkyp5NM5Y4DcIcBTJXg8wVEWMVdHNYwhaIkBkg3mR7z3ugy4OjZsVkGudHWImP4rQr0xPKwtKDofu+GAa6urykeSbVAz80yQY7RynP0LNfD1+gd1IBRAuzmD6HT14W9XtzjHSQOQNkrowyN1dCN09yZIG0GOTnbhDydttOdit8gRNt9+lttbyxKY5MPl+5tuKMA8UCkHpAmIcpV1OIRy9jZHHPGPKWZ1sfn0GPaIkSDiBV1m5ZzkQB93GcTdYsgBjjbG7bWmjFR4cl/5Bxs60tzws2mYkOIKHYhWXRNwKlkDJmphnJq8O8LhpPw+jiNPXsZ1tupAbr8nWQOSCGfm4mjbceEGZ/K3Df8FzPY0RxuKu9cZ7G24BzD/SIwlGHADLWtturBvG9EBH9fWKmBBq9oPFam/LZAeKJLQCIL6oSEXO7gYyltgW9A5rZGEX3FR+kkFwFCuVTMZ8rv+xQ2wKqA5m3NAsgFCNTQzfPsE10rdXDSvZOYJHPCAMQ4hUQUQAkqyQNxPOUkO2JGJ9DqEyxxADk8gjfamzrSZfHqAyQxusJ86iEAKRxvq+wfbcGJK8M31mXAwV69N8WLEyHz+wNIS8A8Xu2OboJFKFsXFsgfFMYgAg5JfpaWtRNpavviTK2QPBFSQcQn9kLQh6m3rsRQIhPbZQXo49KRqly1naiBQuEzOPCFZe8I+Z1ckqOBvGXNnlb0wyA1IC4UR77T3TqKowNINr8IyF4g83V3bheR5V0g2hwce0FBaa0CCCNnz8S5tnQQnEEG4U2orrMsGV57bbciLKHbXO3w6Zzb0R+abfEAQQAmSeFWyjypthZocXH2+apFmSWuwcQnUfYXFjVIH4Emjwsvl1S8EAIeWuYC0uL6NsUF4AAIE4D8Qyb5bATxI1dZkXJEbaXrwbxQ9D5dAge2mRQYKhNidVCyMccAYhmDlDxmtB3bwNx/EFwIS8Lc2EJvh5+burCyi5JA8m5yg9cFxHAr4HgSQkDCBmmTflUQpiTm9ntHqZiPtUN2XSCBzsCEE2OU+8REuwboqcdGodCyBFW6rZcDOLVSmHeBeJnbHxrJQCRa2zvuxd6DICdsKibihPV2fpkhe67HoIvSBhA7NkvOp8NkgXQZDEEr7J6y/EdKsj7kW2ObQACwLfgIAi2p9C/HTXpxCmAiOAg2zzustLj+YwY8jY8bMcv5N0tAMgdSYg1xQYQv9EHgm+26Z2HG/lmDLe91x5k86FxAwjx6zarYSeE2R8ZgVgydaba8IbWwZ0QSvathpuN8S2hNlmXXpg4gAjzzIh5uin2PBmjbPNUAxGxKXAGIPJChdYhxjwDEWdrbM04BMT3hZnRxH5HADJ2GUBhiqoSxI2717FFAyL8qNtV4GhjlLHBxvBaED/kCECy5x9tKzaqgZAPQjcPTWCRTwgPovNdENw0iD4mkAbNyAIZj4KM+9VCrXYMIAeVARrbA+jbIDi3GaV/kFLcoXu+B1Kp3AkDSOAsG9BXhSkTIQFtWho0Y7ay7DaFx6MaRmSvtdawQK4OC/4KXhT1excWpYOMhSDjAZDxkJVp1PB8iQNI6CwHCpwLjV9V63WrWu+RPLDzIRxARs09THULDvH56YYso2QASEbgPNs8
hj6PJW8bI+TtjnYFEGtnfoVt/b+NDNWqXpjrbL3MHgEApGfHASAr0sLuJ/hjCO7XjEydaAts1yortrdysU22bZQrIUwOc5UlAiCZxkURABLvPIVbgi5cWGfYit2qIPhl6PL4+BYgH2a1t7bdUOPhjgDEPxcgzrL56Cqh8ZJGZc6nRyxoe557c2M3hFznCEB88kjboqkB8UMQMn4A0eQYm3VXC5KPQJN9WgZ1Y0aYGyhRAMlbkQaSi2x83wri7GYsyT5q1xuax/ehGWc7AhBhnB0OIBHKQsjZEQu+Uj3fpwpQNoLkhojOCMkHEE3OsvFnDzRZ0TSQHbG7zAt0g+C1NsvOmQUi5NCIDL+Q4H+q1ovaBMktMS2QnAWHRwWQZFkgfh4ZMU+JyNs17Q4gfh6mLDhL7wipK95vsxVMT42Yl9gAMv7a9PD78YcgPqIZmTrOymgMxWGMR6EZva0kH54a5qonnhtz7loCED9nRLRKSmSeliYBQAIh/3foxh9DyCFxCCAg5FEg+Y7thu9AN091BCB5Zhp0OdWmqHaCbAclWdkI9kDcRyqX/KEWxuPQjMnOAMR0ByDEg22LqBok3wTJg6AXtwQgs10BiHXvEpuS2tkkNhUuOKF07OqGHRvJ0xwBiE+ODXNhCXld45oJHgQh7fUilRC8EpoxFlnFg0BFlgLI5BMiXIfJBxAr9rfbBu6PW7xvZm78Rm91X7cA8lSEi/JBq1Ejn4ks81jopb1VoH+V7RnDAWTcgoPCFLyQbwEAbklWDERGWiDvxSlvj0EL5Lc7gGSYB4L4OZvCnw/duKChVxbJndCLB8YNIE1cWLwFxP1jtnkRckiEC+uehvipkJeE1X8IWQYA2OEkBsKRFsi7CcxTjnsAEaXd1QUbayeEzML0S1vezRCfF9HB95e40OjlLAYSSAcZHIbMQs5s/LzkCBujqiDYEvgCs1ezY3ywFwZd0a1dACRjSneIhiC8cpUET4rDAkkGgMyyLYTqsEBiE8Vo9ofgp2zffwU+VUyaeBB9os2KrIHgxmMuKajZrlMHIW+MnpzBQ9Q5Da1ogcw5VNUeNVb9XrKq+Qy7ZAAIBe3gWA3Br4KitH7JNNJVgsru6EF07qM2BqE18HFyg+jBQWE7W6HqBuKRtwHT0tsdQKz3W2lzDd6u0ntD8vAoiA8La5jaIoDIZ8IbzjZTL+c3L7R5H+qg8Vr4VAINcbZtDVWDmulk3iKABM8MK38QfGPc83RCxDw57sZr5XvvtPnqH8PBf07DFF+swHvIz3hTRD+hNVEWacsAkmECfqM7RFjb860Q5tiw3Wt4mu/7CS24EfPaHkCsSbk5LPeauFzt1FsbQHIiihhXI7e0V4xFOiTiTIyn4C/qkxCAaEbo2W4NbxFi5Nuy2q6wffY5SKX4RrqK2gJArGu8FFGhO6/ZuUkGgOjBMyNSlG8FyQOb8MDH3WIDCKwUeiFfsT3/JujFFujH20W6xSwsaV9jzzuWt7YGkFBaLZmjbXptg7IaQhbnoihWQ3QACWVWWYkejVlY/mBe0+SQUIyLi2z33gNiA3ogXenbkbYMrWoQPwhdHuIiC8suu086nifHAKKbZ9leKFR8F4Relo6RTTEBA4vSoPPkCN9bJfw8Mg4AyYzh+jgTQu4Lq/z2lxwVITD3h/n1W6wxiInqbQcgFBzVpBLfJ7OxopmuwCIJAGJl0bxr22G/A8FN21trhekQckZYAaDgtY1B5AQsEM04s0khoR48wTaHFF6fI80YFshptmevAcmboQUtAAm1lEjKeSDmlIjn3QKfHIr8srQYG6dergFEyBNtrYNqIPhh6FHW1JjCbtD42jAA0YKDbNfpoTK26mznmFzZ4jufI+LMwpKHqSOtaxqK4XTpi4uvOdOREhaIJetbovSq2gw9yln1LabxmiKsgl7w4xhzRdO1ki0PVG6kxoP5dGOsje+DbMk5qvgveEFT/RPsBuKTmy0kFLKf6gpR3bAm9YgD6OKdJ0cA
cvw5oQdZHZYZYOXGPwiS+RDyQpA8F8K8ACT90PgWJXiVtqKau6zskCnNAcg2EEuQHAqSZ4J4EEieBeKJEGEoWgtd3m/tJpQLPb8sDUJOjWgRsB5CzgTJC6DxaSA+HZpxtirtHwYhR0CTPmhmersAyOzQucvy/oieN7utc7WlH8QXgOS5ljtQXggKjlEV8LtdAYj1nneHVbsSPwwyh0PjUyB4APzydJAxHSR32b63E5o6DnTy5GgAshMkl0Pj86CrIXg4BBeq/Ppdtsp7K7B7zOWh5z4xwg35CXyBM5FpHAkhjwWZgyGM4dCMUlsRZDWIX4JfDsJF+WlJs0BmLwDGci+1i68Lb9chF0OYGSAepuZmKEheBDL9KmmkxrkFwr1V111bEoFxCTL5WJBxFMg8GYLPgxYYDwo77GoHdL4M49UudrgBaJwbEYzfAZLLQcGxEHweSJ4LjS8E8ShowUz4I4oemwOQiYvSIWSJ7f7VIH4fwpwCksMa5I1YyZu8wJI3s+kGsV1iIIGQ7F0XpZ/UW/AHj0wYQJCVBuKPw9K/iVeDzHOg8UkQfBJ0ORTEK0DSfqrri9BsnZAvKQp5W3bZPAS/hMZZEHw+ND4fGmeApAHBz0SkjocDyKWL0yHkorCedsTvWm2dEpwnRwASMne1eQeC5L0RAZnQ+c+hlNmttnz4RmuF+GmQagcQGVRq2gsrFID6UPmC10cck1sDwbuQUTSgSVqikMeo9hzVEY3Mtqq00PfVBG+07Tx+DWH0itFMsXUBJGRKZxUfq9wlESDS8OwblMLcZmuC5rwOxKdcjGL+KRC83cavUNPJ10H8S9XHZ09DqxNr7sNNzujNFK3MKcEbIXij+vtnESmCu6BLKxXYflgYhR3tW63m6hmrI7R8Wz3fnihHzL4CMk5JGoAMCwAzlgG6HK74X2Nbn6EmgZ+qzzarbJmIJoeOg+g3hDeM5B0gfsHig3xN3W9vlE6sG0F8KbSAFdMTxf3VuqqOePZqtZ42qDVVrWJOc+IGEOtdToEIq0UJydsWFVR/X/XK2mhzl37VJKOoPS0QTQ5T965pGELeEyPwHRtAGmQqOE7NTZVNpjYrvfSqkofd4WnotiLVEctC99IjgKFGydCnatRFWW/Re2ERD4JoSNaJZ56qQPxlk3ly7MIKFTbpsi90uS5KMduuGC2Ld1u7axVMinYQVFMAiUw12xWOxLwDwrSaFq6OMs9ZxWcrJtQ2k7pm66LLX2PM0gPazYUV6tNv5YU/EaUYrSUe73FUid5odp8JIXdEgFIkr6wFJeRaCFUtGxKY6ADS3KiF4G3wy2xMnJOGUaoJUaivlG6OiAIQ9uepsbU6t8/xFyDzjKTGQBrdpxcqMN0do636rhittjdD8IVxA0iIBzmlfZXSqY3Bg+rGFudhrbl3W00SbR1rRfBskNwah7xWQkQUS8bTjTer+KIYbcWjraFqCP5NSlggTWNdrytZf60BGApKE7RAAPgXpEOYk0GyNoInVVE2PV9Al3PDQCh8Q1yiNm1VMdZYrapRarkbb1bxCNsRCfHM06+TY4FE9fXKC0F8swpab1KLfUtErv49IJtPNNYpguEAUmm7xqe2a24G8ScQ8mbogePV7iq2Ms4qPhgkK2y7w09tI7Tz+ghCPgqSV2NaWc8IAAkh8ga1+40FICH/Yui7d0DECHY1r6Ds8aY8CPmQAgA7Hzbbnn29igE8ACGXIktVhTdO9FEQ/Jb63icgfiy64Ch+jSs8CIKvUVbfJhuvVM2BfKHBv61HzOOF6wBNkgLt9WoXsykG39dD5zXIlNGPEg1Zp0KOVsK/OeL3H4P4JgjjIviLekDnKyFkyDqrgV48wMaDFerdP7H8/1HaSiQ+N3Mg5PNR3s9Wm8EblDK6E0KayOKjIuaGVJA8tOMrisqDSQsPhWbcZ5MB+30et2oF5vWBzgJCvqH+fQdIzkN2WboCj9A79LXOaJcbovB0o5q3FyBkZFbRVPW9T5RMFESV
N3/wcNUVdkMUvoTW7AfK5b0E9fWRANJfyX5Ijq5LAoCcbJPN9SB5D7Tg4VG+FyNxyIhmgSyxrant0AytmU33EAh5t+JvJE/WQ/D90ItPjipTqAfyp4V0Ean1FHEN+SYEz4fPGAySr9hk/ZGYetEf7AeSN8cxTw+A5OIovOoNwa/b7vVaYpOSXQKMmROBuMETQcEzIfgiEJ8NPw/AEr9N4d7T/NnLTYPoOcidfxyEPAt+vggkh0LIU5ssutgB/8b//696gOSpls/QuBDEZ0ELnISR3LRYr7zcOmlu1vlxZqZJJJX0iLizmNsTxCfBHzwHgi+CME+DXnwsxi+K3ofM30KjucxgWlxVyEKeZFVBGxeA+HTkBOI/KGt2QTp88w9BdvGxyJJDLB+r4ntm0bFh350+Gy2ew5xtngKNL4Dgc5BVcnwzyuJYaMZ5uGRmqBVE9CD35MnJmRu96BCQPAV+cyh0vgBkDkJWyVG4ZMkBzc5NLGWVGQx/NvvON7f4CGg8FMIYBr34ZIw3esa4xxHQ+DSIomMw79LY61SYljyQHAZ/8GT4godGzL+ay/NiJZjElrfxV3UHyUHQ2Jp3zTgTWmAARnDvqPIGxD7dkWLMYXzg0Yzcmg6vKdPiApphAWCq7f6ZU7tDyEHQjGEgPh/EA5FvNPKjuboiO6/9weNVrO18+CNOjoyHd/Z5ylvUwzZPF6h5OrHpPOWEu5fd6JWY/nu3FG8WFgDcEeGTc0uxjsXtKDQsiWcvxzpbHAC2IXobhcj4RTwUz4l+zZ2xbVfmBXObfh7t35JFiRxZ7HZu4j1nPFnvm2G2/nrt6PKWTJm64rjYMmWnvGbA8LNE+V/eCecp3kJCjzzyyCOPPPIAxCOPPPLIIw9APPLII4888gDEI4888sijjgUgV6uWJ3tUKh95TPHII4888qhlypTHQ5PnQ5NDocnzoMm+HlM88sgjjzyKTcMCwLLoh7wlvcbCI4888sgjjzzyyCOPPPLII4888sijTkL/HzWjSfdjPJHPAAAAAElFTkSuQmCC" align="left"> # Glaciers as water resources Our previous [notebooks](welcome.ipynb) showed how to use OGGM to answer theoretical questions about glaciers, using idealized experiments: the effect of a different slope, the concept of the equilibrium line altitude, the mass balance gradient, etc. Now, how do we use OGGM to explore real glaciers? This notebook gives us some insight. 
Goals of this notebook: - prepare a model run for a real world glacier - run simulations using different climate scenarios to explore the role of glaciers as water resources - understand the concept of "peak water" ``` import matplotlib.pyplot as plt import calendar import xarray as xr import salem import numpy as np import pandas as pd import holoviews as hv hv.extension('bokeh') import geoviews as gv import geoviews.tile_sources as gts # set default font size in plots plt.rc('font', size=16) from oggm import cfg, utils, workflow, tasks, graphics from oggm_edu import read_run_results, compute_climate_statistics cfg.initialize(logging_level='WARNING') cfg.PATHS['working_dir'] = utils.gettempdir(dirname='WaterResources') ``` ### Define the glacier we will play with For this notebook we use the Hintereisferner, Austria. Some other possibilities to play with: - Shallap Glacier: RGI60-16.02207 - Artesonraju: RGI60-16.02444 ([reference glacier](https://cluster.klima.uni-bremen.de/~github/crossval/1.1.2.dev45+g792ae9c/web/RGI60-16.02444.html)) - Hintereisferner: RGI60-11.00897 ([reference glacier](https://cluster.klima.uni-bremen.de/~github/crossval/1.1.2.dev45+g792ae9c/web/RGI60-11.00897.html)) - Columbia: RGI60-02.18415 ([reference glacier](https://cluster.klima.uni-bremen.de/~github/crossval/1.1.2.dev45+g792ae9c/web/RGI60-02.18415.html)) And virtually any glacier you can find the RGI Id from, e.g. in the [GLIMS viewer](https://www.glims.org/maps/glims). ``` # Hintereisferner rgi_id = 'RGI60-11.00897' ``` ## Preparing the glacier data This can take up to a few minutes on the first call because of the download of the required data: ``` gdir = workflow.init_glacier_directories([rgi_id], from_prepro_level=3, prepro_border=80)[0] tasks.init_present_time_glacier(gdir) ``` ## Interactive glacier map A first glimpse on the glacier of interest. 
``` sh = salem.transform_geopandas(gdir.read_shapefile('outlines')) (gv.Polygons(sh).opts(fill_color=None) * gts.tile_sources['EsriImagery'] * gts.tile_sources['StamenLabels']).opts(width=800, height=500, active_tools=['pan','wheel_zoom']) ``` The "OGGM" view: ``` graphics.plot_centerlines(gdir) ``` ## "Commitment run" This runs a simulation for 300 yrs under a random climate based on the 1985-2015 period (randomly picking years in this period): ``` # file identifier where the model output is saved file_id = '_bias_00' tasks.run_random_climate(gdir, y0=2000, # climate centered around 2000 (1985-2015) unique_samples=True, # see documentation (type `tasks.run_random_climate?` in a cell) nyears=300, # simulation time output_filesuffix=file_id, # an identifier for the output file to read it later store_monthly_step=True, # the default is to store only yearly values mb_elev_feedback='monthly', # the default is to change the MB every year seed=0); # this is for predictability of randomness - it's not necessary though ``` ## Let's analyse the simulation first Here, we plot the length of the glacier ("length_m"), its volume ("volume_m3"), and the change in its water storage ("delta_water_m3") during the simulation: ``` df = read_run_results(gdir, filesuffix=file_id) fig, axes = plt.subplots(3, 1, figsize=(16, 9), sharex=True) for ax, var in zip(axes, df.columns): ax.plot(df.index, df[var], color='k') ax.set_title(var) ax.vlines(75, ymin=df[var].min(), ymax=df[var].max(), color='r') ax.set_xlim(df.index.min(), df.index.max()) ax.set_xlabel('Time / (yr)'); ``` The glacier length and volume decrease during the first ~75 years of the simulation - this is the **glacier retreat** phase. Afterwards, both length and volume oscillate around a more or less constant value indicating that the glacier has **reached equilibrium**. 
The monthly volume storage **change** ("delta_water_m3"), the glacier volume change per time step in units of m$^\text{3}$ of water per month, isn't very informative - we will compute annual sums below, but first let's introduce the concept of **peak water** of [Matthias Huss](https://vaw.ethz.ch/personen/person-detail.html?persid=96677). Peak water is the point in time when glacier melt supply reaches its maximum, i.e. when the maximum runoff occurs. After peak water, annual runoff sums from glaciers will be steadily decreasing, which might cause problems with water availability. [Here](https://www.nature.com/articles/s41558-017-0049-x) is a recently published paper of Huss's team on the occurrence of peak water for glaciers worldwide.

<br>

<img src="https://media.springernature.com/full/springer-static/image/art%3A10.1038%2Fs41558-017-0049-x/MediaObjects/41558_2017_49_Fig1_HTML.jpg" width="60%">

Now, let's compute **annual sums** of glacier water volume change:

```
annual = df.delta_water_m3.groupby(df.index.astype(int)).sum() / 1e6
ax = annual.plot(label='annual average');
annual.rolling(30, min_periods=30, center=True).mean().plot(ax=ax, label='30 years rolling average');
ax.set_title('Glacier water volume change / (10$^6$ m$^3$)')
ax.set_xlabel('Time / (yr)')
plt.axhline(y=0, color='k', ls=':')
plt.legend(frameon=False);
```

So the glacier is losing mass in the retreat phase, then the mass is more or less constant in the equilibrium phase. This illustrates well that **glaciers in equilibrium are not net water resources**: in the course of the year they gain as much mass as they release. In this commitment run, Hintereisferner is already at "peak water": the water volume change is most negative at the beginning of the simulation, i.e. now in the current state of the glacier.
Next, let's look at the annual cycle as well: ``` fig, ax = plt.subplots(1, 1, figsize=(9, 6)) dfr = df.loc[:30] dfr = dfr.groupby(dfr.month).mean() / 1e6 dfr.delta_water_m3.plot(ax=ax, label='Storage change in retreat phase'); dfe = df.loc[270:] dfe = dfe.groupby(dfe.month).mean() / 1e6 dfe.delta_water_m3.plot(ax=ax, label='Storage change at equilibrium'); ax.set_xticks(np.arange(1, 13)) ax.set_xticklabels(list(calendar.month_abbr)[1:]) ax.set_title('Annual cycle: glacier water volume / (10$^6$ m$^3$)') ax.set_xlabel('') plt.legend(frameon=False); ``` The annual cycle in the retreat phase shows a more negative signal in the ablation season compared to the equilibrium phase. Integrating these two curves gives the yearly average net water volume storage: ``` print('Average yearly water storage change in the retreat phase: {:.2f} x 10^6 m^3'.format(np.trapz(dfr.delta_water_m3))) print('Average yearly water storage change in the equilibrium phase: {:.2f} x 10^6 m^3'.format(np.trapz(dfe.delta_water_m3))) ``` The water storage in the equilibrium phase is two orders of magnitude smaller than in the retreat phase. In a "true" equilibrium (never reached in reality), the net water storage change should be exactly zero. ## Let's try to put this in the climate context ``` # this function computes monthly average temperatures and precipitation during the period of interest, # i.e. 
during 1985-2015 dfc = compute_climate_statistics(gdir) # append water volume change: convert back to m^3 dfc['delta_water_m3'] = dfe.delta_water_m3 * 1e6 fig, axes = plt.subplots(3, 1, figsize=(9, 9), sharex=True) for ax, var in zip(axes, dfc.columns): ax.plot(dfc.index, dfc[var], color='k') ax.set_title(var) ax.set_xlim(dfc.index.min(), dfc.index.max()) ax.set_xticks(np.arange(1, 13)) ax.set_xticklabels(list(calendar.month_abbr)[1:]) ax.hlines(0, xmin=dfc.index.min(), xmax=dfc.index.max(), color='r') ``` **What does this graph show us?** For Hintereisferner this is quite easy: it melts in summer, accumulates in winter. The glacier is a source of water in summer as it releases the water accumulated in winter. Other climates, e.g. tropical climates, are more complex. ## Run another simulation with a temperature bias We will run another simulation, but this time we apply a negative temperature bias, i.e. we force the glacier evolution with a colder climate. ``` file_id = '_bias_expt' temp_bias = -0.8 # temperature bias to be applied in climate. 
<0 = cold bias, >0 = warm bias tasks.run_random_climate(gdir, y0=2000, # climate centered around 2000 (1985-2015) unique_samples=True, # see documentation (type `tasks.run_random_climate?` in a cell) nyears=300, # simulation time temperature_bias=temp_bias, # add a temperature bias output_filesuffix=file_id, # an identifier for the output file to read it later store_monthly_step=True, # the default is to store only yearly values mb_elev_feedback='monthly', # the default is to change the MB every year seed=0); # this is for predictability of randomness - it's not necessary though ``` Let's see how the results differ from the commitment simulation (recall, climate based on the 1985-2015 period): ``` df_bias = read_run_results(gdir, filesuffix=file_id) fig, axes = plt.subplots(3, 1, figsize=(9, 9), sharex=True) for ax, var in zip(axes, df.columns): if var == 'delta_water_m3': ax.plot(df.index, df[var].rolling(30*12, min_periods=30, center=True).mean(), label='Commitment') ax.plot(df_bias.index, df_bias[var].rolling(30*12, min_periods=30, center=True).mean(), label='{}°C'.format(temp_bias)) else: ax.plot(df.index, df[var], label='Commitment') ax.plot(df_bias.index, df_bias[var], label='{}°C'.format(temp_bias)) ax.set_title(var) axes[0].legend(frameon=False); ``` **What does this graph show us**? If forced with a colder climate, the glacier loses significantly less mass compared to the commitment run based on the climate between 1985-2015. It is worth noting that in both cases the retreat phase is about ~75 years - the difference is that the colder climate leads to an equilibrium state with an approximately tripled glacier extent compared to the commitment climate. For Hintereisferner, a temperature bias of $-0.8^{\circ}$C is not enough to maintain the "present" glacier geometry, i.e. to keep current length and volume constant. 
Note that "present" refers to the date of the [Randolph Glacier Inventory (RGI)](https://www.glims.org/RGI/) outline: ``` print('Year of "present" glacier geometry: {}'.format(gdir.rgi_date)) ``` **Exercise:** Try to find out how much colder it has to be for the glacier to keep its present geometry, i.e. to keep length and volume constant. *Hint:* Vary the parameter ``temp_bias``. It is worth noting that in a colder climate, the glacier is much bigger, but the annual water storage change ("delta_water_m3") is very similar to the small glacier! How is this possible? Let's see: ``` fig, ax = plt.subplots(1, 1, figsize=(9, 6)) dfsb = df_bias.loc[250:] dfsb = dfsb.groupby(dfsb.month).mean() / 1e6 dfe.delta_water_m3.plot(ax=ax, label='Storage change: Commitment'); dfsb.delta_water_m3.plot(ax=ax, label='Storage change: {:.1f}°C'.format(temp_bias)); ax.set_xticks(np.arange(1, 13)) ax.set_xticklabels(list(calendar.month_abbr)[1:]) ax.set_title('Annual cycle: glacier water volume / (10$^6$ m$^3$)') ax.set_xlabel('') plt.legend(frameon=False); ``` Larger glaciers are thicker and higher, they accumulate more mass in the accumulation season and their mass turnover is larger. In the ablation season, they reach farther down valley and their ablation area is larger, thus more melt occurs. However, the annual average is close to zero: recall that **a glacier in equilibrium gains as much mass as it releases** and is therefore a net zero water reservoir on the annual scale. This holds true for both scenarios, the commitment run and the colder climate scenario. **Question:** What does this imply for the water availability in a catchment? Do the two scenarios differ in seasonality? Can you think of a catchment where larger/smaller glaciers can be beneficial/harmful considering their water supply? ## Take home points - Peak water is the point in time when glacier melt supply reaches its maximum, i.e. 
when the maximum runoff occurs - Glaciers in equilibrium are not net water resources: they gain as much mass as they release ## What's next? [Back to table of contents](welcome.ipynb)
github_jupyter
``` %pylab inline from random import shuffle class Slide(): def __init__(self, photos, tags): self.photos = photos self.tags = tags def __repr__(self): return 'Tags: {} (IDS: {})'.format(self.tags, ", ".join([str(x.id) for x in self.photos])) class Photo(): def __init__(self, mode, tags, id): self.mode = mode self.tags = tags self.id = id def __repr__(self): return "ID: {} Mode: {} Tags: {}".format(self.id, self.mode, self.tags) def readlines(file): with open(file) as fp: lines = fp.readlines() return [x.rstrip() for x in lines][1:] def lineToPhoto(string, i): mode = string[0:1] tagCount = string[2:3] tags = string[4:].split(' ') return Photo(mode, tags, i) def parseToPhotos(lines): return [lineToPhoto(lines[i], i) for i in range(len(lines))] def getMaxIndex(A): n = len(A) q = np.argmax(A) row = q // n col = q - row*n return (row, col) def createVertMatrix(verticals): n = len(verticals) A = sparse.csc_matrix((n,n)) for i in range(n): for j in range(n): if i is j: continue photoA = verticals[i] photoB = verticals[j] tags = set(photoA.tags + photoB.tags) A[i,j] = max(len(tags), A[i,j]) return A def createSlidesFromVerticalImages(verticals): slides = [] photosInSlides = [] A = createVertMatrix(verticals) k = len(verticals) // 2 for _ in range(k): y, x = getMaxIndex(A) if verticals[x] not in photosInSlides and verticals[y] not in photosInSlides: photos = [verticals[x], verticals[y]] photosInSlides.append(verticals[x]) photosInSlides.append(verticals[y]) tags = list(set(photos[0].tags + photos[1].tags)) slide = Slide(photos, tags) slides.append(slide) A[x,y] = A[y,x] = -1 return slides def fileToSlides(filepath): lines = readlines(filepath) photos = parseToPhotos(lines) verticals = [x for x in photos if x.mode == 'V'] horizontals = [x for x in photos if x.mode == 'H'] return createSlidesFromVerticalImages(verticals) + [Slide([x], x.tags) for x in horizontals] def transitionMatrix(slides): n = len(slides) A = sparse.csc_matrix((n,n)) for i in range(n): for j in range(n): 
if i is j: continue slideA = slides[i] slideB = slides[j] tags = set(slideA.tags).intersection(set(slideB.tags)) A[i,j] = max(len(tags), A[i,j]) return A def transitionMatrixToSlideshow(A, slides): slideshow = [] (r, c) = getMaxIndex(A) slideshow.append(slides[r]) slideshow.append(slides[c]) print(slideshow) A[r,c] = A[c,r] = -1 prevIndex = r while len(slideshow) < len(slides): c = np.argmax(A[prevIndex:prevIndex+1,:]) if slides[c] not in slideshow: slideshow.append(slides[c]) prevIndex = c A[prevIndex,c] = A[c,prevIndex] = -1 return slideshow def sToFile(s, file): result = [] result.append(len(s)) for r in s: res = "" for p in r.photos: res += str(p.id) + ' ' result.append(res.rstrip()) f = open(file, "w") f.write("\n".join([str(x) for x in result])) f.close() def fileToSlideshow(file, outfile): slides = fileToSlides(file) A = transitionMatrix(slides) A -= np.eye(len(A))*(1000) S = transitionMatrixToSlideshow(A, slides) sToFile(S, outfile) s = fileToSlideshow('b_lovely_landscapes.txt', 'B.txt') from scipy import * A = sparse.csc_matrix((80000,80000)) A[1:10] = 1 A[2,2] ```
github_jupyter
# 302 Classification View more, visit my tutorial page: https://morvanzhou.github.io/tutorials/ My Youtube Channel: https://www.youtube.com/user/MorvanZhou Dependencies: * torch: 0.1.11 * matplotlib ``` import torch from torch.autograd import Variable import torch.nn.functional as F import matplotlib.pyplot as plt %matplotlib inline torch.manual_seed(1) # reproducible # make fake data n_data = torch.ones(100, 2) x0 = torch.normal(2*n_data, 1) # class0 x data (tensor), shape=(100, 2) y0 = torch.zeros(100) # class0 y data (tensor), shape=(100, 1) x1 = torch.normal(-2*n_data, 1) # class1 x data (tensor), shape=(100, 2) y1 = torch.ones(100) # class1 y data (tensor), shape=(100, 1) x = torch.cat((x0, x1), 0).type(torch.FloatTensor) # shape (200, 2) FloatTensor = 32-bit floating y = torch.cat((y0, y1), ).type(torch.LongTensor) # shape (200,) LongTensor = 64-bit integer # torch can only train on Variable, so convert them to Variable x, y = Variable(x), Variable(y) plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn') plt.show() class Net(torch.nn.Module): def __init__(self, n_feature, n_hidden, n_output): super(Net, self).__init__() self.hidden = torch.nn.Linear(n_feature, n_hidden) # hidden layer self.out = torch.nn.Linear(n_hidden, n_output) # output layer def forward(self, x): x = F.relu(self.hidden(x)) # activation function for hidden layer x = self.out(x) return x net = Net(n_feature=2, n_hidden=10, n_output=2) # define the network print(net) # net architecture # Loss and Optimizer # Softmax is internally computed. # Set parameters to be updated. optimizer = torch.optim.SGD(net.parameters(), lr=0.02) loss_func = torch.nn.CrossEntropyLoss() # the target label is NOT an one-hotted plt.ion() # something about plotting for t in range(100): out = net(x) # input x and predict based on x loss = loss_func(out, y) # must be (1. nn output, 2. 
target), the target label is NOT one-hotted optimizer.zero_grad() # clear gradients for next train loss.backward() # backpropagation, compute gradients optimizer.step() # apply gradients if t % 10 == 0 or t in [3, 6]: # plot and show learning process plt.cla() _, prediction = torch.max(F.softmax(out), 1) pred_y = prediction.data.numpy().squeeze() target_y = y.data.numpy() plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn') accuracy = sum(pred_y == target_y)/200. plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'}) plt.show() plt.pause(0.1) plt.ioff() ```
github_jupyter
![](https://i.imgur.com/eBRPvWB.png) # Practical PyTorch: Translation with a Sequence to Sequence Network and Attention In this project we will be teaching a neural network to translate from French to English. ``` [KEY: > input, = target, < output] > il est en train de peindre un tableau . = he is painting a picture . < he is painting a picture . > pourquoi ne pas essayer ce vin delicieux ? = why not try that delicious wine ? < why not try that delicious wine ? > elle n est pas poete mais romanciere . = she is not a poet but a novelist . < she not not a poet but a novelist . > vous etes trop maigre . = you re too skinny . < you re all alone . ``` ... to varying degrees of success. This is made possible by the simple but powerful idea of the [sequence to sequence network](http://arxiv.org/abs/1409.3215), in which two recurrent neural networks work together to transform one sequence to another. An encoder network condenses an input sequence into a single vector, and a decoder network unfolds that vector into a new sequence. To improve upon this model we'll use an [attention mechanism](https://arxiv.org/abs/1409.0473), which lets the decoder learn to focus over a specific range of the input sequence. # Sequence to Sequence Learning A [Sequence to Sequence network](http://arxiv.org/abs/1409.3215), or seq2seq network, or [Encoder Decoder network](https://arxiv.org/pdf/1406.1078v3.pdf), is a model consisting of two separate RNNs called the **encoder** and **decoder**. The encoder reads an input sequence one item at a time, and outputs a vector at each step. The final output of the encoder is kept as the **context** vector. The decoder uses this context vector to produce a sequence of outputs one step at a time. ![](https://i.imgur.com/tVtHhNp.png) When using a single RNN, there is a one-to-one relationship between inputs and outputs. We would quickly run into problems with different sequence orders and lengths that are common during translation. 
Consider the simple sentence "Je ne suis pas le chat noir" &rarr; "I am not the black cat". Many of the words have a pretty direct translation, like "chat" &rarr; "cat". However the differing grammars cause words to be in different orders, e.g. "chat noir" and "black cat". There is also the "ne ... pas" &rarr; "not" construction that makes the two sentences have different lengths.

With the seq2seq model, by encoding many inputs into one vector, and decoding from one vector into many outputs, we are freed from the constraints of sequence order and length. The encoded sequence is represented by a single vector, a single point in some N dimensional space of sequences. In an ideal case, this point can be considered the "meaning" of the sequence.

This idea can be extended beyond sequences. Image captioning tasks take an [image as input, and output a description](https://arxiv.org/abs/1411.4555) of the image (img2seq). Some image generation tasks take a [description as input and output a generated image](https://arxiv.org/abs/1511.02793) (seq2img). These models can be referred to more generally as "encoder decoder" networks.

## The Attention Mechanism

The fixed-length vector carries the burden of encoding the entire "meaning" of the input sequence, no matter how long that may be. With all the variance in language, this is a very hard problem. Imagine two nearly identical sentences, twenty words long, with only one word different. Both the encoders and decoders must be nuanced enough to represent that change as a very slightly different point in space.

The **attention mechanism** [introduced by Bahdanau et al.](https://arxiv.org/abs/1409.0473) addresses this by giving the decoder a way to "pay attention" to parts of the input, rather than relying on a single vector. For every step the decoder can select a different part of the input sentence to consider.
![](https://i.imgur.com/5y6SCvU.png) Attention is calculated using the current hidden state and each encoder output, resulting in a vector the same size as the input sequence, called the *attention weights*. These weights are multiplied by the encoder outputs to create a weighted sum of encoder outputs, which is called the *context* vector. The context vector and hidden state are used to predict the next output element. ![](https://i.imgur.com/K1qMPxs.png) # Requirements You will need [PyTorch](http://pytorch.org/) to build and train the models, and [matplotlib](https://matplotlib.org/) for plotting training and visualizing attention outputs later. The rest are builtin Python libraries. ``` import unicodedata import string import re import random import time import datetime import math import socket hostname = socket.gethostname() import torch import torch.nn as nn from torch.autograd import Variable from torch import optim import torch.nn.functional as F from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence#, masked_cross_entropy from masked_cross_entropy import * import matplotlib.pyplot as plt import matplotlib.ticker as ticker import numpy as np %matplotlib inline ``` Here we will also define a constant to decide whether to use the GPU (with CUDA specifically) or the CPU. **If you don't have a GPU, set this to `False`**. Later when we create tensors, this variable will be used to decide whether we keep them on CPU or move them to GPU. ``` USE_CUDA = False #True ``` ## Loading data files The data for this project is a set of many thousands of English to French translation pairs. 
[This question on Open Data Stack Exchange](http://opendata.stackexchange.com/questions/3888/dataset-of-sentences-translated-into-many-languages) pointed me to the open translation site http://tatoeba.org/ which has downloads available at http://tatoeba.org/eng/downloads - and better yet, someone did the extra work of splitting language pairs into individual text files here: http://www.manythings.org/anki/ The English to French pairs are too big to include in the repo, so download `fra-eng.zip`, extract the text file in there, and rename it to `data/eng-fra.txt` before continuing (for some reason the zipfile is named backwards). The file is a tab separated list of translation pairs: ``` I am cold. Je suis froid. ``` Similar to the character encoding used in the character-level RNN tutorials, we will be representing each word in a language as a one-hot vector, or giant vector of zeros except for a single one (at the index of the word). Compared to the dozens of characters that might exist in a language, there are many many more words, so the encoding vector is much larger. We will however cheat a bit and trim the data to only use a few thousand words per language. ### Indexing words We'll need a unique index per word to use as the inputs and targets of the networks later. To keep track of all this we will use a helper class called `Lang` which has word &rarr; index (`word2index`) and index &rarr; word (`index2word`) dictionaries, as well as a count of each word (`word2count`). This class includes a function `trim(min_count)` to remove rare words once they are all counted. 
``` PAD_token = 0 SOS_token = 1 EOS_token = 2 class Lang: def __init__(self, name): self.name = name self.trimmed = False self.word2index = {} self.word2count = {} self.index2word = {0: "PAD", 1: "SOS", 2: "EOS"} self.n_words = 3 # Count default tokens def index_words(self, sentence): for word in sentence.split(' '): self.index_word(word) def index_word(self, word): if word not in self.word2index: self.word2index[word] = self.n_words self.word2count[word] = 1 self.index2word[self.n_words] = word self.n_words += 1 else: self.word2count[word] += 1 # Remove words below a certain count threshold def trim(self, min_count): if self.trimmed: return self.trimmed = True keep_words = [] for k, v in self.word2count.items(): if v >= min_count: keep_words.append(k) print('keep_words %s / %s = %.4f' % ( len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index) )) # Reinitialize dictionaries self.word2index = {} self.word2count = {} self.index2word = {0: "PAD", 1: "SOS", 2: "EOS"} self.n_words = 3 # Count default tokens for word in keep_words: self.index_word(word) ``` ### Reading and decoding files The files are all in Unicode, to simplify we will turn Unicode characters to ASCII, make everything lowercase, and trim most punctuation. ``` # Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427 def unicode_to_ascii(s): return ''.join( c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' ) # Lowercase, trim, and remove non-letter characters def normalize_string(s): s = unicode_to_ascii(s.lower().strip()) s = re.sub(r"([,.!?])", r" \1 ", s) s = re.sub(r"[^a-zA-Z,.!?]+", r" ", s) s = re.sub(r"\s+", r" ", s).strip() return s ``` To read the data file we will split the file into lines, and then split lines into pairs. The files are all English &rarr; Other Language, so if we want to translate from Other Language &rarr; English I added the `reverse` flag to reverse the pairs. 
``` def read_langs(lang1, lang2, reverse=False): print("Reading lines...") # Read the file and split into lines # filename = '../data/%s-%s.txt' % (lang1, lang2) filename = '../%s-%s.txt' % (lang1, lang2) lines = open(filename).read().strip().split('\n') # Split every line into pairs and normalize pairs = [[normalize_string(s) for s in l.split('\t')] for l in lines] # Reverse pairs, make Lang instances if reverse: pairs = [list(reversed(p)) for p in pairs] input_lang = Lang(lang2) output_lang = Lang(lang1) else: input_lang = Lang(lang1) output_lang = Lang(lang2) return input_lang, output_lang, pairs MIN_LENGTH = 3 MAX_LENGTH = 25 def filter_pairs(pairs): filtered_pairs = [] for pair in pairs: if len(pair[0]) >= MIN_LENGTH and len(pair[0]) <= MAX_LENGTH \ and len(pair[1]) >= MIN_LENGTH and len(pair[1]) <= MAX_LENGTH: filtered_pairs.append(pair) return filtered_pairs ``` The full process for preparing the data is: * Read text file and split into lines * Split lines into pairs and normalize * Filter to pairs of a certain length * Make word lists from sentences in pairs ``` def prepare_data(lang1_name, lang2_name, reverse=False): input_lang, output_lang, pairs = read_langs(lang1_name, lang2_name, reverse) print("Read %d sentence pairs" % len(pairs)) pairs = filter_pairs(pairs) print("Filtered to %d pairs" % len(pairs)) print("Indexing words...") for pair in pairs: input_lang.index_words(pair[0]) output_lang.index_words(pair[1]) print('Indexed %d words in input language, %d words in output' % (input_lang.n_words, output_lang.n_words)) return input_lang, output_lang, pairs input_lang, output_lang, pairs = prepare_data('eng', 'fra', True) ``` ### Filtering vocabularies To get something that trains in under an hour, we'll trim the data set a bit. 
First we will use the `trim` function on each language (defined above) to only include words that are repeated a certain amount of times through the dataset (this softens the difficulty of learning a correct translation for words that don't appear often). ``` MIN_COUNT = 5 input_lang.trim(MIN_COUNT) output_lang.trim(MIN_COUNT) ``` ### Filtering pairs Now we will go back to the set of all sentence pairs and remove those with unknown words. ``` keep_pairs = [] for pair in pairs: input_sentence = pair[0] output_sentence = pair[1] keep_input = True keep_output = True for word in input_sentence.split(' '): if word not in input_lang.word2index: keep_input = False break for word in output_sentence.split(' '): if word not in output_lang.word2index: keep_output = False break # Remove if pair doesn't match input and output conditions if keep_input and keep_output: keep_pairs.append(pair) print("Trimmed from %d pairs to %d, %.4f of total" % (len(pairs), len(keep_pairs), len(keep_pairs) / len(pairs))) pairs = keep_pairs ``` ## Turning training data into Tensors To train we need to turn the sentences into something the neural network can understand, which of course means numbers. Each sentence will be split into words and turned into a `LongTensor` which represents the index (from the Lang indexes made earlier) of each word. While creating these tensors we will also append the EOS token to signal that the sentence is over. ![](https://i.imgur.com/LzocpGH.png) ``` # Return a list of indexes, one for each word in the sentence, plus EOS def indexes_from_sentence(lang, sentence): return [lang.word2index[word] for word in sentence.split(' ')] + [EOS_token] ``` We can make better use of the GPU by training on batches of many sequences at once, but doing so brings up the question of how to deal with sequences of varying lengths. The simple solution is to "pad" the shorter sentences with some padding symbol (in this case `0`), and ignore these padded spots when calculating the loss. 
![](https://i.imgur.com/gGlkEEF.png)

```
# Pad a sequence out to max_length with the PAD symbol.
# NOTE(review): `seq += ...` extends the caller's list in place as well as
# returning it; callers below pass freshly built lists, but confirm before
# reusing this on a shared sequence.
def pad_seq(seq, max_length):
    seq += [PAD_token for i in range(max_length - len(seq))]
    return seq
```

To create a Variable for a full batch of inputs (and targets) we get a random sample of sequences and pad them all to the length of the longest sequence. We'll keep track of the lengths of each batch in order to un-pad later. Initializing a `LongTensor` with an array (batches) of arrays (sequences) gives us a `(batch_size x max_len)` tensor - selecting the first dimension gives you a single batch, which is a full sequence. When training the model we'll want a single time step at once, so we'll transpose to `(max_len x batch_size)`. Now selecting along the first dimension returns a single time step across batches. ![](https://i.imgur.com/nBxTG3v.png)

```
def random_batch(batch_size):
    """Sample `batch_size` random training pairs and return padded
    (max_len x batch_size) input/target LongTensor Variables, plus the
    unpadded length of every sequence in each batch."""
    input_seqs = []
    target_seqs = []

    # Choose random pairs
    for i in range(batch_size):
        pair = random.choice(pairs)
        input_seqs.append(indexes_from_sentence(input_lang, pair[0]))
        target_seqs.append(indexes_from_sentence(output_lang, pair[1]))

    # Zip into pairs, sort by length (descending), unzip
    # (pack_padded_sequence in the encoder expects sequences ordered by
    # decreasing length, so the sort here is load-bearing)
    seq_pairs = sorted(zip(input_seqs, target_seqs), key=lambda p: len(p[0]), reverse=True)
    input_seqs, target_seqs = zip(*seq_pairs)

    # For input and target sequences, get array of lengths and pad with 0s to max length
    input_lengths = [len(s) for s in input_seqs]
    input_padded = [pad_seq(s, max(input_lengths)) for s in input_seqs]
    target_lengths = [len(s) for s in target_seqs]
    target_padded = [pad_seq(s, max(target_lengths)) for s in target_seqs]

    # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
    input_var = Variable(torch.LongTensor(input_padded)).transpose(0, 1)
    target_var = Variable(torch.LongTensor(target_padded)).transpose(0, 1)

    if USE_CUDA:
        input_var = input_var.cuda()
        target_var = target_var.cuda()

    return input_var, input_lengths, target_var, target_lengths
```

We can test this to see
that it will return a `(max_len x batch_size)` tensor for input and target sentences, along with a corresponding list of batch lenghts for each (which we will use for masking later). ``` random_batch(2) ``` # Building the models ## The Encoder <img src="images/encoder-network.png" style="float: right" /> The encoder will take a batch of word sequences, a `LongTensor` of size `(max_len x batch_size)`, and output an encoding for each word, a `FloatTensor` of size `(max_len x batch_size x hidden_size)`. The word inputs are fed through an [embedding layer `nn.Embedding`](http://pytorch.org/docs/nn.html#embedding) to create an embedding for each word, with size `seq_len x hidden_size` (as if it was a batch of words). This is resized to `seq_len x 1 x hidden_size` to fit the expected input of the [GRU layer `nn.GRU`](http://pytorch.org/docs/nn.html#gru). The GRU will return both an output sequence of size `seq_len x hidden_size`. ``` class EncoderRNN(nn.Module): def __init__(self, input_size, hidden_size, n_layers=1, dropout=0.1): super(EncoderRNN, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.n_layers = n_layers self.dropout = dropout self.embedding = nn.Embedding(input_size, hidden_size) self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True) def forward(self, input_seqs, input_lengths, hidden=None): # Note: we run this all at once (over multiple batches of multiple sequences) embedded = self.embedding(input_seqs) packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lengths) outputs, hidden = self.gru(packed, hidden) outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(outputs) # unpack (back to padded) outputs = outputs[:, :, :self.hidden_size] + outputs[:, : ,self.hidden_size:] # Sum bidirectional outputs return outputs, hidden ``` ## Attention Decoder ### Interpreting the Bahdanau et al. 
model [Neural Machine Translation by Jointly Learning to Align and Translate](https://arxiv.org/abs/1409.0473) (Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio) introduced the idea of using attention for seq2seq translation. Each decoder output is conditioned on the previous outputs and some $\mathbf x$, where $\mathbf x$ consists of the current hidden state (which takes into account previous outputs) and the attention "context", which is calculated below. The function $g$ is a fully-connected layer with a nonlinear activation, which takes as input the values $y_{i-1}$, $s_i$, and $c_i$ concatenated. $$ p(y_i \mid \{y_1,...,y_{i-1}\},\mathbf{x}) = g(y_{i-1}, s_i, c_i) $$ The current hidden state $s_i$ is calculated by an RNN $f$ with the last hidden state $s_{i-1}$, last decoder output value $y_{i-1}$, and context vector $c_i$. In the code, the RNN will be a `nn.GRU` layer, the hidden state $s_i$ will be called `hidden`, the output $y_i$ called `output`, and context $c_i$ called `context`. $$ s_i = f(s_{i-1}, y_{i-1}, c_i) $$ The context vector $c_i$ is a weighted sum of all encoder outputs, where each weight $a_{ij}$ is the amount of "attention" paid to the corresponding encoder output $h_j$. $$ c_i = \sum_{j=1}^{T_x} a_{ij} h_j $$ ... where each weight $a_{ij}$ is a normalized (over all steps) attention "energy" $e_{ij}$ ... $$ a_{ij} = \dfrac{exp(e_{ij})}{\sum_{k=1}^{T} exp(e_{ik})} $$ ... where each attention energy is calculated with some function $a$ (such as another linear layer) using the last hidden state $s_{i-1}$ and that particular encoder output $h_j$: $$ e_{ij} = a(s_{i-1}, h_j) $$ ### Interpreting the Luong et al. models [Effective Approaches to Attention-based Neural Machine Translation](https://arxiv.org/abs/1508.04025) (Minh-Thang Luong, Hieu Pham, Christopher D. Manning) describe a few more attention models that offer improvements and simplifications. 
They describe a few "global attention" models, the distinction between them being the way the attention scores are calculated. The general form of the attention calculation relies on the target (decoder) side hidden state and corresponding source (encoder) side state, normalized over all states to get values summing to 1: $$ a_t(s) = align(h_t, \bar h_s) = \dfrac{exp(score(h_t, \bar h_s))}{\sum_{s'} exp(score(h_t, \bar h_{s'}))} $$ The specific "score" function that compares two states is either *dot*, a simple dot product between the states; *general*, a a dot product between the decoder hidden state and a linear transform of the encoder state; or *concat*, a dot product between a new parameter $v_a$ and a linear transform of the states concatenated together. $$ score(h_t, \bar h_s) = \begin{cases} h_t ^\top \bar h_s & dot \\ h_t ^\top \textbf{W}_a \bar h_s & general \\ v_a ^\top \textbf{W}_a [ h_t ; \bar h_s ] & concat \end{cases} $$ The modular definition of these scoring functions gives us an opportunity to build specific attention module that can switch between the different score methods. The input to this module is always the hidden state (of the decoder RNN) and set of encoder outputs. 
### Implementing an attention module ``` class Attn(nn.Module): def __init__(self, method, hidden_size): super(Attn, self).__init__() self.method = method self.hidden_size = hidden_size if self.method == 'general': self.attn = nn.Linear(self.hidden_size, hidden_size) elif self.method == 'concat': self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Parameter(torch.FloatTensor(1, hidden_size)) def forward(self, hidden, encoder_outputs): max_len = encoder_outputs.size(0) this_batch_size = encoder_outputs.size(1) # Create variable to store attention energies attn_energies = Variable(torch.zeros(this_batch_size, max_len)) # B x S if USE_CUDA: attn_energies = attn_energies.cuda() # For each batch of encoder outputs for b in range(this_batch_size): # Calculate energy for each encoder output for i in range(max_len): attn_energies[b, i] = self.score(hidden[:, b], encoder_outputs[i, b].unsqueeze(0)) # Normalize energies to weights in range 0 to 1, resize to 1 x B x S return F.softmax(attn_energies).unsqueeze(1) def score(self, hidden, encoder_output): if self.method == 'dot': energy = hidden.dot(encoder_output) return energy elif self.method == 'general': energy = self.attn(encoder_output) energy = hidden.dot(energy) return energy elif self.method == 'concat': energy = self.attn(torch.cat((hidden, encoder_output), 1)) energy = self.v.dot(energy) return energy ``` ### Implementing the Bahdanau et al. model In summary our decoder should consist of four main parts - an embedding layer turning an input word into a vector; a layer to calculate the attention energy per encoder output; a RNN layer; and an output layer. The decoder's inputs are the last RNN hidden state $s_{i-1}$, last output $y_{i-1}$, and all encoder outputs $h_*$. 
* embedding layer with inputs $y_{i-1}$ * `embedded = embedding(last_rnn_output)` * attention layer $a$ with inputs $(s_{i-1}, h_j)$ and outputs $e_{ij}$, normalized to create $a_{ij}$ * `attn_energies[j] = attn_layer(last_hidden, encoder_outputs[j])` * `attn_weights = normalize(attn_energies)` * context vector $c_i$ as an attention-weighted average of encoder outputs * `context = sum(attn_weights * encoder_outputs)` * RNN layer(s) $f$ with inputs $(s_{i-1}, y_{i-1}, c_i)$ and internal hidden state, outputting $s_i$ * `rnn_input = concat(embedded, context)` * `rnn_output, rnn_hidden = rnn(rnn_input, last_hidden)` * an output layer $g$ with inputs $(y_{i-1}, s_i, c_i)$, outputting $y_i$ * `output = out(embedded, rnn_output, context)` ``` class BahdanauAttnDecoderRNN(nn.Module): def __init__(self, hidden_size, output_size, n_layers=1, dropout_p=0.1): super(BahdanauAttnDecoderRNN, self).__init__() # Define parameters self.hidden_size = hidden_size self.output_size = output_size self.n_layers = n_layers self.dropout_p = dropout_p self.max_length = max_length # Define layers self.embedding = nn.Embedding(output_size, hidden_size) self.dropout = nn.Dropout(dropout_p) self.attn = Attn('concat', hidden_size) self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout_p) self.out = nn.Linear(hidden_size, output_size) def forward(self, word_input, last_hidden, encoder_outputs): # Note: we run this one step at a time # TODO: FIX BATCHING # Get the embedding of the current input word (last output word) word_embedded = self.embedding(word_input).view(1, 1, -1) # S=1 x B x N word_embedded = self.dropout(word_embedded) # Calculate attention weights and apply to encoder outputs attn_weights = self.attn(last_hidden[-1], encoder_outputs) context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # B x 1 x N context = context.transpose(0, 1) # 1 x B x N # Combine embedded input word and attended context, run through RNN rnn_input = torch.cat((word_embedded, context), 2) 
output, hidden = self.gru(rnn_input, last_hidden) # Final output layer output = output.squeeze(0) # B x N output = F.log_softmax(self.out(torch.cat((output, context), 1))) # Return final output, hidden state, and attention weights (for visualization) return output, hidden, attn_weights ``` Now we can build a decoder that plugs this Attn module in after the RNN to calculate attention weights, and apply those weights to the encoder outputs to get a context vector. ``` class LuongAttnDecoderRNN(nn.Module): def __init__(self, attn_model, hidden_size, output_size, n_layers=1, dropout=0.1): super(LuongAttnDecoderRNN, self).__init__() # Keep for reference self.attn_model = attn_model self.hidden_size = hidden_size self.output_size = output_size self.n_layers = n_layers self.dropout = dropout # Define layers self.embedding = nn.Embedding(output_size, hidden_size) self.embedding_dropout = nn.Dropout(dropout) self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout) self.concat = nn.Linear(hidden_size * 2, hidden_size) self.out = nn.Linear(hidden_size, output_size) # Choose attention model if attn_model != 'none': self.attn = Attn(attn_model, hidden_size) def forward(self, input_seq, last_hidden, encoder_outputs): # Note: we run this one step at a time # Get the embedding of the current input word (last output word) batch_size = input_seq.size(0) embedded = self.embedding(input_seq) embedded = self.embedding_dropout(embedded) embedded = embedded.view(1, batch_size, self.hidden_size) # S=1 x B x N # Get current hidden state from input word and last hidden state rnn_output, hidden = self.gru(embedded, last_hidden) # Calculate attention from current RNN state and all encoder outputs; # apply to encoder outputs to get weighted average attn_weights = self.attn(rnn_output, encoder_outputs) context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # B x S=1 x N # Attentional vector using the RNN hidden state and context vector # concatenated together (Luong eq. 
5) rnn_output = rnn_output.squeeze(0) # S=1 x B x N -> B x N context = context.squeeze(1) # B x S=1 x N -> B x N concat_input = torch.cat((rnn_output, context), 1) concat_output = F.tanh(self.concat(concat_input)) # Finally predict next token (Luong eq. 6, without softmax) output = self.out(concat_output) # Return final output, hidden state, and attention weights (for visualization) return output, hidden, attn_weights ``` ## Testing the models To make sure the encoder and decoder modules are working (and working together) we'll do a full test with a small batch. ``` small_batch_size = 3 input_batches, input_lengths, target_batches, target_lengths = random_batch(small_batch_size) print('input_batches', input_batches.size()) # (max_len x batch_size) print('target_batches', target_batches.size()) # (max_len x batch_size) ``` Create models with a small size (a good idea for eyeball inspection): ``` small_hidden_size = 8 small_n_layers = 2 encoder_test = EncoderRNN(input_lang.n_words, small_hidden_size, small_n_layers) decoder_test = LuongAttnDecoderRNN('general', small_hidden_size, output_lang.n_words, small_n_layers) if USE_CUDA: encoder_test.cuda() decoder_test.cuda() ``` To test the encoder, run the input batch through to get per-batch encoder outputs: ``` encoder_outputs, encoder_hidden = encoder_test(input_batches, input_lengths, None) print('encoder_outputs', encoder_outputs.size()) # max_len x batch_size x hidden_size print('encoder_hidden', encoder_hidden.size()) # n_layers * 2 x batch_size x hidden_size ``` Then starting with a SOS token, run word tokens through the decoder to get each next word token. Instead of doing this with the whole sequence, it is done one at a time, to support using it's own predictions to make the next prediction. This will be one time step at a time, but batched per time step. In order to get this to work for short padded sequences, the batch size is going to get smaller each time. 
``` max_target_length = max(target_lengths) # Prepare decoder input and outputs decoder_input = Variable(torch.LongTensor([SOS_token] * small_batch_size)) decoder_hidden = encoder_hidden[:decoder_test.n_layers] # Use last (forward) hidden state from encoder all_decoder_outputs = Variable(torch.zeros(max_target_length, small_batch_size, decoder_test.output_size)) if USE_CUDA: all_decoder_outputs = all_decoder_outputs.cuda() decoder_input = decoder_input.cuda() # Run through decoder one time step at a time for t in range(max_target_length): decoder_output, decoder_hidden, decoder_attn = decoder_test( decoder_input, decoder_hidden, encoder_outputs ) all_decoder_outputs[t] = decoder_output # Store this step's outputs decoder_input = target_batches[t] # Next input is current target # Test masked cross entropy loss loss = masked_cross_entropy( all_decoder_outputs.transpose(0, 1).contiguous(), target_batches.transpose(0, 1).contiguous(), target_lengths ) print('loss', loss.data[0]) ``` # Training ## Defining a training iteration To train we first run the input sentence through the encoder word by word, and keep track of every output and the latest hidden state. Next the decoder is given the last hidden state of the decoder as its first hidden state, and the `<SOS>` token as its first input. From there we iterate to predict a next token from the decoder. ### Teacher Forcing vs. Scheduled Sampling "Teacher Forcing", or maximum likelihood sampling, means using the real target outputs as each next input when training. The alternative is using the decoder's own guess as the next input. Using teacher forcing may cause the network to converge faster, but [when the trained network is exploited, it may exhibit instability](http://minds.jacobs-university.de/sites/default/files/uploads/papers/ESNTutorialRev.pdf). 
You can observe outputs of teacher-forced networks that read with coherent grammar but wander far from the correct translation - you could think of it as having learned how to listen to the teacher's instructions, without learning how to venture out on its own. The solution to the teacher-forcing "problem" is known as [Scheduled Sampling](https://arxiv.org/abs/1506.03099), which simply alternates between using the target values and predicted values when training. We will randomly choose to use teacher forcing with an if statement while training - sometimes we'll feed use real target as the input (ignoring the decoder's output), sometimes we'll use the decoder's output. ``` def train(input_batches, input_lengths, target_batches, target_lengths, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH): # Zero gradients of both optimizers encoder_optimizer.zero_grad() decoder_optimizer.zero_grad() loss = 0 # Added onto for each word # Run words through encoder encoder_outputs, encoder_hidden = encoder(input_batches, input_lengths, None) # Prepare input and output variables decoder_input = Variable(torch.LongTensor([SOS_token] * batch_size)) decoder_hidden = encoder_hidden[:decoder.n_layers] # Use last (forward) hidden state from encoder max_target_length = max(target_lengths) all_decoder_outputs = Variable(torch.zeros(max_target_length, batch_size, decoder.output_size)) # Move new Variables to CUDA if USE_CUDA: decoder_input = decoder_input.cuda() all_decoder_outputs = all_decoder_outputs.cuda() # Run through decoder one time step at a time for t in range(max_target_length): decoder_output, decoder_hidden, decoder_attn = decoder( decoder_input, decoder_hidden, encoder_outputs ) all_decoder_outputs[t] = decoder_output decoder_input = target_batches[t] # Next input is current target # Loss calculation and backpropagation loss = masked_cross_entropy( all_decoder_outputs.transpose(0, 1).contiguous(), # -> batch x seq 
target_batches.transpose(0, 1).contiguous(), # -> batch x seq target_lengths ) loss.backward() # Clip gradient norms ec = torch.nn.utils.clip_grad_norm(encoder.parameters(), clip) dc = torch.nn.utils.clip_grad_norm(decoder.parameters(), clip) # Update parameters with optimizers encoder_optimizer.step() decoder_optimizer.step() return loss.data[0], ec, dc ``` ## Running training With everything in place we can actually initialize a network and start training. To start, we initialize models, optimizers, a loss function (criterion), and set up variables for plotting and tracking progress: ``` # Configure models attn_model = 'dot' hidden_size = 500 n_layers = 2 dropout = 0.1 batch_size = 100 batch_size = 50 # Configure training/optimization clip = 50.0 teacher_forcing_ratio = 0.5 learning_rate = 0.0001 decoder_learning_ratio = 5.0 n_epochs = 50000 epoch = 0 plot_every = 20 print_every = 100 evaluate_every = 1000 # Initialize models encoder = EncoderRNN(input_lang.n_words, hidden_size, n_layers, dropout=dropout) decoder = LuongAttnDecoderRNN(attn_model, hidden_size, output_lang.n_words, n_layers, dropout=dropout) # Initialize optimizers and criterion encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate) decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio) criterion = nn.CrossEntropyLoss() # Move models to GPU if USE_CUDA: encoder.cuda() decoder.cuda() import sconce job = sconce.Job('seq2seq-translate', { 'attn_model': attn_model, 'n_layers': n_layers, 'dropout': dropout, 'hidden_size': hidden_size, 'learning_rate': learning_rate, 'clip': clip, 'teacher_forcing_ratio': teacher_forcing_ratio, 'decoder_learning_ratio': decoder_learning_ratio, }) job.plot_every = plot_every job.log_every = print_every # Keep track of time elapsed and running averages start = time.time() plot_losses = [] print_loss_total = 0 # Reset every print_every plot_loss_total = 0 # Reset every plot_every ``` Plus helper functions to print 
time elapsed and estimated time remaining, given the current time and progress. ``` def as_minutes(s): m = math.floor(s / 60) s -= m * 60 return '%dm %ds' % (m, s) def time_since(since, percent): now = time.time() s = now - since es = s / (percent) rs = es - s return '%s (- %s)' % (as_minutes(s), as_minutes(rs)) ``` # Evaluating the network Evaluation is mostly the same as training, but there are no targets. Instead we always feed the decoder's predictions back to itself. Every time it predicts a word, we add it to the output string. If it predicts the EOS token we stop there. We also store the decoder's attention outputs for each step to display later. ``` def evaluate(input_seq, max_length=MAX_LENGTH): input_lengths = [len(input_seq)] input_seqs = [indexes_from_sentence(input_lang, input_seq)] input_batches = Variable(torch.LongTensor(input_seqs), volatile=True).transpose(0, 1) if USE_CUDA: input_batches = input_batches.cuda() # Set to not-training mode to disable dropout encoder.train(False) decoder.train(False) # Run through encoder encoder_outputs, encoder_hidden = encoder(input_batches, input_lengths, None) # Create starting vectors for decoder decoder_input = Variable(torch.LongTensor([SOS_token]), volatile=True) # SOS decoder_hidden = encoder_hidden[:decoder.n_layers] # Use last (forward) hidden state from encoder if USE_CUDA: decoder_input = decoder_input.cuda() # Store output words and attention states decoded_words = [] decoder_attentions = torch.zeros(max_length + 1, max_length + 1) # Run through decoder for di in range(max_length): decoder_output, decoder_hidden, decoder_attention = decoder( decoder_input, decoder_hidden, encoder_outputs ) decoder_attentions[di,:decoder_attention.size(2)] += decoder_attention.squeeze(0).squeeze(0).cpu().data # Choose top word from output topv, topi = decoder_output.data.topk(1) ni = topi[0][0] if ni == EOS_token: decoded_words.append('<EOS>') break else: decoded_words.append(output_lang.index2word[ni]) # Next input is 
chosen word decoder_input = Variable(torch.LongTensor([ni])) if USE_CUDA: decoder_input = decoder_input.cuda() # Set back to training mode encoder.train(True) decoder.train(True) return decoded_words, decoder_attentions[:di+1, :len(encoder_outputs)] ``` We can evaluate random sentences from the training set and print out the input, target, and output to make some subjective quality judgements: ``` def evaluate_randomly(): [input_sentence, target_sentence] = random.choice(pairs) evaluate_and_show_attention(input_sentence, target_sentence) ``` # Visualizing attention A useful property of the attention mechanism is its highly interpretable outputs. Because it is used to weight specific encoder outputs of the input sequence, we can imagine looking where the network is focused most at each time step. You could simply run `plt.matshow(attentions)` to see attention output displayed as a matrix, with the columns being input steps and rows being output steps: ``` import io import torchvision from PIL import Image import visdom vis = visdom.Visdom() def show_plot_visdom(): buf = io.BytesIO() plt.savefig(buf) buf.seek(0) attn_win = 'attention (%s)' % hostname vis.image(torchvision.transforms.ToTensor()(Image.open(buf)), win=attn_win, opts={'title': attn_win}) ``` For a better viewing experience we will do the extra work of adding axes and labels: ``` def show_attention(input_sentence, output_words, attentions): # Set up figure with colorbar fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(attentions.numpy(), cmap='bone') fig.colorbar(cax) # Set up axes ax.set_xticklabels([''] + input_sentence.split(' ') + ['<EOS>'], rotation=90) ax.set_yticklabels([''] + output_words) # Show label at every tick ax.xaxis.set_major_locator(ticker.MultipleLocator(1)) ax.yaxis.set_major_locator(ticker.MultipleLocator(1)) show_plot_visdom() plt.show() plt.close() def evaluate_and_show_attention(input_sentence, target_sentence=None): output_words, attentions = evaluate(input_sentence) 
output_sentence = ' '.join(output_words) print('>', input_sentence) if target_sentence is not None: print('=', target_sentence) print('<', output_sentence) show_attention(input_sentence, output_words, attentions) # Show input, target, output text in visdom win = 'evaluted (%s)' % hostname text = '<p>&gt; %s</p><p>= %s</p><p>&lt; %s</p>' % (input_sentence, target_sentence, output_sentence) vis.text(text, win=win, opts={'title': win}) ``` # Putting it all together **TODO** Run `train_epochs` for `n_epochs` To actually train, we call the train function many times, printing a summary as we go. *Note:* If you're running this notebook you can **train, interrupt, evaluate, and come back to continue training**. Simply run the notebook starting from the following cell (running from the previous cell will reset the models). ``` # Begin! ecs = [] dcs = [] eca = 0 dca = 0 while epoch < n_epochs: epoch += 1 # Get training data for this cycle input_batches, input_lengths, target_batches, target_lengths = random_batch(batch_size) # Run the train function loss, ec, dc = train( input_batches, input_lengths, target_batches, target_lengths, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion ) # Keep track of loss print_loss_total += loss plot_loss_total += loss eca += ec dca += dc job.record(epoch, loss) if epoch % print_every == 0: print_loss_avg = print_loss_total / print_every print_loss_total = 0 print_summary = '%s (%d %d%%) %.4f' % (time_since(start, epoch / n_epochs), epoch, epoch / n_epochs * 100, print_loss_avg) print(print_summary) if epoch % evaluate_every == 0: evaluate_randomly() if epoch % plot_every == 0: plot_loss_avg = plot_loss_total / plot_every plot_losses.append(plot_loss_avg) plot_loss_total = 0 # TODO: Running average helper ecs.append(eca / plot_every) dcs.append(dca / plot_every) ecs_win = 'encoder grad (%s)' % hostname dcs_win = 'decoder grad (%s)' % hostname vis.line(np.array(ecs), win=ecs_win, opts={'title': ecs_win}) vis.line(np.array(dcs), 
win=dcs_win, opts={'title': dcs_win}) eca = 0 dca = 0 ``` ## Plotting training loss Plotting is done with matplotlib, using the array `plot_losses` that was created while training. ``` def show_plot(points): plt.figure() fig, ax = plt.subplots() loc = ticker.MultipleLocator(base=0.2) # put ticks at regular intervals ax.yaxis.set_major_locator(loc) plt.plot(points) show_plot(plot_losses) output_words, attentions = evaluate("je suis trop froid .") plt.matshow(attentions.numpy()) show_plot_visdom() evaluate_and_show_attention("elle a cinq ans de moins que moi .") evaluate_and_show_attention("elle est trop petit .") evaluate_and_show_attention("je ne crains pas de mourir .") evaluate_and_show_attention("c est un jeune directeur plein de talent .") evaluate_and_show_attention("est le chien vert aujourd hui ?") evaluate_and_show_attention("le chat me parle .") evaluate_and_show_attention("des centaines de personnes furent arretees ici .") evaluate_and_show_attention("des centaines de chiens furent arretees ici .") evaluate_and_show_attention("ce fromage est prepare a partir de lait de chevre .") ``` # Exercises * Try with a different dataset * Another language pair * Human &rarr; Machine (e.g. IOT commands) * Chat &rarr; Response * Question &rarr; Answer * Replace the embedding pre-trained word embeddings such as word2vec or GloVe * Try with more layers, more hidden units, and more sentences. Compare the training time and results. * If you use a translation file where pairs have two of the same phrase (`I am test \t I am test`), you can use this as an autoencoder. Try this: * Train as an autoencoder * Save only the Encoder network * Train a new Decoder for translation from there
github_jupyter
``` import panel as pn pn.extension() ``` The ``IntSlider`` widget allows selecting selecting an integer value within a set bounds using a slider. For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb). #### Parameters: For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb). ##### Core * **``start``** (int): The range's lower bound * **``end``** (int): The range's upper bound * **``step``** (int): The interval between values * **``value``** (int): The selected value as an int type * **``value_throttled``** (int): The selected value as a int type throttled until mouseup ##### Display * **``bar_color``** (color): Color of the slider bar as a hexadecimal RGB value * **``direction``** (str): Whether the slider should go from left to right ('ltr') or right to left ('rtl') * **``disabled``** (boolean): Whether the widget is editable * **``format``** (str, bokeh.models.TickFormatter): Formatter to apply to the slider value * **``name``** (str): The title of the widget * **``orientation``** (str): Whether the slider should be displayed in a 'horizontal' or 'vertical' orientation. 
* **``tooltips``** (boolean): Whether to display tooltips on the slider handle ___ ``` int_slider = pn.widgets.IntSlider(name='Integer Slider', start=0, end=8, step=2, value=4) int_slider ``` The ``IntSlider`` value is returned as a integer and can be accessed and set like any other widget: ``` int_slider.value ``` A custom format string or bokeh TickFormatter may be used to format the slider values: ``` from bokeh.models.formatters import PrintfTickFormatter str_format = pn.widgets.IntSlider(name='Rank', format='0o', start=0, end=100) tick_format = pn.widgets.IntSlider(name='Count', format=PrintfTickFormatter(format='%d ducks'), start=0, end=100) pn.Column(str_format, tick_format) ``` ### Controls The `IntSlider` widget exposes a number of options which can be changed from both Python and Javascript. Try out the effect of these parameters interactively: ``` pn.Row(int_slider.controls(jslink=True), int_slider) ```
github_jupyter
<a id=“title_ID”></a> # JWST Pipeline Validation Testing Notebook: Spec2, fringe, MIRI Instruments Affected: MIRI <h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#JWST-Pipeline-Validation-Testing-Notebook:-Spec2,-fringe,-MIRI" data-toc-modified-id="JWST-Pipeline-Validation-Testing-Notebook:-Spec2,-fringe,-MIRI-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>JWST Pipeline Validation Testing Notebook: Spec2, fringe, MIRI</a></span><ul class="toc-item"><li><span><a href="#Introduction-and-summary-of-test-being-run" data-toc-modified-id="Introduction-and-summary-of-test-being-run-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Introduction and summary of test being run</a></span></li><li><span><a href="#Documentation" data-toc-modified-id="Documentation-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Documentation</a></span></li><li><span><a href="#Data-used" data-toc-modified-id="Data-used-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Data used</a></span><ul class="toc-item"><li><span><a href="#Create-temporary-directory" data-toc-modified-id="Create-temporary-directory-1.3.1"><span class="toc-item-num">1.3.1&nbsp;&nbsp;</span>Create temporary directory</a></span></li><li><span><a href="#Set-up-import-statements" data-toc-modified-id="Set-up-import-statements-1.3.2"><span class="toc-item-num">1.3.2&nbsp;&nbsp;</span>Set up import statements</a></span></li><li><span><a href="#Print-pipeline-version-number" data-toc-modified-id="Print-pipeline-version-number-1.3.3"><span class="toc-item-num">1.3.3&nbsp;&nbsp;</span>Print pipeline version number</a></span></li><li><span><a href="#Read-in-data-from-artifactory" data-toc-modified-id="Read-in-data-from-artifactory-1.3.4"><span class="toc-item-num">1.3.4&nbsp;&nbsp;</span>Read in data from artifactory</a></span></li><li><span><a href="#Read-in-input-image-as-JWST-data-model" 
data-toc-modified-id="Read-in-input-image-as-JWST-data-model-1.3.5"><span class="toc-item-num">1.3.5&nbsp;&nbsp;</span>Read in input image as JWST data model</a></span></li><li><span><a href="#Run-output-of-calwebb_detector1-through-calwebb_spec2" data-toc-modified-id="Run-output-of-calwebb_detector1-through-calwebb_spec2-1.3.6"><span class="toc-item-num">1.3.6&nbsp;&nbsp;</span>Run output of calwebb_detector1 through calwebb_spec2</a></span></li><li><span><a href="#Create-image-models-of-rate-and-cal-files" data-toc-modified-id="Create-image-models-of-rate-and-cal-files-1.3.7"><span class="toc-item-num">1.3.7&nbsp;&nbsp;</span>Create image models of rate and cal files</a></span></li><li><span><a href="#Get-fringe-reference-file" data-toc-modified-id="Get-fringe-reference-file-1.3.8"><span class="toc-item-num">1.3.8&nbsp;&nbsp;</span>Get fringe reference file</a></span></li><li><span><a href="#Create-image-lists-for-looping" data-toc-modified-id="Create-image-lists-for-looping-1.3.9"><span class="toc-item-num">1.3.9&nbsp;&nbsp;</span>Create image lists for looping</a></span></li><li><span><a href="#Check-DQ-flagging" data-toc-modified-id="Check-DQ-flagging-1.3.10"><span class="toc-item-num">1.3.10&nbsp;&nbsp;</span>Check DQ flagging</a></span></li><li><span><a href="#Calculate-the-rate/cal-image-ratio" data-toc-modified-id="Calculate-the-rate/cal-image-ratio-1.3.11"><span class="toc-item-num">1.3.11&nbsp;&nbsp;</span>Calculate the rate/cal image ratio</a></span></li><li><span><a href="#Compare-fringe-reference-file-with-the-rate/cal-image-ratio-and-check-that-they-are-equal" data-toc-modified-id="Compare-fringe-reference-file-with-the-rate/cal-image-ratio-and-check-that-they-are-equal-1.3.12"><span class="toc-item-num">1.3.12&nbsp;&nbsp;</span>Compare fringe reference file with the rate/cal image ratio and check that they are equal</a></span></li></ul></li><li><span><a href="#About-this-Notebook" data-toc-modified-id="About-this-Notebook-1.4"><span 
class="toc-item-num">1.4&nbsp;&nbsp;</span>About this Notebook</a></span></li></ul></li></ul></div> ## Introduction and summary of test being run This notebook processes an image through calwebb_spec2 and examines the output of the fringe step. The steps are as follow: 1) Set up data path and directory and image file names. 2) Run output of calwebb_detector1 through the fringe step in calwebb_spec2. 3) Get fringe reference file from CRDS. 4) Compare the fringe reference file with the rate/cal image ratio and check that they are the same. ## Documentation The pipeline documentation can be found here: https://jwst-pipeline.readthedocs.io/en/latest/ The pipeline code is available on GitHub: https://github.com/spacetelescope/jwst ## Data used The data used in this test is a simulated MIRI image created using MIRISim. The documentation for MIRISim can be found here: https://wiki.miricle.org/bin/view/Public/MIRISim_Public? Author: Mike Engesser (Adapted from Tea Temim's calwebb-image1 notebook) ### Create temporary directory ``` # Create a temporary directory to hold notebook output, and change the working directory to that directory. 
from tempfile import TemporaryDirectory import os data_dir = TemporaryDirectory() os.chdir(data_dir.name) print(data_dir) ``` ### Set up import statements ``` from astropy.io import fits, ascii import pytest import numpy as np import numpy.ma as ma import jwst from jwst.pipeline import Detector1Pipeline, Image2Pipeline from jwst.datamodels import RampModel, ImageModel, dqflags from jwst.pipeline import calwebb_image2 from jwst.pipeline import calwebb_spec2 from ci_watson.artifactory_helpers import get_bigdata import crds import os # Specify CRDS locations and pmap os.environ['CRDS_SERVER_URL'] = 'https://jwst-crds.stsci.edu' os.environ['TEST_BIGDATA']='https://bytesalad.stsci.edu/artifactory/' ``` ### Print pipeline version number ``` jwst.__version__ ``` ### Read in data from artifactory ``` file1 = get_bigdata('jwst_validation_notebooks', 'validation_data', 'calwebb_spec2', 'spec2_miri_test', 'det_image_seq1_MIRIFUSHORT_12LONGexp1_rate.fits') file2 = get_bigdata('jwst_validation_notebooks', 'validation_data', 'calwebb_spec2', 'spec2_miri_test', 'det_image_seq1_MIRIFUSHORT_12MEDIUMexp1_rate.fits') file3 = get_bigdata('jwst_validation_notebooks', 'validation_data', 'calwebb_spec2', 'spec2_miri_test', 'det_image_seq1_MIRIFUSHORT_12SHORTexp1_rate.fits') file4 = get_bigdata('jwst_validation_notebooks', 'validation_data', 'calwebb_spec2', 'spec2_miri_test', 'det_image_seq1_MIRIFULONG_34LONGexp1_rate.fits') file5 = get_bigdata('jwst_validation_notebooks', 'validation_data', 'calwebb_spec2', 'spec2_miri_test', 'det_image_seq1_MIRIFULONG_34MEDIUMexp1_rate.fits') file6 = get_bigdata('jwst_validation_notebooks', 'validation_data', 'calwebb_spec2', 'spec2_miri_test', 'det_image_seq1_MIRIFULONG_34SHORTexp1_rate.fits') ``` ### Read in input image as JWST data model ``` im1 = ImageModel(file1) im2 = ImageModel(file2) im3 = ImageModel(file3) im4 = ImageModel(file4) im5 = ImageModel(file5) im6 = ImageModel(file6) ``` ### Run output of calwebb_detector1 through calwebb_spec2 ``` 
def run_spec2(im): """Creates and runs an instance of the JWST spec2 pipeline. """ spec2 = calwebb_spec2.Spec2Pipeline() spec2.bkg_subtract.skip = True spec2.assign_wcs.skip = False spec2.imprint_subtract.skip = True spec2.msa_flagging.skip = True spec2.extract_2d.skip = True spec2.wavecorr.skip = True spec2.srctype.skip = True spec2.straylight.skip = True spec2.fringe.skip = False spec2.pathloss.skip = True spec2.barshadow.skip = True spec2.photom.skip = True spec2.resample_spec.skip = True spec2.cube_build.skip = True spec2.extract_1d.skip = True spec2.flat_field.skip = True spec2.save_results = True spec2.run(im) run_spec2(im1) run_spec2(im2) run_spec2(im3) run_spec2(im4) run_spec2(im5) run_spec2(im6) ``` ### Create image models of rate and cal files ``` # Get names of cal files input_file1 = file1.replace('rate.fits', 'cal.fits') input_file2 = file2.replace('rate.fits', 'cal.fits') input_file3 = file3.replace('rate.fits', 'cal.fits') input_file4 = file4.replace('rate.fits', 'cal.fits') input_file5 = file5.replace('rate.fits', 'cal.fits') input_file6 = file6.replace('rate.fits', 'cal.fits') # create image models of rate and cal files im_cal1 = ImageModel(input_file1) im_cal2 = ImageModel(input_file2) im_cal3 = ImageModel(input_file3) im_cal4 = ImageModel(input_file4) im_cal5 = ImageModel(input_file5) im_cal6 = ImageModel(input_file6) im_rate1 = ImageModel(file1) im_rate2 = ImageModel(file2) im_rate3 = ImageModel(file3) im_rate4 = ImageModel(file4) im_rate5 = ImageModel(file5) im_rate6 = ImageModel(file6) ``` ### Get fringe reference file ``` fringereffile1 = im_cal1.meta.ref_file.fringe.name fringereffile2 = im_cal2.meta.ref_file.fringe.name fringereffile3 = im_cal3.meta.ref_file.fringe.name fringereffile4 = im_cal4.meta.ref_file.fringe.name fringereffile5 = im_cal5.meta.ref_file.fringe.name fringereffile6 = im_cal6.meta.ref_file.fringe.name print('fringe reference file', fringereffile1) print('fringe reference file', fringereffile2) print('fringe reference 
file', fringereffile3) print('fringe reference file', fringereffile4) print('fringe reference file', fringereffile5) print('fringe reference file', fringereffile6) # find location of file basename1 = crds.core.config.pop_crds_uri(fringereffile1) basename2 = crds.core.config.pop_crds_uri(fringereffile2) basename3 = crds.core.config.pop_crds_uri(fringereffile3) basename4 = crds.core.config.pop_crds_uri(fringereffile4) basename5 = crds.core.config.pop_crds_uri(fringereffile5) basename6 = crds.core.config.pop_crds_uri(fringereffile6) path1 = crds.locate_file(basename1, "jwst") path2 = crds.locate_file(basename2, "jwst") path3 = crds.locate_file(basename3, "jwst") path4 = crds.locate_file(basename4, "jwst") path5 = crds.locate_file(basename5, "jwst") path6 = crds.locate_file(basename6, "jwst") # open reference file fringe_im1 = ImageModel(path1) fringe_im2 = ImageModel(path2) fringe_im3 = ImageModel(path3) fringe_im4 = ImageModel(path4) fringe_im5 = ImageModel(path5) fringe_im6 = ImageModel(path6) ``` ### Create image lists for looping ``` file_list = [file1, file2, file3, file4, file5, file6] im_list = [im_cal1, im_cal2, im_cal3, im_cal4, im_cal5, im_cal6] rate_list = [im_rate1, im_rate2, im_rate3, im_rate4, im_rate5, im_rate6] fringe_list = [fringe_im1, fringe_im2, fringe_im3, fringe_im4, fringe_im5, fringe_im6] ``` ### Check DQ flagging Any pixel flagged as NON_SCIENCE should also be flagged as DO_NOT_USE. Check if this is in place in both the input reference file and for the output science file of the calwebb_image2 pipeline. If there are no assert errors, the test below passes. 
``` # Check if the output cal file is flagged properly # return DQ arrays for masking def check_dq_flags(im_cal, fringe_im): # Test that all pixels flagged with NON_SCIENCE are also flagged as DO_NOT_USE nonsciencearray = (im_cal.dq & dqflags.pixel['NON_SCIENCE'] > 0) badarray = (im_cal.dq & dqflags.pixel['DO_NOT_USE'] > 0) assert nonsciencearray.all() == badarray.all() # Test if the input reference file had the flags all set the same way nonsciencearray = (fringe_im.dq & dqflags.pixel['NON_SCIENCE'] > 0) badarray = (fringe_im.dq & dqflags.pixel['DO_NOT_USE'] > 0) assert nonsciencearray.all() == badarray.all() return badarray badarrays = [check_dq_flags(im, fringe) for im,fringe in zip(im_list, fringe_list)] ``` ### Calculate the rate/cal image ratio ``` def get_ratios(im, rate, mask): # compute and return the ratio of the rate and cal images im_mask = ma.masked_array(im.data, mask=mask) ratio = rate.data/im_mask.data return ratio ratios = [get_ratios(cal_im, rate_im, badarray) for cal_im, rate_im, badarray in zip(im_list, rate_list, badarrays)] ``` ### Compare fringe reference file with the rate/cal image ratio and check that they are equal The minimum and maximum values should be 1.0, implying all values are 1, and the images are equal. ``` def check_fringe(ratio_im, fringe_im): return ratio_im/fringe_im.data check_fringes = [check_fringe(ratio, fringe) for ratio, fringe in zip(ratios, fringe_list)] def check_min_max(check_fringe_im, im_name): print('Minimum and maximum values for {} image are: {:.2e} and {:.2e}'.format(im_name, np.nanmin(check_fringe_im), np.nanmax(check_fringe_im))) #assert np.nanmin(check_fringe_im) == 1.0 and np.nanmax(check_fringe_im) == 1.0 for im, name in zip(check_fringes, file_list): im_name = name.split('/')[-1] check_min_max(im, im_name) ``` <a id="about_ID"></a> ## About this Notebook **Author:** Mike Engesser, Science Support Analyst I, INS <br>**Updated On:** 08/30/2021
github_jupyter
## Compare models ``` import warnings, os, sys, glob, nltools, scipy, matplotlib warnings.filterwarnings("ignore", message="numpy.dtype size changed") import numpy as np import pandas as pd from scipy import stats as ss import matplotlib.pyplot as plt import seaborn as sns matplotlib.rcParams['pdf.fonttype'] = 42 sys.path.append('../Tools/') import FigureTools from nltools.stats import one_sample_permutation base_dir = '/project/3014018.02/analysis_mri/DataSharingCollection/' ``` ## Compare models (fig 2C) ``` first_bests_all = pd.read_csv(os.path.join(base_dir,'Results/1.Behavior-FitModels/First_bests_all.csv'), index_col=0) datUse = first_bests_all metric = 'AIC' model1s = ['GR','GA','IA'] model2 = 'MP_ppSOE' dat2 = datUse.loc[(datUse['model']==model2) & (datUse['SSE']>0),metric] print('Mean AIC for %s = %.2f'%(model2,np.mean(dat2))) for model1 in model1s: dat1 = datUse.loc[(datUse['model']==model1) & (datUse['SSE']>0),metric] stats = scipy.stats.ttest_rel(dat1,dat2) stats_nonparam = one_sample_permutation(np.subtract(dat1,dat2),n_permute=100000) print('Compare model %s, n = %i, mean AIC = %.2f, t = %.2f, p = %.4f, perm-p = %.4f'% (model1,len(dat1),np.mean(dat1),stats[0],stats[1],stats_nonparam['p'])) sns.set_context('notebook') metric = 'AIC' fig,ax = plt.subplots(1,1,figsize=[6,5],frameon=False) order = ['GR','GA_ppSOE','IA','MP_ppSOE'] sns.barplot(data=datUse.loc[datUse['SSE']>10],x='model',y=metric, alpha=1,errwidth=0,zorder=1,order=order,color='w',edgecolor='k',linewidth=1) sns.swarmplot(data=datUse.loc[datUse['SSE']>10],x='model',y=metric, alpha=.5,s=3,zorder=2,order=order,color='k') sns.barplot(data=datUse.loc[datUse['SSE']>10],x='model',y=metric, alpha=0,errwidth=1.5,capsize=.1,errcolor='k',zorder=3, order=order) plt.xticks(range(4),['GR','GA','IA','MS']) plt.xlabel('Model') FigureTools.add_sig_markers(ax,relationships=[[2,3,stats[1]],[1,3,0],[0,3,0]], ystart=250,distScale=1.2) plt.ylim([-60,470]) 
plt.savefig(os.path.join(base_dir,'Results/1.Behavior-FitModels/ModelComparisons.pdf'), transparent=True,bbox_inches='tight') ``` ## Plot participants in parameter space ``` sns.lmplot(data = datUse.loc[(datUse['model']=='MP_ppSOE'),['theta','phi']], x = 'theta', y = 'phi', fit_reg = False) ``` ## Model comparison incl model with self-reported second-order expectations (supp. fig. 11) ``` sns.set_context('notebook') metric = 'AIC' fig,ax = plt.subplots(1,1,figsize=[6,5],frameon=False) order = ['GR','GA_ppSOE','IA','MP','MP_ppSOE'] sns.barplot(data=datUse.loc[datUse['SSE']>10],x='model',y=metric, alpha=1,errwidth=0,zorder=1,order=order,color='w',edgecolor='k',linewidth=1) sns.swarmplot(data=datUse.loc[datUse['SSE']>10],x='model',y=metric, alpha=.5,s=3,zorder=2,order=order,color='k') sns.barplot(data=datUse.loc[datUse['SSE']>10],x='model',y=metric, alpha=0,errwidth=1.5,capsize=.1,errcolor='k',zorder=3, order=order) plt.xticks(range(len(order)),['GR','GA','IA','MS vary SOE','MS fix SOE'],rotation=45) plt.xlabel('Model') plt.savefig(os.path.join(base_dir,'Results/1.Behavior-FitModels/ModelComparisons_VarySOE.pdf'), transparent=True,bbox_inches='tight') ``` ## Model comparison in replication sample (supp. fig. 
4A) ``` rep_dat = pd.read_csv(os.path.join(base_dir, 'Data/7.BehavioralReplication/ModelFits.csv'), index_col=0) rep_dat.head() metric = 'AIC' model1s = ['GR','GA','IA'] model2 = 'MP_ppSOE' perfectFitSubs = np.unique(rep_dat.loc[rep_dat['SSE']==0,'sub']) datUse = rep_dat.loc[~rep_dat['sub'].isin(perfectFitSubs),:].copy() dat2 = datUse.loc[(datUse['model']==model2),metric] print('Mean AIC for %s = %.2f'%(model2,np.mean(dat2))) for model1 in model1s: dat1 = datUse.loc[(datUse['model']==model1),metric] stats = scipy.stats.ttest_rel(dat1,dat2) stats_nonparam = one_sample_permutation(np.subtract(dat1,dat2),n_permute=10000) print('Compare model %s, n = %i, mean AIC = %.2f, dAIC = %.2f, t = %.2f, p = %.4f, perm-p = %.4f'% (model1,len(dat1),np.mean(dat1),np.mean(dat2) - np.mean(dat1), stats[0],stats[1],stats_nonparam['p'])) sns.set_context('notebook') metric = 'AIC' fig,ax = plt.subplots(1,1,figsize=[6,6],frameon=False) order = ['GR','GA','IA','MP_ppSOE'] sns.barplot(data=rep_dat.loc[rep_dat['SSE']>10],x='model',y=metric, alpha=1,errwidth=0,zorder=1,order=order,color='w',edgecolor='k',linewidth=1) sns.swarmplot(data=rep_dat.loc[rep_dat['SSE']>10],x='model',y=metric, alpha=.5,s=3,zorder=2,order=order,color='k') sns.barplot(data=rep_dat.loc[rep_dat['SSE']>10],x='model',y=metric, alpha=0,errwidth=1.5,capsize=.1,errcolor='k',zorder=3, order=order) plt.xticks(range(len(order)),['GR','GA','IA','MS']) plt.xlabel('Model') FigureTools.add_sig_markers(ax,relationships=[[2,3,stats_nonparam['p']],[1,3,0],[0,3,0]], ystart=360,distScale=1.2) plt.savefig(os.path.join(base_dir,'Results/1.Behavior-FitModels/ModelComparisons_Replication.pdf'), transparent=True,bbox_inches='tight') ```
github_jupyter
``` %matplotlib inline import re import os.path import pandas as pd import numpy as np import matplotlib.pyplot as plt ``` In this notebook, we will explore the generated files produced from `whylogs init` in the command line interface. This file has been generated during that process and should include helpful metadata to ease exploration. # Reading logs generated from WhyLogs CLI Running WhyLogs will produce the following four files: 1. A flat summary file; 2. A histograms file; 3. A frequency file; and 4. A binary file containing the raw data objects To interact with these files, we will be downloading and reading the generated profile into Pandas as dataframes. First, let's collect needed metadata from the command line process. ``` # This cell is generated using the `whylogs init` command and may not run. # If this notebook has not been created as a result of that command, replace commented variables # with strings containing the appropriate values for your use case. profile_dir = "<your profile dir>" datetime_column = None ``` WhyLogs calculates and displays a number of metrics for the data that passes through. The carefully chosen metrics balance efficient storage and in-depth analysis of your data. ``` # TODO: After generated cell, must confirm with Andy's CLI experience flat_summary = pd.read_csv(os.path.join(profile_dir, "summary_summary.csv")) flat_summary ``` The flat summary file contains a summary of each variable of the dataset. It contains metrics that include descriptive statistics as well as metrics specifically for numeric, text, and categorical variables. Let's look at the available variables from the dataset that are logged in the profile's flat_summary. ``` # Print available variables for flat_summary print(flat_summary["column"].unique()) ``` Choose one variable to do a deep dive.
``` # Filter flat_summary to the desired variable variable = "mths_since_last_record" data = flat_summary[flat_summary["column"]==variable] ``` The inferred variable type metrics can tell us a lot about errors that may occur in the process. ``` # Print data type percentage print("Percentage of data in inferred data type:", data["inferred_dtype"].values) ``` Let's look at some metrics that hold type count information. ``` # List all type count metrics regex = re.compile("type_(.*)_count") metrics = list(filter(regex.match, flat_summary.columns)) ``` We can display this information using whichever visualization tools you are used to. Below is a simple chart created in `matplotlib`. ``` # Display all type count metrics using matplotlib x = [i for i, _ in enumerate(metrics)] fig, ax = plt.subplots() plt.bar(x, np.squeeze(data[metrics].values)) plt.title("Type counts for "+variable) plt.ylabel("Count") plt.xticks(x, metrics) plt.setp(ax.get_xticklabels(), rotation=-30, horizontalalignment='left') plt.show() ``` In addition to the type metrics, there are loads of other useful metrics in the WhyLogs summaries. These include but are not limited to descriptive statistics, estimations with error intervals, and metrics related to missing values. ``` metrics = flat_summary.columns print(metrics) ``` There are many more visualizations one might generate from the flat_summary file. Let's move onto the histogram file. The histogram file contains information for numeric variables that allow us to create histograms and analyze distribution. We'll grab the data for another variable and plot it. ``` histograms = pd.read_json(os.path.join(profile_dir, "summary_histogram.json")) # Print valid variables for histograms print(histograms.keys()) # Filter flat_summary to the desired variable variable = "mths_since_last_record" ``` We can display this information from the histogram. 
``` # See one of the inspected histograms bins = histograms[variable]['bin_edges'] n = histograms[variable]['counts'] bin_width = np.diff(bins) plt.bar(bins[0:-1], n, bin_width, align='edge') plt.title("Histogram for "+variable) plt.ylabel("Count") plt.show() ``` Finally, we have more detailed information on the frequencies of many variables in the dataset. These can be accessed through the generated frequencies file. ``` frequencies = pd.read_json(os.path.join(profile_dir, "summary_strings.json")) ```
github_jupyter
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](http://colab.research.google.com/github/asteroid-team/asteroid/blob/master/notebooks/01_APIOverview.ipynb) ### Introduction Asteroid is an open-source, community-based toolkit made to design, train, evaluate, use and share audio source separation models such as Deep clustering ([Hershey et al.](https://arxiv.org/abs/1508.04306)), ConvTasNet ([Luo et al.](https://arxiv.org/abs/1809.07454)) DPRNN ([Luo et al.](https://arxiv.org/abs/1910.06379)) etc.. Along with the models, Asteroid provides building blocks, losses, metrics and datasets commonly used in source separation. This makes it easy to design new source separation models and benchmark them against others ! For training, Asteroid relies of the great [PyTorchLightning](https://github.com/PyTorchLightning/pytorch-lightning), which handles automatic distributed training, logging, experiment resume and much more, be sure to check it out! For the rest, it's native [PyTorch](https://pytorch.org). Enough talking, let's start ! ``` # First off, install asteroid !pip install git+https://github.com/asteroid-team/asteroid --quiet ``` ### After installing requirements, you need to Restart Runtime (Ctrl + M). Else it will fail to import asteroid ### Waveform transformations & features Time-frequency transformations are often performed on waveforms before feeding them to source separation models. Most of them can be formulated as convolutions with specific (learned or not) filterbank. Their inverses, mapping back to time domain, can be formulated as transposed convolution. Asteroid proposes a unified view of this transformations, which is implemented with the classes `Filterbank`, `Encoder` and `Decoder`. The `Filterbank` object is the one holding the actual filters that are used to compute the transforms. `Encoder` and `Decoder` are applied on top to provide method to go back and forth from waveform to time-frequency domain. 
A common example is the one of the STFT, that can be defined as follows: ``` from asteroid.filterbanks import STFTFB, Encoder, Decoder # First, instantiate the STFT filterbank fb = STFTFB(n_filters=256, kernel_size=128, stride=64) # Make an encoder out of it, forward some waveform through it. encoder = Encoder(fb) # Same for decoder decoder_fb = STFTFB(n_filters=256, kernel_size=128, stride=64) decoder = Decoder(decoder_fb) # The preceding lines can also be obtained faster with these lines from asteroid.filterbanks import make_enc_dec encoder, decoder = make_enc_dec('stft', n_filters=256, kernel_size=128, stride=64) ``` From there, the interface of `Encoder` is the same as the one from `torch.nn.Conv1d` and `Decoder` as `torch.nn.ConvTranspose1d`, and a waveform-like object can be transformed like this: ``` import torch # Waveform-like wav = torch.randn(2, 1, 16000) # Time-frequency representation tf_rep = encoder(wav) # Back to time domain wav_back = decoder(tf_rep) ``` More info on automatic pseudo-inverse, how to define your own filterbanks etc.. can be found in the [Filterbank notebook](https://github.com/asteroid-team/asteroid/blob/master/notebooks/02_Filterbank.ipynb). ### Masker network & Separation models Asteroid aims at providing most state-of-the-art masker neural network. Some of these masking networks and/or separation models share building blocks such as residual LSTMs or D-Conv-based convolutional blocks. Asteroid provides these building blocks as well as common masker networks with building blocks already assembled (eg. `TDConvNet` or `DPRNN`). These blocks are already configured optimally according to the corresponding papers, just import them and run ! ``` from asteroid.masknn import TDConvNet # We only need to specify the number of input channels # and the number of sources we want to estimate. masker = TDConvNet(in_chan=128, n_src=2) # Now, we can use it to estimate some masks! 
tf_rep = torch.randn(2, 128, 10) wav_back = masker(tf_rep) ``` Let's put the encoder, masker and decoder together in an `nn.Module` to make it all simple. ``` from asteroid.filterbanks import make_enc_dec class Model(torch.nn.Module): def __init__(self): super().__init__() # Encoder and Decode in "one line" self.enc, self.dec = make_enc_dec( 'stft', n_filters=256, kernel_size=128, stride=64 ) # # Mask network from ConvTasNet in one line. self.masker = TDConvNet(in_chan=self.enc.n_feats_out, n_src=2) def forward(self, wav): # Simplified forward tf_rep = self.enc(wav) masks = self.masker(tf_rep) wavs_out = self.dec(tf_rep.unsqueeze(1) * masks) return wavs_out # Define and forward stft_conv_tasnet = Model() wav_out = stft_conv_tasnet(torch.randn(1, 1, 16000)) ``` Actually, for models like ConvTasNet, they can directly be imported and used from asteroid like this : ``` from asteroid import ConvTasNet model = ConvTasNet(n_src=2) ``` ### Datasets and DataLoader We support several source separation datasets, you can find more information on them in the docs. Note that their is no common API between them, preparing the data in the format expected by the `Dataset` is the role of the recipes. In order to experiment easily, we added a small part of LibriMix for direct download. ``` from asteroid.data import LibriMix train_set, val_set = LibriMix.mini_from_download(task='sep_clean') ``` ### Loss functions Asteroid provides several loss functions that are commonly used for source separation or speech enhancement. More importantly, we also provide `PITLossWrapper`, an efficient wrapper that can turn any loss function into a permutation invariant loss. 
For example, defining a permuatation invariant si-sdr loss, run ``` from asteroid.losses import PITLossWrapper, pairwise_neg_sisdr loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx') ``` You can find more info about this in the [PIT loss tutorial](https://github.com/asteroid-team/asteroid/blob/master/notebooks/03_PITLossWrapper.ipynb). ### Training For training, Asteroid relies on PyTorchLightning which automatizes almost everything for us. We have a thin wrapper around it to make things even simpler. #### Putting all ingredients together with `System` To use PyTorchLightning, we need to define all the ingredients (dataloaders, model, loss functions, optimizers, etc..) into one object, the `LightningModule`. In order to keep things separate and re-usable, and to reduce boilerplate, we define a sub-class, `System`, which expects these ingredients separately. Additionally, `LightningModule` needs to expose the `training_step` and `validation_step` functions. It is usual for these functions to be shared or really similar so we grouped them under `common_step`. ``` class System(pl.LightningModule): def __init__(self, model, optimizer, loss_func, train_loader, val_loader=None, scheduler=None, config=None): ... 
def common_step(self, batch, batch_nb, train=True): inputs, targets = batch est_targets = self(inputs) loss = self.loss_func(est_targets, targets) return loss ``` #### Example training script ``` from torch.optim import Adam from torch.utils.data import DataLoader import pytorch_lightning as pl from asteroid.data import LibriMix from asteroid.engine.system import System from asteroid.losses import PITLossWrapper, pairwise_neg_sisdr from asteroid import ConvTasNet train_set, val_set = LibriMix.mini_from_download(task='sep_clean') train_loader = DataLoader(train_set, batch_size=4, drop_last=True) val_loader = DataLoader(val_set, batch_size=4, drop_last=True) # Define model and optimizer (one repeat to be faster) model = ConvTasNet(n_src=2, n_repeats=1) optimizer = Adam(model.parameters(), lr=1e-3) # Define Loss function. loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx') # Define System system = System(model=model, loss_func=loss_func, optimizer=optimizer, train_loader=train_loader, val_loader=val_loader) # Define lightning trainer, and train trainer = pl.Trainer(fast_dev_run=True) trainer.fit(system) ``` #### Extending `System` If your model or data is a bit different, changing `System` is easy, just overwrite the `common_step` method. ``` # Example of how simple it is to define a new System with # different training dynamic. class YourSystem(System): def common_step(self, batch, batch_nb, train=True): # Your DataLoader returns three tensors inputs, some_other_input, targets = batch # Your model returns two. est_targets, some_other_output = self(inputs, some_other_input) if train: # Your loss takes three argument loss = self.loss_func(est_targets, targets, cond=some_other_output) else: # At validation time, you don't want cond loss = self.loss_func(est_targets, targets) return loss ``` Of course, Asteroid is not limited to using `System` as this is pure PyTorchLightning and more complicated use cases might not benefit from `System`. 
In this case, writing a `LightningModule` would be the way to go !
github_jupyter
``` %matplotlib inline import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt from sklearn.linear_model import Lasso, LassoCV, Ridge, RidgeCV from sklearn.model_selection import cross_val_predict, train_test_split from yellowbrick.datasets import load_concrete from yellowbrick.regressor import AlphaSelection, PredictionError, ResidualsPlot mpl.rcParams['figure.figsize'] = (9,6) ``` # Yellowbrick - Regression Examples The Yellowbrick library is a diagnostic visualization platform for machine learning that allows data scientists to steer the model selection process. It extends the scikit-learn API with a new core object: the Visualizer. Visualizers allow visual models to be fit and transformed as part of the scikit-learn pipeline process, providing visual diagnostics throughout the transformation of high-dimensional data. Estimator score visualizers *wrap* scikit-learn estimators and expose the Estimator API such that they have `fit()`, `predict()`, and `score()` methods that call the appropriate estimator methods under the hood. Score visualizers can wrap an estimator and be passed in as the final step in a `Pipeline` or `VisualPipeline`. In machine learning, regression models attempt to predict a target in a continuous space. Yellowbrick has implemented the following regressor score visualizers that display the instances in model space to better understand how the model is making predictions: - `AlphaSelection` visual tuning of regularization hyperparameters - `PredictionError` plot the expected vs. the actual values in model space - `Residuals Plot` plot the difference between the expected and actual values #### Load Data Yellowbrick provides several datasets wrangled from the [UCI Machine Learning Repository](http://archive.ics.uci.edu/ml/). For the following examples, we'll use the `concrete` dataset, since it is well-suited for regression tasks. The `concrete` dataset contains 1030 instances and 9 attributes. 
Eight of the attributes are explanatory variables, including the age of the concrete and the materials used to create it, while the target variable `strength` is a measure of the concrete's compressive strength (MPa). ``` # Use Yellowbrick to load the concrete dataset data = load_concrete() # Save the data in a Pandas DataFrame df = pd.DataFrame(data['data'], columns=data['feature_names'], dtype='float') # Save feature names as a list and target variable as a string feature_names = ['cement', 'slag', 'ash', 'water', 'splast', 'coarse', 'fine', 'age'] target_name = 'strength' # Get the X and y data from the DataFrame X = df[feature_names] y = df[target_name] # Create the train and test data X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) ``` ### Residuals Plot A residual is the difference between the observed value of the target variable (y) and the predicted value (ŷ), i.e. the error of the prediction. The `ResidualsPlot` Visualizer shows the difference between residuals on the vertical axis and the dependent variable on the horizontal axis, allowing you to detect regions within the target that may be susceptible to more or less error. If the points are randomly dispersed around the horizontal axis, a linear regression model is usually well-suited for the data; otherwise, a non-linear model is more appropriate. The following example shows a fairly random, uniform distribution of the residuals against the target in two dimensions. This seems to indicate that our linear model is performing well. ``` # Instantiate the linear model and visualizer model = Ridge() visualizer = ResidualsPlot(model) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data g = visualizer.poof() # Draw/show/poof the data ``` Yellowbrick's `ResidualsPlot` Visualizer also displays a histogram of the error values along the right-hand side. 
In the example above, the error is normally distributed around zero, which also generally indicates a well-fitted model. If the histogram is not desired, it can be turned off with the `hist=False` flag. ``` # Instantiate the linear model and visualizer model = Ridge() visualizer = ResidualsPlot(model, hist=False) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data g = visualizer.poof() # Draw/show/poof the data ``` ### Prediction Error Plot Yellowbrick's `PredictionError` Visualizer plots the actual targets from the dataset against the predicted values generated by the model. This allows us to see how much variance is in the model. Data scientists can diagnose regression models using this plot by comparing against the 45-degree line, where the prediction exactly matches the model. ``` # Instantiate the linear model and visualizer model = Lasso() visualizer = PredictionError(model) visualizer.fit(X_train, y_train) # Fit the training data to the visualizer visualizer.score(X_test, y_test) # Evaluate the model on the test data g = visualizer.poof() # Draw/show/poof the data ``` ### Alpha Selection Visualizer The `AlphaSelection` Visualizer demonstrates how different values of alpha influence model selection during the regularization of linear models. Since regularization is designed to penalize model complexity, the higher the alpha, the less complex the model, decreasing the error due to variance (overfit). However, alphas that are too high increase the error due to bias (underfit). Therefore, it is important to choose an optimal alpha such that the error is minimized in both directions. To do this, typically you would you use one of the "RegressionCV” models in scikit-learn. E.g. instead of using the `Ridge` (L2) regularizer, use `RidgeCV` and pass a list of alphas, which will be selected based on the cross-validation score of each alpha. 
This visualizer wraps a “RegressionCV” model and visualizes the alpha/error curve. If the visualization shows a jagged or random plot, then potentially the model is not sensitive to that type of regularization and another is required (e.g. L1 or Lasso regularization). ``` # Create a list of alphas to cross-validate against alphas = np.logspace(-10, 1, 400) # Instantiate the linear model and visualizer model = LassoCV(alphas=alphas) visualizer = AlphaSelection(model) visualizer.fit(X, y) # Fit the data to the visualizer g = visualizer.poof() # Draw/show/poof the data ```
github_jupyter
# LDA Visualizations ## Setup ``` import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) import pyLDAvis.gensim import pickle import pandas as pd import gensim from gensim import corpora from gensim.models import CoherenceModel, LdaModel from gensim.test.utils import datapath from matplotlib import pyplot as plt from wordcloud import WordCloud, STOPWORDS import math import clean import model_lda import analyze import spacy from spacy import displacy #define path names raw_file = 'data/documents_raw.pkl' clean_file = 'data/documents_clean.pkl' model_file = 'data/model.gensim' DTM_file = 'data/DTM.pkl' dict_file = 'data/dictionary.gensim' #embeddings nlp = spacy.load('en_core_web_lg') raw_docs = pd.read_pickle(raw_file) doc = nlp(raw_docs['text'][0]) displacy.render(doc, style='ent', jupyter=True) #LDA warnings.filterwarnings("ignore", category=UserWarning) #preprocess clean_docs = clean.dataClean(raw_file,FILTER= True,STEM=False) #make dictionary from documents dictionary = model_lda.makeDict(clean_docs) #make document-term-matrix from docs,dictionary DTM = model_lda.makeDTM(clean_docs,dictionary) #ldamodel = LdaModel.load(model_file) ``` ## Identify Number of Topics ``` n_pass = 20 start = 2 stop = 30 step = 2 #FYI: this step takes a long time to run model_list, coherence_values = analyze.coherenceByNumTopics( DTM, dictionary, clean_docs, n_pass, stop, start, step) print(len(coherence_values)) analyze.plotCoherence(coherence_values, start, stop, step) ``` Examine the above plot to identify which value of N_TOPICS maximizes the coherence value (or the point at which they plateau). 
``` N_TOPICS = 12 #default, but can be changed based on results from above N_PASS = 50 #this can also be changed, but longer usually = better ldamodel = model_lda.LDA(DTM,dictionary, N_TOPICS, N_PASS) ``` ## Word Clouds ``` def topicCloud(model,N_TOPICS): cloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=3000, height=2000, max_words=10, colormap='tab10', prefer_horizontal=1.0) topics = model.show_topics(formatted=False) h = math.ceil(N_TOPICS/2) fig, axes = plt.subplots(h, 2, figsize=(10,10), sharex=True, sharey=True) for i, ax in enumerate(axes.flatten()): fig.add_subplot(ax) topic_words = dict(topics[i][1]) cloud.generate_from_frequencies(topic_words, max_font_size=500) plt.gca().imshow(cloud) plt.gca().set_title('Topic ' + str(i+1), fontdict=dict(size=16)) plt.gca().axis('off') plt.subplots_adjust(wspace=0, hspace=0) plt.axis('off') plt.margins(x=0, y=0) plt.tight_layout() plt.show() return #topicCloud(ldamodel,N_TOPICS) topicCloud(ldamodel,10) ``` # Interactive Visualizations ``` warnings.filterwarnings("ignore", category=FutureWarning) pyLDAvis.enable_notebook() lda_display = pyLDAvis.gensim.prepare(ldamodel, DTM, dictionary, sort_topics=False) pyLDAvis.display(lda_display) ``` # Model Perplexity and Coherence ``` perplexity_score = ldamodel.log_perplexity(DTM) coherence_model_lda = CoherenceModel(model=ldamodel, texts=clean_docs, dictionary=dictionary, coherence='c_v') coherence_score = coherence_model_lda.get_coherence() print('\nPerplexity: ', perplexity_score) print('\nCoherence: ', coherence_score) ```
github_jupyter
``` #$conda activate py37 #(py37)$ jupyter notebook # %config IPCompleter.greedy=True #press [SHIFT] and [TAB] from within the method parentheses ### intellisense - works perfect!! -> excute in command line windows. : (py37) $ -> works perfect !! # (py37) $pip3 install jupyter-tabnine # (py37) $sudo jupyter nbextension install --py jupyter_tabnine # (py37) $jupyter nbextension enable jupyter_tabnine --py ##### jupyter nbextension enable --py jupyter_tabnine ##instead above line excuted. # (py37) $jupyter serverextension enable --py jupyter_tabnine #--> I installed in (py37) conda env. ``` # Time Series ``` import numpy as np import pandas as pd np.random.seed(12345) import matplotlib.pyplot as plt plt.rc('figure', figsize=(10, 6)) PREVIOUS_MAX_ROWS = pd.options.display.max_rows pd.options.display.max_rows = 20 np.set_printoptions(precision=4, suppress=True) ``` ## Date and Time Data Types and Tools ``` from datetime import datetime now = datetime.now() now now.year, now.month, now.day delta = datetime(2011, 1, 7) - datetime(2008, 6, 24, 8, 15) delta delta.days delta.seconds from datetime import timedelta start = datetime(2011, 1, 7) start + timedelta(12) #datetime.timedelta -> 12 day forward. 
start - 2 * timedelta(12) #24 day back ``` ### Converting Between String and Datetime %F : Shortcut for %Y-%m-%d (e.g., 2012-4-18 ),%D: Shortcut for %m/%d/%y (e.g., 04/18/12 ) * stamp.strftime('%Y-%m-%d') * from dateutil.parser import parse : parse('2011-01-03') * pd.to_datetime(datestrs) ``` # stamp = datetime(2020, 1, 14) stamp = datetime(2020, 1, 14, 20, 15) str(stamp) stamp.strftime('%Y-%m-%d') #%Y 4digit year stamp.strftime('%y-%m-%d-%H-%M-%S') #%y 2 digit year m: 2 digit month, M: minute, H:24hour, I: 12hour stamp.strftime('%y-%m-%d-%I-%M-%S') #%y 2 digit year m: 2 digit month, M: minute, H:24hour, I: 12hour ``` time.strptime :convert strings to dates using date ``` value = '2011-01-03' #string datetime.strptime(value, '%Y-%m-%d') #datetime : need to write format spec such as '%Y-%m-%d' datestrs = ['7/6/2011', '8/6/2011'] [datetime.strptime(x, '%m/%d/%Y') for x in datestrs] #no need format spec such as '%Y-%m-%d', especially for common date formats. from dateutil.parser import parse parse('2011-01-03') parse('Jan 31, 1997 10:45 PM') parse('6/12/2011', dayfirst=True) datestrs = ['2011-07-06 12:00:00', '2011-08-06 00:00:00'] pd.to_datetime(datestrs) #handles values that should be considered missing ( None , empty string, etc.): idx = pd.to_datetime(datestrs + [None]) idx idx[2] pd.isnull(idx) ``` ## Time Series Basics ``` from datetime import datetime dates = [datetime(2011, 1, 2), datetime(2011, 1, 5), datetime(2011, 1, 7), datetime(2011, 1, 8), datetime(2011, 1, 10), datetime(2011, 1, 12)] ts = pd.Series(np.random.randn(6), index=dates) ts ts.index #index(1,3,5) : number + missing value -> NaN, index(0,2,4): number+number -> number ts + ts[::2] ts.index.dtype ``` A Timestamp can be substituted anywhere you would use a datetime object. Addi‐ tionally, it can store frequency information (if any) and understands how to do time zone conversions and other kinds of manipulations. 
``` stamp = ts.index[0] stamp ``` ### Indexing, Selection, Subsetting * Time series behaves like any other pandas.Series when you are indexing and selecting data based on label: ``` stamp = ts.index[2] ts[stamp] ts['1/10/2011'] ts['20110110'] ``` pd.date_range ``` longer_ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000)) longer_ts ``` * For longer time series, a **year** or only a **year and month** can be passed to easily select slices of data: * the string '2001' is interpreted as a year and selects that time period. This also works if you specify the month: ``` longer_ts['2001'] longer_ts['2001-05'] # Slicing with datetime objects works as well: ts[datetime(2011, 1, 7):] ts #slice with timestamps not contained in a time series to perform a range query: ts['1/6/2011':'1/11/2011'] ts.truncate(after='1/9/2011') ``` ``pd.date_range(start ='1-1-2018', end ='1-05-2018', freq ='5H') pd.date_range(start ='1-1-2018', end ='8-01-2018', freq ='M') pd.date_range(start ='1-1-2018', end ='11-01-2018', freq ='3M') pd.date_range(start ='1-1-2018', periods = 13) pd.date_range(start ='01-03-2017', end ='1-1-2018', periods = 13) pd.date_range(start ='1-1-2018', periods = 13, tz ='Asia / Tokyo') `` ``` dates = pd.date_range('1/1/2000', periods=100, freq='W-WED') #wed only # dates = pd.date_range('1/1/2000', periods=100, freq='M') #MonthEnd print(len(dates)) dates long_df = pd.DataFrame(np.random.randn(100, 4), index=dates, columns=['Colorado', 'Texas', 'New York', 'Ohio']) print(long_df.shape) long_df.head() long_df.loc['5-2001'] #indexing on its rows: # long_df.loc['2001-5'] #indexing on its rows: same as above ``` ### Time Series with Duplicate Indices ``` dates = pd.DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/2/2000', '1/3/2000']) dup_ts = pd.Series(np.arange(5), index=dates) dup_ts dup_ts.index.is_unique dup_ts['1/3/2000'] # not duplicated dup_ts['1/2/2000'] # duplicated grouped = dup_ts.groupby(level=0) grouped.mean() grouped.count() 
``` ## Date Ranges, Frequencies, and Shifting ``` ts ``` resample('D'): sample time series to be fixed daily frequency ``` resampler = ts.resample('D') #DatetimeIndexResampler -> contiguous day with blank days list(resampler) ``` ### Generating Date Ranges ``` index = pd.date_range('2012-04-01', '2012-06-01') index pd.date_range(start='2012-04-01', periods=20) pd.date_range(end='2012-06-01', periods=20) ``` #### BM: business end of month,M: month end, D: day, B:Biz Day, H: hour, T: minute ``` pd.date_range('2000-01-01', '2000-12-01', freq='BM') pd.date_range('2012-05-02 12:56:31', periods=5) # generate a set of timestamps normalized to midnight as a convention. pd.date_range('2012-05-02 12:56:31', periods=5, normalize=True) ``` ### Frequencies and Date Offsets: * Frequencies: base frequency('M' for monthly or 'H' for hourly.) and a multiplier. * ``from pandas.tseries.offsets import Hour, Minute, Day, MonthEnd`` ``` from pandas.tseries.offsets import Hour, Minute hour = Hour() #base frequency: hour hour four_hours = Hour(4) #offset is 4 hour four_hours ``` never need to explicitly create one of these objects, instead using a string alias like 'H' or '4H' ``` pd.date_range('2000-01-01', '2000-01-03 23:59', freq='4h') # Many offsets can be combined together by addition: Hour(2) + Minute(30) pd.date_range('2000-01-01', periods=10, freq='1h30min') ``` #### Week of month dates ``` rng = pd.date_range('2012-01-01', '2012-09-01', freq='WOM-3FRI') #get dates like the third Friday of each month: list(rng) ``` ### Shifting (Leading and Lagging) Data ``` ts = pd.Series(np.random.randn(4), index=pd.date_range('1/1/2000', periods=4, freq='M')) ts ts.shift(2) #move forward two index ts.shift(-2) ``` shift is computing percent changes in a time series or multiple time series as DataFrame columns. 
* (ts - ts.shift(1)) / ts.shift(1) ``` # percent changes in a time series : (ts - ts.shift(1)) / ts.shift(1) ts / ts.shift(1) - 1 ``` if the frequency is known, it can be passed to shift to advance the timestamps instead of simply the data: ``` ts ts.shift(2, freq='M') #index, datatime is sifted by 2 instead of value ts.shift(3, freq='D') # index sifted 3 days forward ts.shift(1, freq='90T') #T: minute, 90 minutes forward.(1h : 30min) ``` #### Shifting dates with offsets ``` from pandas.tseries.offsets import Day, MonthEnd now = datetime(2011, 11, 17) now + 3 * Day() now + MonthEnd() now + MonthEnd(2) # forward 2 moth end. offset = MonthEnd() offset.rollforward(now) #this month end offset.rollback(now) #previous month end ts = pd.Series(np.random.randn(20), index=pd.date_range('1/15/2000', periods=20, freq='4d')) print(len(ts)) ts ts.groupby(offset.rollforward).mean() #groupby(MonthEnd and this month -> this month end) # ts.resample('M') -> month end ts.resample('M').mean() #same as above ``` ## Time Zone Handling ``` datetime.now() import pytz pytz.common_timezones[-5:] tz = pytz.timezone('America/New_York') tz pytz.timezone('US/Pacific') ``` ### Time Zone Localization and Conversion ``` rng = pd.date_range('3/9/2012 9:30', periods=6, freq='D') rng ts = pd.Series(np.random.randn(len(rng)), index=rng) #len(rng):6 ts print(ts.index.tz) pd.date_range('3/9/2012 9:30', periods=10, freq='D') pd.date_range('3/9/2012 9:30', periods=10, freq='D', tz='UTC') #test pd.date_range('3/9/2012 9:30', periods=10, freq='D', tz='US/Pacific') ts ts_utc = ts.tz_localize('UTC') ts_utc ts_utc.index ts_utc.tz_convert('America/New_York') ts_eastern = ts.tz_localize('America/New_York') ts_eastern.tz_convert('UTC') ts_eastern.tz_convert('Europe/Berlin') print(type(ts.index)) ts.index.tz_localize('Asia/Shanghai') ``` ### Operations with Time Zone−Aware Timestamp Objects ``` stamp = pd.Timestamp('2011-03-12 04:00') stamp stamp_utc = stamp.tz_localize('utc') stamp_utc 
stamp_utc.tz_convert('America/New_York') stamp_moscow = pd.Timestamp('2011-03-12 04:00', tz='Europe/Moscow') stamp_moscow stamp_utc.value stamp_utc.tz_convert('America/New_York').value from pandas.tseries.offsets import Hour stamp = pd.Timestamp('2012-03-12 01:30', tz='US/Eastern') stamp stamp + Hour() stamp = pd.Timestamp('2012-11-04 00:30', tz='US/Eastern') stamp stamp + 2 * Hour() ``` ### Operations Between Different Time Zones -> Result in UTC. ``` #BM: business end of month,M: month end, D: day, B:Biz Day, H: hour, T: minute rng = pd.date_range('3/7/2012 9:30', periods=10, freq='B') ts = pd.Series(np.random.randn(len(rng)), index=rng) ts ts1 = ts[:7].tz_localize('Europe/London') ts1 ts2 = ts1[2:].tz_convert('Europe/Moscow') ts2 result = ts1 + ts2 result result.index ``` ## Periods and Period Arithmetic * Periods represent timespans, like days, months, quarters, or years * pd.Period(2007, freq='A-DEC'): full timespan from January 1, 2007, to December 31, 2007, inclusive. * pd.Period('2014', freq='A-DEC') ``` p = pd.Period(2007, freq='A-DEC')#A-JAN, A-FEB...YearEnd(Annual dates anchored on last calendar day of given month) p p + 5 #full timespan from January 1, 2012, to December 31, 2007, inclusive. p - 2 # If two periods have the same frequency, their difference is the number of units # between them: pd.Period('2014', freq='A-DEC') - p ``` pd.period_range(): Regular ranges of periods ``` rng = pd.period_range('2000-01-01', '2000-06-30', freq='M') rng pd.Series(np.random.randn(6), index=rng) # Quarterly dates anchored on last calendar day of each month, for year ending in indicated month values = ['2001Q3', '2002Q2', '2003Q1'] index = pd.PeriodIndex(values, freq='Q-DEC')#Q-JAN, Q-FEB, ...QuarterEnd index ``` ### Period Frequency Conversion * Periods and PeriodIndex objects can be converted to another frequency with their asfreq method. 
``` p = pd.Period('2007', freq='A-DEC') p p.asfreq('M', how='start') p.asfreq('M', how='end') p = pd.Period('2007', freq='A-JUN') p p.asfreq('M', 'start') p.asfreq('M', 'end') p = pd.Period('Aug-2007', 'M') p.asfreq('A-JUN') rng = pd.period_range('2006', '2009', freq='A-DEC') ts = pd.Series(np.random.randn(len(rng)), index=rng) ts ts.asfreq('M', how='start') ts.asfreq('B', how='end') ``` ### Quarterly Period Frequencies ``` p = pd.Period('2012Q4', freq='Q-JAN') p p.asfreq('D', 'start') p.asfreq('D', 'end') p4pm = (p.asfreq('B', 'e') - 1).asfreq('T', 's') + 16 * 60 p4pm p4pm.to_timestamp() rng = pd.period_range('2011Q3', '2012Q4', freq='Q-JAN') ts = pd.Series(np.arange(len(rng)), index=rng) ts new_rng = (rng.asfreq('B', 'e') - 1).asfreq('T', 's') + 16 * 60 ts.index = new_rng.to_timestamp() ts ``` ### Converting Timestamps to Periods (and Back) ``` rng = pd.date_range('2000-01-01', periods=3, freq='M') ts = pd.Series(np.random.randn(3), index=rng) ts pts = ts.to_period() pts rng = pd.date_range('1/29/2000', periods=6, freq='D') ts2 = pd.Series(np.random.randn(6), index=rng) ts2 ts2.to_period('M') pts = ts2.to_period() pts pts.to_timestamp(how='end') ``` ### Creating a PeriodIndex from Arrays ``` data = pd.read_csv('examples/macrodata.csv') data.head(5) data.year data.quarter index = pd.PeriodIndex(year=data.year, quarter=data.quarter, freq='Q-DEC') index data.index = index data.infl ``` ## Resampling and Frequency Conversion ``` rng = pd.date_range('2000-01-01', periods=100, freq='D') ts = pd.Series(np.random.randn(len(rng)), index=rng) ts ts.resample('M').mean() ts.resample('M', kind='period').mean() ``` ### Downsampling ``` rng = pd.date_range('2000-01-01', periods=12, freq='T') ts = pd.Series(np.arange(12), index=rng) ts ts.resample('5min', closed='right').sum() ts.resample('5min', closed='right').sum() ts.resample('5min', closed='right', label='right').sum() ts.resample('5min', closed='right', label='right', loffset='-1s').sum() ``` #### Open-High-Low-Close 
(OHLC) resampling ``` ts.resample('5min').ohlc() ``` ### Upsampling and Interpolation ``` frame = pd.DataFrame(np.random.randn(2, 4), index=pd.date_range('1/1/2000', periods=2, freq='W-WED'), columns=['Colorado', 'Texas', 'New York', 'Ohio']) frame df_daily = frame.resample('D').asfreq() df_daily frame.resample('D').ffill() frame.resample('D').ffill(limit=2) frame.resample('W-THU').ffill() ``` ### Resampling with Periods ``` frame = pd.DataFrame(np.random.randn(24, 4), index=pd.period_range('1-2000', '12-2001', freq='M'), columns=['Colorado', 'Texas', 'New York', 'Ohio']) frame[:5] annual_frame = frame.resample('A-DEC').mean() annual_frame # Q-DEC: Quarterly, year ending in December annual_frame.resample('Q-DEC').ffill() annual_frame.resample('Q-DEC', convention='end').ffill() annual_frame.resample('Q-MAR').ffill() ``` ## Moving Window Functions ``` close_px_all = pd.read_csv('examples/stock_px_2.csv', parse_dates=True, index_col=0) close_px = close_px_all[['AAPL', 'MSFT', 'XOM']] close_px = close_px.resample('B').ffill() close_px.AAPL.plot() close_px.AAPL.rolling(250).mean().plot() plt.figure() appl_std250 = close_px.AAPL.rolling(250, min_periods=10).std() appl_std250[5:12] appl_std250.plot() expanding_mean = appl_std250.expanding().mean() plt.figure() close_px.rolling(60).mean().plot(logy=True) close_px.rolling('20D').mean() ``` ### Exponentially Weighted Functions ``` plt.figure() aapl_px = close_px.AAPL['2006':'2007'] ma60 = aapl_px.rolling(30, min_periods=20).mean() ewma60 = aapl_px.ewm(span=30).mean() ma60.plot(style='k--', label='Simple MA') ewma60.plot(style='k-', label='EW MA') plt.legend() ``` ### Binary Moving Window Functions ``` plt.figure() spx_px = close_px_all['SPX'] spx_rets = spx_px.pct_change() returns = close_px.pct_change() corr = returns.AAPL.rolling(125, min_periods=100).corr(spx_rets) corr.plot() plt.figure() corr = returns.rolling(125, min_periods=100).corr(spx_rets) corr.plot() ``` ### User-Defined Moving Window Functions ``` plt.figure() 
from scipy.stats import percentileofscore score_at_2percent = lambda x: percentileofscore(x, 0.02) result = returns.AAPL.rolling(250).apply(score_at_2percent) result.plot() pd.options.display.max_rows = PREVIOUS_MAX_ROWS ``` ## Conclusion
github_jupyter
# Find Intersection Have the function FindIntersection(strArr) read the array of strings stored in strArr which will contain 2 elements:\ the first element will represent a list of comma-separated numbers sorted in ascending order,\ the second element will represent a second list of comma-separated numbers (also sorted).\ Your goal is to return a comma-separated string containing the numbers that occur in elements of strArr in sorted order.\ If there is no intersection, return the string false.\ ***Examples***\ ***Input:*** ["1, 3, 4, 7, 13", "1, 2, 4, 13, 15"]\ ***Output:*** 1,4,13\ ***Input:*** ["1, 3, 9, 10, 17, 18", "1, 4, 9, 10"]\ ***Output:*** 1,9,10 ``` data=input() se=data.split('"|,',data)[1:-1] l1=[] l2=[] c=0 for i in se: try: if c==0: l1.append(int(i)) else: l2.append(int(i)) except: c+=1 c=0 for i in l1: if i in l2: if c==0: c+=1 print(i,end='') else: print(end=',') print(i,end='') def FindIntersection(data): import re se=re.split('"|,',data)[1:-1] l1=[] l2=[] c=0 for i in se: try: if c==0: l1.append(int(i)) else: l2.append(int(i)) except: c+=1 c=0 s='' for i in l1: if i in l2: if c==0: c+=1 s+=str(i) else: s+=','+str(i) # code goes here return s # keep this function call here print(FindIntersection(input())) data se=data.split(',') se se.split(',') def FindIntersection(strArr): v = set(map(int, strArr[0].split(', '))) q = set(map(int, strArr[1].split(', '))) c = sorted(list(v&q)) if len(c) ==0: return 'false' d = """""" for i in range(len(c)): d += str(c[i]) if i<len(c)-1: d+= "," return d print(FindIntersection(input())) def LetterChanges(str): l='' for i in str: if ord(i)>96 and ord(i)<123: x=chr((ord(i)-97+1)%25+96) if x in 'aeiou': l+=chr((ord(i)-97+1)%25+96-32) else: l+=x else: l+=i # code goes here return l # keep this function call here print(LetterChanges(input())) chr((ord('z')+1-97)%25+96) def FirstFactorial(a): # code goes here a=int(a) if a==1: return 1 else: return a*FirstFactorial(a-1) # keep this function call here 
print(FirstFactorial(input())) def LongestWord(my_str): # code goes here punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~''' no_punct = "" for char in my_str: if char not in punctuations: no_punct = no_punct + char c=0 s=0 no_punct=no_punct.split(' ') for i in no_punct: if len(i)>c: s=i c=len(i) return s # keep this function call here print(LongestWord(input())) # define punctuation my_str = "Hello!!!, he said ---and went." # To take input from the user # my_str = input("Enter a string: ") # remove punctuation from the string # display the unpunctuated string print() import math math.ceil() x=bin(213)[2:] k=-(-len(x)//8) x='0'*(8*k-len(x))+x int(x[::-1],2) def BinaryReversal(str): # code goes here x=bin(int(str))[2:] k=-(-len(x)//8) x='0'*(8*k-len(x))+x print(x) return int(x[::-1],2) # keep this function call here print(BinaryReversal(input())) ```
github_jupyter
# Analiza serij na spletnem portalu [MAL](https://myanimelist.net/topanime.php) Projektna naloga pri predmetu Programiranje 1 ## 0. Priprava podatkov Preden začnemo, moramo uvoziti vnaprej pripravljene podatke. V ta namen uporabimo knjižnico pandas in vnesemo podatke v tabele. ``` import pandas as pd import os # izberemo interaktivni stil risanja %matplotlib inline # spremenimo velikost figur import matplotlib as plt plt.rcParams['figure.figsize'] = (10, 5) pd.set_option('display.max_rows', 20) # naložimo razpredelnice, s katero bomo delali pot_do_serij = os.path.join('data', 'serije.csv') pot_do_licensorjev = os.path.join('data', 'licensorji.csv') pot_do_producerjev = os.path.join('data', 'producerji.csv') pot_do_studijev = os.path.join('data', 'studiji.csv') pot_do_zanrov = os.path.join('data', 'žanri.csv') serije = pd.read_csv(pot_do_serij) licensorji = pd.read_csv(pot_do_licensorjev) producerji = pd.read_csv(pot_do_producerjev) studiji = pd.read_csv(pot_do_studijev) zanri = pd.read_csv(pot_do_zanrov) ``` Tukaj še malo očistimo podatke, saj nam manjkajoča polja pri popularnosti in število glasov gredo samo v napoto. ``` serije = serije[ serije.popularity.notnull() & serije.votes.notnull() ].astype( {'popularity': 'int64', 'votes': 'int64'} ).sort_values('popularity') ``` Nato razcepimo čas premiere na leto in sezono, saj bo z razcepljenima podatkoma kasnejša obravnava lažja. Poleg tega bomo tudi kasneje obravnavali vsakega izmed njiju posebej. ``` def add_season_year(df, premiered_series): def get_year(s): if type(s) == str: return int(s.split()[1]) else: return float('NaN') def get_season(s): if type(s) == str: return s.split()[0] else: return float('NaN') leta = premiered_series.apply(get_year) sezone = premiered_series.apply(get_season) df['leto'] = leta df['sezona'] = sezone add_season_year(serije, serije.premiered) ``` Sedaj si oglejmo, katere podatke smo zajeli. Začnimo z glavno tabelo, tabelo serij. 
Kot vidimo, so zajeta naslednja polja: * naslov * vrsta serije * število epizod * status predvajanja * čas premiere * vir originalnega materiala * trajanje posamezne epizode * primernost vsebine * ocena uporabnikov * število glasov * popularnost glede na število uporabnikov, ki so si serijo ogledali * leto izdaje * sezona izdaje ``` serije ``` V tabeli licensorjev so seveda shranjeni licensorji. To so podjetja, ki jim pripadajo pravice do serij. Niso pa to nujno podjetja, ki so proizvedle serijo, ampak jo samo izdajajo. Torej so nekako kot izdajatelji, ki naročajo avtorjem, kaj želijo imeti narejeno. ``` licensorji ``` Producerji imajo nadzor nad kreacijo serije. Odločajo se, kateri studio bo naredil animacije, katere glasovne igralce bodo pozvali k igranju, katere dele originalnega materiala bodo uporabili. Torej imajo v nadzoru širšo sliko proizvodnje. ``` producerji ``` Studiji so odgovorni za samo animacijo in proizvodnjo epizod kot celote. Čeprav so bolj proti dnu same hierarhije, imajo ravno oni največji vpliv na izgled serije, saj so odgovorni za vizualni del. ``` studiji ``` Na koncu pa imamo še tabelo žanrov, kjer je za vsako serijo shranjen širši žanr, v katerega spada. ``` zanri ``` ## 1. Vpliv posamičnih elementov na popularnost in oceno ### 1.1 Čas premiere Standardni časi premier so razdeljeni na 4 letne čase. V navadi je namreč, da se leto razdeli na 4 časovna obdobja po 12 tednov in v vsakem tednu izide nova epizoda. Zaradi tega so popularne 12 in 24 epizod dolge serije in studiji so primorani se močno držati teh rokov. V nasprotnem primeru se morajo višji v hierarhiji odločiti ali končati serijo na slabem mestu, ali pa zakupiti mesto za predajanje v naslednji sezoni. ``` serije.groupby('sezona').mean()[['score', 'votes', 'popularity']] ``` Opazimo, da je poletni čas rahla anomalija glede tako na popularnost, kot na število glasov. Pričakovali bi, da med sezonami ne bi bilo velikih razlik, saj lahko dobre serije izidejo kadarkoli. 
Najverjetneje gre za dejstvo, da imajo ljudje med poletjem preprosto več prostega časa. Tu še posebej pride do izraza razporeditev populacije glede na starost. Namreč otroci in najstniki ne samo da imajo poleti čas zaradi počitnic, ampak so tudi največji delež populacije, ki uporablja internet. Kombinacija teh dveh dejstev pripomore, da je poleti največ aktivnosti. ``` serije.groupby('sezona').mean().sort_values('popularity').plot.bar(y='popularity') serije.groupby('sezona').mean().sort_values('votes', ascending=False).plot.bar(y='votes') ``` Dejstvo, da so ocene serij bolj ali manj enake ne glede na čas premiere, toliko bolj potrdi domnevo, da gre pri povišani aktivnosti preko poletja res za večjo mero prostega časa pri uporabnikih. Namreč ni opazne razlike v kvaliteti serij, ki bi konstanto spodbujevale pogovore, slabe ali dobre. ``` serije.groupby('sezona').mean().sort_values('score', ascending=False).plot.bar(y='score') ``` Nato si poglejmo še leto premiere. Pričakovano je, da se bo z leti popularnost zmanjševala zaradi naslednjih razlogov: * novejša tehnologijo omogoča več kreativnosti in bolj osupljive možnosti animacije * dostop do starih serij je veliko težji * oboževalci starih serij so starejši in zato v delu populacije, ki interneta ne uporablja toliko kot mlajši ``` serije.groupby('leto').mean() ``` In res, novejše serije so bolj popularne od starejših. Zanimiv pa je padec popularnosti v zadnjih desetih letih. En možen faktor je, da je pri malo starejših serijah poteklo že dovolj časa, da si jo lahko uporabniki ogledajo. Pri novejših serijah, predvsem s povečanjem popularnosti ogleda preko interneta, ni dovolj časa za ogled vseh novo izdanih serij. ``` serije.groupby('leto').mean().plot(y='popularity') ``` Drug prispevajoč faktor je morda padec kvalitete v primerjavi z malo starejšimi serijami. Opazimo lahko višje ocene za serije v okolici leta 2010 v primerjavi s tistimi okoli leta 2000 in sedajšnimi. 
Z rastočo priljubljenostjo tega vira zabave, se povečuje število izdanih serij. Precej verjetno je, da je kvaliteta objektivno slabša zaradi večjega pritiska na studije, da proizvedejo čim več serij. ``` serije.groupby('leto').mean().plot(y='score') ``` ## 1.2 Vir originalnega materiala Velik del serij je animacija predhodnega materiala. Prednost je predvsem v tem, da je za originalne serije potrebno precej več vloženega časa, truda, planiranja, in kljub temu je možnost uspeha morda celo manjša. Namreč originalni material ima že privržence, ki so navdušeni nad pridobitvijo novega materiala v franšizi. Zato so dobljeni podatki smiselni. Popularnost iger, manga serij in "lahkih novel" je na splošno veliko večja od knjig, radia in glasbenih del. In animirane serije to toliko bolj potrdijo, saj obstaja veliko večje zanimanje za popularne franšize. ``` serije.groupby('source').mean() serije.groupby('source').mean().sort_values('popularity').plot.bar(y='popularity') ``` Ocene uporabnikov se tudi dokaj približno držijo popularnosti. Čeprav bi lahko prišlo do manjše kvalitete, saj bo serija uspešna že po zaslugi uspešnosti predhodnega materiala, se za res popularne franšize borijo res največja podjetja. In slaba adaptacija jim morda res ne uniči posla, vendar pa jim zagotovo prinese slab ugled s strani gledalcev serij. Poleg tega si lahko privoščijo bolj ambiciozne adaptacije, z boljšo glasbeno spremljavo, z boljšimi glasovni igralci in vse to skupaj privede v povprečju do večje kvalitete popularnih serij. ``` serije.groupby('source').mean().sort_values('score', ascending=False).plot.bar(y='score') ``` ## 1.3 Čas trajanja epizode Industrijski standard so postale približno 20-minutne epizode. Ta dolžina omogoča novo epizodo tedensko in ohrani določen nivo kvalitete. Zato je pričakovana večja popularnost pri 20-minutni dolžini epizode. 
Za druge dolžine epizod so odgovorne adaptacije drugih materialov, na primer par-minutni videospoti, eno-epizodne serije, kjer je njihova dolžina pač takšna, kot je. Za najdaljše dolžine epizod so odgovorni filmi. Za njihovo produkcijo gredo veliki stroški, je pa zato pričakovana visoka kvaliteta. ``` serije.groupby('duration').mean() ``` In res, opazimo višjo popularnost pri približno 25-minutni dolžini trajanja, saj je to trenutni standard. Višja popularnost je tudi pri enourni dolžini, kjer se nahajajo kratki filmi in filmi, razdrobljeni na več enournih delov. Trend tudi kaže na večjo popularnost preko enournih epizod. Gre namreč za daljše filme, ki jih je dovolj malo, da imajo ljudje dovolj časa za ogled vseh. Ni tiste nasičenosti, kjer bi izhajalo po deset filmov na sezono, in so zato dovolj redki, da si ljudje vzamejo čas za njihov ogled kljub navalu vseh možnih serij. ``` serije.groupby('duration').mean().plot(y='popularity') ``` Predvsem pri oceni uporabnikov se opazi precej višja kvaliteta pri daljših epizodah, kar je smiselno. ``` serije.groupby('duration').mean().plot(y='score') ``` ## 1.4 Primernost vsebine Primernost vsebine nakazuje katerih tem se serija dotika. Razporeditev tukaj bo bolj nakazovala porazdelitev gledajoče populacije in manj vpliv same primernosti. ``` serije.groupby('rating').mean() ``` Opazimo precejšnjo razliko med popularnostjo vsebin, ciljanih na najstnike in mlade odrasle, ter ali povsem odraslimi vsebinami ali pa otroškimi vsebinami. Tu res pride do izraza, kdo gleda katere serije. Največji delež gledalcev je ravno moških in to najstnikov in mladih odraslih. Zato je smiselno, da se izdaja popularne serije, ki ciljajo na ta del demografike. ``` serije.groupby('rating').mean().sort_values('popularity').plot.bar(y='popularity') ``` V primerjavi s popularnostjo, pri ocenah ni tako velike razlike. Tukaj so celo dobro ocenjene manj popularne, otroške vsebine, medtem ko *R+ - Mild Nudity* pade skoraj na dno. 
``` serije.groupby('rating').mean().sort_values('score', ascending=False).plot.bar(y='score') ``` ## 1.5 Ocena serije Ocena serije je povezana s popularnostjo v tem smislu, da naj bi kvaliteta serije pripomogla k njeni popularnosti. Seveda naj bi bile izjeme, kjer dobro ocenjena serija ni popularna in kjer popularne serije niso najbolje ocenjene. ``` serije.groupby('score').mean() ``` Opazimo, da je precej več primerov, kjer je serija popularna z nižjo oceno. To se lahko pripeti pri slabih adaptacijah v popularni franšizi. Druga možnost pa je serija, ki je na nek način stereotipična verzija trenutnega stanja. Torej vsebuje standardne tipe likov, vsebuje dostikrat uporabljeno verzijo oblike zgodbe. Taka serija bo popularna, vendar pa bo slabo ocenjena, saj se ocenjevanja serij poslužijo bolj zainteresirani uporabniki, ki so morda že naveličani klišejev. ``` serije.plot.scatter(x='score', y='popularity') ``` ## 1.6 Število glasov Pričakovali bi, da se število glasov tesno ujema s popularnostjo. Morda so kakšne izjeme, kjer malo ljudi glasuje za dokaj priljubljeno serijo, vendar jih ne bi smelo biti veliko. ``` serije.groupby('votes').mean() ``` In res, boljšega prileganja se skoraj ne da dobiti. Opazimo kakšno izjemo popularne serije z malo glasovi. Lahko so to serije, ki so izšle pred kratkim in si še ni veliko ljudi ustvarilo takšnega mnenja, da bi lahko serijo ocenili. Morda pa gre za starejše popularne serije, kjer se ljudem ne zdi več potrebno podajati svojega mnenja, saj so že tako dobro vgrajene v kulturo. ``` serije.plot.scatter(x='votes', y='popularity') ``` Pri vplivu na oceno pa pride do večje raznolikosti. Slabo ocenjene serije so bile bolj redko ocenjene od dobrih. Ocen, slabših od 5 sploh ni za manj kot 20.000 glasov. Pri višjem številu glasov pa je precej raznolikosti. Skupaj s prejšnjim grafom sklepamo, da gre za najbolj popularne serije, kjer pa vemo, da imajo njihove ocene zmerna nihanja. 
``` serije.plot.scatter(x='votes', y='score') ``` ## 1.7 Vpletena podjetja Podjetja imajo določeno percepcijo v očeh gledalcev. Že znano ime studija lahko prinese val zanimanja za serijo preden je sploh narejena. Prav tako pa lahko slab ugled prinese slabe ocene jeznih gledalcev, ki so po njihovem mnenju bili prikrajšani dobre dolgo pričakovane adaptacije. Tukaj je tudi pomembno, da so nekatera podjetja vpletena v veliko več serijah, kot ostala. Zato imajo dostikrat nižjo popularnost in nižjo oceno serij, saj jih izdajo toliko, da ne morejo biti vse dobre. Po drugi strani pa ravno njim pripadejo adaptacije najbolj popularnih serij iz drugih medijev. Najprej si poglejmo deset najbolj popularnih licensorjev. Recimo za *Warner Bros. Pictures*, *DreamWorks* in *Walt Disney Studios* opazimo dolgo povprečno dolžino epizod. Torej so prevzeli nase več filmov, ki so v povprečju bolje ocenjeni. Na vrhu so torej zato, ker so bili vpleteni v proizvodnji popularnih filmov. Za večino teh podjetij je tudi res, da niso bila vpletena v veliko serijah, torej so na vrhu le zaradi dveh, treh uspešnic in niso bili vpleteni v ostalih. ``` serije_z_licensorji = pd.merge(serije, licensorji) serije_z_licensorji.groupby('licensor').mean().sort_values('popularity')[:10] ``` Pri ocenah se med najboljših deset pojavijo tudi nekateri najbolj popularni licensorji. Ponovno, tu so zaradi izdaje malega števila uspešnic in niso vpleteni v regularni proizvodnji serij. ``` serije_z_licensorji.groupby('licensor').mean().sort_values('score', ascending=False)[:10] ``` Čeprav imajo producerji velik vpliv na končen izdelek, pade večina percepcije na studije, ki so opravili animacije. Zato menimo, da nimajo direktnega vpliva na popularnost in ocene, temveč njihova razporeditev samo refleksira popularnost serij. 
``` serije_s_producerji = pd.merge(serije, producerji) serije_s_producerji.groupby('producer').mean().sort_values('popularity')[:10] serije_s_producerji.groupby('producer').mean().sort_values('score', ascending=False)[:10] ``` Kot smo že omenili, studiji imajo velik vpliv na popularnost serij. Vsak studio ima svoj stil animacije in svoj končni izgled. Spet, tukaj prevladajo studiji z malim številom relativno uspešnih serij. Čeprav veliki studiji izdelajo najbolj popularne serije, jih izdelajo veliko vsako sezono, sicer ni smiselno imeti toliko zaposlenih. In to jim znižuje povprečje. ``` serije_s_studiji = pd.merge(serije, studiji) serije_s_studiji.groupby('studio').mean().sort_values('popularity')[:10] serije_s_studiji.groupby('studio').mean().sort_values('score', ascending=False)[:10] ``` ## 1.8 Žanr serije Žanr serije je povezan s popularnostjo v tem smislu, da različne dele populacije zanimajo različne stvari. Torej med popularnimi žanri pričakujemo teme, ki so blizu najstnikom in mlajšim odraslim. Je pa tukaj tudi možno, da bodo na vrhu bolj specializirani in usmerjeni žanri, saj bo adaptiran le dober originalni material, medtem ko za žanre, kjer bi pričakovali širše zanimanje, jim bo popularnost zniževalo veliko število sprejemljivih, a ne odličnih serij. Do neke mere se to res opazi. Akcijske serije, komedije, fantazije npr. niso proti vrhu. Tam so psihološke, vampirske serije, trilerji. Se pa najdejo na vrhu tudi haremske in shojo serije. (namenjene najstnicam, tj. romantične in emocionalne vsebine) Bolj zahodnjaški žanri so proti dnu. Različne avtomobilske, športne, znanstveno-fantastične serije niso priljubljene na Japonskem in zato ni dobrega originalnega materiala, po katerem bi bilo veliko izvedenih priljubljenih serij. 
```
# Attach genre rows to each series (pd.merge joins on the shared column —
# presumably the series id; TODO confirm against how `zanri` was built).
serije_z_zanri = pd.merge(serije, zanri)
# Mean of every numeric column per genre; bar chart of mean popularity, ascending.
serije_z_zanri.groupby('genre').mean().sort_values('popularity').plot.bar(y='popularity')
# Same aggregation, but bar chart of mean user score, best-rated genres first.
serije_z_zanri.groupby('genre').mean().sort_values('score', ascending=False).plot.bar(y='score')
```
github_jupyter
# Softmax exercise *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the course website.* This exercise is analogous to the SVM exercise. You will: - implement a fully-vectorized **loss function** for the Softmax classifier - implement the fully-vectorized expression for its **analytic gradient** - **check your implementation** with numerical gradient - use a validation set to **tune the learning rate and regularization** strength - **optimize** the loss function with **SGD** - **visualize** the final learned weights ``` import random import numpy as np from comp411.data_utils import load_CIFAR10 import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading extenrnal modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000, num_dev=500): """ Load the CIFAR-10 dataset from disk and perform preprocessing to prepare it for the linear classifier. These are the same steps as we used for the SVM, but condensed to a single function. 
""" # Load the raw CIFAR-10 data cifar10_dir = 'comp411/datasets/cifar-10-batches-py' # Cleaning up variables to prevent loading data multiple times (which may cause memory issue) try: del X_train, y_train del X_test, y_test print('Clear previously loaded data.') except: pass X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # subsample the data mask = list(range(num_training, num_training + num_validation)) X_val = X_train[mask] y_val = y_train[mask] mask = list(range(num_training)) X_train = X_train[mask] y_train = y_train[mask] mask = list(range(num_test)) X_test = X_test[mask] y_test = y_test[mask] mask = np.random.choice(num_training, num_dev, replace=False) X_dev = X_train[mask] y_dev = y_train[mask] # Preprocessing: reshape the image data into rows X_train = np.reshape(X_train, (X_train.shape[0], -1)) X_val = np.reshape(X_val, (X_val.shape[0], -1)) X_test = np.reshape(X_test, (X_test.shape[0], -1)) X_dev = np.reshape(X_dev, (X_dev.shape[0], -1)) # Normalize the data: subtract the mean image mean_image = np.mean(X_train, axis = 0) X_train -= mean_image X_val -= mean_image X_test -= mean_image X_dev -= mean_image # add bias dimension and transform into columns X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))]) X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))]) X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))]) X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))]) return X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev # Invoke the above function to get our data. 
X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev = get_CIFAR10_data() print('Train data shape: ', X_train.shape) print('Train labels shape: ', y_train.shape) print('Validation data shape: ', X_val.shape) print('Validation labels shape: ', y_val.shape) print('Test data shape: ', X_test.shape) print('Test labels shape: ', y_test.shape) print('dev data shape: ', X_dev.shape) print('dev labels shape: ', y_dev.shape) ``` ## Softmax Classifier Your code for this section will all be written inside **comp411/classifiers/softmax.py**. ``` # First implement the naive softmax loss function with nested loops. # Open the file comp411/classifiers/softmax.py and implement the # softmax_loss_naive function. from comp411.classifiers.softmax import softmax_loss_naive import time # Generate a random softmax weight matrix and use it to compute the loss. W = np.random.randn(3073, 10) * 0.0001 loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0) # As a rough sanity check, our loss should be something close to -log(0.1). print('loss: %f' % loss) print('sanity check: %f' % (-np.log(0.1))) ``` **Inline Question 1** Why do we expect our loss to be close to -log(0.1)? Explain briefly. $\color{blue}{\textit Your Answer:}$ *The CIFAR10 dataset has 10 classes. In our latest attempt we have initialized the W matrix weights in a random manner. On the other hand the softmax value corresponds to the probability of correctness of each class. Since no weight tuning has happened yet, the probability of the correct class is about 0.1, and -log(0.1) will be the loss.* ``` # Complete the implementation of softmax_loss_naive and implement a (naive) # version of the gradient that uses nested loops. loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0) # As we did for the SVM, use numeric gradient checking as a debugging tool. # The numeric gradient should be close to the analytic gradient. 
from comp411.gradient_check import grad_check_sparse f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 0.0)[0] grad_numerical = grad_check_sparse(f, W, grad, 10) # similar to SVM case, do another gradient check with regularization loss, grad = softmax_loss_naive(W, X_dev, y_dev, 5e1) f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 5e1)[0] grad_numerical = grad_check_sparse(f, W, grad, 10) # Now that we have a naive implementation of the softmax loss function and its gradient, # implement a vectorized version in softmax_loss_vectorized. # The two versions should compute the same results, but the vectorized version should be # much faster. tic = time.time() loss_naive, grad_naive = softmax_loss_naive(W, X_dev, y_dev, 0.000005) toc = time.time() print('naive loss: %e computed in %fs' % (loss_naive, toc - tic)) from comp411.classifiers.softmax import softmax_loss_vectorized tic = time.time() loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_dev, y_dev, 0.000005) toc = time.time() print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic)) # As we did for the SVM, we use the Frobenius norm to compare the two versions # of the gradient. grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro') print('Loss difference: %f' % np.abs(loss_naive - loss_vectorized)) print('Gradient difference: %f' % grad_difference) # Use the validation set to tune hyperparameters (regularization strength and # learning rate). You should experiment with different ranges for the learning # rates and regularization strengths; if you are careful you should be able to # get a classification accuracy of over 0.35 on the validation set. 
from comp411.classifiers import Softmax results = {} best_val = -1 best_softmax = None learning_rates = [1e-7, 5e-7, 15e-7] regularization_strengths = [2.5e4, 5e4, 5e2] ################################################################################ # TODO: # # Use the validation set to set the learning rate and regularization strength. # # This should be identical to the validation that you did for the SVM; save # # the best trained softmax classifer in best_softmax. # ################################################################################ # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # pass for n in learning_rates: for m in regularization_strengths: softmax_model = Softmax() softmax_model.train(X_train, y_train, learning_rate=n, reg=m, num_iters=1000) y_train_pred = softmax_model.predict(X_train) train_accuracy = np.mean(y_train_pred == y_train) y_val_pred = softmax_model.predict(X_val) val_accuracy = np.mean(y_val_pred == y_val) results[(n,m)] = (train_accuracy, val_accuracy) if best_val < val_accuracy: best_val = val_accuracy best_softmax = softmax_model # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # Print out results. for lr, reg in sorted(results): train_accuracy, val_accuracy = results[(lr, reg)] print('lr %e reg %e train accuracy: %f val accuracy: %f' % ( lr, reg, train_accuracy, val_accuracy)) print('best validation accuracy achieved during cross-validation: %f' % best_val) # evaluate on test set # Evaluate the best softmax on test set y_test_pred = best_softmax.predict(X_test) test_accuracy = np.mean(y_test == y_test_pred) print('softmax on raw pixels final test set accuracy: %f' % (test_accuracy, )) ``` **Inline Question 2** - *True or False* Suppose the overall training loss is defined as the sum of the per-datapoint loss over all training examples. It is possible to add a new datapoint to a training set that would leave the SVM loss unchanged, but this is not the case with the Softmax classifier loss. 
$\color{blue}{\textit Your Answer:}$ *T* $\color{blue}{\textit Your Explanation:}$ *This is possible. For example, if we add a data point to the training set that satisfies the hinge loss margin in the expression max(0, prediction - label + margin), we won't have a gradient for this data point, so the SVM loss is unchanged. But in the case of the softmax function, any data point that we add to the training set will have a softmax value and a negative log likelihood, so every data point in the training set tunes the weight matrix a little bit.* ``` # Visualize the learned weights for each class w = best_softmax.W[:-1,:] # strip out the bias w = w.reshape(32, 32, 3, 10) w_min, w_max = np.min(w), np.max(w) classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] for i in range(10): plt.subplot(2, 5, i + 1) # Rescale the weights to be between 0 and 255 wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min) plt.imshow(wimg.astype('uint8')) plt.axis('off') plt.title(classes[i]) ```
github_jupyter
# Companion notebook to EF's training on optimization with data uncertainty Sander Vlot & Joaquim Gromicho, 2021 --- > During this course we make use of Jupyter notebooks hosted by [Google Colab](https://colab.research.google.com/notebooks/intro.ipynb). The usage of this platform is allowed by ORTEC for **educational and personal experimentation only**. May you consider using it for a project please consult the IT department. Notebooks deployed on `colab` require neither python nor other dependencies to be installed on your own machine, you only need a browser (preferably `chrome`) and you may also need a google account if you want to execute them. --- This notebook has been setup for `colab`. Check [the pyomo cookbook](https://jckantor.github.io/ND-Pyomo-Cookbook/) and in particular [the explanation on how to get pyomo and the solvers on colab](https://jckantor.github.io/ND-Pyomo-Cookbook/01.02-Running-Pyomo-on-Google-Colab.html). May you want to use on your own python distribution, then you should care for the installation of the required packages and subsidiary applications. 
``` import sys at_colab = "google.colab" in sys.modules if at_colab: _=!pip install -q pyomo _=!wget -N -q "https://ampl.com/dl/open/ipopt/ipopt-linux64.zip" _=!unzip -o -q ipopt-linux64 _=!apt-get install -y -q coinor-cbc _=!pip install -q cplex _=!pip install -q gurobipy _=!pip install -q xpress !pyomo help --solvers import pyomo.environ as pyo cbc = pyo.SolverFactory('cbc') ipopt = pyo.SolverFactory('ipopt') cplex = pyo.SolverFactory('cplex_direct') gurobi = pyo.SolverFactory('gurobi_direct') xpress = pyo.SolverFactory('xpress_direct') %matplotlib inline ``` # Recall the Alice's production planning model Alice owns a company that produces trophies for * football * wood base, engraved plaque, brass football on top * €12 profit and uses 4 dm of wood * golf * wood base, engraved plaque, golf ball on top * €9 profit and uses 2 dm of wood Alice’s current stock of raw materials * 1000 footballs * 1500 golf balls * 1750 plaques * 480 m (4800 dm) of wood > Alice wonders what the optimal production plan should be, in other words: how many football and how many golf trophies should Alice produce to maximize his profit while respecting the availability of raw materials? *** The following model __maximizes__ Alice's profit by deciding the number of $x_1$ football and $x_2$ golf trophies to produce. $$ \begin{array}{rrcrcl} \max & 12x_1 & + & 9x_2 \\ s.t. 
& x_1 & & & \leq & 1000 \\ & & & x_2 & \leq & 1500 \\ & x_1 & + & x_2 & \leq & 1750 \\ & 4x_1 & + & 2x_2 & \leq & 4800 \\ & x_1 & , & x_2 & \geq & 0 \\ \end{array} $$ ``` trophies = [ 'Football', 'Golf' ] profits = { 'Football' : 12, 'Golf' : 9 } wood = { 'Football' : 4, 'Golf' : 2 } Alice = pyo.ConcreteModel('Alice') Alice.x = pyo.Var(trophies,within=pyo.NonNegativeReals) Alice.profit = pyo.Objective(expr = sum([profits[t]*Alice.x[t] for t in trophies]), sense=pyo.maximize) Alice.footballs = pyo.Constraint(expr = Alice.x['Football'] <= 1000) Alice.golfBalls = pyo.Constraint(expr = Alice.x['Golf'] <= 1500) Alice.plaques = pyo.Constraint(expr = sum([Alice.x[t] for t in trophies]) <= 1750) Alice.wood = pyo.Constraint(expr = sum(wood[t]*Alice.x[t] for t in trophies) <= 4800 ) %time results = cbc.solve(Alice) print(results.solver.status, results.solver.termination_condition ) print(Alice.profit.expr()) print([Alice.x[t].value for t in trophies]) Alice.display() Alice.pprint() def ShowModelComponents( model ): for v in model.component_objects(pyo.Var, active=True): print ("Variable ",v) varobject = getattr(model, str(v)) for index in varobject: print (" ",index, varobject[index].value) for o in model.component_objects(pyo.Objective, active=True): print ("Objective ",o) varobject = getattr(model, str(o)) for index in varobject: print (" ",index, varobject[index].expr()) for c in model.component_objects(pyo.Constraint, active=True): print ("Constraint",c) varobject = getattr(model, str(c)) for index in varobject: print (" ",index, varobject[index].uslack()) ShowModelComponents( Alice ) def ShowDuals( model ): import fractions # display all duals print ("Duals") for c in model.component_objects(pyo.Constraint, active=True): print ("Constraint ",c) for index in c: print (" ", index, str(fractions.Fraction(model.dual[c[index]]))) Alice.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT) %time results = cbc.solve(Alice) print(results.solver.status, 
results.solver.termination_condition ) ShowDuals( Alice ) def JustSolution( model ): return [ pyo.value(model.profit) ] + [ pyo.value(model.x[i]) for i in trophies ] JustSolution( Alice ) ``` ## Betty: a tiny bit of data science... We start by simulating two samples of observed wood lengths for `f` football trophies and `g` golf trophies. ``` import numpy as np np.random.seed(2021) n = 2000 f = np.random.lognormal(np.log(4.), .005, n) g = np.random.lognormal(np.log(2.), .005, n) print(f) print(g) print( min(f), max(f), min(g), max(g) ) import matplotlib.pyplot as plt plt.plot( f, '.' ) plt.plot( g, '.' ) plt.show() ``` What is the consequence of uncertainty? We compare the cumulative lengths with the nominal ones. ``` cs = np.cumsum(g) - np.cumsum( [2]*len(g) ) plt.plot(cs) plt.show() plt.pie( [ sum( cs > 0 ), sum( cs <= 0 ) ], labels = [ 'trouble!', 'ok' ], autopct='%1.1f%%', shadow=True, startangle=90, colors=[ 'red', 'green' ]) plt.show() ``` A very simple and somehow naïf uncertainty region can be taken as the observed minimal box around the data. ``` import matplotlib.patches as patches plt.figure() plt.plot( f, g, '.' ) currentAxis = plt.gca() currentAxis.add_patch(patches.Rectangle((min(f), min(g)), max(f)-min(f), max(g)-min(g),fill=False,color='r')) plt.show() print( min(f), max(f), min(g), max(g) ) ``` ## Caroline's robust model for box uncertainty in wood consumption Suppose now that Alice notices that not _exactly_ 4 and 2 dm of wood are used, but some fluctuations are observed. Alice wants to be __sure__ that her model does not violate the wood constraint, therefore the following should hold: $$ \begin{array}{rrcrcl} \max & 12 x_1 & + & 9 x_2 \\ s.t. & x_1 & & & \leq & 1000 \\ & & & x_2 & \leq & 1500 \\ & x_1 & + & x_2 & \leq & 1750 \\ & a_1 x_1 & + & a_2 x_2 & \leq & 4800 & \forall \ell \leq a \leq u \\ & x_1 & , & x_2 & \geq & 0 \\ \end{array} $$ *** A bit of linear duality (or even better: an introduction to robust optimization!) 
helps Alice how to deal with the above model that has an infinite number of constraints. The first thing to notice is that the wood consumption is modeled by constraints that are equivalent to bounding the following optimization problem: $$ \begin{array}{rrr} \max & x_1 a_1 + x_2 a_2 & \leq 4800 \\ s.t. & \ell \leq a \leq u \end{array} $$ Or $$ \begin{array}{rrr} \max & x_1 a_1 + x_2 a_2 & \leq 4800 \\ s.t. & a \leq u \\ & -a \leq -\ell \end{array} $$ Now we use linear duality to realize that the above is equivalent to: $$ \begin{array}{rrr} \min & u y - \ell w & \leq 4800 \\ s.t. & y - w = x \\ & y \geq 0, w \geq 0 \end{array} $$ and the constraint imposed by the last problem is equivalent to: $$ \begin{array}{rrl} & u y - \ell w & \leq 4800 \\ & y - w & = x \\ & y \geq 0, w \geq 0 \end{array} $$ The only thing we need to do is add variables and constraints to Alice's model. # A model in `pyomo` ``` def AliceWithBoxUncertainty( lower, upper, domain=pyo.NonNegativeReals ): Alice = pyo.ConcreteModel('AliceBox') Alice.x = pyo.Var(trophies,within=domain) Alice.profit = pyo.Objective(expr = sum([profits[t]*Alice.x[t] for t in trophies]), sense=pyo.maximize) Alice.footballs = pyo.Constraint(expr = Alice.x['Football'] <= 1000) Alice.golfBalls = pyo.Constraint(expr = Alice.x['Golf'] <= 1500) Alice.plaques = pyo.Constraint(expr = sum([Alice.x[t] for t in trophies]) <= 1750) Alice.y = pyo.Var(trophies,domain=pyo.NonNegativeReals) Alice.w = pyo.Var(trophies,domain=pyo.NonNegativeReals) Alice.robustWood = pyo.Constraint(expr = sum([upper[t]*Alice.y[t] - lower[t]*Alice.w[t] for t in trophies]) <= 4800) def PerVariable( model, t ): return model.x[t] == model.y[t] - model.w[t] Alice.perVariable = pyo.Constraint(trophies,rule=PerVariable) return Alice lower = upper = {} lower['Football'] = min(f) upper['Football'] = max(f) lower['Golf'] = min(g) upper['Golf'] = max(g) Alice = AliceWithBoxUncertainty( lower, upper, domain=pyo.NonNegativeIntegers ) %time results = cbc.solve(Alice) 
print(results.solver.status, results.solver.termination_condition ) JustSolution( Alice ) # you can play with the amount of uncertainty. # In particular, if below you make delta equal to 0 you obtain the same result as the nominal model. delta = 0.05 def AliceWithSymmetricalBoxUncertainty( delta, domain=pyo.NonNegativeIntegers ): lower = { trophy : wood[trophy] - delta for trophy in wood } upper = { trophy : wood[trophy] + delta for trophy in wood } return AliceWithBoxUncertainty( lower, upper, domain=domain ) Alice = AliceWithSymmetricalBoxUncertainty( delta ) %time results = cbc.solve(Alice) print(results.solver.status, results.solver.termination_condition ) JustSolution( Alice ) ``` # Integer optimization Alice's model gave integer solutions, but not the robust version. If we need integer solutions then we should impose that to the nature of the variables, which in this case of _box uncertainty_ is easy to do since the model remains linear, although it will be mixed integer. ``` Alice = AliceWithBoxUncertainty( lower, upper, domain=pyo.NonNegativeIntegers ) %time results = cbc.solve(Alice) print(results.solver.status, results.solver.termination_condition ) JustSolution( Alice ) import pandas df = pandas.DataFrame() for delta in np.linspace(0,.5,21): Alice = AliceWithSymmetricalBoxUncertainty( delta, domain=pyo.NonNegativeIntegers ) cbc.solve(Alice) results = JustSolution( Alice ) df.at[delta,'value'] = results[0] df.at[delta,trophies[0]] = results[1] df.at[delta,trophies[1]] = results[2] df df.plot() df[['Football','Golf']].plot() ``` # Cardinality constrained uncertainty Each $a_j$ may deviate by at most $\pm \delta_j$ from the nominal value $\bar{a}_j$ but no more than $\Gamma$ will actually deviate. $$ \begin{array}{rrcrcl} \max & 12 x_1 & + & 9 x_2 \\ s.t. 
& x_1 & & & \leq & 1000 \\ & & & x_2 & \leq & 1500 \\ & x_1 & + & x_2 & \leq & 1750 \\ & a_1 x_1 & + & a_2 x_2 & \leq & 4800 & \forall a,y : a_j=\bar{a}_j+\delta_jy_j, \|y\|_\infty \leq 1, \|y\|_1\leq \Gamma \\ & x_1 & , & x_2 & \geq & 0 \\ \end{array} $$ As we have seen on the previous lecture, Lagrange duality yields the following modification to the problem as equivalent to the robust model stated above: $$ \begin{array}{rrcrcrcrcrcrcl} \max & 12 x_1 & + & 9 x_2 \\ s.t. & x_1 & & & & & & & & & \leq & 1000 \\ & & & x_2 & & & & & & & \leq & 1500 \\ & x_1 & + & x_2 & & & & & & & \leq & 1750 \\ & a_1 x_1 & + & a_2 x_2 & + & \lambda\Gamma & + & z_1 & + & z_2 & \leq & 4800 \\ &-d_1 x_1 & & & + & \lambda & + & z_1 & & & \geq & 0 \\ & & &-d_2 x_2 & + & \lambda & & & + & z_2 & \geq & 0 \\ & x_1 & , & x_2 & , & \lambda & , & z_1 & , & z_2 & \geq & 0 \\ \end{array} $$ ``` def AliceWithGammaUncertainty( delta, gamma, domain=pyo.NonNegativeReals ): Alice = pyo.ConcreteModel('AliceGamma') Alice.x = pyo.Var(trophies,within=domain) Alice.profit = pyo.Objective(expr = sum([profits[t]*Alice.x[t] for t in trophies]), sense=pyo.maximize) Alice.footballs = pyo.Constraint(expr = Alice.x['Football'] <= 1000) Alice.golfBalls = pyo.Constraint(expr = Alice.x['Golf'] <= 1500) Alice.plaques = pyo.Constraint(expr = sum([Alice.x[t] for t in trophies]) <= 1750) Alice.z = pyo.Var(trophies,domain=pyo.NonNegativeReals) Alice.lam = pyo.Var(domain=pyo.NonNegativeReals) Alice.robustWood = pyo.Constraint( \ expr = sum([wood[t]*Alice.x[t] for t in trophies]) \ + gamma * Alice.lam \ + sum(Alice.z[t] for t in trophies) <= 4800) def up_rule( model, t ): return model.z[t] >= delta * model.x[t] - model.lam def down_rule( model, t ): return model.z[t] >= -delta * model.x[t] - model.lam Alice.up = pyo.Constraint(trophies,rule=up_rule) Alice.down = pyo.Constraint(trophies,rule=down_rule) return Alice Alice = AliceWithGammaUncertainty( 0.01, 2, domain=pyo.NonNegativeIntegers ) %time results = cbc.solve(Alice) 
print(results.solver.status, results.solver.termination_condition ) JustSolution(Alice) ``` # Ball uncertainty As [the documentation](https://pyomo.readthedocs.io/en/stable/library_reference/kernel/conic.html) says a conic constraint is expressed in 'pyomo' in simple variables. This [table](https://pyomo.readthedocs.io/en/stable/library_reference/kernel/syntax_comparison.html) is very useful. A straightforward remodulation leads to that: $$ a_1x_1+a_2x_2 + \Omega \|x\| \leq 4800 $$ $$ \Omega \|x\| \leq 4800 - a_1x_1 - a_2x_2 $$ $$ \|\Omega x\| \leq 4800 - a_1x_1 - a_2x_2 $$ By defining $y = 4800 - a_1x_1 - a_2x_2$ we may write: $$ \Omega^2 \|x\|^2 \leq y^2 $$ $$ (\Omega x_1)^2 + (\Omega x_2)^2 \leq y^2 $$ $$ \|w\|^2 \leq y^2 $$ with $w = \Omega x$. ## Documentation says that we need to use the kernel now ``` import pyomo.kernel as pyk def AliceWithBallUncertainty( omega, domain_type=pyk.RealSet ): idxTrophies = range( len(trophies) ) Alice = pyk.block() Alice.x = pyk.variable_list() for i in idxTrophies: Alice.x.append( pyk.variable(lb=0,domain_type=domain_type) ) Alice.profit = pyk.objective(expr = sum(profits[trophies[i]]*Alice.x[i] for i in idxTrophies), sense=pyk.maximize) Alice.footballs = pyk.constraint(expr = Alice.x[0] <= 1000) Alice.golfBalls = pyk.constraint(expr = Alice.x[1] <= 1500) Alice.plaques = pyk.constraint(expr = sum([Alice.x[i] for i in idxTrophies]) <= 1750) Alice.y = pyk.variable(lb=0) Alice.w = pyk.variable_list() for i in idxTrophies: Alice.w.append( pyk.variable(lb=0) ) Alice.wood = pyk.constraint( expr = Alice.y == 4800 - sum(wood[trophies[i]]*Alice.x[i] for i in idxTrophies) ) Alice.xtow = pyk.constraint_list() for i in idxTrophies: Alice.xtow.append( pyk.constraint( expr = Alice.w[i] == omega * Alice.x[i] ) ) from pyomo.core.kernel.conic import quadratic Alice.robust = quadratic(Alice.y,Alice.w) return Alice ``` ## Now the problem is nonlinear ``` Alice = AliceWithBallUncertainty( 0.1 ) %time results = ipopt.solve(Alice) 
print(results.solver.status, results.solver.termination_condition ) print(pyk.value(Alice.profit)) print( [pyk.value(Alice.x[i]) for i in range(len(Alice.x))] ) ``` ## But `cplex`, `gurobi` and `xpress` support second order cones ``` conicsolver = gurobi %time results = conicsolver.solve(Alice) print(results.solver.status, results.solver.termination_condition ) print(pyk.value(Alice.profit)) print( [pyk.value(Alice.x[i]) for i in range(len(Alice.x))] ) ``` ## And therefore we can also have mixed integer models ``` Alice = AliceWithBallUncertainty( 0.1, domain_type=pyk.IntegerSet ) %time results = conicsolver.solve(Alice) print(results.solver.status, results.solver.termination_condition ) print(pyk.value(Alice.profit)) print( [pyk.value(Alice.x[i]) for i in range(len(Alice.x))] ) ``` ## Final note: maybe useful to recall that in python you can always ask for help... ``` help(Alice.y) help(Alice.robust) ``` # How to bring second order cones into the `pyomo.environ` Noting that $\| x \| \leq t$ is for $t \geq 0$ equivalent to $\| x \|^2 \leq t^2$ and knowing that the commercial solvers (`gurobi`, `cplex` and `express`) support convex quadratic inequalities, we can model this variant in `pyomo.environ` as follows. Note that the essential part to make the model convex is having the rght hand side nonnegative. ``` def AliceWithBallUncertaintyAsSquaredSecondOrderCone(omega,domain=pyo.NonNegativeReals): Alice = pyo.ConcreteModel('Alice') Alice.x = pyo.Var(trophies,within=domain) # the nonegativity of this variable is essential! 
Alice.y = pyo.Var(within=pyo.NonNegativeReals) Alice.profit = pyo.Objective(expr = sum([profits[t]*Alice.x[t] for t in trophies]), sense=pyo.maximize) Alice.footballs = pyo.Constraint(expr = Alice.x['Football'] <= 1000) Alice.golfBalls = pyo.Constraint(expr = Alice.x['Golf'] <= 1500) Alice.plaques = pyo.Constraint(expr = sum([Alice.x[t] for t in trophies]) <= 1750) Alice.wood = pyo.Constraint(expr = Alice.y == 4800 - sum(wood[t]*Alice.x[t] for t in trophies) ) Alice.robust = pyo.Constraint(expr = sum((omega*Alice.x[t])**2 for t in trophies) <= Alice.y**2) return Alice Alice = AliceWithBallUncertaintyAsSquaredSecondOrderCone( 0.1, domain=pyo.NonNegativeIntegers ) %time results = cplex.solve(Alice) print(results.solver.status, results.solver.termination_condition ) JustSolution(Alice) ``` Note how the verbose `xpress` solver confirms that the convex quadratic constraint is recognized as conic. ``` Alice = AliceWithBallUncertaintyAsSquaredSecondOrderCone( 0.1, domain=pyo.NonNegativeIntegers ) %time results = xpress.solve(Alice,tee=True) print(results.solver.status, results.solver.termination_condition ) JustSolution(Alice) ```
github_jupyter
``` %%capture # get_corpus_path # get_txt_orig_path # get_txt_clean_path %run ../path_manager.ipynb # CorpusCleaner %run ../DataCleanerModule.ipynb ## Jupyter.notebook.save_checkpoint() # w = spacy.load('/R/spacy_data/en_core_web_sm/en_core_web_sm-2.0.0', disable=['parser', 'ner', 'textcat']) # from spacy.lemmatizer import Lemmatizer # from nltk.corpus import wordnet # l = Lemmatizer() # lm = l('substantially', 'ADV') # l = WordNetLemmatizer() # l.lemmatize('substantially', wordnet.VERB) # [token.lemma_ for token in w('initial jobless claim fall low level result to substantially lower expectation time.')] ENCHANT_INSTALLED ``` # Misspelling correction ``` CORPUS_ID = 'IDB' TXT_ORIG_DIR = get_txt_orig_path(CORPUS_ID) # TXT_CLEAN_DIR = os.path.join(get_corpus_path(CORPUS_ID), 'TXT_PCLEAN') # TXT_CLEAN_DIR = get_txt_clean_path(CORPUS_ID) metadata_file = pd.read_csv(os.path.join(get_corpus_path(CORPUS_ID), f'{CORPUS_ID.lower()}_metadata.csv'), low_memory=False) #metadata_file['url_txt'] = metadata_file['url_pdf'].map(lambda x: x[0]) metadata_file['url_txt'] = metadata_file['url_pdf'] metadata_file = metadata_file.dropna(axis=0, subset=['url_txt']) metadata_file.shape metadata_file.shape metadata_file.head(2) %%time cleaner=ParallelCorpusCleaner( use_spellchecker=True, use_respeller=True, use_lemmatizer=True, use_spacy=True, replacements_plurals_to_singular_file='../whitelists/whitelist_replacements_plurals_to_singular.csv', acronyms_file='../whitelists/whitelist_acronyms.csv', num_workers=1 ) cleaner.set_input_folder(TXT_ORIG_DIR) cleaner.set_output_folder(TXT_CLEAN_DIR) print(cleaner.input_folder) ``` # Run with subset dataset ``` cleaner.spell_cache_dict.keys() cleaner.input_folder cleaner.output_folder # filepath = os.path.join(TXT_ORIG_DIR, 'wb_19431317.txt') # with open(filepath, 'rb') as fl: # # Use context so that the file will be closed automatically upon exit from the context. 
# text = fl.read() # text = text.decode('utf-8', errors='ignore') # text = text.lower() # r = cleaner.clean_text(text, filen='wb_19431317') # l = cleaner.lemmatize_text(text) # print(r['exception']) # lmtzr_spacy = spacy.load('en_core_web_sm', disable=['parser', 'ner', 'textcat']) doclist = ['idb_0030d9b5e94e811312fd73f9eec71b16bd4b6219.txt', 'idb_ffe31f1b83295b9759637d168d15d9a6708c57fa.txt'] # r = cleaner.clean_doc('idb_0030d9b5e94e811312fd73f9eec71b16bd4b6219', cleaner.input_folder + '/idb_0030d9b5e94e811312fd73f9eec71b16bd4b6219.txt') import os import fnmatch doclist =[] # for file_name in os.listdir(r'C:\Users\odupr\Documents\GitHub\DR\decat-nlp-master\CORPUS\IDB\TXT_ORIG'): for file_name in os.listdir(cleaner.input_folder): if fnmatch.fnmatch(file_name, '*.txt'): doclist.append(file_name) print(len(doclist)) doclist=doclist[:-3] print(len(doclist)) print(type(doclist)) doclist[0] #doclist = ['idb_0030d9b5e94e811312fd73f9eec71b16bd4b6219.txt', 'idb_ffe31f1b83295b9759637d168d15d9a6708c57fa.txt'] # doclist = [f"{id}" for id in metadata_file['id'].sample(n=len(metadata_file), random_state=1029)] # %%time # # doclist=['436966.txt', '2002839.txt'] # doclist = [f"{id}" for id in metadata_file['id'].sample(n=len(metadata_file), random_state=1029)] print(len(doclist)) # wb_8886341 print(pd.datetime.now()) output=cleaner.batch_clean_docs( doclist, #default_docs_per_worker=10, batch_size=96, # x4 of num_workers save_docs=True, skip_existing=True, collect_spell_errors=False) print(pd.datetime.now()) output process_output_manager = multiprocessing.Manager() process_output_dict = process_output_manager.dict() batch = {} fileid = doclist[0] filen = os.path.join(TXT_ORIG_DIR, fileid) save_docs = True process_output_dict = {} p = multiprocessing.Process(target=cleaner.clean_doc, args=(fileid, filen, save_docs, process_output_dict)) # , kwargs=kwargs) batch[fileid] = p p.start() output #pineapple import os import fnmatch doclist =[] for file_name in 
os.listdir(r'C:\Users\odupr\Documents\GitHub\DR\decat-nlp-master\CORPUS\IDB\TXT_ORIG'): if fnmatch.fnmatch(file_name, '*.txt'): doclist.append(file_name) print(doclist[0:5]) fileid = doclist.pop(0) ``` ### Convert the output into a dataframe for easier processing ``` misc_metadata = pd.DataFrame(output) misc_metadata.shape misc_metadata.to_csv(os.path.join(get_corpus_path(CORPUS_ID), f'{CORPUS_ID.lower()}_derived_metadata.csv')) misc_metadata.head(2) ``` Log items that were skipped due to some unexpected errors ``` misc_metadata[misc_metadata.skipped != ''].skipped.to_csv(f'{CORPUS_ID.lower()}_skipped-items-{pd.datetime.now()}.log') ``` Generate new attributes based on the `lang` column from `misc_metadata`. ``` misc_metadata['language_detected'] = misc_metadata.lang.map(lambda x: x[0]) misc_metadata['language_score'] = misc_metadata.lang.map(lambda x: x[1]) misc_metadata = misc_metadata.drop('lang', axis=1) misc_metadata.index.name = 'id' misc_metadata = misc_metadata.reset_index() misc_metadata.head(2) misc_metadata[misc_metadata.skipped != ''].head() misc_metadata['id'].nunique(), misc_metadata.shape valid_processed_ids = misc_metadata[(misc_metadata.skipped == '') & (misc_metadata.write_status == True)].set_index('id').index len(valid_processed_ids) complete_metadata_file = pd.concat([metadata_file.drop_duplicates('id').set_index('id'), misc_metadata.set_index('id')[['language_detected', 'language_score', 'tokens', 'write_status']]], axis=1) complete_metadata_file = complete_metadata_file.loc[valid_processed_ids] complete_metadata_file.shape complete_metadata_file.head(2) complete_metadata_file.to_csv(os.path.join(get_corpus_path(CORPUS_ID), f'{CORPUS_ID.lower()}_metadata_complete.csv')) complete_metadata_file.head(2) # Processing output s = '''%%time cleaner = CorpusCleaner( use_spellchecker=True, use_respeller=True, use_lemmatizer=True, use_spacy=True, replacements_plurals_to_singular_file='whitelist_replacements_plurals_to_singular.csv', ) 
cleaner.set_input_folder(TXT_ORIG_DIR) cleaner.set_output_folder(TXT_CLEAN_DIR)''' %pprint print(s) ```
github_jupyter
def parse_address(addr):
    '''
    Split a free-form address string into components using a regex.

    Returns a tuple whose first element is the raw input string, followed
    by the captured groups of every regex match found in it (roughly:
    street, city, state, zip).  If the pattern never matches, the tuple
    contains only the raw input.
    '''
    pattern = r"([\w|.| |\d|-]+)[,\s*]+([\w|\s]+)[,\s*]*(.*)[-| ]+(.*)"
    parts = [addr]
    for m in re.finditer(pattern, addr, re.IGNORECASE | re.DOTALL):
        # m.groups() yields exactly group(1)..group(n) in order
        parts.extend(m.groups())
    return tuple(parts)
def fcnparse_mealtypes(attributesMEALTYPES):
    '''
    Return the list of meal labels mentioned in the free-text field.

    The search is case-insensitive; output order is always
    Breakfast, Lunch, Dinner (only those that appear in the text).
    '''
    options = []
    for keyword, label in (('breakfast', 'Breakfast'),
                           ('lunch', 'Lunch'),
                           ('dinner', 'Dinner')):
        if re.search(keyword, attributesMEALTYPES, re.IGNORECASE):
            options.append(label)
    return options
def updateServicesAirTable(_url, _header, _data):
    '''
    PATCH an existing Airtable "services" record with new field values.

    _url: full record URL (table URL + '/' + record id)
    _header: dict carrying the Authorization and Content-Type headers
    _data: {'fields': {...}} payload; JSON-serialized before sending

    Returns nothing; the HTTP response is intentionally ignored.
    Requires the third-party `requests` module imported at notebook top.
    '''
    payload = json.dumps(_data)
    requests.patch(_url, headers=_header, data=payload)
# if row[5] != "Lakeside School": # continue rowcount = 0 for elem in row: rowcount = rowcount+1 ################################################# #Upload to Service Table ################################################# #Parse data for SERVICES sheet: attributes.SITENAME, attributes.MEALTYPES, #attributes.SITESTATUS, attributes.DESCRIPTION, attributes.EMAIL, attributes.WEBLINK _temp = row[header["attributes.SITENAME"]].split(":") attributesSITENAME = _temp[1] if len(_temp)>1 else _temp[0] attributesMEALTYPES = fcnparse_mealtypes(row[header["attributes.MEALTYPES"]]) attributesSITESTATUS = row[header["attributes.SITESTATUS"]] attributesDESCRIPTION = row[header["attributes.DESCRIPTION"]]+" "+row[header["attributes.DESCRIPTIONLONG"]] attributesEMAIL = row[header["attributes.EMAIL"]] attributesWEBLINK = row[header["attributes.WEBLINK"]] airtableSERVICES = { "fields": { "Name": attributesSITENAME, "active": attributesSITESTATUS, "Description": attributesDESCRIPTION, "email": attributesEMAIL, "url": attributesWEBLINK } } print("Upload to service table -> " + attributesSITENAME) serviceId = getOrCreateAirTable(post_url_services, post_headers, airtableSERVICES, "Name") ################################################## #Upload to Organization Table ################################################## attributesSITEADDRESS = row[header["attributes.SITEADDRESS"]] full_address, address, city, state, zipcode = parse_address(attributesSITEADDRESS) airtableORGANIZATIONS = { "fields": { "name": row[header["attributes.SITETYPE"]], "description": row[header["attributes.DESCRIPTION"]] } } print("Upload to organization table") organizationId = getOrCreateAirTable(post_url_organizations, post_headers, airtableORGANIZATIONS, "name") ################################################### #Upload to Address Table ################################################### #Parse data for ADDRESS sheet: attributes.SITEADDRESS #print(attributesSITEADDRESS) #full_address, address, city, state, zipcode 
= fcnparse_siteaddress(attributesSITEADDRESS) #print(count+1, address, city, state, zipcode, '\n') # address = "1234 Memory Lane" # city = "San Jose" # state = "CA" # zipcode = "95124" airtableADDRESS = { "fields": { "address_1": address, "city": city, "State": state, "Zip Code": zipcode } } print("Upload to address table") addressId = getOrCreateAirTable(post_url_address, post_headers, airtableADDRESS, "address_1") #################################################### # Upload to Phone Table #################################################### # Parse data for PHONES sheet: attributes.PHONENUMBER #attributesPHONENUMBER = row[14] if row[14] != '' else "n/a" airtablePHONES = { "fields": { "number": row[header["attributes.PHONENUMBER"]], "services": [serviceId], "organizations": [organizationId] } } print("Upload to phone table") phoneId = getOrCreateAirTable(post_url_phones, post_headers, airtablePHONES, "number") ################################################## #Upload to Schedule Table ################################################## #Parse data for SCHEDULE sheet: attributes.OPENDAYS, attributes.OPENTIMES attributesOPENDAYS = row[header["attributes.OPENDAYS"]] attributesOPENTIMES = row[header["attributes.OPENTIMES"]] opendaystime = fcnparse_opendays_opentime(attributesOPENDAYS+ '@'+ attributesOPENTIMES) airtableSCHEDULE = { "fields": { "name": row[header["attributes.SITENAME"]], "services": [serviceId], "weekday": opendaystime[0].split(","), "opens_at": opendaystime[1], "closes_at": opendaystime[2] } } if airtableSCHEDULE["fields"]["weekday"] == ['']: airtableSCHEDULE["fields"]["description-x"] = "####### Please check data ######" airtableSCHEDULE["fields"]["weekday"] = [] print("Upload to schedule table") scheduleId = insertIntoAirTable(post_url_schedule, post_headers, airtableSCHEDULE) ################################################### #Upload to Location Table ################################################### airtableLOCATION = { "fields": { "name": 
row[header["attributes.SITENAME"]], "latitude": row[header["geometry.x"]], "longitude": row[header["geometry.y"]], "services": [ serviceId ], "schedule": [ scheduleId ], "address": [ addressId ] } } print("Upload to location table") LocationId = getOrCreateAirTable(post_url_location, post_headers, airtableLOCATION, "name") #################################################### # Update Services Table #################################################### airtableSERVICES = { "fields": { "Organization": [ organizationId ], "phones": [ phoneId ], "schedule":[ scheduleId ], "locations":[ LocationId ] } } updateServicesAirTable("https://api.airtable.com/v0/applQOkth8R2ns3qo/services/"+serviceId, post_headers, airtableSERVICES) count = count + 1 # if count == 4: # break #Prototyping code for parsing addresses from OSMPythonTools.nominatim import Nominatim nominatim = Nominatim() #myoutput = nominatim.query('1680 Foley ave') #print(myoutput.toJSON()) csvaddress = '592 Dunholme Way, san jose' myoutput = nominatim.query(csvaddress) print(myoutput) print(myoutput.toJSON()) print(len(myoutput.toJSON())) myjson = myoutput.toJSON()[0] print(myjson) print(type(myjson)) full_address = myjson['display_name'] print(full_address) print(type(full_address)) tmp = full_address.split(', ') print(tmp) firstelem = tmp[0] print(firstelem) print(firstelem.isnumeric()) if tmp[0].isnumeric(): address = tmp[0]+" "+tmp[1] city = tmp[-5:-4][0] state = tmp[-3:-2][0] zipcode = tmp[-2:-1][0] elif tmp[1].isnumeric(): address = tmp[1]+" "+tmp[2] city = tmp[-5:-4][0] state = tmp[-3:-2][0] zipcode = tmp[-2:-1][0] else: address = csvaddress+" ######CHECK ME######" city = "######CHECK ME######" zipcode = "######CHECK ME######" state = "California" print(address) print(city) print(state) print(zipcode) if ~len([]): print("yup empty") #blah = '1680' #blah.isdigit() print(1!=2) #Prototyping code for parsing meal types import re if re.search('mandye', 'Mandy Pande', re.IGNORECASE): print("Yes found") test_tup1 
= [1, 3, 5] test_tup2 = [4,] # printing original tuples print("The original tuple 1 : " + str(test_tup1)) print("The original tuple 2 : " + str(test_tup2)) # Ways to concatenate tuples # using + operator res = test_tup1 + test_tup2 # printing result print("The tuple after concatenation is : " + str(res)) ```
github_jupyter
# Linear Regression LinearRegression is a simple machine learning model where the response y is modelled by a linear combination of the predictors in X. The linear regression model implemented in the cuml library allows the user to change the fit_intercept, normalize and algorithm parameters. cuML’s LinearRegression expects either a cuDF DataFrame or a NumPy matrix and provides 2 algorithms to fit a linear mode: lSVD and Eig . SVD is more stable, but Eig (default) is much more faster. The Linear Regression function accepts the following parameters: 1. algorithm:‘eig’ or ‘svd’ (default = ‘eig’). Eig uses a eigendecomposition of the covariance matrix, and is much faster. SVD is slower, but is guaranteed to be stable. 2. fit_intercept:boolean (default = True). If True, LinearRegression tries to correct for the global mean of y. If False, the model expects that you have centered the data. 3. normalize:boolean (default = False). If True, the predictors in X will be normalized by dividing by it’s L2 norm. If False, no scaling will be done. The methods that can be used with the Linear regression are: 1. fit: Fit the model with X and y. 1. get_params: Sklearn style return parameter state 1. predict: Predicts the y for X. 1. set_params: Sklearn style set parameter state to dictionary of params. In order to convert your dataset to cudf format please read the cudf documentation on https://rapidsai.github.io/projects/cudf/en/latest/. 
# check if the mortgage dataset is present and then extract the data from it, else just create a random dataset for regression
import gzip
def load_data(nrows, ncols, cached = 'data/mortgage.npy.gz'):
    '''
    Load a regression dataset split 80/20 into train/test frames.

    nrows: number of samples to draw
    ncols: number of feature columns to keep
    cached: path to a gzipped .npy dump of the mortgage data; when the
            file is absent, a random sklearn regression problem is used

    Returns (df_X_train, df_X_test, df_y_train, df_y_test) as pandas
    DataFrames with columns named 'fea0', 'fea1', ...
    '''
    # split the dataset in a 80:20 split
    train_rows = int(nrows*0.8)
    if os.path.exists(cached):
        print('use mortgage data')
        with gzip.open(cached) as f:
            X = np.load(f)
        # BUG FIX: extract the label (column 4, 'adj_remaining_months_to_maturity')
        # BEFORE dropping it from the feature matrix.  The original code removed
        # column 4 first and then sliced column 4 of the reduced matrix, so the
        # label silently became the original column 5.
        y = X[:, 4:5]
        X = X[:, [i for i in range(X.shape[1]) if i != 4]]
        # sample nrows rows (with replacement) from the full dataset
        rindices = np.random.randint(0, X.shape[0]-1, nrows)
        X = X[rindices, :ncols]
        y = y[rindices]
        df_y_train = pd.DataFrame({'fea%d'%i: y[0:train_rows, i] for i in range(y.shape[1])})
        df_y_test = pd.DataFrame({'fea%d'%i: y[train_rows:, i] for i in range(y.shape[1])})
    else:
        print('use random data')
        X, y = make_regression(n_samples=nrows, n_features=ncols, n_informative=ncols, random_state=0)
        df_y_train = pd.DataFrame({'fea0': y[0:train_rows, ]})
        df_y_test = pd.DataFrame({'fea0': y[train_rows:, ]})
    df_X_train = pd.DataFrame({'fea%d'%i: X[0:train_rows, i] for i in range(X.shape[1])})
    df_X_test = pd.DataFrame({'fea%d'%i: X[train_rows:, i] for i in range(X.shape[1])})
    return df_X_train, df_X_test, df_y_train, df_y_test
load_data(nrows,ncols) print('training data',X_train.shape) print('training label',y_train.shape) print('testing data',X_test.shape) print('testing label',y_test.shape) print('label',y_test.shape) %%time # use the sklearn linear regression model to fit the training dataset skols = skLinearRegression(fit_intercept=True, normalize=True) skols.fit(X_train, y_train) %%time # calculate the mean squared error of the sklearn linear regression model on the testing dataset sk_predict = skols.predict(X_test) error_sk = mean_squared_error(y_test,sk_predict) %%time # convert the pandas dataframe to cudf format X_cudf = cudf.DataFrame.from_pandas(X_train) X_cudf_test = cudf.DataFrame.from_pandas(X_test) y_cudf = y_train.values y_cudf = y_cudf[:,0] y_cudf = cudf.Series(y_cudf) %%time # run the cuml linear regression model to fit the training dataset cuols = cuLinearRegression(fit_intercept=True, normalize=True, algorithm='eig') cuols.fit(X_cudf, y_cudf) %%time # calculate the mean squared error of the testing dataset using the cuml linear regression model cu_predict = cuols.predict(X_cudf_test).to_array() error_cu = mean_squared_error(y_test,cu_predict) # print the mean squared error of the sklearn and cuml model to compare the two print("SKL MSE(y):") print(error_sk) print("CUML MSE(y):") print(error_cu) ```
github_jupyter
def pad_second_dim(x, desired_size):
    '''
    Zero-pad a rank-3 tensor along axis 1 up to desired_size.
    Assumes x is (batch, time, features) — TODO confirm with callers.
    '''
    shape = tf.shape(x)
    pad_shape = tf.stack([shape[0], desired_size - shape[1], shape[2]], 0)
    zeros = tf.tile([[[0.0]]], pad_shape)
    return tf.concat([x, zeros], 1)
def pad_sentence_batch(sentence_batch, pad_int):
    '''
    Right-pad every sentence (list of token ids) to the batch's max length.

    Returns (padded_sentences, original_lengths).
    '''
    longest = max(len(s) for s in sentence_batch)
    padded = [s + [pad_int] * (longest - len(s)) for s in sentence_batch]
    lengths = [len(s) for s in sentence_batch]
    return padded, lengths
pad_sentence_batch(test_Y[: test_size], PAD) feed = {model.X: batch_x,model.Y: batch_y,} logits = np.argmax(sess.run(model.logits, feed_dict = feed), axis = 2) logits.shape rejected = ['PAD', 'EOS', 'UNK', 'GO'] for i in range(test_size): predict = [rev_dictionary_to[i] for i in logits[i] if rev_dictionary_to[i] not in rejected] actual = [rev_dictionary_to[i] for i in batch_y[i] if rev_dictionary_to[i] not in rejected] print(i, 'predict:', ' '.join(predict)) print(i, 'actual:', ' '.join(actual)) print() ```
github_jupyter
class Point:
    '''
    Represents a point in a 2D space, e.g.,
    p1 = Point()
    p1.x, p1.y = 5.3, 7.4
    print(p1.x, p1.y)
    p2 = Point()
    p2.x, p2.y = 3.1, 9.7
    print(p1.distance(p2))
    '''
    # public object attributes: the point's coordinates
    x: float
    y: float

    def distance(self, other):
        '''compute the Euclidean distance between this point and another'''
        dx = self.x - other.x
        dy = self.y - other.y
        return sqrt(dx**2 + dy**2)
class Point:
    '''
    Represents a point in a 2D space whose coordinates are validated on
    assignment, e.g.,
    p1 = Point()
    p1.x, p1.y = 5.3, 7.4
    print(p1.x, p1.y)
    p2 = Point()
    p2.x, p2.y = 3.1, 9.7
    print(p1.distance(p2))
    '''
    # internal storage; clients should use the x/y properties
    _x: float
    _y: float

    @property
    def x(self):
        '''the point's x coordinate'''
        return self._x

    @x.setter
    def x(self, value):
        # float() raises immediately on a non-numeric value
        self._x = float(value)

    @property
    def y(self):
        '''the point's y coordinate'''
        return self._y

    @y.setter
    def y(self, value):
        self._y = float(value)

    def distance(self, other):
        '''compute the Euclidean distance between this point and another'''
        dx = self.x - other.x
        dy = self.y - other.y
        return sqrt(dx**2 + dy**2)
class Point:
    '''
    Represents a point in a 2D space, e.g.,
    p1 = Point(5.3, 7.4)
    print(p1.x, p1.y)
    p2 = Point(3.1, 9.7)
    print(p2)
    print(p1.distance(p2))
    '''
    # internal storage; clients should use the x/y properties
    _x: float
    _y: float

    def __init__(self, x, y):
        '''
        construct a point with the given coordinates
        x: float representing the x coordinate
        y: float representing the y coordinate
        '''
        # assignment goes through the validating setters below
        self.x = x
        self.y = y

    @property
    def x(self):
        '''the point's x coordinate'''
        return self._x

    @x.setter
    def x(self, value):
        self._x = float(value)

    @property
    def y(self):
        '''the point's y coordinate'''
        return self._y

    @y.setter
    def y(self, value):
        self._y = float(value)

    def distance(self, other):
        '''compute the Euclidean distance between this point and another'''
        dx = self.x - other.x
        dy = self.y - other.y
        return sqrt(dx**2 + dy**2)

    def __repr__(self):
        '''the point's string representation, "(x, y)"'''
        return f'({self.x}, {self.y})'
class Point:
    '''
    Represents a point in a 2D space, accessible via carthesian or polar
    coordinates, e.g.,
    p1 = Point(5.3, 7.4)
    print(p1.coords)
    print(p1.polar_coords)
    '''
    # internal storage; clients should use the properties
    _x: float
    _y: float

    def __init__(self, x, y):
        '''
        construct a point with the given coordinates
        x: float representing the x coordinate
        y: float representing the y coordinate
        '''
        # assignment goes through the validating setters below
        self.x = x
        self.y = y

    @property
    def x(self):
        '''the point's x coordinate'''
        return self._x

    @x.setter
    def x(self, value):
        self._x = float(value)

    @property
    def y(self):
        '''the point's y coordinate'''
        return self._y

    @y.setter
    def y(self, value):
        self._y = float(value)

    @property
    def coords(self):
        '''the point's carthesian coordinates as an (x, y) tuple'''
        return self.x, self.y

    @coords.setter
    def coords(self, value):
        # delegates to the x/y setters, so validation still applies
        self.x, self.y = value

    @property
    def polar_coords(self):
        '''the point's polar coordinates as an (r, theta) tuple'''
        return sqrt(self.x**2 + self.y**2), atan2(self.y, self.x)

    @polar_coords.setter
    def polar_coords(self, value):
        # value is indexed as (r, theta)
        self.x = value[0]*cos(value[1])
        self.y = value[0]*sin(value[1])

    def distance(self, other):
        '''compute the Euclidean distance between this point and another'''
        dx = self.x - other.x
        dy = self.y - other.y
        return sqrt(dx**2 + dy**2)

    def __repr__(self):
        '''the point's string representation, "(x, y)"'''
        return f'({self.x}, {self.y})'
Note that the setters for the individual attribues `x` and `y` have been removed. ``` class Point: ''' objects of this class represent points in a 2D space, e.g., p1 = Point(5.3, 7.4) print(p1.x, p1.y) p2 = Point(3.1, 9.7) print(p2) print(p1.distance(p2)) ''' # object attributes _x: float _y: float def __init__(self, coord1, coord2, polar=False): ''' constructs a point with the given coordinates coord1: float representing the x coordinate, or r coord2: float representing the y coordinate, or theta polar: bool indicating whether the coordiantes are polar, False by default ''' coord1, coord2 = float(coord1), float(coord2) if polar: self._x, self._y = Point._convert_to_carthesian(coord1, coord2) else: self._x, self._y = coord1, coord2 @property def x(self): ''' get the point's x coordinate ''' return self._x @property def y(self): ''' get the point's y coordinate ''' return self._y @property def coords(self): ''' get the point's coordinates returns tuple of 2 float values ''' return self.x, self.y @coords.setter def coords(self, value): ''' set the point's coordinates coords: tuple of 2 float values ''' self._x, self._y = float(value[0]), float(value[1]) @property def r(self): ''' get the point's radial coordinate ''' return sqrt(self.x**2 + self.y**2) @property def theta(self): ''' get the point's angular coordinate ''' return atan2(self.y, self.x) @property def polar_coords(self): ''' get the point's polar coordinates returns tuple of 2 float values as (r, theta) ''' return sqrt(self.x**2 + self.y**2), atan2(self.y, self.x) def _convert_to_carthesian(r, theta): return r*cos(theta), r*sin(theta) @polar_coords.setter def polar_coords(self, value): self._x, self._y = Point._convert_to_carthesian(float(value[0]), float(value[1])) def distance(self, other): ''' computes the distance between the point and another point ''' return sqrt((self.x - other.x)**2 + (self.y - other.y)**2) def __repr__(self): ''' get the point's string representation ''' return f'({self.x}, 
{self.y})' p = Point(1.0, 3.0) p.polar_coords p.polar_coords = 2.0, pi/4 p.coords ``` We can now transparently change the implementation of the class, e.g., by storing the coordinates of a point as polar, rather than carthesian coordinates. ``` class Point: ''' objects of this class represent points in a 2D space, e.g., p1 = Point(5.3, 7.4) print(p1.x, p1.y) p2 = Point(3.1, 9.7) print(p2) print(p1.distance(p2)) ''' # object attributes _r: float _theta: float def __init__(self, coord1, coord2, polar=False): ''' constructs a point with the given coordinates coord1: float representing the x coordinate, or r coord2: float representing the y coordinate, or theta polar: bool indicating whether the coordiantes are polar, False by default ''' coord1, coord2 = float(coord1), float(coord2) if not polar: self._r, self._theta = Point._convert_to_polar(coord1, coord2) else: self._r, self._theta = coord1, coord2 @property def x(self): ''' get the point's x coordinate ''' return self._r*cos(self._theta) @property def y(self): ''' get the point's y coordinate ''' return self._r*sin(self._theta) @property def coords(self): ''' get the point's coordinates returns tuple of 2 float values ''' return self.x, self.y @coords.setter def coords(self, value): ''' set the point's coordinates coords: tuple of 2 float values ''' self._r, self._theta = Point._convert_to_polar(float(value[0]), float(value[1])) @property def r(self): ''' get the point's radial coordinate ''' return self._r @property def theta(self): ''' get the point's angular coordinate ''' return self._theta @property def polar_coords(self): ''' get the point's polar coordinates returns tuple of 2 float values as (r, theta) ''' return self.r, self.theta def _convert_to_polar(x, y): return sqrt(x**2 + y**2), atan2(y, x) @polar_coords.setter def polar_coords(self, value): self._r, self._theta = float(value[0]), float(value[1]) def distance(self, other): ''' computes the distance between the point and another point ''' return 
sqrt((self.x - other.x)**2 + (self.y - other.y)**2) def __repr__(self): ''' get the point's string representation ''' return f'({self.x}, {self.y})' p = Point(1.0, 3.0) p.polar_coords p.polar_coords = 2.0, pi/4 p.coords ``` ## Inheritance The `Point` class can be extended to represent point masses. One option would be to modifies `Point`'s definition to include and extra object attribute, and add relevant methods to the class. However, that would potentially break existing software, or at least add unnecessary complexity when using `Point` objects. The better option is to define a new class that inherits attributes and methods from `Point`, but adds the specific logic to represent points that have mass. ``` class PointMass(Point): ''' objects of this class represent points in a 2D space, e.g., p1 = PointMass(5.3, 7.4, 1.3) print(p1.x, p1.y, p1.mass) p2 = Point(3.1, 9.7, 0.9) print(p2) print(p1.distance(p2)) ''' # object attributes _mass: float def __init__(self, x, y, mass): ''' constructs a point with the given coordinates and mass x: float representing the x coordinate y: float representing the y coordinate mass: float representing the mass ''' super().__init__(x, y) self.mass = mass @property def mass(self): ''' get the point's mass ''' return self._mass @mass.setter def mass(self, value): ''' set the point's mass ''' self._mass = float(value) def __repr__(self): ''' get the point's string representation ''' return f'{super().__repr__()}: {self.mass}' ``` The `PointMass` class add the `_mass` attributes and its getter and setter, and has its own construvtor that calls `Point`'s constructor. Similarly, the `__repr__` method uses the one defined in `Point` to generate part of the string representation for a `PointMass` object. ``` p1 = PointMass(1.0, 3.0, 2.0) p2 = PointMass(-2.0, 1.0, 3.0) ``` All methods defined for `Point` objects work for `PointMass` objects. 
``` p1.distance(p2) print(p1) ``` Type information on objects is fairly straightforward using the `type` and `isinstance` functions. ``` p1 = Point(3.4, 5.2) p2 = PointMass(1.9, 2.3, 0.7) print(type(p1), type(p2)) print(isinstance(p1, Point), isinstance(p1, PointMass), isinstance(p2, Point), isinstance(p2, PointMass)) ``` As expected, `p1` is a `Point`, but not a `PointMass`, while `p2` is both a `PointMass` and a `Point` since by definition each `PointMass` is a `Point`. `Point` is `PointMass`'s base class, while `PointMass` is a class derived from `Point`. ## Static and class methods, class attributes The `total_mass` and `center_of_mass` methods operate on collections of point masses, not on individual objects. Hence they are defined as static and class method respectively. `center_of_mass` is a class method, since it needs a reference to the class to call `total_mass`. ``` class PointMass(Point): ''' objects of this class represent points in a 2D space, e.g., p1 = PointMass(5.3, 7.4, 1.3) print(p1.x, p1.y, p1.mass) p2 = PointMass(3.1, 9.7, 0.9) print(p2) print(p1.distance(p2)) ''' # object attributes _mass: float def __init__(self, x, y, mass): ''' constructs a point with the given coordinates and mass x: float representing the x coordinate y: float representing the y coordinate mass: float representing the mass ''' super().__init__(x, y) self.mass = mass @property def mass(self): ''' get the point's mass ''' return self._mass @mass.setter def mass(self, value): ''' set the point's mass ''' self._mass = float(value) @staticmethod def total_mass(points): mass = 0.0 for point in points: mass += point.mass return mass @classmethod def center_of_mass(cls, points): x, y = 0.0, 0.0 for point in points: x += point.mass*point.x y += point.mass*point.y mass = cls.total_mass(points) return x/mass, y/mass def __repr__(self): ''' get the point's string representation ''' return f'{super().__repr__()}: {self.mass}' p1 = PointMass(1.5, 2.0, 0.5) p2 = PointMass(-0.5, 1.0, 2.0) 
PointMass.total_mass([p1, p2]) PointMass.center_of_mass([p1, p2]) ``` Suppose we would like each point mass to have a unique identifier. This can be accomplished if we can keep track of the number of instanciated objects, and use that number as the ID of a newly created one. ``` class PointMass(Point): ''' objects of this class represent points in a 2D space, e.g., p1 = PointMass(5.3, 7.4, 1.3) print(p1.x, p1.y, p1.mass) p2 = Point(3.1, 9.7, 0.9) print(p2) print(p1.distance(p2)) ''' # class attribute _id_state = 0 # object attributes _mass: float _id: int def __init__(self, x, y, mass): ''' constructs a point with the given coordinates and mass x: float representing the x coordinate y: float representing the y coordinate mass: float representing the mass ''' super().__init__(x, y) self.mass = mass self._id = self.__class__._id_state self.__class__._id_state += 1 @property def id(self): ''' get the point's ID ''' return self._id @property def mass(self): ''' get the point's mass ''' return self._mass @mass.setter def mass(self, value): ''' set the point's mass ''' self._mass = float(value) @staticmethod def total_mass(points): mass = 0.0 for point in points: mass += point.mass return mass @classmethod def center_of_mass(cls, points): x, y = 0.0, 0.0 for point in points: x += point.mass*point.x y += point.mass*point.y mass = cls.total_mass(points) return x/mass, y/mass def __repr__(self): ''' get the point's string representation ''' return f'{super().__repr__()}: {self.mass}' ``` Note that we define a getter for the `id` attribute, but no setter. Modifying a point's ID would probably mess up functionality that depends on IDs being unique. ``` p1 = PointMass(0.3, 1.8, 2.5) p2 = PointMass(1.9, -2.1, 0.3) print(p1.id, p2.id) ``` ## Introspection In some circumstances, it can be useful to determine properties of an object at runtime, e.g., whether it has an attribute or a method. 
``` p1 = Point(3.1, 4.5) p2 = PointMass(-0.3, 1.2, 0.6) ``` The `hasattr` function returns true if the object has an attribute of the given name, false otherwise. ``` hasattr(p1, '_theta') hasattr(p1, '_mass') hasattr(p2, '_mass') ``` Note that the term "attribute" is used in a flexible way, it also works for methods, and that classes such as `Point` are in fact also objects, e.g., ``` hasattr(p1, 'x') hasattr(p2, 'distance') ``` We can also retrieve values of attributes in a similar way. ``` getattr(p1, 'x') ``` This even works for object methods that can be called dynamically. ``` method = getattr(p1, "distance") method(p2) ``` Note that the method is bound to the object it was retrieved from. ``` method(p1) ``` Attributes can also be set using `setattr`. ``` p1 setattr(p1, '_r', 1.0) p1 ``` Perhaps somewhat more unexpectedly, it is possible to add new attributes after an object has been created. This attribute is specific to that object, other objects of the same class are not affected. ``` p3 = Point(2.0, pi/4, polar=True) setattr(p1, 'color', 'blue') p1.color hasattr(p3, 'color') ``` This is most likely bad practice, so you probably should not write such code. Note that even a simple typo can lead to "interesting" results. ``` p1.coorrds = 5, 3 ``` The `dir` function returns a list of all the attributes and methods an object has. ``` dir(p3) ``` We recognize the object attributes `_r` and `_theta`, and object methods we defined such as the getter `x`, `y`,..., `polar_coords`, as well as ordinary methods such as `distance`. The object's `__dict__` attribute is in fact rather interesting, since it is a dictionary that has the object's attribute names as keys, and maps those to their respective values. The function `vars` returns a reference to this dictionary. ``` for attr_name, attr_value in vars(p1).items(): print(f'{attr_name}: {attr_value}') ``` Note that this provides yet another (not so clean) way to alter an object's attribute value. 
``` d = vars(p1) d['_r'] = 1.0 p1.r ``` ## Using `__slots__` It may be worth avoiding the issue of accidentally adding an attribute to an individual object by using the `__slots__` class attribute. Consider the following class definition. A `SimplePoint` object will have only two attributes, `_x` and `_y` with corresonponding accessors. ``` class SimpleSlotPoint: # object attributes __slots__ = ('_x', '_y') def __init__(self, x, y): self.x = x self.y = y @property def x(self): return self._x @x.setter def x(self, value): self._x = float(value) @property def y(self): return self._y @y.setter def y(self, value): self._y = float(value) p = SimpleSlotPoint(3, 5) p.x p.x = 17 p.x ``` Not surprisingly, the accessors work as before, but when we try to dynamically add an attribute to the `p` object, we actually get an error. ``` try: p.z = 13 except Exception as error: import traceback traceback.print_exc() ``` Using slots has the additional advantage of reducing memory overhead for objects, so it is quite useful when it is expected that many instances of the class may be instantiated during execution. Note that objects instantiated from classes that use `__slots__` don't have a `__dict__` attribute. Hence the `vars` function will throw an error when called on an object that has `__slots__`. ``` '__dict__' in dir(p) ``` To measure the difference in performance between an implementation that uses `__slots__` versus one that uses `__dict__` we define an additional class with the same functionality as `SimpleSlotPoint`. ``` class SimpleDictPoint: def __init__(self, x, y): self._x = float(x) self._y = float(y) @property def x(self): return self._x @x.setter def x(self, value): self._x = float(value) @property def y(self): return self._y @y.setter def y(self, value): self._y = float(value) ``` Time object instantiation. ``` %timeit SimpleDictPoint(1.0, 3.0) %timeit SimpleSlotPoint(1.0, 3.0) ``` Time object attribute access. 
``` %%timeit p = SimpleDictPoint(0.0, 0.0) for _ in range(1000): p.x += 1.0 p.y -= 0.5 %%timeit p = SimpleSlotPoint(0.0, 0.0) for _ in range(1000): p.x += 1.0 p.y -= 0.5 ``` ## Wrapper classes or derivation by extension Suppose (for whatever bizarre reason) we would like to have named numpy arrays. One option would be to define a new class with `numpy.array` as base class, add a `name` object attribute, along with its accessors. An alternative is to define a class that has an `numpy.array` as an object attribute, and pass all attribute requests on to that attribute by implementing the `__getattr__` method explicitely. To ensure that the elements of the named arrays are accessible by index, we also implement `__getitem__` and `__setitem__`. ``` class NamedArray: import numpy as np # object attributes _data: np.array _name: str def __init__(self, name, *args, **kwargs): self._name = name self._data = np.array(*args, **kwargs) @property def name(self): return self._name def __getattr__(self, name): if hasattr(self._data, name): return getattr(self._data, name) else: raise AttributeError(f'no such attribute {name}') def __getitem__(self, index): return self._data[index] def __setitem__(self, index, value): self._data[index] = value array = NamedArray('mine', [[2, 1, 7], [3, -2, 5]], dtype=np.float32) ``` The array object has a name attribute with the expected value. ``` array.name ``` However, all attributes and methods of the `numpy.array` attribute are available directly as well. ``` array.shape array.mean() ``` Thanks to the `__getitem__` and `__setitem__` methods, the named array can be indexed like an ordinary numpy array. ``` array[0, 0] = 1 for value in array: print(value) ``` When trying to access a non-existing attribute, an exception is raised. ``` try: array.blabla except Exception as error: import traceback traceback.print_exc() ```
github_jupyter
# S3VT Landsat and Sentinel 2 validation of hotspots - working ## Description This notebook demonstrates how to: From a candidate latitude longitude and solar_day: * determine if intersecting Landsat or Sentinel 2 ARD exists * apply the platform specific tests to determine if hotspots were detected in the vicinity 5km of hotspot * return number of pixel identified as hotspots * save a boolean file labelled with solar date of acquisition * as a secondary test perform a Normalized Burnt Ratio and return as a binary with solar date of acquisition * find canidate dates within a time range of source hotspot * find closest before date within tolerance (dNBR A) * find closest after date within tolerance (dNBR B) * candidate closest to source hotspot will be used for hotspot matching i.e. high resolution hotspot Assumptions: * reflectance values are scaled by 10000 i.e. 100% reflectance = 10000 ### Load packages ``` %matplotlib inline from pathlib import Path import datacube import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import numpy as np import pandas as pd import sys import xarray as xr import geopandas as gpd sys.path.append("Scripts") from dea_datahandling import load_ard from dea_plotting import rgb from dea_bandindices import calculate_indices from shapely import wkt from geopy.distance import distance import rioxarray ``` ### Connect to the datacube ``` dc = datacube.Datacube(app='validating_hotspots') ``` ### Be ignorant of the sensor ``` # configure sensor bands - #TODO implement sensor ignorance code here sensor_ignorance = {'msi':{'0.433-0.453': 'nbar_coastal_aerosol', '0.450-0.515': 'nbar_blue', '0.525-0.600': 'nbar_green', '0.630-0.680': 'nbar_red', '0.845-0.885': 'nbar_nir_1', '1.560-1.660': 'nbar_swir_2', '2.100-2.300': 'nbar_swir_3', 'fmask': 'fmask'}, 'oli': {'0.433-0.453': 'nbart_band01', '0.450-0.515': 'nbart_band02', '0.525-0.600': 'nbart_band03', '0.630-0.680': 'nbart_band04', '0.845-0.885': 'nbart_band05', '1.560-1.660': 
'nbart_band06', '2.100-2.300': 'nbart_band07', 'fmask': 'fmask'}} df = pd.read_csv('nearest_points.SENTINEL_3A_SLSTR_ESA.csv') geometry = df.geometry.apply(wkt.loads) crs = 'epsg:4326' nearest_points = gpd.GeoDataFrame(df, crs=crs, geometry=geometry) nearest_points['datetime'] = pd.to_datetime(nearest_points['datetime']) nearest_points['solar_day'] = pd.to_datetime(nearest_points['solar_day']) nearest_points['2_datetime'] = pd.to_datetime(nearest_points['2_datetime']) nearest_points['dist_m'] = nearest_points.apply(lambda row: distance((row.latitude, row.longitude),(row['2_latitude'], row['2_longitude'])).meters, axis = 1) nearest_points['timedelta'] = (abs(nearest_points['datetime'] - nearest_points['2_datetime'])) nearest_points['count'] = 1 nearest_points = nearest_points[(nearest_points['dist_m'] < 5000)] nearest_points["s2msi_rdnbr_gt_1200"] = "" nearest_points["s2msi_pre_burn_time"] = "" nearest_points["s2msi_post_burn_time"] = "" nearest_points["s2msi_pre_burn_timedelta"] = "" nearest_points["s2msi_post_burn_timedelta"] = "" nearest_points["s2msi_pre_percent"] = "" nearest_points["s2msi_post_percent"] = "" nearest_points["lsoli_rdnbr_gt_1200"] = "" nearest_points["lsoli_pre_burn_time"] = "" nearest_points["lsoli_post_burn_time"] = "" nearest_points["lsoli_pre_burn_timedelta"] = "" nearest_points["lsoli_post_burn_timedelta"] = "" nearest_points["lsoli_pre_percent"] = "" nearest_points["lsoli_post_percent"] = "" nearest_points["lsoli_hotspot_percent"] = "" ``` # Functions ``` # Buffer candidate hotspot with 5 kilometre radius (or .05 degrees will do) def buffer_hotspot(lon, lat): ul_lon = lon - 0.05 lr_lon = lon + 0.05 ul_lat = lat + 0.05 lr_lat = lat - 0.05 return ((ul_lon, lr_lon), (ul_lat, lr_lat)) #def merge(list1, list2, list3): # merged_list = [(list1[i], list2[i], list3[i]) for i in range(0, len(list1))] # return merged_list #nearest_points.datetime[0].date() #datelist = [] #for i in nearest_points.datetime: # 
datelist.append(i)#.date())#.strftime("%Y-%m-%d")) # #candidate_list = list(set(merge(nearest_points.longitude.to_list(), # nearest_points.latitude.to_list(), # datelist))) def buffer_date(firetime, days): prefire_date = (firetime - np.timedelta64(days, "D")).astype(str) postfire_date = (firetime + np.timedelta64(days, "D")).astype(str) return(prefire_date, postfire_date) def get_measurement_list(product): measurement_list = [] for i in dc.list_products().name: for j in dc.list_measurements().query('product == @i').name: if i == product: measurement_list.append([i, '--',j]) return(measurement_list) # potentially unambiguous active fire pixels def get_candidates(ds): test1 = (((ds[sensor_ignorance[sensor]['2.100-2.300']] / ds[sensor_ignorance[sensor]['0.845-0.885']]) > 2.5) * ((ds[sensor_ignorance[sensor]['2.100-2.300']] - ds[sensor_ignorance[sensor]['0.845-0.885']]) > 3000) * (ds[sensor_ignorance[sensor]['2.100-2.300']] > 5000)) # Unambiguous fire pixels test2 = (((ds[sensor_ignorance[sensor]['1.560-1.660']] > 8000) * (ds[sensor_ignorance[sensor]['0.433-0.453']] < 2000)) * ((ds[sensor_ignorance[sensor]['0.845-0.885']] > 4000) + (ds[sensor_ignorance[sensor]['2.100-2.300']] < 1000)).clip(min=0, max=1)) # other candidate fire pixels test3 = (((ds[sensor_ignorance[sensor]['2.100-2.300']]/ds[sensor_ignorance[sensor]['0.845-0.885']]) > 1.8)* (ds[sensor_ignorance[sensor]['2.100-2.300']]-ds[sensor_ignorance[sensor]['0.845-0.885']] > 1700)) unambiguous = (test1 + test2 + test3).clip(min=0, max=1) return(unambiguous) get_measurement_list('ga_s2a_ard_nbar_granule') # potentially unambiguous active fire pixels def get_candidates(ds): test1 = (((ds[sensor_ignorance[sensor]['2.100-2.300']] / ds[sensor_ignorance[sensor]['0.845-0.885']]) > 2.5) * ((ds[sensor_ignorance[sensor]['2.100-2.300']] - ds[sensor_ignorance[sensor]['0.845-0.885']]) > 3000) * (ds[sensor_ignorance[sensor]['2.100-2.300']] > 5000)) # Unambiguous fire pixels test2 = (((ds[sensor_ignorance[sensor]['1.560-1.660']] 
> 8000) * (ds[sensor_ignorance[sensor]['0.433-0.453']] < 2000)) * ((ds[sensor_ignorance[sensor]['0.845-0.885']] > 4000) + (ds[sensor_ignorance[sensor]['2.100-2.300']] < 1000)).clip(min=0, max=1)) # other candidate fire pixels test3 = (((ds[sensor_ignorance[sensor]['2.100-2.300']]/ds[sensor_ignorance[sensor]['0.845-0.885']]) > 1.8)* (ds[sensor_ignorance[sensor]['2.100-2.300']]-ds[sensor_ignorance[sensor]['0.845-0.885']] > 1700)) unambiguous = (test1 + test2 + test3).clip(min=0, max=1) return(unambiguous) def get_context_kernel_array(y, x, array): T, Y, X = array.shape ymin = y - 60 ymax = y + 60 xmin = x - 60 xmax = x + 60 if ymin < 0: ymin = 0 if xmin < 0: xmin = 0 if ymax > Y: ymax = Y if xmax > X: xmax = X try: outarray = array[0][:, ymin:ymax][xmin:xmax] except: outarray = np.nans((61,61), dtype=np.float64) return(outarray, (ymin, ymax, xmin, xmax)) def run_test6(ds): #6. ratio b7 b6 > 1.6 return((ds[sensor_ignorance[sensor]['2.100-2.300']]/ds[sensor_ignorance[sensor]['1.560-1.660']]) > 1.6 ) # Oceans test #7. {b4 > b5 AND b5 > b6 AND b6 > b7 AND b1 - b7 < 0.2} def run_test7(ds): test7 = ((ds[sensor_ignorance[sensor]['0.630-0.680']]>ds[sensor_ignorance[sensor]['0.845-0.885']])* (ds[sensor_ignorance[sensor]['0.845-0.885']]>ds[sensor_ignorance[sensor]['1.560-1.660']])* (ds[sensor_ignorance[sensor]['1.560-1.660']]>ds[sensor_ignorance[sensor]['2.100-2.300']])* ((ds[sensor_ignorance[sensor]['0.433-0.453']]-ds[sensor_ignorance[sensor]['2.100-2.300']]) < 2000)) return(test7.clip(min=0, max=1))#.plot() # Water bodies test - comment - seems like bad test / smoke complications? #AND #8. {(b3 > b2) def run_test8(ds): test8 = (ds[sensor_ignorance[sensor]['0.525-0.600']]>ds[sensor_ignorance[sensor]['0.450-0.515']]) return(test8.clip(min=0, max=1)) #OR #9. (b1 > b2 AND b2 > b3 AND b3 < b4)}. 
def run_test9(ds): test9 = ((ds[sensor_ignorance[sensor]['0.433-0.453']]>ds[sensor_ignorance[sensor]['0.450-0.515']]) * (ds[sensor_ignorance[sensor]['0.450-0.515']]>ds[sensor_ignorance[sensor]['0.525-0.600']])* (ds[sensor_ignorance[sensor]['0.525-0.600']]<ds[sensor_ignorance[sensor]['0.630-0.680']])) return(test9.clip(min=0, max=1)) def get_watermasks(ds): watermask=(run_test7(ds)+run_test8(ds)+run_test9(ds)).clip(min=0, max=1) return(watermask) def get_hotspots(ds): # Find the candidates and perform context check # TODO create mask that is ref7 > 0, non water and not other candidates # mask # b7=<0 # water # othercandidates #candidates = (test1 + test2 + test3).clip(min=0, max=1) candidates = get_candidates(ds) watermasks = get_watermasks(ds) indices = np.where(candidates.data == 1) swircandidates = (ds[sensor_ignorance[sensor]['2.100-2.300']].where(candidates.data == 0)).where(watermasks.data == 0) nircandidates = (ds[sensor_ignorance[sensor]['0.845-0.885']].where(candidates.data == 0)).where(watermasks.data == 0) test4 = (candidates*0) test5 = (candidates*0) index = 0 while index < len(indices[1]): y = indices[1][index] x = indices[2][index] #4. ratio between b7 b5 > ratio b7 b5 + max[3x std ratio b7 and b5, 0.8 ] #AND #5. 
b7 > b7 + max[3x std b7, 0.08] #AND #swirkernel = get_context_kernel_array(y,x,ds[sensor_ignorance[sensor]['2.100-2.300']].data)[0] #nirkernel = get_context_kernel_array(y,x,ds[sensor_ignorance[sensor]['0.845-0.885']].data)[0] swirkernel = get_context_kernel_array(y,x,swircandidates.data)[0] nirkernel = get_context_kernel_array(y,x,nircandidates.data)[0] swir = ds[sensor_ignorance[sensor]['2.100-2.300']].data[0][y][x] nir = ds[sensor_ignorance[sensor]['0.845-0.885']].data[0][y][x] test4.data[0][y][x] = ((swir/nir) > (np.nanmean(swirkernel/nirkernel) + max(3*np.nanstd(swirkernel/nirkernel), 0.8))) test5.data[0][y][x] = (swir > (np.nanmean(swirkernel) + max(3*np.nanstd(swirkernel), 0.08))) #print(test4.data[0][y][x],(swir/nir), (np.nanmean(swirkernel/nirkernel) + max(3*np.nanstd(swirkernel/nirkernel), 0.8))) #print(test5.data[0][y][x], swir,(np.nanmean(swirkernel) + max(3*np.nanstd(swirkernel), 0.08)) ) # Write values to new dimension #print(index, y, x, get_context_kernel_array(y,x,ds[sensor_ignorance[sensor]['2.100-2.300']].data)[1]) index = index + 1 test6 = run_test6(ds) t, y, z = np.where((candidates*(test4*test5*test6)).data == 1) hotspots = len(y) return(hotspots, (candidates*(test4*test5*test6))) #(candidates*(test4*test5*test6)).plot() def get_nbr(ds): swir = ds[sensor_ignorance[sensor]['2.100-2.300']] nir = ds[sensor_ignorance[sensor]['0.845-0.885']] return((nir - swir) / (swir + nir)) def get_rdnbr(pre_fire_image, post_fire_imag): # Revitalising dNBR from NSW Govt postfire_nbr = get_nbr(post_fire_image) prefire_nbr = get_nbr(pre_fire_image) dnbr = (prefire_nbr[0] - postfire_nbr[0]) # Scaling and offset as per NSW Govt algorithm #rdnbr = ((dnbr/(np.sqrt(np.abs(prefire_nbr[0]))))*1000) return((dnbr/(np.sqrt(np.abs(prefire_nbr[0]))))*1000) def plot_rgb(image, fake_saturation): #image = mask_invalid_data(image) rgb = image.to_array(dim='color') rgb = rgb.transpose(*(rgb.dims[1:]+rgb.dims[:1])) # make 'color' the last dimension #rgb = rgb.where((rgb <= 
fake_saturation).all(dim='color')) # mask out pixels where any band is 'saturated' rgb /= fake_saturation # scale to [0, 1] range for imshow rgb[0].plot.imshow(col_wrap=5, add_colorbar=False) def get_timedelta_from_ds(ds, target_time): timedelta = {} index = 0 for i in list(ds.time.data): timedelta[index] = {"time": i, "delta": target_time - i} index = index+1 #min(list(ds.time.data),key=lambda date : np.datetime64('2019-12-15')) return(pd.DataFrame.from_dict(timedelta).transpose()) def get_portion_imaged(image): t, y, x = np.where(image[sensor_ignorance[sensor]['2.100-2.300']] >= 0) t1, y1, x1 = np.where(image[sensor_ignorance[sensor]['2.100-2.300']] < 0) return(len(y)/(len(y1)+len(y)), len(y)) def get_portion_above_threshold(image, threshold): y1, x1 = np.where(image) y, x = np.where(image > threshold) return(len(y)/(len(y1)+len(y)), len(y)) def run_hotspots(output_path, hotspot_index, xtuple, ytuple, hotspot_time_tuple, hotspot_utc_time): query = { 'x': xtuple, 'y': ytuple, 'time': (hotspot_time_tuple), 'measurements': measurements, 'output_crs': 'EPSG:3577', 'resolution': (-30, 30), 'group_by': 'solar_day' } try: #ds = load_ard(dc=dc, products=sensors_products[sensor], min_gooddata=0.5, **query) dataset_list = [] for product in sensors_products[sensor]: datasets = dc.find_datasets(product=product, **query) dataset_list.extend(datasets) #ds = load_ard(dc=dc, products=sensors_products[sensor], min_gooddata=0.5, **query) ds = dc.load(datasets=dataset_list, **query) index = 0 pd_timediff = get_timedelta_from_ds(ds, hotspot_utc_time) # Initialise variables pre_fire_candidate_delta = 0 post_fire_candidate_delta = 0 while index < len(ds.time): image = ds.isel(time=[index])[([sensor_ignorance[sensor]['2.100-2.300'],sensor_ignorance[sensor]['0.845-0.885'],sensor_ignorance[sensor]['0.450-0.515']])]#,'fmask'] )] # Maybe we won't use the mask as it confuses smoke with cloud mask = ds.isel(time=[index])[('fmask')] # Add dataset time to filename portion, valid_count = 
get_portion_imaged(image) if (portion > 0.50): # Determine pre and post fire imagery closest to hotspot utc time if index in pd_timediff[(pd_timediff.delta == pd_timediff.delta.abs())].index: if pre_fire_candidate_delta == 0: pre_fire_candidate_delta = pd_timediff.delta.abs() pre_fire_image = image pre_index = index else: if pre_fire_candidate_delta > pd_timediff.delta.abs(): pre_fire_candidate_delta = pd_timediff.delta.abs() pre_fire_image = image pre_index = index if index in pd_timediff[(pd_timediff.delta < pd_timediff.delta.abs())].index: if post_fire_candidate_delta == 0: post_fire_candidate_delta = pd_timediff.delta.abs() post_fire_image = image post_index = index else: if post_fire_candidate_delta > pd_timediff.delta.abs(): post_fire_candidate_delta = pd_timediff.delta.abs() post_fire_image = image post_index = index # Write rgb to file #image.isel(time=0).rio.to_raster(output_path.joinpath(str(hotspot_index)+'_'+str(image.time[0].data)+'rgb.tif')) #hotspots, hotspot_array = get_hotspots( ds.isel(time=[index])) # Write hotspot raster to file #hotspot_array.astype('int8').rio.to_raster(output_path.joinpath(str(hotspot_index)+'_'+str(image.time[0].data)+'hotspots.tif')) # Add results to hotspot geopandas dataframe #print(index, "hotspot pixel count: ", hotspots, " of ", valid_count) index = index + 1 except: result = '-' # Assuming you have valid data either side of your hotspot date try: # Write rgb to file pre_fire_image.isel(time=0).rio.to_raster(output_path.joinpath(str(hotspot_index)+'_'+str(pre_fire_image.time[0].data)+'rgb.tif')) pre_hotspots, hotspot_array = get_hotspots(ds.isel(time=[pre_index])) portion, pre_valid_count = get_portion_imaged(pre_fire_image) hotspot_array.astype('int8').rio.to_raster(output_path.joinpath(str(hotspot_index)+'_'+str(pre_fire_image.time[0].data)+'hotspots.tif')) except Exception: pass try: post_fire_image.isel(time=0).rio.to_raster(output_path.joinpath(str(hotspot_index)+'_'+str(post_fire_image.time[0].data)+'rgb.tif')) 
post_hotspots, hotspot_array = get_hotspots(ds.isel(time=[post_index])) portion, post_valid_count = get_portion_imaged(post_fire_image) hotspot_array.astype('int8').rio.to_raster(output_path.joinpath(str(hotspot_index)+'_'+str(post_fire_image.time[0].data)+'hotspots.tif')) except Exception: pass try: return(pre_fire_image, post_fire_image, (pre_hotspots/pre_valid_count), (post_hotspots/post_valid_count) ) except: return() sensors_products = {'oli': ['ga_ls8c_ard_3'], 'msi': ['ga_s2a_ard_nbar_granule', 'ga_s2b_ard_nbar_granule']} sensor = 'msi' output_path = Path('s2msi') output_path.mkdir(parents=True, exist_ok=True) measurements = [] for measurement in sensor_ignorance[sensor]: measurements.append(sensor_ignorance[sensor][measurement]) # Get time lon and lat for hotspot for hotspot_index in nearest_points.index: #if hotspot_index == 0: hotspot_lat = (nearest_points[(nearest_points.index == hotspot_index)].latitude.values[0]) hotspot_lon = (nearest_points[(nearest_points.index == hotspot_index)].longitude.values[0]) xtuple, ytuple = buffer_hotspot(hotspot_lon, hotspot_lat) hotspot_utc_time = (nearest_points[(nearest_points.index == hotspot_index)].datetime.values[0]) hotspot_time_tuple = buffer_date(hotspot_utc_time, 8) # Dumb workaround for inexplicable null arrays when trying to execute get_rdnbr in function try: pre_fire_image, post_fire_image, pre_percent, post_percent = run_hotspots(output_path,hotspot_index, xtuple, ytuple, hotspot_time_tuple, hotspot_utc_time) rdnbr = get_rdnbr(pre_fire_image, post_fire_image) rdnbr.astype('int16').rio.to_raster(output_path.joinpath(str(hotspot_index)+'_RdNBR.tif')) portion, valid_count = get_portion_above_threshold(rdnbr, 1200) if (sensor == 'msi'): nearest_points.loc[hotspot_index, "s2msi_rdnbr_gt_1200"] = portion nearest_points.loc[hotspot_index, "s2msi_pre_burn_time"] = pre_fire_image.time[0].data nearest_points.loc[hotspot_index, "s2msi_post_burn_time"] = post_fire_image.time[0].data nearest_points.loc[hotspot_index, 
"s2msi_pre_burn_timedelta"] = get_timedelta_from_ds(pre_fire_image, hotspot_utc_time).delta[0] nearest_points.loc[hotspot_index, "s2msi_post_burn_timedelta"] = get_timedelta_from_ds(post_fire_image, hotspot_utc_time).delta[0] nearest_points.loc[hotspot_index, "s2msi_pre_percent"] = pre_percent nearest_points.loc[hotspot_index, "s2msi_post_percent"] = post_percent if (sensor == 'oli'): nearest_points.loc[hotspot_index, "lsoli_rdnbr_gt_1200"] = portion nearest_points.loc[hotspot_index, "lsoli_rdnbr_gt_1200"] = portion nearest_points.loc[hotspot_index, "lsoli_pre_burn_time"] = pre_fire_image.time[0].data nearest_points.loc[hotspot_index, "lsoli_post_burn_time"] = post_fire_image.time[0].data nearest_points.loc[hotspot_index, "lsoli_pre_burn_timedelta"] = get_timedelta_from_ds(pre_fire_image, hotspot_utc_time).delta[0] nearest_points.loc[hotspot_index, "lsoli_post_burn_timedelta"] = get_timedelta_from_ds(post_fire_image, hotspot_utc_time).delta[0] nearest_points.loc[hotspot_index, "lsoli_pre_percent"] = pre_percent nearest_points.loc[hotspot_index, "lsoli_post_percent"] = post_percent except: run_hotspots(output_path, hotspot_index, xtuple, ytuple, hotspot_time_tuple, hotspot_utc_time) pd.pivot_table(nearest_points[(nearest_points['dist_m'] < 5000)],values='count', index=['2_satellite_sensor_product'], columns=['satellite_sensor_product'], aggfunc={'count':len}) # TODO # likely fire = # hotspot either side of burn date # burn scar detected by comparing pre and post burn date within 24 hours print(i) ```
github_jupyter
## Sample weight adjustment The objective of this tutorial is to familiarize ourselves with *SampleWeight*, the *samplics* class for adjusting sample weights. In practice, it is necessary to adjust base or design sample weights obtained directly from the random sample mechanism. These adjustments are done to correct for nonresponse, reduce effects of extreme/large weights, better align with known auxiliary information, and more. Specifically in this tutorial, we will: * learn how to use the method *adjust()* to redistribute sample weights to account for nonresponse and unknown eligibility, * learn how to use the method *poststratify()* to ensure that the sample weights sum to known control totals or that the relative distribution of domains is forced to known distributions, * learn how to use the method *calibrate()* to adjust sample weights using auxiliary information under the regression model, * learn how to use the method *normalize()* to ensure that the sample weights sum to known constants. To run the code in this notebook, we will use the dataset that was developed in the previous tutorial on sample selection. ``` import numpy as np import pandas as pd import samplics from samplics.datasets import load_psu_sample, load_ssu_sample from samplics.weighting import SampleWeight ``` ### Design (base) weight <a name="section1"></a> The design weight is the inverse of the overall probability of selection, which is the product of the first and second probabilities of selection. 
``` # Load PSU sample data psu_sample_dict = load_psu_sample() psu_sample = psu_sample_dict["data"] # Load PSU sample data ssu_sample_dict = load_ssu_sample() ssu_sample = ssu_sample_dict["data"] full_sample = pd.merge( psu_sample[["cluster", "region", "psu_prob"]], ssu_sample[["cluster", "household", "ssu_prob"]], on="cluster" ) full_sample["inclusion_prob"] = full_sample["psu_prob"] * full_sample["ssu_prob"] full_sample["design_weight"] = 1 / full_sample["inclusion_prob"] full_sample.head(15) ``` For the purposes of this illustration of handling non-response, we first need to incorporate some household non-response into our example. That is, we simulate the non-response status and store it in the variable *response_status*. The variable *response_status* has four possible values: *ineligible* which indicates that the sampling unit is not eligible for the survey, *respondent* which indicates that the sampling unit responded to the survey, *non-respondent* which indicates that the sampling unit did not respond to the survey, and *unknown* means that we are not able to infer the status of the sampling unit i.e. we do not know whether the sampling unit is eligible or not to the survey. ``` np.random.seed(7) full_sample["response_status"] = np.random.choice( ["ineligible", "respondent", "non-respondent", "unknown"], size=full_sample.shape[0], p=(0.10, 0.70, 0.15, 0.05) ) full_sample[["cluster", "region", "design_weight", "response_status"]].head(15) ``` ### Nonresponse adjustment <a name="section2"></a> In general, the sample weights are adjusted to redistribute the sample weights of all eligible units for which there is no sufficient response (unit level nonresponse) to the sampling units that sufficiently responded to the survey. This adjustment is done within adjustment classes or domains. Note that the determination of the response categories (unit response, item response, ineligible, etc.) is outside of the scope of this tutorial. 
Also, the weights of the sampling units with unknown eligibility are redistributed to the rest of the sampling units. In general, ineligible sampling units receive weights from the sampling units with unknown eligibility since eligible sampling units can be part of the unknown pool. The method *adjust()* has a boolean parameter *unknown_to_inelig* which controls how the sample weights of the unknown are redistributed. By default, *adjust()* redistributes the sample weights of the sampling units of the unknown to the ineligibles (*unknown_to_inelig=True*). If we do not wish to redistribute the sample weights of the unknowns to the ineligibles then we just set the flag to False (*unknown_to_inelig=False*). In the snippet of code below, we adjust the weight within clusters, that is, we use clusters as our adjustment classes. Note that we run the nonresponse adjustment twice, the first time with *unknown_to_inelig=True* (*nr_weight*) and the second time with the flag equal to False (*nr_weight2*). With *unknown_to_inelig=True*, the ineligibles received part of the sample weights from the unknowns. Hence, the sample weights for the respondents are less than when the flag is False. With *unknown_to_inelig=False*, the ineligibles did not receive any weights from the unknowns. Hence, the sample weights for the ineligible units remain the same before and after adjustment. In a real survey, the statistician may decide on the best non-response strategy based on the available information. 
``` status_mapping = {"in": "ineligible", "rr": "respondent", "nr": "non-respondent", "uk": "unknown"} full_sample["nr_weight"] = SampleWeight().adjust( samp_weight=full_sample["design_weight"], adjust_class=full_sample[["region", "cluster"]], resp_status=full_sample["response_status"], resp_dict=status_mapping, ) full_sample["nr_weight2"] = SampleWeight().adjust( samp_weight=full_sample["design_weight"], adjust_class=full_sample[["region", "cluster"]], resp_status=full_sample["response_status"], resp_dict=status_mapping, unknown_to_inelig=False, ) full_sample[ ["cluster", "region", "design_weight", "response_status", "nr_weight", "nr_weight2"] ].drop_duplicates().head(15) ``` **Important.** The default call of *adjust()* expects standard codes for response status that is "in", "rr", "nr", and "uk" where "in" means ineligible, "rr" means respondent, "nr" means non-respondent, and "uk" means unknown eligibility. In the call above, if we omit the parameter *response_dict*, then the run would fail with an assertion error message. The current error message is the following: **The response status must only contains values in ('in', 'rr', 'nr', 'uk') or the mapping should be provided using response_dict parameter**. For the call to run without using *response_dict*, it is necessary that the response status takes only values in the standard codes i.e. ("in", "rr", "nr", "uk"). The variable associated with *response_status* can contain any code but a mapping is necessary when the response variable is not constructed using the standard codes. To further illustrate the mapping of response status, let's assume that we have response_status2 which has the values 100 for ineligible, 200 for non-respondent, 300 for respondent, and 999 for unknown. 
``` response_status2 = np.repeat(100, full_sample["response_status"].shape[0]) response_status2[full_sample["response_status"] == "non-respondent"] = 200 response_status2[full_sample["response_status"] == "respondent"] = 300 response_status2[full_sample["response_status"] == "unknown"] = 999 pd.crosstab(response_status2, full_sample["response_status"]) ``` To use *response_status2*, we need to map the values 100, 200, 300 and 999 to "in", "rr", "nr", and "uk". This mapping is done below using the Python dictionary *status_mapping2*. Using *status_mapping2* in the function call *adjust()* will lead to the same adjustment as in the previous run i.e. *nr_weight* and *nr_weight3* contain the same adjusted weights. ``` status_mapping2 = {"in": 100, "nr": 200, "rr": 300, "uk": 999} full_sample["nr_weight3"] = SampleWeight().adjust( samp_weight=full_sample["design_weight"], adjust_class=full_sample[["region", "cluster"]], resp_status=response_status2, resp_dict=status_mapping2, ) full_sample[["cluster", "region", "response_status", "nr_weight", "nr_weight3"]].drop_duplicates().head() ``` If the response status variable only takes values "in", "nr", "rr" and "uk", then it is not necessary to provide the mapping dictionary to the function i.e. *resp_dict* can be omitted from the function call *adjust()*. 
``` response_status3 = np.repeat("in", full_sample["response_status"].shape[0]) response_status3[full_sample["response_status"] == "non-respondent"] = "nr" response_status3[full_sample["response_status"] == "respondent"] = "rr" response_status3[full_sample["response_status"] == "unknown"] = "uk" full_sample["nr_weight4"] = SampleWeight().adjust( samp_weight=full_sample["design_weight"], adjust_class=full_sample[["region", "cluster"]], resp_status=response_status3, ) full_sample[["cluster", "region", "response_status", "nr_weight", "nr_weight4"]].drop_duplicates().head() # Just dropping a couple of variables not needed for the rest of the tutorial full_sample.drop( columns=["psu_prob", "ssu_prob", "inclusion_prob", "nr_weight2", "nr_weight3", "nr_weight4"], inplace=True ) ``` ### Poststratification <a name="section3"></a> Poststratification is useful to compensate for under-representation of the sample or to correct for nonsampling error. The most common poststratification method consists of adjusting the sample weights to ensure that they sum to **known** control values from **reliable** sources by adjustment classes (domains). Poststratification classes can be formed using variables beyond the ones involved in the sampling design. For example, socio-economic variables such as age group, gender, race and education are often used to form poststratification classes/cells. **Important:** poststratifying to totals that are known to be out of date, and thus likely inaccurate and unreliable will not improve the estimate. Use this with caution. Let's assume that we have a reliable external source e.g. a recent census that provides the number of households by region. The external source has the following control data: 3700 households for East, 1500 for North, 2800 for South and 6500 for West. We use the method *poststratify()* to ensure that the poststratified sample weights (*ps_weight*) sum to the known control totals by region. 
Note that the control totals are provided using the Python dictionary *census_households*. ``` census_households = {"East": 3700, "North": 1500, "South": 2800, "West": 6500} full_sample["ps_weight"] = SampleWeight().poststratify( samp_weight=full_sample["nr_weight"], control=census_households, domain=full_sample["region"] ) full_sample.head(15) ``` The snippet of code below shows that the poststratified sample weights sum to the expected control totals that is 3700 households for East, 1500 for North, 2800 for South and 6500 for West. ``` sum_of_weights = full_sample[["region", "nr_weight", "ps_weight"]].groupby("region").sum() sum_of_weights.reset_index(inplace=True) sum_of_weights.head() ``` The crosstable below shows that only one adjustment factor was calculated and applied per adjustment class or region. ``` full_sample["ps_adjust_fct"] = round(full_sample["ps_weight"] / full_sample["nr_weight"], 12) pd.crosstab(full_sample["ps_adjust_fct"], full_sample["region"]) ``` In some surveys, there is interest in keeping relative distribution of strata to some known distribution. For example, WHO EPI vaccination surveys often poststratify sample weights to ensure that relative sizes of strata reflect official statistics e.g. census data. In most cases, the strata are based on some administrative divisions. For example, assume that according to census data East contains 25% of the households, North contains 10%, South contains 20% and West contains 45%. We can poststratify using the snippet of code below. 
``` known_ratios = {"East": 0.25, "North": 0.10, "South": 0.20, "West": 0.45} full_sample["ps_weight2"] = SampleWeight().poststratify( samp_weight=full_sample["nr_weight"], factor=known_ratios, domain=full_sample["region"] ) full_sample.head() sum_of_weights2 = full_sample[["region", "nr_weight", "ps_weight2"]].groupby("region").sum() sum_of_weights2.reset_index(inplace=True) sum_of_weights2["ratio"] = sum_of_weights2["ps_weight2"] / sum(sum_of_weights2["ps_weight2"]) sum_of_weights2.head() ``` ### Calibration <a name="section4"></a> Calibration is a more general concept for adjusting sample weights to sum to known constants. In this tutorial, we consider the generalized regression (GREG) class of calibration. Assume that we have $\hat{\mathbf{Y}} = \sum_{i \in s} w_i y_i$ and known population totals $\mathbf{X} = (\mathbf{X}_1, ..., \mathbf{X}_p)^T$ are available. Working under the model $Y_i | \mathbf{x}_i = \mathbf{x}^T_i \mathbf{\beta} + \epsilon_i$, the GREG estimator of the population total is $$\hat{\mathbf{Y}}_{GR} = \hat{\mathbf{Y}} + (\mathbf{X} - \hat{\mathbf{X}})^T\hat{\mathbf{B}}$$ where $\hat{\mathbf{B}}$ is the weighted least squares estimate of $\mathbf{\beta}$ and $\hat{\mathbf{X}}$ is the survey estimate of $\mathbf{X}$. The essence of the GREG approach is, under the regression model, to find the adjusted weights $w^{*}_i$ that are the closest to $w_i$, that is, to minimize the distance $h(z) = \sum_{i \in s} \frac{c_i (w_i - z_i)^2}{w_i}$. Let us simulate three auxiliary variables, that is, education, poverty and under_five (number of children under five in the household) and assume that we have the following control totals. * Total number of under five children: 6300 in the East, 4000 in the North, 6500 in the South and 14000 in the West. 
* Poverty (Yes: in poverty / No: not in poverty) | Region &nbsp;| Poverty &nbsp;| Number of households | |:--------|:--------:|:--------------------:| | East | No | 2600 | | | Yes | 1200 | | North | No | 1500 | | | Yes | 200 | | South | No | 1800 | | | Yes | 1100 | | West | No | 4500 | | | Yes | 2200 | * Education (Low: less than secondary, Medium: secondary completed, and High: More than secondary) | Region &nbsp;| Education &nbsp;| Number of households | |:--------|:--------:|:------:| | East | Low | 2000 | | | Medium | 1400 | | | High | 350 | | North | Low | 550 | | | Medium | 700 | | | High | 250 | | South | Low | 1300 | | | Medium | 1200 | | | High | 350 | | West | Low | 2100 | | | Medium | 4000 | | | High | 500 | ``` np.random.seed(150) full_sample["education"] = np.random.choice(("Low", "Medium", "High"), size=150, p=(0.40, 0.50, 0.10)) full_sample["poverty"] = np.random.choice((0, 1), size=150, p=(0.70, 0.30)) full_sample["under_five"] = np.random.choice((0, 1, 2, 3, 4, 5), size=150, p=(0.05, 0.35, 0.25, 0.20, 0.10, 0.05)) full_sample.head() ``` We will now calibrate the nonresponse weight (*nr_weight*) to ensure that the estimated number of households in poverty is equal to 4,700 and the estimated total number of children under five is 30,800. The control numbers 4,700 and 30,800 are obtained from the table above. The class *SampleWeight()* uses the method *calibrate(samp_weight, aux_vars, control, domain, scale, bounded, modified)* to adjust the weight using the GREG approach. * The control values must be stored in a python dictionary i.e. totals = {"poverty": 4700, "under_five": 30800}. In this case, we have two numerical variables poverty with values in {0, 1} and under_five with values in {0, 1, 2, 3, 4, 5}. * *aux_vars* is the matrix of covariates. 
``` totals = {"poverty": 4700, "under_five": 30800} full_sample["calib_weight"] = SampleWeight().calibrate( full_sample["nr_weight"], full_sample[["poverty", "under_five"]], totals ) full_sample[["cluster", "region", "household", "nr_weight", "calib_weight"]].head(15) ``` We can confirm that the estimated totals for the auxiliary variables are equal to their control values. ``` poverty = full_sample["poverty"] under_5 = full_sample["under_five"] nr_weight = full_sample["nr_weight"] calib_weight = full_sample["calib_weight"] print( f"\nTotal estimated number of poor households was {sum(poverty*nr_weight):.2f} before and {sum(poverty*calib_weight):.2f} after adjustment \n" ) print( f"Total estimated number of children under 5 was {sum(under_5*nr_weight):.2f} before and {sum(under_5*calib_weight):.2f} after adjustment \n" ) ``` If we want to control by domain then we can do so using the parameter *domain* of *calibrate()*. First we need to update the python dictionary holding the control values. Now, those values have to be provided for each domain. Note that the dictionary is now a nested dictionary where the higher level keys hold the domain values i.e. East, North, South and West. Then the higher level values of the dictionary are the dictionaries providing mapping for the auxiliary variables and the corresponding control values. ``` totals_by_domain = { "East": {"poverty": 1200, "under_five": 6300}, "North": {"poverty": 200, "under_five": 4000}, "South": {"poverty": 1100, "under_five": 6500}, "West": {"poverty": 2200, "under_five": 14000}, } full_sample["calib_weight_d"] = SampleWeight().calibrate( full_sample["nr_weight"], full_sample[["poverty", "under_five"]], totals_by_domain, full_sample["region"] ) full_sample[["cluster", "region", "household", "nr_weight", "calib_weight", "calib_weight_d"]].head(15) ``` Note that the GREG domain estimates above do not have the additive property. That is the GREG domain estimates do not sum to the overal GREG estimate. 
To illustrate this, let's assume that we want to estimate the number of households. ``` print(f"\nThe number of households using the overall GREG is: {sum(full_sample['calib_weight']):.2f} \n") print(f"The number of households using the domain GREG is: {sum(full_sample['calib_weight_d']):.2f} \n") ``` Note that with the additive flag equal to True, the sum of the domain estimates is equal to the GREG overall estimate. ``` totals_by_domain = { "East": {"poverty": 1200, "under_five": 6300}, "North": {"poverty": 200, "under_five": 4000}, "South": {"poverty": 1100, "under_five": 6500}, "West": {"poverty": 2200, "under_five": 14000}, } calib_weight3 = SampleWeight().calibrate( full_sample["nr_weight"], full_sample[["poverty", "under_five"]], totals_by_domain, full_sample["region"], additive=True, ) under_5 = np.array(full_sample["under_five"]) print(f"\nEach column can be used to estimate a domain: {np.sum(np.transpose(calib_weight3) * under_5, axis=1)}\n") print(f"The number of households using the overall GREG is: {sum(full_sample['calib_weight']):.2f} \n") print( f"The number of households using the domain GREG is: {sum(full_sample['calib_weight_d']):.2f} - with ADDITIVE=FALSE\n" ) print( f"The number of households using the domain GREG is: {np.sum(np.transpose(calib_weight3)):.2f} - with ADDITIVE=TRUE \n" ) ``` ### Normalization <a name="section5"></a> DHS and MICS normalize the final sample weights to sum to the sample size. We can use the class method *normalize()* to ensure that the sample weights sum to some constant across the sample or by normalization domain e.g. stratum. **Important:** *normalization* is mostly added here for completeness, but it is seldom to see sample weights normalized in large scale household surveys. One major downside of normalized weights is that estimation of totals does not make sense with normalized weights. 
``` full_sample["norm_weight"] = SampleWeight().normalize(full_sample["nr_weight"]) full_sample[["cluster", "region", "nr_weight", "norm_weight"]].head(25) print((full_sample.shape[0], full_sample["norm_weight"].sum())) ``` When *normalize()* is called with only the parameter *sample_weight* then the sample weights are normalized to sum to the length of the sample weight vector. ``` full_sample["norm_weight2"] = SampleWeight().normalize(full_sample["nr_weight"], control=300) print(full_sample["norm_weight2"].sum()) full_sample["norm_weight3"] = SampleWeight().normalize(full_sample["nr_weight"], domain=full_sample["region"]) weight_sum = full_sample.groupby(["region"]).sum() weight_sum.reset_index(inplace=True) weight_sum[["region", "nr_weight", "norm_weight", "norm_weight3"]] norm_level = {"East": 10, "North": 20, "South": 30, "West": 50} full_sample["norm_weight4"] = SampleWeight().normalize(full_sample["nr_weight"], norm_level, full_sample["region"]) weight_sum = full_sample.groupby(["region"]).sum() weight_sum.reset_index(inplace=True) weight_sum[["region", "nr_weight", "norm_weight", "norm_weight3", "norm_weight4",]] ```
github_jupyter
# Model Acquisition into IncQuery Server ## Set up ``` import iqs_jupyter from iqs_jupyter import schema iqs = iqs_jupyter.connect() ``` ## Optional: clean up ``` iqs.persistent_index.delete_all_persisted_model_compartments() iqs.in_memory_index.delete_all_inmemory_model_compartments() iqs.queries.unregister_all_queries() ``` ## Identify model by compartment URI, check URI is vacant, start acquisition ``` model = schema.ModelCompartment(compartment_uri = "foo:/bar/baz") model not in iqs.persistent_index.list_persisted_model_compartments().persisted_model_compartments start_response = iqs.acquisition.start_acquisition(model) start_response ``` ## Upload model contents chunk by chunk ``` chunk_data = { "elementStore": [ { "classifier": { "classifierName": "Class", "packageNsUri": "http://www.nomagic.com/magicdraw/UML/2.5.1" }, "elements": [ { "compartmentURI": "foo:/bar/baz", "relativeElementID": "class_Foo" }, { "compartmentURI": "foo:/bar/baz", "relativeElementID": "class_Bar" } ] } ], "attributeStore": [ { "attribute": { "name": "name", "ownerProxy": { "classifierName": "NamedElement", "packageNsUri": "http://www.nomagic.com/magicdraw/UML/2.5.1" }, "typeProxy" : { "classifierName" : "String", "packageNsUri" : "http://www.nomagic.com/magicdraw/UML/2.5.1" } }, "tuples": [ { "source": { "compartmentURI": "foo:/bar/baz", "relativeElementID": "class_Foo" }, "values": [ "Class Foo" ] }, { "source": { "compartmentURI": "foo:/bar/baz", "relativeElementID": "class_Bar" }, "values": [ "Class Bar" ] } ] } ], "referenceStore": [ { "reference": { "name" : "redefinedElement", "ownerProxy" : { "classifierName" : "RedefinableElement", "packageNsUri" : "http://www.nomagic.com/magicdraw/UML/2.5.1" }, "typeProxy" : { "classifierName" : "RedefinableElement", "packageNsUri" : "http://www.nomagic.com/magicdraw/UML/2.5.1" }, "oppositeProxy" : { "featureName" : "_redefinableElementOfRedefinedElement", "featureKind" : "EREFERENCE", "classifierProxy" : { "classifierName" : 
"RedefinableElement", "packageNsUri" : "http://www.nomagic.com/magicdraw/UML/2.5.1" } } }, "tuples": [ { "source": { "compartmentURI": "foo:/bar/baz", "relativeElementID": "class_Bar" }, "targets": [ { "compartmentURI": "foo:/bar/baz", "relativeElementID": "class_Foo" } ] } ] } ], "singleCompartment": False } chunk_request = schema.UploadChunkRequest( data_chunk = schema.PersistIndexData( write_handle = start_response.write_handle, compartment_uri = start_response.compartment_uri, index_data = chunk_data, all_updates_from_primary_model = True ) ) upload_response = iqs.acquisition.upload_acquisition_chunk(chunk_request) upload_response ``` ## Do not forget to close & save model when done (or discard if aborted) ``` iqs.acquisition.close_acquisition(start_response) ``` ## Use newly acquired model on server ``` model in iqs.persistent_index.list_persisted_model_compartments().persisted_model_compartments iqs.in_memory_index.load_model_compartment(model) test_query_package = "test.queries" test_query_main = "test.queries.redefines" test_query_code = ''' pattern redefines( src: Class, srcName: java String, trg: Class, trgName: java String ) { RedefinableElement.redefinedElement(src, trg); Class.name(src, srcName); Class.name(trg, trgName); } ''' if test_query_main not in iqs.queries.list_queries().viatra.query_fq_ns: # skip if already registered try: iqs.queries.register_queries_plain_text(test_query_code, query_package=test_query_package) except: print("Query registration not available as guest user; please try again in a few minutes") else: print("Query is already registered; proceed") iqs.query_execution.execute_query_on_model_compartment( schema.ExecuteQueryOnCompartmentRequest(model_compartment = model, query_fqn = test_query_main) ) ```
github_jupyter
# Introduction to Data Science. Lecture 2: Notebooks and Python Basics *COMP 5360 / MATH 4100, University of Utah, http://datasciencecourse.net/* Welcome to your first Jupyter notebook! This will be our main working environment for this class. # Jupyter Notebook Basics First, let's get familiar with Jupyter Notebooks. Notebooks are made up of "cells" that can contain text or code. Notebooks also show you output of the code right below a code cell. These words are written in a text cell using a simple formatting dialect called [markdown](http://jupyter-notebook.readthedocs.io/en/latest/examples/Notebook/Working%20With%20Markdown%20Cells.html). Double click on this cell text or press enter while the cell is selected to see how it is formatted and change it. We can make words *italic* or **bold** or add [links](http://datasciencecourse.net) or include pictures: ![Data science cat](datasciencecat.jpg) The content of the notebook, as you edit in your browser, is written to the `.ipynb` file we provided. If you want to read up on Notebooks in details check out the [excellent documentation](http://jupyter-notebook.readthedocs.io/en/latest/notebook.html). ## Google Colab An alternative to native Jupyter Notebooks are cloud-hosted google colab notebooks. For this class you should have the notbeooks installed locally, but some of you might have run into troubles with that, so [here is a google colab](https://colab.research.google.com/drive/1lsxQQ2dTboFo4RHAGkmPxcuENN2uZEC9) notebook that you can run interactively without an installation. ## Writing Code The most interesting aspect of notebooks, however, is that we can write code in the cells. You can use [many different programming languages](https://github.com/jupyter/jupyter/wiki/Jupyter-kernels) in Jupyter notebooks, but we'll stick to Python. 
So, let's try it out: ``` print ("Hello World!") a = 3 # the return value of the last line of a cell is the output a ``` Again, we've greeted the world out there using a print statement. We also assigned a variable and returned it, which makes it the output of this cell. Notice that the output here is directly written into the notebook. You can change something in a cell and re-run it using the "run cell" button in the toolbar, or use the `CTRL+ENTER` shortcut. Another cool thing about cells is that they preserve the state of what happened before. Let's initialize a couple of variables in the next cell: ``` age = 2 gender = "female" name = "Datascience Cat" smart = True ``` These variables are now available to all cells below or above **if you executed the cell**. In practice, you should never rely on a variable from a lower cell in an earlier cell. **This behavior is different from executing the cells as a Python file.** If you make a change to a cell, you need to execute it again. You can also batch-execute multiple cells using the "Cell" menu in the toolbar. Let's do something with the variables we just defined: ``` print (name + ", age: " + str(age) + ", " + gender + ", is smart: " + str(smart)) ``` In the previous cell, we've [concatenated a couple of strings](https://docs.python.org/3.5/tutorial/introduction.html#strings) to produce one longer string using the `+` operator. Also, we had to call the `str()` function to get [string representations of these variables](https://docs.python.org/3.5/library/stdtypes.html#str). An alternative way to do this is not to concatenate the string but to pass each variable in as a separate argument to the print function: ``` print (name, "age: " + str(age), gender, "is smart: " + str(smart), sep=", ") ``` The last argument, `sep=", "` tells the print function to use a comma and a space between each argument. ## Modes Notebooks have two modes, a **command mode** and **edit mode**. 
You can see which mode you're in by the color of the cell: * **green** means edit mode, * **blue** means command mode. Many operations depend on your mode. For code cells, you can switch into edit mode with "Enter", and get out of it with "Escape". ## Shortcuts While you can always use the tool-bar above, you'll be much more efficient if you use a couple of shortcuts. The most important ones are: **`Ctrl+Enter`** runs the current cell. **`Shift+Enter`** runs the current cell and jumps to the next cell. **`Alt+Enter`** runs the cell and adds a new one below it. In command mode: **`h`** shows a help menu with all these commands. **`a`** adds a cell before the current cell. **`b`** adds a cell after the current cell. **`dd`** deletes a cell. **`m`** as in **m**arkdown, switches a cell to markdown mode. **`y`** as in p**y**thon switches a cell to code. ## Kernels When you [run code](http://jupyter-notebook.readthedocs.io/en/latest/examples/Notebook/Running%20Code.html), the code is actually executed in a **kernel**. You can do bad things to a kernel: you can get it stuck in an endless loop, crash it, corrupt it, etc. And you probably will do all of these things :). So sometimes you might have to interrupt your kernel or restart it. Use the "Kernel" menu to restart the kernel, re-run your notebook, etc. Also, before submitting a homework or a project, make sure to `Restart and Run All`. This will create a clean run of your project, without any side effects that you might encounter during development. We want you to submit the homeworks **with output**, and by doing that you will make sure that we actually can also execute your code properly. ## Storing Output Notebooks contain both the input to a computation and the outputs. If you run a notebook, all the outputs generated by the code cells are also stored in the notebook. 
That way, you can look at notebooks also in non-interactive environments, like on [GitHub for this notebook](https://github.com/datascience-course/2018-datascience-lectures/blob/master/02-basic-python/lecture-02-notebook.ipynb). The Notebook itself is stored in a rather ugly format containing the text, code, and the output. As discussed, this can sometimes be challenging when working with version control. ### Exercise 2: Creating Cells, Executing Code 1. Create a new code cell below where you define variables containing your name, your age in years, and your major. 2. Create another cell that uses these variables and prints a concatenated string stating your name, major, and your age in years, months, and days (assuming today is your birthday). The output should look like that: ``` Name: Science Cat, Major: Computer Science, Age: 94 years, or 1128 months, or 34310 days. ``` # Python Basics ## Functions In math, functions transfrom an input to an output as defined by the property of the function. You probably remember functions defined like this $f(x) = x^2 + 3$ In programming, functions can do exactly this, but are also used to execute “subroutines”, i.e., to execute pieces of code in various order and under various conditions. Functions in programming are very important for structuring and modularizing code. In computer science, functions are also called “procedures” and “methods” (there are subtle distinctions, but nothing we need to worry about at this time). The following Python function, for example, provides the output of the above defined function for every valid input: ``` def f(x): result = x ** 2 + 3 return result ``` We can now run this function with multiple input values: ``` print(f(2)) print(f(3)) f(5) ``` Let's take a look at this function. The first line ```python def f(x): ``` defines the function of name `f` using the `def` keyword. 
The name we use (`f` here) is largely arbitrary, but following good software engineering practices it should be something meaningful. So instead of `f`, **`square_plus_three` would be a better function name in this case**. After the function name follows a list of parameters, in parantheses. In this case we define that the function takes only one parameter, `x`, but we could also define multiple parameters like this: ```python def f(x, y, z): ``` The parameters are then available as local variables within the function. The second line does the actual computation and assigns it to a **local variable** called `result`. The third line uses the `return` keyword to return the result variable. Functions can have a return value that we can assign to a variable. For example, here we could write: ```python my_result = f(10) ``` Which would assign the return value of the function to the variable `my_result`. Note that the lines of code that belong to a function are **intended by four spaces** (you can hit tab to intend, but it will be converted to four spaces). Python defines the scope of a function using intendation. Many other programming languages use curly brackets `{}` to do this. A function is ended by a new line. For example, the same function wouldn't work like this: ``` def f(x): result = x ** 2 + 3 # Throws a NameError becauser result isn't defined outside the function return result ``` Equally, we can't intend by too much: ``` def f(x): result = x ** 2 + 3 # Throws an IndentationError return result ``` ## Scope Another critical concept when working with functions is to understand the scope of a variable. Scope defines under which circumstances a variable is accessible. 
For example, in the following code snippet we cannot access the variable defined inside a function: ``` def scope_test(): function_scope = "only readable in here" # Within the function, we can use the variable we have defined print("Within function: " + function_scope) # calling the function, which will print scope_test() # If we try to use the function_scope variable outside of the function, we will find that it is not defined. # This will throw a NameError, because Python doesn't know about that variable here print("Outside function: " + function_scope) ``` You might wonder “Why is that? Wouldn't it make sense to have access to variables wherever I need access?”. The reason for scoping is that it's simply much easier to **build reliable software when we modularize code**. When we use a function, we shouldn't have to worry about its internals. Another practical reason is that this way we can **re-use variable names** that were used in other places. This is really important when we work with other peoples' code (e.g., libraries). If that weren't possible, we might get nasty side-effects just because the library uses a variable with the same name somewhere. You can, however, use variables defined in the larger scope in the sub-scope: ``` name = "Science Cat" def print_name_with_dr(): print("Dr.", name) print_name_with_dr() ``` This is generally **not considered good practice** - functions should rely on their input parameters. Otherwise it can easily lead to side effects. This would be the better approach: ``` # notice that we're re-using the parameter name def print_name_with_dr(name): print("Dr.", name) print_name_with_dr(name) ``` Finally, there is a way to define a variable within a function for use outside its scope by using the global keyword. There are reasons to do this, however, it is generally discouraged. ``` def scope_test(): # Think long and hard before you do this - generally you shoudln't. I have never. 
global global_scope global_scope = "defined in the function, global scope" # Within the function, we can use the variable we have defined print("Within function: " + global_scope) scope_test() # Since this is defined as global we can also print the variable here print("Outside function: " + global_scope) ``` ### Exercise 3: Functions Write a function that * takes two numerical variables * multiplies them with each other * divides them by a numerical variable defined in the scope outside the function * and returns the result. Print the result of the function for three different sets of input variables. ## Looking Ahead: Conditions, Loops, Advanced Data Types We've learned how to execute operations and call and define functions. In the next lecture, we'll learn how we can control the flow of execution in a program unsing conditions (if statements) and loops. We'll also introduce more advanced data types such as lists and dictionaries.
github_jupyter
<div id="section1" dir='rtl'> <h2> توجه </h2> <hr> بخش زیادی از این فایل آموزشی مربوط به مسابقه دیتادیز است و حاصل زحمات آن ها می باشد. </div> # data-hub: * [Website](https://data-hub.ir/) * [Youtube](https://www.youtube.com/channel/UCrBcbQWcD0ortWqHAlP94ug) * [Github](https://github.com/datahub-ir) * Telegram Channel: @**data_hub_ir** * Instagram Page: @**data_hub_ir** * Telegram Group: @**data_jobs** <div id="section1" dir='rtl'> <h2> بارگیری و کاوش داده‌ها </h2> <hr> در این دفترچه با مجموعه داده کوچکی به نام <code>property_data.csv</code> که در کنار دفترچه آمده است کار می‌کنیم. برای خواندن آن از کتابخانۀ <code>pandas</code> و برای برخی اعمال که بر روی این مجموعه داده قرار است انجام دهیم از کتابخانۀ <code>numpy</code> استفاده می‌کنیم. </div> ``` import numpy as np import pandas as pd df = pd.read_csv('property_data.csv') df ``` <div dir="rtl"> یکی از کار‌های سریع و مفیدی که لازم است انجام دهیم این است که با استفاده از تابع <code>head</code> کتابخانۀ <code>pandas</code> محتویات داخل مجموعه داده شامل اسم ستون‌ها و چند سطر اول آن را ببینیم. همچنین با استفاده از تابع <code>dtypes</code> نوع داده‌های هر ستون را مشاهده کنیم. </div> ``` df.tail(1) df.dtypes # برگرداندن نوع ستون ها ``` <div dir='rtl'> حال با استفاده از اطلاعاتی که بدست آوردیم می‌توانیم بفهمیم که ستون‌های ما به چه معنا هستند و مقداری که انتظار داریم در مجموعه‌ی داده برای آن‌ها ببینیم چیست. به طور مثال: <ul> <li> <code>ST_NUM</code>: شماره خیابان، Numeric type </li> <li> <code>ST_NAME</code>: نام خیابان، string </li> <li> <code>OWN_OCCUPIED</code>: کسی در خانه زندگی می‌کند یا خیر؟، 'Y': بله، 'N': خیر </li> <li> <code>NUM_BEDROOMS</code>: تعداد اتاق خواب‌ها، Numeric type </li> </ul> چیزی که متوجه‌اش می‌شویم این است که برخی انتظارات ما با چیزی که <code>dtypes</code> می‌گوید سازگار نیست و این بدین معنا است که برخی مقادیر از آن ستون‌ها با چیزی به غیر از نوع دلخواه پر شده است که باید آن‌ها را پاک‌سازی کنیم. <br> نوع دیگر مقادیری که باید پاک‌سازی کنیم <code>NaN</code> ها هستند. 
وجود <code>NaN</code> به این معنا است که داده در آن ویژگی مقداری نگرفته است. </div> <div id="section1" dir='rtl'> <h2> مقادیر گم‌شده </h2> <hr> ابتدا راه‌کار‌هایی را مطرح می‌کنیم که مجموعه داده را از مقادیر <code>NaN</code> پاک‌سازی کند. <br> برای انجام این کار می‌توان از توابع <code>()fillna</code> یا <code>()dropna</code> بهره جست. </div> ``` df dropped_df = df.dropna() # هستند را حذف می کند Nan تمامی سطرهایی که شامل dropped_df dropped_df = df.dropna(how='all') dropped_df dropped_df = df.dropna(axis=1) # هستند را حذف می کند Nan تمامی ستون هایی که شامل dropped_df int(df.shape[1] * .9) # ستون هایی که کمتر از ۹۰ درصد داده سالم دارند را حذف می کند dropped_df = df.dropna(thresh=int(df.shape[0] * .9), axis=1) dropped_df dropped_df = df.dropna(thresh=int(df.shape[1] * .9), axis=0) dropped_df ``` <div dir="rtl"> برای پر کردن داده‌هایی که مقدار <code>NaN</code> گرفته‌اند، ابتدا باید ستون این داده‌ها را مشخص کنیم و سپس باید با مقادیری که با توجه به مسئله‌ای که با آن سر و کار داریم توجیه پذیر است، آن‌ها را مقداردهی کنیم. </div> ``` # برمیگرداند False وگرنه True داشته باشد Nan مقدار 'ST_NUM' اگر سطر df['ST_NUM'].isnull() df['ST_NAME'].isnull().sum() df['ST_NUM'].isnull().sum() df.columns df df['ST_NAME'].isnull() np.array(df['ST_NAME'].isnull()) column_names = df.columns Nan_columns = list() for col in column_names: if(True in np.array(df[col].isnull())): Nan_columns.append(col) Nan_columns ``` <div dir="rtl"> حال می‌توانیم هر یک از ستون‌هایی که دارای مقدار <code>NaN</code> هستند را به شکل مناسبی مقداردهی کنیم: </div> ``` df['SQ_FT'] = df['SQ_FT'].fillna(800) # را با ۱۰ پر می کند 'ST_NUM' ستون Nan مقدار df # را با میانگین ستون پر میکند Nan مقادیر df['ST_NUM'] = df['ST_NUM'].fillna(df['ST_NUM'].mean()) df ``` <div dir="rtl"> حال که بلدیم با داده‌های <code>NaN</code> چه کنیم، به راهکار پاک‌سازی داده‌هایی که با type ‌موردنظر ستون سازگار نیستند می‌پردازیم. 
<br> راهکار کلی به این شکل است که ابتدا باید این داده‌ها را در هر ستون شناسایی کنیم، سپس به جای آن‌های <code>NaN</code> قرار دهیم و در نهایت عملیات‌هایی که برای پاک‌سازی NaN در بالا توضیح دادیم را انجام دهیم. </div> ``` len(df['NUM_BATH']) for i in range(len(df['NUM_BATH'])): try: int(df['NUM_BATH'][i]) # نوع ستون مدنظر except ValueError: df.loc[i, 'NUM_BATH'] = np.nan # Nan جایگزینی با df int(df['NUM_BATH'][6]) ``` <div dir="rtl"> روش دیگر آن است که این مقادیر اشتباه را از قبل خودمان مشخص کنیم و در هنگام خواندن از فایل <code>csv</code> به جای آن‌ها <code>NaN</code> قرار دهیم. <br> به طور مثال: </div> ``` missing_values = ["n/a", "na", "--", "HURLEY"] new_df = pd.read_csv('property_data.csv', na_values=missing_values) new_df new_df.dtypes ``` <div id="section1" dir="rtl"> <h2> Assert: کارهایی را که انجام دادید، تست کنید </h2> <hr> با استفاده از تابع <code>assert</code> پایتون می‌توانیم به سادگی شروطی بر روی مقادیر ستون‌ها قرار دهیم و اگر مجموعۀ داده در شرط موردنظر صدق نکند، این تابع از اجرای ادامه برنامه جلوگیری می‌کند. به طور مثال با توجه به عملیات انجام شده در بالا نباید ستون ST_NUM مجموعه داده <code>df</code> مقدار <code>NaN</code> داشته باشد. </div> ``` assert(df['ST_NUM'] is not np.nan) assert(df['PID'] is not np.nan) ``` <div id="section1" dir="rtl"> <h2> پیش‌پردازش با sklearn </h2> <p></p> <hr> مدل‌ها برای کار نیازمند داده به عنوان ورودی هستند. بنابراین باید داده‌ها را به صورت متناسب با نوع ورودی آن‌ها دربیاوریم. برای این کار از پکیج <code>preprocessing</code> استفاده می‌کنیم. </div> ``` from sklearn import preprocessing ary_int = np.random.randint(-100, 100, 10) ary_int ary_str = ['foo', 'bar', 'baz', 'x', 'y', 'z', 'z', 'z'] ary_str ``` <div dir='rtl'> زمانی که با داده‌های متنی کار می‌کنیم، ممکن است همه داده‌ها عددی نباشند و مثلا داده‌های بعضی ستون‌ها به شکل طبقه‌بندی(<code>categorical</code>) باشند. ما باید آن‌ها را نیز به صورت عددی دربیاوریم چرا که اکثر مدل‌ها در <code>sklearn</code> انتظار دارند داده‌های عددی به عنوان ورودی دریافت کنند. 
یکی از راه‌‌های کد کردن داده‌های طبقه‌بندی استفاده از <code>labelEncoder</code> است. <code>labelEncoder</code> بازای هر دسته موجود در ورودی، یک عدد از صفر تا تعداد دسته‌ی ورودی منهای یک اختصاص می‌دهد. </div> ``` l_encoder = preprocessing.LabelEncoder() l_encoder.fit_transform(ary_str) ary_str print(l_encoder.transform(['foo'])) print(l_encoder.transform(['baz'])) print(l_encoder.transform(['bar'])) ``` <div dir='rtl'> همان‌طور که می‌بینید اعداد اختصاص داده‌شده به برچسب‌ها در این حالت بدون‌ترتیب هستند. در مثال بالا <code>foo</code> قبل از <code>baz</code> آمده اما عدد اختصاص داده‌شده به آن بیشتر است. در صورتی که تعداد دسته‌هایی که کد می‌کنیم زیاد باشد ممکن است بخواهیم این نگاشت را به صورت یک دیکشنری داشته باشیم. </div> ``` list(l_encoder.classes_) dict(zip(l_encoder.classes_, l_encoder.transform(l_encoder.classes_))) ``` <div dir='rtl'> کار با <code>DataFrame</code> از این هم ساده‌تر است! تنها چیزی که نیاز داریم این است که از متد <code>()apply.</code> برای اعمال <code>labelEncoder</code> به <code>DataFrame</code> استفاده کنیم. در این صورت مقادیر هر ستون به صورت جداگانه کد می‌شوند. برای مثال دقت کنید که <code>foo</code> در ستون اول همانند <code>y</code> در ستون دوم به ۱ نگاشته می‌شود. </div> ``` l_encoder = preprocessing.LabelEncoder() df = pd.DataFrame(data = {'col1': ['foo','bar','foo','bar'], 'col2': ['x', 'y', 'x', 'z'], 'col3': [3000, 20, 3000, 400]}) df df['col1'] = l_encoder.fit_transform(df['col1']) df df.apply(l_encoder.fit_transform) ``` <div dir='rtl'> <div dir='rtl'>روش دیگری که به معرفی آن می‌پردازیم. روش <code>onehot</code> است. بسیاری از مدل‌هایی که با آن‌ها کار می‌کنیم از معیار فاصله برای حل مسائل استفاده می‌کنند. برای مثال در مسئله‌ي دسته‌بندی به روش <code>KNN</code> به دنبال نزدیک‌ترین همسایه‌ها به داده‌ی مورد نظر هستیم. <br> فاصله در رابطه با داده‌های طبقه‌بندی شده معنی خود را از دست می‌دهد. برای مثال فرض کنید یکی از ویژگی‌های ما برند خودرو است. برندهای داده‌ها از سه دسته‌ی <code dir="ltr">['benz', 'bmw', 'saipa']</code> هستند. 
اگر به این داده‌ها برچسب‌های ۰، ۱و ۲ را اختصاص دهیم به صورت ضمنی این معنی را به مسئله القا کردیم که فاصله‌ی بنز با سایپا بیشتر از فاصلۀ بنز با بی‌ام‌و است (فرض بدی هم نیست!) و فاصلۀ بی‌ام‌و و سایپا همانند فاصله‌ی بنز و بی‌ام‌و است (این یکی بی‌انصافی است!) بنابراین باید به دنبال راهی باشیم که در آن فاصلۀ کد تولید شده برای این سه برند مساوی باشد. <br> <code>onehot</code> روشی است که این امکان را برای ما فراهم می‌کند. در این روش بازای ورودی یک آرایه به طول تعداد کلاس‌های آن ورودی در نظر می‌گیریم. بازای هر دسته تمامی درایه‌های این آرایه‌ی جدید را برابر صفر قرار می‌دهیم و تنها درایه‌ای را که مربوط به کلاس آن دسته است را یک می‌کنیم. شکل زیر مثالی از این نحوه‌ی کدکردن را نشان می‌دهد. <br> <img src="https://miro.medium.com/proxy/1*WXpoiS7HXRC-uwJPYsy1Dg.png" alt="" /> <caption><a href='https://blog.myyellowroad.com/'>https://blog.myyellowroad.com/</a></caption> </div> </div> ``` df pd.get_dummies(df) ``` <div dir='rtl'> گاهی ممکن است تمایل داشته باشیم که دامنۀ ویژگی‌ در داده‌ها را <code>scale</code> کنیم. مثلا از دامنۀ ۱ تا ۱۰۰ به ۰ تا ۱ یا هر دامنۀ دیگری. برای این کار از <code>MinMaxScaler</code> استفاده می‌کنیم. </div> ``` ary_int mm_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1)) mm_scaler.fit([ary_int]) print(mm_scaler.fit_transform([ary_int]), '!انگار چیزی اشتباه شده') np.expand_dims(ary_int, axis=1) ``` <div dir='rtl'> همان‌طور که مشاهده می‌شود خط آخر چیزی نیست که انتظار آن را داشتیم. دلیل این امر آن است که ورودی باید یک ماتریس <code>(n,1)</code> باشد. برای این کار کافیست ورودی را به صورت <code>numpy array</code> تعریف کنیم. 
</div> ``` print(mm_scaler.fit_transform(np.expand_dims(ary_int, axis=1))) # از این راه هم می توان کار بالا را انجام داد mm_scaler.fit_transform(ary_int.reshape(-1, 1)) mm_scaler = preprocessing.MinMaxScaler(feature_range=(0, 10)) mm_scaler.fit_transform(ary_int.reshape(-1, 1)) X_train = np.array([[ 1., -1., 2.], [ 2., 0., 0.], [ 0., 1., -1.]]) X_scaled = preprocessing.scale(X_train) X_scaled X_scaled.mean(axis=0) X_scaled.std(axis=0) df numeric_df = df.apply(lambda x: np.log10(x) if np.issubdtype(x.dtype, np.number) else x) numeric_df df numeric_df['col3'] = df['col3'].apply(lambda x: np.log10(x)) df[df.duplicated(keep=False)] df.duplicated().sum() df[df.duplicated() == False] tmp = df[['col1', 'col2']] tmp tmp = df.drop(['col3'], axis=1) tmp tmp = df.drop(0, axis=0) tmp arr = np.array([1, 7, 5, 4, 6, 3, 11, 17, 51, 14, 61, 23]) arr pd.cut(arr, 4) pd.cut(np.array([1, 7, 5, 4, 6, 3]),3, labels=["bad", "medium", "good"]) df pd.cut(df['col3'], 3) pd.cut(df['col3'], 3, labels=False) ```
github_jupyter
``` # Import the modules import sqlite3 import spiceypy import numpy as np import pandas as pd from matplotlib import pyplot as plt # Establish a connection to the comet database con = sqlite3.connect('../databases/comets/mpc_comets.db') # Extract information about the comet 67P comet_67p_from_db = pd.read_sql('SELECT NAME, PERIHELION_AU, ' \ 'SEMI_MAJOR_AXIS_AU, ' \ 'APHELION_AU, ECCENTRICITY, ' \ 'ARG_OF_PERIH_DEG, LONG_OF_ASC_NODE_DEG ' \ 'FROM comets_main WHERE NAME LIKE "67P%"', con) # Print the orbital elements of 67P print(f'{comet_67p_from_db.iloc[0]}') # Load SPICE kernels (meta file) spiceypy.furnsh('kernel_meta.txt') # Get the G*M value for the Sun _, GM_SUN_PRE = spiceypy.bodvcd(bodyid=10, item='GM', maxn=1) GM_SUN = GM_SUN_PRE[0] # Set an initial and end time ini_datetime = pd.Timestamp('2004-01-01') end_datetime = pd.Timestamp('2014-01-01') # Create a numpy array with 1000 timesteps between the initial and end time datetime_range = np.linspace(ini_datetime.value, end_datetime.value, 1000) # Convert the numpy arraay to a pandas date-time object datetime_range = pd.to_datetime(datetime_range) # Set an initial dataframe for the 67P computations comet_67p_df = pd.DataFrame([]) # Set the UTC date-times comet_67p_df.loc[:, 'UTC'] = datetime_range # Convert the UTC date-time strings to ET comet_67p_df.loc[:, 'ET'] = comet_67p_df['UTC'].apply(lambda x: \ spiceypy.utc2et(x.strftime('%Y-%m-%dT%H:%M:%S'))) # Compute the ET corresponding state vectors comet_67p_df.loc[:, 'STATE_VEC'] = \ comet_67p_df['ET'].apply(lambda x: spiceypy.spkgeo(targ=1000012, \ et=x, \ ref='ECLIPJ2000', \ obs=10)[0]) # Compute the state vectors corresponding orbital elements comet_67p_df.loc[:, 'STATE_VEC_ORB_ELEM'] = \ comet_67p_df.apply(lambda x: spiceypy.oscltx(state=x['STATE_VEC'], \ et=x['ET'], \ mu=GM_SUN), \ axis=1) # Assign miscellaneous orbital elements as individual columns # Set the perihelion. 
Convert km to AU comet_67p_df.loc[:, 'PERIHELION_AU'] = \ comet_67p_df['STATE_VEC_ORB_ELEM'].apply(lambda x: \ spiceypy.convrt(x[0], \ inunit='km', \ outunit='AU')) # Set the eccentricity comet_67p_df.loc[:, 'ECCENTRICITY'] = \ comet_67p_df['STATE_VEC_ORB_ELEM'].apply(lambda x: x[1]) # Set the inclination in degrees comet_67p_df.loc[:, 'INCLINATION_DEG'] = \ comet_67p_df['STATE_VEC_ORB_ELEM'].apply(lambda x: np.degrees(x[2])) # Set the longitude of ascending node in degrees comet_67p_df.loc[:, 'LONG_OF_ASC_NODE_DEG'] = \ comet_67p_df['STATE_VEC_ORB_ELEM'].apply(lambda x: np.degrees(x[3])) # Set the argument of perihelion in degrees comet_67p_df.loc[:, 'ARG_OF_PERIH_DEG'] = \ comet_67p_df['STATE_VEC_ORB_ELEM'].apply(lambda x: np.degrees(x[4])) # Set the semi-major axis in AU comet_67p_df.loc[:, 'SEMI_MAJOR_AXIS_AU'] = \ comet_67p_df['STATE_VEC_ORB_ELEM'].apply(lambda x: \ spiceypy.convrt(x[-2], \ inunit='km', \ outunit='AU')) # Compute the aphelion, based on the semi-major axis and eccentricity comet_67p_df.loc[:, 'APHELION_AU'] = \ comet_67p_df.apply(lambda x: x['SEMI_MAJOR_AXIS_AU'] \ * (1.0 + x['ECCENTRICITY']), \ axis=1) # Let's plot the perihelion, eccentricity and argument of perihelion # Let's set a dark background plt.style.use('dark_background') # Set a default font size for better readability plt.rcParams.update({'font.size': 14}) # We plot the data dynamically in a for loop. col_name represents the column # name for both dataframes; ylabel_name is used to change the y label. for col_name, ylabel_name in zip(['PERIHELION_AU', \ 'ECCENTRICITY', \ 'ARG_OF_PERIH_DEG'], \ ['Perihelion in AU', \ 'Eccentricity', \ 'Arg. of. peri. in degrees']): # Set a figure with a certain figure size fig, ax = plt.subplots(figsize=(12, 8)) # Line plot of the parameter vs. 
the UTC date-time from the SPICE data ax.plot(comet_67p_df['UTC'], \ comet_67p_df[col_name], \ color='tab:orange', alpha=0.7, label='SPICE Kernel') # As a guideline, plot the parameter data from the MPC data set as a # horizontal line ax.hlines(y=comet_67p_from_db[col_name], \ xmin=ini_datetime, \ xmax=end_datetime, \ color='tab:orange', linestyles='dashed', label='MPC Data') # Set a grid for better readability ax.grid(axis='both', linestyle='dashed', alpha=0.2) # Set labels for the x and y axis ax.set_xlabel('Time in UTC') ax.set_ylabel(ylabel_name) # Now we set a legend. However, the marker opacity in the legend has the # same value as in the plot ... leg = ax.legend(fancybox=True, loc='upper right', framealpha=1) # ... thus, we set the markers' opacity to 1 with this small code for lh in leg.legendHandles: lh.set_alpha(1) # Save the plot in high quality plt.savefig(f'67P_{col_name}.png', dpi=300) ```
github_jupyter
<a href="https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/Perceiver/Perceiver_for_masked_language_modeling_and_image_classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ## Set-up environment In this notebook, we are going to show that you can do masked language modeling and image classification with the same architecture. We first install HuggingFace Transformers. ``` !pip install -q git+https://github.com/huggingface/transformers.git ``` ## Using Perceiver on text Here we load `PerceiverForMaskedLM`, which is trained in a similar way as `BertForMaskedLM`, except that it can be trained on raw UTF-8 bytes, rather than subwords (BERT employs [WordPiece subword tokenization](https://paperswithcode.com/method/wordpiece)). The max sequence length is set to 2048 bytes, as the authors did this for a fair comparison to BERT's 512 subwords. We can prepare text for the model using `PerceiverTokenizer`. ``` from transformers import PerceiverTokenizer, PerceiverForMaskedLM tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver") model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver") ``` We move the model to the GPU, if it's available. ``` import torch device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model.to(device) ``` Here, we "tokenize" the text (this will just turn the text into a sequence of byte IDs). Next, we replace the byte IDs of the word we'd like to mask by the mask token ID of the tokenizer. The authors note that the model performs much better if the masked span starts with a space. ``` text = "This is an incomplete sentence where some words are missing." # prepare input encoding = tokenizer(text, padding="max_length", return_tensors="pt") # mask " missing.". 
encoding.input_ids[0, 52:61] = tokenizer.mask_token_id inputs, input_mask = encoding.input_ids.to(device), encoding.attention_mask.to(device) print("Inputs:", tokenizer.decode(inputs.squeeze())) ``` Next, we can perform a forward pass through the model. The inputs are of shape (batch_size, seq_length) = (1, 2048). The model outputs logits of shape (batch_size, seq_len, vocab_size), which in this case will be equal to (1, 2048, 262). The vocabulary size of the Perceiver is 262, which is 256 (for the bytes) + 6 for 6 reserved special tokens. ``` # forward pass outputs = model(inputs=inputs, attention_mask=input_mask) logits = outputs.logits masked_tokens_predictions = logits[0, 51:61].argmax(dim=-1) print(tokenizer.decode(masked_tokens_predictions)) ``` ## Using Perceiver on images The Perceiver also works really well on images. Here we load our familiar cats image. ``` from PIL import Image import requests url = "https://storage.googleapis.com/perceiver_io/dalmation.jpg" url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) image ``` The Perceiver authors released 3 variants of the Perceiver for image classification (which only differ in the preprocessing). Here we load the first variant, `PerceiverForImageClassificationLearned`, which adds learned absolute position embeddings to the pixel values. We can prepare images for the model using `PerceiverFeatureExtractor`, which will center crop + resize + normalize images to be of resolution 224x224. ``` from transformers import PerceiverFeatureExtractor, PerceiverForImageClassificationLearned del model feature_extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-learned") model = PerceiverForImageClassificationLearned.from_pretrained("deepmind/vision-perceiver-learned") model.to(device) ``` Here we prepare the image for the model, and forward it through it. 
``` # prepare input encoding = feature_extractor(image, return_tensors="pt") inputs, input_mask = encoding.pixel_values.to(device), None # forward pass outputs = model(inputs, input_mask) logits = outputs.logits ``` The model outputs logits of shape (batch_size, num_labels), which in this case will be (1, 1000) - as the model was trained on ImageNet-1k, which includes 1,000 possible classes. ``` print("Predicted class:", model.config.id2label[logits.argmax(-1).item()]) ``` ## Using Perceiver on images (Fourier) The second variant adds fixed Fourier 2D position embeddings. ``` from transformers import PerceiverForImageClassificationFourier del model model = PerceiverForImageClassificationFourier.from_pretrained("deepmind/vision-perceiver-fourier") model.to(device) # prepare input encoding = feature_extractor(image, return_tensors="pt") inputs, input_mask = encoding.pixel_values.to(device), None # forward pass outputs = model(inputs, input_mask) logits = outputs.logits print("Predicted class:", model.config.id2label[logits.argmax(-1).item()]) ``` ## Using Perceiver on images (Conv) The third variant applies a Conv2D + maxpool preprocessing operation on the image, before using it for cross-attention with the latents of the Perceiver encoder. ``` from transformers import PerceiverForImageClassificationConvProcessing del model model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv") model.to(device) # prepare input encoding = feature_extractor(image, return_tensors="pt") inputs, input_mask = encoding.pixel_values.to(device), None # forward pass outputs = model(inputs, input_mask) logits = outputs.logits print("Predicted class:", model.config.id2label[logits.argmax(-1).item()]) ```
github_jupyter
# ds4se Tutorial - Analysis Data Science for Software Engieering (ds4se) is an academic initiative to perform exploratory analysis on software engieering artifact and metadata. Data Management, Analysis, and Benchmarking for DL and Traceability. In this tutorial, we will use ds4se to analyze Libest dataset and tries to find the some statistatical information about various source and target artifacts. ds4se library requries several other libraries to be present and up to date. In the following cells, we install those libraries and upgrade the ones that need to be up to date. ``` pip install --upgrade gensim !pip install nbdev !pip install sentencepiece pip install dit ``` To use the ds4se library in your machine, simply run the following command to install it. ``` pip install --upgrade ds4se import pandas as pd import numpy as np #this facade provides an interface for users to use the functionalityies ds4se provides. For the complete list that facade contains, see the project pypi page. import ds4se.facade as facade source_file = pd.read_csv("[libest-pre-req].csv",names=['ids', 'text'], header=None, sep=' ') target_file = pd.read_csv("[libest-pre-tc].csv",names=['ids', 'text'], header=None, sep=' ') ``` Here is a preview of the source artifact class ``` source_file ``` Here's a preview of target artifact class ``` target_file ``` # Some Preprocessing Almost all methods in the analysis part takes in dataframe(s) as input. The library is designed to recognize dataframe column named "contents" as the content of all the file. So we need to start by changing the column names from "text" into "contents" ``` source_file = source_file.rename(columns={"text":"contents"}) # source_file = source_file.drop(columns='ids') target_file = target_file.rename(columns={"text":"contents"}) # target_file = target_file.drop(columns=["ids"], axis=1) source_file target_file ``` # NumDoc Let's first check how many documents each artifacts class contains. 
Note that the method takes in two parameters at a time: it will process both source and target artifact classes at the same time. ``` num_source = facade.NumDoc(source_file, target_file) num_source ``` From the preview of the dataframe, we can see it indeed returns the correct value. The method returns a list of four integers: the number of documents for the source artifact class, the number of documents for the target artifact class, and the last two numbers are the differences between the previous results.
github_jupyter
``` import sys import os # sklearn from sklearn.metrics import precision_recall_fscore_support from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from interpret_text.experimental.classical import ClassicalTextExplainer import pandas as pd train = pd.read_csv('../../DeepHateLingo/input/train.csv') train.head() x_train = train['text'] y_train = train['target'] x_train test = pd.read_csv('../../DeepHateLingo/input/test.csv') test.head() x_test = test['text'] y_test = test['target'] x_test validation = pd.read_csv('../../DeepHateLingo/input/validation.csv') validation.head() x_valid = validation['text'] y_valid = validation['target'] x_valid X_train, X_test, y_train, y_test = train_test_split(x_train, y_train, train_size=0.8, test_size=0.2) # Create explainer object that contains default glassbox classifier and explanation methods explainer = ClassicalTextExplainer() label_encoder = LabelEncoder() y_train = label_encoder.fit_transform(y_train) y_test = label_encoder.transform(y_test) print("X_train shape =" + str(X_train.shape)) print("y_train shape =" + str(y_train.shape)) print("X_train data structure = " + str(type(X_train))) classifier, best_params = explainer.fit(X_train, y_train) # obtain best classifier and hyper params print("best classifier: " + str(best_params)) #mean_accuracy = classifier.score(x_test, y_test, sample_weight=None) #print("accuracy = " + str(mean_accuracy * 100) + "%") y_pred = classifier.predict(X_test) [precision, recall, fscore, support] = precision_recall_fscore_support(y_test, y_pred,average='macro') # for testing from scrapbook.api import glue working_dir = os.getcwd() mean_accuracy = classifier.score(X_test, y_test, sample_weight=None) glue("accuracy", mean_accuracy) glue("precision", precision) glue("recall", recall) glue("f1", fscore) print("[precision, recall, fscore, support] = " + str([precision, recall, fscore, support])) # Enter any document or a document and label pair that needs 
to be interpreted document = "কয়েকবছর আগে ভারতের নির্বাচনে চরম সাম্প্রদায়িক কট্টরপন্থী দল বিজেপির নেতা মোদী রাষ্ট্রীয় ক্ষমতায় আসীন হলে আমাদের দেশের বিএনপির বাধঁ ভাঙ্গা উল্লাস দেখে মনে হয়েছিল যে মোদী এবার সোনিয়া গান্ধীর চরম আস্থাভাজন হাসিনা সরকার কে চিৎপটাং করে খালেদা জিয়া কে মসনদে বসিয়ে দিবে কিন্তু হায় সে আশায় গুড়ে বালি দিয়ে মোদী আর ও বেশী দীর্ঘ দিন হাসিনা সরকারকে ক্ষমতায় থাকার পথ সুগম করে দিল" doc2 = "খানকি তোদের জন্য বাংলার যুবগয়া আজ খায়াপ বড বড দুধ আর পাছা ঢাকের রাখ মাগি" doc3 = "ছাত্রলীগকে সন্ত্রাসী সংগঠন হিসেবে ষোষনা করে ছাত্রলীগের রাজনীতি নিষিদ্ধ ঘোষনা করা হোক" # Obtain the top feature ids for the selected class label explainer.preprocessor.labelEncoder = label_encoder local_explanation = explainer.explain_local(doc3) y = classifier.predict(doc3) predicted_label = label_encoder.inverse_transform(y) local_explanation = explainer.explain_local(doc3, predicted_label) from interpret_text.experimental.widget import ExplanationDashboard ExplanationDashboard(local_explanation) ছাত্রলীগকে সন্ত্রাসী সংগঠন হিসেবে ষোষনা করে ছাত্রলীগের রাজনীতি নিষিদ্ধ ঘোষনা করা হোক ```
github_jupyter
# <font color='red'>BackPropagation</font> **There will be some functions that start with the word "grader" ex: grader_sigmoid(), grader_forwardprop(), grader_backprop() etc, you should not change those function definition.<br><br>Every Grader function has to return True.** ## <font color='red'>Loading data </font> ``` import pickle import numpy as np from tqdm import tqdm import matplotlib.pyplot as plt with open('data.pkl', 'rb') as f: data = pickle.load(f) print(data.shape) X = data[:, :5] y = data[:, -1] print(X.shape, y.shape) ``` # <font color='red'>Computational graph</font> <img src='https://i.imgur.com/seSGbNS.png'> * **If you observe the graph, we are having input features [f1, f2, f3, f4, f5] and 9 weights [w1, w2, w3, w4, w5, w6, w7, w8, w9]**.<br><br> * **The final output of this graph is a value L which is computed as (Y-Y')^2** # <font color='red'>Task 1: Implementing backpropagation and Gradient checking </font> <font color='blue'><b>Check this video for better understanding of the computational graphs and back propagation</font> ``` from IPython.display import YouTubeVideo YouTubeVideo('i94OvYb6noo',width="1000",height="500") ``` * <b>Write two functions<br> * Forward propagation</b>(Write your code in<font color='blue'> def forward_propagation()</b></font>)<br><br> For easy debugging, we will break the computational graph into 3 parts. <font color='green'><b>Part 1</b></font></b> <img src='https://i.imgur.com/0xUaxy6.png'><br><br> <font color='green'><b>Part 2</b></font></b><br> <img src='https://i.imgur.com/J29pAJL.png'><br><br> <font color='green'><b>Part 3</b></font></b> <img src='https://i.imgur.com/vMyCsd9.png'> <pre> <font color='green'> def forward_propagation(X, y, W): <font color='grey'> # X: input data point, note that in this assignment you are having 5-d data points # y: output varible # W: weight array, its of length 9, W[0] corresponds to w1 in graph, W[1] corresponds to w2 in graph, <br> ..., W[8] corresponds to w9 in graph. 
# you have to return the following variables # exp= part1 (compute the forward propagation until exp and then store the values in exp) # tanh =part2(compute the forward propagation until tanh and then store the values in tanh) # sig = part3(compute the forward propagation until sigmoid and then store the values in sig) # now compute remaining values from computional graph and get y' # write code to compute the value of L=(y-y')^2 # compute derivative of L w.r.to Y' and store it in dl # Create a dictionary to store all the intermediate values # store L, exp,tanh,sig,dl variables </font> return (dictionary, which you might need to use for back propagation) <font color='grey'> </font> </font> </pre> * <b>Backward propagation</b>(Write your code in<font color='blue'> def backward_propagation()</b></font>) </b> <pre> <font color='green'> def backward_propagation(L, W,dictionary): <font color='grey'> # L: the loss we calculated for the current point # dictionary: the outputs of the forward_propagation() function # write code to compute the gradients of each weight [w1,w2,w3,...,w9] # Hint: you can use dict type to store the required variables # return dW, dW is a dictionary with gradients of all the weights </font> return dW </font> </font> </pre> ## <font color='red'>Gradient clipping</font> <b> Check this <a href='https://towardsdatascience.com/how-to-debug-a-neural-network-with-gradient-checking-41deec0357a9'>blog link</a> for more details on Gradient clipping we know that the derivative of any function is $$\lim_{\epsilon\to0}\frac{f(x+\epsilon)-f(x-\epsilon)}{2\epsilon}$$ * The definition above can be used as a numerical approximation of the derivative. Taking an epsilon small enough, the calculated approximation will have an error in the range of epsilon squared. * In other words, if epsilon is 0.001, the approximation will be off by 0.00001. Therefore, we can use this to approximate the gradient, and in turn make sure that backpropagation is implemented properly. 
This forms the basis of <b>gradient checking!</b> ## <Font color='blue'>Gradient checking example</font> <font > lets understand the concept with a simple example: $f(w1,w2,x1,x2)=w_{1}^{2} . x_{1} + w_{2} . x_{2}$ from the above function , lets assume $w_{1}=1$, $w_{2}=2$, $x_{1}=3$, $x_{2}=4$ the gradient of $f$ w.r.t $w_{1}$ is \begin{array} {lcl} \frac{df}{dw_{1}} = dw_{1} &=&2.w_{1}.x_{1} \\& = &2.1.3\\& = &6 \end{array} let calculate the aproximate gradient of $w_{1}$ as mentinoned in the above formula and considering $\epsilon=0.0001$ \begin{array} {lcl} dw_1^{approx} & = & \frac{f(w1+\epsilon,w2,x1,x2)-f(w1-\epsilon,w2,x1,x2)}{2\epsilon} \\ & = & \frac{((1+0.0001)^{2} . 3 + 2 . 4) - ((1-0.0001)^{2} . 3 + 2 . 4)}{2\epsilon} \\ & = & \frac{(1.00020001 . 3 + 2 . 4) - (0.99980001. 3 + 2 . 4)}{2*0.0001} \\ & = & \frac{(11.00060003) - (10.99940003)}{0.0002}\\ & = & 5.99999999999 \end{array} Then, we apply the following formula for gradient check: <i>gradient_check</i> = $\frac{\left\Vert\left (dW-dW^{approx}\rm\right) \right\Vert_2}{\left\Vert\left (dW\rm\right) \right\Vert_2+\left\Vert\left (dW^{approx}\rm\right) \right\Vert_2}$ The equation above is basically the Euclidean distance normalized by the sum of the norm of the vectors. We use normalization in case that one of the vectors is very small. As a value for epsilon, we usually opt for 1e-7. Therefore, if gradient check return a value less than 1e-7, then it means that backpropagation was implemented correctly. Otherwise, there is potentially a mistake in your implementation. If the value exceeds 1e-3, then you are sure that the code is not correct. in our example: <i>gradient_check</i> $ = \frac{(6 - 5.999999999994898)}{(6 + 5.999999999994898)} = 4.2514140356330737e^{-13}$ you can mathamatically derive the same thing like this \begin{array} {lcl} dw_1^{approx} & = & \frac{f(w1+\epsilon,w2,x1,x2)-f(w1-\epsilon,w2,x1,x2)}{2\epsilon} \\ & = & \frac{((w_{1}+\epsilon)^{2} . x_{1} + w_{2} . 
x_{2}) - ((w_{1}-\epsilon)^{2} . x_{1} + w_{2} . x_{2})}{2\epsilon} \\ & = & \frac{4. \epsilon.w_{1}. x_{1}}{2\epsilon} \\ & = & 2.w_{1}.x_{1} \end{array} ## <font color='red'> Implement Gradient checking </font> <br> (Write your code in <font color='blue'> def gradient_checking()</font>) **Algorithm** <pre> <font color='darkblue'> W = initilize_randomly def gradient_checking(data_point, W):<font color='grey'> # compute the L value using forward_propagation() # compute the gradients of W using backword_propagation()</font> approx_gradients = [] for each wi weight value in W:<font color='grey'> # add a small value to weight wi, and then find the values of L with the updated weights # subtract a small value to weight wi, and then find the values of L with the updated weights # compute the approximation gradients of weight wi</font> approx_gradients.append(approximation gradients of weight wi)<font color='grey'> # compare the gradient of weights W from backword_propagation() with the aproximation gradients of weights with <br> gradient_check formula</font> return gradient_check</font> <b>NOTE: you can do sanity check by checking all the return values of gradient_checking(),<br> they have to be zero. 
if not you have bug in your code </pre></b> # <font color='red'> Task 2 : Optimizers </font> * As a part of this task, you will be implementing 3 type of optimizers(methods to update weight) * Use the same computational graph that was mentioned above to do this task * Initilze the 9 weights from normal distribution with mean=0 and std=0.01 **Check below video and <a href='https://cs231n.github.io/neural-networks-3/'>this</a> blog** ``` from IPython.display import YouTubeVideo YouTubeVideo('gYpoJMlgyXA',width="1000",height="500") ``` <font color='blue'><b>Algorithm</b> <pre> for each epoch(1-100): for each data point in your data: using the functions forward_propagation() and backword_propagation() compute the gradients of weights update the weigts with help of gradients ex: w1 = w1-learning_rate*dw1 </pre> ## <font color='red'> Implement below tasks</b> * <b>Task 2.1</b>: you will be implementing the above algorithm with <b>Vanilla update</b> of weights<br><br> * <b>Task 2.2</b>: you will be implementing the above algorithm with <b>Momentum update</b> of weights<br><br> * <b>Task 2.3</b>: you will be implementing the above algorithm with <b>Adam update</b> of weights **Note : If you get any assertion error while running grader functions, please print the variables in grader functions and check which variable is returning False .Recheck your logic for that variable .** <br> <br> <br> # <font color='red'>Task 1 </font> ## <font color='blue'>Forward propagation </font> ``` import numpy as np import math X[0], y[0] def sigmoid(z): import numpy as np '''In this function, we will compute the sigmoid(z)''' # we can use this function in forward and backward propagation return 1/(1+np.exp(-z)) def forward_propagation(x, y, w): '''In this function, we will compute the forward propagation ''' # X: input data point, note that in this assignment you are having 5-d data points X = x # y: output varible y=y # W: weight array, its of length 9, W[0] corresponds to w1 in graph, W[1] 
corresponds to w2 in graph,..., W[8] corresponds to w9 in graph. W = w # you have to return the following variables # exp= part1 (compute the forward propagation until exp and then store the values in exp) expo = np.exp( ( ((W[0]*X[0])+(W[1]*X[1])) * ((W[0]*X[0])+(W[1]*X[1])) ) + W[5] ) # tanh =part2(compute the forward propagation until tanh and then store the values in tanh) tanh_t = math.tanh(expo + W[6]) # sig = part3(compute the forward propagation until sigmoid and then store the values in sig) prev_sig = W[7] + ( np.sin(W[2]*X[2]) * ( (W[3]*X[3])+(W[4]*X[4]) ) ) sigmoid = 1/(1+np.exp(-prev_sig)) # now compute remaining values from computional graph and get y' y_hat = tanh_t + (W[8]*sigmoid) # write code to compute the value of L=(y-y')^2 L = (y-y_hat)**2 # compute derivative of L w.r.to Y' and store it in dl dl = -2*(y-y_hat) # Create a dictionary to store all the intermediate values temp_dict={} # store L, exp,tanh,sig variables temp_dict['dy_pr']=dl temp_dict['loss']=L temp_dict['exp']=expo temp_dict['tanh']=tanh_t temp_dict['sigmoid']=sigmoid return temp_dict#(dictionary, which you might need to use for back propagation) ``` <font color='cyan'>Grader function - 1</font> ``` def grader_sigmoid(z): val=sigmoid(z) assert(val==0.8807970779778823) return True grader_sigmoid(2) ``` <font color='cyan'>Grader function - 2 </font> ``` def grader_forwardprop(data): dl = (data['dy_pr']==-1.9285278284819143) loss=(data['loss']==0.9298048963072919) part1=(data['exp']==1.1272967040973583) part2=(data['tanh']==0.8417934192562146) part3=(data['sigmoid']==0.5279179387419721) assert(dl and loss and part1 and part2 and part3) return True w=np.ones(9)*0.1 d1=forward_propagation(X[0],y[0],w) grader_forwardprop(d1) ``` ## <font color='blue'>Backward propagation</font> ``` def backward_propagation(L,W,d1): '''In this function, we will compute the backward propagation ''' # L: the loss we calculated for the current point # dictionary: the outputs of the forward_propagation() 
function # write code to compute the gradients of each weight [w1,w2,w3,...,w9] # Hint: you can use dict type to store the required variables k = d1['dy_pr'] dW = dict() # temporary dictionary to store derivatives with respect to each weight # dw1 = # in dw1 compute derivative of L w.r.to w1 dw1 = k*(1-d1['tanh']**2)*d1['exp']*((2*(W[0]*L[0]+W[1]*L[1]))*L[0]) dW['dw1']=dw1 # dw2 = # in dw2 compute derivative of L w.r.to w2 dw2 = k*(1-d1['tanh']**2)*d1['exp']*((2*(W[0]*L[0]+W[1]*L[1]))*L[1]) dW['dw2']=dw2 # dw3 = # in dw3 compute derivative of L w.r.to w3 dw3 = k*W[8]*(d1['sigmoid']*(1-d1['sigmoid']))*(L[4]*W[4]+L[3]*W[3])*np.cos(W[2]*L[2])*L[2] dW['dw3']=dw3 # dw4 = # in dw4 compute derivative of L w.r.to w4 dw4 = k*W[8]*(d1['sigmoid']*(1-d1['sigmoid']))*np.sin(W[2]*L[2])*L[3] dW['dw4']=dw4 # dw5 = # in dw5 compute derivative of L w.r.to w5 dw5 = k*W[8]*(d1['sigmoid']*(1-d1['sigmoid']))*np.sin(W[2]*L[2])*L[4] dW['dw5']=dw5 # dw6 = # in dw6 compute derivative of L w.r.to w6 dw6 = k*(1-d1['tanh']**2)*d1['exp'] dW['dw6']=dw6 # dw7 = # in dw7 compute derivative of L w.r.to w7 dw7 = k*(1-d1['tanh']**2) dW['dw7']=dw7 # dw8 = # in dw8 compute derivative of L w.r.to w8 dw8 = k*W[8]*(d1['sigmoid']*(1-d1['sigmoid'])) dW['dw8']=dw8 # dw9 = # in dw9 compute derivative of L w.r.to w9 dw9 = k*d1['sigmoid'] dW['dw9']=dw9 # return dW, dW is a dictionary with gradients of all the weights return dW ``` <font color='cyan'>Grader function - 3 </font> ``` def grader_backprop(data): dw1=(data['dw1']==-0.22973323498702003) dw2=(data['dw2']==-0.021407614717752925) dw3=(data['dw3']==-0.005625405580266319) dw4=(data['dw4']==-0.004657941222712423) dw5=(data['dw5']==-0.0010077228498574246) dw6=(data['dw6']==-0.6334751873437471) dw7=(data['dw7']==-0.561941842854033) dw8=(data['dw8']==-0.04806288407316516) dw9=(data['dw9']==-1.0181044360187037) assert(dw1 and dw2 and dw3 and dw4 and dw5 and dw6 and dw7 and dw8 and dw9) return True w=np.ones(9)*0.1 d1=forward_propagation(X[0],y[0],w) 
d1=backward_propagation(X[0],w,d1) grader_backprop(d1) ``` ## <font color='blue'> Implement gradient checking</font> ``` W = np.random.random(9) def gradient_checking(X, y, W): # compute the L value using forward_propagation() dictionary = forward_propagation(X, y, W) L = dictionary['loss'] # compute the gradients of W using backword_propagation() bp_gradient_dict = backward_propagation(X, W, dictionary) dws = list(bp_gradient_dict.values()) approx_gradients = [] W_new = W.copy() epsilon = 1e-7 gc = [] # gradient checks for i in range(len(W)): # add a small value to weight wi, and then find the values of L with the updated weights W_new[i] = W[i]+epsilon lplus = forward_propagation(X, y, W_new)['loss'] # subtract a small value to weight wi, and then find the values of L with the updated weights W_new[i] = W[i]-epsilon lminus= forward_propagation(X, y, W_new)['loss'] # compute the approximation gradients of weight wi dwapprox = (lplus - lminus)/(2*epsilon) approx_gradients.append(dwapprox) # Doing gradient checking for each weight. 
gc.append((dws[i]-dwapprox)/(dws[i]+dwapprox)) print(gc[i]) if gc[i] < 1e-7 : print(True) else: print(False) dict_new = forward_propagation(X,y,W_new) L_new = dict_new['loss'] # bp_grad_dict_new = backward_propagation(X,W_new,dict_new) # # compare the gradient of weights W from backword_propagation() with the aproximation gradients of weights with gradient_check formula # dw = np.linalg.norm(np.array(list(bp_gradient_dict.values()))) # dw_approx = np.linalg.norm(np.array(list(bp_grad_dict_new.values()))) # gradient_check = np.linalg.norm(dw-dw_approx)/(np.linalg.norm(dw)+np.linalg.norm(dw_approx)) # print(bp_grad_dict_new) # print(dw, dw_approx, np.linalg.norm(dw-dw_approx)) gradient_check = (L - L_new)/(L + L_new) return gradient_check gradient_checking(X[0],y[0],W) ``` # <font color='red'>Task 2: Optimizers ###<font color='blue'>Algorithm with Vanilla update of weights</font> ``` from tqdm import tqdm np.random.seed(42) epochs= list(range(10)) lossv = [] W = np.random.random_sample(size=9) learning_rate = 0.001 for e in tqdm(epochs): for i in range(len(X)): data = X[i] target = y[i] fwd_dict = forward_propagation(data,target,W) #dw is a dictionary with gradients of all the weights dw = backward_propagation(data,W,fwd_dict) dw = list(dw.values()) # after python 3.7, dictionary is ordered for j in range(len(W)): W[j] = W[j]-learning_rate*dw[j] # los -> loss los = forward_propagation(data,target,W)['loss'] lossv.append(los) import matplotlib.pyplot as plt plt.plot(epochs, lossv, '-r*') plt.title("Loss v/s Epoch of Vanilla update of weights") plt.xlabel('Epoch') plt.ylabel('Loss') for i in zip(epochs, lossv): print(i) ``` ###<font color='blue'>Algorithm with Momentum update of weights</font> ``` np.random.seed(42) epochs= list(range(10)) lossm = [] W = np.random.random_sample(size=9) learning_rate = 0.001 beta=0.95 mt = np.zeros(9) for e in tqdm(epochs): for i in range(len(X)): data = X[i] target = y[i] fwd_dict = forward_propagation(data,target,W) #dw is a 
dictionary with gradients of all the weights dw = backward_propagation(data,W,fwd_dict) dw = list(dw.values()) for j in range(len(W)): mt[j] = beta*mt[j] + (1-beta)*dw[j] W[j] = W[j]-learning_rate*mt[j] # los -> loss los = forward_propagation(data,target,W)['loss'] lossm.append(los) import matplotlib.pyplot as plt plt.plot(epochs, lossm, '-g*') plt.title("Loss v/s Epoch of Momentum update of weights") plt.xlabel('Epoch') plt.ylabel('Loss') for i in zip(epochs, lossm): print(i) ``` ###<font color='blue'>Algorithm with Adam update of weights</font> ``` np.random.seed(42) epochs= list(range(10)) lossa = [] W = np.random.random_sample(size=9) learning_rate = 0.001 beta1=0.90 beta2=0.99 mt = np.zeros(9) vt = np.zeros(9) epsilon = 0.001 for e in tqdm(epochs): for i in range(len(X)): data = X[i] target = y[i] fwd_dict = forward_propagation(data,target,W) #dw is a dictionary with gradients of all the weights dw = backward_propagation(data,W,fwd_dict) dw = list(dw.values()) for j in range(len(W)): mt[j] = beta1*mt[j] + (1-beta1)*dw[j] vt[j] = beta2*vt[j] + (1-beta2)*(dw[j])**2 m_hat = mt[j]/(1-beta1) v_hat = vt[j]/(1-beta2) pros = (learning_rate/(np.sqrt(v_hat))+epsilon)*m_hat W[j] = W[j]-pros # los -> loss los = forward_propagation(data,target,W)['loss'] lossa.append(los) import matplotlib.pyplot as plt plt.plot(epochs, lossa, '-g*') plt.title("Loss v/s Epoch of Momentum update of weights") plt.xlabel('Epoch') plt.ylabel('Loss') for i in zip(epochs, lossa): print(i) ``` <font color='blue'>Comparision plot between epochs and loss with different optimizers</font> ``` plt.figure(figsize=(10,6)) plt.plot(epochs, lossv, '--r<',markersize=15, label='Vanilla') plt.plot(epochs, lossm, '--go', label='Momentum') plt.plot(epochs, lossa, '-b*', label='Adam') plt.title("Loss v/s Epoch of Momentum,Adam, and Vanilla update of weights") plt.xlabel('Epoch') plt.ylabel('Loss') plt.legend() ``` <p style="font-family:'Segoe UI';font-size:16px"> <b>Conclusion:</b><br> <b>Vanilla:</b><br> 1. 
In early few epochs, loss is falling very sharp. 2. After few epochs, loss is not decreasing much. <br> <b>Momentum:</b><br> 1. There is not much difference but little bit better than vanilla. <br> <b>Adam:</b><br> 1. As we know Adam is one of better option among all, and we are observing the same here. 2. In beginning 2 epochs loss is decreasing and then later loss keeps decreasing. <br><br> <b>Among all, Adam performed better.</b> </p>
github_jupyter
# Load Data ``` import pandas as pd from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler df = pd.read_csv("Dataset/Dataset2.csv") X = df.iloc[:,1:5] y = df.iloc[:,9] # scaler = StandardScaler() # X = scaler.fit_transform(X) X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.20, random_state=101) X.head() y.head() df.describe() df.info() ``` # Data Analysis ### Bar Graph on number of essay based on grade ``` df.iloc[:,9].plot(kind='hist') ``` Observation: Imbalanced data ### Score vs No. of Characters ``` import seaborn as sns sns.set_style("whitegrid") sns.barplot(x="Score",y='No. of Characters',hue='Score',data=df,palette="rocket") ``` ### Score vs No. of Words ``` sns.barplot(x="Score",y='No. of Words',hue='Score',data=df,palette="rocket") ``` ### Score vs No. of Unique words ``` sns.barplot(x="Score",y='No. of Unique words',hue='Score',data=df,palette="rocket") ``` ### Score vs POS Sum ``` sns.barplot(x="Score",y='POS Sum',hue='Score',data=df,palette="rocket") ``` # SVM ``` from sklearn import svm SVM = svm.SVC(probability=True) SVM.fit(X_train, y_train) poly_pred = SVM.predict(X_test) from sklearn.metrics import accuracy_score poly_accuracy = accuracy_score(y_test, poly_pred) print('Accuracy (Polynomial Kernel): ', "%.2f" % (poly_accuracy*100)) ``` # KNN ``` from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import accuracy_score KNN = KNeighborsClassifier(n_neighbors=13) KNN.fit(X_train,y_train) KNN_pred = KNN.predict(X_test) KNN_accuracy = accuracy_score(y_test,KNN_pred) print("KNN Accuracy:", "%.2f" % (KNN_accuracy*100)) ``` # RFC ``` from sklearn.ensemble import RandomForestClassifier RFC = RandomForestClassifier(n_estimators=11,random_state =4) RFC.fit(X_train, y_train) RFC_pred = RFC.predict(X_test) RFC_accuracy = accuracy_score(y_test,RFC_pred) print("RFC Accuracy:", "%.2f" % (RFC_accuracy*100)) ``` # XG BOOST ``` from xgboost import XGBClassifier import warnings 
warnings.filterwarnings('ignore') XGB = XGBClassifier(eval_metric='mlogloss') XGB.fit(X_train, y_train) XGB_pred = XGB.predict(X_test) XGB_accuracy = accuracy_score(y_test,XGB_pred) print("XGB Accuracy:", "%.2f" % (XGB_accuracy*100)) ``` # LogisticRegression ``` from sklearn.linear_model import LogisticRegression LogisticRegression = LogisticRegression(solver='lbfgs',max_iter=9999999999999999999999999999999999999999999999999999999999999) LogisticRegression.fit(X_train, y_train) LogisticRegression_pred = LogisticRegression.predict(X_test) LogisticRegression_accuracy = accuracy_score(y_test,LogisticRegression_pred) print("LogisticRegression Accuracy:", "%.2f" % (LogisticRegression_accuracy*100)) ``` # Stack ``` from sklearn.ensemble import StackingClassifier from sklearn.ensemble import VotingClassifier estimator_list = [ ('rfc',RFC), ('knn',KNN), ('svm',SVM), ('LR',LogisticRegression), ('XGB',XGB) ] # Build stack model stack_model = VotingClassifier( estimators=estimator_list,voting='hard' ) # Train stacked model stack_model.fit(X_train, y_train) # Make predictions stack_pred = stack_model.predict(X_test) stack_accuracy = accuracy_score(y_test,stack_pred) print("Stack Test Accuracy:", "%.2f" % (stack_accuracy*100)) from sklearn.metrics import confusion_matrix, classification_report import seaborn as sns cm = confusion_matrix(y_test, stack_pred) sns.heatmap(cm,annot=True) print(classification_report(y_test, stack_pred)) import pickle pickle.dump(stack_model, open("Model/Stack_model.sav", 'wb')) ``` # Neural network ``` from keras.models import Sequential from keras.layers import Dense model = Sequential() model.add(Dense(64,input_dim = 4, activation='relu')) model.add(Dense(124,activation='relu')) model.add(Dense(6,activation='softmax')) model.compile(loss='sparse_categorical_crossentropy',optimizer='sgd',metrics=['accuracy']) model.fit(X_train,y_train,epochs=100,validation_split=0.2) ```
github_jupyter
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/espnet/notebook/blob/master/tts_realtime_demo.ipynb) # ESPnet LT real time E2E-TTS demonstration This notebook provides a demonstration of the realtime E2E-TTS using ESPnet-TTS and ParallelWaveGAN (+ MelGAN). - ESPnet: https://github.com/airenas/espnet - ParallelWaveGAN: https://github.com/kan-bayashi/ParallelWaveGAN Author: Airenas Vaičiūnas ([airenass@gmail.com](https://github.com/airenas)) ## Install ``` !pip install -q parallel_wavegan PyYaml unidecode ConfigArgparse %cd /home/avaiciunas/gfs/tts/espnet pwd ``` --- ## LT demo. Select trained model from egs/lab #### (a) Tacotron2 ``` # set path trans_type = "char" dict_path = "egs/sabina/tts1/data/lang_1char/char_train_no_dev_units.txt" model_path = "egs/sabina/tts1/exp/char_train_no_dev_pytorch_train_pytorch_tacotron2/results/model.last1.avg.best" print("sucessfully set prepared models.") # set path trans_type = "char" dict_path = "egs/sabina/tts1/data/lang_1char/char_train_no_dev_units.txt" model_path = "egs/sabina/tts1/exp/char_train_no_dev_pytorch_train_pytorch_transformer.v3.single/results/model.loss.best" #model_path = "../egs/sabina/tts1/exp/char_train_no_dev_pytorch_train_pytorch_tacotron2/results/snapshot.ep.500" print("sucessfully set prepared models.") ``` ### Download pretrained vocoder model You can select one from two models. Please only run the seletected model cells. 
#### (a) Parallel WaveGAN ``` # download pretrained model import os if not os.path.exists("downloads/en/parallel_wavegan"): !utils/download_from_google_drive.sh \ https://drive.google.com/open?id=1Grn7X9wD35UcDJ5F7chwdTqTa4U7DeVB downloads/en/parallel_wavegan tar.gz # set path vocoder_path = "downloads/en/parallel_wavegan/ljspeech.parallel_wavegan.v2/checkpoint-400000steps.pkl" vocoder_conf = "downloads/en/parallel_wavegan/ljspeech.parallel_wavegan.v2/config.yml" print("Sucessfully finished download.") ``` #### (b) MelGAN This is an **EXPERIMENTAL** model. ``` # download pretrained model import os if not os.path.exists("downloads/en/melgan"): !utils/download_from_google_drive.sh \ https://drive.google.com/open?id=1ipPWYl8FBNRlBFaKj1-i23eQpW_W_YcR downloads/en/melgan tar.gz # set path vocoder_path = "downloads/en/melgan/train_nodev_ljspeech_melgan.v1.long/checkpoint-1000000steps.pkl" vocoder_conf = "downloads/en/melgan/train_nodev_ljspeech_melgan.v1.long/config.yml" print("Sucessfully finished download.") ``` ### Setup ``` # add path import sys sys.path.append("egs/lab/tts1/local") sys.path.append("./") # define device import torch device = torch.device("cpu") # define E2E-TTS model from argparse import Namespace from espnet.asr.asr_utils import get_model_conf from espnet.asr.asr_utils import torch_load from espnet.utils.dynamic_import import dynamic_import idim, odim, train_args = get_model_conf(model_path) model_class = dynamic_import(train_args.model_module) model = model_class(idim, odim, train_args) torch_load(model_path, model) model = model.eval().to(device) inference_args = Namespace(**{"threshold": 0.5, "minlenratio": 0.0, "maxlenratio": 10.0}) # define neural vocoder import yaml import parallel_wavegan.models with open(vocoder_conf) as f: config = yaml.load(f, Loader=yaml.Loader) vocoder_class = config.get("generator_type", "ParallelWaveGANGenerator") vocoder = getattr(parallel_wavegan.models, vocoder_class)(**config["generator_params"]) 
vocoder.load_state_dict(torch.load(vocoder_path, map_location="cpu")["model"]["generator"]) vocoder.remove_weight_norm() vocoder = vocoder.eval().to(device) # define text frontend with open(dict_path) as f: lines = f.readlines() lines = [line.replace("\n", "").split(" ") for line in lines] char_to_id = {c: int(i) for c, i in lines} def frontend(text): """Clean text and then convert to id sequence.""" if trans_type == "phn": text = filter(lambda s: s != " ", g2p(text)) text = " ".join(text) print(f"Cleaned text: {text}") charseq = text.split(" ") else: print(f"Cleaned text: {text}") charseq = list(text) idseq = [] for c in charseq: if c.isspace(): idseq += [char_to_id["<space>"]] elif c not in char_to_id.keys(): idseq += [char_to_id["<unk>"]] else: idseq += [char_to_id[c]] idseq += [idim - 1] # <eos> return torch.LongTensor(idseq).view(-1).to(device) print("Now ready to synthesize!") ``` ### Synthesis ``` import time input_text = "apie tai šeštadienio rytą socialiniame tinkle paskelbė miesto meras vytautas grubliauskas, aiškėja," pad_fn = torch.nn.ReplicationPad1d( config["generator_params"].get("aux_context_window", 0)) use_noise_input = vocoder_class == "ParallelWaveGANGenerator" with torch.no_grad(): start = time.time() x = frontend(input_text) print(f"x = {x}") c, _, _ = model.inference(x, inference_args) c = pad_fn(c.unsqueeze(0).transpose(2, 1)).to(device) xx = (c,) amStart = time.time() elapsed = (amStart - start) print(f"acustic model done: {elapsed:5f} s") if use_noise_input: z_size = (1, 1, (c.size(2) - sum(pad_fn.padding)) * config["hop_size"]) z = torch.randn(z_size).to(device) xx = (z,) + xx y = vocoder(*xx).view(-1) elapsed = (time.time() - amStart) print(f"vocoder done: {elapsed:5f} s") rtf = (time.time() - start) / (len(y) / config["sampling_rate"]) print(f"RTF = {rtf:5f}") from IPython.display import display, Audio display(Audio(y.view(-1).cpu().numpy(), rate=config["sampling_rate"])) ```
github_jupyter
``` %matplotlib inline import numpy as np import jupyter_manim from manim import * %%manim -qm -v WARNING Teleportation class Teleportation(Scene): config= { "x_lim":int, # define leftmost limit of x for circuit and gates "y_lim":int # define up-most limit of y for circuit and gates } def __init__(self, **kwargs): super(Teleportation, self).__init__() #inheritance from class Scene self.x_lim=-5.5 self.y_lim=2 def construct(self): # List to hold all circuit line elements and VGroup to include all the Mobjects of circuitline circuitline=[[None for i in range(5)] for i in range(5)] circuit=VGroup() # Vgroup # Algorithm name algorithm="Teleportation Algorithm" title = Title(algorithm,include_underline=False).set_y(3.1) #title of the ALgorithm self.add_foreground_mobject(title) #section zero: create all registers: quantum and classical regs=[MathTex(r"q_0"),MathTex(r"q_1"), MathTex(r"q_2"),MathTex(r"c_{rz}"), MathTex(r"c_{rxz}")] # list of registers name registers=VGroup() for i,reg in enumerate(regs): #add registers to the scene reg.set_x(self.x_lim).set_y(2-i) circuitline[0][i]=Line([self.x_lim-0.5,2-i,0], [self.x_lim-0.6,2-i,0]) registers.add(reg) circuit.add(circuitline[0][i]) self.add(registers) dots=[None for i in range(5)] # Dots for i in range(3): dots[i]=Dot() def create_gate(gate:str,x:int,y:int,control=None,scale=1,textcolor=WHITE,boxcolor=BLUE): if control: dot=Dot().set_x(x).set_y(control) control_line= Line([x,control,0], [x,y,0]) gate_ = MathTex(gate,color=textcolor).scale(scale+0.5).set_x(x).set_y(y) gate_.bg = BackgroundRectangle(gate_,stroke_width=1,color=boxcolor, fill_opacity=1).scale(scale+1) return VGroup(control_line,gate_.bg,gate_,dot) else: gate_ = MathTex(gate,color=textcolor).scale(scale+0.5).set_x(x).set_y(y) gate_.bg = BackgroundRectangle(gate_,stroke_width=1,color=boxcolor, fill_opacity=1).scale(scale+1) return VGroup(gate_.bg,gate_) def draw_circuitline(regs,sect:int,xrange:list,ydiff: 
float,barrier=True,circuitline=circuitline,circuit=circuit): for i in range(5): circuitline[sect][i]=Line([xrange[0],ydiff-i,-0.5], [xrange[1],ydiff-i,-0.5]) circuit.add(circuitline[sect][i]) self.add(circuitline[sect][i]) if barrier: barrierline=DashedLine([xrange[1],2.2,0], [xrange[1],-0.25,0],dashed_ratio=0.75).set_opacity(0.5) self.add(barrierline) # self.add(VGroup(circuit,barrierline)) # return VGroup(circuit,barrier) def move_dot(sect: int,dots=dots,circuitline=circuitline,circuit=circuit): animation=[] for j in range(3): animation.append(MoveAlongPath(dots[j], circuitline[sect][j])) # self.play(*animation) return animation # #section I: Create a maximally entangled state sect_range1=[self.x_lim+0.6,self.x_lim+3] draw_circuitline(regs,sect=1,xrange=sect_range1,ydiff=2) H_gate1=create_gate("H",self.x_lim+1.25 ,1,scale=0.5) C_not1= create_gate("X",self.x_lim+2.25 ,0,control=1,scale=0.5) texTemplate = TexTemplate() texTemplate.add_to_preamble(r"\usepackage{braket}") state_1a=MathTex(r"\ket{q_1 q_2}=\ket{0}",tex_template=texTemplate, font_size=20).set_x(sect_range1[0]).set_y(1.5) state_1b=MathTex(r"\ket{q_1 q_2}=\frac{1}{\sqrt{2}} (\ket{00} + \ket{11})", tex_template=texTemplate, font_size=20).set_x(sect_range1[1]).set_y(1.5) state_psi=MathTex(r"\ket{q_0}=\alpha \ket{0} +\beta \ket{1}", tex_template=texTemplate, font_size=20).set_x(sect_range1[0]).set_y(2.5) self.play(Create(H_gate1),Create(C_not1),Create(state_psi),Create(state_1a)) self.play(state_psi.animate.move_to([sect_range1[1],2.5,0]), FadeTransform(state_1a,state_1b), *move_dot(sect=1), runtime=3) # # Section :II create entanglement between 1st and 2nd qubit sect_range2=[self.x_lim+3,self.x_lim+5.5] draw_circuitline(regs,sect=2,xrange=sect_range2,ydiff=2) C_not2= create_gate("X",self.x_lim+3.75 ,1,control=2,scale=0.5) H_gate2=create_gate("H",self.x_lim+4.75 ,2,scale=0.5) # state_1=MathTex(r"\frac{1}{\sqrt{2}} (\ket{00} + \ket{11})", # tex_template=texTemplate, 
font_size=25).set_x(sect_range2[0]).set_y(1.5) state_2= MathTex(r"\ket{q_0 q_1 q_2} =\frac{1}{\sqrt{2}}(\ket{000} + \ket{011}+\ket{100} +\ket{111})", tex_template=texTemplate, font_size=20).set_x(sect_range2[1]).set_y(2.5) self.play(Create(C_not2),Create(H_gate2),FadeOut(state_psi)) self.play(FadeTransform(state_1b,state_2), *move_dot(sect=2), runtime=3) # # Section III: Add measurement Gates sect_range3=[self.x_lim+5.5,self.x_lim+8] draw_circuitline(regs,sect=3,xrange=sect_range3,ydiff=2) measure_0=create_gate(r"\not \frown",self.x_lim+6.25,y=-1,control=2,scale=0.5) measure_1=create_gate(r"\not \frown",self.x_lim+7.25,y=-2,control=1,scale=0.5) state_3= MathTex(r"\ket{q_0 q_1 q_2}= \frac{1}{2}( \ket{00}(\alpha \ket{0} + \beta \ket{1})\\ + \ket{01}( \alpha \ket{1} + \beta \ket{0})\\ +\ket{10} (\alpha \ket{0} - \beta \ket{1})\\ +\ket{11}(\alpha \ket{1} - \beta \ket{0}))", tex_template=texTemplate, font_size=20).set_x(sect_range3[1]).set_y(1) self.play(Create(measure_0),Create(measure_1),FadeOut(state_2)) self.play(FadeTransform(state_2,state_3), dots[0].animate.move_to([self.x_lim+6.25,2,0]), dots[1].animate.move_to([self.x_lim+7.25,1,0]), FadeOut(dots[3]), runtime=3) self.wait(2) # Section IV: Add classical controlled gate sect_range4=[self.x_lim+8,self.x_lim+11] draw_circuitline(regs,sect=4,xrange=sect_range4,ydiff=2,barrier=False) classicX=create_gate(r"X",self.x_lim+8.75,y=0,control=-2,scale=0.5) classicZ=create_gate(r"Z",self.x_lim+9.75,y=0,control=-1,scale=0.5) state_4= MathTex(r"\ket{q_2}= \alpha \ket{0} +\beta \ket{1}", tex_template=texTemplate, font_size=20).set_x(sect_range4[1]).set_y(0.4) self.play(Create(classicX),Create(classicZ),FadeOut(state_3),FadeTransform(state_3,state_4)) self.wait(3) import manim as manim manim.__version__ ```
github_jupyter
# Example: CanvasXpress stackedline Chart No. 1 This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at: https://www.canvasxpress.org/examples/stackedline-1.html This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function. Everything required for the chart to render is included in the code below. Simply run the code block. ``` from canvasxpress.canvas import CanvasXpress from canvasxpress.js.collection import CXEvents from canvasxpress.render.jupyter import CXNoteBook cx = CanvasXpress( render_to="stackedline1", data={ "z": { "Annt1": [ "Desc:1", "Desc:2", "Desc:3", "Desc:4" ], "Annt2": [ "Desc:A", "Desc:B", "Desc:A", "Desc:B" ], "Annt3": [ "Desc:X", "Desc:X", "Desc:Y", "Desc:Y" ], "Annt4": [ 5, 10, 15, 20 ], "Annt5": [ 8, 16, 24, 32 ], "Annt6": [ 10, 20, 30, 40 ] }, "x": { "Factor1": [ "Lev:1", "Lev:2", "Lev:3", "Lev:1", "Lev:2", "Lev:3" ], "Factor2": [ "Lev:A", "Lev:B", "Lev:A", "Lev:B", "Lev:A", "Lev:B" ], "Factor3": [ "Lev:X", "Lev:X", "Lev:Y", "Lev:Y", "Lev:Z", "Lev:Z" ], "Factor4": [ 5, 10, 15, 20, 25, 30 ], "Factor5": [ 8, 16, 24, 32, 40, 48 ], "Factor6": [ 10, 20, 30, 40, 50, 60 ] }, "y": { "vars": [ "V1", "V2", "V3", "V4" ], "smps": [ "S1", "S2", "S3", "S4", "S5", "S6" ], "data": [ [ 5, 10, 25, 40, 45, 50 ], [ 95, 80, 75, 70, 55, 40 ], [ 25, 30, 45, 60, 65, 70 ], [ 55, 40, 35, 30, 15, 1 ] ] } }, config={ "graphOrientation": "vertical", "graphType": "StackedLine", "lineThickness": 3, "lineType": "spline", "showDataValues": True, "showTransition": False, "smpTitle": "Collection of Samples", "smpTitleFontStyle": "italic", "subtitle": "Random Data", "theme": "CanvasXpress", "title": "Stacked-Line Graphs", "xAxis": [ "V1", "V2" ], "xAxis2": [ "V3", "V4" ] }, width=613, height=613, events=CXEvents(), after_render=[], other_init_params={ "version": 35, "events": False, 
"info": False, "afterRenderInit": False, "noValidate": True } ) display = CXNoteBook(cx) display.render(output_file="stackedline_1.html") ```
github_jupyter
## Dimensionality reduction ### The curse of dimensionality Fitting and overfitting get worse with ''curse of dimensionality'' Bellman 1961 Think about a hypersphere. Its volume is given by $$ V_D(r) = \frac{2r^D\pi^{D/2}}{D\ \Gamma(D/2)}$$ where $\Gamma(z)$ is the complete gamma function, $D$ is the dimension, and $r$ the radius of the sphere. If you populated a hypercube of size $2r$ how much data would be enclosed by the hypersphere - as $D$ increases the fractional volume enclosed by the hypersphere goes to 0! For example: the SDSS comprises a sample of 357 million sources. - each source has 448 measured attributes - selecting just 30 (e.g., magnitude, size..) and normalizing the data range $-1$ to $1$ probability of having one of the 357 million sources reside within a unit hypersphere 1 in 1.4$\times 10^5$. ### Principal Component Analysis Points are correlated along a particular direction which doesn't align with the initial choice of axes. * we should rotate our axes to align with this correlation. * rotation preserves the relative ordering of data Choose rotation to maximize the ability to discriminate between the data points * first axis, or <u>principal component</u>, is direction of maximal variance * second principal component is orthogonal to the first component and maximizes the residual variance * ... In the following example a distribution of points drawn from a bivariate Gaussian and centered on the origin of $x$ and $y$. PCA defines a rotation such that the new axes ($x’$ and $y’$) are aligned along the directions of maximal variance (the principal components) with zero covariance. This is equivalent to minimizing the square of the perpendicular distances between the points and the principal components. 
``` import numpy as np from matplotlib import pyplot as plt from matplotlib.patches import Ellipse from matplotlib import ticker np.random.seed(42) r = 0.9 sigma1 = 0.25 sigma2 = 0.08 rotation = np.pi / 6 s = np.sin(rotation) c = np.cos(rotation) X = np.random.normal(0, [sigma1, sigma2], size=(100, 2)).T R = np.array([[c, -s], [s, c]]) X = np.dot(R, X) #------------------------------------------------------------ # Plot the diagram fig = plt.figure(figsize=(5, 5), facecolor='w') ax = plt.axes((0, 0, 1, 1), xticks=[], yticks=[], frameon=False) # draw axes ax.annotate(r'$x$', (-r, 0), (r, 0), ha='center', va='center', arrowprops=dict(arrowstyle='<->', color='k', lw=1)) ax.annotate(r'$y$', (0, -r), (0, r), ha='center', va='center', arrowprops=dict(arrowstyle='<->', color='k', lw=1)) # draw rotated axes ax.annotate(r'$x^\prime$', (-r * c, -r * s), (r * c, r * s), ha='center', va='center', arrowprops=dict(color='k', arrowstyle='<->', lw=1)) ax.annotate(r'$y^\prime$', (r * s, -r * c), (-r * s, r * c), ha='center', va='center', arrowprops=dict(color='k', arrowstyle='<->', lw=1)) # scatter points ax.scatter(X[0], X[1], s=25, lw=0, c='k', zorder=2) # draw lines vnorm = np.array([s, -c]) for v in (X.T): d = np.dot(v, vnorm) v1 = v - d * vnorm ax.plot([v[0], v1[0]], [v[1], v1[1]], '-k') # draw ellipses for sigma in (1, 2, 3): ax.add_patch(Ellipse((0, 0), 2 * sigma * sigma1, 2 * sigma * sigma2, rotation * 180. / np.pi, ec='k', fc='gray', alpha=0.2, zorder=1)) ax.set_xlim(-1, 1) ax.set_ylim(-1, 1) plt.show() ``` #### Derivation of principal component analyses Set of data $X$: $N$ observations by $K$ measurements Center data by subtracting the mean The covariance is $$ C_X=\frac{1}{N-1}X^TX,$$ $N-1$ as the sample covariance matrix. 
We want a projection, $R$, aligned with the directions of maximal variance ($Y= X R$) with covariance $$C_{Y} = R^T X^T X R = R^T C_X R$$ Derive principal component by maximizing its variance (using Lagrange multipliers and constraint) $$\phi(r_1,\lambda_1) = r_1^TC_X r_1 - \lambda_1(r_1^Tr_1-1)$$ derivative of $\phi(r_1,\lambda)$ with respect to $r_1$ set to 0 $$C_Xr_1 - \lambda_1 r_1 = 0$$ $\lambda_1$ is the root of the equation $\det(C_X - \lambda_1 {\bf I})=0$ and the largest eigenvalue $$\lambda_1 = r_1^T C_X r_1$$ Other principal components derived by applying additional constraint that components are uncorrelated (e.g., $r^T_2 C_X r_1 = 0$). #### Singular value decomposition (SVD) Common approach is eigenvalue decomposition of the covariance or correlation matrix, or singular value decomposition (SVD) of the data matrix $$U \Sigma V^T = \frac{1}{\sqrt{N - 1}} X$$ columns of $U$ are _left-singular vectors_ columns of $V$ are the _right-singular vectors_ The columns of $U$ and $V$ form orthonormal bases ($U^TU = V^TV = I$) Covariance matrix is $$ C_X = \left[\frac{1}{\sqrt{N - 1}}X\right]^T \left[\frac{1}{\sqrt{N - 1}}X\right]\nonumber\\ = V \Sigma U^T U \Sigma V^T\nonumber\\ = V \Sigma^2 V^T. $$ right singular vectors $V$ are the principal components so principal from the SVD of $X$ dont need $C_X$. #### Application of PCA In the following example we have 100 data points in 3 dimensions, $X$, and $R$ as the projection matrix. To compute the PCA components (4 in our case) we use `PCA` from `scikit-learn`. ``` from sklearn.decomposition import PCA X = np.random.normal(size=(100, 3)) R = np.random.random((3, 10)) X = np.dot(X, R) pca = PCA(n_components=4) pca.fit(X) comp = pca.transform(X) mean = pca.mean_ components = pca.components_ var = pca.explained_variance_ ``` For our astronomy use case, we are using the SDSS spectroscopic dataset. 
The SDSS spectra come from galaxies at a range of redshifts, and have sections of unreliable or missing data due to sky absorption, cosmic rays, bad detector pixels, or other effects. AstroML provides a set of spectra which have been moved to rest frame, corrected for masking using an iterative PCA reconstruction technique, and resampled to 1000 common wavelength bins. The spectra can be downloaded using `fetch_sdss_corrected_spectra()`. In the following example we plot 15 of these spectra: ``` import numpy as np from matplotlib import pyplot as plt from astroML.datasets import sdss_corrected_spectra #---------------------------------------------------------------------- # Use pre-computed PCA to reconstruct spectra data = sdss_corrected_spectra.fetch_sdss_corrected_spectra() spectra_raw = data['spectra'] spectra_corr = sdss_corrected_spectra.reconstruct_spectra(data) wavelengths = sdss_corrected_spectra.compute_wavelengths(data) #------------------------------------------------------------ # select random spectra np.random.seed(5) nrows = 5 ncols = 3 ind = np.random.randint(spectra_corr.shape[0], size=nrows * ncols) spec_sample_raw = spectra_raw[ind] spec_sample_corr = spectra_corr[ind] ``` We show the SDSS downloaded spectra with black, and our corrected spectra with blue. 
``` fig = plt.figure(figsize=(10, 8)) fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05, bottom=0.1, top=0.95, hspace=0.05) for i in range(ncols): for j in range(nrows): ax = fig.add_subplot(nrows, ncols, ncols * j + 1 + i) ax.plot(wavelengths, spec_sample_raw[ncols * j + i], '-k', lw=1) ax.plot(wavelengths, spec_sample_corr[ncols * j + i], '-k', lw=1, c='blue') ax.set_xlim(3100, 7999) ax.yaxis.set_major_formatter(plt.NullFormatter()) ax.xaxis.set_major_locator(plt.MultipleLocator(1000)) if j < nrows - 1: ax.xaxis.set_major_formatter(plt.NullFormatter()) else: plt.xlabel(r'wavelength $(\AA)$') ylim = ax.get_ylim() dy = 0.05 * (ylim[1] - ylim[0]) ax.set_ylim(ylim[0] - dy, ylim[1] + dy) plt.show() ``` ### PCA Reconstruction of a spectrum Reconstruction of spectrum, ${x}(k)$, from the eigenvectors, ${e}_i(k)$ $$ {x}_i(k) = {\mu}(k) + \sum_j^R \theta_{ij} {e}_j(k),$$ Truncating this expansion (i.e., $r<R$) $$ {x}_i(k) = {\mu}(k) + \sum_i^{r<R} \theta_i {e}_i(k), $$ - eigenvectors ordered by their associated eigenvalues - eigenvalues reflect variance within each eigenvector (sum of the eigenvalues is total variance of the system). - project a each spectrum onto these first few eigenspectra is a compression of the data This is the sense in which PCA gives for dimensionality reduction. In the following example, the input spectrum is shown in gray, and the partial reconstruction for progressively more terms is shown in black. The top panel shows only the mean of the set of spectra. By the time 20 PCA components are added, the reconstruction is very close to the input, as indicated by the expected total variance of 94%. 
``` #------------------------------------------------------------ # Compute PCA components # Eigenvalues can be computed using PCA as in the commented code below: #from sklearn.decomposition import PCA #pca = PCA() #pca.fit(spectra_corr) #evals = pca.explained_variance_ratio_ #evals_cs = evals.cumsum() # because the spectra have been reconstructed from masked values, this # is not exactly correct in this case: we'll use the values computed # in the file compute_sdss_pca.py evals = data['evals'] ** 2 evals_cs = evals.cumsum() evals_cs /= evals_cs[-1] evecs = data['evecs'] spec_mean = spectra_corr.mean(0) #------------------------------------------------------------ # Find the coefficients of a particular spectrum spec = spectra_corr[1] coeff = np.dot(evecs, spec - spec_mean) #------------------------------------------------------------ # Plot the sequence of reconstructions fig = plt.figure(figsize=(5, 5)) fig.subplots_adjust(hspace=0, top=0.95, bottom=0.1, left=0.12, right=0.93) for i, n in enumerate([0, 4, 8, 20]): ax = fig.add_subplot(411 + i) ax.plot(wavelengths, spec, '-', c='gray') ax.plot(wavelengths, spec_mean + np.dot(coeff[:n], evecs[:n]), '-k') if i < 3: ax.xaxis.set_major_formatter(plt.NullFormatter()) ax.set_ylim(-2, 21) ax.set_ylabel('flux') if n == 0: text = "mean" elif n == 1: text = "mean + 1 component\n" text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1] else: text = "mean + %i components\n" % n text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1] ax.text(0.02, 0.93, text, ha='left', va='top', transform=ax.transAxes) fig.axes[-1].set_xlabel(r'${\rm wavelength\ (\AA)}$') plt.show() ``` ### Eigenvalues The eigenvalues for the PCA decomposition of the SDSS spectra described in the previous section. The top panel shows the decrease in eigenvalue as a function of the number of eigenvectors, with a break in the distribution at ten eigenvectors. The lower panel shows the cumulative sum of eigenvalues normalized to unity. 
94% of the variance in the SDSS spectra can be captured using the first ten eigenvectors. ``` fig = plt.figure(figsize=(10, 7.5)) fig.subplots_adjust(hspace=0.05, bottom=0.12) ax = fig.add_subplot(211, xscale='log', yscale='log') ax.grid() ax.plot(evals, c='k') ax.set_ylabel('Normalized Eigenvalues') ax.xaxis.set_major_formatter(plt.NullFormatter()) ax.set_ylim(5E-4, 100) ax = fig.add_subplot(212, xscale='log') ax.grid() ax.semilogx(evals_cs, color='k') ax.set_xlabel('Eigenvalue Number') ax.set_ylabel('Cumulative Eigenvalues') ax.set_ylim(0.65, 1.00) plt.show() ``` ### PCA with missing data Observed spectrum, $x^o$ is the true spectrum, ${x}$ plus a wavelength-dependent weight, ${w}$. Weight is zero where data are missing and $1/{\sigma}^2$ for rest Minimizing the quadratic deviation between ${x}^o$ truncated reconstruction, $\sum_i \theta_i {e}_i$ and solving for $\theta_i$ gives $$ \sum_k \theta_i {w}(k) {e}_i(k) {e}_j(k) = \sum_k {w}(k) {x}^o(k) {e}_j(k), $$ If $M_{ij} = \sum_k {w}(k) {e}_i(k) {e}_j(k)$ and $F_i = \sum_k {w}(k) {x}^o(k) {e}_i(k)$ then $$ \theta_i = \sum_j M_{ij}^{-1} F_{j}, $$ - $F_j$ are coefficients derived from gappy data - $M_{ij}^{-1}$ shows how correlated eigenvectors are over the missing regions. An estimate of the uncertainty on the reconstruction coefficients is given by $$ %Cov(\theta_i,\theta_j) = \frac{1}{N}M_{ij}^{-1} {\rm Cov}(\theta_i,\theta_j) = M_{ij}^{-1}. $$ Accuracy of this reconstruction will depend on the distribution of the gaps within the data vector. The principal component vectors defined for the SDSS spectra can be used to interpolate across or reconstruct missing data. Examples of three masked spectral regions are shown comparing the reconstruction of the input spectrum (black line) using the mean and the first ten eigenspectra (blue line) The gray bands represent the masked region of the spectrum. 
``` evecs = data['evecs'] mu = data['mu'] norms = data['norms'] mask = data['mask'] #------------------------------------------------------------ # plot the results i_plot = ((wavelengths > 5750) & (wavelengths < 6350)) wavelength = wavelengths[i_plot] specnums = [20, 8, 9] subplots = [311, 312, 313] fig = plt.figure(figsize=(10, 12.5)) fig.subplots_adjust(left=0.09, bottom=0.08, hspace=0, right=0.92, top=0.95) for subplot, i in zip(subplots, specnums): ax = fig.add_subplot(subplot) # compute eigen-coefficients spectra_i_centered = spectra_corr[i] / norms[i] - mu coeffs = np.dot(spectra_i_centered, evecs.T) # blank out masked regions spectra_i = spectra_corr[i] mask_i = mask[i] spectra_i[mask_i] = np.nan # plot the raw masked spectrum ax.plot(wavelength, spectra_i[i_plot], '-', color='k', label='True spectrum', lw=1.5) # plot two levels of reconstruction for nev in [10]: if nev == 0: label = 'mean' else: label = 'reconstruction\n(nev=%i)' % nev spectra_i_recons = norms[i] * (mu + np.dot(coeffs[:nev], evecs[:nev])) ax.plot(wavelength, spectra_i_recons[i_plot], label=label, color='grey') # plot shaded background in masked region ylim = ax.get_ylim() mask_shade = ylim[0] + mask[i][i_plot].astype(float) * ylim[1] plt.fill(np.concatenate([wavelength[:1], wavelength, wavelength[-1:]]), np.concatenate([[ylim[0]], mask_shade, [ylim[0]]]), lw=0, fc='k', alpha=0.2) ax.set_xlim(wavelength[0], wavelength[-1]) ax.set_ylim(ylim) ax.yaxis.set_major_formatter(ticker.NullFormatter()) if subplot == 311: ax.legend(loc=1) ax.set_xlabel('$\lambda\ (\AA)$') ax.set_ylabel('normalized flux') plt.show() ``` ### Comparing PCA, NMF and ICA #### Nonnegative Matrix Factorization Eigenvectors are defined relative to mean data vector. Principal components that can be positive or negative but for many physical systems we know data are a linear sum of positive components (e.g. 
galaxy spectrum is a linear sum of stellar components Nonnegative matrix factorization (NMF) applies positivity constraint ``` from sklearn.decomposition import NMF X = np.random.random((100, 3)) # 100 points in 3 dims, all positive nmf = NMF(n_components=3) # setting n_components is optional nmf.fit(X) proj = nmf.transform(X) # project to 3 dimensions comp = nmf.components_ # 3 x 10 array of components err = nmf.reconstruction_err_ # how well 3 components captures data ``` #### Independent component analysis ``` from sklearn.decomposition import FastICA X = np.random.normal(size=(100, 2)) # 100 pts in 2 dims R = np.random.random((2, 5)) # mixing matrix X = np.dot(X, R) # X is now 2D data in 5D space ica = FastICA(2) # fit two components sources = ica.fit_transform(X) proj = ica.transform(X) # 100 x 2 projection of data comp = ica.components_ # the 2 x 5 matrix of indep. components ``` #### Comparing PCA, NMF and ICA A comparison of the decomposition of SDSS spectra using PCA (left panel), ICA (middle panel) and NMF (right panel). The rank of the component increases from top to bottom. For the ICA and PCA the first component is the mean spectrum (NMF does not require mean subtraction). All of these techniques isolate a common set of spectral features (identifying features associated with the continuum and line emission). The ordering of the spectral components is technique dependent. 
``` from sklearn.decomposition import NMF, FastICA, PCA data = sdss_corrected_spectra.fetch_sdss_corrected_spectra() spectra = sdss_corrected_spectra.reconstruct_spectra(data) wavelengths = sdss_corrected_spectra.compute_wavelengths(data) #---------------------------------------------------------------------- # Compute PCA, ICA, and NMF components def compute_PCA_ICA_NMF(n_components=5): spec_mean = spectra.mean(0) # PCA: use randomized PCA for speed pca = PCA(n_components - 1, random_state=0, svd_solver='randomized') pca.fit(spectra) pca_comp = np.vstack([spec_mean, pca.components_]) # ICA treats sequential observations as related. Because of this, we need # to fit with the transpose of the spectra ica = FastICA(n_components - 1, random_state=0) ica.fit(spectra.T) ica_comp = np.vstack([spec_mean, ica.transform(spectra.T).T]) # NMF requires all elements of the input to be greater than zero spectra[spectra < 0] = 0 nmf = NMF(n_components, random_state=0) nmf.fit(spectra) nmf_comp = nmf.components_ return pca_comp, ica_comp, nmf_comp n_components = 5 decompositions = compute_PCA_ICA_NMF(n_components) #---------------------------------------------------------------------- # Plot the results fig = plt.figure(figsize=(10, 8)) fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05, bottom=0.1, top=0.95, hspace=0.05) titles = ['PCA components', 'ICA components', 'NMF components'] for i, comp in enumerate(decompositions): for j in range(n_components): ax = fig.add_subplot(n_components, 3, 3 * j + 1 + i) ax.yaxis.set_major_formatter(plt.NullFormatter()) ax.xaxis.set_major_locator(plt.MultipleLocator(1000)) if j < n_components - 1: ax.xaxis.set_major_formatter(plt.NullFormatter()) else: ax.xaxis.set_major_locator( plt.FixedLocator(list(range(3000, 7999, 1000)))) ax.set_xlabel(r'wavelength ${\rm (\AA)}$') ax.plot(wavelengths, comp[j], '-k', lw=1) # plot zero line xlim = [3000, 8000] ax.plot(xlim, [0, 0], '-', c='gray', lw=1) if j == 0: ax.set_title(titles[i]) if 
titles[i].startswith('PCA') or titles[i].startswith('ICA'): if j == 0: label = 'mean' else: label = 'component %i' % j else: label = 'component %i' % (j + 1) ax.text(0.03, 0.94, label, transform=ax.transAxes, ha='left', va='top') for l in ax.get_xticklines() + ax.get_yticklines(): l.set_markersize(2) # adjust y limits ylim = plt.ylim() dy = 0.05 * (ylim[1] - ylim[0]) ax.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy) ax.set_xlim(xlim) plt.show() ``` #### Summary of pracical properties of PCA, NMF, and ICA The following table is a simple summary of the trade-offs along our axes of accuracy, interpretability, simplicity, and speed in dimension reduction methods, expressed in terms of high (H), medium (M), and low (L) categories. |Method | Accuracy | Interpretability | Simplicity | Speed | |-------|----------|------------------|------------|-------| |Principal component analysis | H | H | H | H | |Nonnegative matrix factorization | H | H | M | M| |Independent component analysis | M | M | L | L |
github_jupyter
# Custom Updater ## Overview ### Questions - How can I modify the state of a system in a custom updater? ### Objectives - Show an example of a non-trival custom updater. ## Boilerplate Code ``` from numbers import Number import hoomd import hoomd.md as md import numpy as np cpu = hoomd.device.CPU() sim = hoomd.Simulation(device=cpu, seed=1) # Create a simple cubic configuration of particles N = 5 # particles per box direction box_L = 20 # box dimension snap = hoomd.Snapshot(cpu.communicator) snap.configuration.box = [box_L] * 3 + [0, 0, 0] snap.particles.N = N**3 x, y, z = np.meshgrid(*(np.linspace(-box_L / 2, box_L / 2, N, endpoint=False),) * 3) positions = np.array((x.ravel(), y.ravel(), z.ravel())).T snap.particles.position[:] = positions snap.particles.types = ['A'] snap.particles.typeid[:] = 0 sim.create_state_from_snapshot(snap) rng = np.random.default_rng(1245) ``` ## Problem In this section, we will show how to create a custom updater that modifies the system state. To show this, we will create a custom updater that adds a prescribed amount of energy to a single particle simulating the bombardment of radioactive material into our system. For this problem, we pick a random particle and modify its velocity according to the radiation energy in a random direction. 
```
class InsertEnergyUpdater(hoomd.custom.Action):
    """Custom action that deposits a fixed amount of kinetic energy into
    one randomly chosen particle each time it is triggered."""

    def __init__(self, energy):
        # Energy (in simulation units) added per invocation.
        self.energy = energy

    def act(self, timestep):
        snap = self._state.get_snapshot()
        # Snapshot particle data is only valid on rank 0 under MPI.
        if snap.communicator.rank == 0:
            particle_i = rng.integers(snap.particles.N)
            mass = snap.particles.mass[particle_i]
            direction = self._get_direction()
            # E = 1/2 m v^2  =>  |v| = sqrt(2 E / m)
            magnitude = np.sqrt(2 * self.energy / mass)
            velocity = direction * magnitude
            old_velocity = snap.particles.velocity[particle_i]
            new_velocity = old_velocity + velocity
            # Bug fix: apply the combined velocity. The previous code
            # assigned ``velocity`` here, overwriting the particle's
            # momentum instead of adding the energy kick to it.
            snap.particles.velocity[particle_i] = new_velocity
        self._state.set_snapshot(snap)

    @staticmethod
    def _get_direction():
        """Return a unit vector drawn uniformly from the sphere."""
        theta, z = rng.random(2)
        theta *= 2 * np.pi
        z = 2 * (z - 0.5)
        return np.array([
            np.sqrt(1 - (z * z)) * np.cos(theta),
            np.sqrt(1 - (z * z)) * np.sin(theta), z
        ])
```

We will now use our custom updater with an `NVE` integrator. Particles will interact via a Lennard-Jones potential. Using the `Table` writer and a `hoomd.logging.Logger`, we will monitor the energy, which should be increasing as we are adding energy to the system. We will also thermalize our system to a `kT == 1`.

```
sim.state.thermalize_particle_momenta(filter=hoomd.filter.All(), kT=1.)
lj = md.pair.LJ(nlist=md.nlist.Cell()) lj.params[('A', 'A')] = {'epsilon': 1., 'sigma': 1.} lj.r_cut[('A', 'A')] = 2.5 integrator = md.Integrator(methods=[md.methods.NVE(hoomd.filter.All())], forces=[lj], dt=0.005) thermo = md.compute.ThermodynamicQuantities(hoomd.filter.All()) logger = hoomd.logging.Logger(categories=['scalar']) logger.add(thermo, ['kinetic_energy', 'potential_energy']) logger['total_energy'] = ( lambda: thermo.kinetic_energy + thermo.potential_energy, 'scalar') table = hoomd.write.Table(100, logger, max_header_len=1) sim.operations += integrator sim.operations += thermo sim.operations += table # Create and add our custom updater energy_operation = hoomd.update.CustomUpdater(action=InsertEnergyUpdater(10.), trigger=100) sim.operations += energy_operation sim.run(1000) ``` As we can see the total energy of the system is indeed increasing. The energy isn't increasing by 10 every time since we are adding the velocity in a random direction which may be against the current velocity. ## Improving upon our Custom Action Maybe we want to allow for the energy to be from a distribution. HOOMD-blue has a concept called a variant which allows for quantities that vary over time. Let's change the `InsertEnergyupdater` to use variants and create a custom variant that grabs a random number from a Gaussian distribution. (If you don't understand the variant code, that is fine. We are just using this to showcase how you can iteratively improve custom actions). <div class="alert alert-warning" style="color: black;"> <h4>Note:</h4> <p><code>Variant</code> objects model a parameter as a function of the timestep, so to get the value for a particular timestep we have to call the variant. For more information see the documentation for <code>hoomd.variant</code>. 
</p>
</div>

```
class InsertEnergyUpdater(hoomd.custom.Action):
    """Insert energy drawn from a ``hoomd.variant.Variant`` into one
    randomly chosen particle each time the action is triggered."""

    def __init__(self, energy):
        # Bug fix: go through the property setter so a plain number is
        # wrapped in ``hoomd.variant.Constant``. Assigning ``self._energy``
        # directly (as before) skipped that conversion, and
        # ``self.energy(timestep)`` would fail for real-number input.
        self.energy = energy

    @property
    def energy(self):
        """A `hoomd.variant.Variant` object."""
        return self._energy

    @energy.setter
    def energy(self, new_energy):
        if isinstance(new_energy, Number):
            self._energy = hoomd.variant.Constant(new_energy)
        elif isinstance(new_energy, hoomd.variant.Variant):
            self._energy = new_energy
        else:
            raise ValueError("energy must be a variant or real number.")

    def act(self, timestep):
        snap = self._state.get_snapshot()
        if snap.communicator.rank == 0:
            particle_i = rng.integers(snap.particles.N)
            mass = snap.particles.mass[particle_i]
            direction = self._get_direction()
            # Evaluate the variant at this timestep to get the energy.
            magnitude = np.sqrt(2 * self.energy(timestep) / mass)
            velocity = direction * magnitude
            old_velocity = snap.particles.velocity[particle_i]
            new_velocity = old_velocity + velocity
            # Bug fix: apply the combined velocity rather than replacing
            # the particle's momentum with the kick alone.
            snap.particles.velocity[particle_i] = new_velocity
        self._state.set_snapshot(snap)

    @staticmethod
    def _get_direction():
        """Return a unit vector drawn uniformly from the sphere."""
        theta, z = rng.random(2)
        theta *= 2 * np.pi
        z = 2 * (z - 0.5)
        return np.array([
            np.sqrt(1 - (z * z)) * np.cos(theta),
            np.sqrt(1 - (z * z)) * np.sin(theta), z
        ])


class GaussianVariant(hoomd.variant.Variant):
    """Variant whose value is a fresh Gaussian sample on every call."""

    def __init__(self, mean, std):
        hoomd.variant.Variant.__init__(self)
        self.mean = mean
        self.std = std

    def __call__(self, timestep):
        return rng.normal(self.mean, self.std)
```

We briefly show that the Gaussian Variant works.

```
energy = GaussianVariant(mean=10., std=2.)
sample_energies = np.array([energy(0) for _ in range(1000)])
f"Mean: {sample_energies.mean()}, std. dev. {sample_energies.std()}"
```

We now use the updated `InsertEnergyUpdater` in the simulation.

```
sim.operations.updaters.remove(energy_operation)
# Create and add our custom updater
energy_operation = hoomd.update.CustomUpdater(
    action=InsertEnergyUpdater(energy), trigger=100)
sim.operations.updaters.append(energy_operation)
sim.run(1000)
```

We could continue to improve upon this updater and the execution of this operation.
However, this suffices to showcase the ability of non-trivial updaters to affect the simulation state.
github_jupyter
## Lesson 2: Futures in Research ### Futures Data Quantopian has open, high, low, close, and volume (OHLCV) data for 72 US futures from the beginning of 2002 to the current date. This dataset contains both day and minute frequency data for 24 hours x 5 days a week, and is collected from electronic trade data. The list of US futures currently available on Quantopian can be found at the <a href="#futures_list">end of this notebook.</a> ### Future Object On Quantopian, a `Future` is an instance of a specific futures contract denoted by a base symbol + a code for month/year of delivery. For example, `CLF16` is a contract for <a href="http://www.cmegroup.com/trading/energy/crude-oil/light-sweet-crude.html">crude oil</a> with delivery date in January (`F`) 2016 (`16`). Here is a reference for delivery months and their corresponding codes: Code | Delivery Month | --- | --- | F | January G | February H | March J | April K | May M | June N | July Q | August U | September V | October X | November Z | December Let's start by looking at a particular contract of the Light Sweet Crude Oil future (`CL`). In Research, a reference to a futures contract is obtained via the <a href="https://www.quantopian.com/help#quantopian_research_symbols">symbols</a> function. Run the following code in a new cell to output the `Future` object corresponding to `CLF16`. ``` clf16 = symbols('CLF16') clf16 ``` Here is a brief description of some of the properies of the `Future` object: - **`root_symbol`**: The root symbol of the underlying asset. For example, `CL` corresponds to crude oil. - **`start_date`**: The date the contract becomes available on Quantopian. Note that the price of a contract might be `NaN` near the `start_date`, as it may not be actively traded until it gets closer to its delivery date. - **`end_date`**: The last date the contract can be traded or closed before delivery. 
- **`notice_date`**: The date in which the exchange can start assigning delivery to accounts holding long positions on the contract. - **`auto_close_date`**: This is two days prior to either `notice_date` or `end_date`, whichever is earlier. In backtesting, positions in contracts will be automatically closed out on their `auto_close_date`. - **`tick_size`**: The price of a future can only change in increments of its `tick_size`. For example, `CL` changes in increments of \$0.01. - **`multiplier`**: The number of units per contract. A contract for `CL` corresponds to 1000 barrels of oil. In the following lesson, we'll take a look at how to get pricing and volume data for a particular futures contract. <a></a></a> ### List of Available Futures These are the 72 US futures that are currently available on Quantopian. Symbol | Future | --- | --- | BD | Big Dow BO | Soybean Oil CM | Corn E-Mini CN | Corn DJ | DJIA Futures ET | Ethanol FF | 30-Day Federal Funds FI | 5-Year Deliverable Interest Rate Swap Futures FS | 5-Year Interest Rate Swap Futures FV | 5-Year T-Note MB | Municipal Bonds MS | Soybeans E-Mini MW | Wheat E-Mini OA | Oats RR | Rough Rice SM | Soybean Meal SY | Soybeans TN | 10-Year Deliverable Interest Rate Swap Futures TS | 10-Year Interest Rate Swap Futures TU | 2-Year T-Note TY | 10-Year T-Note UB | Ultra Tbond US | 30-Year T-Bond WC | Wheat YM | Dow Jones E-mini VX | VIX Futures AD | Australian Dollar AI | Bloomberg Commodity Index Futures BP | British Pound CD | Canadian Dollar EC | Euro FX ED | Eurodollar EE | Euro FX E-mini ES | S&P 500 E-Mini EU | E-micro EUR/USD Futures FC | Feeder Cattle JE | Japanese Yen E-mini JY | Japanese Yen LB | Lumber LC | Live Cattle LH | Lean Hogs MD | S&P 400 MidCap Futures ME | Mexican Peso MI | S&P 400 MidCap E-Mini ND | NASDAQ 100 Futures NK | Nikkei 225 Futures NQ | NASDAQ 100 E-Mini NZ | New Zealand Dollar SF | Swiss Franc SP | S&P 500 Futures TB | TBills GC | Gold HG | Copper High Grade SV | Silver CL | Light Sweet 
Crude Oil HO | NY Harbor ULSD Futures HU | Unleaded Gasoline NG | Natural Gas PA | Palladium PL | Platinum PB | Pork Bellies QG | Natural Gas E-mini QM | Crude Oil E-Mini XB | RBOB Gasoline Futures EI | MSCI Emerging Markets Mini EL | Eurodollar NYSE LIFFE MG | MSCI EAFE Mini XG | Gold mini-sized YS | Silver mini-sized RM | Russell 1000 Mini SB | Sugar #11 ER | Russell 2000 Mini
github_jupyter
### Lab 7 1) Scrieti un program care la fiecare x secunde unde x va fi aleator ales la fiecare iteratie (din intervalul [a, b] , unde a, b sunt date ca argumente) afiseaza de cate minute ruleaza programul (in minute, cu doua zecimale). Programul va rula la infinit. ``` import time import random #import sys #a = int(sys.argv[1]) #b = int(sys.argv[2]) def wait(x): time.sleep(x) def time_cron(a,b): time_interval = random.uniform(a,b) # while(1): # measure process time t0 = time.clock() wait(time_interval) print time.clock() - t0, "seconds process time" # measure wall time t0 = time.time() wait(time_interval) print time.time() - t0, "seconds wall time" time_cron(0,2) ``` 2) Scrieti doua functii de verificare daca un numar este prim, si verificati care dintre ele este mai eficienta din punct de vedere al timpului. 3) Gasiti toate fisierele duplicate dintr-un director dat ca argument si afisati timpul de rulare. Calea grupurilor de fisiere duplicate vor fi scrise intr-un fisier output.txt. (duplicat in fct de continut) 4) Sa se scrie un script care primeste ca argument un director si creeaza un fisier JSON cu date despre toate fisierele din acel director. Pentru fiecare fisier vor fi afisate urmatoarele informatii: nume_fisier, md5_fisier, sha256_fisier, size_fisier (in bytes), cand a fost creat fisierul (in format human-readable) si calea absoluta catre fisier. 
```
import os
import json
import hashlib
import time

def get_file_md5(filePath):
    """Return the hex MD5 digest of the file at filePath."""
    h = hashlib.md5()
    h.update(open(filePath, "rb").read())
    return h.hexdigest()

def get_file_sha256(filePath):
    """Return the hex SHA-256 digest of the file at filePath."""
    h = hashlib.sha256()
    h.update(open(filePath, "rb").read())
    return h.hexdigest()

def get_dir_data(dir_path):
    """Dump name, MD5, SHA-256, size, human-readable creation time and
    absolute path of every regular file in dir_path into <dirname>.json."""
    json_data = {}
    dir_path = os.path.realpath(dir_path)
    json_file = open(os.path.basename(dir_path) + '.json', 'w')
    for dir_file in next(os.walk(dir_path))[2]:
        # Bug fix: join with dir_path. The old code passed the bare file
        # name to the hash/size/ctime helpers, which only worked when
        # dir_path happened to be the current working directory.
        file_path = os.path.join(dir_path, dir_file)
        file_data = {dir_file: {}}
        file_data[dir_file]["file_md5"] = get_file_md5(file_path)
        file_data[dir_file]["file_sha256"] = get_file_sha256(file_path)
        file_data[dir_file]["file_size"] = os.path.getsize(file_path)
        # Human-readable creation time, e.g. "2016-11-07 09:15:30 AM".
        file_time = time.gmtime(os.path.getctime(file_path))
        file_data[dir_file]["file_time"] = time.strftime("%Y-%m-%d %I:%M:%S %p", file_time)
        file_data[dir_file]["file_path"] = file_path
        json_data.update(file_data)
    json_data = json.dumps(json_data, sort_keys=True, indent=4, separators=(',', ': '))
    json_file.write(json_data)
    json_file.close()

get_dir_data('./')
```

5) Sa se creeze doua scripturi care sa comunice intre ele prin date serializate. Primul script va salva periodic o lista cu toate fisierele dintr-un director iar al doilea script va adauga intr-o arhiva toate fisierele cu size mai mic de 100kb si modificate cu cel mult 5 minute in urma (nu va fi adaugat acelasi fisier de 2 ori).

6) Sa se scrie un script care afiseaza in ce zi a saptamanii este anul nou, pentru ultimii x ani (x este dat ca argument).

```
import datetime as dt

def weekday_new_year(x):
    """Print the weekday of New Year's Day for the last x years."""
    current_year = dt.datetime.today().year
    for i in range(0, x):
        # Bug fix: New Year's Day is January 1st; the old code queried
        # January 31st (dt.date(year, 1, 31)).
        print current_year - i, ': ', dt.date(current_year - i, 1, 1).strftime("%A")

weekday_new_year(5)
```

7) Sa se simuleze extragerea 6/49.

---
github_jupyter
# Optimality conditions Now we will move to studying constrained optimizaton problems i.e., the full problem $$ \begin{align} \ \min \quad &f(x)\\ \text{s.t.} \quad & g_j(x) \geq 0\text{ for all }j=1,\ldots,J\\ & h_k(x) = 0\text{ for all }k=1,\ldots,K\\ &x\in \mathbb R^n. \end{align} $$ In order to identify which points are optimal, we want to define similar conditions as there are for unconstrained problems through the gradient: >If $x$ is a local optimum to function $f$, then $\nabla f(x)=0$. ## Feasible descent directions Let $S\subset \mathbb R^n$ ($S\neq \emptyset$ closed) and $x^*\in S$. **Definition:** The set $$ D = \{d\in \mathbb R^n: d\neq0,x^*+\alpha d\in S \text{ for all } \alpha\in (0,\delta) \text{ for some } \delta>0\}$$ is called the cone of feasible directions of $S$ in $x^*$. **Definition:** The set $$ F = \{d\in \mathbb R^n: f(x^*+\alpha d)<f(x^*)\text{ for all } \alpha\in (0,\delta) \text{ for some } \delta>0\}$$ is called the cone of descent directions. **Definition:** The set $F\cap D$ is called the cone of feasible descent directions. ![alt text](images/feasible_descent_directions.svg "Feasible descent directions") **(Obvious) Theorem:** Consider an optimization problem $$ \begin{align} \min &\ f(x)\\ \text{s.t. }&\ x\in S \end{align} $$ and let $x^*\in S$. Now if $x^*$ is a local minimizer **then** the set of feasible descent directions $F\cap D$ is empty. Since, if $\nabla f(x)d<0$, **then** $d$ is a descent direction, the following theorem follows easily. **Theorem:** Consider an optimization problem $$ \begin{align} \min &\ f(x)\\ \text{s.t. }&\ x\in S \end{align} $$ and let $x^*\in S$. Now, if $x^*$ is a local minimizer, then $\{d\in B(0,1):\nabla f(x^*)d<0 \}\cap D$ is empty. ## KKT conditions Unfortunately, the set $D$ is not easily explicitly modelled. Thus, we need to develop methods for explicitly defining the set $D$ or even better the set $\{d\in B(0,1):\nabla f(x^*)d<0 \}\cap D$. 
This is done through the KKT conditions:

**Theorem (Kuhn-Tucker Necessary Conditions)** Let $x^*$ be a local minimum for the problem
$$
\begin{align}
\ \min \quad &f(x)\\
\text{s.t.} \quad & g_j(x) \geq 0\text{ for all }j=1,\ldots,J\\
& h_k(x) = 0\text{ for all }k=1,\ldots,K\\
&x\in \mathbb R^n.
\end{align}
$$
and assume that $x^*$ is regular. Then there exist unique Lagrange multiplier vectors $\mu^* = (\mu^*_1,\ldots,\mu_J^*)$ (for the inequality constraints) and $\lambda^*=(\lambda_1^*,\ldots,\lambda_K^*)$ (for the equality constraints) such that
$$
\begin{align}
&\nabla_xL(x^*,\lambda^*,\mu^*) = 0\\
&\mu_j^*\geq0,\text{ for all }j=1,\ldots,J\\
&\mu_j^*=0,\text{ for all }j\notin A(x^*),
\end{align}
$$
where
$$L(x,\lambda,\mu) = f(x)+\sum_{k=1}^K\lambda_kh_k(x) + \sum_{j=1}^J\mu_jg_j(x)$$
and $A(x^*)$ is the set of active inequality constraints at $x^*$. (The last condition is complementary slackness: multipliers of inactive constraints vanish.)

If in addition $f$, $h$ and $g$ are twice continuously differentiable, it holds that
$$
y'H_{x}L(x^*,\lambda^*,\mu^*)y\geq0, \text{ for all }y\in V(x^*),
$$
where
$$
V(x^*) = \{y:\nabla h_k(x^*)'y=0, \text{ for all }k=1,\ldots,K, \text{ and }\nabla g_j(x^*)'y=0, \text{ for all }j\in A(x^*)\}.
$$

**Example (page 285, Bertsekas: Nonlinear Programming)** Consider the optimization problem
$$
\begin{align}
\min &\qquad \frac12 (x_1^2+x^2_2+x^2_3)\\
\text{s.t.}&\qquad x_1+x_2+x_3\leq -3.
\end{align}
$$
Let us verify the Kuhn-Tucker necessary conditions for the local optimum $x^*=(-1,-1,-1)$. (NOTE, review: the code below uses $g(x)=3-\sum_i x_i$, which does not match the sign of this constraint, so the printed gradient does not vanish exactly — verify the definition of `g`.)

```
def f(x):
    return 0.5*sum([i**2 for i in x])
def g(x):
    return 3-sum(x)
def h(x):
    return 0*sum(x)
import numpy as np
import ad
def grad_x_L(x,lambda_,mu,f,g,h):
    return ad.gh(f)[0](x)+lambda_*np.array(ad.gh(h)[0](x))+mu*np.array(ad.gh(g)[0](x))
import ad
mu = 1
lambda_ = 10 #Does not play a role. Think why?
x_opt = [-1,-1,-1]
print grad_x_L(x_opt,lambda_,mu,f,g,h)
print g(x_opt)
```
github_jupyter
``` import geopandas as gpd import pandas as pd import folium import branca import requests import json from folium.features import GeoJson, GeoJsonTooltip, GeoJsonPopup print(folium.__version__) income = pd.read_csv(r"https://raw.githubusercontent.com/pri-data/50-states/master/data/income-counties-states-national.csv", dtype={"fips":str}) income['income-2015'] = pd.to_numeric(income['income-2015'], errors='coerce') income.head() response = requests.get(r"https://raw.githubusercontent.com/python-visualization/folium/master/examples/data/us-states.json") data = response.json() states = gpd.GeoDataFrame.from_features(data, crs='EPSG:4326') states.head() response = requests.get('https://gist.githubusercontent.com/tvpmb/4734703/raw/b54d03154c339ed3047c66fefcece4727dfc931a/US%2520State%2520List') abbrs = pd.read_json(response.text) abbrs.head(3) statesmerge = states.merge(abbrs, how='left', left_on='name', right_on='name') statesmerge['geometry'] = statesmerge.geometry.simplify(.05) statesmerge.head() income.groupby(by="state")[['state','income-2015']].median().head() statesmerge['medianincome'] = statesmerge.merge(income.groupby(by="state")[['state','income-2015']].median(), how='left', left_on='alpha-2', right_on='state')['income-2015'] statesmerge['change'] = statesmerge.merge(income.groupby(by="state")[['state','change']].median(), how='left', left_on='alpha-2', right_on='state')['change'] statesmerge.head() statesmerge['medianincome'].quantile(0.25) colormap = branca.colormap.LinearColormap( vmin=statesmerge['change'].quantile(0.0), vmax=statesmerge['change'].quantile(1), colors=['red','orange','lightblue','green','darkgreen'], caption="State Level Median County Household Income (%)", ) m = folium.Map(location=[35.3, -97.6], zoom_start=4) popup = GeoJsonPopup( fields=['name','change'], aliases=['State',"% Change"], localize=True, labels=True, style="background-color: yellow;", ) tooltip = GeoJsonTooltip( fields=["name", "medianincome", "change"], aliases=["State:", 
"2015 Median Income(USD):", "Median % Change:"], localize=True, sticky=False, labels=True, style=""" background-color: #F0EFEF; border: 2px solid black; border-radius: 3px; box-shadow: 3px; """, max_width=800, ) g = folium.GeoJson( statesmerge, style_function=lambda x: { "fillColor": colormap(x["properties"]["change"]) if x["properties"]["change"] is not None else "transparent", "color": "black", "fillOpacity": 0.4, }, tooltip=tooltip, popup=popup ).add_to(m) colormap.add_to(m) m ```
github_jupyter
# Uitwerkingen toets ## Opgave 1 Bij deze opgave gaat het om het definiëren van variabelen. ``` m = 10 c = 299792458 e = m * c ** 2 ``` Bedenk dat als je niet goed weet welke operator (`*`, `//`, `+`, etc.) voorrang heeft je kan groeperen met ronde haken, bijvoorbeeld ```python e = m * (c ** 2) ``` ## Opgave 2 Deze opgave gaat over *conditional statements* en met name over het uitsluiten van mogelijkheden. ```python if percentage >= 90: grade = "A" elif percentage >= 80: grade = "B" elif percentage >= 70: grade = "C" elif percentage >= 60: grade = "D" else: grade = "F" ``` Variaties zijn natuurlijk mogelijk, je zou dit ook met alleen maar `if` en `else` statements kunnen uitschrijven, bijvoorbeeld ```python if percentage >= 90: grade = "A" else: if percentage >= 80: grade = "B" else: if percentage >= 70: grade = "C" else: if percentage >= 60: grade = "D" else: grade = "F" ``` ## Opgave 3 Deze opgave gaat over sorteren (en specifiek hoe Python sorteert). Karakters hebben een numerieke representatie (denk aan ASCII) en Python zal op basis van deze numerieke waarde sorteren. Als de eerste karakters gelijk zijn (bijvoorbeeld "H" bij "HUIS" en "HAARD") zal Python het volgende karakter nemen om verder te sorteren ("U" en "A"). ``` words = [ "ALLEENSTAAND", "FIETS", "GROETEN", "ROBOTICA", "KLUS", "TAKENPAKKET", "XYLOFOON", ] lengths = [len(x) for x in words if x > "HANZE" and x < "TENTAMEN"] lengths ``` Als een kleine variatie op deze opgave, bedenk dat je een `LoL` strategie kan volgen om naast de lengte van het woord ook het woord zelf op te nemen in het resultaat: ``` [[len(x), x] for x in words if x > "HANZE" and x < "TENTAMEN"] ``` ## Opgave 4 Deze opgave gaat over het kunnen identificeren van base- en recursive case(s). In dit geval kunnen twee base cases en één recursieve case worden bepaald. 
```
def manhattan(x, y):
    """Return the number of monotone grid paths from (0, 0) to (x, y).

    Pascal's-triangle style recurrence:
    paths(x, y) = paths(x - 1, y) + paths(x, y - 1).
    """
    if x < 0 or y < 0: # base case
        return 0
    elif x == 0 or y == 0: # base case
        return 1
    else: # recursive case
        return manhattan(x - 1, y) + manhattan(x, y - 1)
```

## Opgave 5

Deze opgave gaat over het gebruik van list of lists (`LoL`'s). In de extra *list comprehension* opgaven kan je een opgave `mul_table` vinden met een beschijving van hoe je per rij paarsgewijs `x` en `y` kan zetten om te komen tot een vermenigvuldigingstabel. Dit probleem volgt hetzelfde principe.

```
def manhattan_table(size):
    """Return a size-by-size list of lists with manhattan(x, y) per cell.

    Row index is y, column index is x.
    """
    table = [[manhattan(x, y) for x in range(size)] for y in range(size)]
    return table
```

## Opgave 6

Use it or lose it! Dit is een klassiek optimalisatieprobleem waar je uit een set van boeken moet kiezen welke combinatie het beste een boekenplank vult. In het boek onder hoofdstuk 2 vind je bij "Use It or Lose It" de beschrijving van dit algoritme.

```
def bookcase(size, books):
    """Return the largest total book width that fits on a shelf of width size.

    Classic use-it-or-lose-it recursion over the list of book widths.
    """
    if len(books) == 0:
        return 0

    first = books[0]
    rest = books[1:]

    if first > size:
        # first does not fit at all, so we can only lose it
        return bookcase(size, rest)
    if first == size:
        # first fills the shelf exactly; no better result is possible
        return size
    if first < size:
        use_it = first + bookcase(size - first, rest)
        lose_it = bookcase(size, rest)
        return max(use_it, lose_it)
```
github_jupyter
# Circularly Linked List In short a linked list (singly or doubly) where the tail node points at the head. In the last node of a list, the link field often contains a null reference, a special value is used to indicate the lack of further nodes. A less common convention is to make it point to the first node of the list; in that case, the list is said to be 'circular' or 'circularly linked'; otherwise, it is said to be 'open' or 'linear'. It is a list where the last pointer points to the first node. <b>References and resources:</b> - Python Data Structures and Algorithms by Benjamin Baka - [Circular linked list](https://www.youtube.com/watch?v=3bmCGdh0jS8&index=6&list=PLj8W7XIvO93rx6hFr6H3Un4Ezpg1iUpOG) - [Wikipedia](https://en.wikipedia.org/wiki/Linked_list#Circular_linked_list) ``` # # Uncomment to use inline pythontutor # from IPython.display import IFrame # IFrame('http://www.pythontutor.com/visualize.html#mode=display', height=750, width=750) class Node: """ Simple node with pointer. """ def __init__(self, value): self.value = value self.next = None class CircularlySinglyLinkedList: # Should run in O(1) def __init__(self): self.tail = None self.head = None self.size = 0 # We initialize a counter at 0. def append(self, value): """ The point at which we append new nodes is through self.head. self.tail will refer to the first node. """ node = Node(value) if self.tail: self.tail.next = node self.tail = node else: # Sets the first entry as head node, and now updates tail pointing to each new append, ending at none. self.tail = node self.head = node self.size += 1 self.tail.next = self.head # We set the tail's next pointing to the head. words = CircularlySinglyLinkedList() words.append('eggs') words.tail.next.value class Node: def __init__(self, value): self.value = value self.next = None class CircularlySinglyLinkedList: # Should run in O(1) def __init__(self): self.tail = None self.head = None self.size = 0 # We initialize a counter at 0. 
def append(self, value): """ The point at which we append new nodes is through self.head. self.tail will refer to the first node. """ node = Node(value) if self.tail: self.tail.next = node self.tail = node else: # Sets the first entry as head node, and now updates tail pointing to each new append, ending at none. self.tail = node self.head = node self.size += 1 self.tail.next = self.head # We set the tail's next pointing to the head. def delete(self, value): """ To delete a node between other nodes we must make the previous node point directly to the successor of its next node. It should take O(n) to delete the node. """ current = self.head previous = self.head while previous == current or previous != self.tail: if current.value == value: if current == self.head: self.head = current.next else: previous.next = current.next self.size -= 1 return previous = current current = current.next def iterate(self): current = self.head while current: yield current.value current = current.next words = CircularlySinglyLinkedList() words.append('egg') words.append('ham') words.append('spam') words.size words.delete('ham') words.head.value words.tail.value words.tail.next.value counter = 0 for word in words.iterate(): print(word) counter += 1 if counter > 5: break ```
github_jupyter
``` import matplotlib.pyplot as plt import seaborn as sns; sns.set() import pandas as pd import numpy as np save_dir = "../plots/lossy_compression/" sns.set(style="whitegrid") paper_rc = {'lines.linewidth': 2, 'lines.markersize': 10} sns.set_context("paper", rc = paper_rc) level_1_base_dir = "/scratch/gf332/models/relative-entropy-coding/lossy/clic2019/large_level_1_vae/" level_2_base_dir = "/scratch/gf332/models/relative-entropy-coding/lossy/clic2019/large_level_2_vae/" rec_kodak_stats_files = { "ms_ssim": { 0.1: f"{level_2_base_dir}/ms-ssim/beta_0.003/kodak/results.csv", 0.2: f"{level_2_base_dir}/ms-ssim/beta_0.010/kodak/results.csv", 0.5: f"{level_2_base_dir}/ms-ssim/beta_0.030/kodak/results.csv", #0.8: f"{level_2_base_dir}/ms-ssim/beta_0.050/kodak/results.csv", 1.0: f"{level_2_base_dir}/ms-ssim/beta_0.080/kodak/results.csv", }, "gaussian": { 0.1: f"{level_2_base_dir}/mse/beta_0.001_filters_196_128/kodak/results.csv", 0.2: f"{level_2_base_dir}/mse/beta_0.003/kodak/results.csv", 0.3: f"{level_2_base_dir}/mse/beta_0.010/kodak/results.csv", 0.7: f"{level_2_base_dir}/mse/beta_0.030/kodak/results.csv", 1.0: f"{level_2_base_dir}/mse/beta_0.050/kodak/results.csv", }, "level-1-gaussian": { 0.1: f"{level_1_base_dir}/mse/beta_0.001_filters_192/kodak/results.csv", 0.2: f"{level_1_base_dir}/mse/beta_0.003_filters_192/kodak/results.csv", 0.3: f"{level_1_base_dir}/mse/beta_0.010_filters_192/kodak/results.csv", 0.7: f"{level_1_base_dir}/mse/beta_0.030_filters_192/kodak/results.csv", 1.0: f"{level_1_base_dir}/mse/beta_0.100_filters_320/kodak/results.csv", } } level_1_gaussian_rec_dfs = [] gaussian_rec_dfs = [] ms_ssim_rec_dfs = [] for gauss_rec_bpp in sorted(rec_kodak_stats_files["level-1-gaussian"].keys()): with open(rec_kodak_stats_files["level-1-gaussian"][gauss_rec_bpp], 'r') as f: rec_df = pd.read_csv(f, sep=',\s+') rec_df = rec_df.T rec_df.columns = rec_df.iloc[0] rec_df = rec_df.drop(rec_df.index[0]) rec_df.loc["loss"] = "gaussian" rec_df.loc["target_bpp"] = 
gauss_rec_bpp level_1_gaussian_rec_dfs.append(rec_df) for gauss_rec_bpp in sorted(rec_kodak_stats_files["gaussian"].keys()): with open(rec_kodak_stats_files["gaussian"][gauss_rec_bpp], 'r') as f: rec_df = pd.read_csv(f, sep=',\s+') rec_df = rec_df.T rec_df.columns = rec_df.iloc[0] rec_df = rec_df.drop(rec_df.index[0]) rec_df.loc["loss"] = "gaussian" rec_df.loc["target_bpp"] = gauss_rec_bpp gaussian_rec_dfs.append(rec_df) for ms_ssim_rec_bpp in sorted(rec_kodak_stats_files["ms_ssim"].keys()): with open(rec_kodak_stats_files["ms_ssim"][ms_ssim_rec_bpp], 'r') as f: rec_df = pd.read_csv(f, sep=',\s+') rec_df = rec_df.T rec_df.columns = rec_df.iloc[0] rec_df = rec_df.drop(rec_df.index[0]) rec_df.loc["loss"] = "ms_ssim" rec_df.loc["target_bpp"] = ms_ssim_rec_bpp ms_ssim_rec_dfs.append(rec_df) ``` # Aggregate Statistics ``` psnr_base_dir = "/scratch/gf332/CWOQ/balle/compression/results/image_compression/kodak/PSNR_sRGB_RGB/" ms_ssim_base_dir = "/scratch/gf332/CWOQ/balle/compression/results/image_compression/kodak/MS-SSIM_sRGB_RGB/" psnr_method_names = { "jpeg420.txt": ("JPEG (4:2:0)", "MSE", 8), "bpg444.txt": ("BPG (4:4:4)", "MSE", 8), "theis-2017-iclr.txt": ("Theis (2017)", "MSE", 9), "balle-2018-iclr-opt-mse.txt": ("Ballé (2018) (opt. for MSE)", "MSE", 9), #"balle-2018-iclr-opt-msssim.txt": ("Ballé (2018) (opt. for MS-SSIM)", "MS-SSIM", 9), #"minnen-2018-neurips.txt": ("Minnen (2018)", "MSE", 9), "minnen-2020-icip.txt": ("Minnen (2020)", "MSE", 9), } ms_ssim_method_names = { "jpeg420.txt": ("JPEG (4:2:0)", "MSE", 10), "bpg444.txt": ("BPG (4:4:4)", "MSE", 10), "theis-2017-iclr.txt": ("Theis (2017)", "MSE", 11), #"balle-2018-iclr-opt-mse.txt": ("Ballé (2018) (opt. for MSE)", "MSE", 11), "balle-2018-iclr-opt-msssim.txt": ("Ballé (2018) (opt. 
for MS-SSIM)", "MS-SSIM", 11), #"minnen-2018-neurips.txt": ("Minnen (2018)", "MSE", 11), } def prepare_method_df(metric, method_name, method_legend, skiprows, opt_objective): base_dir = psnr_base_dir if metric == "psnr" else ms_ssim_base_dir df = pd.read_csv(f"{base_dir}/{method_name}", skiprows=skiprows, header=0, names=["bpp", metric]) df["objective"] = opt_objective df["method"] = method_legend df = df.astype({metric: float, "bpp": float}) if metric == "ms_ssim": df["ms_ssim"] = to_db(df["ms_ssim"]) df = df[df["bpp"] < 2] return df to_db = lambda x: -10. * np.log10(1. - x) def get_rec_psnr_average_df(rec_dfs, kind): aggregate_df = pd.concat(rec_dfs, axis=1) aggregate_df = aggregate_df.T.astype({ "comp_lossy_BPP": float, "lossy_BPP": float, "ideal_PSNR": float, "PSNR": float, "ideal_MS_SSIM": float, "MS_SSIM": float, }) psnr_avg_df = aggregate_df.groupby("target_bpp").mean().rename({"PSNR": "psnr", "comp_lossy_BPP": "bpp", "MS_SSIM": "ms_ssim"}, axis=1) psnr_avg_df["model"] = f"iREC {kind}" psnr_avg_df["type"] = f"actual" ideal_psnr_avg_df = aggregate_df.groupby("target_bpp").mean().rename({"ideal_PSNR": "psnr", "lossy_BPP": "bpp", "ideal_MS_SSIM": "ms_ssim"}, axis=1) ideal_psnr_avg_df["model"] = f"iREC {kind}" ideal_psnr_avg_df["type"] = f"ideal" return pd.concat([psnr_avg_df, ideal_psnr_avg_df]) rec_l1_gaussian_psnr_avg_df = get_rec_psnr_average_df(level_1_gaussian_rec_dfs, "(Ours) (opt. for MSE)") rec_gaussian_psnr_avg_df = get_rec_psnr_average_df(gaussian_rec_dfs, "(Ours) (opt. for MSE)") rec_ms_ssim_psnr_avg_df = get_rec_psnr_average_df(ms_ssim_rec_dfs, "(Ours) (opt. 
for MS-SSIM)") psnr_results = pd.concat([prepare_method_df("psnr", method_name, method_legend, skiprows, opt_objective) for method_name, (method_legend, opt_objective, skiprows) in psnr_method_names.items()]) ms_ssim_results = pd.concat([prepare_method_df("ms_ssim", method_name, method_legend, skiprows, opt_objective) for method_name, (method_legend, opt_objective, skiprows) in ms_ssim_method_names.items()]) sns.set_palette("bright") fig, ax = plt.subplots(figsize=(7.4, 6.5)) sns.lineplot(x="bpp", y="psnr", data=psnr_results, markers=False, dashes=False, hue='method', ax=ax) rec_psnr_df = rec_gaussian_psnr_avg_df[rec_gaussian_psnr_avg_df["type"] == "actual"].rename({"model": "method"}, axis=1) ax.plot(rec_psnr_df["bpp"], rec_psnr_df["psnr"], label=rec_psnr_df["method"].values[0], marker="o", linewidth=3, c='k') # rec_ssim_df = rec_ms_ssim_psnr_avg_df[rec_ms_ssim_psnr_avg_df["type"] == "actual"].rename({"model": "method"}, axis=1) # ax.plot(rec_ssim_df["bpp"], rec_ssim_df["psnr"], label=rec_ssim_df["method"].values[0], marker="o", linewidth=3, c='r') handles, labels = ax.get_legend_handles_labels() ax.legend(handles=handles[1:][::-1], labels=labels[1:][::-1], fontsize=18, loc="upper left", title=None, fancybox=False, frameon=False) ax.set_xlim([0, 1.5]) ax.set_ylim([25, 42]) ax.set_ylabel(None) ax.set_xlabel("Bits per Pixel", fontsize=18) ax.tick_params(axis='both', which='major', labelsize=18) x_major_ticks = np.arange(0., 1.5, 0.2) x_minor_ticks = np.arange(0., 1.5, 0.05) ax.set_xticks(x_major_ticks) ax.set_xticks(x_minor_ticks, minor=True) y_major_ticks = np.arange(25., 42, 5) y_minor_ticks = np.arange(25., 42, 1) ax.set_yticks(y_major_ticks) ax.set_yticks(y_minor_ticks, minor=True) ax.grid(which='major', alpha=1) ax.grid(which='minor', c="#eeeeee", alpha=1) ax.text(.45, .02, 'PSNR (RGB) on Kodak', horizontalalignment='left', transform=ax.transAxes, fontsize=24) fig.tight_layout() fig.savefig(f"{save_dir}/method_psnr_comparison.png") sns.set_palette("bright") 
fig, ax = plt.subplots(figsize=(7.4, 6.5)) sns.lineplot(x="bpp", y="ms_ssim", data=ms_ssim_results, markers=False, dashes=False, hue='method', ax=ax) # rec_psnr_df = rec_gaussian_psnr_avg_df[rec_gaussian_psnr_avg_df["type"] == "actual"].rename({"model": "method"}, axis=1) # ax.plot(rec_psnr_df["bpp"], to_db(rec_psnr_df["ms_ssim"]), label=rec_psnr_df["method"].values[0], marker="o", linewidth=3, c='b') rec_ssim_df = rec_ms_ssim_psnr_avg_df[rec_ms_ssim_psnr_avg_df["type"] == "actual"].rename({"model": "method"}, axis=1) ax.plot(rec_ssim_df["bpp"], to_db(rec_ssim_df["ms_ssim"]), label=rec_ssim_df["method"].values[0], marker="o", linewidth=3, c='k') handles, labels = ax.get_legend_handles_labels() ax.legend(handles=handles[1:][::-1], labels=labels[1:][::-1], fontsize=18, loc="upper left", title=None, fancybox=False, frameon=False) ax.set_xlim([0, 1.5]) ax.set_ylim([8, 27]) ax.set_xlabel("Bits per Pixel", fontsize=18) ax.set_ylabel(None) ax.tick_params(axis='both', which='major', labelsize=18) x_major_ticks = np.arange(0., 1.5, 0.2) x_minor_ticks = np.arange(0., 1.5, 0.05) ax.set_xticks(x_major_ticks) ax.set_xticks(x_minor_ticks, minor=True) y_major_ticks = np.arange(8., 27, 2) y_minor_ticks = np.arange(8., 27, 0.5) ax.set_yticks(y_major_ticks) ax.set_yticks(y_minor_ticks, minor=True) ax.grid(which='major', alpha=1) ax.grid(which='minor', c="#eeeeee", alpha=1) ax.text(.25, .02, 'MS-SSIM (dB) (RGB) on Kodak', horizontalalignment='left', transform=ax.transAxes, fontsize=24) fig.tight_layout() fig.savefig(f"{save_dir}/method_ms_ssim_comparison.eps") fig.savefig(f"{save_dir}/method_ms_ssim_comparison.png") fig, ax = plt.subplots(figsize=(10, 13)) balle_psnr_results = psnr_results[psnr_results["method"].str.contains("Ballé")] sns.lineplot(x="bpp", y="psnr", data=balle_psnr_results, markers=False, dashes=False, hue='method', ax=ax) rec_psnr_df = rec_gaussian_psnr_avg_df[rec_gaussian_psnr_avg_df["type"] == "actual"].rename({"model": "method"}, axis=1) 
ax.plot(rec_psnr_df["bpp"], rec_psnr_df["psnr"], label="iREC (Ours) (opt. for MSE)", linewidth=2, c='g') ideal_rec_psnr_df = rec_gaussian_psnr_avg_df[rec_gaussian_psnr_avg_df["type"] == "ideal"].rename({"model": "method"}, axis=1) ax.plot(ideal_rec_psnr_df["bpp"], ideal_rec_psnr_df["psnr"], label="Ideal iREC (Ours) (opt. for MSE)", marker="", linewidth=2, c='g', linestyle="--") rec_psnr_df = rec_ms_ssim_psnr_avg_df[rec_ms_ssim_psnr_avg_df["type"] == "actual"].rename({"model": "method"}, axis=1) ax.plot(rec_psnr_df["bpp"], rec_psnr_df["psnr"], label="iREC (Ours) (opt. for MS-SSIM)", linewidth=2, c='r') ideal_rec_psnr_df = rec_ms_ssim_psnr_avg_df[rec_ms_ssim_psnr_avg_df["type"] == "ideal"].rename({"model": "method"}, axis=1) ax.plot(ideal_rec_psnr_df["bpp"], ideal_rec_psnr_df["psnr"], label="Ideal iREC (Ours) (opt. for MS-SSIM)", marker="", linewidth=2, c='r', linestyle="--") handles, labels = ax.get_legend_handles_labels() ax.legend(handles=handles[1:], labels=labels[1:], fontsize=24, loc="lower right", title=None, fancybox=False, frameon=False) ax.set_xlim([0, 1.5]) ax.set_ylim([24, 40]) ax.set_xlabel("Bits per Pixel", fontsize=24) ax.set_ylabel(None) ax.tick_params(axis='both', which='major', labelsize=24) x_major_ticks = np.arange(0., 1.5, 0.2) x_minor_ticks = np.arange(0., 1.5, 0.05) ax.set_xticks(x_major_ticks) ax.set_xticks(x_minor_ticks, minor=True) y_major_ticks = np.arange(25., 40, 5) y_minor_ticks = np.arange(24., 40, 1) ax.set_yticks(y_major_ticks) ax.set_yticks(y_minor_ticks, minor=True) ax.grid(which='major', alpha=1) ax.grid(which='minor', c="#eeeeee", alpha=1) ax.text(.03, .96, 'PSNR (RGB) on Kodak', horizontalalignment='left', transform=ax.transAxes, fontsize=28) fig.tight_layout() fig.savefig(f"{save_dir}/actual_vs_ideal_psnr_comparison.eps") fig, ax = plt.subplots(figsize=(10, 13)) balle_ms_ssim_results = ms_ssim_results[ms_ssim_results["method"].str.contains("Ballé")] sns.lineplot(x="bpp", y="ms_ssim", data=balle_ms_ssim_results, markers=False, 
dashes=False, hue='method', ax=ax) rec_psnr_df = rec_gaussian_psnr_avg_df[rec_gaussian_psnr_avg_df["type"] == "actual"].rename({"model": "method"}, axis=1) ax.plot(rec_psnr_df["bpp"], to_db(rec_psnr_df["ms_ssim"]), label="iREC (Ours) (opt. for MSE)", linewidth=2, c='g') ideal_rec_psnr_df = rec_gaussian_psnr_avg_df[rec_gaussian_psnr_avg_df["type"] == "ideal"].rename({"model": "method"}, axis=1) ax.plot(ideal_rec_psnr_df["bpp"], to_db(ideal_rec_psnr_df["ms_ssim"]), label="Ideal iREC (Ours) (opt. for MSE)", marker="", linewidth=2, c='g', linestyle="--") rec_psnr_df = rec_ms_ssim_psnr_avg_df[rec_ms_ssim_psnr_avg_df["type"] == "actual"].rename({"model": "method"}, axis=1) ax.plot(rec_psnr_df["bpp"], to_db(rec_psnr_df["ms_ssim"]), label="iREC (Ours) (opt. for MS-SSIM)", linewidth=2, c='r') ideal_rec_psnr_df = rec_ms_ssim_psnr_avg_df[rec_ms_ssim_psnr_avg_df["type"] == "ideal"].rename({"model": "method"}, axis=1) ax.plot(ideal_rec_psnr_df["bpp"], to_db(ideal_rec_psnr_df["ms_ssim"]), label="iREC (Ours) (opt. 
for MS-SSIM)", marker="", linewidth=2, c='r', linestyle="--") handles, labels = ax.get_legend_handles_labels() ax.legend(handles=handles[1:], labels=labels[1:], fontsize=24, loc="lower right", title=None, fancybox=False, frameon=False) ax.set_xlim([0, 1.5]) ax.set_ylim([9, 25]) ax.set_xlabel("Bits / Pixel", fontsize=24) ax.set_ylabel(None) ax.tick_params(axis='both', which='major', labelsize=24) x_major_ticks = np.arange(0., 1.5, 0.2) x_minor_ticks = np.arange(0., 1.5, 0.05) ax.set_xticks(x_major_ticks) ax.set_xticks(x_minor_ticks, minor=True) y_major_ticks = np.arange(10., 25, 2) y_minor_ticks = np.arange(10., 25, 0.5) ax.set_yticks(y_major_ticks) ax.set_yticks(y_minor_ticks, minor=True) ax.grid(which='major', alpha=1) ax.grid(which='minor', c="#eeeeee", alpha=1) ax.text(.35, .96, 'MS-SSIM (dB) (RGB) on Kodak', horizontalalignment='left', transform=ax.transAxes, fontsize=28) fig.tight_layout() fig.savefig(f"{save_dir}/actual_vs_ideal_ms_ssim_comparison.eps") ``` # Individual Images ``` jpeg_stats_file = "../experimental_data/jpeg/stats.json" balle_clic_stats_file = "../experimental_data/kodak_balle/stats.json" with open(jpeg_stats_file, 'r') as f: jpeg_df = pd.read_json(f) with open(balle_clic_stats_file, 'r') as f: balle_df = pd.read_json(f) balle_df = balle_df.rename(lambda x: x[1:], axis=1) def get_image_dfs(img_name): jpeg_img_df = pd.read_json(jpeg_df[img_name].to_json()) balle_img_df = pd.read_json(balle_df[img_name].to_json()) jpeg_img_df.loc["model"] = "JPEG (4:2:0)" balle_img_df.loc["model"] = "Ballé (2018) (opt. 
for MSE)" # Put the columns in order jpeg_img_df = jpeg_img_df.reindex(sorted(jpeg_img_df.columns, key=lambda c: int(c.split('_')[1])), axis=1) balle_img_df = balle_img_df.reindex(sorted(balle_img_df.columns, key=lambda c: float(f"0.{c.split('_')[1]}")), axis=1) actual_rec_df = pd.concat(gaussian_rec_dfs, axis=1)[img_name].T.sort_values(by=["target_bpp"]).reset_index(drop=True) actual_rec_df = actual_rec_df.rename({"PSNR": "psnr", "comp_lossy_BPP": "bpp", "MS_SSIM": "ms_ssim"}, axis=1)[["psnr", "bpp", "ms_ssim"]] actual_rec_df = actual_rec_df.astype({"psnr":float, "bpp":float, "ms_ssim":float}) actual_rec_df["model"] = "actual_rec" joint_df = pd.concat([jpeg_img_df, balle_img_df], axis=1).T.astype({"bpp": float, "psnr": float, "ms_ssim": float}) # Convert MS-SSIM statistic to decibels joint_df['ms_ssim'] = to_db(joint_df['ms_ssim']) return actual_rec_df, joint_df def plot_kodak_ms_ssim_image_stats(img_name, actual_rec_df, joint_df): fig, ax = plt.subplots(figsize=(10, 13)) sns.lineplot(x="bpp", y="ms_ssim", data=joint_df, markers=False, dashes=False, hue='model', ax=ax) ax.plot(actual_rec_df["bpp"], to_db(actual_rec_df["ms_ssim"]), label="iREC (Ours) (opt. 
for MSE)", linewidth=3, marker='o', c='k') handles, labels = ax.get_legend_handles_labels() ax.legend(handles=handles[1:], labels=labels[1:], fontsize=24, loc="lower right", title=None, fancybox=False, frameon=False) ax.set_xlim([0, 1.5]) ax.set_ylim([9, 25]) ax.set_xlabel("Bits / Pixel", fontsize=24) ax.set_ylabel(None) ax.tick_params(axis='both', which='major', labelsize=24) x_major_ticks = np.arange(0., 1.5, 0.2) x_minor_ticks = np.arange(0., 1.5, 0.05) ax.set_xticks(x_major_ticks) ax.set_xticks(x_minor_ticks, minor=True) y_major_ticks = np.arange(8., 25, 2) y_minor_ticks = np.arange(8., 25, 0.5) ax.set_yticks(y_major_ticks) ax.set_yticks(y_minor_ticks, minor=True) ax.grid(which='major', alpha=1) ax.grid(which='minor', c="#eeeeee", alpha=1) ax.text(.35, .96, 'MS-SSIM (dB) (RGB) on Kodak', horizontalalignment='left', transform=ax.transAxes, fontsize=28) fig.tight_layout() fig.savefig(f"{save_dir}/kodak_comparisons/{img_name}_ms_ssim.eps") plt.close() def plot_kodak_psnr_image_stats(img_name, actual_rec_df, joint_df): fig, ax = plt.subplots(figsize=(10, 13)) sns.lineplot(x="bpp", y="psnr", data=joint_df, markers=False, dashes=False, hue='model', ax=ax) ax.plot(actual_rec_df["bpp"], actual_rec_df["psnr"], label="iREC (Ours) (opt. 
for MSE)", linewidth=3, marker='o', c='k') handles, labels = ax.get_legend_handles_labels() ax.legend(handles=handles[1:], labels=labels[1:], fontsize=24, loc="lower right", title=None, fancybox=False, frameon=False) ax.set_xlim([0, 1.5]) ax.set_xlabel("Bits per Pixel", fontsize=24) ax.set_ylabel(None) ax.tick_params(axis='both', which='major', labelsize=24) x_major_ticks = np.arange(0., 2., 0.2) x_minor_ticks = np.arange(0., 2., 0.05) ax.set_xticks(x_major_ticks) ax.set_xticks(x_minor_ticks, minor=True) y_major_ticks = np.arange(20., 45, 5) y_minor_ticks = np.arange(20., 45, 1) ax.set_yticks(y_major_ticks) ax.set_yticks(y_minor_ticks, minor=True) ax.grid(which='major', alpha=1) ax.grid(which='minor', c="#eeeeee", alpha=1) ax.text(.03, .96, 'PSNR (RGB) on Kodak', horizontalalignment='left', transform=ax.transAxes, fontsize=28) fig.tight_layout() fig.savefig(f"{save_dir}/kodak_comparisons/{img_name}_psnr.eps") plt.close() for i in range(1, 25): img_name = f"kodim{i:02}.png" image_dfs = get_image_dfs(img_name) plot_kodak_ms_ssim_image_stats(img_name, *image_dfs) plot_kodak_psnr_image_stats(img_name, *image_dfs) ```
github_jupyter
## Recognition Of Objects with Convolutional Neural Network ## - Ashwin Prakash IMPORTING THE REQUIRED ``` import tensorflow as tf from tensorflow import keras from matplotlib import pyplot as plt import numpy as np from tensorflow.python.keras.utils import np_utils ``` LOADING AND SPLITTING THE DATA ``` (X_train, y_train), (X_test, y_test) = keras.datasets.cifar10.load_data() print('Training Images: {}'.format(X_train.shape)) print('Testing Images: {}'.format(X_test.shape)) print(X_train[0].shape) for i in range(332,336): plt.subplot(120+ 1 + i) img = X_train[i] plt.imshow(img) plt.show() ``` PREPROCESSING THE DATA ``` X_train = X_train.reshape(X_train.shape[0], 32, 32, 3) X_test = X_test.reshape(X_test.shape[0], 32, 32, 3) X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train /= 255 X_test=X_test/255 n_classes = 10 print("Shape before one-hot encoding: ", y_train.shape) Y_train = np_utils.to_categorical(y_train, n_classes) Y_test = np_utils.to_categorical(y_test, n_classes) print("Shape after one-hot encoding: ", Y_train.shape) ``` BUILDING THE MODEL ``` from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPool2D, Flatten model = Sequential() #convolutional layers model.add(Conv2D(50, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu', input_shape=(32, 32, 3))) model.add(Conv2D(75, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(125, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) # hidden layer model.add(Dense(500, activation='relu')) model.add(Dropout(0.4)) model.add(Dense(250, activation='relu')) model.add(Dropout(0.3)) # output layer model.add(Dense(10, activation='softmax')) # compiling model.compile(loss='categorical_crossentropy', metrics=['accuracy'], 
optimizer='adam') # training the model model.fit(X_train, Y_train, batch_size=128, epochs=10, validation_data=(X_test, Y_test)) ``` PREDICTING ``` classes = range(0,10) names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] # zip the names and classes to make a dictionary of class_labels class_labels = dict(zip(classes, names)) # generate batch of 9 images to predict batch = X_test[500:509] labels = np.argmax(Y_test[500:509],axis=-1) # make predictions predictions = model.predict(batch, verbose = 1) print (predictions) for image in predictions: print(np.sum(image)) class_result = np.argmax(predictions,axis=-1) print (class_result) ``` FINAL OBJECT DETECTION ``` fig, axs = plt.subplots(3, 3, figsize = (19,6)) fig.subplots_adjust(hspace = 1) axs = axs.flatten() for i, img in enumerate(batch): for key, value in class_labels.items(): if class_result[i] == key: title = 'Prediction: {}\nActual: {}'.format(class_labels[key], class_labels[labels[i]]) axs[i].set_title(title) axs[i].axes.get_xaxis().set_visible(False) axs[i].axes.get_yaxis().set_visible(False) # plot the image axs[i].imshow(img) # show the plot plt.show() ```
github_jupyter
## Motif Kernel / SVM Tutorial This tutorial shows how you can combine the motif kernel of the *strkernel* package with a Support Vector Machine (SVM) to predict the cell population based on the motif content of a read sequence. There are two FASTA files each filled with sequences from two different cell populations. If you are not familiar with the FASTA format, [here](http://genetics.bwh.harvard.edu/pph/FASTA.html) is a short explanation. *fibroblast.fa* are sequences obtained from fibroblast while *stemcells.fa* contains sequences from stemcells. The goal of this tutorial is to show that we can use prior knowledge to construct motifs and use those motifs to classify new sequences into the two cell populations. The accuracy of the prediction is in this case secondary as long as we get a result that shows that we can use the motif content of sequences to compare their similarity and therefore classify them. First, you will need some packages for preprocessing, classification and to plot the results. ``` # preprocessing import numpy as np from Bio.Seq import Seq import Bio.SeqIO as sio # SVM from sklearn.svm import SVC from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report # ROC and precision-recall curve from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score # plotting import matplotlib.pyplot as plt # plotting # motif kernel from strkernel.motifkernel import motifKernel ``` ### Preprocessing In order to use the data with the SVM provided by *sklearn* we need to read in the FASTA files with *Biopython* and add some labels. In this case we will label the stemcells as positive (1) and the fibroblast as negative (0). 
``` # load the data # stemcells pos_data = [seq.seq for seq in sio.parse('notebook_data/stemcells.fa', 'fasta')] # fibroblasts neg_data = [seq.seq for seq in sio.parse('notebook_data/fibroblast.fa', 'fasta')] pos_labels = np.ones(len(pos_data), dtype=int) neg_labels = np.zeros(len(neg_data), dtype=int) y = np.concatenate((pos_labels, neg_labels), axis=0) ``` ### Motifs Now we have to decide which collection of motifs we will be using to construct the motif kernel. There are specific transcription factor binding sites which tend to be enriched in one of the groups. Naturally not every sequence contains the binding site for these transcription factors but we should be able to correctly classify those that do. In stemcells, we expect the **oct4** binding site to be enriched because this protein has been shown to be heavily involved in the pluripotency of cells. In fibroblast lung tissue on the other hand, we expect the **mafk** binding site to be enriched. The [JASPAR](http://jaspar.genereg.net/) database allows us to check for the binding motifs of the transcription factors. If we search for **oct4** and **mafk** we get the following binding sites: #### **oct4** ![](notebook_data/oct4.png) #### **mafk** ![](notebook_data/mafk.png) We can now add the most prominent motifs of these binding sites to our motif collection. For this tutorial we chose the motif *TCAGCA* of the **mafk** binding site and the motifs *ATGCAA* and *TTGT* of the **oct4** binding site. This decision is based on which motifs are the most prominent. Since the reads originate from both strands we also have to include the reverse complement of the motifs. In normal use cases the number of motifs is usually a lot higher but for this tutorial the six motifs will be enough. 
``` motif_collection = ["TCAGCA","TGCTGA","ATGCAA","TTGCAT","TTGT","ACAA"] #create the motif kernel motif_kernel = motifKernel(motif_collection) #use the motif kernel to compute the motif content matrix for all sequences motif_matrix = motif_kernel.compute_matrix(pos_data + neg_data) ``` ### Classification We can now split the matrix into test and training data. The training data can then be used to train the SVM. We will keep 30% of the data as test data to evaluate the model. ``` #split the data into test and training set X_train, X_test, y_train, y_test = train_test_split(motif_matrix, y, test_size=0.3, random_state=42, stratify=y) #train the classifier clf = SVC() clf.fit(X_train, y_train) ``` ### Results The only thing left to do is to analyze the trained model. *sklearn* provides a function which can be used to produce a classifcation report. This report shows us the precision, recall and f1-score when we apply the model to our test data. We will also plot the ROC and PRC but first we will have to define a couple of wrapper functions. 
``` def plot_roc_curve(y_test, y_score): '''Plots a roc curve including a baseline''' fpr, tpr, thresholds = roc_curve(y_test, y_score) roc_auc = auc(fpr, tpr) plt.figure() lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating curve') plt.legend(loc="lower right") plt.show() def plot_prec_recall_curve(y_test, y_scores): '''Plots a precision-recall curve including a baseline''' precision, recall, thresholds = precision_recall_curve(y_test, y_scores) average_precision = average_precision_score(y_test, y_scores) baseline = np.bincount(y_test)[1] / sum(np.bincount(y_test)) plt.figure() plt.step(recall, precision, color='b', alpha=0.2, where='post') plt.fill_between(recall, precision, step='post', alpha=0.2, color='b') plt.axhline(y=baseline, linewidth=2, color='navy', linestyle='--') plt.xlabel('Recall') plt.ylabel('Precision') plt.ylim([0.0, 1.05]) plt.xlim([0.0, 1.0]) plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format( average_precision)) plt.show() def reportclassfication(clf, X_test, y_test): '''Reports classification results with the given model and testdata''' print("Detailed classification report:") print() y_true, y_pred = y_test, clf.predict(X_test) print(classification_report(y_true, y_pred)) print() # get classfication results with the tuned parameters reportclassfication(clf, X_test, y_test) y_scores = clf.decision_function(X_test) # plot ROC plot_roc_curve(y_test, y_scores) # plot PRC plot_prec_recall_curve(y_test, y_scores) ``` We are able to correctly classify around 80% of the stemcell sequences and 70% of the fibroblast sequences. 
Obviously this is not the best classification result one could have, but we can see that the motifs we derived from the binding sites of transcription factors can be used to classify sequences into cell populations. In this example we only used the binding sites of two transcription factors to create a motif collection. For better classification results you could include more prior information about the cell populations to extend the motif collection.
github_jupyter
## List of callbacks ``` from fastai.gen_doc.nbdoc import * from fastai.vision import * from fastai.text import * from fastai.callbacks import * from fastai.basic_train import * from fastai.train import * from fastai import callbacks ``` fastai's training loop is highly extensible, with a rich *callback* system. See the [`callback`](/callback.html#callback) docs if you're interested in writing your own callback. See below for a list of callbacks that are provided with fastai, grouped by the module they're defined in. Every callback that is passed to [`Learner`](/basic_train.html#Learner) with the `callback_fns` parameter will be automatically stored as an attribute. The attribute name is snake-cased, so for instance [`ActivationStats`](/callbacks.hooks.html#ActivationStats) will appear as `learn.activation_stats` (assuming your object is named `learn`). ## [`Callback`](/callback.html#Callback) This sub-package contains more sophisticated callbacks that each are in their own module. They are (click the link for more details): ### [`LRFinder`](/callbacks.lr_finder.html#LRFinder) Use Leslie Smith's [learning rate finder](https://www.jeremyjordan.me/nn-learning-rate/) to find a good learning rate for training your model. Let's see an example of use on the MNIST dataset with a simple CNN. ``` path = untar_data(URLs.MNIST_SAMPLE) data = ImageDataBunch.from_folder(path) def simple_learner(): return Learner(data, simple_cnn((3,16,16,2)), metrics=[accuracy]) learn = simple_learner() ``` The fastai library already has a Learner method called [`lr_find`](/train.html#lr_find) that uses [`LRFinder`](/callbacks.lr_finder.html#LRFinder) to plot the loss as a function of the learning rate. ``` learn.lr_find() learn.recorder.plot() ``` In this example, a learning rate around 2e-2 seems like the right fit. 
``` lr = 2e-2 ``` ### [`OneCycleScheduler`](/callbacks.one_cycle.html#OneCycleScheduler) Train with Leslie Smith's [1cycle annealing](https://sgugger.github.io/the-1cycle-policy.html) method. Let's train our simple learner using the one cycle policy. ``` learn.fit_one_cycle(3, lr) ``` The learning rate and the momentum were changed during the epochs as follows (more info on the [dedicated documentation page](https://docs.fast.ai/callbacks.one_cycle.html)). ``` learn.recorder.plot_lr(show_moms=True) ``` ### [`MixUpCallback`](/callbacks.mixup.html#MixUpCallback) Data augmentation using the method from [mixup: Beyond Empirical Risk Minimization](https://arxiv.org/abs/1710.09412). It is very simple to add mixup in fastai : ``` learn = Learner(data, simple_cnn((3, 16, 16, 2)), metrics=[accuracy]).mixup() ``` ### [`CSVLogger`](/callbacks.csv_logger.html#CSVLogger) Log the results of training in a csv file. Simply pass the CSVLogger callback to the Learner. ``` learn = Learner(data, simple_cnn((3, 16, 16, 2)), metrics=[accuracy, error_rate], callback_fns=[CSVLogger]) learn.fit(3) ``` You can then read the csv. ``` learn.csv_logger.read_logged_file() ``` ### [`GeneralScheduler`](/callbacks.general_sched.html#GeneralScheduler) Create your own multi-stage annealing schemes with a convenient API. To illustrate, let's implement a 2 phase schedule. 
``` def fit_odd_shedule(learn, lr): n = len(learn.data.train_dl) phases = [TrainingPhase(n).schedule_hp('lr', lr, anneal=annealing_cos), TrainingPhase(n*2).schedule_hp('lr', lr, anneal=annealing_poly(2))] sched = GeneralScheduler(learn, phases) learn.callbacks.append(sched) total_epochs = 3 learn.fit(total_epochs) learn = Learner(data, simple_cnn((3,16,16,2)), metrics=accuracy) fit_odd_shedule(learn, 1e-3) learn.recorder.plot_lr() ``` ### [`MixedPrecision`](/callbacks.fp16.html#MixedPrecision) Use fp16 to [take advantage of tensor cores](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) on recent NVIDIA GPUs for a 200% or more speedup. ### [`HookCallback`](/callbacks.hooks.html#HookCallback) Convenient wrapper for registering and automatically deregistering [PyTorch hooks](https://pytorch.org/tutorials/beginner/former_torchies/nn_tutorial.html#forward-and-backward-function-hooks). Also contains pre-defined hook callback: [`ActivationStats`](/callbacks.hooks.html#ActivationStats). ### [`RNNTrainer`](/callbacks.rnn.html#RNNTrainer) Callback taking care of all the tweaks to train an RNN. ### [`TerminateOnNaNCallback`](/callbacks.tracker.html#TerminateOnNaNCallback) Stop training if the loss reaches NaN. ### [`EarlyStoppingCallback`](/callbacks.tracker.html#EarlyStoppingCallback) Stop training if a given metric/validation loss doesn't improve. ### [`SaveModelCallback`](/callbacks.tracker.html#SaveModelCallback) Save the model at every epoch, or the best model for a given metric/validation loss. ``` learn = Learner(data, simple_cnn((3,16,16,2)), metrics=accuracy) learn.fit_one_cycle(3,1e-4, callbacks=[SaveModelCallback(learn, every='epoch', monitor='accuracy')]) !ls ~/.fastai/data/mnist_sample/models ``` ### [`ReduceLROnPlateauCallback`](/callbacks.tracker.html#ReduceLROnPlateauCallback) Reduce the learning rate each time a given metric/validation loss doesn't improve by a certain factor. 
### [`PeakMemMetric`](/callbacks.mem.html#PeakMemMetric) GPU and general RAM profiling callback ### [`StopAfterNBatches`](/callbacks.misc.html#StopAfterNBatches) Stop training after n batches of the first epoch. ### [`LearnerTensorboardWriter`](/callbacks.tensorboard.html#LearnerTensorboardWriter) Broadly useful callback for Learners that writes to Tensorboard. Writes model histograms, losses/metrics, embedding projector and gradient stats. ## [`train`](/train.html#train) and [`basic_train`](/basic_train.html#basic_train) ### [`Recorder`](/basic_train.html#Recorder) Track per-batch and per-epoch smoothed losses and metrics. ### [`ShowGraph`](/train.html#ShowGraph) Dynamically display a learning chart during training. ### [`BnFreeze`](/train.html#BnFreeze) Freeze batchnorm layer moving average statistics for non-trainable layers. ### [`GradientClipping`](/train.html#GradientClipping) Clips gradient during training.
github_jupyter
## polyarea Notebook ``` ## Setup with modules needed import numpy as np import matplotlib.pyplot as plt # Make a plot to see shape import matplotlib def readnodes( file='stdin' ): '''Function to read the X,Y coordinates of nodes of the points in the triangle. The circuit around the nodes should be in clockwise direction. Input "file" is name of file. If not passed, keyboard is used Useage: numnode, nodes_xy = readnodes('stdin'/file) ''' # First see if file or std in to be read if file == 'stdin': inpstr = input('Coordinate pairs of nodes ') nodelist = list(map(float,inpstr.split(' '))) # Now make numpy array and reshape numlist = len(nodelist) if numlist != (numlist//2)*2: print('Even number of nodes nodes needed\n',numlist,'given') return 0, 0 numnode = int(numlist//2) nodes_xy = np.array(nodelist).reshape(numnode,2) else: try: nodes_xy = np.genfromtxt(file,delimiter=',') numnode = np.shape(nodes_xy)[0] except: print('Exception reading',file) return 0, 0 # Replicate the last element in array to close the polygon if not np.all(nodes_xy[numnode-1,:] == nodes_xy[0,:]): nodes_xy = np.append(nodes_xy,nodes_xy[0]) # The matrix reverts to an array when this is done, so # a reshape is needed numnode += 1 nodes_xy = np.array(nodes_xy).reshape(numnode,2) # Returns number of nodes and np array return numnode, nodes_xy def form_vectors(nodes_xy): '''Function form the two vectors that make up the triangle from the node coordinates. 
Usage: trivec = form_vector(nodes_xy) where nodes_xy is numpy rows*2 array trivec is numpy array ''' return nodes_xy-nodes_xy[0] def plotpoly(xy): ''' Plots the 2-D polygon in numpy array xy''' plt.rcParams['figure.figsize'] = [5, 5] plt.rcParams.update({'font.size': 16}) plt.plot(xy[:,0],xy[:,1],'o-',label='Polygon') plt.legend() plt.xlabel('X coordinate'); plt.ylabel('Y coordinate'); plt.show() def triarea(xy,scale=1.0): '''Compute the area enclosed by summing the areas of each triangle that make up the polygon Useaage area = triarea(nodes_xy,scale) ''' # Form the cross product Z-component and divide by two since # cross product a x b = |a||b| sin(theta) in direction normal # to the plane of vectors a and b. theta is the angle between # vectors. # Note: Sign will depend on if we rotate clockwise or # anti-clockwise between vector. A change in sign indicates # a change from convex to concave. # Numpy has method to form cross product n = np.shape(xy)[0]-2 # Take vectors in pairs so stop one from end area = 0 for k in range(1,n): darea = (xy[k,0]*xy[k+1,1]-xy[k+1,0]*xy[k,1])/2 if k == 1: signa = np.sign(darea) else: if signa != np.sign(darea): print('Concave at node ',k) area += darea*scale**2 return area ``` ## Main part of polyarea: Here to run program ``` # Read in the polygon coordinates ('stdin' will read from keyboard) print('\n POLYGON AREA calculation\n') numnode, nodes_xy = readnodes('poly1.in') # (Substract one on output becuase the first and last points are the # same so that the line closes) print('Polygon has',numnode-1,'nodes in it') # Form the vectors from the first point to all other points trivec = form_vectors(nodes_xy) # Compute the area of the polygon Area = triarea(trivec) # Print out the results print('AREA of the polygon is ',Area) # Plot the polygon plotpoly(nodes_xy) ```
github_jupyter
# Gaussian Naive Bayes Classifier with Standard Scaler This Code template is for Classification task using Gaussian Naive Bayes Algorithm where the scaling technique used is StandardScaler. ### Required Packages ``` !pip install imblearn import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as se from imblearn.over_sampling import RandomOverSampler from sklearn.naive_bayes import GaussianNB from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report,plot_confusion_matrix warnings.filterwarnings('ignore') ``` ### Initialization Filepath of CSV file ``` #filepath file_path= "" ``` List of features which are required for model training . ``` #x_values features= [] ``` Target feature for prediction. ``` #y_value target='' ``` ### Data Fetching Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry. ``` df=pd.read_csv(file_path) df.head() ``` ### Feature Selections It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model. We will assign all the required input features to X and target/outcome to Y. ``` X = df[features] Y = df[target] ``` ### Data Preprocessing Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. 
And convert the string classes data in the datasets by encoding them to integer classes. ``` def NullClearner(df): if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])): df.fillna(df.mean(),inplace=True) return df elif(isinstance(df, pd.Series)): df.fillna(df.mode()[0],inplace=True) return df else:return df def EncodeX(df): return pd.get_dummies(df) def EncodeY(df): if len(df.unique())<=2: return df else: un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort') df=LabelEncoder().fit_transform(df) EncodedT=[xi for xi in range(len(un_EncodedT))] print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT)) return df x=X.columns.to_list() for i in x: X[i]=NullClearner(X[i]) X=EncodeX(X) Y=EncodeY(NullClearner(Y)) X.head() ``` #### Correlation Map In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns. ``` f,ax = plt.subplots(figsize=(18, 18)) matrix = np.triu(X.corr()) se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix) plt.show() ``` #### Distribution Of Target Variable ``` plt.figure(figsize = (10,6)) se.countplot(Y) ``` ### Data Splitting The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data. ``` x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=40) ``` ## Handling Target Imbalance The challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn have poor performance on, the minority class, although typically it is performance on the minority class that is most important. One approach to addressing imbalanced datasets is to oversample the minority class. 
The simplest approach involves duplicating examples in the minority class.We will perform overspampling using imblearn library. ``` x_train,y_train = RandomOverSampler(random_state=123).fit_resample(x_train, y_train) ``` ### Model Gaussian NB is a variant of Naive Bayes that follows Gaussian normal distribution and supports continuous data. An approach to creating a simple model is to assume that the data is described by a Gaussian distribution with no co-variance between features. #### Model Tuning Parameters 1. priors : array-like of shape (n_classes,) > Prior probabilities of the classes. If specified the priors are not adjusted according to the data. 2. var_smoothing : float, default=1e-9 > Portion of the largest variance of all features that is added to variances for calculation stability. #### Standard Scaler Standardize features by removing the mean and scaling to unit variance The standard score of a sample x is calculated as: $z = (x - u) / s$ where u is the mean of the training samples or zero if with_mean=False, and s is the standard deviation of the training samples or one if with_std=False. <h4 style="color:orange;">For More Reference :-</h4> <a href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html">https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html</a> ``` # Build Model here model = make_pipeline(StandardScaler(),GaussianNB()) model.fit(x_train,y_train) ``` #### Model Accuracy score() method return the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. 
``` print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100)) ``` #### Confusion Matrix A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known. ``` plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues) ``` #### Classification Report A Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False. * **where**: - Precision:- Accuracy of positive predictions. - Recall:- Fraction of positives that were correctly identified. - f1-score:- percent of positive predictions were correct - support:- Support is the number of actual occurrences of the class in the specified dataset. ``` print(classification_report(y_test,model.predict(x_test))) ``` #### Creator: Jay Shimpi , Github: [Profile](https://github.com/JayShimpi22)
github_jupyter
``` import matplotlib.pyplot as plt from math import exp from scipy import stats import seaborn as sns import pandas as pd from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import metrics import numpy as np import warnings warnings.filterwarnings("ignore") pd.set_option('display.max_columns', 300) ``` ### Read in cleaned movie data set ``` movie_df = pd.read_csv('resources/movie_dataset_us.csv', index_col=0) ``` ### Take a look at the data ``` movie_df = movie_df[movie_df['Other']!=1] movie_df.head() movie_df.describe() movie_df.dropna(subset=['duration'],inplace=True) features=['budget', 'duration','actor_1_facebook_likes','cast_total_facebook_likes','G', 'PG-13', 'R', 'yr_old'] df_features = movie_df[features] target = movie_df['gross'] df_features.describe() from sklearn.linear_model import LinearRegression #instantiate a linear regression object lm = LinearRegression() #fit the linear regression to the data # array of variables lm = lm.fit(df_features, target) #access output print(lm.intercept_) print(lm.coef_) #lm.score, pretty sure it's adjusted r squared. 
you'll see below print("R^2: ", lm.score(df_features, target)) ``` ### Create a polynomial feature ``` #df['budget^2'] = df['budget']**2 ``` ### Create a interaction feature ``` #df['budget_R'] = df['budget']*df['R'] ``` ### Use SKlearn to create features ``` from sklearn.preprocessing import PolynomialFeatures poly = PolynomialFeatures(degree=2, include_bias=False) poly_data = poly.fit_transform(df_features) poly_data len(df_features.columns) poly_columns = poly.get_feature_names(df_features.columns) len(poly_columns) df_poly = pd.DataFrame(poly_data, columns=poly_columns) df_poly.head() df_features.shape df_poly.shape ``` ### Fit and Assess new model ``` #instantiate a linear regression object lm_2 = LinearRegression() #fit the linear regression to the data lm_2 = lm_2.fit(df_poly, target) #access output # print(lm_2.intercept_) # print(lm_2.coef_) print("R^2: ", lm_2.score(df_poly, target)) ``` ### Fit third degree polynomial ``` poly_3 = PolynomialFeatures(degree=3, include_bias=False) poly3_data = poly_3.fit_transform(df_features) poly3_columns = poly_3.get_feature_names(df_features.columns) df_poly3 = pd.DataFrame(poly3_data, columns=poly3_columns) df_poly3.shape df_poly3.head() #instantiate a linear regression object lm_3 = LinearRegression() #fit the linear regression to the data lm_3 = lm_3.fit(df_poly3, target) #access output # print(lm_3.intercept_) # print(lm_3.coef_) print("R^2: ", lm_3.score(df_poly3, target)) ``` # Model Evaluation Agenda: - R^2 - Bias versus Variance - Train Test Split - Model Evaluation ## Coefficient of Determination ($R^2$) The _coefficient of determination_, is a measure of how well the model fits the data. It is a statistic used in the context of statistical models whose main purpose is either the prediction of future outcomes or the testing of hypotheses, on the basis of other related information. 
It provides a measure of how well observed outcomes are replicated by the model, based on the proportion of total variation of outcomes explained by the model $R^2$ for a model is ultimately a _relational_ notion. It's a measure of goodness of fit _relative_ to a (bad) baseline model. This bad baseline model is simply the horizontal line $y = \mu_Y$, for dependent variable $Y$. $$\text{TSS }= \text{ESS} + \text{RSS }$$ - TSS or SST = Total Sum of Squares - ESS or SSE = Explained Sum of Squares - RSS or SSR = Residual Sum of Squares The actual calculation of $R^2$ is: <br/> $$\Large R^2= \frac{\Sigma_i(\bar{y} - \hat{y}_i)^2}{\Sigma_i(y_i - \bar{y})^2}=1- \frac{\Sigma_i(y_i - \hat{y}_i)^2}{\Sigma_i(y_i - \bar{y})^2}$$. $R^2$ takes values between 0 and 1. $R^2$ is a measure of how much variation in the dependent variable your model explains. <img src='https://pbs.twimg.com/media/D-Gu7E0WsAANhLY.png' width ="700"> ## What Is the Adjusted R-squared? The adjusted R-squared compares the explanatory power of regression models that contain different numbers of predictors. Suppose you compare a five-predictor model with a higher R-squared to a one-predictor model. Does the five predictor model have a higher R-squared because it’s better? Or is the R-squared higher because it has more predictors? Simply compare the adjusted R-squared values to find out! $$Adjusted R^2=1-\left(\frac{n-1}{n-p}\right)(1-R^2)$$ Where: n = sample size p = the number of independent variables in the regression equation - The adjusted R-squared is a modified version of R-squared that has been adjusted for the number of predictors in the model. - The adjusted R-squared increases only if the new term improves the model more than would be expected by chance. It decreases when a predictor improves the model by less than expected by chance. - It is always lower than the R-squared. 
## Probabilistic Model Selection Probabilistic model selection (or “information criteria”) provides an analytical technique for scoring and choosing among candidate models. Models are scored both on their performance on the training dataset and based on the complexity of the model. - **Model Performance:** How well a candidate model has performed on the training dataset. - **Model Complexity:** How complicated the trained candidate model is after training. Model performance may be evaluated using a probabilistic framework, such as log-likelihood under the framework of maximum likelihood estimation. Model complexity may be evaluated as the number of degrees of freedom or parameters in the model. ### Akaike Information Criterion vs. Bayesian Information Criterion The model with the lower AIC or BIC should be selected. Despite various subtle theoretical differences, their only difference in practice is the size of the penalty; BIC penalizes model complexity more heavily. Compared to the BIC method (below), the AIC statistic penalizes complex models less, meaning that it may put more emphasis on model performance on the training dataset, and, in turn, select more complex models. A downside of BIC is that for smaller, less representative training datasets, it is more likely to choose models that are too simple. https://machinelearningmastery.com/probabilistic-model-selection-measures/ https://www.methodology.psu.edu/resources/AIC-vs-BIC/ ## The Machine Learning Process 1. Look at the big picture. 2. Get the data. 3. Discover and visualize the data to gain insights. 4. Prepare the data for Machine Learning algorithms. 5. Select a model and train it. 6. Fine-tune your model. 7. Present your solution. 8. Launch, monitor, and maintain your system. 
<img src='https://www.kdnuggets.com/wp-content/uploads/crisp-dm-4-problems-fig1.png' width ="400"> **A proper machine learning workflow includes:** * Separate training and test sets * Trying appropriate algorithms (No Free Lunch) * Fitting model parameters * Tuning impactful hyperparameters * Proper performance metrics * Systematic cross-validation # Prediction Evaluation ## Bias - Variance There are 3 types of prediction error: bias, variance, and irreducible error. **Total Error = Bias + Variance + Irreducible Error** ``` #Know this ``` ### The Bias-Variance Tradeoff **Let's do a thought experiment:** 1. Imagine you've collected 5 different training sets for the same problem. 2. Now imagine using one algorithm to train 5 models, one for each of your training sets. 3. Bias vs. variance refers to the accuracy vs. consistency of the models trained by your algorithm. <img src='resources/Bias-vs.-Variance-v5-2-darts.png' width=500 /> **High bias** algorithms tend to be less complex, with simple or rigid underlying structure. + They train models that are consistent, but inaccurate on average. + These include linear or parametric algorithms such as regression and naive Bayes. On the other hand, **high variance** algorithms tend to be more complex, with flexible underlying structure. + They train models that are accurate on average, but inconsistent. + These include non-linear or non-parametric algorithms such as decision trees and nearest neighbors. ### Bias-Variance Tradeoff This tradeoff in complexity is why there's a tradeoff in bias and variance - an algorithm cannot simultaneously be more complex and less complex. **Total Error = Bias^2 + Variance + Irreducible Error** <img src='resources/Bias-vs.-Variance-v4-chart.png' width=500 /> ### Error from Bias **Bias** is the difference between your model's expected predictions and the true values. 
<img src='resources/noisy-sine-linear.png' width=500 /> ``` # if you slightly changed the data set, a lot would change for high variance ``` ### Error from Variance **Variance** refers to your algorithm's sensitivity to specific sets of training data. <img src='resources/noisy-sine-decision-tree.png' width=500/> Which one is overfit and which one is underfit? We want to try to find the proper balance of variance and bias <img src='resources/noisy-sine-third-order-polynomial.png' width=500 /> # Train Test Split **How do we know if our model is overfitting or underfitting?** If our model is not performing well on the training data, we are probably underfitting it. To know if our model is overfitting the data, we need to test our model on unseen data. We then measure our performance on the unseen data. If the model performs way worse on the unseen data, it is probably overfitting the data. The previous module introduced the idea of dividing your data set into two subsets: * **training set** —a subset to train a model. * **test set**—a subset to test the trained model. You could imagine slicing the single data set as follows: <img src='resources/testtrainsplit.png' width =550 /> **Never train on test data.** If you are seeing surprisingly good results on your evaluation metrics, it might be a sign that you are accidentally training on the test set. <img src='https://developers.google.com/machine-learning/crash-course/images/WorkflowWithTestSet.svg' width=500/> ## Model Evaluation Metrics for Regression **Mean Absolute Error** (MAE) is the mean of the absolute value of the errors: ![alt text](resources/mae.png) **Mean Squared Error** (MSE) is the mean of the squared errors: ![alt text](resources/mse.png) **Root Mean Squared Error (RMSE)** is the square root of the mean of the squared errors: ![alt text](resources/rmse.png) ``` # overfit would be training errors not bad, but testing error is high ``` MSE is more popular than MAE because MSE "punishes" larger errors. 
But, RMSE is even more popular than MSE because RMSE is interpretable in the "y" units. Additionally, I like to divide the RMSE by the standard deviation to convert it to something similar to a Z-Score. # Practicum ### Identify my features and target variable ``` features df_features.columns ``` ### Create Train and Test Split The random state variable makes it so you can always have the same 'random' split ``` #import train_test_split from sklearn package from sklearn.model_selection import train_test_split #call train_test_split on the data and capture the results X_train, X_test, y_train, y_test = train_test_split(df_features, target, random_state=1,test_size= .2) #check the shape of the results print("Training set - Features: ", X_train.shape, "Target: ", y_train.shape) print("Testing set - Features: ", X_test.shape, "Target: ",y_test.shape) # fit a model from sklearn import linear_model #instantiate a linear regression object lm = linear_model.LinearRegression() #fit the linear regression to the training data lm.fit(X_train, y_train) print(lm.intercept_) print(lm.coef_) ``` ### How well did our model perform Previously we have looked at the R^2 of the model to determine how good of a model this is. ``` print ("R^2 Score:", lm.score(X_train, y_train)) #predict on the training data y_train_pred = lm.predict(X_train) y_train_pred #import the metrics module from sklearn from sklearn import metrics train_mae = metrics.mean_absolute_error(y_train, y_train_pred) train_mse = metrics.mean_squared_error(y_train, y_train_pred) train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_train_pred)) print('Mean Absolute Error:', train_mae ) print('Mean Squared Error:', train_mse) print('Root Mean Squared Error:' , train_rmse) ``` ***Sidenote:*** When using MAE or RMSE it can be difficult to understand how 'good' or 'bad' a model is because those numbers are dependent on the scale of the target variable. 
One way to handle this is to divide those metrics by the standard deviation of the target variable. Now your MAE and RMSE are given in terms of Z-scores. This is not an 'official' metric, but is a good way to help give context to a score. ``` price_std = target.std() print('Z-Score of Mean Absolute Error:', train_mae/price_std ) print('Z-Score of Root Mean Squared Error:' , train_rmse/price_std) ``` ### Predicting the Test Set ``` #predict on the test set of data y_pred = lm.predict(X_test) y_pred[:10] print ("Score:", lm.score(X_test, y_test)) test_mae = metrics.mean_absolute_error(y_test, y_pred) test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_pred)) print('Mean Absolute Error:' + str(metrics.mean_absolute_error(y_test, y_pred))) print('Mean Squared Error:' + str(metrics.mean_squared_error(y_test, y_pred))) print('Root Mean Squared Error:' + str(np.sqrt(metrics.mean_squared_error(y_test, y_pred)))) print('Mean Absolute Error Z:', test_mae/price_std ) print('Root Mean Squared Error Z:' , test_rmse/price_std) ``` ### Comparing our Model's performance on training data versus test data. ``` print('Training: ', int(train_rmse), "vs. Testing: ", int(test_rmse)) #So not overfit because they're relatively the same ``` ## Check the assumptions of linear regression Article explaining those assumptions and how to check them. https://towardsdatascience.com/assumptions-of-linear-regression-5d87c347140 1) Check to see if our errors are normally distributed ``` #create a data array of our errors/residuals # this is for the test set residuals = (y_test- y_pred) #create a data array of our errors/residuals residuals = (____- ____) #Create a histogram plot of the errors to see if it is normally distributed plt.hist(residuals) ``` 2) Check to see if our errors are Independent and identically distributed (IID) and homoscedastic. 
``` # for test it's not bad, not a pattern in residuals sns.residplot(y_test, residuals, lowess=True, color="g") # if we did see a pattern, could take log ``` ## Use the log of the gross ``` #instantiate a linear regression object lm_log = LinearRegression() #fit the linear regression to the log of the target variable lm_log.fit(X_train, np.log(y_train)) #access output print(lm_log.intercept_) print(lm_log.coef_) # everything the same, just instead it is predicting on the log of y_train y_log_train_pred = lm_log.predict(X_train) y_log_train_pred[:10] ``` Now that we have the predictions, we need to exponentiate them to get them back into the original scale, dollars. ``` y_log_train_pred = np.exp(y_log_train_pred) y_log_train_pred[:10] log_train_mae = metrics.mean_absolute_error(y_train, y_log_train_pred) log_train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_log_train_pred)) print('Mean Absolute Error:', log_train_mae ) print('Root Mean Squared Error:' , log_train_rmse) ``` Let's see how it performs on the test data ``` y_log_pred = lm_log.predict(X_test) y_log_pred = np.exp(y_log_pred) test_log_mae = metrics.mean_absolute_error(y_test, y_log_pred) test__log_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_log_pred)) print('Mean Absolute Error:' + str(test_log_mae)) print('Root Mean Squared Error:' + str(test__log_rmse)) ``` Compare RMSE between original and logged target ``` print('Original: ', int(test_rmse), "vs. 
Logged: ", int(test__log_rmse)) ``` Can we check the residuals to see if it explains why the model didn't do as well ``` residuals = (y_test - y_log_pred) plt.hist(residuals, bins=30) sns.residplot(y_test, residuals, lowess=True, color="g") ``` ## Evaluate Polynomial model ### Train Test Split the polynomial data ``` #call train_test_split on the data and capture the results # df_poly for double, 3 for triple X_train_poly, X_test_poly, y_train_poly, y_test_poly = train_test_split(df_poly3, target, random_state=34,test_size=0.2) #check the shape of the results print("Training set - Features: ", X_train_poly.shape, "Target: ", y_train_poly.shape) print("Training set - Features: ", X_test_poly.shape, "Target: ",y_test_poly.shape) ``` ### Fit the Model ``` #instantiate a linear regression object lr_poly = LinearRegression() #fit the linear regression to the data lr_poly = lr_poly.fit(X_train_poly, y_train_poly) ``` ### Predict on the training set ``` train_preds = lr_poly.predict(X_train_poly) ``` ### Evaluate Training Data ``` train_mae_poly = metrics.mean_absolute_error(y_train_poly, train_preds) train_mse_poly = metrics.mean_squared_error(y_train_poly, train_preds) train_rmse_poly = np.sqrt(metrics.mean_squared_error(y_train_poly, train_preds)) print('Mean Absolute Error:', train_mae_poly ) print('Mean Squared Error:', train_mse_poly) print('Root Mean Squared Error:' , train_rmse_poly) ``` ### Predict the test set ``` test_preds = lr_poly.predict(X_test_poly) ``` ### Evaluate the test set ``` test_mae_poly = metrics.mean_absolute_error(y_test_poly, test_preds) test_rmse_poly = np.sqrt(metrics.mean_squared_error(y_test_poly, test_preds)) print('Mean Absolute Error:' + str(test_mae_poly)) print('Root Mean Squared Error:' + str(test_rmse_poly)) ``` ### Comparing our Model's performance on training data versus test data. ``` print('Training: ', int(train_rmse_poly), "vs. 
Testing: ", int(test_rmse_poly)) # want model to generalize better to unseen data # zipcode* will unpack them ``` ### Evaluate the Polynomial 3 dataset
github_jupyter
# Using Automated Machine Learning There are many kinds of machine learning algorithms that you can use to train a model, and sometimes it's not easy to determine the most effective algorithm for your particular data and prediction requirements. Additionally, you can significantly affect the predictive performance of a model by preprocessing the training data, using techniques such as normalization, missing feature imputation, and others. In your quest to find the *best* model for your requirements, you may need to try many combinations of algorithms and preprocessing transformations, which takes a lot of time and compute resources. Azure Machine Learning enables you to automate the comparison of models trained using different algorithms and preprocessing options. You can use the visual interface in [Azure Machine Learning studio](https://ml.azure.com) or the SDK to leverage this capability. The SDK gives you greater control over the settings for the automated machine learning experiment, but the visual interface is easier to use. In this lab, you'll explore automated machine learning using the SDK. ## Connect to Your Workspace The first thing you need to do is to connect to your workspace using the Azure ML SDK. > **Note**: If the authenticated session with your Azure subscription has expired since you completed the previous exercise, you'll be prompted to reauthenticate. ``` import azureml.core from azureml.core import Workspace # Load the workspace from the saved config file ws = Workspace.from_config() print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name)) ``` ## Prepare Data for Automated Machine Learning You don't need to create a training script for automated machine learning, but you do need to load the training data. In this case, you'll create a dataset containing details of diabetes patients (just as you did in previous labs), and then split this into two datasets: one for training, and another for model validation. 
``` from azureml.core import Dataset default_ds = ws.get_default_datastore() if 'diabetes dataset' not in ws.datasets: default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'], # Upload the diabetes csv files in /data target_path='diabetes-data/', # Put it in a folder path in the datastore overwrite=True, # Replace existing files of the same name show_progress=True) #Create a tabular dataset from the path on the datastore (this may take a short while) tab_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-data/*.csv')) # Register the tabular dataset try: tab_data_set = tab_data_set.register(workspace=ws, name='diabetes dataset', description='diabetes data', tags = {'format':'CSV'}, create_new_version=True) print('Dataset registered.') except Exception as ex: print(ex) else: print('Dataset already registered.') # Split the dataset into training and validation subsets diabetes_ds = ws.datasets.get("diabetes dataset") train_ds, test_ds = diabetes_ds.random_split(percentage=0.7, seed=123) print("Data ready!") ``` ## Configure Automated Machine Learning Now you're ready to configure the automated machine learning experiment. To do this, you'll need a run configuration that includes the required packages for the experiment environment, and a set of configuration settings that specifies how many combinations to try, which metric to use when evaluating models, and so on. > **Note**: In this example, you'll run the automated machine learning experiment on local compute to avoid waiting for a cluster to start. This will cause each iteration (child-run) to run serially rather than in parallel. For this reason, we're restricting the experiment to 6 iterations to reduce the amount of time taken. In reality, you'd likely try many more iterations on a compute cluster. 
``` from azureml.train.automl import AutoMLConfig automl_config = AutoMLConfig(name='Automated ML Experiment', task='classification', compute_target='local', enable_local_managed=True, training_data = train_ds, validation_data = test_ds, label_column_name='Diabetic', iterations=6, primary_metric = 'AUC_weighted', max_concurrent_iterations=4, featurization='auto' ) print("Ready for Auto ML run.") ``` ## Run an Automated Machine Learning Experiment OK, you're ready to go. Let's run the automated machine learning experiment. ``` from azureml.core.experiment import Experiment from azureml.widgets import RunDetails print('Submitting Auto ML experiment...') automl_experiment = Experiment(ws, 'diabetes_automl') automl_run = automl_experiment.submit(automl_config) RunDetails(automl_run).show() automl_run.wait_for_completion(show_output=True) ``` ## Determine the Best Performing Model When the experiment has completed, view the output in the widget, and click the run that produced the best result to see its details. Then click the link to view the experiment details in the Azure portal and view the overall experiment details before viewing the details for the individual run that produced the best result. There's lots of information here about the performance of the model generated. Let's get the best run and the model that it produced. ``` best_run, fitted_model = automl_run.get_output() print(best_run) print(fitted_model) best_run_metrics = best_run.get_metrics() for metric_name in best_run_metrics: metric = best_run_metrics[metric_name] print(metric_name, metric) ``` Automated machine learning includes the option to try preprocessing the data, which is accomplished through the use of [Scikit-Learn transformation pipelines](https://scikit-learn.org/stable/modules/compose.html#combining-estimators) (not to be confused with Azure Machine Learning pipelines!). These produce models that include steps to transform the data before inferencing. 
You can view the steps in a model like this: ``` for step in fitted_model.named_steps: print(step) ``` Finally, having found the best performing model, you can register it. ``` from azureml.core import Model # Register model best_run.register_model(model_path='outputs/model.pkl', model_name='diabetes_model_automl', tags={'Training context':'Auto ML'}, properties={'AUC': best_run_metrics['AUC_weighted'], 'Accuracy': best_run_metrics['accuracy']}) # List registered models for model in Model.list(ws): print(model.name, 'version:', model.version) for tag_name in model.tags: tag = model.tags[tag_name] print ('\t',tag_name, ':', tag) for prop_name in model.properties: prop = model.properties[prop_name] print ('\t',prop_name, ':', prop) print('\n') ``` > **More Information**: For more information about Automated Machine Learning, see the [Azure ML documentation](https://docs.microsoft.com/azure/machine-learning/how-to-configure-auto-train).
github_jupyter
![](https://raw.githubusercontent.com/om-hb/kgextension/master/docs/_static/logo.png) # Getting Started with the Knowledge Graph Extension for Python The **kgextension** package allows you to access and use Linked Open Data to augment existing datasets for improving a classification/clustering task. It enables you to incorporate knowledge graph information in [pandas.DataFrames](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) and can be used within the [scikit-learn pipeline](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html). Its functionality includes: * Linking datasets to any Linked Open Data (LOD) Source such as [DBpedia](https://wiki.dbpedia.org/), [WikiData](https://www.wikidata.org/wiki/Wikidata:Main_Page), or the [EU Open Data Portal](https://data.europa.eu/euodp/en/home) * Generation of new features from the LOD Sources * Hierarchy-based feature selection algorithms * Data Integration of features from different sources The source code can be found at [Github](https://github.com/om-hb/kgextension) and the documentation at [Read the Docs](https://kgextension.readthedocs.io/en/latest/) To demonstrate the usage of the KG extension, a [kaggle dataset about the top 50 bestselling books from Amazon (2009-2019)](https://www.kaggle.com/sootersaalu/amazon-top-50-bestselling-books-2009-2019) is chosen. The dataset contains 550 books which are classified into `Fiction` and `Non-Fiction` (using Goodreads). In this small tutorial we will see how to increase the F1-measure from 0.81 to 0.93 using the kgextension. 
Lets load the data and have a look: ``` import pandas as pd df_raw = pd.read_csv('https://raw.githubusercontent.com/om-hb/kgextension/master/examples/data/book_genre_prediction.csv') display(df_raw.head()) display(df_raw.describe(include='all', percentiles=[])) ``` ## Scikit-learn baseline classification As a first step, a simple classification **without** additonal attributes from LOD (Linked Open Data) is performed. This serves as simple baseline. Afterwards new attributes will be added with the help of the KG extension to see if they help. As usual in a ML setup, a train test split is executed and the target attribute is encoded as 0 and 1. ``` from sklearn.model_selection import train_test_split from sklearn import preprocessing import numpy as np label_binarizer = preprocessing.LabelBinarizer() y = label_binarizer.fit_transform(df_raw['Genre']).ravel() #X = df_raw[['User Rating', 'Reviews', 'Price', 'Year']] # df_raw.drop(columns='Genre')# X = df_raw.drop(columns='Genre') X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.33, stratify=y, random_state=42) ``` Now lets train a RandomForest and see how good we are on the test set. But before we encode the name and author in a one hot fashion. There is also code in comments which drops the columns name and author (of course the f1-score drops as well). 
``` #preprocess from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder one_hot_enc = OneHotEncoder(sparse=False, handle_unknown='ignore') ct = ColumnTransformer([("onehot", one_hot_enc, ["Name", "Author"])], remainder='passthrough') X_train_enc = ct.fit_transform(X_train) X_test_enc = ct.transform(X_test) import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay clf = RandomForestClassifier(random_state=1235).fit(X_train_enc, y_train) y_pred = clf.predict(X_test_enc) #drop name and author (thus only predicting on 'User Rating', 'Reviews', 'Price', and 'Year') #clf = RandomForestClassifier(random_state=1235).fit(X_train[['User Rating', 'Reviews', 'Price', 'Year']], y_train) #y_pred = clf.predict(X_test[['User Rating', 'Reviews', 'Price', 'Year']]) # result for fiction f1 score would be 0.76 print(classification_report(y_test, y_pred, target_names=label_binarizer.classes_)) ConfusionMatrixDisplay(confusion_matrix=confusion_matrix(y_test, y_pred), display_labels=label_binarizer.classes_).plot() plt.show() ``` ## Enhance the dataset Now it is time to enhance the dataset with knowledge from Linked Open Data (LOD). In this case we try to find the author in DBpedia and fetch additional knowledge from it. As a very first step, the [kgextension pip package](https://pypi.org/project/kgextension/) needs to be downloaded and installed. ``` !pip install kgextension ``` The next step is to link the author to DBpedia. This is done with a Linker. 
[Several of them are implemented](https://kgextension.readthedocs.io/en/latest/source/usage_linking.html) like: - [Pattern Linker](https://kgextension.readthedocs.io/en/latest/source/usage_linking.html#pattern-linker) - creates a URI based on a cell in the dataframe - [Label Linker](https://kgextension.readthedocs.io/en/latest/source/usage_linking.html#label-linker) - runs a SPARQL query to search for entities with a given rdfs:label - [DBpedia Lookup Linker](https://kgextension.readthedocs.io/en/latest/source/usage_linking.html#dbpedia-lookup-linker) - queries the DBpedia Lookup web service - [DBpedia Spotlight Linker](https://kgextension.readthedocs.io/en/latest/source/usage_linking.html#dbpedia-spotlight-linker) - queries the DBpedia [Spotlight service](https://www.dbpedia-spotlight.org/) - [sameAs Linker](https://kgextension.readthedocs.io/en/latest/source/usage_linking.html#sameas-linker) - searches for further URIs by following owl:sameAs links All of these linkers can be further customized when initializing them (see the corresponding documentation). For our case we choose the DbpediaLookupLinker based on the column `Author`. Let's see if we can find some links: ``` from kgextension.linking_sklearn import DbpediaLookupLinker linker = DbpediaLookupLinker(column='Author') df_enhanced = linker.fit_transform(df_raw.drop(columns='Genre')) df_enhanced.head() ``` As you probably already noticed, all components are wrapped as scikit-learn transformers. Thus you can use them directly in our scikit-learn Pipeline. But there are also the same functions which can be directly applied to any dataframe (without the scikit-learn wrapper) in the `kgextension.linking` module instead of `kgextension.linking_sklearn`. But now let's continue with [Generators](https://kgextension.readthedocs.io/en/latest/source/usage_generators.html). They actually extract the data from a knowledge graph in different ways e.g. 
- [Data Properties Generator](https://kgextension.readthedocs.io/en/latest/source/usage_generators.html#data-properties-generator) - creates an attribute for each literal value that the linked entity has - [Direct Type Generator](https://kgextension.readthedocs.io/en/latest/source/usage_generators.html#direct-type-generator) - extracts the type(s) of the linked resources (using rdf:type) - [Unqualified Relation Generator](https://kgextension.readthedocs.io/en/latest/source/usage_generators.html#unqualified-relation-generator) - creates attributes from the existence of relations - [Qualified Relation Generator](https://kgextension.readthedocs.io/en/latest/source/usage_generators.html#qualified-relation-generator) - creates attributes from the existence of relations and also takes the types of the related resources into account - [Specific Relation Generator](https://kgextension.readthedocs.io/en/latest/source/usage_generators.html#specific-relation-generator) - creates attributes from a specific direct relation e.g. `http://purl.org/dc/terms/subject` - [Hierarchy Relation](https://kgextension.readthedocs.io/en/latest/source/usage_generators.html#hierarchy-relation) - creates a hierarchy of the attributes with a user-defined hierarchy relation - [Custom SPARQL Generator](https://kgextension.readthedocs.io/en/latest/source/usage_generators.html#custom-sparql-generator) - creates additional attributes from a custom SPARQL query result For the running example, the `Specific Relation Generator` is used to retrieve the values from the `http://purl.org/dc/terms/subject` relation in DBpedia. 
**Note: This step can take several minutes!** ``` from kgextension.generator_sklearn import SpecificRelationGenerator generator = SpecificRelationGenerator(columns=['new_link'], direct_relation='http://purl.org/dc/terms/subject') df_enhanced = generator.fit_transform(df_enhanced) df_enhanced.head() df_enhanced.columns ``` Wow, now we have 2803 columns in our dataset which can be used to create a better classifier in the end. Lets try it out with the same split and `random_state` as as before: ``` # prepare the enhanced dataframe X=df_enhanced.drop(columns=['Name', 'Author', 'new_link']) X=X.apply(lambda x: np.where(x.isna(), False, x)) X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=1/3, stratify=y, random_state=42) clf = RandomForestClassifier(random_state=1235).fit(X_train, y_train) y_pred = clf.predict(X_test) print(classification_report(y_test, y_pred, target_names=label_binarizer.classes_)) ConfusionMatrixDisplay(confusion_matrix=confusion_matrix(y_test, y_pred), display_labels=label_binarizer.classes_).plot() plt.show() ``` For the class `Fiction`, we get F1-score of 0.93 instead of 0.81 (improvement of 0.12) - not bad. As a last step, we analyze which features are actually helpful: ``` pd.set_option('display.max_colwidth', None) # for full width column ind = np.argsort(clf.feature_importances_)[-15:] pd.DataFrame({ 'features': X.columns[ind], 'importance': clf.feature_importances_[ind] }).sort_values('importance', ascending=False) ``` Still the `Reviews` is the most important feature but on second and third position the additonal information help to improve the classifiers performance. In the evaluation above we do not include the name and author(therefore the one hot encoding was not necessary). Now we try it with the name to compare apples with apples. 
``` # prepare the enhanced dataframe import numpy as np X=df_enhanced.drop(columns=['new_link']) X=X.apply(lambda x: np.where(x.isna(), False, x)) X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.33, stratify=y, random_state=42) X_train_enc = ct.fit_transform(X_train) # one hot transformation of 'Name' and 'Author' X_test_enc = ct.transform(X_test) clf = RandomForestClassifier(random_state=1235).fit(X_train_enc, y_train) y_pred = clf.predict(X_test_enc) print(classification_report(y_test, y_pred, target_names=label_binarizer.classes_)) ConfusionMatrixDisplay(confusion_matrix=confusion_matrix(y_test, y_pred), display_labels=label_binarizer.classes_).plot() plt.show() ``` The f1-measure is still the same. As stated previously, the two steps `Linking` and `Generation` can be included in a pipeline. It is also noticable that the implemented caching helps to increase the runtime, if queries would be sent out multiple times. This also ensures that the endpoint (in this case DBpedia) is protected against repeated queries. ``` from sklearn.pipeline import Pipeline pipeline = Pipeline(steps = [('lookup_linker', linker), ('generator', generator)]) df_enhanced = pipeline.fit_transform(df_raw.drop(columns='Genre')) df_enhanced.head() ``` But there is much more included in the package which is not shown here e.g. - [Link Exploration](https://kgextension.readthedocs.io/en/latest/source/usage_link_exploration.html#) - extensively search the Linked Open Data (LOD) Cloud for URIs connected to the URIs obtained from linking step - [Hierarchical Feature Selection](https://kgextension.readthedocs.io/en/latest/source/usage_feature_selection.html) - in the generation step many features are created. Thus reducing them to only the relevant part is helpful. 
- [Schema Matching & Fusion](https://kgextension.readthedocs.io/en/latest/source/usage_matching_fusion.html) - in case different sources are used, schema matching helps to reduce the size of attributes So have a look there and play around with it. Have a lot of fun. In case you want to contact the contributors of the project, write an email to [kgproject20@gmail.com](mailto:kgproject20@gmail.com)
github_jupyter
# Protein-ligand Docking tutorial using BioExcel Building Blocks (biobb) ### -- *PDBe REST-API Version* -- *** This tutorial aims to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3LFA](https://www.rcsb.org/structure/3LFA)), a well-known **Protein Kinase enzyme**, in complex with the FDA-approved **Dasatinib**, (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), a small molecule **kinase inhibitor** used for the treatment of **lymphoblastic** or **chronic myeloid leukemia** with resistance or intolerance to prior therapy. The tutorial will guide you through the process of identifying the **active site cavity** (pocket) and the final prediction of the **protein-ligand complex**. Please note that **docking algorithms**, and in particular, **AutoDock Vina** program used in this tutorial, are **non-deterministic**. That means that results obtained when running the workflow **could be diferent** from the ones we obtained during the writing of this tutorial (see [AutoDock Vina manual](http://vina.scripps.edu/manual.html)). We invite you to try the docking process several times to verify this behaviour. *** <div style="background:#b5e0dd; padding: 15px;"><strong>Important:</strong> it is recommended to execute this tutorial step by step (not as a single workflow execution, <strong><em>Run All</em></strong> mode), as it has interactive selections.</div> ## Settings ### Biobb modules used - [biobb_io](https://github.com/bioexcel/biobb_io): Tools to fetch biomolecular data from public databases. - [biobb_structure_utils](https://github.com/bioexcel/biobb_structure_utils): Tools to modify or extract information from a PDB structure file. 
- [biobb_chemistry](https://github.com/bioexcel/biobb_chemistry): Tools to perform chemoinformatics processes. - [biobb_vs](https://github.com/bioexcel/biobb_vs): Tools to perform virtual screening studies. ### Auxiliar libraries used - [nb_conda_kernels](https://github.com/Anaconda-Platform/nb_conda_kernels): Enables a Jupyter Notebook or JupyterLab application in one conda environment to access kernels for Python, R, and other languages found in other environments. - [nglview](http://nglviewer.org/#nglview): Jupyter/IPython widget to interactively view molecular structures and trajectories in notebooks. - [ipywidgets](https://github.com/jupyter-widgets/ipywidgets): Interactive HTML widgets for Jupyter notebooks and the IPython kernel. ### Conda Installation and Launch ```console git clone https://github.com/bioexcel/biobb_wf_virtual-screening.git cd biobb_wf_virtual-screening conda env create -f conda_env/environment.yml conda activate biobb_VS_tutorial jupyter-nbextension enable --py --user widgetsnbextension jupyter-nbextension enable --py --user nglview jupyter-notebook biobb_wf_virtual-screening/notebooks/ebi_api/wf_vs_ebi_api.ipynb ``` *** ## Pipeline steps 1. [Input Parameters](#input) 2. [Fetching PDB Structure](#fetch) 3. [Extract Protein Structure](#extractProtein) 4. [Computing Protein Cavities (PDBe REST-API)](#pdbe) 5. [Select Binding Site](#pdbeSelect) 6. [Generating Cavity Box ](#cavityBox) 7. [Downloading Small Molecule](#downloadSmallMolecule) 8. [Converting Small Molecule](#sdf2pdb) 9. [Preparing Small Molecule (ligand) for Docking](#ligand_pdb2pdbqt) 10. [Preparing Target Protein for Docking](#protein_pdb2pdbqt) 11. [Running the Docking](#docking) 12. [Extract a Docking Pose](#extractPose) 13. [Converting Ligand Pose to PDB format](#pdbqt2pdb) 14. [Superposing Ligand Pose to the Target Protein Structure](#catPdb) 15. [Comparing final result with experimental structure](#viewFinal) 16. 
[Questions & Comments](#questions) *** <img src="https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png" alt="Bioexcel2 logo" title="Bioexcel2 logo" width="400" /> *** <a id="input"></a> ## Input parameters **Input parameters** needed: - **pdb_code**: PDB code of the experimental complex structure (if exists).<br> In this particular example, the **p38α** structure in complex with the **Dasatinib drug** was experimentally solved and deposited in the **PDB database** under the **3LFA** PDB code. The protein structure from this PDB file will be used as a **target protein** for the **docking process**, after stripping the **small molecule**. An **APO structure**, or any other structure from the **p38α** [cluster 100](https://www.rcsb.org/search?request=%7B%22query%22%3A%7B%22type%22%3A%22terminal%22%2C%22service%22%3A%22sequence%22%2C%22parameters%22%3A%7B%22target%22%3A%22pdb_protein_sequence%22%2C%22value%22%3A%22RPTFYRQELNKTIWEVPERYQNLSPVGSGAYGSVCAAFDTKTGLRVAVKKLSRPFQSIIHAKRTYRELRLLKHMKHENVIGLLDVFTPARSLEEFNDVYLVTHLMGADLNNIVKCQKLTDDHVQFLIYQILRGLKYIHSADIIHRDLKPSNLAVNEDCELKILDFGLARHTDDEMTGYVATRWYRAPEIMLNWMHYNQTVDIWSVGCIMAELLTGRTLFPGTDHIDQLKLILRLVGTPGAELLKKISSESARNYIQSLTQMPKMNFANVFIGANPLAVDLLEKMLVLDSDKRITAAQALAHAYFAQYHDPDDEPVADPYDQSFESRDLLIDEWKSLTYDEVISFVPPP%22%2C%22identity_cutoff%22%3A1%2C%22evalue_cutoff%22%3A0.1%7D%2C%22node_id%22%3A0%7D%2C%22return_type%22%3A%22polymer_entity%22%2C%22request_options%22%3A%7B%22pager%22%3A%7B%22start%22%3A0%2C%22rows%22%3A25%7D%2C%22scoring_strategy%22%3A%22combined%22%2C%22sort%22%3A%5B%7B%22sort_by%22%3A%22score%22%2C%22direction%22%3A%22desc%22%7D%5D%7D%2C%22request_info%22%3A%7B%22src%22%3A%22ui%22%2C%22query_id%22%3A%22bea5861f8b38a9e25a3e626b39d6bcbf%22%7D%7D) (sharing a 100% of sequence similarity with the **p38α** structure) could also be used as a **target protein**. 
This structure of the **protein-ligand complex** will be also used in the last step of the tutorial to check **how close** the resulting **docking pose** is from the known **experimental structure**. ----- - **ligandCode**: Ligand PDB code (3-letter code) for the small molecule (e.g. 1N1).<br> In this particular example, the small molecule chosen for the tutorial is the FDA-approved drug **Dasatinib** (PDB Code 1N1), a **tyrosine kinase inhibitor**, used in [lymphoblastic or chronic myeloid leukemia](https://go.drugbank.com/drugs/DB01254). ``` import nglview import ipywidgets pdb_code = "3LFA" # P38 + Dasatinib ligand_code = "1N1" # Dasatinib ``` <a id="fetch"></a> *** ## Fetching PDB structure Downloading **PDB structure** with the **protein molecule** from the PDBe database.<br> Alternatively, a **PDB file** can be used as starting structure. <br> *** **Building Blocks** used: - [Pdb](https://biobb-io.readthedocs.io/en/latest/api.html#module-api.pdb) from **biobb_io.api.pdb** *** ``` from biobb_io.api.pdb import pdb download_pdb = "download.pdb" prop = { "pdb_code": pdb_code, "filter": ["ATOM", "HETATM"] } pdb(output_pdb_path=download_pdb, properties=prop) ``` <a id="vis3D"></a> ### Visualizing 3D structure Visualizing the downloaded/given **PDB structure** using **NGL**.<br><br> Note (and try to identify) the **Dasatinib small molecule (1N1)** and the **detergent (β-octyl glucoside) (BOG)** used in the experimental reservoir solution to obtain the crystal. ``` view = nglview.show_structure_file(download_pdb, default=True) view.center() view._remote_call('setSize', target='Widget', args=['','600px']) view.render_image() view.download_image(filename='ngl1.png') view ``` <img src='ngl1.png'></img> <a id="extractProtein"></a> *** ## Extract Protein Structure Extract **protein structure** from the **downloaded PDB file**. Removing **any extra molecule** (ligands, ions, water molecules). 
<br><br> The **protein structure** will be used as a **target** in the **protein-ligand docking process**. *** **Building Blocks** used: - [extract_molecule](https://biobb-structure-utils.readthedocs.io/en/latest/utils.html#module-utils.extract_molecule) from **biobb_structure_utils.utils.extract_molecule** *** ``` from biobb_structure_utils.utils.extract_molecule import extract_molecule pdb_protein = "pdb_protein.pdb" extract_molecule(input_structure_path=download_pdb, output_molecule_path = pdb_protein) ``` <a id="vis3D"></a> ### Visualizing 3D structure Visualizing the downloaded/given **PDB structure** using **NGL**.<br><br> Note that the **small molecules** included in the original structure are now gone. The new structure only contains the **protein molecule**, which will be used as a **target** for the **protein-ligand docking**. ``` view = nglview.show_structure_file(pdb_protein, default=False) view.add_representation(repr_type='cartoon', selection='not het', colorScheme = 'atomindex') view.center() view._remote_call('setSize', target='Widget', args=['','600px']) view.render_image() view.download_image(filename='ngl2.png') view ``` <img src='ngl2.png'></img> <a id="pdbe"></a> *** ## Computing Protein Cavities (PDBe REST-API) Identifying the **protein cavities** (pockets) using the [**PDBe REST-API**](https://www.ebi.ac.uk/pdbe/api/doc/).<br> These **cavities** will be then used in the **docking procedure** to try to find the **best region of the protein surface** where the small molecule can **bind**. <br><br> Although in this particular example we already know the **binding site** region, as we started from a **protein-ligand complex** structure where the ligand was located in the same **binding site** as **Dasatinib** is binding, the **PDBe REST-API** can be used to automatically identify and extract the possible **binding sites** of our **target protein** from the PDB file annotations. 
This **REST-API endpoint** provides details on **binding sites** from the PDB files (or mmcif) information, such as **ligands**, **residues in the site** or **description of the site**. <br> *** **Building Blocks** used: - [api_binding_site](https://biobb-io.readthedocs.io/en/latest/api.html#module-api.api_binding_site) from **biobb_io.api.api_binding_site** *** ``` from biobb_io.api.api_binding_site import api_binding_site residues_json = "residues.json" prop = { "pdb_code": pdb_code } api_binding_site(output_json_path=residues_json, properties=prop) ``` <a id="checkJson"></a> ### Checking binding site output (json) Checking the **PDBe REST-API** output from the **json file**. Every **pocket** has a separated entry in the json output, with information such as: **residues forming the cavity, details of the cavity**, and **evidence used to detect the cavity**. ``` import json with open(residues_json) as json_file: data = json.load(json_file) print(json.dumps(data, indent=4)) ``` <a id="pdbeSelect"></a> ### Select binding site (cavity) Select a specific **binding site** (cavity) from the obtained list to be used in the **docking procedure**. <br> If the **PDBe REST-API** has successfully identified the correct **binding site**, which we know from the original **protein-ligand structure**, it just needs to be selected. In this particular example, the cavity we are interested in is the **binding site number 1**. 
<br> Choose a **binding site** from the **DropDown list**: ``` bindingSites = {} bsites = [] for i, item in enumerate(data[pdb_code.lower()]): bindingSites[i] = [] bsites.append(('binding_site' + str(i), i)) for res in item['site_residues']: bindingSites[i].append(res['author_residue_number']) # print('Residue id\'s for binding site %d: %s' % (i, ', '.join(str(v) for v in bindingSites[i]))) mdsel = ipywidgets.Dropdown( options=bsites, description='Binding Site:', disabled=False, ) display(mdsel) ``` <a id="viewPockets"></a> ### Visualizing selected binding site (cavity) Visualizing the selected **binding site** (cavity) using **NGL viewer**.<br> **Protein residues** forming the **cavity** are represented in **pink-colored surface**. ``` view = nglview.show_structure_file(download_pdb, default=False) view.add_representation(repr_type='cartoon', selection='not het', opacity=.2, color='#cccccc') view.add_representation(repr_type='surface', selection=', '.join(str(v) for v in bindingSites[mdsel.value]), color='pink', lowResolution= True, # 0: low resolution smooth=1 ) view.center() view._remote_call('setSize', target='Widget', args=['','600px']) view.render_image() view.download_image(filename='ngl3.png') view ``` <img src='ngl3.png'></img> <a id="cavityBox"></a> *** ## Generating Cavity Box Generating a **box** surrounding the selected **protein cavity** (pocket), to be used in the **docking procedure**. 
The **box** is defining the region on the **surface** of the **protein target** where the **docking program** should explore a possible **ligand dock**.<br> An offset of **12 Angstroms** is used to generate a **big enough box** to fit the **small molecule** and its possible rotations.<br> *** **Building Blocks** used: - [box_residues](https://biobb-vs.readthedocs.io/en/latest/utils.html#module-utils.box_residues) from **biobb_vs.utils.box_residues** *** ``` from biobb_vs.utils.box_residues import box_residues output_box = "box.pdb" prop = { "resid_list": bindingSites[mdsel.value], "offset": 12, "box_coordinates": True } box_residues(#input_pdb_path = pdb_single_chain, input_pdb_path = download_pdb, output_pdb_path = output_box, properties=prop) ``` <a id="vis3D"></a> ### Visualizing binding site box in 3D structure Visualizing the **protein structure**, the **selected cavity**, and the **generated box**, all together using **NGL** viewer. Using the **original structure** with the **small ligand** inside (Dasatinib, [1N1](https://www.rcsb.org/ligand/1N1)), to check that the **selected cavity** is placed in the **same region** as the **original ligand**. 
``` view = nglview.NGLWidget() s = view.add_component(download_pdb) b = view.add_component(output_box) atomPair = [ [ "9999:Z.ZN1", "9999:Z.ZN2" ], [ "9999:Z.ZN2", "9999:Z.ZN4" ], [ "9999:Z.ZN4", "9999:Z.ZN3" ], [ "9999:Z.ZN3", "9999:Z.ZN1" ], [ "9999:Z.ZN5", "9999:Z.ZN6" ], [ "9999:Z.ZN6", "9999:Z.ZN8" ], [ "9999:Z.ZN8", "9999:Z.ZN7" ], [ "9999:Z.ZN7", "9999:Z.ZN5" ], [ "9999:Z.ZN1", "9999:Z.ZN5" ], [ "9999:Z.ZN2", "9999:Z.ZN6" ], [ "9999:Z.ZN3", "9999:Z.ZN7" ], [ "9999:Z.ZN4", "9999:Z.ZN8" ] ] # structure s.add_representation(repr_type='cartoon', selection='not het', color='#cccccc', opacity=.2) # ligands box b.add_representation(repr_type='ball+stick', selection='9999', color='pink', aspectRatio = 8) # lines box b.add_representation(repr_type='distance', atomPair= atomPair, labelVisible=False, color= 'black') # residues s.add_representation(repr_type='surface', selection=', '.join(str(v) for v in bindingSites[mdsel.value]), color='skyblue', lowResolution= True, # 0: low resolution smooth=1, surfaceType= 'av', contour=True, opacity=0.4, useWorker= True, wrap= True) view.center() view._remote_call('setSize', target='Widget', args=['','600px']) view.render_image() view.download_image(filename='ngl4.png') view ``` <img src='ngl4.png'></img> <a id="downloadSmallMolecule"></a> *** ## Downloading Small Molecule Downloading the desired **small molecule** to be used in the **docking procedure**. 
<br> In this particular example, the small molecule of interest is the FDA-approved drug **Dasatinib**, with PDB code **1N1**.<br> *** **Building Blocks** used: - [ideal_sdf](https://biobb-io.readthedocs.io/en/latest/api.html#module-api.ideal_sdf) from **biobb_io.api.ideal_sdf** *** ``` from biobb_io.api.ideal_sdf import ideal_sdf sdf_ideal = "ideal.sdf" prop = { "ligand_code": ligand_code } ideal_sdf(output_sdf_path=sdf_ideal, properties=prop) ``` <a id="sdf2pdb"></a> *** ## Converting Small Molecule Converting the desired **small molecule** to be used in the **docking procedure**, from **SDF** format to **PDB** format using the **OpenBabel chemoinformatics** tool. <br> *** **Building Blocks** used: - [babel_convert](https://biobb-chemistry.readthedocs.io/en/latest/babelm.html#module-babelm.babel_convert) from **biobb_chemistry.babelm.babel_convert** *** ``` from biobb_chemistry.babelm.babel_convert import babel_convert ligand = "ligand.pdb" prop = { "input_format": "sdf", "output_format": "pdb", "obabel_path": "obabel" } babel_convert(input_path = sdf_ideal, output_path = ligand, properties=prop) ``` <a id="ligand_pdb2pdbqt"></a> *** ## Preparing Small Molecule (ligand) for Docking Preparing the **small molecule** structure for the **docking procedure**. Converting the **PDB file** to a **PDBQT file** format (AutoDock PDBQT: Protein Data Bank, with Partial Charges (Q), & Atom Types (T)), needed by **AutoDock Vina**. <br><br> The process adds **partial charges** and **atom types** to every atom. Besides, the **ligand flexibility** is also defined in the information contained in the file. The concept of **"torsion tree"** is used to represent the **rigid and rotatable** pieces of the **ligand**. 
A rigid piece (**"root"**) is defined, with zero or more rotatable pieces (**"branches"**), hanging from the root, and defining the **rotatable bonds**.<br><br> More info about **PDBQT file format** can be found in the [AutoDock FAQ pages](http://autodock.scripps.edu/faqs-help/faq/what-is-the-format-of-a-pdbqt-file). *** **Building Blocks** used: - [babel_convert](https://biobb-chemistry.readthedocs.io/en/latest/babelm.html#module-babelm.babel_convert) from **biobb_chemistry.babelm.babel_convert** *** ``` from biobb_chemistry.babelm.babel_convert import babel_convert prep_ligand = "prep_ligand.pdbqt" prop = { "input_format": "pdb", "output_format": "pdbqt", "obabel_path": "obabel" } babel_convert(input_path = ligand, output_path = prep_ligand, properties=prop) ``` <a id="viewDrug"></a> ### Visualizing small molecule (drug) Visualizing the desired **drug** to be docked to the **target protein**, using **NGL viewer**.<br> - **Left panel**: **PDB-formatted** file, with all hydrogen atoms. - **Right panel**: **PDBqt-formatted** file (AutoDock Vina-compatible), with **united atom model** (only polar hydrogens are placed in the structures to correctly type heavy atoms as hydrogen bond donors). ``` from ipywidgets import HBox v0 = nglview.show_structure_file(ligand) v1 = nglview.show_structure_file(prep_ligand) v0._set_size('500px', '') v1._set_size('500px', '') def on_change(change): v1._set_camera_orientation(change['new']) v0.observe(on_change, ['_camera_orientation']) HBox([v0, v1]) ``` <img src='ngl5.png'></img> <a id="protein_pdb2pdbqt"></a> *** ## Preparing Target Protein for Docking Preparing the **target protein** structure for the **docking procedure**. Converting the **PDB file** to a **PDBqt file**, needed by **AutoDock Vina**. Similarly to the previous step, the process adds **partial charges** and **atom types** to every target protein atom. 
In this case, however, we are not taking into account **receptor flexibility**, although **Autodock Vina** allows some limited flexibility of selected **receptor side chains** [(see the documentation)](https://autodock-vina.readthedocs.io/en/latest/docking_flexible.html).<br> *** **Building Blocks** used: - [str_check_add_hydrogens](https://biobb-structure-utils.readthedocs.io/en/latest/utils.html#utils-str-check-add-hydrogens-module) from **biobb_structure_utils.utils.str_check_add_hydrogens** *** ``` from biobb_structure_utils.utils.str_check_add_hydrogens import str_check_add_hydrogens prep_receptor = "prep_receptor.pdbqt" prop = { "charges": True, "mode": "auto" } str_check_add_hydrogens( input_structure_path = pdb_protein, output_structure_path = prep_receptor, properties=prop) ``` <a id="docking"></a> *** ## Running the Docking Running the **docking process** with the prepared files: - **ligand** - **target protein** - **binding site box**<br> using **AutoDock Vina**. <br> *** **Building Blocks** used: - [autodock_vina_run](https://biobb-vs.readthedocs.io/en/latest/vina.html#module-vina.autodock_vina_run) from **biobb_vs.vina.autodock_vina_run** *** ``` from biobb_vs.vina.autodock_vina_run import autodock_vina_run output_vina_pdbqt = "output_vina.pdbqt" output_vina_log = "output_vina.log" autodock_vina_run(input_ligand_pdbqt_path = prep_ligand, input_receptor_pdbqt_path = prep_receptor, input_box_path = output_box, output_pdbqt_path = output_vina_pdbqt, output_log_path = output_vina_log) ``` <a id="viewDocking"></a> ### Visualizing docking output poses Visualizing the generated **docking poses** for the **ligand**, using **NGL viewer**. <br> - **Left panel**: **Docking poses** displayed with atoms coloured by **partial charges** and **licorice** representation. - **Right panel**: **Docking poses** displayed with atoms coloured by **element** and **ball-and-stick** representation. 
``` models = 'all' v0 = nglview.show_structure_file(output_vina_pdbqt, default=False) v0.add_representation(repr_type='licorice', selection=models, colorScheme= 'partialCharge') v0.center() v1 = nglview.show_structure_file(output_vina_pdbqt, default=False) v1.add_representation(repr_type='ball+stick', selection=models) v1.center() v0._set_size('500px', '') v1._set_size('500px', '') def on_change(change): v1._set_camera_orientation(change['new']) v0.observe(on_change, ['_camera_orientation']) HBox([v0, v1]) ``` <img src='ngl6.png'></img> <a id="selectPose"></a> ### Select Docking Pose Select a specific **docking pose** from the output list for **visual inspection**. <br> Choose a **docking pose** from the **DropDown list**. ``` from Bio.PDB import PDBParser parser = PDBParser(QUIET = True) structure = parser.get_structure("protein", output_vina_pdbqt) models = [] for i, m in enumerate(structure): models.append(('model' + str(i), i)) mdsel = ipywidgets.Dropdown( options=models, description='Sel. model:', disabled=False, ) display(mdsel) ``` <a id="extractPose"></a> *** ## Extract a Docking Pose Extract a specific **docking pose** from the **docking** outputs. <br> *** **Building Blocks** used: - [extract_model_pdbqt](https://biobb-vs.readthedocs.io/en/latest/utils.html#module-utils.extract_model_pdbqt) from **biobb_vs.utils.extract_model_pdbqt** *** ``` from biobb_vs.utils.extract_model_pdbqt import extract_model_pdbqt output_pdbqt_model = "output_model.pdbqt" prop = { "model": mdsel.value + 1 } extract_model_pdbqt(input_pdbqt_path = output_vina_pdbqt, output_pdbqt_path = output_pdbqt_model, properties=prop) ``` <a id="pdbqt2pdb"></a> *** ## Converting Ligand Pose to PDB format Converting **ligand pose** to **PDB format**. 
<br> *** **Building Blocks** used: - [babel_convert](https://biobb-chemistry.readthedocs.io/en/latest/babelm.html#module-babelm.babel_convert) from **biobb_chemistry.babelm.babel_convert** *** ``` from biobb_chemistry.babelm.babel_convert import babel_convert output_pdb_model = "output_model.pdb" prop = { "input_format": "pdbqt", "output_format": "pdb", "obabel_path": "obabel" } babel_convert(input_path = output_pdbqt_model, output_path = output_pdb_model, properties=prop) ``` <a id="catPdb"></a> *** ## Superposing Ligand Pose to the Target Protein Structure Superposing **ligand pose** to the target **protein structure**, in order to see the **protein-ligand docking conformation**. <br><br>Building a new **PDB file** with both **target and ligand** (binding pose) structures. <br> *** **Building Blocks** used: - [cat_pdb](https://biobb-structure-utils.readthedocs.io/en/latest/utils.html#module-utils.cat_pdb) from **biobb_structure_utils.utils.cat_pdb** *** ``` from biobb_structure_utils.utils.cat_pdb import cat_pdb output_structure = "output_structure.pdb" cat_pdb(#input_structure1 = pdb_single_chain, input_structure1 = download_pdb, input_structure2 = output_pdb_model, output_structure_path = output_structure) ``` <a id="viewFinal"></a> ### Comparing final result with experimental structure Visualizing and comparing the generated **protein-ligand** complex with the original **protein-ligand conformation** (downloaded from the PDB database), using **NGL viewer**. <br> - **Licorice, element-colored** representation: **Experimental pose**. - **Licorice, green-colored** representation: **Docking pose**. <br> Note that outputs from **AutoDock Vina** don't contain all the atoms, as the program works with a **united-atom representation** (i.e. only polar hydrogens). 
``` view = nglview.NGLWidget() # v1 = Experimental Structure v1 = view.add_component(download_pdb) v1.clear() v1.add_representation(repr_type='licorice', selection='[1N1]', radius=0.5) # v2 = Docking result v2 = view.add_component(output_structure) v2.clear() v2.add_representation(repr_type='cartoon', colorScheme = 'sstruc') v2.add_representation(repr_type='licorice', radius=0.5, color= 'green', selection='UNL') view._remote_call('setSize', target='Widget', args=['','600px']) view # align reference and output code = """ var stage = this.stage; var clist_len = stage.compList.length; var i = 0; var s = []; for(i = 0; i <= clist_len; i++){ if(stage.compList[i] != undefined && stage.compList[i].structure != undefined) { s.push(stage.compList[i]) } } NGL.superpose(s[0].structure, s[1].structure, true, ".CA") s[ 0 ].updateRepresentations({ position: true }) s[ 0 ].autoView() """ view._execute_js_code(code) view.render_image() view.download_image(filename='ngl7.png') view ``` <img src='ngl7.png'></img> *** <a id="questions"></a> ## Questions & Comments Questions, issues, suggestions and comments are really welcome! * GitHub issues: * [https://github.com/bioexcel/biobb](https://github.com/bioexcel/biobb) * BioExcel forum: * [https://ask.bioexcel.eu/c/BioExcel-Building-Blocks-library](https://ask.bioexcel.eu/c/BioExcel-Building-Blocks-library)
github_jupyter
# EDA To Prediction (DieTanic) ### *Sometimes life has a cruel sense of humor, giving you the thing you always wanted at the worst time possible.* -Lisa Kleypas The sinking of the Titanic is one of the most infamous shipwrecks in history. On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. That's why the name **DieTanic**. This is a very unforgettable disaster that no one in the world can forget. It took about $7.5 million to build the Titanic, and it sank under the ocean due to the collision. The Titanic Dataset is a very good dataset for beginners to start a journey in data science and participate in competitions in Kaggle. The Objective of this notebook is to give an **idea of what the workflow in any predictive modeling problem looks like**. How do we check features, how do we add new features, and some Machine Learning Concepts. I have tried to keep the notebook as basic as possible so that even newbies can understand every phase of it. If You Like the notebook and think that it helped you..**PLEASE UPVOTE**. It will keep me motivated. ## Contents of the Notebook: #### Part1: Exploratory Data Analysis(EDA): 1)Analysis of the features. 2)Finding any relations or trends considering multiple features. #### Part2: Feature Engineering and Data Cleaning: 1)Adding a few new features. 2)Removing redundant features. 3)Converting features into suitable form for modeling. #### Part3: Predictive Modeling 1)Running Basic Algorithms. 2)Cross Validation. 3)Ensembling. 4)Important Features Extraction. ## Part1: Exploratory Data Analysis(EDA) ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns plt.style.use('fivethirtyeight') import warnings warnings.filterwarnings('ignore') %matplotlib inline data=pd.read_csv('../input/train.csv') data.head() data.isnull().sum() #checking for total null values ``` The **Age, Cabin and Embarked** have null values. 
I will try to fix them. ### How many Survived?? ``` f,ax=plt.subplots(1,2,figsize=(18,8)) data['Survived'].value_counts().plot.pie(explode=[0,0.1],autopct='%1.1f%%',ax=ax[0],shadow=True) ax[0].set_title('Survived') ax[0].set_ylabel('') sns.countplot('Survived',data=data,ax=ax[1]) ax[1].set_title('Survived') plt.show() ``` It is evident that not many passengers survived the accident. Out of 891 passengers in training set, only around 350 survived i.e Only **38.4%** of the total training set survived the crash. We need to dig down more to get better insights from the data and see which categories of the passengers did survive and who didn't. We will try to check the survival rate by using the different features of the dataset. Some of the features being Sex, Port Of Embarcation, Age,etc. First let us understand the different types of features. ## Types Of Features ### Categorical Features: A categorical variable is one that has two or more categories and each value in that feature can be categorised by them.For example, gender is a categorical variable having two categories (male and female). Now we cannot sort or give any ordering to such variables. They are also known as **Nominal Variables**. **Categorical Features in the dataset: Sex,Embarked.** ### Ordinal Features: An ordinal variable is similar to categorical values, but the difference between them is that we can have relative ordering or sorting between the values. For eg: If we have a feature like **Height** with values **Tall, Medium, Short**, then Height is a ordinal variable. Here we can have a relative sort in the variable. **Ordinal Features in the dataset: PClass** ### Continous Feature: A feature is said to be continous if it can take values between any two points or between the minimum or maximum values in the features column. 
**Continuous Features in the dataset: Age** ## Analysing The Features ## Sex--> Categorical Feature ``` data.groupby(['Sex','Survived'])['Survived'].count() f,ax=plt.subplots(1,2,figsize=(18,8)) data[['Sex','Survived']].groupby(['Sex']).mean().plot.bar(ax=ax[0]) ax[0].set_title('Survived vs Sex') sns.countplot('Sex',hue='Survived',data=data,ax=ax[1]) ax[1].set_title('Sex:Survived vs Dead') plt.show() ``` This looks interesting. The number of men on the ship is a lot more than the number of women. Still the number of women saved is almost twice the number of males saved. The survival rate for a **woman on the ship is around 75% while that for men is around 18-19%.** This looks to be a **very important** feature for modeling. But is it the best?? Let's check other features. ## Pclass --> Ordinal Feature ``` pd.crosstab(data.Pclass,data.Survived,margins=True).style.background_gradient(cmap='summer_r') f,ax=plt.subplots(1,2,figsize=(18,8)) data['Pclass'].value_counts().plot.bar(color=['#CD7F32','#FFDF00','#D3D3D3'],ax=ax[0]) ax[0].set_title('Number Of Passengers By Pclass') ax[0].set_ylabel('Count') sns.countplot('Pclass',hue='Survived',data=data,ax=ax[1]) ax[1].set_title('Pclass:Survived vs Dead') plt.show() ``` People say **Money Can't Buy Everything**. But we can clearly see that Passengers Of Pclass 1 were given a very high priority during rescue. Even though the number of Passengers in Pclass 3 was a lot higher, still the number of survivors from them is very low, somewhere around **25%**. For Pclass 1 %survived is around **63%** while for Pclass2 it is around **48%**. So money and status matter. Such a materialistic world. Let's dive in a little bit more and check for other interesting observations. Let's check the survival rate with **Sex and Pclass** Together. 
``` pd.crosstab([data.Sex,data.Survived],data.Pclass,margins=True).style.background_gradient(cmap='summer_r') sns.factorplot('Pclass','Survived',hue='Sex',data=data) plt.show() ``` We use **FactorPlot** in this case, because they make the separation of categorical values easy. Looking at the **CrossTab** and the **FactorPlot**, we can easily infer that survival for **Women from Pclass1** is about **95-96%**, as only 3 out of 94 Women from Pclass1 died. It is evident that irrespective of Pclass, Women were given first priority during rescue. Even Men from Pclass1 have a very low survival rate. Looks like Pclass is also an important feature. Let's analyse other features. ## Age--> Continuous Feature ``` print('Oldest Passenger was of:',data['Age'].max(),'Years') print('Youngest Passenger was of:',data['Age'].min(),'Years') print('Average Age on the ship:',data['Age'].mean(),'Years') f,ax=plt.subplots(1,2,figsize=(18,8)) sns.violinplot("Pclass","Age", hue="Survived", data=data,split=True,ax=ax[0]) ax[0].set_title('Pclass and Age vs Survived') ax[0].set_yticks(range(0,110,10)) sns.violinplot("Sex","Age", hue="Survived", data=data,split=True,ax=ax[1]) ax[1].set_title('Sex and Age vs Survived') ax[1].set_yticks(range(0,110,10)) plt.show() ``` #### Observations: 1)The number of children increases with Pclass and the survival rate for passengers below Age 10 (i.e. children) looks to be good irrespective of the Pclass. 2)Survival chances for Passengers aged 20-50 from Pclass1 is high and is even better for Women. 3)For males, the survival chances decrease with an increase in age. As we had seen earlier, the Age feature has **177** null values. To replace these NaN values, we can assign them the mean age of the dataset. But the problem is, there were many people with many different ages. We just can't assign a 4 year kid with the mean age that is 29 years. Is there any way to find out which age band the passenger lies in?? **Bingo!!!!**, we can check the **Name** feature. 
Looking upon the feature, we can see that the names have a salutation like Mr or Mrs. Thus we can assign the mean values of Mr and Mrs to the respective groups. **''What's In A Name??''**---> **Feature** :p ``` data['Initial']=0 for i in data: data['Initial']=data.Name.str.extract('([A-Za-z]+)\.') #lets extract the Salutations ``` Okay so here we are using the Regex: **[A-Za-z]+)\.**. So what it does is, it looks for strings which lie between **A-Z or a-z** and followed by a **.(dot)**. So we successfully extract the Initials from the Name. ``` pd.crosstab(data.Initial,data.Sex).T.style.background_gradient(cmap='summer_r') #Checking the Initials with the Sex ``` Okay so there are some misspelled Initials like Mlle or Mme that stand for Miss. I will replace them with Miss and same thing for other values. ``` data['Initial'].replace(['Mlle','Mme','Ms','Dr','Major','Lady','Countess','Jonkheer','Col','Rev','Capt','Sir','Don'],['Miss','Miss','Miss','Mr','Mr','Mrs','Mrs','Other','Other','Other','Mr','Mr','Mr'],inplace=True) data.groupby('Initial')['Age'].mean() #lets check the average age by Initials ``` ### Filling NaN Ages ``` ## Assigning the NaN Values with the Ceil values of the mean ages data.loc[(data.Age.isnull())&(data.Initial=='Mr'),'Age']=33 data.loc[(data.Age.isnull())&(data.Initial=='Mrs'),'Age']=36 data.loc[(data.Age.isnull())&(data.Initial=='Master'),'Age']=5 data.loc[(data.Age.isnull())&(data.Initial=='Miss'),'Age']=22 data.loc[(data.Age.isnull())&(data.Initial=='Other'),'Age']=46 data.Age.isnull().any() #So no null values left finally f,ax=plt.subplots(1,2,figsize=(20,10)) data[data['Survived']==0].Age.plot.hist(ax=ax[0],bins=20,edgecolor='black',color='red') ax[0].set_title('Survived= 0') x1=list(range(0,85,5)) ax[0].set_xticks(x1) data[data['Survived']==1].Age.plot.hist(ax=ax[1],color='green',bins=20,edgecolor='black') ax[1].set_title('Survived= 1') x2=list(range(0,85,5)) ax[1].set_xticks(x2) plt.show() ``` ### Observations: 1)The Toddlers(age<5) were 
saved in large numbers(The Women and Child First Policy). 2)The oldest Passenger was saved(80 years). 3)Maximum number of deaths were in the age group of 30-40. ``` sns.factorplot('Pclass','Survived',col='Initial',data=data) plt.show() ``` The Women and Child first policy thus holds true irrespective of the class. ## Embarked--> Categorical Value ``` pd.crosstab([data.Embarked,data.Pclass],[data.Sex,data.Survived],margins=True).style.background_gradient(cmap='summer_r') ``` ### Chances for Survival by Port Of Embarkation ``` sns.factorplot('Embarked','Survived',data=data) fig=plt.gcf() fig.set_size_inches(5,3) plt.show() ``` The chances for survival for Port C is highest around 0.55 while it is lowest for S. ``` f,ax=plt.subplots(2,2,figsize=(20,15)) sns.countplot('Embarked',data=data,ax=ax[0,0]) ax[0,0].set_title('No. Of Passengers Boarded') sns.countplot('Embarked',hue='Sex',data=data,ax=ax[0,1]) ax[0,1].set_title('Male-Female Split for Embarked') sns.countplot('Embarked',hue='Survived',data=data,ax=ax[1,0]) ax[1,0].set_title('Embarked vs Survived') sns.countplot('Embarked',hue='Pclass',data=data,ax=ax[1,1]) ax[1,1].set_title('Embarked vs Pclass') plt.subplots_adjust(wspace=0.2,hspace=0.5) plt.show() ``` ### Observations: 1)Maximum passenegers boarded from S. Majority of them being from Pclass3. 2)The Passengers from C look to be lucky as a good proportion of them survived. The reason for this maybe the rescue of all the Pclass1 and Pclass2 Passengers. 3)The Embark S looks to the port from where majority of the rich people boarded. Still the chances for survival is low here, that is because many passengers from Pclass3 around **81%** didn't survive. 4)Port Q had almost 95% of the passengers were from Pclass3. ``` sns.factorplot('Pclass','Survived',hue='Sex',col='Embarked',data=data) plt.show() ``` ### Observations: 1)The survival chances are almost 1 for women for Pclass1 and Pclass2 irrespective of the Pclass. 
2)Port S looks to be very unlucky for Pclass3 Passengers as the survival rate for both men and women is very low.**(Money Matters)** 3)Port Q looks to be the unluckiest for Men, as almost all were from Pclass 3. ### Filling Embarked NaN As we saw that maximum passengers boarded from Port S, we replace NaN with S. ``` data['Embarked'].fillna('S',inplace=True) data.Embarked.isnull().any()# Finally No NaN values ``` ## SibSp-->Discrete Feature This feature represents whether a person is alone or with his family members. Sibling = brother, sister, stepbrother, stepsister Spouse = husband, wife ``` pd.crosstab([data.SibSp],data.Survived).style.background_gradient(cmap='summer_r') f,ax=plt.subplots(1,2,figsize=(20,8)) sns.barplot('SibSp','Survived',data=data,ax=ax[0]) ax[0].set_title('SibSp vs Survived') sns.factorplot('SibSp','Survived',data=data,ax=ax[1]) ax[1].set_title('SibSp vs Survived') plt.close(2) plt.show() pd.crosstab(data.SibSp,data.Pclass).style.background_gradient(cmap='summer_r') ``` ### Observations: The barplot and factorplot show that if a passenger is alone onboard with no siblings, he has a 34.5% survival rate. The graph roughly decreases if the number of siblings increases. This makes sense. That is, if I have a family on board, I will try to save them instead of saving myself first. Surprisingly the survival for families with 5-8 members is **0%**. The reason may be Pclass?? The reason is **Pclass**. The crosstab shows that passengers with SibSp>3 were all in Pclass3. It is evident that all the large families in Pclass3(>3) died. ## Parch ``` pd.crosstab(data.Parch,data.Pclass).style.background_gradient(cmap='summer_r') ``` The crosstab again shows that larger families were in Pclass3. 
``` f,ax=plt.subplots(1,2,figsize=(20,8)) sns.barplot('Parch','Survived',data=data,ax=ax[0]) ax[0].set_title('Parch vs Survived') sns.factorplot('Parch','Survived',data=data,ax=ax[1]) ax[1].set_title('Parch vs Survived') plt.close(2) plt.show() ``` ### Observations: Here too the results are quite similar. Passengers with their parents onboard have greater chance of survival. It however reduces as the number goes up. The chances of survival is good for somebody who has 1-3 parents on the ship. Being alone also proves to be fatal and the chances for survival decreases when somebody has >4 parents on the ship. ## Fare--> Continous Feature ``` print('Highest Fare was:',data['Fare'].max()) print('Lowest Fare was:',data['Fare'].min()) print('Average Fare was:',data['Fare'].mean()) ``` The lowest fare is **0.0**. Wow!! a free luxorious ride. ``` f,ax=plt.subplots(1,3,figsize=(20,8)) sns.distplot(data[data['Pclass']==1].Fare,ax=ax[0]) ax[0].set_title('Fares in Pclass 1') sns.distplot(data[data['Pclass']==2].Fare,ax=ax[1]) ax[1].set_title('Fares in Pclass 2') sns.distplot(data[data['Pclass']==3].Fare,ax=ax[2]) ax[2].set_title('Fares in Pclass 3') plt.show() ``` There looks to be a large distribution in the fares of Passengers in Pclass1 and this distribution goes on decreasing as the standards reduces. As this is also continous, we can convert into discrete values by using binning. ## Observations in a Nutshell for all features: **Sex:** The chance of survival for women is high as compared to men. **Pclass:**There is a visible trend that being a **1st class passenger** gives you better chances of survival. The survival rate for **Pclass3 is very low**. For **women**, the chance of survival from **Pclass1** is almost 1 and is high too for those from **Pclass2**. **Money Wins!!!**. **Age:** Children less than 5-10 years do have a high chance of survival. Passengers between age group 15 to 35 died a lot. **Embarked:** This is a very interesting feature. 
**The chances of survival at C looks to be better than even though the majority of Pclass1 passengers got up at S.** Passengers at Q were all from **Pclass3**. **Parch+SibSp:** Having 1-2 siblings,spouse on board or 1-3 Parents shows a greater chance of probablity rather than being alone or having a large family travelling with you. ## Correlation Between The Features ``` sns.heatmap(data.corr(),annot=True,cmap='RdYlGn',linewidths=0.2) #data.corr()-->correlation matrix fig=plt.gcf() fig.set_size_inches(10,8) plt.show() ``` ### Interpreting The Heatmap The first thing to note is that only the numeric features are compared as it is obvious that we cannot correlate between alphabets or strings. Before understanding the plot, let us see what exactly correlation is. **POSITIVE CORRELATION:** If an **increase in feature A leads to increase in feature B, then they are positively correlated**. A value **1 means perfect positive correlation**. **NEGATIVE CORRELATION:** If an **increase in feature A leads to decrease in feature B, then they are negatively correlated**. A value **-1 means perfect negative correlation**. Now lets say that two features are highly or perfectly correlated, so the increase in one leads to increase in the other. This means that both the features are containing highly similar information and there is very little or no variance in information. This is known as **MultiColinearity** as both of them contains almost the same information. So do you think we should use both of them as **one of them is redundant**. While making or training models, we should try to eliminate redundant features as it reduces training time and many such advantages. Now from the above heatmap,we can see that the features are not much correlated. The highest correlation is between **SibSp and Parch i.e 0.41**. So we can carry on with all features. ## Part2: Feature Engineering and Data Cleaning Now what is Feature Engineering? 
Whenever we are given a dataset with features, it is not necessary that all the features will be important. There maybe be many redundant features which should be eliminated. Also we can get or add new features by observing or extracting information from other features. An example would be getting the Initals feature using the Name Feature. Lets see if we can get any new features and eliminate a few. Also we will tranform the existing relevant features to suitable form for Predictive Modeling. ## Age_band #### Problem With Age Feature: As I have mentioned earlier that **Age is a continous feature**, there is a problem with Continous Variables in Machine Learning Models. **Eg:**If I say to group or arrange Sports Person by **Sex**, We can easily segregate them by Male and Female. Now if I say to group them by their **Age**, then how would you do it? If there are 30 Persons, there may be 30 age values. Now this is problematic. We need to convert these **continous values into categorical values** by either Binning or Normalisation. I will be using binning i.e group a range of ages into a single bin or assign them a single value. Okay so the maximum age of a passenger was 80. So lets divide the range from 0-80 into 5 bins. So 80/5=16. So bins of size 16. ``` data['Age_band']=0 data.loc[data['Age']<=16,'Age_band']=0 data.loc[(data['Age']>16)&(data['Age']<=32),'Age_band']=1 data.loc[(data['Age']>32)&(data['Age']<=48),'Age_band']=2 data.loc[(data['Age']>48)&(data['Age']<=64),'Age_band']=3 data.loc[data['Age']>64,'Age_band']=4 data.head(2) data['Age_band'].value_counts().to_frame().style.background_gradient(cmap='summer')#checking the number of passenegers in each band sns.factorplot('Age_band','Survived',data=data,col='Pclass') plt.show() ``` True that..the survival rate decreases as the age increases irrespective of the Pclass. ## Family_Size and Alone At this point, we can create a new feature called "Family_size" and "Alone" and analyse it. 
This feature is the summation of Parch and SibSp. It gives us a combined data so that we can check if survival rate have anything to do with family size of the passengers. Alone will denote whether a passenger is alone or not. ``` data['Family_Size']=0 data['Family_Size']=data['Parch']+data['SibSp']#family size data['Alone']=0 data.loc[data.Family_Size==0,'Alone']=1#Alone f,ax=plt.subplots(1,2,figsize=(18,6)) sns.factorplot('Family_Size','Survived',data=data,ax=ax[0]) ax[0].set_title('Family_Size vs Survived') sns.factorplot('Alone','Survived',data=data,ax=ax[1]) ax[1].set_title('Alone vs Survived') plt.close(2) plt.close(3) plt.show() ``` **Family_Size=0 means that the passeneger is alone.** Clearly, if you are alone or family_size=0,then chances for survival is very low. For family size > 4,the chances decrease too. This also looks to be an important feature for the model. Lets examine this further. ``` sns.factorplot('Alone','Survived',data=data,hue='Sex',col='Pclass') plt.show() ``` It is visible that being alone is harmful irrespective of Sex or Pclass except for Pclass3 where the chances of females who are alone is high than those with family. ## Fare_Range Since fare is also a continous feature, we need to convert it into ordinal value. For this we will use **pandas.qcut**. So what **qcut** does is it splits or arranges the values according the number of bins we have passed. So if we pass for 5 bins, it will arrange the values equally spaced into 5 seperate bins or value ranges. ``` data['Fare_Range']=pd.qcut(data['Fare'],4) data.groupby(['Fare_Range'])['Survived'].mean().to_frame().style.background_gradient(cmap='summer_r') ``` As discussed above, we can clearly see that as the **fare_range increases, the chances of survival increases.** Now we cannot pass the Fare_Range values as it is. 
We should convert it into singleton values same as we did in **Age_Band** ``` data['Fare_cat']=0 data.loc[data['Fare']<=7.91,'Fare_cat']=0 data.loc[(data['Fare']>7.91)&(data['Fare']<=14.454),'Fare_cat']=1 data.loc[(data['Fare']>14.454)&(data['Fare']<=31),'Fare_cat']=2 data.loc[(data['Fare']>31)&(data['Fare']<=513),'Fare_cat']=3 sns.factorplot('Fare_cat','Survived',data=data,hue='Sex') plt.show() ``` Clearly, as the Fare_cat increases, the survival chances increases. This feature may become an important feature during modeling along with the Sex. ## Converting String Values into Numeric Since we cannot pass strings to a machine learning model, we need to convert features loke Sex, Embarked, etc into numeric values. ``` data['Sex'].replace(['male','female'],[0,1],inplace=True) data['Embarked'].replace(['S','C','Q'],[0,1,2],inplace=True) data['Initial'].replace(['Mr','Mrs','Miss','Master','Other'],[0,1,2,3,4],inplace=True) ``` ### Dropping UnNeeded Features **Name**--> We don't need name feature as it cannot be converted into any categorical value. **Age**--> We have the Age_band feature, so no need of this. **Ticket**--> It is any random string that cannot be categorised. **Fare**--> We have the Fare_cat feature, so unneeded **Cabin**--> A lot of NaN values and also many passengers have multiple cabins. So this is a useless feature. **Fare_Range**--> We have the fare_cat feature. **PassengerId**--> Cannot be categorised. ``` data.drop(['Name','Age','Ticket','Fare','Cabin','Fare_Range','PassengerId'],axis=1,inplace=True) sns.heatmap(data.corr(),annot=True,cmap='RdYlGn',linewidths=0.2,annot_kws={'size':20}) fig=plt.gcf() fig.set_size_inches(18,15) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.show() ``` Now the above correlation plot, we can see some positively related features. 
Some of them being **SibSp andd Family_Size** and **Parch and Family_Size** and some negative ones like **Alone and Family_Size.** # Part3: Predictive Modeling We have gained some insights from the EDA part. But with that, we cannot accurately predict or tell whether a passenger will survive or die. So now we will predict the whether the Passenger will survive or not using some great Classification Algorithms.Following are the algorithms I will use to make the model: 1)Logistic Regression 2)Support Vector Machines(Linear and radial) 3)Random Forest 4)K-Nearest Neighbours 5)Naive Bayes 6)Decision Tree 7)Logistic Regression ``` #importing all the required ML packages from sklearn.linear_model import LogisticRegression #logistic regression from sklearn import svm #support vector Machine from sklearn.ensemble import RandomForestClassifier #Random Forest from sklearn.neighbors import KNeighborsClassifier #KNN from sklearn.naive_bayes import GaussianNB #Naive bayes from sklearn.tree import DecisionTreeClassifier #Decision Tree from sklearn.model_selection import train_test_split #training and testing data split from sklearn import metrics #accuracy measure from sklearn.metrics import confusion_matrix #for confusion matrix train,test=train_test_split(data,test_size=0.3,random_state=0,stratify=data['Survived']) train_X=train[train.columns[1:]] train_Y=train[train.columns[:1]] test_X=test[test.columns[1:]] test_Y=test[test.columns[:1]] X=data[data.columns[1:]] Y=data['Survived'] ``` ### Radial Support Vector Machines(rbf-SVM) ``` model=svm.SVC(kernel='rbf',C=1,gamma=0.1) model.fit(train_X,train_Y) prediction1=model.predict(test_X) print('Accuracy for rbf SVM is ',metrics.accuracy_score(prediction1,test_Y)) ``` ### Linear Support Vector Machine(linear-SVM) ``` model=svm.SVC(kernel='linear',C=0.1,gamma=0.1) model.fit(train_X,train_Y) prediction2=model.predict(test_X) print('Accuracy for linear SVM is',metrics.accuracy_score(prediction2,test_Y)) ``` ### Logistic Regression ``` 
model = LogisticRegression() model.fit(train_X,train_Y) prediction3=model.predict(test_X) print('The accuracy of the Logistic Regression is',metrics.accuracy_score(prediction3,test_Y)) ``` ### Decision Tree ``` model=DecisionTreeClassifier() model.fit(train_X,train_Y) prediction4=model.predict(test_X) print('The accuracy of the Decision Tree is',metrics.accuracy_score(prediction4,test_Y)) ``` ### K-Nearest Neighbours(KNN) ``` model=KNeighborsClassifier() model.fit(train_X,train_Y) prediction5=model.predict(test_X) print('The accuracy of the KNN is',metrics.accuracy_score(prediction5,test_Y)) ``` Now the accuracy for the KNN model changes as we change the values for **n_neighbours** attribute. The default value is **5**. Lets check the accuracies over various values of n_neighbours. ``` a_index=list(range(1,11)) a=pd.Series() x=[0,1,2,3,4,5,6,7,8,9,10] for i in list(range(1,11)): model=KNeighborsClassifier(n_neighbors=i) model.fit(train_X,train_Y) prediction=model.predict(test_X) a=a.append(pd.Series(metrics.accuracy_score(prediction,test_Y))) plt.plot(a_index, a) plt.xticks(x) fig=plt.gcf() fig.set_size_inches(12,6) plt.show() print('Accuracies for different values of n are:',a.values,'with the max value as ',a.values.max()) ``` ### Gaussian Naive Bayes ``` model=GaussianNB() model.fit(train_X,train_Y) prediction6=model.predict(test_X) print('The accuracy of the NaiveBayes is',metrics.accuracy_score(prediction6,test_Y)) ``` ### Random Forests ``` model=RandomForestClassifier(n_estimators=100) model.fit(train_X,train_Y) prediction7=model.predict(test_X) print('The accuracy of the Random Forests is',metrics.accuracy_score(prediction7,test_Y)) ``` The accuracy of a model is not the only factor that determines the robustness of the classifier. Let's say that a classifier is trained over a training data and tested over the test data and it scores an accuracy of 90%. 
Now this seems to be a very good accuracy for a classifier, but can we confirm that it will be 90% for all the new test sets that come over? The answer is **No**, because we can't determine which instances the classifier will use to train itself. As the training and testing data changes, the accuracy will also change. It may increase or decrease. This is known as **model variance**. To overcome this and get a generalized model, we use **Cross Validation**. # Cross Validation Many a times, the data is imbalanced, i.e. there may be a high number of class1 instances but a lesser number of other class instances. Thus we should train and test our algorithm on each and every instance of the dataset. Then we can take an average of all the noted accuracies over the dataset. 1)The K-Fold Cross Validation works by first dividing the dataset into k subsets. 2)Let's say we divide the dataset into (k=5) parts. We reserve 1 part for testing and train the algorithm over the other 4 parts. 3)We continue the process by changing the testing part in each iteration and training the algorithm over the other parts. The accuracies and errors are then averaged to get an average accuracy of the algorithm. This is called K-Fold Cross Validation. 4)An algorithm may underfit over a dataset for some training data and sometimes also overfit the data for another training set. Thus with cross-validation, we can achieve a generalised model. 
``` from sklearn.model_selection import KFold #for K-fold cross validation from sklearn.model_selection import cross_val_score #score evaluation from sklearn.model_selection import cross_val_predict #prediction kfold = KFold(n_splits=10, random_state=22) # k=10, split the data into 10 equal parts xyz=[] accuracy=[] std=[] classifiers=['Linear Svm','Radial Svm','Logistic Regression','KNN','Decision Tree','Naive Bayes','Random Forest'] models=[svm.SVC(kernel='linear'),svm.SVC(kernel='rbf'),LogisticRegression(),KNeighborsClassifier(n_neighbors=9),DecisionTreeClassifier(),GaussianNB(),RandomForestClassifier(n_estimators=100)] for i in models: model = i cv_result = cross_val_score(model,X,Y, cv = kfold,scoring = "accuracy") cv_result=cv_result xyz.append(cv_result.mean()) std.append(cv_result.std()) accuracy.append(cv_result) new_models_dataframe2=pd.DataFrame({'CV Mean':xyz,'Std':std},index=classifiers) new_models_dataframe2 plt.subplots(figsize=(12,6)) box=pd.DataFrame(accuracy,index=[classifiers]) box.T.boxplot() new_models_dataframe2['CV Mean'].plot.barh(width=0.8) plt.title('Average CV Mean Accuracy') fig=plt.gcf() fig.set_size_inches(8,5) plt.show() ``` The classification accuracy can be sometimes misleading due to imbalance. We can get a summarized result with the help of confusion matrix, which shows where did the model go wrong, or which class did the model predict wrong. ## Confusion Matrix It gives the number of correct and incorrect classifications made by the classifier. 
``` f,ax=plt.subplots(3,3,figsize=(12,10)) y_pred = cross_val_predict(svm.SVC(kernel='rbf'),X,Y,cv=10) sns.heatmap(confusion_matrix(Y,y_pred),ax=ax[0,0],annot=True,fmt='2.0f') ax[0,0].set_title('Matrix for rbf-SVM') y_pred = cross_val_predict(svm.SVC(kernel='linear'),X,Y,cv=10) sns.heatmap(confusion_matrix(Y,y_pred),ax=ax[0,1],annot=True,fmt='2.0f') ax[0,1].set_title('Matrix for Linear-SVM') y_pred = cross_val_predict(KNeighborsClassifier(n_neighbors=9),X,Y,cv=10) sns.heatmap(confusion_matrix(Y,y_pred),ax=ax[0,2],annot=True,fmt='2.0f') ax[0,2].set_title('Matrix for KNN') y_pred = cross_val_predict(RandomForestClassifier(n_estimators=100),X,Y,cv=10) sns.heatmap(confusion_matrix(Y,y_pred),ax=ax[1,0],annot=True,fmt='2.0f') ax[1,0].set_title('Matrix for Random-Forests') y_pred = cross_val_predict(LogisticRegression(),X,Y,cv=10) sns.heatmap(confusion_matrix(Y,y_pred),ax=ax[1,1],annot=True,fmt='2.0f') ax[1,1].set_title('Matrix for Logistic Regression') y_pred = cross_val_predict(DecisionTreeClassifier(),X,Y,cv=10) sns.heatmap(confusion_matrix(Y,y_pred),ax=ax[1,2],annot=True,fmt='2.0f') ax[1,2].set_title('Matrix for Decision Tree') y_pred = cross_val_predict(GaussianNB(),X,Y,cv=10) sns.heatmap(confusion_matrix(Y,y_pred),ax=ax[2,0],annot=True,fmt='2.0f') ax[2,0].set_title('Matrix for Naive Bayes') plt.subplots_adjust(hspace=0.2,wspace=0.2) plt.show() ``` ### Interpreting Confusion Matrix The left diagonal shows the number of correct predictions made for each class while the right diagonal shows the number of wrong prredictions made. Lets consider the first plot for rbf-SVM: 1)The no. of correct predictions are **491(for dead) + 247(for survived)** with the mean CV accuracy being **(491+247)/891 = 82.8%** which we did get earlier. 2)**Errors**--> Wrongly Classified 58 dead people as survived and 95 survived as dead. Thus it has made more mistakes by predicting dead as survived. 
By looking at all the matrices, we can say that rbf-SVM has a higher chance in correctly predicting dead passengers but NaiveBayes has a higher chance in correctly predicting passengers who survived. ### Hyper-Parameters Tuning The machine learning models are like a Black-Box. There are some default parameter values for this Black-Box, which we can tune or change to get a better model. Like the C and gamma in the SVM model and similarly different parameters for different classifiers, are called the hyper-parameters, which we can tune to change the learning rate of the algorithm and get a better model. This is known as Hyper-Parameter Tuning. We will tune the hyper-parameters for the 2 best classifiers i.e the SVM and RandomForests. #### SVM ``` from sklearn.model_selection import GridSearchCV C=[0.05,0.1,0.2,0.3,0.25,0.4,0.5,0.6,0.7,0.8,0.9,1] gamma=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0] kernel=['rbf','linear'] hyper={'kernel':kernel,'C':C,'gamma':gamma} gd=GridSearchCV(estimator=svm.SVC(),param_grid=hyper,verbose=True) gd.fit(X,Y) print(gd.best_score_) print(gd.best_estimator_) ``` #### Random Forests ``` n_estimators=range(100,1000,100) hyper={'n_estimators':n_estimators} gd=GridSearchCV(estimator=RandomForestClassifier(random_state=0),param_grid=hyper,verbose=True) gd.fit(X,Y) print(gd.best_score_) print(gd.best_estimator_) ``` The best score for Rbf-Svm is **82.82% with C=0.05 and gamma=0.1**. For RandomForest, score is abt **81.8% with n_estimators=900**. # Ensembling Ensembling is a good way to increase the accuracy or performance of a model. In simple words, it is the combination of various simple models to create a single powerful model. Lets say we want to buy a phone and ask many people about it based on various parameters. So then we can make a strong judgement about a single product after analysing all different parameters. This is **Ensembling**, which improves the stability of the model. 
Ensembling can be done in ways like: 1)Voting Classifier 2)Bagging 3)Boosting. ## Voting Classifier It is the simplest way of combining predictions from many different simple machine learning models. It gives an average prediction result based on the prediction of all the submodels. The submodels or the basemodels are all of different types. ``` from sklearn.ensemble import VotingClassifier ensemble_lin_rbf=VotingClassifier(estimators=[('KNN',KNeighborsClassifier(n_neighbors=10)), ('RBF',svm.SVC(probability=True,kernel='rbf',C=0.5,gamma=0.1)), ('RFor',RandomForestClassifier(n_estimators=500,random_state=0)), ('LR',LogisticRegression(C=0.05)), ('DT',DecisionTreeClassifier(random_state=0)), ('NB',GaussianNB()), ('svm',svm.SVC(kernel='linear',probability=True)) ], voting='soft').fit(train_X,train_Y) print('The accuracy for ensembled model is:',ensemble_lin_rbf.score(test_X,test_Y)) cross=cross_val_score(ensemble_lin_rbf,X,Y, cv = 10,scoring = "accuracy") print('The cross validated score is',cross.mean()) ``` ## Bagging Bagging is a general ensemble method. It works by applying similar classifiers on small partitions of the dataset and then taking the average of all the predictions. Due to the averaging, there is a reduction in variance. Unlike Voting Classifier, Bagging makes use of similar classifiers. #### Bagged KNN Bagging works best with models with high variance. An example for this can be Decision Tree or Random Forests. We can use KNN with a small value of **n_neighbours**, as a small value of n_neighbours gives high variance. 
``` from sklearn.ensemble import BaggingClassifier model=BaggingClassifier(base_estimator=KNeighborsClassifier(n_neighbors=3),random_state=0,n_estimators=700) model.fit(train_X,train_Y) prediction=model.predict(test_X) print('The accuracy for bagged KNN is:',metrics.accuracy_score(prediction,test_Y)) result=cross_val_score(model,X,Y,cv=10,scoring='accuracy') print('The cross validated score for bagged KNN is:',result.mean()) ``` #### Bagged DecisionTree ``` model=BaggingClassifier(base_estimator=DecisionTreeClassifier(),random_state=0,n_estimators=100) model.fit(train_X,train_Y) prediction=model.predict(test_X) print('The accuracy for bagged Decision Tree is:',metrics.accuracy_score(prediction,test_Y)) result=cross_val_score(model,X,Y,cv=10,scoring='accuracy') print('The cross validated score for bagged Decision Tree is:',result.mean()) ``` ## Boosting Boosting is an ensembling technique which uses sequential learning of classifiers. It is a step by step enhancement of a weak model. Boosting works as follows: A model is first trained on the complete dataset. Now the model will get some instances right while some wrong. Now in the next iteration, the learner will focus more on the wrongly predicted instances or give more weight to it. Thus it will try to predict the wrong instance correctly. Now this iterative process continues, and new classifiers are added to the model until the limit is reached on the accuracy. #### AdaBoost(Adaptive Boosting) The weak learner or estimator in this case is a Decision Tree. But we can change the default base_estimator to any algorithm of our choice. ``` from sklearn.ensemble import AdaBoostClassifier ada=AdaBoostClassifier(n_estimators=200,random_state=0,learning_rate=0.1) result=cross_val_score(ada,X,Y,cv=10,scoring='accuracy') print('The cross validated score for AdaBoost is:',result.mean()) ``` #### Stochastic Gradient Boosting Here too the weak learner is a Decision Tree. 
``` from sklearn.ensemble import GradientBoostingClassifier grad=GradientBoostingClassifier(n_estimators=500,random_state=0,learning_rate=0.1) result=cross_val_score(grad,X,Y,cv=10,scoring='accuracy') print('The cross validated score for Gradient Boosting is:',result.mean()) ``` #### XGBoost ``` import xgboost as xg xgboost=xg.XGBClassifier(n_estimators=900,learning_rate=0.1) result=cross_val_score(xgboost,X,Y,cv=10,scoring='accuracy') print('The cross validated score for XGBoost is:',result.mean()) ``` We got the highest accuracy for AdaBoost. We will try to increase it with Hyper-Parameter Tuning #### Hyper-Parameter Tuning for AdaBoost ``` n_estimators=list(range(100,1100,100)) learn_rate=[0.05,0.1,0.2,0.3,0.25,0.4,0.5,0.6,0.7,0.8,0.9,1] hyper={'n_estimators':n_estimators,'learning_rate':learn_rate} gd=GridSearchCV(estimator=AdaBoostClassifier(),param_grid=hyper,verbose=True) gd.fit(X,Y) print(gd.best_score_) print(gd.best_estimator_) ``` The maximum accuracy we can get with AdaBoost is **83.16% with n_estimators=200 and learning_rate=0.05** ### Confusion Matrix for the Best Model ``` ada=AdaBoostClassifier(n_estimators=200,random_state=0,learning_rate=0.05) result=cross_val_predict(ada,X,Y,cv=10) sns.heatmap(confusion_matrix(Y,result),cmap='winter',annot=True,fmt='2.0f') plt.show() ``` ## Feature Importance ``` f,ax=plt.subplots(2,2,figsize=(15,12)) model=RandomForestClassifier(n_estimators=500,random_state=0) model.fit(X,Y) pd.Series(model.feature_importances_,X.columns).sort_values(ascending=True).plot.barh(width=0.8,ax=ax[0,0]) ax[0,0].set_title('Feature Importance in Random Forests') model=AdaBoostClassifier(n_estimators=200,learning_rate=0.05,random_state=0) model.fit(X,Y) pd.Series(model.feature_importances_,X.columns).sort_values(ascending=True).plot.barh(width=0.8,ax=ax[0,1],color='#ddff11') ax[0,1].set_title('Feature Importance in AdaBoost') model=GradientBoostingClassifier(n_estimators=500,learning_rate=0.1,random_state=0) model.fit(X,Y) 
pd.Series(model.feature_importances_,X.columns).sort_values(ascending=True).plot.barh(width=0.8,ax=ax[1,0],cmap='RdYlGn_r') ax[1,0].set_title('Feature Importance in Gradient Boosting') model=xg.XGBClassifier(n_estimators=900,learning_rate=0.1) model.fit(X,Y) pd.Series(model.feature_importances_,X.columns).sort_values(ascending=True).plot.barh(width=0.8,ax=ax[1,1],color='#FD0F00') ax[1,1].set_title('Feature Importance in XgBoost') plt.show() ``` We can see the important features for various classifiers like RandomForests, AdaBoost,etc. #### Observations: 1)Some of the common important features are Initial,Fare_cat,Pclass,Family_Size. 2)The Sex feature doesn't seem to give any importance, which is shocking as we had seen earlier that Sex combined with Pclass was giving a very good differentiating factor. Sex looks to be important only in RandomForests. However, we can see the feature Initial, which is at the top in many classifiers.We had already seen the positive correlation between Sex and Initial, so they both refer to the gender. 3)Similarly the Pclass and Fare_cat refer to the status of the passengers and Family_Size with Alone,Parch and SibSp. I hope all of you did gain some insights to Machine Learning. Some other great notebooks for Machine Learning are: 1) For R:[Divide and Conquer by Oscar Takeshita](https://www.kaggle.com/pliptor/divide-and-conquer-0-82297/notebook) 2)For Python:[Pytanic by Heads and Tails](https://www.kaggle.com/headsortails/pytanic) 3)For Python:[Introduction to Ensembling/Stacking by Anisotropic](https://www.kaggle.com/arthurtok/introduction-to-ensembling-stacking-in-python) ### Thanks a lot for having a look at this notebook. If you found this notebook useful, **Do Upvote**.
github_jupyter
# What movie to watch tonight? The goal of this project is to build a search engine over a list of movies that have a dedicated page on Wikipedia. In order to achieve this goal, our work has been divided into different tasks that allowed us to fulfill the intent. Since we based the entire process upon using custom functions (choice due to the will of lightening the code and make it more readable), you won't find much commands, and we suggest to check the py files stored in this repository to better understand every step of which the notebook consists. Let's start. ## Libraries ``` import os import pickle import pandas as pd from tqdm import tqdm ``` ## 1. Data collection The first step to be taken, as a means to build the search engine, will be scraping the Wikipedia pages and store every information needed to build a dataframe from which our SE picks the useful ones and returns the results of a query. ### 1.1. Get the list of movies First thing first, we need the URLs. To acquire them, we created a function to download, directly from the page indicated by our TAs, the ones needed for the crawling phase. ``` import collector_utils movies_urls = [collector_utils.get_urls(i) for i in range(1,3)] dict(list(movies_urls[0].items())[0:10]) dict(list(movies_urls[1].items())[0:10]) ``` As you can see, we generate two distinct dictionaries containing, for each list, a set of indexd URLs compliant to the original order. Once yielded, we merge them and get the one that is going to be exploited for the latter purposes. ``` movies_urls[0].update(movies_urls[1]) movies_urls = movies_urls[0].copy() ``` ### 1.2. Crawl Wikipedia Here we start crawling the pages linked to the URLs. The custom function takes into account the possibility to get bloked by Wikipedia if too many requests are sent. To avoid this risk, we set a 20 minutes pause in case of [Response: 429]. Between any other two successful requests, instead, a random time lying within the range of 1-5 seconds is waited. 
If any other response code occurs, it just gets skipped and the loop continues (in this domain we find the case of non-existing pages). At the end of each iteration, an html file version of the page is stored in our laptop, within an 'Htmls' folder. ``` collector_utils.scraping(movies_urls, 1) ``` ### 1.3 Parse downloaded pages Here we start parsing the htmls, that means: extracting from each one a list of specific informations that are going to be stored in a dataframe and used to retrieve the results after each research. In particular, we want to keep the following stuff: <br> **1. Title** <br> **2. Intro** <br> **3. Plot** <br> **4. Film name** <br> **5. Director** <br> **6. Producer** <br> **7. Writer** <br> **8. Starring** <br> **9. Music** <br> **10. Release date** <br> **11. Runtime** <br> **12. Country** <br> **13. Language** <br> **14. Budget** <br> Our final decision was, since not every Wiki page show the same template for the infoboxes dedicated to movies, to just exclude those ones that did not fit the scheme we built. ``` import parser_utils ls = ['Htmls/' + file for file in os.listdir('Htmls') if file[-4:] == 'html'] ls.sort(key = lambda x: int(''.join(filter(str.isdigit, x)))) movie_data = list() fails = 0 try: os.makedirs('Tsv') except: _ = None for i in tqdm(range(len(ls))): path_file = ls[i] movie_info = parser_utils.info_extractor(path_file) if movie_info != False: movie_data.append(movie_info) pd.DataFrame(list(movie_info.values())).to_csv(r'Tsv\Article_' + str(i + 1) + '.tsv', sep='\t', header = False, index = False) else: fails += 1 #print(ls[i]) print(str(fails) + ' files are not Wikipedia movie pages') movies_info_df = pd.DataFrame(movie_data) movies_info_df['Doc_ID'] = movies_info_df.index movies_info_df.to_csv('movie_data.csv') movies_info_df = pd.read_csv('movie_data.csv') ``` As you can see, for each iteration a tsv file is created and then appended to a main dataframe which we will later use for our intentions. ## 2. 
Search Engine Now that the preparatory phase has been brought to completion, we can start developing the main object of this homework: the search engine itself. <br> What we need to do, primarily, is running a preprocessing procedure. This will remove punctuation, stopwords and stem each document. We do not show it here, since our custom **text_cleaner** function is stored inside the **utils.py** file and is later used within the **index_utils.py** one, in which we decided to create a specific class of elements and methods to ease the work. ### 2.1. Conjunctive query At this moment, we narrow our interest on the intro and plot of each document. It means that the first Search Engine will evaluate queries with respect to the aforementioned information. #### 2.1.1 Create your index! As already mentioned, the structure of our code was thought as finalized to get the py files recommended in the instructions, so it is not possible to split everything and show it in the notebook. Anyway, all that is needed to create the index is inside the **index_utils.py** library. What happens, basically, since we defined a class, is that, after initializing an _se_ object, we run a custom method (belonging to the class) called **create_engine**, and this one directly executes a series of commands that generate the index and the vocabulary for a specific search engine (which number will be taken as input, as well as the dataframe above which the procedures must be run). 
<br> The inverted index yielded will have the following shape: <br> $\{ term_id_1:[document_1, document_2, document_4], \\ \ \ term_id_2:[document_1, document_3, document_5, document_6],\ ...\}$ ``` import index_utils # Loading the dataframe movies_df = pd.read_csv('movie_data.csv') # Initializing the object and running the method se = index_utils.search_engine() se.create_engine(movies_df) # Storing the resulting output pickle.dump(se, open("se.p", "wb")) ``` #### 2.1.2 Execute the query We can now execute a query over the built search engine. The output will be the documents containing all the words searched, of which we just show the title, the intro and the URL. ``` query = input() se = pickle.load(open( "se.p", "rb" )) se.query(search_engine = 1, q = query, dataframe = movies_df) ``` ### 2.2 Conjunctive query & Ranking score In this second search engine, given a query, we are assigned the task get the top-k documents related to the latter. In particular, we have to: - find all the documents that contains all the words in the query. - sort them by their similarity with the query - return in output k documents, or all the documents with non-zero similarity with the query when the results are less than k. #### 2.2.1 Inverted index Same old story: within the module **create_engine**, by means of an if condition, we specify the search engine number, so, when the search engine to produce is the second one, an inverted index based upon a _Tf-Idf_ score (computed real-time) gets created. The resulting shape will be the following: $ \{ term_id_1:[(document1, tfIdf_{term,document1}), (document2, tfIdf_{term,document2}), (document4, tfIdf_{term,document4}),\ ...], \\ \ \ term_id_2:[(document1, tfIdf_{term,document1}), (document3, tfIdf_{term,document3}), (document5, tfIdf_{term,document5}), (document6, tfIdf_{term,document6}),\\ ...],\ ...\}$ ``` # Initializing the object and running the method. The second search engine is already trained via create_engnie() method. 
se = index_utils.search_engine() se.create_engine(movies_df) ``` #### 2.2.2 Execute the query Besides the module to create a search engine with its corresponding index and vocabulary, our class has furthermore been provided with another one, called **query** (already used before), which directly computes the _Cosine similarity_ in the case of search engines 2 & 3. In this way, when we browse it, our final output will be a list of documents, ranked by their Cosine similarity with respect to the query entered in input. ``` se.query(search_engine = 2, q = query, dataframe = movies_df) ``` ## 3. Define a new score! On this task we were asked to define a new score by which the results for a query could be ranked in a more efficacious way. Dealing with movies-centered data, it has been somewhat hard to come up with an idea. Our way to manage the job has consisted in suggesting to the user a series of additional questions to guide them through the choice of more specific information. In particular, after computing the frequency with which a string appears inside different columns, the search engine recommends to the user how to rank the data that most properly match their request. In this way a different weight (score = score * 1.02) will be assigned to the column to which we have been addressed. <br> Practical example: the user types "Brad Pitt"; after retrieving info about movies that contain the specified string, the search engine will generate some questions like "Do you mean Directed by: brad?", "Do you mean Directed by: pitt?", "Do you mean Starring: brad?", "Do you mean Starring: pitt?" and "Do you mean Starring: brad pitt?". Of course, since we are dealing with conjunctive queries, the highest importance will be assigned to the results contemplating the whole string taken as input (in this case "brad pitt").
``` query = 'disney movie 2019' se.query(search_engine = 3, q = query, dataframe = movies_df) query = 'Brad Pitt' se.query(search_engine = 3, q = query, dataframe = movies_df) ```
github_jupyter
``` import random import math import os import subprocess from subprocess import Popen, PIPE, STDOUT from multiprocessing import Process import time from timeit import default_timer as timer import multiprocessing as mp # output_folder = "output_plane" # job_filename = "job_plane.txt" # progress_filename = "progress_plane.txt" # folder_id = "02691156" # plane output_folder = "output_car" progress_filename = "progress_car.txt" job_filename = "job_car.txt" folder_id = "02958343" # output_folder = "output_chair" # progress_filename = "progress_chair.txt" # job_filename = "job_chair.txt" # folder_id = "03001627" # chair # shapnet_path = "/NAS/data/shapenet/ShapeNetCore.v2" # render_path = "/NAS/home/6dof/models/research/keypointnet/tools/" shapnet_path = "/home/paperspace/zen/6dof/6dof_data/ShapeNetCore.v1" render_path = "/home/paperspace/zen/6dof/models/research/keypointnet/tools/" batch_size = 8 f = open( render_path + job_filename,"r") f_save = open( render_path + progress_filename,"r+") ids = [] done_ids = [] for s in f: ids.append(s.split(",")[1]) ids = ids[1:] for s in f_save: done_ids.append(s.strip()) f_save.close() dirs = os.listdir(shapnet_path) all_ids = [] for i in dirs: file_path = os.path.join(shapnet_path,i) if os.path.isdir(file_path): all_ids += os.listdir(file_path) from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import os from scipy import misc import tensorflow as tf import argparse import sys # output_dir = "/home/paperspace/zen/6dof/6dof_data/zen_plane/" # input_dir = "/home/paperspace/zen/6dof/models/research/keypointnet/tools/output_plane/02691156/" def get_matrix(lines): return np.array([[float(y) for y in x.strip().split(" ")] for x in lines]) def read_model_view_matrices(filename): with open(filename, "r") as f: lines = f.readlines() return get_matrix(lines[:4]), get_matrix(lines[4:]) def bytes_feature(values): return 
tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) def generate(files, output_dir, input_dir, chunk_size = 40): file_chunks = [files[i:i + chunk_size] for i in range(0, len(files), chunk_size)] for i in range(len(file_chunks)): files = file_chunks[i] record_name = output_dir + '{0:04}'.format(i) + ".tfrecord" with tf.python_io.TFRecordWriter(record_name) as tfrecord_writer: with tf.Graph().as_default(): im0 = tf.placeholder(dtype=tf.uint8) im1 = tf.placeholder(dtype=tf.uint8) encoded0 = tf.image.encode_png(im0) encoded1 = tf.image.encode_png(im1) with tf.Session() as sess: for file_name in files: count = 0 indir = input_dir + file_name + "/" while tf.gfile.Exists(indir + "%06d.txt" % count): image0 = misc.imread(indir + "%06d.png" % (count * 2)) image1 = misc.imread(indir + "%06d.png" % (count * 2 + 1)) mat0, mat1 = read_model_view_matrices( indir + "%06d.txt" % count) mati0 = np.linalg.inv(mat0).flatten() mati1 = np.linalg.inv(mat1).flatten() mat0 = mat0.flatten() mat1 = mat1.flatten() st0, st1 = sess.run([encoded0, encoded1], feed_dict={im0: image0, im1: image1}) example = tf.train.Example(features=tf.train.Features(feature={ 'img0': bytes_feature(st0), 'img1': bytes_feature(st1), 'mv0': tf.train.Feature( float_list=tf.train.FloatList(value=mat0)), 'mvi0': tf.train.Feature( float_list=tf.train.FloatList(value=mati0)), 'mv1': tf.train.Feature( float_list=tf.train.FloatList(value=mat1)), 'mvi1': tf.train.Feature( float_list=tf.train.FloatList(value=mati1)), })) tfrecord_writer.write(example.SerializeToString()) count += 1 print("finished: " + file_name) # if __name__ == '__main__': # parser = argparse.ArgumentParser() # parser.add_argument('-t', '--target', dest='target', # required=True, # help='target set: car, plane, chair') # if '--' not in sys.argv: # parser.print_help() # exit(1) # argv = sys.argv[sys.argv.index('--') + 1:] # args, _ = parser.parse_known_args(argv) # if args.target == 'car': # output_dir = 
"/NAS/home/shapenet_rendering/shapenet_car/cars_with_keypoints/" # input_dir = "/NAS/home/shapenet_rendering/shapenet_car/02958343/" # elif args.target == "plane": # output_dir = "/NAS/home/shapenet_rendering/shapenet_plane/planes_with_keypoints/" # input_dir = "/NAS/home/shapenet_rendering/shapenet_plane/02691156/" # elif args.target == "chair": # output_dir = "/NAS/home/shapenet_rendering/shapenet_chair/chairs_with_keypoints/" # input_dir = "/NAS/home/shapenet_rendering/shapenet_chair/03001627/" # else: # parser.print_help() # exit(1) output_dir = "/home/paperspace/zen/6dof/6dof_data/zen_plane/" input_dir = "/home/paperspace/zen/6dof/models/research/keypointnet/tools/output_plane/02691156/" files = os.listdir(input_dir) if not os.path.isdir(output_dir): os.mkdir(output_dir) generate(files, output_dir, input_dir, 40) chunk_size = 40 file_chunks = [files[i:i + chunk_size] for i in range(0, len(files), chunk_size)] for i in range(len(file_chunks)): print(i) files = [output_dir + i for i in os.listdir(output_dir)] for fn in files: c = 0 for record in tf.python_io.tf_record_iterator(fn): c += 1 print(c) test = open("test.txt", "r") dev = open("dev.txt", "r") train = open("train.txt", "r") whole = list(range(80)) random.sample(whole, int(len(whole)/10)) ```
github_jupyter
``` import os import pathlib from tqdm.auto import tqdm from facade_project import FACADE_LABELME_ORIGINAL_DIR, FACADE_IMAGES_DIR, LABEL_NAME_TO_VALUE, NUM_IMAGES img_paths = [os.path.join(FACADE_LABELME_ORIGINAL_DIR, fname) for fname in sorted(os.listdir(FACADE_LABELME_ORIGINAL_DIR))] len(img_paths) ``` # From Labelme to COCO ``` from facade_project.coco.from_labelme import Labelme2coco def create_coco_ann_and_images_dir(json_files, dir_name, name_suffix, selected_classes=LABEL_NAME_TO_VALUE): data_dir = '{}/coco/{}'.format(FACADE_IMAGES_DIR, dir_name) img_dir = '{}/images/{}'.format(data_dir, name_suffix) pathlib.Path(img_dir).mkdir(parents=True, exist_ok=True) l2c_obj = Labelme2coco( json_files, '{}/ann_{}.json'.format(data_dir, name_suffix), only_labels=selected_classes, save_img_dir=img_dir, ) l2c_obj.save_json() return l2c_obj ``` # Split train - val ``` from facade_project.data import get_indices_split train_ind, val_ind = get_indices_split(NUM_IMAGES) train_json = [img_paths[i] for i in train_ind] val_json = [img_paths[i] for i in val_ind] ``` # Generate Run this only to generate the coco dataset again (careful about overwriting old ones) ## Training ``` l2c_obj = create_coco_ann_and_images_dir( json_files=train_json, dir_name='original', name_suffix='train', ) ``` ## Validation ``` create_coco_ann_and_images_dir( json_files=val_json, dir_name='original', name_suffix='val', ); ``` # Display This code is adapted from original code on http://cocodataset.org/ ``` %matplotlib inline from pycocotools.coco import COCO import numpy as np import skimage.io as io import matplotlib.pyplot as plt import pylab import PIL pylab.rcParams['figure.figsize'] = (8.0, 10.0) annotation_path = '{}/coco/original/ann_train.json'.format(FACADE_IMAGES_DIR) image_dir = '{}/coco/original/images/train'.format(FACADE_IMAGES_DIR) annotation_path coco=COCO(annotation_path) coco.getCatIds() # display COCO categories and supercategories cats = coco.loadCats(coco.getCatIds()) 
nms=[cat['name'] for cat in cats] print('COCO categories: \n{}\n'.format(' '.join(nms))) nms = set([cat['supercategory'] for cat in cats]) print('COCO supercategories: \n{}'.format(' '.join(nms))) img_metadata = coco.loadImgs(7)[0] image_path = os.path.join(image_dir, img_metadata['file_name']) I = PIL.Image.open(image_path) img_metadata plt.axis('off') plt.imshow(I) # load and display instance annotations plt.imshow(I); plt.axis('off') catIds = coco.getCatIds(catNms=[]); annIds = coco.getAnnIds(imgIds=img_metadata['id'], catIds=catIds) #print(annIds) anns = coco.loadAnns(annIds) coco.showAnns(anns) ``` ## coco format - template for annotations
github_jupyter
# 词嵌入基础 我们在[“循环神经网络的从零开始实现”](https://zh.d2l.ai/chapter_recurrent-neural-networks/rnn-scratch.html)一节中使用 one-hot 向量表示单词,虽然它们构造起来很容易,但通常并不是一个好选择。一个主要的原因是,one-hot 词向量无法准确表达不同词之间的相似度,如我们常常使用的余弦相似度。 Word2Vec 词嵌入工具的提出正是为了解决上面这个问题,它将每个词表示成一个定长的向量,并通过在语料库上的预训练使得这些向量能较好地表达不同词之间的相似和类比关系,以引入一定的语义信息。基于两种概率模型的假设,我们可以定义两种 Word2Vec 模型: 1. [Skip-Gram 跳字模型](https://zh.d2l.ai/chapter_natural-language-processing/word2vec.html#%E8%B7%B3%E5%AD%97%E6%A8%A1%E5%9E%8B):假设背景词由中心词生成,即建模 $P(w_o\mid w_c)$,其中 $w_c$ 为中心词,$w_o$ 为任一背景词; ![Image Name](https://cdn.kesci.com/upload/image/q5mjsq84o9.png?imageView2/0/w/960/h/960) 2. [CBOW (continuous bag-of-words) 连续词袋模型](https://zh.d2l.ai/chapter_natural-language-processing/word2vec.html#%E8%BF%9E%E7%BB%AD%E8%AF%8D%E8%A2%8B%E6%A8%A1%E5%9E%8B):假设中心词由背景词生成,即建模 $P(w_c\mid \mathcal{W}_o)$,其中 $\mathcal{W}_o$ 为背景词的集合。 ![Image Name](https://cdn.kesci.com/upload/image/q5mjt4r02n.png?imageView2/0/w/960/h/960) 在这里我们主要介绍 Skip-Gram 模型的实现,CBOW 实现与其类似,读者可之后自己尝试实现。后续的内容将大致从以下四个部分展开: 1. PTB 数据集 2. Skip-Gram 跳字模型 3. 负采样近似 4. 训练模型 ``` import collections import math import random import sys import time import os import numpy as np import torch from torch import nn import torch.utils.data as Data ``` ## PTB 数据集 简单来说,Word2Vec 能从语料中学到如何将离散的词映射为连续空间中的向量,并保留其语义上的相似关系。那么为了训练 Word2Vec 模型,我们就需要一个自然语言语料库,模型将从中学习各个单词间的关系,这里我们使用经典的 PTB 语料库进行训练。[PTB (Penn Tree Bank)](https://catalog.ldc.upenn.edu/LDC99T42) 是一个常用的小型语料库,它采样自《华尔街日报》的文章,包括训练集、验证集和测试集。我们将在PTB训练集上训练词嵌入模型。 ### 载入数据集 数据集训练文件 `ptb.train.txt` 示例: ``` aer banknote berlitz calloway centrust cluett fromstein gitano guterman ... pierre N years old will join the board as a nonexecutive director nov. N mr. is chairman of n.v. the dutch publishing group ... 
``` ``` with open('/home/kesci/input/ptb_train1020/ptb.train.txt', 'r') as f: lines = f.readlines() # 该数据集中句子以换行符为分割 raw_dataset = [st.split() for st in lines] # st是sentence的缩写,单词以空格为分割 print('# sentences: %d' % len(raw_dataset)) # 对于数据集的前3个句子,打印每个句子的词数和前5个词 # 句尾符为 '' ,生僻词全用 '' 表示,数字则被替换成了 'N' for st in raw_dataset[:3]: print('# tokens:', len(st), st[:5]) ``` ### 建立词语索引 ``` counter = collections.Counter([tk for st in raw_dataset for tk in st]) # tk是token的缩写 counter = dict(filter(lambda x: x[1] >= 5, counter.items())) # 只保留在数据集中至少出现5次的词 idx_to_token = [tk for tk, _ in counter.items()] token_to_idx = {tk: idx for idx, tk in enumerate(idx_to_token)} dataset = [[token_to_idx[tk] for tk in st if tk in token_to_idx] for st in raw_dataset] # raw_dataset中的单词在这一步被转换为对应的idx num_tokens = sum([len(st) for st in dataset]) '# tokens: %d' % num_tokens ``` ### 二次采样 文本数据中一般会出现一些高频词,如英文中的“the”“a”和“in”。通常来说,在一个背景窗口中,一个词(如“chip”)和较低频词(如“microprocessor”)同时出现比和较高频词(如“the”)同时出现对训练词嵌入模型更有益。因此,训练词嵌入模型时可以对词进行二次采样。 具体来说,数据集中每个被索引词 $w_i$ 将有一定概率被丢弃,该丢弃概率为 $$ P(w_i)=\max(1-\sqrt{\frac{t}{f(w_i)}},0) $$ 其中 $f(w_i)$ 是数据集中词 $w_i$ 的个数与总词数之比,常数 $t$ 是一个超参数(实验中设为 $10^{−4}$)。可见,只有当 $f(w_i)>t$ 时,我们才有可能在二次采样中丢弃词 $w_i$,并且越高频的词被丢弃的概率越大。具体的代码如下: ``` def discard(idx): ''' @params: idx: 单词的下标 @return: True/False 表示是否丢弃该单词 ''' return random.uniform(0, 1) < 1 - math.sqrt( 1e-4 / counter[idx_to_token[idx]] * num_tokens) subsampled_dataset = [[tk for tk in st if not discard(tk)] for st in dataset] print('# tokens: %d' % sum([len(st) for st in subsampled_dataset])) def compare_counts(token): return '# %s: before=%d, after=%d' % (token, sum( [st.count(token_to_idx[token]) for st in dataset]), sum( [st.count(token_to_idx[token]) for st in subsampled_dataset])) print(compare_counts('the')) print(compare_counts('join')) ``` ### 提取中心词和背景词 ``` def get_centers_and_contexts(dataset, max_window_size): ''' @params: dataset: 数据集为句子的集合,每个句子则为单词的集合,此时单词已经被转换为相应数字下标 max_window_size: 背景词的词窗大小的最大值 @return: centers: 中心词的集合 
contexts: 背景词窗的集合,与中心词对应,每个背景词窗则为背景词的集合 ''' centers, contexts = [], [] for st in dataset: if len(st) < 2: # 每个句子至少要有2个词才可能组成一对“中心词-背景词” continue centers += st for center_i in range(len(st)): window_size = random.randint(1, max_window_size) # 随机选取背景词窗大小 indices = list(range(max(0, center_i - window_size), min(len(st), center_i + 1 + window_size))) indices.remove(center_i) # 将中心词排除在背景词之外 contexts.append([st[idx] for idx in indices]) return centers, contexts all_centers, all_contexts = get_centers_and_contexts(subsampled_dataset, 5) tiny_dataset = [list(range(7)), list(range(7, 10))] print('dataset', tiny_dataset) for center, context in zip(*get_centers_and_contexts(tiny_dataset, 2)): print('center', center, 'has contexts', context) ``` *注:数据批量读取的实现需要依赖负采样近似的实现,故放于负采样近似部分进行讲解。* ## Skip-Gram 跳字模型 在跳字模型中,每个词被表示成两个 $d$ 维向量,用来计算条件概率。假设这个词在词典中索引为 $i$ ,当它为中心词时向量表示为 $\boldsymbol{v}_i\in\mathbb{R}^d$,而为背景词时向量表示为 $\boldsymbol{u}_i\in\mathbb{R}^d$ 。设中心词 $w_c$ 在词典中索引为 $c$,背景词 $w_o$ 在词典中索引为 $o$,我们假设给定中心词生成背景词的条件概率满足下式: $$ P(w_o\mid w_c)=\frac{\exp(\boldsymbol{u}_o^\top \boldsymbol{v}_c)}{\sum_{i\in\mathcal{V}}\exp(\boldsymbol{u}_i^\top \boldsymbol{v}_c)} $$ ### PyTorch 预置的 Embedding 层 ``` embed = nn.Embedding(num_embeddings=10, embedding_dim=4) print(embed.weight) x = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.long) print(embed(x)) ``` ### PyTorch 预置的批量乘法 ``` X = torch.ones((2, 1, 4)) Y = torch.ones((2, 4, 6)) print(torch.bmm(X, Y).shape) ``` ### Skip-Gram 模型的前向计算 ``` def skip_gram(center, contexts_and_negatives, embed_v, embed_u): ''' @params: center: 中心词下标,形状为 (n, 1) 的整数张量 contexts_and_negatives: 背景词和噪音词下标,形状为 (n, m) 的整数张量 embed_v: 中心词的 embedding 层 embed_u: 背景词的 embedding 层 @return: pred: 中心词与背景词(或噪音词)的内积,之后可用于计算概率 p(w_o|w_c) ''' v = embed_v(center) # shape of (n, 1, d) u = embed_u(contexts_and_negatives) # shape of (n, m, d) pred = torch.bmm(v, u.permute(0, 2, 1)) # bmm((n, 1, d), (n, d, m)) => shape of (n, 1, m) return pred ``` ## 负采样近似 由于 softmax 运算考虑了背景词可能是词典 
$\mathcal{V}$ 中的任一词,对于含几十万或上百万词的较大词典,就可能导致计算的开销过大。我们将以 skip-gram 模型为例,介绍负采样 (negative sampling) 的实现来尝试解决这个问题。 负采样方法用以下公式来近似条件概率 $P(w_o\mid w_c)=\frac{\exp(\boldsymbol{u}_o^\top \boldsymbol{v}_c)}{\sum_{i\in\mathcal{V}}\exp(\boldsymbol{u}_i^\top \boldsymbol{v}_c)}$: $$ P(w_o\mid w_c)=P(D=1\mid w_c,w_o)\prod_{k=1,w_k\sim P(w)}^K P(D=0\mid w_c,w_k) $$ 其中 $P(D=1\mid w_c,w_o)=\sigma(\boldsymbol{u}_o^\top\boldsymbol{v}_c)$,$\sigma(\cdot)$ 为 sigmoid 函数。对于一对中心词和背景词,我们从词典中随机采样 $K$ 个噪声词(实验中设 $K=5$)。根据 Word2Vec 论文的建议,噪声词采样概率 $P(w)$ 设为 $w$ 词频与总词频之比的 $0.75$ 次方。 ``` def get_negatives(all_contexts, sampling_weights, K): ''' @params: all_contexts: [[w_o1, w_o2, ...], [...], ... ] sampling_weights: 每个单词的噪声词采样概率 K: 随机采样个数 @return: all_negatives: [[w_n1, w_n2, ...], [...], ...] ''' all_negatives, neg_candidates, i = [], [], 0 population = list(range(len(sampling_weights))) for contexts in all_contexts: negatives = [] while len(negatives) < len(contexts) * K: if i == len(neg_candidates): # 根据每个词的权重(sampling_weights)随机生成k个词的索引作为噪声词。 # 为了高效计算,可以将k设得稍大一点 i, neg_candidates = 0, random.choices( population, sampling_weights, k=int(1e5)) neg, i = neg_candidates[i], i + 1 # 噪声词不能是背景词 if neg not in set(contexts): negatives.append(neg) all_negatives.append(negatives) return all_negatives sampling_weights = [counter[w]**0.75 for w in idx_to_token] all_negatives = get_negatives(all_contexts, sampling_weights, 5) ``` *注:除负采样方法外,还有层序 softmax (hiererarchical softmax) 方法也可以用来解决计算量过大的问题,请参考[原书10.2.2节](https://zh.d2l.ai/chapter_natural-language-processing/approx-training.html#%E5%B1%82%E5%BA%8Fsoftmax)。* ### 批量读取数据 ``` class MyDataset(torch.utils.data.Dataset): def __init__(self, centers, contexts, negatives): assert len(centers) == len(contexts) == len(negatives) self.centers = centers self.contexts = contexts self.negatives = negatives def __getitem__(self, index): return (self.centers[index], self.contexts[index], self.negatives[index]) def __len__(self): return len(self.centers) def batchify(data): 
''' 用作DataLoader的参数collate_fn @params: data: 长为batch_size的列表,列表中的每个元素都是__getitem__得到的结果 @outputs: batch: 批量化后得到 (centers, contexts_negatives, masks, labels) 元组 centers: 中心词下标,形状为 (n, 1) 的整数张量 contexts_negatives: 背景词和噪声词的下标,形状为 (n, m) 的整数张量 masks: 与补齐相对应的掩码,形状为 (n, m) 的0/1整数张量 labels: 指示中心词的标签,形状为 (n, m) 的0/1整数张量 ''' max_len = max(len(c) + len(n) for _, c, n in data) centers, contexts_negatives, masks, labels = [], [], [], [] for center, context, negative in data: cur_len = len(context) + len(negative) centers += [center] contexts_negatives += [context + negative + [0] * (max_len - cur_len)] masks += [[1] * cur_len + [0] * (max_len - cur_len)] # 使用掩码变量mask来避免填充项对损失函数计算的影响 labels += [[1] * len(context) + [0] * (max_len - len(context))] batch = (torch.tensor(centers).view(-1, 1), torch.tensor(contexts_negatives), torch.tensor(masks), torch.tensor(labels)) return batch batch_size = 512 num_workers = 0 if sys.platform.startswith('win32') else 4 dataset = MyDataset(all_centers, all_contexts, all_negatives) data_iter = Data.DataLoader(dataset, batch_size, shuffle=True, collate_fn=batchify, num_workers=num_workers) for batch in data_iter: for name, data in zip(['centers', 'contexts_negatives', 'masks', 'labels'], batch): print(name, 'shape:', data.shape) break ``` ## 训练模型 ### 损失函数 应用负采样方法后,我们可利用最大似然估计的对数等价形式将损失函数定义为如下 $$ \sum_{t=1}^T\sum_{-m\le j\le m,j\ne 0} [-\log P(D=1\mid w^{(t)},w^{(t+j)})-\sum_{k=1,w_k\sim P(w)^K}\log P(D=0\mid w^{(t)},w_k)] $$ 根据这个损失函数的定义,我们可以直接使用二元交叉熵损失函数进行计算: ``` class SigmoidBinaryCrossEntropyLoss(nn.Module): def __init__(self): super(SigmoidBinaryCrossEntropyLoss, self).__init__() def forward(self, inputs, targets, mask=None): ''' @params: inputs: 经过sigmoid层后为预测D=1的概率 targets: 0/1向量,1代表背景词,0代表噪音词 @return: res: 平均到每个label的loss ''' inputs, targets, mask = inputs.float(), targets.float(), mask.float() res = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none", weight=mask) res = res.sum(dim=1) / mask.float().sum(dim=1) 
return res loss = SigmoidBinaryCrossEntropyLoss() pred = torch.tensor([[1.5, 0.3, -1, 2], [1.1, -0.6, 2.2, 0.4]]) label = torch.tensor([[1, 0, 0, 0], [1, 1, 0, 0]]) # 标签变量label中的1和0分别代表背景词和噪声词 mask = torch.tensor([[1, 1, 1, 1], [1, 1, 1, 0]]) # 掩码变量 print(loss(pred, label, mask)) def sigmd(x): return - math.log(1 / (1 + math.exp(-x))) print('%.4f' % ((sigmd(1.5) + sigmd(-0.3) + sigmd(1) + sigmd(-2)) / 4)) # 注意1-sigmoid(x) = sigmoid(-x) print('%.4f' % ((sigmd(1.1) + sigmd(-0.6) + sigmd(-2.2)) / 3)) ``` ### 模型初始化 ``` embed_size = 100 net = nn.Sequential(nn.Embedding(num_embeddings=len(idx_to_token), embedding_dim=embed_size), nn.Embedding(num_embeddings=len(idx_to_token), embedding_dim=embed_size)) ``` ### 训练模型 ``` def train(net, lr, num_epochs): device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print("train on", device) net = net.to(device) optimizer = torch.optim.Adam(net.parameters(), lr=lr) for epoch in range(num_epochs): start, l_sum, n = time.time(), 0.0, 0 for batch in data_iter: center, context_negative, mask, label = [d.to(device) for d in batch] pred = skip_gram(center, context_negative, net[0], net[1]) l = loss(pred.view(label.shape), label, mask).mean() # 一个batch的平均loss optimizer.zero_grad() l.backward() optimizer.step() l_sum += l.cpu().item() n += 1 print('epoch %d, loss %.2f, time %.2fs' % (epoch + 1, l_sum / n, time.time() - start)) train(net, 0.01, 5) ``` ``` train on cpu epoch 1, loss 0.61, time 221.30s epoch 2, loss 0.42, time 227.70s epoch 3, loss 0.38, time 240.50s epoch 4, loss 0.36, time 253.79s epoch 5, loss 0.34, time 238.51s ``` *注:由于本地CPU上训练时间过长,故只截取了运行的结果,后同。大家可以自行在网站上训练。* ### 测试模型 ``` def get_similar_tokens(query_token, k, embed): ''' @params: query_token: 给定的词语 k: 近义词的个数 embed: 预训练词向量 ''' W = embed.weight.data x = W[token_to_idx[query_token]] # 添加的1e-9是为了数值稳定性 cos = torch.matmul(W, x) / (torch.sum(W * W, dim=1) * torch.sum(x * x) + 1e-9).sqrt() _, topk = torch.topk(cos, k=k+1) topk = topk.cpu().numpy() for i in 
topk[1:]: # 除去输入词 print('cosine sim=%.3f: %s' % (cos[i], (idx_to_token[i]))) get_similar_tokens('chip', 3, net[0]) ``` ``` cosine sim=0.446: intel cosine sim=0.427: computer cosine sim=0.427: computers ``` ## 参考 * [Dive into Deep Learning](https://d2l.ai/chapter_natural-language-processing/word2vec.html). Ch14.1-14.4. * [动手学深度学习](http://zh.gluon.ai/chapter_natural-language-processing/word2vec.html). Ch10.1-10.3. * [Dive-into-DL-PyTorch on GitHub](https://github.com/ShusenTang/Dive-into-DL-PyTorch/blob/master/code/chapter10_natural-language-processing/10.3_word2vec-pytorch.ipynb)
github_jupyter
## Gating fixed primitives AliGater contains a range of gating functions, from the basics you expect to more niche functions. Below is a rough illustration of some standard _fixed_ gates. Many of these are used internally in AliGater pattern-recognition functions but might sometimes be useful for the user directly. For the exact usage of each function, please refer to the function's documentation. See the 'example strategy' notebook for a more coherent way of stringing functions & pattern recognition together to achieve flexible solutions. ``` import aligater as ag fcs = ag.loadFCS(ag.AGConfig.ag_home+"tutorial/data/example1.fcs", flourochrome_area_filter=True, return_type="agsample") #Overview of the ungated sample in forward and sidescatter ag.plotHeatmap(fcs(), x="FSC 488/10-A", y="SSC 488/10-A") ``` **gateThreshold** can be used to cut a view, either 1-dimensionally (producing a density histogram) or 2-dimensionally, drawing a straight line in the 2D-view. The version used will depend on the presence of a 'yCol' argument. ``` ag.gateThreshold(fcs, name="tmp", xCol="FSC 488/10-A", yCol="SSC 488/10-A", thresh=205000, orientation='vertical', population='lower') #The same cut again, but in a 1-D histogram #We will save the output gate object as 'no_clutter'. no_clutter = ag.gateThreshold(fcs, name="tmp", xCol="FSC 488/10-A", thresh=205000, population='lower') ``` **quadGate** lets you draw a standard four-field gate, returning four AGgate objects. The AGgate objects are returned clockwise, in order: top-left, top-right, bottom-right, bottom-left. ``` topLeft, topRight, bottomRight, bottomLeft = ag.quadGate(fcs, names=['1','2','3','4'], xCol="FSC 488/10-A", yCol="SSC 488/10-A", parentGate=no_clutter, xThresh=100000,yThresh=40000) ``` **customQuadGate** is a useful similar function that lets you shift two limits in a quadgate. The thresholds are then passed as a list; one of the lines has to be fixed - i.e. their limits match.
The passed threshold-list should contain float limits, in order: ybottom, ytop, xleft, xright. ``` topLeft, topRight, bottomRight, bottomLeft = ag.customQuadGate(fcs, names=["1","2","3","4"], xCol="FSC 488/10-A", yCol="SSC 488/10-A", parentGate=no_clutter, threshList=[100000,100000, 40000,80000]) ``` **boxGate** draws a box-gate in the view. ``` box_pop = ag.gateBox(fcs, name="tmp", xCol="FSC 488/10-A", yCol="SSC 488/10-A", parentGate=no_clutter, xThreshLeft=25000,xThreshRight=175000, yThreshBottom=5000, yThreshTop=75000) ``` **EllipseGate** draws a circle or ellipse in the view. NOTE: this function is a wrapper for gateEllipsoid, which is implemented in Cython; that function can be used instead. NOTE2: if gating an ellipse in a view with scales applied, such as bilog or logish, width and height should be passed in the transformed scale. ``` ag.EllipseGate(fcs, name="tmp", xCol="FSC 488/10-A", yCol="SSC 488/10-A", parentGate=None, center=[110000,30000],width=75000, height=30000, angle=35) ```
github_jupyter
## <div style="text-align: center">A Journey with Scikit-Learn + 20 ML Algorithms</div> <div style="text-align: center">There are plenty of <b>courses and tutorials</b> that can help you learn Scikit-Learn from scratch but here in <b>Kaggle</b>, After reading, you can use this workflow to solve other real problems and use it as a template to deal with <b>machine learning</b> problems.</div> <div style="text-align:center">last update: <b>11/13/2018</b></div> <img src="http://scikit-learn.org/stable/_images/scikit-learn-logo-notext.png"> >###### you may be interested have a look at it: [**10-steps-to-become-a-data-scientist**](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist) --------------------------------------------------------------------- you can Fork and Run this kernel on Github: > ###### [ GitHub](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist) ------------------------------------------------------------------------------------------------------------- **I hope you find this kernel helpful and some <font color="red"><b>UPVOTES</b></font> would be very much appreciated** ----------- <a id="top"></a> <br> ## Notebook Content 1. [Introduction](#1) 1. [Import](#2) 1. [Version](#3) 1. [Algorithms](#4) 1. [Data Collection](#5) 1. [Framework](#6) 1. [Applications](#7) 1. [How to use Sklearn Data Set? ](#8) 1. [Loading external data](#9) 1. [Model Deployment](#10) 1. [Families of ML algorithms](#11) 1. [Prepare Features & Targets](#12) 1. [Accuracy and precision](#13) 1. [Estimators](#14) 1. [Predictors](#15) 1. [K-Nearest Neighbours](#16) 1. [Radius Neighbors Classifier](#17) 1. [Logistic Regression](#18) 1. [Passive Aggressive Classifier](#19) 1. [Naive Bayes](#20) 1. [BernoulliNB](#21) 1. [SVM](#22) 1. [Nu-Support Vector Classification](#23) 1. [Linear Support Vector Classification](#24) 1. [Decision Tree](#25) 1. [ExtraTreeClassifier](#26) 1. [Neural network](#27) 1. [What is a Perceptron?](#28) 1. [The XOR Problem](#29) 1. 
[RandomForest](#30) 1. [Bagging classifier ](#31) 1. [AdaBoost classifier](#32) 1. [Gradient Boosting Classifier](#33) 1. [Linear Discriminant Analysis](#34) 1. [Quadratic Discriminant Analysis](#35) 1. [Kmeans](#36) 1. [conclusion](#37) 1. [References](#38) <a id="1"></a> <br> # 1-Introduction - The __open source__ Python ecosystem provides __a standalone, versatile and powerful scientific working environment__, including: [NumPy](http://numpy.org), [SciPy](http://scipy.org), [IPython](http://ipython.org), [Matplotlib](http://matplotlib.org), [Pandas](http://pandas.pydata.org/), _and many others..._ - Scikit-Learn builds upon NumPy and SciPy and __complements__ this scientific environment with machine learning algorithms; - By design, Scikit-Learn is __non-intrusive__, easy to use and easy to combine with other libraries; - Core algorithms are implemented in low-level languages. <a id="2"></a> <br> ## 1-1 Import ``` from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import classification_report from sklearn.gaussian_process.kernels import RBF from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.decomposition import PCA import matplotlib.pyplot as plt from pandas import get_dummies import plotly.graph_objs as go from sklearn import datasets from sklearn.svm import SVC import seaborn as sns import pandas as pd import numpy as np import matplotlib import warnings import sklearn import scipy import numpy import json import sys import csv import os ``` <a id="3"></a> <br> ## 1-2 Version ``` print('matplotlib: {}'.format(matplotlib.__version__)) print('sklearn: {}'.format(sklearn.__version__)) print('scipy: {}'.format(scipy.__version__)) print('seaborn: {}'.format(sns.__version__)) print('pandas: {}'.format(pd.__version__)) print('numpy: {}'.format(np.__version__)) print('Python: 
{}'.format(sys.version)) ``` ## 1-3 Setup A few tiny adjustments for better **code readability** ``` sns.set(style='white', context='notebook', palette='deep') warnings.filterwarnings('ignore') sns.set_style('white') %matplotlib inline ``` <a id="4"></a> <br> # 2- Algorithms **Supervised learning**: * Linear models (Ridge, Lasso, Elastic Net, ...) * Support Vector Machines * Tree-based methods (Random Forests, Bagging, GBRT, ...) * Nearest neighbors * Neural networks (basics) * Gaussian Processes * Feature selection **Unsupervised learning**: * Clustering (KMeans, Ward, ...) * Matrix decomposition (PCA, ICA, ...) * Density estimation * Outlier detection __Model selection and evaluation:__ * Cross-validation * Grid-search * Lots of metrics _... and many more!_ (See our [Reference](http://scikit-learn.org/dev/modules/classes.html)) <a id="5"></a> <br> ## 2-1 Data Collection **Data collection** is the process of gathering and measuring data, information or any variables of interest in a standardized and established manner that enables the collector to answer or test hypothesis and evaluate outcomes of the particular collection.[techopedia] **Iris dataset** consists of 3 different types of irises’ (Setosa, Versicolour, and Virginica) petal and sepal length, stored in a 150x4 numpy.ndarray The rows being the samples and the columns being: Sepal Length, Sepal Width, Petal Length and Petal Width.[6] ###### [Go to top](#top) ``` # import Dataset to play with it dataset = pd.read_csv('../input/Iris.csv') ``` **<< Note 1 >>** * Each row is an observation (also known as : sample, example, instance, record) * Each column is a feature (also known as: Predictor, attribute, Independent Variable, input, regressor, Covariate) After loading the data via **pandas**, we should checkout what the content is, description and via the following: ``` type(dataset) ``` <a id="6"></a> <br> # 3- Framework Data comes as a finite learning set ${\cal L} = (X, y)$ where * Input samples are given 
as an array $X$ of shape `n_samples` $\times$ `n_features`, taking their values in ${\cal X}$; * Output values are given as an array $y$, taking _symbolic_ values in ${\cal Y}$. ###### [Go to top](#top) The goal of supervised classification is to build an estimator $\varphi: {\cal X} \mapsto {\cal Y}$ minimizing $$ Err(\varphi) = \mathbb{E}_{X,Y}\{ \ell(Y, \varphi(X)) \} $$ where $\ell$ is a loss function, e.g., the zero-one loss for classification $\ell_{01}(Y,\hat{Y}) = 1(Y \neq \hat{Y})$. <a id="7"></a> <br> # 4- Applications - **Classifying** signal from background events; - **Diagnosing** disease from symptoms; - **Recognising** cats in pictures; - **Identifying** body parts with Kinect cameras; - ... ###### [Go to top](#top) <a id="8"></a> <br> # 5- How to use Sklearn Data Set? - Input data = Numpy arrays or Scipy sparse matrices ; - Algorithms are expressed using high-level operations defined on matrices or vectors (similar to MATLAB) ; - Leverage efficient low-leverage implementations ; - Keep code short and readable. 
``` # Generate data from sklearn.datasets import make_blobs import numpy as np X, y = make_blobs(n_samples=1000, centers=20, random_state=123) labels = ["b", "r"] y = np.take(labels, (y < 10)) print(X) print(y[:5]) # X is a 2 dimensional array, with 1000 rows and 2 columns print(X.shape) # y is a vector of 1000 elements print(y.shape) # Rows and columns can be accessed with lists, slices or masks print(X[[1, 2, 3]]) # rows 1, 2 and 3 print(X[:5]) # 5 first rows print(X[500:510, 0]) # values from row 500 to row 510 at column 0 print(X[y == "b"][:5]) # 5 first rows for which y is "b" # Plot from matplotlib import pyplot as plt plt.rcParams["figure.figsize"] = (8, 8) plt.rcParams["figure.max_open_warning"] = -1 plt.figure() for label in labels: mask = (y == label) plt.scatter(X[mask, 0], X[mask, 1], c=label) plt.xlim(-10, 10) plt.ylim(-10, 10) plt.show() from sklearn.datasets import load_wine data = load_wine() data.target[[10, 80, 140]] list(data.target_names) ``` <a id="9"></a> <br> # 6- Loading external data - Numpy provides some [simple tools](https://docs.scipy.org/doc/numpy/reference/routines.io.html) for loading data from files (CSV, binary, etc); - For structured data, Pandas provides more [advanced tools](http://pandas.pydata.org/pandas-docs/stable/io.html) (CSV, JSON, Excel, HDF5, SQL, etc); ###### [Go to top](#top) ## 6-1 what is new? A new clustering algorithm: cluster.**OPTICS**: an algoritm related to cluster.**DBSCAN**, that has hyperparameters easier to set and that scales better ``` from sklearn.cluster import DBSCAN import numpy as np X = np.array([[1, 2], [2, 2], [2, 3],[8, 7], [8, 8], [25, 80]]) clustering = DBSCAN(eps=3, min_samples=2).fit(X) clustering.labels_ clustering ``` ## 6-2 Tip & Trick In this section we gather some useful advice and tools that may increase your quality-of-life when reviewing pull requests, running unit tests, and so forth. 
Some of these tricks consist of userscripts that require a browser extension such as TamperMonkey or GreaseMonkey; to set up userscripts you must have one of these extensions installed, enabled and running. We provide userscripts as GitHub gists; to install them, click on the “Raw” button on the gist page. ### 6-2-1 Profiling Python code ``` from sklearn.decomposition import NMF from sklearn.datasets import load_digits X = load_digits().data %timeit NMF(n_components=16, tol=1e-2).fit(X) ``` <a id="10"></a> <br> # 7- Model Deployment All learning algorithms in scikit-learn share a uniform and limited API consisting of complementary interfaces: - an `estimator` interface for building and fitting models; - a `predictor` interface for making predictions; - a `transformer` interface for converting data. Goal: enforce a simple and consistent API to __make it trivial to swap or plug algorithms__. In this section have been applied more than **20 learning algorithms** that play an important rule in your experiences and improve your knowledge in case of using sklearn. > **<< Note 3 >>** : The results shown here may be slightly different for your analysis because, for example, the neural network algorithms use random number generators for fixing the initial value of the weights (starting points) of the neural networks, which often result in obtaining slightly different (local minima) solutions each time you run the analysis. Also note that changing the seed for the random number generator used to create the train, test, and validation samples can change your results. 
###### [Go to top](#top) <a id="11"></a> <br> ## 7-1 Families of ML algorithms There are several categories for machine learning algorithms, below are some of these categories: * Linear * Linear Regression * Logistic Regression * Support Vector Machines * Tree-Based * Decision Tree * Random Forest * GBDT * KNN * Neural Networks ----------------------------- And if we want to categorize ML algorithms by the type of learning, there are the types below: * Classification * k-Nearest Neighbors * LinearRegression * SVM * DT * NN * clustering * K-means * HCA * Expectation Maximization * Visualization and dimensionality reduction: * Principal Component Analysis(PCA) * Kernel PCA * Locally -Linear Embedding (LLE) * t-distributed Stochastic Neighbor Embedding (t-SNE) * Association rule learning * Apriori * Eclat * Semisupervised learning * Reinforcement Learning * Q-learning * Batch learning & Online learning * Ensemble Learning **<< Note >>** > There is no method which outperforms all others for all tasks ###### [Go to top](#top) <a id="12"></a> <br> ## 7-2 Prepare Features & Targets First of all, separate the data into independent (**Feature**) and dependent (**Target**) variables. **<< Note 4 >>** * X==>>Feature * y==>>Target ## Test error Issue: the training error is a __biased__ estimate of the generalization error. Solution: Divide ${\cal L}$ into two disjoint parts called training and test sets (usually using 70% for training and 30% for test). - Use the training set for fitting the model; - Use the test set for evaluation only, thereby yielding an unbiased estimate. 
###### [Go to top](#top) ``` X = dataset.iloc[:, :-1].values y = dataset.iloc[:, -1].values # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0) ``` <a id="13"></a> <br> ## 7-3 Accuracy and precision - Recall that we want to learn an estimator $\varphi$ minimizing the generalization error $Err(\varphi) = \mathbb{E}_{X,Y}\{ \ell(Y, \varphi(X)) \}$. - Problem: Since $P_{X,Y}$ is unknown, the generalization error $Err(\varphi)$ cannot be evaluated. - Solution: Use a proxy to approximate $Err(\varphi)$. * **precision** : In pattern recognition, information retrieval and binary classification, precision (also called positive predictive value) is the fraction of relevant instances among the retrieved instances, * **recall** : recall is the fraction of relevant instances that have been retrieved over the total amount of relevant instances. * **F-score** : the F1 score is a measure of a test's accuracy. It considers both the precision p and the recall r of the test to compute the score: p is the number of correct positive results divided by the number of all positive results returned by the classifier, and r is the number of correct positive results divided by the number of all relevant samples (all samples that should have been identified as positive). The F1 score is the harmonic average of the precision and recall, where an F1 score reaches its best value at 1 (perfect precision and recall) and worst at 0. **What is the difference between accuracy and precision?** "Accuracy" and "precision" are general terms throughout science. A good way to internalize the difference are the common "bullseye diagrams". In machine learning/statistics as a whole, accuracy vs. precision is analogous to bias vs. variance. 
###### [Go to top](#top) <a id="14"></a> <br> ## 7-4- Estimators ``` class Estimator(object): def fit(self, X, y=None): """Fits estimator to data.""" # set state of ``self`` return self # Import the nearest neighbor class from sklearn.neighbors import KNeighborsClassifier # Change this to try # something else # Set hyper-parameters, for controlling algorithm clf = KNeighborsClassifier(n_neighbors=5) # Learn a model from training data clf.fit(X, y) # Estimator state is stored in instance attributes clf._tree ``` <a id="15"></a> <br> ## 7-5- Predictors ``` # Make predictions print(clf.predict(X[:5])) # Compute (approximate) class probabilities print(clf.predict_proba(X[:5])) ``` <a id="16"></a> <br> ## 7-6 K-Nearest Neighbours In **Machine Learning**, the **k-nearest neighbors algorithm** (k-NN) is a non-parametric method used for classification and regression. In both cases, the input consists of the k closest training examples in the feature space. The output depends on whether k-NN is used for classification or regression: In k-NN classification, the output is a class membership. An object is classified by a majority vote of its neighbors, with the object being assigned to the class most common among its k nearest neighbors (k is a positive integer, typically small). If k = 1, then the object is simply assigned to the class of that single nearest neighbor. In k-NN regression, the output is the property value for the object. This value is the average of the values of its k nearest neighbors. k-NN is a type of instance-based learning, or lazy learning, where the function is only approximated locally and all computation is deferred until classification. The k-NN algorithm is among the simplest of all machine learning algorithms. 
###### [Go to top](#top) ``` # K-Nearest Neighbours from sklearn.neighbors import KNeighborsClassifier Model = KNeighborsClassifier(n_neighbors=8) Model.fit(X_train, y_train) y_pred = Model.predict(X_test) # Summary of the predictions made by the classifier print(classification_report(y_test, y_pred)) print(confusion_matrix(y_test, y_pred)) # Accuracy score print('accuracy is',accuracy_score(y_pred,y_test)) ``` <a id="17"></a> <br> ## 7-7 Radius Neighbors Classifier Classifier implementing a **vote** among neighbors within a given **radius** In scikit-learn **RadiusNeighborsClassifier** is very similar to **KNeighborsClassifier** with the exception of two parameters. First, in RadiusNeighborsClassifier we need to specify the radius of the fixed area used to determine if an observation is a neighbor using radius. Unless there is some substantive reason for setting radius to some value, it is best to treat it like any other hyperparameter and tune it during model selection. The second useful parameter is outlier_label, which indicates what label to give an observation that has no observations within the radius - which itself can often be a useful tool for identifying outliers. ###### [Go to top](#top) ``` from sklearn.neighbors import RadiusNeighborsClassifier Model=RadiusNeighborsClassifier(radius=8.0) Model.fit(X_train,y_train) y_pred=Model.predict(X_test) #summary of the predictions made by the classifier print(classification_report(y_test,y_pred)) print(confusion_matrix(y_test,y_pred)) #Accouracy score print('accuracy is ', accuracy_score(y_test,y_pred)) ``` <a id="18"></a> <br> ## 7-8 Logistic Regression Logistic regression is the appropriate regression analysis to conduct when the dependent variable is **dichotomous** (binary). Like all regression analyses, the logistic regression is a **predictive analysis**. 
In statistics, the logistic model (or logit model) is a widely used statistical model that, in its basic form, uses a logistic function to model a binary dependent variable; many more complex extensions exist. In regression analysis, logistic regression (or logit regression) is estimating the parameters of a logistic model; it is a form of binomial regression. Mathematically, a binary logistic model has a dependent variable with two possible values, such as pass/fail, win/lose, alive/dead or healthy/sick; these are represented by an indicator variable, where the two values are labeled "0" and "1" ###### [Go to top](#top) ``` # LogisticRegression from sklearn.linear_model import LogisticRegression Model = LogisticRegression() Model.fit(X_train, y_train) y_pred = Model.predict(X_test) # Summary of the predictions made by the classifier print(classification_report(y_test, y_pred)) print(confusion_matrix(y_test, y_pred)) # Accuracy score print('accuracy is',accuracy_score(y_pred,y_test)) ``` <a id="19"></a> <br> ## 7-9 Passive Aggressive Classifier ``` from sklearn.linear_model import PassiveAggressiveClassifier Model = PassiveAggressiveClassifier() Model.fit(X_train, y_train) y_pred = Model.predict(X_test) # Summary of the predictions made by the classifier print(classification_report(y_test, y_pred)) print(confusion_matrix(y_test, y_pred)) # Accuracy score print('accuracy is',accuracy_score(y_pred,y_test)) ``` <a id="20"></a> <br> ## 7-10 Naive Bayes In machine learning, naive Bayes classifiers are a family of simple "**probabilistic classifiers**" based on applying Bayes' theorem with strong (naive) independence assumptions between the features. 
``` # Naive Bayes from sklearn.naive_bayes import GaussianNB Model = GaussianNB() Model.fit(X_train, y_train) y_pred = Model.predict(X_test) # Summary of the predictions made by the classifier print(classification_report(y_test, y_pred)) print(confusion_matrix(y_test, y_pred)) # Accuracy score print('accuracy is',accuracy_score(y_pred,y_test)) ``` <a id="21"></a> <br> ## 7-11 BernoulliNB Like MultinomialNB, this classifier is suitable for **discrete data**. The difference is that while MultinomialNB works with occurrence counts, BernoulliNB is designed for binary/boolean features. ``` # BernoulliNB from sklearn.naive_bayes import BernoulliNB Model = BernoulliNB() Model.fit(X_train, y_train) y_pred = Model.predict(X_test) # Summary of the predictions made by the classifier print(classification_report(y_test, y_pred)) print(confusion_matrix(y_test, y_pred)) # Accuracy score print('accuracy is',accuracy_score(y_pred,y_test)) ``` <a id="22"></a> <br> ## 7-12 SVM The advantages of support vector machines are: * Effective in high dimensional spaces. * Still effective in cases where number of dimensions is greater than the number of samples. * Uses a subset of training points in the decision function (called support vectors), so it is also memory efficient. * Versatile: different Kernel functions can be specified for the decision function. Common kernels are provided, but it is also possible to specify custom kernels. The disadvantages of support vector machines include: * If the number of features is much greater than the number of samples, avoid over-fitting in choosing Kernel functions and regularization term is crucial. 
* SVMs do not directly provide probability estimates, these are calculated using an expensive five-fold cross-validation ###### [Go to top](#top) ``` # Support Vector Machine from sklearn.svm import SVC Model = SVC() Model.fit(X_train, y_train) y_pred = Model.predict(X_test) # Summary of the predictions made by the classifier print(classification_report(y_test, y_pred)) print(confusion_matrix(y_test, y_pred)) # Accuracy score print('accuracy is',accuracy_score(y_pred,y_test)) ``` <a id="23"></a> <br> ## 7-13 Nu-Support Vector Classification > Similar to SVC but uses a parameter to control the number of support vectors. ``` # Support Vector Machine's from sklearn.svm import NuSVC Model = NuSVC() Model.fit(X_train, y_train) y_pred = Model.predict(X_test) # Summary of the predictions made by the classifier print(classification_report(y_test, y_pred)) print(confusion_matrix(y_test, y_pred)) # Accuracy score print('accuracy is',accuracy_score(y_pred,y_test)) ``` <a id="24"></a> <br> ## 7-14 Linear Support Vector Classification Similar to **SVC** with parameter kernel=’linear’, but implemented in terms of liblinear rather than libsvm, so it has more flexibility in the choice of penalties and loss functions and should scale better to large numbers of samples. ###### [Go to top](#top) ``` # Linear Support Vector Classification from sklearn.svm import LinearSVC Model = LinearSVC() Model.fit(X_train, y_train) y_pred = Model.predict(X_test) # Summary of the predictions made by the classifier print(classification_report(y_test, y_pred)) print(confusion_matrix(y_test, y_pred)) # Accuracy score print('accuracy is',accuracy_score(y_pred,y_test)) ``` <a id="25"></a> <br> ## 7-15 Decision Tree Decision Trees (DTs) are a non-parametric supervised learning method used for **classification** and **regression**. The goal is to create a model that predicts the value of a target variable by learning simple **decision rules** inferred from the data features. 
###### [Go to top](#top) ``` # Decision Tree's from sklearn.tree import DecisionTreeClassifier Model = DecisionTreeClassifier() Model.fit(X_train, y_train) y_pred = Model.predict(X_test) # Summary of the predictions made by the classifier print(classification_report(y_test, y_pred)) print(confusion_matrix(y_test, y_pred)) # Accuracy score print('accuracy is',accuracy_score(y_pred,y_test)) ``` <a id="26"></a> <br> ## 7-16 ExtraTreeClassifier An extremely randomized tree classifier. Extra-trees differ from classic decision trees in the way they are built. When looking for the best split to separate the samples of a node into two groups, random splits are drawn for each of the **max_features** randomly selected features and the best split among those is chosen. When max_features is set 1, this amounts to building a totally random decision tree. **Warning**: Extra-trees should only be used within ensemble methods. ###### [Go to top](#top) ``` # ExtraTreeClassifier from sklearn.tree import ExtraTreeClassifier Model = ExtraTreeClassifier() Model.fit(X_train, y_train) y_pred = Model.predict(X_test) # Summary of the predictions made by the classifier print(classification_report(y_test, y_pred)) print(confusion_matrix(y_test, y_pred)) # Accuracy score print('accuracy is',accuracy_score(y_pred,y_test)) ``` <a id="27"></a> <br> ## 7-17 Neural network I have used multi-layer Perceptron classifier. This model optimizes the log-loss function using **LBFGS** or **stochastic gradient descent**. ###### [Go to top](#top) <a id="28"></a> <br> ## 7-17-1 What is a Perceptron? There are many online examples and tutorials on perceptrons and learning. Here is a list of some articles: - [Wikipedia on Perceptrons](https://en.wikipedia.org/wiki/Perceptron) - Jurafsky and Martin (ed. 3), Chapter 8 ###### [Go to top](#top) This is an example that I have taken from a draft of the 3rd edition of Jurafsky and Martin, with slight modifications: We import *numpy* and use its *exp* function. 
We could use the same function from the *math* module, or some other module like *scipy*. The *sigmoid* function is defined as in the textbook: ``` import numpy as np def sigmoid(z): return 1 / (1 + np.exp(-z)) ``` Our example data, **weights** $w$, **bias** $b$, and **input** $x$ are defined as: ``` w = np.array([0.2, 0.3, 0.8]) b = 0.5 x = np.array([0.5, 0.6, 0.1]) ``` Our neural unit would compute $z$ as the **dot-product** $w \cdot x$ and add the **bias** $b$ to it. The sigmoid function defined above will convert this $z$ value to the **activation value** $a$ of the unit: ``` z = w.dot(x) + b print("z:", z) print("a:", sigmoid(z)) ``` <a id="29"></a> <br> ### 7-17-2 The XOR Problem The power of neural units comes from combining them into larger networks. Minsky and Papert (1969): A single neural unit cannot compute the simple logical function XOR. The task is to implement a simple **perceptron** to compute logical operations like AND, OR, and XOR. - Input: $x_1$ and $x_2$ - Bias: $b = -1$ for AND; $b = 0$ for OR - Weights: $w = [1, 1]$ with the following activation function: $$ y = \begin{cases} \ 0 & \quad \text{if } w \cdot x + b \leq 0\\ \ 1 & \quad \text{if } w \cdot x + b > 0 \end{cases} $$ ###### [Go to top](#top) We can define this activation function in Python as: ``` def activation(z): if z > 0: return 1 return 0 ``` For AND we could implement a perceptron as: ``` w = np.array([1, 1]) b = -1 x = np.array([0, 0]) print("0 AND 0:", activation(w.dot(x) + b)) x = np.array([1, 0]) print("1 AND 0:", activation(w.dot(x) + b)) x = np.array([0, 1]) print("0 AND 1:", activation(w.dot(x) + b)) x = np.array([1, 1]) print("1 AND 1:", activation(w.dot(x) + b)) ``` For OR we could implement a perceptron as: ``` w = np.array([1, 1]) b = 0 x = np.array([0, 0]) print("0 OR 0:", activation(w.dot(x) + b)) x = np.array([1, 0]) print("1 OR 0:", activation(w.dot(x) + b)) x = np.array([0, 1]) print("0 OR 1:", activation(w.dot(x) + b)) x = np.array([1, 1]) print("1 OR 1:", 
activation(w.dot(x) + b)) ``` There is no way to implement a perceptron for XOR this way. Now see our prediction for iris ``` from sklearn.neural_network import MLPClassifier Model=MLPClassifier() Model.fit(X_train,y_train) y_pred=Model.predict(X_test) # Summary of the predictions print(classification_report(y_test,y_pred)) print(confusion_matrix(y_test,y_pred)) #Accuracy Score print('accuracy is ',accuracy_score(y_pred,y_test)) ``` <a id="30"></a> <br> ## 7-18 RandomForest A random forest is a meta estimator that **fits a number of decision tree classifiers** on various sub-samples of the dataset and uses averaging to improve the predictive accuracy and control over-fitting. The sub-sample size is always the same as the original input sample size but the samples are drawn with replacement if bootstrap=True (default). ###### [Go to top](#top) ``` from sklearn.ensemble import RandomForestClassifier Model=RandomForestClassifier(max_depth=2) Model.fit(X_train,y_train) y_pred=Model.predict(X_test) print(classification_report(y_test,y_pred)) print(confusion_matrix(y_pred,y_test)) #Accuracy Score print('accuracy is ',accuracy_score(y_pred,y_test)) ``` <a id="31"></a> <br> ## 7-19 Bagging classifier A Bagging classifier is an ensemble **meta-estimator** that fits base classifiers each on random subsets of the original dataset and then aggregates their individual predictions (either by voting or by averaging) to form a final prediction. Such a meta-estimator can typically be used as a way to reduce the variance of a black-box estimator (e.g., a decision tree), by introducing randomization into its construction procedure and then making an ensemble out of it. This algorithm encompasses several works from the literature. When random subsets of the dataset are drawn as random subsets of the samples, then this algorithm is known as Pasting . If samples are drawn with replacement, then the method is known as Bagging . 
When random subsets of the dataset are drawn as random subsets of the features, then the method is known as Random Subspaces . Finally, when base estimators are built on subsets of both samples and features, then the method is known as Random Patches .[http://scikit-learn.org] ###### [Go to top](#top) ``` from sklearn.ensemble import BaggingClassifier Model=BaggingClassifier() Model.fit(X_train,y_train) y_pred=Model.predict(X_test) print(classification_report(y_test,y_pred)) print(confusion_matrix(y_pred,y_test)) #Accuracy Score print('accuracy is ',accuracy_score(y_pred,y_test)) ``` <a id="32"></a> <br> ## 7-20 AdaBoost classifier An AdaBoost classifier is a meta-estimator that begins by fitting a classifier on the original dataset and then fits additional copies of the classifier on the same dataset but where the weights of incorrectly classified instances are adjusted such that subsequent classifiers focus more on difficult cases. This class implements the algorithm known as **AdaBoost-SAMME** . ``` from sklearn.ensemble import AdaBoostClassifier Model=AdaBoostClassifier() Model.fit(X_train,y_train) y_pred=Model.predict(X_test) print(classification_report(y_test,y_pred)) print(confusion_matrix(y_pred,y_test)) #Accuracy Score print('accuracy is ',accuracy_score(y_pred,y_test)) ``` <a id="33"></a> <br> ## 7-21 Gradient Boosting Classifier GB builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions. 
``` from sklearn.ensemble import GradientBoostingClassifier Model=GradientBoostingClassifier() Model.fit(X_train,y_train) y_pred=Model.predict(X_test) print(classification_report(y_test,y_pred)) print(confusion_matrix(y_pred,y_test)) #Accuracy Score print('accuracy is ',accuracy_score(y_pred,y_test)) ``` <a id="34"></a> <br> ## 7-22 Linear Discriminant Analysis Linear Discriminant Analysis (discriminant_analysis.LinearDiscriminantAnalysis) and Quadratic Discriminant Analysis (discriminant_analysis.QuadraticDiscriminantAnalysis) are two classic classifiers, with, as their names suggest, a **linear and a quadratic decision surface**, respectively. These classifiers are attractive because they have closed-form solutions that can be easily computed, are inherently multiclass, have proven to work well in practice, and have no **hyperparameters** to tune. ###### [Go to top](#top) ``` from sklearn.discriminant_analysis import LinearDiscriminantAnalysis Model=LinearDiscriminantAnalysis() Model.fit(X_train,y_train) y_pred=Model.predict(X_test) print(classification_report(y_test,y_pred)) print(confusion_matrix(y_pred,y_test)) #Accuracy Score print('accuracy is ',accuracy_score(y_pred,y_test)) ``` <a id="35"></a> <br> ## 7-23 Quadratic Discriminant Analysis A classifier with a quadratic decision boundary, generated by fitting class conditional densities to the data and using Bayes’ rule. The model fits a **Gaussian** density to each class. ###### [Go to top](#top) ``` from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis Model=QuadraticDiscriminantAnalysis() Model.fit(X_train,y_train) y_pred=Model.predict(X_test) print(classification_report(y_test,y_pred)) print(confusion_matrix(y_pred,y_test)) #Accuracy Score print('accuracy is ',accuracy_score(y_pred,y_test)) ``` <a id="36"></a> <br> ## 7-24 Kmeans K-means clustering is a type of unsupervised learning, which is used when you have unlabeled data (i.e., data without defined categories or groups). 
The goal of this algorithm is **to find groups in the data**, with the number of groups represented by the variable K. The algorithm works iteratively to assign each data point to one of K groups based on the features that are provided. ###### [Go to top](#top) ``` from sklearn.cluster import KMeans iris_SP = dataset[['SepalLengthCm','SepalWidthCm','PetalLengthCm','PetalWidthCm']] # k-means cluster analysis for 1-15 clusters from scipy.spatial.distance import cdist clusters=range(1,15) meandist=[] # loop through each cluster and fit the model to the train set # generate the predicted cluster assingment and append the mean # distance my taking the sum divided by the shape for k in clusters: model=KMeans(n_clusters=k) model.fit(iris_SP) clusassign=model.predict(iris_SP) meandist.append(sum(np.min(cdist(iris_SP, model.cluster_centers_, 'euclidean'), axis=1)) / iris_SP.shape[0]) """ Plot average distance from observations from the cluster centroid to use the Elbow Method to identify number of clusters to choose """ plt.plot(clusters, meandist) plt.xlabel('Number of clusters') plt.ylabel('Average distance') plt.title('Selecting k with the Elbow Method') # pick the fewest number of clusters that reduces the average distance # If you observe after 3 we can see graph is almost linear ``` <a id="36"></a> <br> ## 7-25 Plot classification probability ``` print(__doc__) # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # License: BSD 3 clause iris = datasets.load_iris() X = iris.data[:, 0:2] # we only take the first two features for visualization y = iris.target n_features = X.shape[1] C = 10 kernel = 1.0 * RBF([1.0, 1.0]) # for GPC # Create different classifiers. 
classifiers = { 'L1 logistic': LogisticRegression(C=C, penalty='l1', solver='saga', multi_class='multinomial', max_iter=10000), 'L2 logistic (Multinomial)': LogisticRegression(C=C, penalty='l2', solver='saga', multi_class='multinomial', max_iter=10000), 'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2', solver='saga', multi_class='ovr', max_iter=10000), 'Linear SVC': SVC(kernel='linear', C=C, probability=True, random_state=0), 'GPC': GaussianProcessClassifier(kernel) } n_classifiers = len(classifiers) plt.figure(figsize=(3 * 2, n_classifiers * 2)) plt.subplots_adjust(bottom=.2, top=.95) xx = np.linspace(3, 9, 100) yy = np.linspace(1, 5, 100).T xx, yy = np.meshgrid(xx, yy) Xfull = np.c_[xx.ravel(), yy.ravel()] for index, (name, classifier) in enumerate(classifiers.items()): classifier.fit(X, y) y_pred = classifier.predict(X) accuracy = accuracy_score(y, y_pred) print("Accuracy (train) for %s: %0.1f%% " % (name, accuracy * 100)) # View probabilities: probas = classifier.predict_proba(Xfull) n_classes = np.unique(y_pred).size for k in range(n_classes): plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1) plt.title("Class %d" % k) if k == 0: plt.ylabel(name) imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)), extent=(3, 9, 1, 5), origin='lower') plt.xticks(()) plt.yticks(()) idx = (y_pred == k) if idx.any(): plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='w', edgecolor='k') ax = plt.axes([0.15, 0.04, 0.7, 0.05]) plt.title("Probability") plt.colorbar(imshow_handle, cax=ax, orientation='horizontal') plt.show() ``` # 9- Read more you can start to learn and review your knowledge about ML with a perfect dataset and try to learn and memorize the workflow for your journey in Data science world with read more sources, here I want to give some courses, e-books and cheatsheet: ## 9-1 Courses There are a lot of online courses that can help you develop your knowledge, here I have just listed some of them: 1. 
[Machine Learning Certification by Stanford University (Coursera)](https://www.coursera.org/learn/machine-learning/) 2. [Machine Learning A-Z™: Hands-On Python & R In Data Science (Udemy)](https://www.udemy.com/machinelearning/) 3. [Deep Learning Certification by Andrew Ng from deeplearning.ai (Coursera)](https://www.coursera.org/specializations/deep-learning) 4. [Python for Data Science and Machine Learning Bootcamp (Udemy)](Python for Data Science and Machine Learning Bootcamp (Udemy)) 5. [Mathematics for Machine Learning by Imperial College London](https://www.coursera.org/specializations/mathematics-machine-learning) 6. [Deep Learning A-Z™: Hands-On Artificial Neural Networks](https://www.udemy.com/deeplearning/) 7. [Complete Guide to TensorFlow for Deep Learning Tutorial with Python](https://www.udemy.com/complete-guide-to-tensorflow-for-deep-learning-with-python/) 8. [Data Science and Machine Learning Tutorial with Python – Hands On](https://www.udemy.com/data-science-and-machine-learning-with-python-hands-on/) 9. [Machine Learning Certification by University of Washington](https://www.coursera.org/specializations/machine-learning) 10. [Data Science and Machine Learning Bootcamp with R](https://www.udemy.com/data-science-and-machine-learning-bootcamp-with-r/) 11. [Creative Applications of Deep Learning with TensorFlow](https://www.class-central.com/course/kadenze-creative-applications-of-deep-learning-with-tensorflow-6679) 12. [Neural Networks for Machine Learning](https://www.class-central.com/mooc/398/coursera-neural-networks-for-machine-learning) 13. [Practical Deep Learning For Coders, Part 1](https://www.class-central.com/mooc/7887/practical-deep-learning-for-coders-part-1) 14. [Machine Learning](https://www.cs.ox.ac.uk/teaching/courses/2014-2015/ml/index.html) ## 9-2 Ebooks So you love reading , here is **10 free machine learning books** 1. [Probability and Statistics for Programmers](http://www.greenteapress.com/thinkstats/) 2. 
[Bayesian Reasoning and Machine Learning](http://web4.cs.ucl.ac.uk/staff/D.Barber/textbook/091117.pdf) 2. [An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/) 2. [Understanding Machine Learning](http://www.cs.huji.ac.il/~shais/UnderstandingMachineLearning/index.html) 2. [A Programmer’s Guide to Data Mining](http://guidetodatamining.com/) 2. [Mining of Massive Datasets](http://infolab.stanford.edu/~ullman/mmds/book.pdf) 2. [A Brief Introduction to Neural Networks](http://www.dkriesel.com/_media/science/neuronalenetze-en-zeta2-2col-dkrieselcom.pdf) 2. [Deep Learning](http://www.deeplearningbook.org/) 2. [Natural Language Processing with Python](https://www.researchgate.net/publication/220691633_Natural_Language_Processing_with_Python) 2. [Machine Learning Yearning](http://www.mlyearning.org/) ## 9-3 Cheat Sheets Data Science is an ever-growing field, there are numerous tools & techniques to remember. It is not possible for anyone to remember all the functions, operations and formulas of each concept. That’s why we have cheat sheets. But there are a plethora of cheat sheets available out there, choosing the right cheat sheet is a tough task. So, I decided to write this article. Here I have selected the cheat sheets on the following criteria: comprehensiveness, clarity, and content [26]: 1. [Quick Guide to learn Python for Data Science ](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/Data-Science-in-Python.pdf) 1. [Python for Data Science Cheat sheet ](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/beginners_python_cheat_sheet.pdf) 1. [Python For Data Science Cheat Sheet NumPy](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/Numpy_Python_Cheat_Sheet.pdf) 1. [Exploratory Data Analysis in Python]() 1. 
[Data Exploration using Pandas in Python](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/Data-Exploration-in-Python.pdf) 1. [Data Visualisation in Python](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/data-visualisation-infographics1.jpg) 1. [Python For Data Science Cheat Sheet Bokeh](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/Python_Bokeh_Cheat_Sheet.pdf) 1. [Cheat Sheet: Scikit Learn ](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/Scikit-Learn-Infographic.pdf) 1. [MLalgorithms CheatSheet](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/MLalgorithms-.pdf) 1. [Probability Basics Cheat Sheet ](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist/blob/master/cheatsheets/probability_cheatsheet.pdf) ###### [Go to top](#top) <a id="37"></a> <br> # 8- conclusion This kernel is a simple tutorial for machine learning with sklearn and it is not completed yet! >###### you may be interested have a look at it: [**10-steps-to-become-a-data-scientist**](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist) --------------------------------------------------------------------- you can Fork and Run this kernel on Github: > ###### [ GitHub](https://github.com/mjbahmani/10-steps-to-become-a-data-scientist) ------------------------------------------------------------------------------------------------------------- **I hope you find this kernel helpful and some <font color="red"><b>UPVOTES</b></font> would be very much appreciated** ----------- <a id="38"></a> <br> # 9- References 1. [Coursera](https://www.coursera.org/specializations/data-science-python) 1. [GitHub](https://github.com/mjbahmani) 1. [Sklearn](https://scikit-learn.org) ###### [Go to top](#top) #### This Kernel is not completed and will be updated soon!!!
github_jupyter
<a href="https://colab.research.google.com/github/evolu-tion/GenomeManagement/blob/master/example/Genome_info.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Bitter gourd project ## Bitter gourd (M. charantia) has 4 assembled genome references. | Genome version | Assembled genome ID | Citation | Genome Size | Number of scaffolds |Completeness | N50 | GC content (%) | N-Gap content (%) | |----------------|--------------------|----------|--------------|--------------------|-------------|-----|------------|---------------| |OHB3-1 v1 |[GCA_001995035.1](https://www.ncbi.nlm.nih.gov/assembly/GCA_001995035.1)|Urasaki et al., 2017| 285.6 Mbp | 1,052 |Scaffold | 1,100,631 bp | 35.77 | 7.71 | |OHB3-1 v2 |[GCA_013281855.1](https://www.ncbi.nlm.nih.gov/assembly/GCA_013281855.1)|Matsumura et al., 2020| | | | | | | |Dali-11 |[CNA0000004](http://ftp.cngb.org/pub/CNSA/data1/CNP0000016/CNS0000443/CNA0000004/)|Cui et al., 2020| 294.0 Mbp | 6,764 |Scaffold | 21,142,588 bp | 35.74 | 2.36 | |TR(S108) |[GCA_900491585.1](https://www.ncbi.nlm.nih.gov/assembly/GCA_900491585.1), [CNA0000005](http://ftp.cngb.org/pub/CNSA/data1/CNP0000016/CNS0000444/CNA0000005/)|Cui et al., 2020| | | | | | | ## Get GenomeManagement package ``` !git clone https://github.com/evolu-tion/GenomeManagement.git ``` # Assessing assembled genome quality 1. Download assembled genomes from NCBI and/or CNGB 2. Assess genome quality using [GenomeManagement](https://github.com/evolu-tion/GenomeManagement) ``` # Retrieve M.
charantia OHB3-1 genome information from RefSeq (https://www.ncbi.nlm.nih.gov/genome/12860?genome_assembly_id=306190) !mkdir -p genome !wget https://ftp.ncbi.nlm.nih.gov/genomes/refseq/plant/Momordica_charantia/latest_assembly_versions/GCF_001995035.1_ASM199503v1/GCF_001995035.1_ASM199503v1_genomic.fna.gz -q -O genome/GCF_001995035.1_ASM199503v1_genomic.fna.gz # Show some sequence of FASTA genome files !zcat genome/GCF_001995035.1_ASM199503v1_genomic.fna.gz | head # Get statistic of genome information using GenomeManagement !python GenomeManagement/get_genome_statistic.py -g genome/GCF_001995035.1_ASM199503v1_genomic.fna.gz # Retrieve M. charantia Dali-11 genome information from CNGB Nucleotide Sequence Archive (CNSA) with CNP0000016 assession !wget ftp://ftp.cngb.org/pub/CNSA/data1/CNP0000016/CNS0000443/CNA0000004/Dali-11_chr.fasta.gz -q -O genome/Dali-11_chr.fasta.gz # Show some sequence of FASTA genome files !zcat genome/Dali-11_chr.fasta.gz | head # Get statistic of genome information using GenomeManagement !python GenomeManagement/get_genome_statistic.py -g genome/Dali-11_chr.fasta.gz ```
github_jupyter
*** <center><h1>Face Rhythm</h1></center> *** <table><tr> <td> <img src="https://images.squarespace-cdn.com/content/5688a31305f8e23aa2893502/1614723283221-5Z5038AT7Y6KCOM2PIU4/Screenshot+from+2021-03-02+17-05-12.png?content-type=image%2Fpng" style="height: 200px"> </td> <td> <img src="https://images.squarespace-cdn.com/content/5688a31305f8e23aa2893502/1614723856628-J89PYYSF7K7JATE2KMF9/Screenshot+from+2021-03-02+17-23-46.png?format=300w&content-type=image%2Fpng" style="height: 200px"> </td> <td> <img src="https://images.squarespace-cdn.com/content/5688a31305f8e23aa2893502/1614723931026-OORV0RAPZNWV3R8TBOXB/Screenshot+from+2021-03-02+17-25-11.png?format=300w&content-type=image%2Fpng" style="height: 200px"> </td> <td> <img src="https://images.squarespace-cdn.com/content/5688a31305f8e23aa2893502/1614724055033-O3GBEF1D9MULFZKI2IUJ/Screenshot+from+2021-03-02+17-27-10.png?format=300w&content-type=image%2Fpng" style="height: 200px"> </td> <td> <img src="https://images.squarespace-cdn.com/content/5688a31305f8e23aa2893502/1614723378405-WXN74ZTT1KYZUQGDM07X/face_rhythm_banner2.png?format=1000w&content-type=image%2Fpng" style="height: 200px"> </td> </tr></table> *** ##### Notebook Shortcuts - **[Notebook Setup](#Notebook-Setup)**: Prepare all the necessary config files and folders - **[Set ROI](#Set-ROI)**: Set the ROI for the analysis - **[Run Optic Flow](#Run-Optic-Flow)**: Run the optic flow analysis - **[Clean Optic Flow](#Clean-Optic-Flow)**: Optic flow post-processing - **[Convolutional Dimensionality Reduction](#Convolutional-Dimensionality-Reduction)**: Convolutional Dimensionality Reduction - **[Analysis](#Analysis)**: Decompose and Analyze the optic flow data in many ways *** # Tips on running this notebook: In theory it would be nice if you could just enter the path of the video(s) and just let it run all the way through. In practice, there are a few hoops to jump through - Run the Notebook Setup Block (two blocks below this one). 
This should pretty much always be done, even if you are loading precomputed file from disk instead of calculating them. This step loads in some useful meta data used throughout. - Even if you are restarting at a specific point in your analysis, run your Setup Block then head down to your current analysis step cell *** <center><h1>Notebook Setup</h1></center> *** ### Creates config and locates videos **Crucially, always run this first cell every time you run this notebook.** Also, generally make sure to read through the config parameters before running. The Project path is the path to a folder (existing or not) where we will store our derived files. I recommend creating a project folder and then copying this notebook into that folder. The Video path is the path to a folder containing videos. The run name will determine the name of the config. You might create multiple configs if you want to re-run the same data with slightly different parameters Previous face rhythm users might be familiar with the 'sessions' structure. Some users will want to run multiple sessions through Face Rhythm at the same time. If that's you, then read the docs to see what parameters to change: https://face-rhythm.readthedocs.io/ If you did everything according to the readme, you should see that the video_path currently points to a folder containing one sample video in the testing folder. Give this a try! 
``` # ALWAYS RUN THIS CELL # widen jupyter notebook window from IPython.core.display import display, HTML display(HTML("<style>.container {width:95% !important; }</style>")) from face_rhythm.util import helpers, setup from pathlib import Path # General Parameters overwrite_config = False remote = False # Select true if running on any system where your kernel isn't on your local machine trials = False # Let us know if you're using trials and want to use them as a dimension in the factorization multisession = False # Set to true if you're doing a multisession analysis # Project path, video path, and (optionally) run name project_path = Path(r'./').resolve() # often Path.cwd().resolve() is useful video_path = Path(r'../face-rhythm/test_data/single_session_single_video/session1').resolve() run_name = 'new' config_filepath = setup.setup_project(project_path, video_path, run_name, overwrite_config, remote, trials, multisession) from face_rhythm.util import helpers, setup # IMPORT VIDEOS # See the docs for more information on video path setting # https://face-rhythm.readthedocs.io/ config = helpers.load_config(config_filepath) config['Video']['file_prefix'] = 'gmou06' # change this to 'session_prefix' if multisession config['Video']['print_filenames'] = True config['General']['overwrite_nwbs'] = False helpers.save_config(config, config_filepath) setup.prepare_videos(config_filepath) ``` *** <center><h1>Set ROI</h1></center> *** ### Manually specify your roi This is good if your animal doesn't fill the view and if you have stationary objects nearby. ``` %matplotlib notebook from face_rhythm.util import helpers, set_roi # Select Bounding ROI for this analysis # This block of code will pop up a little GUI. Click around the # region of the face that you want to include in the analysis. # When you are done, click the 'Confirm ROI' button config = helpers.load_config(config_filepath) config['ROI']['session_to_set'] = 0 # 0 indexed. 
Chooses the session to use config['ROI']['vid_to_set'] = 0 # 0 indexed. Sets the video to use to make an image config['ROI']['frame_to_set'] = 1 # 0 indexed. Sets the frame number to use to make an image config['ROI']['load_from_file'] = False # if you've already run this and want to use the existing ROI, set to True helpers.save_config(config, config_filepath) frame, bbox_selector = set_roi.get_roi(config_filepath) # Don't run this until you're done selecting if not config['ROI']['load_from_file']: set_roi.process_roi(config_filepath, frame, bbox_selector) ``` *** <center><h1>Run Optic Flow</h1></center> *** # Optic flow calculation Multithread is generally 2X to many-X faster, but may fail when too many dots are selected (memory overload) *If show video set to true on a remote connection, the video won't show, but it will save to the proj folder.* Key Optic flow params: - **'spacing'**: ~ 3 to 12. Spacing between dots, in pixels. Inversely related to number of dots to use in the calculation. Try to keep the number of dots below 2000 if possible (eats up memory and computation time). More dots generally means better final results, more robust to outliers and weird stuff. I'd make the spacing as small (more dots) as you can go before you run out of RAM in the final calculations - **lk_params 'win_size'**: ~ 25,25 to 80,80. This is the spatial integration window for the optical flow measurement. Try to make it as small as possible without it becoming unstable. The two values are for X and Y length of square integration window. Probably keep the same for most applications - **multithread**: There are two ways to run optic flow: one single-threaded and one multi-threaded. Do parameter tuning on the single-threaded one so you can quit out of it, as well as watch the calculation as it happens with showVideo_pref=True. 
The multi-threaded one is only faster if you have a lot of cores in your CPU (>10), then it's faster, else stick with the single-threaded version and set showVideo_pref=False. ``` from face_rhythm.optic_flow import optic_flow import cv2 config = helpers.load_config(config_filepath) config['Optic']['vidNums_toUse'] = list(range(config['General']['sessions'][0]['num_vids'])) ## 0 indexing. Use this line of code to run all the videos in a particular session # Parameters for lucas kanade optical flow # win size: spatial integration window (make small as possible, but make bigger if youre having issues with outliers) # max level: only moderate effects if everything working properly. Keep around 3. # criteria values have to do with the search algorithm. For speed: EPS small, COUNT big (if data is gud) config['Optic']['spacing'] = 5 ## This is the distance between points in the grid (both in x and y dims) config['Optic']['lk'] = {} config['Optic']['lk']['winSize'] = (20,20) config['Optic']['lk']['maxLevel'] = 4 config['Optic']['lk']['criteria'] = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 4, 0.0001) config['Optic']['showVideo_pref'] = False ## much faster when video is off. If 'remote' option chosen (from first cell block), video will be saved as file in project folder. 
config['Video']['dot_size'] = 1 ## for viewing purposes config['Video']['save_demo'] = False # Whether to save the demo video (true for remote users when showvideo is true) config['Video']['demo_len'] = 10 # used when remote users when show_video==True config['Video']['fps_counterPeriod'] = 10 # number of frames between fps averaging calculation config['Video']['printFPS_pref'] = False # option for whether fps should be printed in notebook config['Optic']['recursive'] = True config['Optic']['recursive_relaxation_factor'] = 0.0035 config['Optic']['multithread'] = False # Must be False if 'recursive'==True OR if 'showVideo_pref'==True helpers.save_config(config, config_filepath) ### == CALCULATION == optic_flow.optic_workflow(config_filepath) ``` *** <center><h1>Clean Optic Flow</h1></center> *** ### Clean up displacements traces and make good positions traces Key Outlier removal params: - **outlier_threshold_positions**: ~ 20 to 100. If a dot strays more than this many pixels away from its anchor position, its displacement in the dimension it cross the threshold in, for those time points (and some time points around it, see params below), for that dot only, will be set to zero - **outlier_threshold_displacements** ~ 5 to 25. Similar to above, but for displacement. Only the outlier time points are removed (no window around outliers considered). - **framesHalted_beforeOutlier**: ~ 0 to 30. The number of frames to also remove before detected outlier events. Consider what is causing your outlier event. If it is an arm movement or something, how long does such a movement last? How long before it will cause a dot to move to the outlier threshold? - **framesHalted_afterOutlier**: ~ 0 to 10. Simlar to above but for after an outlier event is detected - **relaxation_factor** : ~ 0.03 to 0.1. This is the rate of the exponential decay / relaxation / attraction back to the anchor position that a point undergoes. It is meant to prevent baseline drift. 
Think of it like a high pass on the dot position trace ``` from face_rhythm.optic_flow import clean_results ## Create position trace from displacements ## This block does a few things: ## 1. Finds outliers: These are currently defined as time points when the integrated position goes beyond some threshold. ## Note that since displacements are calculated for x and y separately, outlier events are also separated into x outlier events ## and y outlier events. ## 2. Sets displacements during outlier events to ZERO: There are some parameters below that define the time window (in frames) ## before and after outliers to also set to zero. Note again, that DISPLACEMENT (the derivative of position) is set to zero, ## effectively pausing the position of the ingegrated position. ## 3. Rectifies the position to its 'anchor position': I am defining position as the integrated displacement arising from a STATIC ## place in the image. Because this analysis is image agnostic, drift naturally occurs. This term counteracts drift by simply ## relaxing each dot's position back to the center of its displacement analysis window. This term should be as low as possible ## because it also acts as a high pass filter, thus precluding analysis of slow timescale changes. ## Note that using a standard frequency filter (fir, iir) here for the rectification / relaxation doesn't work well config = helpers.load_config(config_filepath) config['Clean']['outlier_threshold_positions'] = 25 ## in pixels. If position goes past this, short time window before and including outlier timepoint has displacement set to 0 config['Clean']['outlier_threshold_displacements'] = 4 ## in pixels. If displacement goes past this, displacement set to 0 at those time points config['Clean']['framesHalted_beforeOutlier'] = 15 # in frames. best to make even config['Clean']['framesHalted_afterOutlier'] = 5 # in frames. 
best to make even config['Clean']['relaxation_factor'] = 0.0035 # This is the speed at which the integrated position exponentially relaxes back to its anchored position. Make ~0.005 to 0.05 for face video at 120 Hz helpers.save_config(config, config_filepath) clean_results.clean_workflow(config_filepath) # Display the new points! from face_rhythm.visualize import videos config = helpers.load_config(config_filepath) config['Video']['demo_len'] = 100 config['Video']['data_to_display'] = 'positions_cleanup_absolute' config['Video']['save_demo'] = True helpers.save_config(config, config_filepath) videos.visualize_points(config_filepath) ``` *** <center><h1>Convolutional Dimensionality Reduction</h1></center> *** ### Do some denoising and to get the number of dots down to a managable number In particular, it is nice for the batched CP decomposition later that the batches can be as big as possible in the temporal dimension, so doing some mild convolutional dim reduction first is helpful. ``` %matplotlib inline from face_rhythm.optic_flow import conv_dim_reduce config = helpers.load_config(config_filepath) # Create kernel config['CDR']['width_cosKernel'] = 20 # This is the radius of a 2-dimensional cosine kernel. If you get an error about SVD not working, probably increase this config['CDR']['num_dots'] = config['Optic']['num_dots'] # Distance between points in the grid, longer than optic config['CDR']['spacing'] = 5 # For displaying dots config['CDR']['display_points'] = False # checkout the dots and overlayed filter config['CDR']['vidNum'] = 0 # 0 indexed config['CDR']['frameNum'] = 1 # 0 indexed config['CDR']['dot_size'] = 1 config['CDR']['kernel_alpha'] = 0.3 config['CDR']['kernel_pixel'] = 10 # Coefficients of influence config['CDR']['num_components'] = 3 helpers.save_config(config, config_filepath) conv_dim_reduce.conv_dim_reduce_workflow(config_filepath) # Display the new points! 
from face_rhythm.visualize import videos config = helpers.load_config(config_filepath) config['Video']['demo_len'] = 100 config['Video']['data_to_display'] = 'positions_convDR_absolute' config['Video']['save_demo'] = True helpers.save_config(config, config_filepath) videos.visualize_points(config_filepath) ``` *** <center><h1>Analysis</h1></center> *** ### Decompose and Analyze the Data in different ways Below you'll find the following: - PCA done on the point positions - TCA done on the point positions - Spectral analysis of every pixel to transoform the basis to be oscillatory - TCA done on the spectra ### PCA ``` from face_rhythm.analysis import pca pca.pca_workflow(config_filepath, 'positions_convDR_absolute') %matplotlib notebook from face_rhythm.visualize import plots config = helpers.load_config(config_filepath) config['PCA']['n_factors_to_show'] = 3 helpers.save_config(config, config_filepath) plots.plot_pca_diagnostics(config_filepath) from face_rhythm.visualize import videos config = helpers.load_config(config_filepath) config['Video']['factor_category_to_display'] = 'PCA' # eg: 'TCA' or 'PCA' config['Video']['factor_to_display'] = 'factors_points' # eg: (if 'TCA'):'factors_frequential_points' (if 'PCA'):'factors_points' config['Video']['points_to_display'] = 'positions_convDR_absolute' # eg: 'positions_convDR_absolute' or 'positions_absolute' or 'positions_recursive' config['Video']['demo_len'] = 100 config['Video']['dot_size'] = 2 config['Video']['save_demo'] = True helpers.save_config(config, config_filepath) videos.visualize_factor(config_filepath) ``` ### Positional TCA Key TCA parameters: - **device**: runs a small function to get your device. Set to true if you want to use the GPU. If the input is small (< half the size of your GPU memory), set to True. It's super fast. - **rank**: ~ 2 to 10. The number of factors to look for in the PARAFAC model. 
More can be good but less reproducible, but less can mix together obviously different factors - **tolerance**: ~1e-05 to 1e-07. The minimum variance of the model between steps before declaring convergence. If you're trying to really optimize your TCA fit, try decreasing this. - **n_iters**: ~100 to 600. The fit of the model usually improves if you give it more iterations ``` from face_rhythm.analysis import tca config = helpers.load_config(config_filepath) config['TCA']['pref_useGPU'] = False config['TCA']['rank'] = 4 config['TCA']['init'] = 'svd' # set this to 'svd' for small datasets config['TCA']['tolerance'] = 1e-06 config['TCA']['verbosity'] = 0 config['TCA']['n_iters'] = 100 helpers.save_config(config, config_filepath) tca.positional_tca_workflow(config_filepath, 'positions_convDR_meanSub') # you can use different positions data %matplotlib notebook from face_rhythm.visualize import plots config = helpers.load_config(config_filepath) config['TCA']['ftype'] = 'positional' helpers.save_config(config, config_filepath) plots.plot_tca_factors(config_filepath) from face_rhythm.visualize import videos config = helpers.load_config(config_filepath) config['Video']['factor_category_to_display'] = 'TCA' # eg: 'TCA' or 'PCA' config['Video']['factor_to_display'] = 'factors_positional_points' # eg: (if 'TCA'):'factors_frequential_points' (if 'PCA'):'scores_points' config['Video']['points_to_display'] = 'positions_convDR_absolute' # eg: 'positions_convDR_absolute' or 'positions_absolute' or 'positions_recursive' config['Video']['demo_len'] = 100 config['Video']['dot_size'] = 2 config['Video']['save_demo'] = True helpers.save_config(config, config_filepath) videos.visualize_factor(config_filepath) ``` ### Spectral Analysis I've played with a few different methods. While multiresolution methods seem ideal for this use-case, they just end up severely overrepresenting low frequency factors, making noisier high frequency factors, and doing an overall worse job at reconstruction.
A good ol' multitaper short time fourier transform seems to work fine. Adding in raw positions to subsequent dimensionality reduction later on seems like a natural thing to do, as single resolution spectral analysis ends up kind of ignoring slower dynamics. We recommend running the first cell to just visualize and assess the frequencies you're using. You can change this frequency distribution by altering the provided parameters Key Spectral analysis params: - **hop_length**: ~ 5 to 20. The length of the time window used for the short-time Fourier transform. Longer gives better spectral resolution, shorter gives better temporal resolution. There are several other parameters that are related but this is the most important. Longer windows (along with decreasing the overlap parameter) also decrease the size of the output spectrograms, which can help with memory and computation time in the subsequent analyses ``` from face_rhythm.analysis import spectral_analysis config = helpers.load_config(config_filepath) config['CQT']['hop_length'] = 16 config['CQT']['fmin_rough'] = 1.8 config['CQT']['sampling_rate'] = config['Video']['Fs'] config['CQT']['n_bins'] = 35 helpers.save_config(config, config_filepath) spectral_analysis.prepare_freqs(config_filepath) from face_rhythm.analysis import spectral_analysis spectral_analysis.cqt_workflow(config_filepath, 'positions_convDR_meanSub') %matplotlib notebook from face_rhythm.visualize import plots config = helpers.load_config(config_filepath) config['CQT']['pixelNum_toUse'] = 10 helpers.save_config(config, config_filepath) plots.plot_cqt(config_filepath) ``` ### TCA Key TCA parameters: - **device**: runs a small function to get your device. Set to true if you want to use the GPU. If the input is small (< half the size of your GPU memory), set to True. It's super fast. - **rank**: ~ 2 to 10. The number of factors to look for in the PARAFAC model. 
More can be good but less reproduceable, but less can mix together obviously different factors - **tolerance**: ~1e-05 to 1e-07. The minimum variance of the model between steps before declaring convergence. If you're trying to really optimize your TCA fit, try decreasing this. - **n_iters**: ~100 to 600. The fit of the model usually improves if you give it more iterations ``` from face_rhythm.analysis import tca %matplotlib notebook config = helpers.load_config(config_filepath) config['TCA']['pref_useGPU'] = False config['TCA']['rank'] = 8 config['TCA']['init'] = 'random' config['TCA']['tolerance'] = 1e-06 config['TCA']['verbosity'] = 0 config['TCA']['n_iters'] = 100 helpers.save_config(config, config_filepath) tca.full_tca_workflow(config_filepath, 'positions_convDR_meanSub') %matplotlib notebook from face_rhythm.visualize import plots config = helpers.load_config(config_filepath) config['TCA']['ftype'] = 'spectral' helpers.save_config(config, config_filepath) plots.plot_tca_factors(config_filepath) from face_rhythm.visualize import videos config = helpers.load_config(config_filepath) config['Video']['factor_category_to_display'] = 'TCA' # eg: 'TCA' or 'PCA' config['Video']['factor_to_display'] = 'factors_spectral_points' # eg: (if 'TCA'):'factors_frequential_points' (if 'PCA'):'scores_points' config['Video']['points_to_display'] = 'positions_convDR_absolute' # eg: 'positions_convDR_absolute' or 'positions_absolute' or 'positions_recursive' config['Video']['demo_len'] = 10 config['Video']['dot_size'] = 2 config['Video']['save_demo'] = True helpers.save_config(config, config_filepath) videos.visualize_factor(config_filepath) ``` # END analysis Below are some examples on how to access, plot, and manipulate the output data ## Outputs: Below is the output tree structure of the NWB file ``` config = helpers.load_config(config_filepath) nwb_path = config['General']['sessions'][0]['nwb'] helpers.dump_nwb(nwb_path) ``` ## Example: How to plot factors: Spectral ``` import 
numpy as np import matplotlib.pyplot as plt import sklearn as sk import sklearn.decomposition import pynwb %matplotlib notebook config = helpers.load_config(config_filepath) nwb_path = config['General']['sessions'][0]['nwb'] # freqs_Sxx = np.load(config['Paths']['freqs_Sxx']) Fs = config['Video']['Fs'] with pynwb.NWBHDF5IO(nwb_path, 'r') as io: nwbfile = io.read() freq_components = nwbfile.processing['Face Rhythm']['TCA']['factors_spectral_spectral'].data dot_components = nwbfile.processing['Face Rhythm']['TCA']['factors_spectral_points'].data temp_components_interp = nwbfile.processing['Face Rhythm']['TCA']['factors_spectral_temporal_interp'].data freqs_Sxx = nwbfile.processing['Face Rhythm']['CQT']['freqs_Sxx'].data num_factors = freq_components.shape[1] plt.figure() plt.plot(freqs_Sxx , freq_components[:,:]) # plt.plot(freq_components[:,:]) plt.xscale('log') plt.xlabel('frequency (Hz)') plt.legend(np.arange(num_factors)) plt.figure() plt.plot(dot_components[:,:]) plt.xlabel('dotIDs (concat X then Y)') plt.figure() plt.plot(np.arange(temp_components_interp.shape[0])/Fs , temp_components_interp[:,:]) plt.xlabel('time (s)') plt.figure() plt.imshow(np.corrcoef(np.array(temp_components_interp).T) - np.eye(num_factors),aspect='auto') plt.colorbar() pca = sklearn.decomposition.PCA(n_components=num_factors) pca.fit(temp_components_interp) # PCA(n_components=8) plt.figure() plt.plot(pca.explained_variance_ratio_) plt.xlabel('rank') plt.ylabel('explained variance ratio') ``` ## Example: How to plot factors: Positional ``` import sklearn as sk import sklearn.decomposition import pynwb %matplotlib notebook config = helpers.load_config(config_filepath) nwb_path = config['General']['sessions'][0]['nwb'] Fs = config['Video']['Fs'] with pynwb.NWBHDF5IO(nwb_path, 'r') as io: nwbfile = io.read() dot_components = np.array(nwbfile.processing['Face Rhythm']['TCA']['factors_positional_points'].data) temp_components = np.array(nwbfile.processing['Face 
Rhythm']['TCA']['factors_positional_temporal'].data) # plt.figure() # plt.plot(freqs_Sxx , freq_components[:,:]) # plt.xscale('log') # plt.xlabel('frequency (Hz)') plt.figure() plt.plot(dot_components[:,:]) plt.xlabel('dotIDs (concat X then Y)') plt.figure() plt.plot(np.arange(temp_components.shape[0])/Fs , temp_components[:,:]) plt.xlabel('time (s)') plt.figure() plt.imshow(np.corrcoef(np.array(temp_components).T) - np.eye(4),aspect='auto') plt.colorbar() ``` ## Example: How to access NWB output data ``` import pynwb import matplotlib.pyplot as plt import numpy as np config = helpers.load_config(config_filepath) nwb_path = config['General']['sessions'][0]['nwb'] with pynwb.NWBHDF5IO(nwb_path, 'r') as io: nwbfile = io.read() # look through the NWB outputs (see above example) to see available arrays to plot and how to access them example_data = nwbfile.processing['Face Rhythm']['Optic Flow']['positions_convDR_meanSub'].data print(example_data) plt.figure() plt.plot(example_data[0,0,:]) ``` ## Example of how to access parameters ``` config = helpers.load_config(config_filepath) print(config['Optic']['lk']['winSize']) ```
github_jupyter
# 多输入多输出通道 :label:`sec_channels` 虽然我们在 :numref:`subsec_why-conv-channels`中描述了构成每个图像的多个通道和多层卷积层。例如彩色图像具有标准的RGB通道来指示红、绿和蓝。 但是到目前为止,我们仅展示了单个输入和单个输出通道的简化例子。 这使得我们可以将输入、卷积核和输出看作二维张量。 当我们添加通道时,我们的输入和隐藏的表示都变成了三维张量。例如,每个RGB输入图像具有$3\times h\times w$的形状。我们将这个大小为$3$的轴称为*通道*(channel)维度。在本节中,我们将更深入地研究具有多输入和多输出通道的卷积核。 ## 多输入通道 当输入包含多个通道时,需要构造一个与输入数据具有相同输入通道数的卷积核,以便与输入数据进行互相关运算。假设输入的通道数为$c_i$,那么卷积核的输入通道数也需要为$c_i$。如果卷积核的窗口形状是$k_h\times k_w$,那么当$c_i=1$时,我们可以把卷积核看作形状为$k_h\times k_w$的二维张量。 然而,当$c_i>1$时,我们卷积核的每个输入通道将包含形状为$k_h\times k_w$的张量。将这些张量$c_i$连结在一起可以得到形状为$c_i\times k_h\times k_w$的卷积核。由于输入和卷积核都有$c_i$个通道,我们可以对每个通道输入的二维张量和卷积核的二维张量进行互相关运算,再对通道求和(将$c_i$的结果相加)得到二维张量。这是多通道输入和多输入通道卷积核之间进行二维互相关运算的结果。 在 :numref:`fig_conv_multi_in`中,我们演示了一个具有两个输入通道的二维互相关运算的示例。阴影部分是第一个输出元素以及用于计算这个输出的输入和核张量元素:$(1\times1+2\times2+4\times3+5\times4)+(0\times0+1\times1+3\times2+4\times3)=56$。 ![两个输入通道的互相关计算。](../img/conv-multi-in.svg) :label:`fig_conv_multi_in` 为了加深理解,我们(**实现一下多输入通道互相关运算**)。 简而言之,我们所做的就是对每个通道执行互相关操作,然后将结果相加。 ``` import tensorflow as tf from d2l import tensorflow as d2l def corr2d_multi_in(X, K): # 先遍历“X”和“K”的第0个维度(通道维度),再把它们加在一起 return tf.reduce_sum([d2l.corr2d(x, k) for x, k in zip(X, K)], axis=0) ``` 我们可以构造与 :numref:`fig_conv_multi_in`中的值相对应的输入张量`X`和核张量`K`,以(**验证互相关运算的输出**)。 ``` X = tf.constant([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = tf.constant([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]]) corr2d_multi_in(X, K) ``` ## 多输出通道 到目前为止,不论有多少输入通道,我们还只有一个输出通道。然而,正如我们在 :numref:`subsec_why-conv-channels`中所讨论的,每一层有多个输出通道是至关重要的。在最流行的神经网络架构中,随着神经网络层数的加深,我们常会增加输出通道的维数,通过减少空间分辨率以获得更大的通道深度。直观地说,我们可以将每个通道看作是对不同特征的响应。而现实可能更为复杂一些,因为每个通道不是独立学习的,而是为了共同使用而优化的。因此,多输出通道并不仅是学习多个单通道的检测器。 用$c_i$和$c_o$分别表示输入和输出通道的数目,并让$k_h$和$k_w$为卷积核的高度和宽度。为了获得多个通道的输出,我们可以为每个输出通道创建一个形状为$c_i\times k_h\times k_w$的卷积核张量,这样卷积核的形状是$c_o\times c_i\times k_h\times k_w$。在互相关运算中,每个输出通道先获取所有输入通道,再以对应该输出通道的卷积核计算出结果。 如下所示,我们实现一个[**计算多个通道的输出的互相关函数**]。 ``` def 
corr2d_multi_in_out(X, K): # 迭代“K”的第0个维度,每次都对输入“X”执行互相关运算。 # 最后将所有结果都叠加在一起 return tf.stack([corr2d_multi_in(X, k) for k in K], 0) ``` 通过将核张量`K`与`K+1`(`K`中每个元素加$1$)和`K+2`连接起来,构造了一个具有$3$个输出通道的卷积核。 ``` K = tf.stack((K, K + 1, K + 2), 0) K.shape ``` 下面,我们对输入张量`X`与卷积核张量`K`执行互相关运算。现在的输出包含$3$个通道,第一个通道的结果与先前输入张量`X`和多输入单输出通道的结果一致。 ``` corr2d_multi_in_out(X, K) ``` ## $1\times 1$ 卷积层 [~~1x1卷积~~] $1 \times 1$卷积,即$k_h = k_w = 1$,看起来似乎没有多大意义。 毕竟,卷积的本质是有效提取相邻像素间的相关特征,而$1 \times 1$卷积显然没有此作用。 尽管如此,$1 \times 1$仍然十分流行,时常包含在复杂深层网络的设计中。下面,让我们详细地解读一下它的实际作用。 因为使用了最小窗口,$1\times 1$卷积失去了卷积层的特有能力——在高度和宽度维度上,识别相邻元素间相互作用的能力。 其实$1\times 1$卷积的唯一计算发生在通道上。 :numref:`fig_conv_1x1`展示了使用$1\times 1$卷积核与$3$个输入通道和$2$个输出通道的互相关计算。 这里输入和输出具有相同的高度和宽度,输出中的每个元素都是从输入图像中同一位置的元素的线性组合。 我们可以将$1\times 1$卷积层看作是在每个像素位置应用的全连接层,以$c_i$个输入值转换为$c_o$个输出值。 因为这仍然是一个卷积层,所以跨像素的权重是一致的。 同时,$1\times 1$卷积层需要的权重维度为$c_o\times c_i$,再额外加上一个偏差。 ![互相关计算使用了具有3个输入通道和2个输出通道的 $1\times 1$ 卷积核。其中,输入和输出具有相同的高度和宽度。](../img/conv-1x1.svg) :label:`fig_conv_1x1` 下面,我们使用全连接层实现$1 \times 1$卷积。 请注意,我们需要对输入和输出的数据形状进行微调。 ``` def corr2d_multi_in_out_1x1(X, K): c_i, h, w = X.shape c_o = K.shape[0] X = tf.reshape(X, (c_i, h * w)) K = tf.reshape(K, (c_o, c_i)) # 全连接层中的矩阵乘法 Y = tf.matmul(K, X) return tf.reshape(Y, (c_o, h, w)) ``` 当执行$1\times 1$卷积运算时,上述函数相当于先前实现的互相关函数`corr2d_multi_in_out`。让我们用一些样本数据来验证这一点。 ``` X = tf.random.normal((3, 3, 3), 0, 1) K = tf.random.normal((2, 3, 1, 1), 0, 1) Y1 = corr2d_multi_in_out_1x1(X, K) Y2 = corr2d_multi_in_out(X, K) assert float(tf.reduce_sum(tf.abs(Y1 - Y2))) < 1e-6 ``` ## 小结 * 多输入多输出通道可以用来扩展卷积层的模型。 * 当以每像素为基础应用时,$1\times 1$卷积层相当于全连接层。 * $1\times 1$卷积层通常用于调整网络层的通道数量和控制模型复杂性。 ## 练习 1. 假设我们有两个卷积核,大小分别为$k_1$和$k_2$(中间没有非线性激活函数)。 1. 证明运算可以用单次卷积来表示。 1. 这个等效的单卷积的维数是多少呢? 1. 反之亦然吗? 1. 假设输入为$c_i\times h\times w$,卷积核大小为$c_o\times c_i\times k_h\times k_w$,填充为$(p_h, p_w)$,步幅为$(s_h, s_w)$。 1. 前向传播的计算成本(乘法和加法)是多少? 1. 内存占用是多少? 1. 反向传播的内存占用是多少? 1. 反向传播的计算成本是多少? 1. 如果我们将输入通道$c_i$和输出通道$c_o$的数量加倍,计算数量会增加多少?如果我们把填充数量翻一番会怎么样? 1. 
如果卷积核的高度和宽度是$k_h=k_w=1$,前向传播的计算复杂度是多少? 1. 本节最后一个示例中的变量`Y1`和`Y2`是否完全相同?为什么? 1. 当卷积窗口不是$1\times 1$时,如何使用矩阵乘法实现卷积? [Discussions](https://discuss.d2l.ai/t/1853)
github_jupyter
# Análisis Exploratorio de Datos A continuación, analizaremos los archivos para contestar algunas preguntas básicas. El primer paso es importar las librerías necesarias, en particular `pandas` para trabajar con la data y explorarla, y `os` para contar con distintas funciones que nos permiten explorar el directorio. ``` import pandas as pd import os ``` ### ¿Cuántos archivos tenemos en la carpeta? Desde nuestra ubicación, podemos abrir la carpeta con Python y hacer que nos muestre el contenido. El archivo `.DS_Store` es sólo un archivo de configuración local de Mac OS, al excluirlo podemos ver que existen 8 archivos en total. Finalmente, podemos utilizar comprensión de listas en Python para generar una lista de todos los nombres de archivo, mostraré varios ejemplos a continuación: ``` # Imprime una lista de los archivos en el directorio especificado. print("Todos los archivos: ", os.listdir("data_source")) # Genera los nombres de archivo siguiendo un patrón sencillo. filenames = ["chart{}.xlsx".format(i) for i in range(1,9)] print("\nPrimera forma:\nfilenames = ", filenames) # Comprensión de lista excluyendo un valor, y luego ordenando con sorted(). # Si los archivos no tuviesen un patrón claro, este método es más útil. filenames = sorted([f for f in os.listdir("data_source") if f != ".DS_Store"]) print("\nSegunda forma:\nfilenames = ", filenames) # Comprensión de lista tomando sólo los archivos .xlsx y ordenando con sorted(). # Muy útil si existen distintas extensiones de archivo y quieres extraer alguna específica. filenames = sorted([f for f in os.listdir("data_source") if ".xlsx" in f]) print("\nTercera forma:\nfilenames = ", filenames) ``` ### ¿Qué formato poseen? ¿Están en el mismo formato? ¿Los formatos pueden ser leídos directamente por `pandas`? En este caso, todos los archivos poseen el mismo formato (`.xlsx`), y pueden ser leídos directamente por `pandas`. 
Así como cualquiera de las siguientes extensiones: | Format Type | Data Description | Reader | Writer | |:---|:---|:---|:---| | text | CSV | read_csv | to_csv | | text | Fixed-Width Text File | read_fwf | | | text | JSON | read_json | to_json | | text | HTML | read_html | to_html | | text | Local clipboard | read_clipboard | to_clipboard | | --- | MS Excel | read_excel | to_excel | | binary | OpenDocument | read_excel | | | binary | HDF5 Format | read_hdf | to_hdf | | binary | Feather Format | read_feather | to_feather | | binary | Parquet Format | read_parquet | to_parquet | | binary | ORC Format | read_orc | | | binary | Msgpack | read_msgpack | to_msgpack | | binary | Stata | read_stata | to_stata | | binary | SAS | read_sas | | | binary | SPSS | read_spss | | | binary | Python Pickle Format | read_pickle | to_pickle | | SQL | SQL | read_sql | to_sql | | SQL | Google BigQuery | read_gbq | to_gbq | ### ¿Cuántas filas y columnas posee cada archivo? Para analizar cada archivo, hacemos uso de la lista `filenames` que creamos previamente, y podemos imprimir algunas filas e información de cada uno. Ahora tenemos una idea sobre la estructura de cada archivo. ``` for f in filenames: # f tomará el valor de cada nombre de archivo. print("\nARCHIVO: ", f) # Imprimimos el nombre del archivo para referencia. df = pd.read_excel("data_source/" + f) # Debemos agregar "data_source/" para que busque el archivo en la carpeta. print("\n", df.head()) print("\n", df.info()) ``` ### ¿Tenemos valores nulos (`NaN`) en ciertas columnas? ¿Cuál es la mejor estrategia para este conjunto de datos? En este caso, vamos a analizar los archivos uno por uno, testeando para encontrar valores nulos o inconsistencias en las columnas. ``` for f in filenames: print("\nARCHIVO: ", f) df = pd.read_excel("data_source/" + f) print(df.isnull().any()) # Imprimimos en pantalla qué columnas poseen valores nulos. ``` Encontramos que ninguno de los archivos posee valores nulos. 
En la siguiente etapa hablaremos de Tidy Data y cómo lograr unir estos archivos.
github_jupyter
import csv
import nltk
import os
import pandas as pd
import re
import spacy
import textblob
from textblob import TextBlob
from textblob.base import BaseSentimentAnalyzer
from textblob.sentiments import NaiveBayesAnalyzer, PatternAnalyzer
from textblob.classifiers import NaiveBayesClassifier
from operator import methodcaller
from operator import attrgetter
from spacy.tokens import Doc
from collections import Counter
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from wordcloud import WordCloud, STOPWORDS
from matplotlib import pyplot as plt

pd.options.display.max_colwidth = 500

# Topic export to analyse; swap in any of the other dumps below.
csvName = 'ATS_20190925-235549'  # Change this string value to the file you want
#csvName = 'GTC_20190925-021514'
#csvName = 'PGT_20190923-153422'
#csvName = 'TICS_20190925-045521'

# The inputCsvName represents a .csv file located in the data/raw_data/*.csv folder
inputCsvName = 'data/raw_data/' + csvName + '.csv'  # Change the relative or absolute path here

Analyzer = SentimentIntensityAnalyzer()

# Custom cryptocurrency word sentiment values merged into VADER's lexicon.
new_words = {
    'hold': 0.5,
    'lambo': 1.5,
    'moon': 1.5,
    'mooning': 1.6,
    'bull': 1,
    'bear': -0.5,
    'shill': -1,
    'shilling': -1.5,
    'pump': -0.75,
    'decentralized': 0.5,
    'noob': -0.5,
    'whale': 0.5,
    '51%': -1,
    'denial': -1.4,
    'fundamental': 0.1,
    'analysis': 0.3,
    'oracle': 0.25,
    'shitcoin': -3,
    'volatile': -0.75,
}
# Update the VADER lexicon with these additional sentiment values.
Analyzer.lexicon.update(new_words)


def vader_polarity(text):
    """Classify *text* as 'Positive', 'Negative' or 'Neutral' from VADER scores.

    Per the vaderSentiment documentation the pos/neg/neu values are
    proportions in [0, 1] that sum to 1, so the original branches guarded by
    `neu > 1` could never fire; that dead code (and the unused local holding
    the compound score) has been removed.  The reachable decision logic is
    unchanged: a strict pos/neg majority wins, a three-way tie is Neutral,
    and a pos == neg tie falls back to the compound-score thresholds.
    """
    score = Analyzer.polarity_scores(text)
    pos = score['pos']
    neg = score['neg']
    neu = score['neu']
    if pos == neg == neu:
        # All three proportions equal (e.g. empty text scores 0/0/0).
        return 'Neutral'
    if pos > neg:
        return 'Positive'
    if neg > pos:
        return 'Negative'
    # pos == neg but differs from neu: decide on the compound score.
    if score['compound'] >= 0.5:
        return 'Positive'
    if score['compound'] <= -0.5:
        return 'Negative'
    return 'Neutral'
def strip_non_ascii(string):
    """Return *string* with every character outside printable ASCII removed."""
    # ord 0 (NUL) and ord >= 127 (DEL and beyond) are dropped as well.
    stripped = (c for c in string if 0 < ord(c) < 127)
    return ''.join(stripped)


# Ordered (pattern, replacement) rewrites applied to every post body.
# Order is significant — keep each list in its original sequence so no rule
# is pre-empted by an earlier, more general one.
_URL_FIXES = [
    # Remove URLs and stratum mining endpoints.
    (r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ''),
    (r'stratum[+]tcp?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ''),
]

# Classic forum shorthand -> plain English.
_FORUM_FIXES = [
    (r'\bthats\b', 'that is'),
    (r'\bive\b', 'i have'),
    (r'\bim\b', 'i am'),
    (r'\bya\b', 'yeah'),
    (r'\bcant\b', 'can not'),
    (r'\bwont\b', 'will not'),
    (r'\bid\b', 'i would'),
    (r'wtf', 'what the fuck'),
    (r'\bwth\b', 'what the hell'),
    (r'\br\b', 'are'),
    (r'\bu\b', 'you'),
    (r'\bk\b', 'ok'),
    (r'\bsux\b', 'sucks'),
    (r'\bno+\b', 'no'),
    (r'\bcoo+\b', 'cool'),
]

# Cryptocurrency jargon -> expanded English so VADER can score it.
_CRYPTO_FIXES = [
    (r'\bath\b', 'all time high'),
    (r'\batl\b', 'all time low'),
    (r'\bbtfd\b', 'buy the fucking dip'),
    (r'\bico\b', 'initial coin offering'),
    (r'\bfomo\b', 'fear of missing out'),
    (r'\bfud\b', 'fear uncertainty doubt'),
    (r'\bfucking\b', 'fuck'),
    (r'\bfudster\b', 'fear uncertainty doubt spreader'),
    (r'\broi\b', 'return on investment'),
    (r'\bmacd\b', 'moving average convergence divergence'),
    (r'\bpoa\b', 'proof of authority'),
    (r'\bpow\b', 'proof of work'),
    (r'\bpos\b', 'proof of stake'),
    (r'\bdapp\b', 'decentralized application'),
    (r'\bdao\b', 'decentralized autonomous organization'),
    (r'\bhodl\b', 'hold'),
    (r'\bddos\b', 'distributed denial of service'),
    (r'\bkyc\b', 'know your customer'),
    (r'\brekt\b', 'wrecked'),
    (r'\bbullish\b', 'bull'),
    (r'\bbearish\b', 'bear'),
    (r'\bpumping\b', 'pump'),
    (r'\basic\b', 'application specific integrated circuit'),
    (r'\bdyor\b', 'do your own research'),
    (r'\berc\b', 'ethereum request for comments'),
    (r'\bfa\b', 'fundamental analysis'),
    (r'\bjomo\b', 'joy of missing out'),
    (r'\bmcap\b', 'market capitalization'),
    (r'\bmsb\b', 'money services business'),
    (r'\boco\b', 'one cancels the other order'),
    (r'\bpnd\b', 'pump and dump'),
    (r'\brsi\b', 'relative strength index'),
    (r'\butxo\b', 'unspent transaction output'),
    (r'\bvolatility\b', 'volatile'),
    (r'\blamborghini\b', 'lambo'),
]

# Column layout of the scraped Bitcointalk CSV (in file order).
_CSV_COLUMNS = ['id', 'msg_id', 'parent_id', 'link_id', 'Count_read', 'Forum',
                'Time', 'Author', 'Rank', 'Activity', 'Merit', 'Trust',
                'Title', 'Body', 'ScamHeader']


def _clean_body(text):
    """Normalise a raw post body for sentiment scoring."""
    text = strip_non_ascii(text)
    text = text.lower()
    for pattern, repl in _URL_FIXES + _FORUM_FIXES + _CRYPTO_FIXES:
        text = re.sub(pattern, repl, text)
    return text


posts = []
with open(inputCsvName, 'r') as csvfile:
    # NUL bytes crash csv.reader, so strip them while streaming.
    reader = csv.reader((x.replace('\0', '') for x in csvfile), delimiter=',')
    for row in reader:
        post = dict(zip(_CSV_COLUMNS, row))
        post['clean'] = _clean_body(post['Body'])
        post['TextBlob'] = TextBlob(post['clean'])
        posts.append(post)


def polarity_scores(doc):
    """spaCy extension hook: score a Doc's text with the shared VADER analyzer."""
    return Analyzer.polarity_scores(doc.text)


Doc.set_extension('polarity_scores', getter=polarity_scores, force=True)
nlp = spacy.load('en_core_web_sm')

# Score every post and attach its compound value and sentiment label.
for post in posts:
    score = nlp(post['clean'])._.polarity_scores
    post['compound'] = score['compound']
    post['sentiment'] = vader_polarity(post['clean'])

posts_sorted = sorted(posts, key=lambda p: p['compound'])

# Partition posts by sentiment label; concatenate the cleaned bodies so the
# wordclouds below and the scam heuristic can work on each corpus.
negative_posts = [p for p in posts_sorted if p['sentiment'] == 'Negative']
negResults = ''.join(p['clean'] for p in negative_posts)

# Most frequent non-stopword tokens of the negative posts; the scam
# heuristic later looks up where 'scam' ranks in this list.
negDoc = nlp(negResults)
negWords = [token.text for token in negDoc
            if not token.is_stop and not token.is_punct and not token.is_space]
word_freq2 = Counter(negWords)
common_words2 = word_freq2.most_common(30)

positive_posts = [p for p in posts_sorted if p['sentiment'] == 'Positive']
posResults = ''.join(p['clean'] for p in positive_posts)

neutral_posts = [p for p in posts_sorted if p['sentiment'] == 'Neutral']
neuResults = ''.join(p['clean'] for p in neutral_posts)

# PLOTS
# A histogram of the compound scores.
x = [p['compound'] for p in posts_sorted]
num_bins = 21
n, bins, patches = plt.hist(x, num_bins, density=1, facecolor='green', alpha=0.5)
plt.xlabel('Compound Scores')
plt.ylabel('Probability')
plt.title(r'Histogram of Compound Scores')
plt.subplots_adjust(left=0.15)
plt.show()

# Pie chart of post counts per sentiment category.
# BUG FIX: `neu` and `neg` were swapped (neu was len(negative_posts) and
# neg was len(neutral_posts)), which mislabelled the chart and fed the
# neutral count into the scam-likelihood ratio downstream.
pos = len(positive_posts)
neu = len(neutral_posts)
neg = len(negative_posts)
labels = 'Positive', 'Neutral', 'Negative'
sizes = [pos, neu, neg]
colors = ['yellowgreen', 'gold', 'lightcoral']
plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=90)
plt.axis('equal')
plt.show()


def _show_wordcloud(text):
    """Render the notebook's shared 50-word WordCloud styling for *text*."""
    cloud = WordCloud(
        background_color='white',
        stopwords=set(STOPWORDS),
        width=1600,
        height=800,
        random_state=21,
        colormap='jet',
        max_words=50,
        max_font_size=200).generate(text)
    plt.figure(figsize=(10, 10))
    plt.imshow(cloud, interpolation='bilinear')
    plt.axis("off")
    plt.show()


_show_wordcloud(negResults)  # Negative sentiment wordcloud
_show_wordcloud(posResults)  # Positive sentiment wordcloud
_show_wordcloud(neuResults)  # Neutral sentiment wordcloud

data = pd.read_csv(inputCsvName)
data['Time'] = pd.to_datetime(data.Time)


def assessments(text):
    """Return TextBlob's per-phrase (words, polarity, subjectivity) assessments."""
    # Parameter renamed: the original shadowed the builtin `str`.
    blob = TextBlob(text)
    return blob.sentiment_assessments.assessments


def translated(series):
    """Translate *series* to English via TextBlob; '' when already English."""
    blob = TextBlob(series)
    try:
        trans = blob.translate(to='en')
        return trans.raw
    except textblob.exceptions.NotTranslated:
        return ''


data['Blob'] = data.Body.apply(TextBlob)
data['Polarity'] = data.Blob.apply(attrgetter('sentiment.polarity'))
data['Subjectivity'] = data.Blob.apply(attrgetter('sentiment.subjectivity'))
data['Assessment'] = data.Body.apply(assessments)
# data['Detect_lang'] = data.Blob.apply(methodcaller('detect_language'))  # powered by google API
# data['Translated'] = data.Body.apply(translated)
data['Vader_sentiment'] = data.Body.apply(Analyzer.polarity_scores)
data['Vader_compound'] = data['Vader_sentiment'].apply(lambda x: x['compound'])

# Side-by-side comparison of VADER and TextBlob sentiment per post.
data[['Body', 'Polarity', 'Subjectivity', 'Vader_compound', 'Vader_sentiment']]
``` isScamHeader = False count = 0 scamCount = 0 scam = 'Very Unlikely' ratio = (neg / (pos + neg + neu)) * 100 print('Analysis for: ', row[12]) for word in common_words2: count+=1 if word[0] == 'scam': print('Count of the word Scam: ', word[1]) print('Word use ranked at: ', count) scamCount = count if row[14] == 'True': isScamHeader = True if((20 > ratio >= 10) and isScamHeader == False): scam = 'Unlikely' if((35 > ratio >= 20) and isScamHeader == False): scam = 'Possible' if((ratio >= 35) and isScamHeader == False and (scamCount == 0)): scam = 'Possible' if((ratio >= 35) and isScamHeader == False and (0 < scamCount < 10)): scam = 'Likely' if((20 > ratio >= 10) and isScamHeader == True): scam = 'Possible' if((33 > ratio >= 20) and isScamHeader == True and (0 < scamCount <= 30)): scam = 'Likely' if((50 > ratio >= 33) and isScamHeader == True and (0 < scamCount <= 20)): scam = 'Very Likely' if((ratio >= 50) and isScamHeader == True and (0 < scamCount < 10)): scam = 'Almost Certain' print('Negative Sentiment in topic: ~', round(ratio), '%') print('Likelihood of scam: \033[1m' + scam) if(isScamHeader == True): print('This Cryptocurrency has been reported as a potential scam attempt') print('\033[0m') ``` ## Explanation of Results The likelihood of the cryptocurrency being a scam is based on several characteristics sorted into categories: - **Very Unlikely**: Shows minimal negativity, not reported as scam, low use of the word *scam* - **Unlikely**: Shows low negativity, not reported as scam, low use of the word *scam* - **Possible**: Shows moderative negativity, moderate use of the word *scam* and may have been reported - **Likely**: Shows above normal negativity, above normal use of the word *scam* and may have been reported - **Very Likely**: Shows above normal negativity, above normal use of the word *scam* and was reported - **Almost Certain**: Shows high negativity, high usage of the word scam and was reported as *scam* The results of this analysis are shown above 
this explanation and mention the likelihood along with a note if the scam was reported on Bitcointalk.org as showing signs of a scam attempt. These results show the word count of *scam* and its ranking compared to other words in the forum topic. Based on the increasing negativity, the ranking of the usage of *scam* in comparison to other word choices and the potential reporting of a scam, this analysis places emphasis on a building negativity relative to other posts and ranks the likelihood accordingly. ``` #os.system('jupyter nbconvert --to html vaderSentAnalysis.ipynb') # Output without custom filename os.system('jupyter nbconvert --to html vaderSentAnalysis.ipynb --output-dir ./data/processed --output ' + csvName + '.html') #os.system('jupyter nbconvert --to markdown vaderSentAnalysis.ipynb --output-dir ./data/processed --output ' + csvName + '.md') #os.system('jupyter nbconvert --to pdf vaderSentAnalysis.ipynb --output-dir ./data/processed --output ' + csvName + '.pdf') ``` An output HTML file named after the input CSV will be converted and placed into the parent directory. This HTML file may be viewed in a web browser to review a complete run of this analysis notebook on that particular cryptocurrency topic. There are two other converts which are currently disabled in the cell above (markdown & PDF export). These can be enabled at your discretion, however the PDF export requires [texlive](https://www.tug.org/texlive/) to be installed and located in PATH. NOTE: There may be CSS issues which do not show the updated outputs, please clear cache and refresh browser to find update custom.CSS file. Also, ensure that these output conversions to HTML, MD and/or PDF happen after the notebook has run and is saved. You may need to run the cell above again to get the output cells to show correctly in the processed files.
github_jupyter
# Building Your Predictor The next step after preparing and importing your data via `Getting_Data_Ready.ipynb` is to build your first model. The overall process for this is: * Setup * Create a Predictor * Deploy a Predictor * Obtain a Forecast To get started, simply execute the cells below: ## Setup Import the standard Python Libraries that are used in this lesson. ``` import sys import os import time import boto3 # importing forecast notebook utility from notebooks/common directory #sys.path.insert( 0, os.path.abspath("../../common") ) import util ``` The line below will retrieve your shared variables from the first notebook. ``` # %store -r ``` The last part of the setup process is to validate that your account can communicate with Amazon Forecast, the cell below does just that. ``` import sagemaker import boto3 sagemaker_session = sagemaker.Session() role = sagemaker.get_execution_role() bucket = sagemaker_session.default_bucket() region = boto3.Session().region_name session = boto3.Session(region_name=region) forecast = session.client(service_name='forecast') forecastquery = session.client(service_name='forecastquery') %store -r forecast_project_name %store -r forecast_dataset_group_arn %store -r forecast_dataset_arn %store -r forecast_role_name %store -r forecast_key %store -r forecast_ds_import_job_arn ``` ## Create a Predictor Now in the previous notebook, your data was imported to be used by Forecast, here we will once again define your dataset information and then start building your model or predictor. Forecast horizon is the number of number of time points to predicted in the future. For weekly data, a value of 12 means 12 weeks. Our example is hourly data, we try forecast the next day, so we can set to 24. 
``` predictor_name = forecast_project_name + '_deeparp_algo' forecast_horizon = 24 algorithm_arn = 'arn:aws:forecast:::algorithm/Deep_AR_Plus' create_predictor_response=forecast.create_predictor(PredictorName=predictor_name, AlgorithmArn=algorithm_arn, ForecastHorizon=forecast_horizon, PerformAutoML= False, PerformHPO=False, EvaluationParameters= {"NumberOfBacktestWindows": 1, "BackTestWindowOffset": 24}, InputDataConfig= {"DatasetGroupArn": forecast_dataset_group_arn}, FeaturizationConfig= {"ForecastFrequency": "H", "Featurizations": [ {"AttributeName": "target_value", "FeaturizationPipeline": [ {"FeaturizationMethodName": "filling", "FeaturizationMethodParameters": {"frontfill": "none", "middlefill": "zero", "backfill": "zero"} } ] } ] } ) forecast_predictor_arn=create_predictor_response['PredictorArn'] ``` Check the status of the predictor. When the status change from **CREATE_IN_PROGRESS** to **ACTIVE**, we can continue to next steps. Depending on data size, model selection and hyper parameters,it can take 10 mins to more than one hour to be **ACTIVE**. ``` status_indicator = util.StatusIndicator() while True: status = forecast.describe_predictor(PredictorArn=forecast_predictor_arn)['Status'] status_indicator.update(status) if status in ('ACTIVE', 'CREATE_FAILED'): break time.sleep(10) status_indicator.end() ``` ### Get Error Metrics ``` forecast.get_accuracy_metrics(PredictorArn=forecast_predictor_arn) ``` ## Create a Forecast Now create a forecast using the model that was trained ``` forecast_name = forecast_project_name + '_deeparp_algo_forecast' create_forecast_response=forecast.create_forecast(ForecastName=forecast_name, PredictorArn=forecast_predictor_arn) forecast_arn = create_forecast_response['ForecastArn'] ``` Check the status of the forecast process, when the status change from **CREATE_IN_PROGRESS** to **ACTIVE**, we can continue to next steps. 
Depending on data size, model selection and hyper parameters,it can take 10 mins to more than one hour to be **ACTIVE**. ``` status_indicator = util.StatusIndicator() while True: status = forecast.describe_forecast(ForecastArn=forecast_arn)['Status'] status_indicator.update(status) if status in ('ACTIVE', 'CREATE_FAILED'): break time.sleep(10) status_indicator.end() ``` ### Get Forecast Once created, the forecast results are ready and you view them. ``` from pprint import pprint print(forecast_arn) print() forecastResponse = forecastquery.query_forecast( ForecastArn=forecast_arn, Filters={"item_id":"client_12"} ) pprint(forecastResponse) ``` ## Next Steps Now that your forecast has been created, to evaluate it use `3.Evaluating_Your_Predictor.ipynb` but before opening it, execute the cell below to share your variables again with the next notebook. ``` %store forecast_arn %store forecast_predictor_arn ```
github_jupyter
import tensorflow as tf
import tensorflow.contrib.slim as slim

# Graph inputs: 64x64 RGB images, class labels, and 100-d latent vectors.
X = tf.placeholder(tf.float32, [None, 64,64,3])
Y = tf.placeholder(tf.float32, [None, 10])  # NOTE(review): Y is never used below.
Z = tf.placeholder(tf.float32, [None, 100])

def GENERATOR(Z):
    # Decoder: project a 100-d code to an 8x8x16 map, then upsample
    # 8 -> 16 -> 32 -> 64 with two ELU convs per resolution.
    x = slim.fully_connected(Z, (8 * 8 * 16), activation_fn=None)
    x = tf.reshape(x, [-1, 8, 8, 16])
    for i in range(3):
        x = slim.conv2d(x, 16, 3, 1, activation_fn=tf.nn.elu)
        x = slim.conv2d(x, 16, 3, 1, activation_fn=tf.nn.elu)
        x = upscale(x, 2)
    # Final projection to 3 (RGB) channels, no activation.
    x = slim.conv2d(x, 3, 3, 1, activation_fn=None)
    return x

def upscale(x, scale):
    # Nearest-neighbour upsampling of an NHWC tensor by `scale`.
    _, h, w, _ = int_shape(x)
    return tf.image.resize_nearest_neighbor(x, (h*scale, w*scale))

def int_shape(tensor):
    # Static shape as a list of ints, unknown (None) dims mapped to -1.
    shape = tensor.get_shape().as_list()
    return [num if num is not None else -1 for num in shape]

def norm_img(image):
    # Map pixel values from [0, 255] to [-1, 1].
    return image/127.5 - 1.

def denorm_img(norm):
    # Map [-1, 1] back to [0, 255] with clipping.
    # NOTE(review): scales by 127. rather than 127.5, so this is not an
    # exact inverse of norm_img — confirm whether that is intentional.
    return tf.clip_by_value(((norm + 1)*127.), 0, 255)

def sample_z(m, n):
    # m latent codes drawn uniformly from [-1, 1).
    return np.random.uniform(-1., 1., size=[m, n])

def DISCRIMINATOR(X):
    # Encoder: 64x64x3 image -> 100-d embedding.  AUTO_REUSE shares the
    # variables in scope 'D' across the multiple calls below.
    with tf.variable_scope('D', reuse=tf.AUTO_REUSE) as vs:
        x = slim.conv2d(X, 16, 3, 1, activation_fn=tf.nn.elu)
        x = slim.conv2d(x, 16, 3, 1, activation_fn=tf.nn.elu)
        x = slim.conv2d(x, 16, 3, 1, activation_fn=tf.nn.elu)
        # 64 * 16
        x = slim.conv2d(x, 32, 3, 2, activation_fn=tf.nn.elu)
        x = slim.conv2d(x, 32, 3, 1, activation_fn=tf.nn.elu)
        x = slim.conv2d(x, 32, 3, 1, activation_fn=tf.nn.elu)
        # 32 * 32
        x = slim.conv2d(x, 48, 3, 2, activation_fn=tf.nn.elu)
        x = slim.conv2d(x, 48, 3, 1, activation_fn=tf.nn.elu)
        x = slim.conv2d(x, 48, 3, 1, activation_fn=tf.nn.elu)
        # 16 * 48
        x = tf.layers.flatten(x)
        z = slim.fully_connected(x, 100, activation_fn=None)
    return z

# BEGAN-style wiring: DISCRIMINATOR encodes an image to a 100-d code and
# GENERATOR decodes a code back into an image.
G = GENERATOR(Z)        # image generated from noise
RD = DISCRIMINATOR(X)   # code of a real image
GD = DISCRIMINATOR(G)   # code of a generated image
_RD = GENERATOR(RD)     # reconstruction of the real image
_GD = GENERATOR(GD)     # reconstruction of the generated image

# L1 reconstruction losses.
# NOTE(review): L_GENE compares the generated image's reconstruction with
# the real batch X (not with G) — verify against the BEGAN formulation.
L_REAL = tf.reduce_mean(tf.abs(_RD - X))
L_GENE = tf.reduce_mean(tf.abs(_GD - X))
K = tf.placeholder(tf.float32)  # BEGAN balancing term k_t

# BEGAN
D_LOSS = L_REAL - K*L_GENE
G_LOSS = tf.reduce_mean(tf.abs(_GD - X))
D_solver = (tf.train.AdamOptimizer(learning_rate=0.001) .minimize(D_LOSS))
G_solver = (tf.train.AdamOptimizer(learning_rate=0.001) .minimize(G_LOSS))

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# BEGAN control scalars: k_t, target ratio gamma, and k's learning rate.
K_NOW = 0.001
gamma = 0.3
lam = 1e-3
# NOTE(review): `images` is defined in a later cell — these notebook cells
# appear to have been executed out of order; confirm before rerunning.
for i in range(2000):
    NOISE = sample_z(16,100)
    BAT_IMAGE = images[0:16]
    _, L_REAL_NOW = sess.run([D_solver, L_REAL], feed_dict={ X: BAT_IMAGE, Z: NOISE, K: K_NOW })
    _, L_GENE_NOW = sess.run([G_solver, L_GENE], feed_dict={ X: BAT_IMAGE, Z: NOISE})
    # k_{t+1} = k_t + lambda * (gamma * L_real - L_gen)
    K_NOW = K_NOW + lam * (gamma*L_REAL_NOW - L_GENE_NOW)
    # Convergence measure M = L_real + |gamma * L_real - L_gen|.
    measure = L_GENE_NOW + np.abs(gamma*L_REAL_NOW - L_GENE_NOW)
    print('Iter-{}; Convergence measure: {:.4} D Loss: {:.4}'.format(i, measure, L_REAL_NOW))
    if i % 10 == 0:
        # Periodically dump two sample images generated from fresh noise.
        made_image = sess.run(G, feed_dict={Z:sample_z(16,100)})
        np.random.shuffle(images)
        c = 0
        for j in made_image[1:3]:
            c += 1
            cv2.imwrite('%03d-%02d.jpg'%(i,c),(j+1)*120)

# NOTE(review): _RD is a graph Tensor at this point, so feeding it as the
# value of Z below will fail at run time — looks like leftover experimentation.
NOISE= _RD
made_image = sess.run(G, feed_dict={ X: BAT_IMAGE, Z: NOISE, K: K_NOW } )
c = 0
i = 0
for j in BAT_IMAGE[:5]:
    c += 1
    cv2.imwrite('N%03d-%02d.jpg'%(i,c),cv2.cvtColor((j+1)*120, cv2.COLOR_RGB2BGR))

# Second experiment: G itself is compared against X, so L_REAL, L_GENE and
# G_LOSS are all the same expression here.
G = GENERATOR(Z)
RD = DISCRIMINATOR(X)
GD = DISCRIMINATOR(G)
L_REAL = tf.reduce_mean(tf.abs(G - X))
L_GENE = tf.reduce_mean(tf.abs(G - X))
K = tf.placeholder(tf.float32)

# BEGAN
D_LOSS = L_REAL - K*L_GENE
G_LOSS = tf.reduce_mean(tf.abs(G - X))
D_solver = (tf.train.AdamOptimizer(learning_rate=0.001) .minimize(D_LOSS))
G_solver = (tf.train.AdamOptimizer(learning_rate=0.001) .minimize(G_LOSS))

K_NOW = 0.001
sess = tf.Session()
sess.run(tf.global_variables_initializer())
gamma = 0.7
# NOTE(review): `lam` is reused from the first experiment's cell.
for i in range(1000):
    NOISE = sample_z(16,100)
    BAT_IMAGE = images[0:16]
    # Also fetch RD: the 100-d codes of the real batch.
    _, L_REAL_NOW, _RD = sess.run([D_solver, L_REAL, RD], feed_dict={ X: BAT_IMAGE, Z: NOISE, K: K_NOW })
    # Feed the real images' codes back in as the latent input for G's step.
    _, L_GENE_NOW = sess.run([G_solver, L_GENE], feed_dict={ X: BAT_IMAGE, Z: _RD})
    K_NOW = K_NOW + lam * (gamma*L_REAL_NOW - L_GENE_NOW)
    measure = L_GENE_NOW + np.abs(gamma*L_REAL_NOW - L_GENE_NOW)
    print('Iter-{}; Convergence measure: {:.4} D Loss: {:.4}'.format(i, measure, L_REAL_NOW))
    if i % 10 == 0:
        made_image = sess.run(G, feed_dict={Z:sample_z(16,100)})
        np.random.shuffle(images)
        c = 0
        for j in made_image[1:3]:
            c += 1
            cv2.imwrite('%03d-%02d.jpg'%(i,c),cv2.cvtColor((j+1)*120, cv2.COLOR_RGB2BGR))

# Data-loading cell: 64x64 anime-face crops, normalised to [-1, 1].
from keras.preprocessing.image import load_img, img_to_array
import keras
import tensorflow as tf
import numpy as np
import cv2
import os
import math

files = os.listdir(r'..\AnimeFaceDetecter\Datas\result\miku')
keras.backend.set_image_data_format('channels_last')
#keras.backend.set_image_data_format('channels_first')
images = []
for i in files:
    image = load_img(r'..\AnimeFaceDetecter\Datas\result\miku\{}'.format(i),False,target_size=(64,64))
    image = img_to_array(image)
    image = image
    images.append(norm_img(image))
print(image.shape)

# NOTE(review): this immediately overwrites the loaded images with zeros —
# presumably a placeholder batch; confirm which `images` was intended.
import numpy as np
images= np.zeros([16,64,64,3])
github_jupyter
# **pyspec** example notebook: 2D spectrum This notebook showcases a basic usage of **pyspec** for computing 2D spectrum and its associated isotropic spectrum. Other featrures such as bin average in log space and confidence limit estimation are also shown. ``` import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import LogNorm %matplotlib inline import seawater as sw from pyspec import spectrum as spec ``` # Load random data with $\kappa^{-3}$ spectrum ``` fni = "data/synthetic_uv.npz" uv_synthetic = np.load(fni) up = uv_synthetic['up'] # We may also want to calculate the wavenumber spectrum of a 3d-array along two dimensions, and # then average along the third dimension. Here we showcase that pyspec capability by repeating the # up array... up2 = np.tile(up,(10,1,1)).T up2.shape ``` # Compute and plot the 2D spectrum using $dx = dy = 1$ ``` spec2d10 = spec.TWODimensional_spec(up2,1.,1.) spec2d = spec.TWODimensional_spec(up,1.,1.) fig = plt.figure(figsize=(9,7)) ax = fig.add_subplot(111) cf = ax.contourf(spec2d.kk1,spec2d.kk2,spec2d.spec.mean(axis=-1),np.logspace(-6,6,10),norm=LogNorm(vmin=1.e-6,vmax=1e6)) cb = plt.colorbar(cf) ax.set_xlabel(r'$k_x$') ax.set_ylabel(r'$k_y$') cb.set_label(r'log$_{10}$ E') fig = plt.figure(figsize=(9,7)) ax = fig.add_subplot(111) cf = ax.contourf(spec2d.kk1,spec2d.kk2,spec2d10.spec.mean(axis=-1),np.logspace(-6,6,10),norm=LogNorm(vmin=1.e-6,vmax=1e6)) cb = plt.colorbar(cf) ax.set_xlabel(r'$k_x$') ax.set_ylabel(r'$k_y$') cb.set_label(r'log$_{10}$ E') ``` # Calculating the isotropic spectrum The class "TWODimensional_spec" has the objects "ispec" for isotropic the spectrum and "kr" for the isotropic wavenumber. The isotropic spectrum is computed by interpolating the 2D spectrum from Cartesian to polar coordinates and integrating in the azimuthal direction; the integration is not very accurate at low wavenumbers due to the paucity of information. 
An important point is that we neglect the corners ($\kappa > max(k_x,k_y)$) since in this square domain it preferentially selects some direction. Hence, we just need to plot it. ``` spec2d.ndim k3 = np.array([.5e-2,.5]) E3 = 1/k3**3/1e5 fig = plt.figure(figsize=(9,7)) ax = fig.add_subplot(111) plt.loglog(spec2d.ki,spec2d10.ispec.mean(axis=-1)) plt.loglog(k3,E3,'k--') plt.text(1.e-2,50,r'$\kappa^{-3}$',fontsize=25) ax.set_xlabel(r"Wavenumber") ax.set_ylabel(r"Spectral density") ``` # Averaging with 10 bins per decade Because we generally plot and analyze spectra in $\log_{10}\times\log_{10}$, it is sometimes useful to bin the spectrum. This makes the spectrum uniformly spaced in log space. This may be useful for avoiding bias from more data at high wavenumbers when trying to least-squares fit slopes to the spectrum in log space. The module **spec** has a built-in function that does the spectral average. Here we use 10 bins per decade. ``` ki, Ei = spec.avg_per_decade(spec2d.ki,spec2d.ispec,nbins = 10) fig = plt.figure(figsize=(9,7)) ax = fig.add_subplot(111) plt.loglog(spec2d.ki,spec2d.ispec,label='raw') plt.loglog(ki,Ei,label='binned') ax.set_xlabel(r"Wavenumber") ax.set_ylabel(r"Spectral density") plt.legend(loc=3) ``` # Adding error bars **pyspec** has a built-in function to calculate confidence limits to the 1D spectrum. The function **spec_error** calculates these confidence limits assuming that the estimates of the spectrum are $\chi^2$-distributed. Suppose we have estimated the spectra Ei with a different amount of averaging at different wavenumbers. Thus we have a different number of spectral realizations. To illustrate how to use the function, we pick some arbitrary numbers. 
``` sn = 5*np.ones(Ei.size) # number of spectral realizations sn[10:16] = 20 sn[16:] = 100 El,Eu = spec.spec_error(Ei, sn, ci=0.95) # calculate lower and upper limit of confidence limit fig = plt.figure(figsize=(9,7)) ax = fig.add_subplot(111) ax.fill_between(ki,El,Eu, color='r', alpha=0.25) plt.loglog(ki,Ei,color='r') ax.set_xlabel(r"Wavenumber") ax.set_ylabel(r"Spectral density") ```
github_jupyter
# IBM Data Science Capstone Project This Project is to create a model which can recommend the nearest neighborhood in canada to move to using machine learning alogrithms and geo api from foursquare ## Table of Content: --- * [Prepare & Clean The Data](#Cleaning-and-Data-Preparing) * [Visualize The Data](#) # Cleaning and Data Preparing ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt from geopy.geocoders import Nominatim import folium from pandas.io.json import json_normalize import requests import matplotlib.cm as cm import matplotlib.colors as colors from sklearn.cluster import KMeans data = pd.read_html("https://en.wikipedia.org/w/index.php?title=List_of_postal_codes_of_Canada:_M&oldid=1011037969") df = data[0] df.head() # cleaning df = df[df['Borough'] != 'Not assigned'] df.head(10) df[df['Neighbourhood'] == 'Not assigned'] df.shape ``` ### Import Data Corndinates ``` df_cor = pd.read_csv('https://cocl.us/Geospatial_data') df_cor.head() df_cor.set_index('Postal Code') p_data = df.join(df_cor.set_index('Postal Code'), on='Postal Code') p_data.reset_index(drop = True, inplace=True) p_data.head() CLIENT_ID = 'QMFYKU4KSWVFRMNFCPSMQR2Q3SBS55EGZNEQKKERK1G02530' # your Foursquare ID CLIENT_SECRET = '2044ODGCFHSQ0WBXMIGYKXCOBG4FZIF13OAIAHJNUHK5Q00J' # your Foursquare Secret ACCESS_TOKEN = 'RHKVHHQNZPLMSTJ3IEY5Q1JVKWEW5QIHNBZUEVQ2CEVAWOXW' # your FourSquare Access Token VERSION = '20180604' LIMIT = 100 radius = 500 def venues_(name, latitude, longitude, radius=500): venues_list = [] for name, latitude, longitude in zip(name, latitude, longitude): url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&ll={},{}&oauth_token={}&v={}&radius={}&limit={}'.format( CLIENT_ID, CLIENT_SECRET, latitude, longitude, ACCESS_TOKEN, VERSION, radius, LIMIT) response = requests.get(url).json()['response']['groups'][0]['items'] venues_list.append([(name, latitude, longitude, v['venue']['name'], v['venue']['location']['lat'], 
v['venue']['location']['lng'], v['venue']['categories'][0]['name']) for v in response]) df_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list]) df_venues.columns=['Neighbourhood', 'Neighbourhood_lat', 'Neighbourhood_lng', 'Venue', 'Venue_lat', 'Venue_lng', 'Venue_cat'] return(df_venues) combiend_df = venues_(p_data['Neighbourhood'], p_data['Latitude'], p_data['Longitude']) combiend_df.head() combiend_df.groupby('Neighbourhood').count() len(combiend_df['Venue_cat'].unique()) combiend_df_onehot = pd.get_dummies(combiend_df[['Venue_cat']], prefix="", prefix_sep="") combiend_df_onehot['Neighbourhood'] = combiend_df['Neighbourhood'] # move neighborhood file to the first column neigh_col = [combiend_df_onehot.columns[-1]] + list(combiend_df_onehot.columns[:-1]) combiend_df_onehot = combiend_df_onehot[neigh_col] combiend_df_onehot.head() toronto_df = combiend_df_onehot.groupby('Neighbourhood').mean().reset_index() toronto_df def top_venues(data, num): df_venues = data.iloc[1:] df_venues_sorted = df_venues.sort_values(ascending=False) return df_venues_sorted.index.values[0: num] num_venues = 10 col_ind = ['st', 'nd', 'rd'] cols = ['Neighbourhood'] for i in np.arange(num_venues): try: cols.append('{}{} Common Venues'.format(i+1, col_ind[i])) except: cols.append('{}th Common Venues'.format(i+1)) neigh_venues_sorted = pd.DataFrame(columns=cols) neigh_venues_sorted['Neighbourhood'] = toronto_df['Neighbourhood'] for i in np.arange(toronto_df.shape[0]): neigh_venues_sorted.iloc[i, 1:] = top_venues(toronto_df.iloc[i, :], num_venues) neigh_venues_sorted.head() toronto_df_clusters = toronto_df.drop('Neighbourhood', 1) clusters = KMeans(n_clusters=5, random_state=0).fit(toronto_df_clusters) clusters.labels_[0:20] neigh_venues_sorted.insert(0, 'Cluster Labels', clusters.labels_) toronto_data = p_data toronto_data = toronto_data.join(neigh_venues_sorted.set_index('Neighbourhood'), on='Neighbourhood', how='right') toronto_data.head() address = 'Toronto' 
geolocator = Nominatim(user_agent='explorer') location = geolocator.geocode(address) latitude = location.latitude longitude = location.longitude map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11) # set color scheme for the clusters x = np.arange(5) ys = [i + x + (i*x)**2 for i in range(5)] colors_array = cm.rainbow(np.linspace(0, 1, len(ys))) rainbow = [colors.rgb2hex(i) for i in colors_array] # add markers to the map markers_colors = [] for lat, lon, poi, cluster in zip(toronto_data['Latitude'], toronto_data['Longitude'], toronto_data['Neighbourhood'], toronto_data['Cluster Labels']): label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True) folium.CircleMarker( [lat, lon], radius=5, popup=label, color=rainbow[cluster-1], fill=True, fill_color=rainbow[cluster-1], fill_opacity=0.7).add_to(map_clusters) map_clusters ```
github_jupyter
# Random Decision Trees Regression Example ## Boston housing prices The objective is to predict the median price of a home in Boston. The variables are crime rate, zoning information, proportion of non-retail business, etc. This dataset has median prices in Boston for 1972. Even though the data is pretty old, the methodology for analytics is valid for more recent datasets. <b>The purpose of this demonstration is to show the use of SAP HANA's Predictive Analytics Library to created Random Decision Trees model.</b> The dataset is from Kaggle. https://www.kaggle.com/c/boston-housing. For tutorials use only. ## Housing Values in Suburbs of Boston in 1972 The <font color='red'>medv</font> variable is the target variable. ### Data description The Boston data frame has 506 rows and 14 columns. This data frame contains the following columns: 1. __crim__: per capita crime rate by town. 2. __zn__: proportion of residential land zoned for lots over 25,000 sq.ft. 3. __indus__: proportion of non-retail business acres per town. 4. __chas__: Charles River dummy variable (1 if tract bounds river; 0 otherwise). 5. __nox__: nitrogen oxides concentration (parts per 10 million). 6. __rm__: average number of rooms per dwelling. 7. __age__: proportion of owner-occupied units built prior to 1940. 8. __dis__: weighted mean of distances to five Boston employment centres. 9. __rad__: index of accessibility to radial highways. 10. __tax__: full-value property-tax rate per \$10000 11. __ptratio__: pupil-teacher ratio by town. 12. __black__: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town. 13. __lstat__: lower status of the population (percent). 14. __medv__: median value of owner-occupied homes in $1000s. </td></tr></table> ### Factoids The prices in Boston across years is below. If we had a historical dataset, an analysis could be done to account for the macro trends as well. The second graph shows the intuition we have with respect to prices in relation to crime rate. 
It is expected that house prices will be lower in areas where crime rates are higher. The third figure is a chart showing how inflation may affect prices. So, for deeper analysis and prediction, we may want to consider inflation. In this notebook, these factors are not considered. They are here to demonstrate the need for deep domain analysis. <table><tr> <td><img src="images/boston_prices_by_year.png" alt="Boston home prices" title="Boston housing prices" style="float:left;" /></td> <td><img src="images/Crime-Rate-and-Median-House-Prices.png" alt="Boston home prices" title="Boston housing prices" /></td> <td><img src="images/Inflation_Adjusted_Housing_Prices_1890_2006.jpg" alt="Inflation adjusted prices" title="Inflation adjusted prices" style="float:left;" /> </td></tr></table> In this notebook, we will use the dataset for Boston housing prices and predict the price based on numerous factors. ``` from hana_ml import dataframe from hana_ml.algorithms.pal import clustering from hana_ml.algorithms.pal import trees import numpy as np import matplotlib.pyplot as plt import logging ``` ## Load data The data is loaded into 4 tables, for full, training, validation, and test sets: <li>BOSTON_HOUSING_PRICES</li> <li>BOSTON_HOUSING_PRICES_TRAINING</li> <li>BOSTON_HOUSING_PRICES_VALIDATION</li> <li>BOSTON_HOUSING_PRICES_TEST</li> To do that, a connection is created and passed to the loader. There is a config file, config/e2edata.ini that controls the connection parameters and whether or not to reload the data from scratch. In case the data is already loaded, there would be no need to load the data. A sample section is below. If the config parameter, reload_data is true then the tables for test, training, and validation are (re-)created and data inserted into them. Although this ini file has other sections, please do not modify them. Only the [hana] section should be modified. 
#########################<br> [hana]<br> url=host.sjc.sap.corp<br> user=username<br> passwd=userpassword<br> port=3xx15<br> #########################<br> ``` from data_load_utils import DataSets, Settings url, port, user, pwd = Settings.load_config("../../config/e2edata.ini") connection_context = dataframe.ConnectionContext(url, port, user, pwd) full_tbl, training_tbl, validation_tbl, test_tbl = DataSets.load_boston_housing_data(connection_context) ``` # Create Data Frames Create the data frames for the full, test, training, and validation sets. Let us also do some data exploration. ## Define Datasets - Training, validation, and test sets Data frames are used to keep references to data so computation on large data sets in HANA can happen in HANA. Trying to bring the entire data set into the client will likely result in out of memory exceptions. The original/full dataset is split into training, test and validation sets. In the example below, they reside in different tables. ``` full_set = connection_context.table(full_tbl) training_set = connection_context.table(training_tbl) validation_set = connection_context.table(validation_tbl) test_set = connection_context.table(test_tbl) ``` ## Simple Exploration Let us look at the number of rows in the data set ``` print('Number of rows in full set: {}'.format(full_set.count())) print('Number of rows in training set: {}'.format(training_set.count())) print('Number of rows in validation set: {}'.format(validation_set.count())) print('Number of rows in test set: {}'.format(test_set.count())) ``` ### Let's look at the columns ``` print(full_set.columns) ``` ### Let's look at the data types ``` full_set.dtypes() ``` ### Set up the features and labels for the model ``` features=['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'BLACK', 'LSTAT'] label='MEDV' ``` # Create model using training data For demonstration, we will create two models, model and model_with_id, one where we have a unique id in 
the training set and one where there is none. We are using Random Decision Trees regression and SVM routines in this example Documentation is <a href="https://help.sap.com/http.svc/rc/DRAFT/3f0dbe754b194c42a6bf3405697b711f/2.0.031/en-US/html/index.html">here</a> ## Preprocessing SAP HANA Predictive Analytics Library takes DOUBLE and INTEGER data types for most numeric types. Since we have DECIMALs and TINYINTs in our data set, we cast them to the types required by PAL. ``` # Cast to correct types so PAL can consume it. dfts = training_set.cast(['CRIM', "ZN", "INDUS", "NOX", "RM", "AGE", "DIS", "PTRATIO", "BLACK", "LSTAT", "MEDV"], "DOUBLE") dfts = dfts.cast(["CHAS", "RAD", "TAX"], "INTEGER") dfts = dfts.to_head("ID") dfts.head(5).collect() ``` ## Create the model Although we had seen graphically that only a few features had an impact on housing prices, let us use all the features to create a model. We will then use the model to check for importance of the features. ``` # We build the model without IDs. Project only the features and the label. df = dfts.select(features, label) model = trees.RDTRegressor() model.fit(df, features=features, label=label) ``` ### SQL statements executed Calling PAL directly would require a number of SQL statements and all that is encapsulated in the Python library functions. ## Model analysis Let's just see what features are most important. Note that we are using a sort function. The property __feature_importances___ is automatically set when the fit() method is called above. ``` model.feature_importances_.sort(['IMPORTANCE'], desc=True).collect() ``` __As you can see above, LSTAT, RM, NOX, and PTRATIO seem to have the most impact on prices.__ # Predict using test set Let us now do some predictions and see how well the model generalizes. The predict() method always takes a unique identifier to identify the prediction on a specific data row. 
This way, the caller (python programmer) can then join with the original data set to get the rest of the values for that unique row. The test_set has columns of types that PAL does not deal with and therefore the columns are cast to the types that are accepted. In order to look at the predicted value as well as the true value, the name of the unique identifier for rows in the result table is renamed to PREDICTED_ID. This result table is joined with the test set so the predicted and true value can be compared. For the predictions we look at the standard error. The standard error is defined as the number of standard deviations away the prediction is from the true value. ``` df_test = test_set.cast(['CRIM', "ZN", "INDUS", "NOX", "RM", "AGE", "DIS", "PTRATIO", "BLACK", "LSTAT", "MEDV"], "DOUBLE") df_test = df_test.cast(["CHAS", "RAD", "TAX"], "INTEGER") df_test = df_test.to_head("ID") # Note that we are renaming the column ID in the result of predict() model.enable_parallel_by_parameter_partitions() result_df = model.predict(df_test, key= 'ID', features=features, verbose=True).rename_columns({'ID': 'PREDICTED_ID'}) # Note the use of join() method to join two tables. jdf = result_df.join(test_set, '{}."PREDICTED_ID"={}."ID"'.format(result_df.name, test_set.name), how='inner') result_df.collect() ``` ### Predictions Let us look at the predictions. The predicted values are in 'SCORE' and the actual values are in 'MEDV'. So, we just rename the 'SCORE' column to 'PREDICTED' In addition, the column 'CONFIDENCE' is the standard error which is the number of standard deviations away the actual values is from the predicted value. This column is renamed to 'STANDARD_ERROR' ``` jdf.select(['ID', 'SCORE', 'MEDV', 'CONFIDENCE']).rename_columns({"CONFIDENCE": "STANDARD_ERROR", "SCORE": "PREDICTED"}).sort("STANDARD_ERROR", desc=False).head(5).collect() ``` ### Out of bag error Let us look at the out of bag errors which is a method of measuring the prediction error. 
Here we look at the first 4 rows ``` model.oob_error_.head(4).collect() ``` ## Scoring We now score the results from are test data. The scoring function we use is R^2. __In the function below, PAL is not invoked but a query is directly executed against data in HANA__ ``` r2_score = model.score(df_test, key='ID', features=features, label=label) print("r2 score is {}".format(r2_score)) ``` ## Model The model is available and can be saved for later predictions ``` # The generated model is in the database. model.model_.head(4).collect() ``` ## Close the Connection ``` connection_context.close() ```
github_jupyter
``` import numpy as np import json import scipy.interpolate import matplotlib.pyplot as plt from collections import OrderedDict from pprint import pprint #All the files paths file_kinect="./Données/Kinect/chris1/chris1_1_interpolated.txt" file_xsens="./Données/Xsens/chris1/chris1_1_interpolated.txt" file_mobilenet="./Données/Mobilenet/chris1/chris1_1_interpolated.txt" #We import all the files in a json format with open(file_kinect) as f1: dataKinect = json.load(f1, object_pairs_hook=OrderedDict) with open(file_xsens) as f2: dataXsens = json.load(f2, object_pairs_hook=OrderedDict) with open(file_mobilenet) as f3: dataMobilenet = json.load(f3, object_pairs_hook=OrderedDict) #We collect the positions and copy them in variables positions_Kinect=dataKinect['positions'] positions_Xsens=dataXsens['positions'] positions_Mobilenet=dataMobilenet['positions'] #Xsens Rotation on the y axis teta=np.pi/2 Rotation_y_Xsens=np.array([[np.cos(teta),-np.sin(teta),0],[np.sin(teta),np.cos(teta),0],[0,0,1]]) for frame in positions_Xsens.keys(): frame_pos=positions_Xsens[frame] for bPart in frame_pos.keys(): pos=frame_pos[bPart] pos_float=[] for coord in pos: pos_float.append(float(coord)) frame_pos[bPart]=np.dot(pos_float,Rotation_y_Xsens) positions_Xsens[frame]=frame_pos #Xsens Rotation on the x axis teta=np.pi/2 Rotation_x_Xsens=np.array([[np.cos(teta),0,np.sin(teta)],[0,1,0],[-np.sin(teta),0,np.cos(teta)]]) for frame in positions_Xsens.keys(): frame_pos=positions_Xsens[frame] for bPart in frame_pos.keys(): pos=frame_pos[bPart] pos_float=[] for coord in pos: pos_float.append(float(coord)) frame_pos[bPart]=np.dot(pos_float,Rotation_x_Xsens) positions_Xsens[frame]=frame_pos #Kinect Rotation on the y axis teta=-3*np.pi/2 Rotation_y_Kinect=np.array([[np.cos(teta),-np.sin(teta),0],[np.sin(teta),np.cos(teta),0],[0,0,1]]) for frame in positions_Kinect.keys(): frame_pos=positions_Kinect[frame] for bPart in frame_pos.keys(): pos=frame_pos[bPart] pos_float=[] for coord in pos: 
pos_float.append(float(coord)) frame_pos[bPart]=np.dot(pos_float,Rotation_y_Kinect) positions_Kinect[frame]=frame_pos #Kinect Roation on the x axis teta=np.pi/2 Rotation_x_Kinect=np.array([[np.cos(teta),0,np.sin(teta)],[0,1,0],[-np.sin(teta),0,np.cos(teta)]]) for frame in positions_Kinect.keys(): frame_pos=positions_Kinect[frame] for bPart in frame_pos.keys(): pos=frame_pos[bPart] pos_float=[] for coord in pos: pos_float.append(float(coord)) frame_pos[bPart]=np.dot(pos_float,Rotation_x_Kinect) positions_Kinect[frame]=frame_pos #Body parts of the Kinect and the Xsens bPartsKinect=list(list(positions_Kinect.values())[0].keys()) bPartsXsens=list(list(positions_Xsens.values())[0].keys()) #Common body_parts between the kinect, mobilenet and the Xsens, we compare only those parts common_body_parts=['Head', 'lAnkle', 'lElbow', 'lHip', 'lKnee', 'lShoulder', 'lWrist', 'mShoulder', 'rAnkle', 'rElbow', 'rHip', 'rKnee', 'rShoulder', 'rWrist'] body_parts_Mobilenet={'Head':'Head','lAnkle':'rAnkle','lElbow':'rElbow', 'lHip':'rHip', 'lKnee':'rKnee', 'lShoulder':'rShoulder', 'lWrist':'rWrist', 'mShoulder':'mShoulder', 'rAnkle':'lAnkle', 'rElbow':'lElbow', 'rHip':'lHip', 'rKnee':'lKnee', 'rShoulder':'lShoulder', 'rWrist':'lWrist'} #Initialize dictionaries which will contain the distances for each time distances_Kinect={} distances_Xsens={} distances_Mobilenet={} for time in positions_Kinect.keys(): distances_Kinect[time]={} distances_Xsens[time]={} distances_Mobilenet[time]={} bPartsMobilenet=list(positions_Mobilenet[time].keys()) #Detected body parts on the current time for bPart in common_body_parts: #Compute the distances of the mobilenet xKinect=positions_Kinect[time][bPart][1] yKinect=positions_Kinect[time][bPart][2] distanceKinect=np.sqrt(xKinect**2+yKinect**2) #Sometimes the mobilenet doesn't detect all the body parts, we check if those body parts exist first if bPart in bPartsMobilenet: xMobilenet=positions_Mobilenet[time][bPart][0] 
yMobilenet=positions_Mobilenet[time][bPart][1] distanceMobilenet=np.sqrt(xMobilenet**2+yMobilenet**2) else: distanceMobilenet=10000 #Save the distances in a dictionnary distances_Kinect[time][bPart]=distanceKinect distances_Mobilenet[time][bPart]=distanceMobilenet for bPart in bPartsXsens: #Compute the distances for the Xsens xXsens=positions_Xsens[time][bPart][1] yXsens=positions_Xsens[time][bPart][2] distanceXsens=np.sqrt(xXsens**2+yXsens**2) distances_Xsens[time][bPart]=distanceXsens #Connecting body parts of the Xsens with Kinect body_parts_Xsens={"Head":"Head","mShoulder":"T8","rShoulder":"RightShoulder","rElbow":"RightUpperArm", "rWrist":"RightForeArm","lShoulder":"LeftShoulder","lElbow":"RightUpperArm","lWrist":"LeftForeArm", "rHip":"RightUpperLeg","rKnee":"RightLowerLeg","rAnkle":"RightFoot","lHip":"LeftUpperLeg", "lKnee":"LeftLowerLeg","lAnkle":"LeftFoot"} body_parts_Mobilenet={"Head":"Head","lAnkle":"rAnkle",'lElbow':'rElbow', 'lHip':'rHip', 'lKnee':'rKnee', 'lShoulder':'rShoulder', 'lWrist':'rWrist', 'mShoulder':'mShoulder', 'rAnkle':'lAnkle', 'rElbow':'lElbow', 'rHip':'lHip', 'rKnee':'lKnee', 'rShoulder':'lShoulder', 'rWrist':'lWrist'} #Body Parts of the Kinect bPartsKinect=list(list(positions_Kinect.values())[0].keys()) common_body_parts=['Head', 'lAnkle', 'lElbow', 'lHip', 'lKnee', 'lShoulder', 'lWrist', 'mShoulder', 'rAnkle', 'rElbow', 'rHip', 'rKnee', 'rShoulder', 'rWrist'] #Filling the variance dictionnary including the variance between the three algorithms for each body part in all times Variances={} #For each time we create a new dictionary containing all body parts and their variances for time in positions_Kinect.keys(): Variances[time]={} #Filling for each body part for bPart in common_body_parts: distanceKinect=distances_Kinect[time][bPart] #Since the Xsens has different body parts names, we look for its equivalent in the body_parts_Xsens dictionnary XbPart=body_parts_Xsens[bPart] distanceXsens=distances_Xsens[time][XbPart] 
var=np.var((positions_Kinect[time][bPart][1:],positions_Mobilenet[time][body_parts_Mobilenet[bPart]],positions_Xsens[time][body_parts_Xsens[bPart]][1:])) Variances[time][bPart]=var #Plot of the evolution of the variance of the tree distances for a body part Times=list(Variances.keys()) Times_float=[] for time in Times: Times_float.append(float(time)) Times_float=sorted(Times_float) Var_rWrist=[] Var_rElbow=[] Var_lWrist=[] Var_lElbow=[] Var_rShoulder=[] Var_lShoulder=[] for time in Times_float: Var_rWrist.append(Variances[str(time)]['rWrist']) Var_rElbow.append(Variances[str(time)]['rElbow']) Var_lWrist.append(Variances[str(time)]['lWrist']) Var_lElbow.append(Variances[str(time)]['lElbow']) Var_rShoulder.append(Variances[str(time)]['rShoulder']) Var_lShoulder.append(Variances[str(time)]['lShoulder']) plt.plot(Times_float,Var_rWrist,label='rWrist') plt.plot(Times_float,Var_rElbow,color='red',label='rElbow') plt.plot(Times_float,Var_lWrist,color='green',label='lWrist') plt.plot(Times_float,Var_lElbow,label='lElbow') plt.plot(Times_float,Var_rShoulder,label='rShoulder') plt.plot(Times_float,Var_lShoulder,label='lShoulder',color='black') plt.legend() plt.show() #Comparaison with the terrain field (Xsens) Difference_Mobilenet=[] Difference_Kinect=[] bPart='lElbow' Times_float=[] Times=list(Variances.keys()) Times_float=[] for time in Times: Times_float.append(float(time)) Times_float=sorted(Times_float) for time in Times_float: diff_Mobilenet=np.sqrt(np.var((positions_Mobilenet[str(time)][body_parts_Mobilenet[bPart]][:],positions_Xsens[str(time)][body_parts_Xsens[bPart]][1:]))) diff_Kinect=np.sqrt(np.var((positions_Kinect[str(time)][bPart][1:],positions_Xsens[str(time)][body_parts_Xsens[bPart]][1:]))) Difference_Mobilenet.append(diff_Mobilenet) Difference_Kinect.append(diff_Kinect) plt.plot(Times_float,Difference_Mobilenet,color='blue',label='Var Mob-Xs') plt.plot(Times_float,Difference_Kinect,color='red',label='Var Kinect-Xs') plt.legend() plt.show() #Comparaison with 
the terrain field (Xsens) Difference_Mobilenet=[] Difference_Kinect=[] bPart='lElbow' Times_float=[] Times=list(Variances.keys()) Times_float=[] for time in Times: Times_float.append(float(time)) Times_float=sorted(Times_float) for time in Times_float: diff_Mobilenet=np.sqrt((positions_Mobilenet[str(time)][body_parts_Mobilenet[bPart]][0]-positions_Xsens[str(time)][body_parts_Xsens[bPart]][1])**2+(positions_Mobilenet[str(time)][body_parts_Mobilenet[bPart]][1]-positions_Xsens[str(time)][body_parts_Xsens[bPart]][2])**2) diff_Kinect=np.sqrt((positions_Kinect[str(time)][bPart][1]-positions_Xsens[str(time)][body_parts_Xsens[bPart]][1])**2+(positions_Kinect[str(time)][bPart][2]-positions_Xsens[str(time)][body_parts_Xsens[bPart]][2])**2) Difference_Mobilenet.append(diff_Mobilenet) Difference_Kinect.append(diff_Kinect) plt.plot(Times_float,Difference_Mobilenet,color='blue',label='dMobil--dXsens') plt.plot(Times_float,Difference_Kinect,color='red',label='dKinect--dXsens') plt.legend() plt.show() plt.plot(Times_float,y_bPart_valuesX,'green',label='Xsens') plt.plot(Times_float,y_bPart_valuesM,'blue',label='Mobilenet') plt.plot(Times_float,y_bPart_valuesK,'red',label='Kinect') plt.legend() plt.title("y values after interpolation %s"%bPart) plt.show() plt.plot(Times_float,x_bPart_valuesX,'green',label='Xsens') plt.plot(Times_float,x_bPart_valuesM,'blue',label='Mobilenet') plt.plot(Times_float,x_bPart_valuesK,'red',label='Kinect') plt.legend() plt.title("x values after interpolation %s"%bPart) plt.show() bPart='lElbow' x_bPart_valuesX=[] y_bPart_valuesX=[] x_bPart_valuesK=[] y_bPart_valuesK=[] x_bPart_valuesM=[] y_bPart_valuesM=[] Times_float=[] Times=list(Variances.keys()) Times_float=[] for time in Times: Times_float.append(float(time)) Times_float=sorted(Times_float) for time in Times_float: xX=positions_Xsens[str(time)][body_parts_Xsens[bPart]][1] yX=positions_Xsens[str(time)][body_parts_Xsens[bPart]][2] x_bPart_valuesX.append(xX) y_bPart_valuesX.append(yX) 
xK=positions_Kinect[str(time)][bPart][1] yK=positions_Kinect[str(time)][bPart][2] x_bPart_valuesK.append(xK) y_bPart_valuesK.append(yK) xM=positions_Mobilenet[str(time)][body_parts_Mobilenet[bPart]][0] yM=positions_Mobilenet[str(time)][body_parts_Mobilenet[bPart]][1] x_bPart_valuesM.append(xM) y_bPart_valuesM.append(yM) plt.plot(Times_float,y_bPart_valuesX,'green',label='Xsens') plt.plot(Times_float,y_bPart_valuesM,'blue',label='Mobilenet') plt.plot(Times_float,y_bPart_valuesK,'red',label='Kinect') plt.legend() plt.title("y values after interpolation %s"%bPart) plt.show() ```
github_jupyter
<small><small><i> All the IPython Notebooks in this lecture series by Dr. Milan Parmar are available @ **[GitHub](https://github.com/milaan9/04_Python_Functions/tree/main/002_Python_Functions_Built_in)** </i></small></small> # Python `frozenset()` The **`frozenset()`** function returns an immutable frozenset object initialized with elements from the given iterable. Frozen set is just an immutable version of a **[Python set](https://github.com/milaan9/02_Python_Datatypes/blob/main/006_Python_Sets.ipynb)** object. While elements of a set can be modified at any time, elements of the frozen set remain the same after creation. Due to this, frozen sets can be used as keys in **[Dictionary](https://github.com/milaan9/02_Python_Datatypes/blob/main/005_Python_Dictionary.ipynb)** or as elements of another set. But like sets, it is not ordered (the elements can be set at any index). **Syntax**: ```python frozenset([iterable]) ``` ## `frozenset()` Parameters The **`format()`** function takes a single parameter: * **iterable (Optional)** - the iterable which contains elements to initialize the frozenset with. * Iterable can be set, dictionary, **[tuple](https://github.com/milaan9/02_Python_Datatypes/blob/main/004_Python_Tuple.ipynb)**, etc. ## Return Value from `frozenset()` The **`frozenset()`** function returns an immutable **`frozenset`** initialized with elements from the given iterable. If no parameters are passed, it returns an empty **`frozenset`**. ``` # Example 1: Working of Python frozenset() # tuple of vowels vowels = ('a', 'e', 'i', 'o', 'u') fSet = frozenset(vowels) print('The frozen set is:', fSet) print('The empty frozen set is:', frozenset()) # frozensets are immutable fSet.add('v') # Example 2: frozenset() for Dictionary # When you use a dictionary as an iterable for a frozen set, # it only takes keys of the dictionary to create the set. 
# random dictionary person = {"name": "John", "age": 23, "sex": "male"} fSet = frozenset(person) print('The frozen set is:', fSet) ``` ## Frozenset operations Like normal sets, frozenset can also perform different operations like **`copy()`**, **`union()`**, **`intersection()`**, **`difference()`**, and **`symmetric_difference()`**. ``` # Example 1: Frozensets # initialize A and B A = frozenset([1, 2, 3, 4]) B = frozenset([3, 4, 5, 6]) # copying a frozenset C = A.copy() # Output: frozenset({1, 2, 3, 4}) print(C) # union print(A.union(B)) # Output: frozenset({1, 2, 3, 4, 5, 6}) # intersection print(A.intersection(B)) # Output: frozenset({3, 4}) # difference print(A.difference(B)) # Output: frozenset({1, 2}) # symmetric_difference print(A.symmetric_difference(B)) # Output: frozenset({1, 2, 5, 6}) ``` Similarly, other set methods like **`isdisjoint()`**, **`issubset()`**, and **`issuperset()`** are also available. ``` # Example 2: Frozensets # initialize A, B and C A = frozenset([1, 2, 3, 4]) B = frozenset([3, 4, 5, 6]) C = frozenset([5, 6]) # isdisjoint() method print(A.isdisjoint(C)) # Output: True # issubset() method print(C.issubset(B)) # Output: True # issuperset() method print(B.issuperset(C)) # Output: True ```
github_jupyter