code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # PyTorch ANN on the Iris dataset (multi-class classification)
#
# Acknowledgments:
#   * https://www.udemy.com/course/pytorch-for-deep-learning-with-python-bootcamp/
#   * https://archive.ics.uci.edu/ml/datasets/iris
#   * https://pytorch.org/
#   * Activation function info: https://www.geeksforgeeks.org/activation-functions-neural-networks/

import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):
    """Fully-connected classifier: 4 features -> h1 -> h2 -> 3 classes.

    Args:
        in_features: number of input features (4 for Iris).
        h1, h2: hidden-layer widths.
        out_features: number of classes (3 for Iris).
    """

    def __init__(self, in_features=4, h1=8, h2=9, out_features=3):
        super().__init__()
        self.fc1 = nn.Linear(in_features, h1)
        self.fc2 = nn.Linear(h1, h2)
        self.out = nn.Linear(h2, out_features)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # Return raw logits: nn.CrossEntropyLoss applies log-softmax itself.
        return self.out(x)


torch.manual_seed(32)  # reproducible weight initialization
model = Model()

import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline

data = pd.read_csv('/content/iris.csv')
data.head()
data.tail()

# Scatter-plot each of four feature pairs, colored by class.
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 7))
fig.tight_layout()
plots = [(0, 1), (2, 3), (0, 2), (1, 3)]
colors = ['b', 'r', 'g']
labels = ['Iris setosa', 'Iris virginica', 'Iris versicolor']
for i, ax in enumerate(axes.flat):
    for j in range(3):
        x = data.columns[plots[i][0]]
        y = data.columns[plots[i][1]]
        ax.scatter(data[data['target'] == j][x],
                   data[data['target'] == j][y],
                   color=colors[j])
        ax.set(xlabel=x, ylabel=y)
fig.legend(labels=labels, loc=3, bbox_to_anchor=(1.0, 0.85))
plt.show()

# Features / labels as numpy arrays.
X = data.drop('target', axis=1).values
y = data['target'].values

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=33)

X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)
X_test[0:5]
y_train = torch.LongTensor(y_train)
y_test = torch.LongTensor(y_test)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

epochs = 100
losses = []
for i in range(epochs):
    # Forward pass: call the module, not model.forward(), so hooks still run.
    y_pred = model(X_train)
    loss = criterion(y_pred, y_train)
    # FIX: store the Python float, not the graph-attached tensor.  Appending
    # the tensor kept the whole autograd graph alive each epoch and breaks
    # plt.plot(losses) on newer PyTorch versions.
    losses.append(loss.item())
    if i % 10 == 0:
        print(f'Epoch {i} and loss is:{loss}')
    # Backpropagation.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

plt.plot(range(epochs), losses)
plt.ylabel('Loss')
plt.xlabel('Epoch')

# Evaluate on the held-out test set (no gradients needed).
with torch.no_grad():
    y_eval = model(X_test)
    loss = criterion(y_eval, y_test)
loss

# Per-sample accuracy check.  FIX: the loop variable was named `data`, which
# shadowed the DataFrame and forced the notebook to re-read the CSV later.
correct = 0
with torch.no_grad():
    for i, row in enumerate(X_test):
        y_val = model(row)
        print(f'{i+1}.) {y_val.argmax().item()} {y_test[i]}')
        if y_val.argmax().item() == y_test[i]:
            correct += 1
print(f'We got {correct} results')

# Persist and reload the trained weights.
torch.save(model.state_dict(), 'my_iris_model')
new_model = Model()
new_model.load_state_dict(torch.load('my_iris_model'))
new_model.eval()

with torch.no_grad():
    y_val = new_model(X_test)
    loss = criterion(y_val, y_test)
print(f'{loss:.8f}')

# Classify a mystery iris.
unknown_flower = torch.tensor([5.2, 3.3, 2.6, 0.8])

# Re-plot the feature pairs with the mystery iris marked in yellow.
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 7))
fig.tight_layout()
labels = ['Iris setosa', 'Iris virginica', 'Iris versicolor', 'unknown_flower']
for i, ax in enumerate(axes.flat):
    for j in range(3):
        x = data.columns[plots[i][0]]
        y = data.columns[plots[i][1]]
        ax.scatter(data[data['target'] == j][x],
                   data[data['target'] == j][y],
                   color=colors[j])
        ax.set(xlabel=x, ylabel=y)
    # Add a plot for our mystery iris:
    ax.scatter(unknown_flower[plots[i][0]], unknown_flower[plots[i][1]], color='y')
fig.legend(labels=labels, loc=3, bbox_to_anchor=(1.0, 0.85))
plt.show()

with torch.no_grad():
    print(new_model(unknown_flower))
    print(new_model(unknown_flower).argmax())
PyTorch ANN - MultiVariate(Iris Dataset).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#   kernelspec:
#     display_name: Python_pytorch
#     language: python
#     name: machine_learning
# ---

# # Gaussian Process Distribution of Relaxation Times.
# ## Reproduces Figure 8 of https://doi.org/10.1016/j.electacta.2019.135316
#
# Shows how GP-DRT recovers the DRT from impedance data that is missing the
# low-frequency points (f < 1e-3 Hz) and predicts the impedance there.
# Same data format as `ex1_simple_ZARC_model.ipynb`.

import numpy as np
import matplotlib.pyplot as plt
from math import sin, cos, pi
import GP_DRT
from scipy.optimize import minimize
# %matplotlib inline

# ## 1) Define parameters of the ZARC circuit

# measured frequency range
N_freqs = 71
freq_vec = np.logspace(-3., 4., num=N_freqs, endpoint=True)
xi_vec = np.log(freq_vec)
tau = 1/freq_vec

# frequency range used for prediction (one decade below the measured data)
freq_vec_star = np.logspace(-4., 4., num=81, endpoint=True)
xi_vec_star = np.log(freq_vec_star)

# parameters for ZARC model
R_inf = 10
R_ct = 50
phi = 0.8
tau_0 = 1.

C = tau_0**phi/R_ct
Z_exact = R_inf+1./(1./R_ct+C*(1j*2.*pi*freq_vec)**phi)
# analytical DRT of the ZARC element
gamma_fct = (R_ct)/(2.*pi)*sin((1.-phi)*pi)/(np.cosh(phi*np.log(tau/tau_0))-cos((1.-phi)*pi))

# denser grid, used for plotting only
freq_vec_plot = np.logspace(-4., 4., num=10*(N_freqs-1), endpoint=True)
tau_plot = 1/freq_vec_plot
gamma_fct_plot = (R_ct)/(2.*pi)*sin((1.-phi)*pi)/(np.cosh(phi*np.log(tau_plot/tau_0))-cos((1.-phi)*pi))

# adding random noise to the impedance to create a synthetic experiment
# FIX: np.random.seed() returns None, so the original `rng = np.random.seed(...)`
# bound a meaningless name; call the seed function for its side effect only.
np.random.seed(214974)
sigma_n_exp = 0.1
Z_exp = Z_exact + sigma_n_exp*(np.random.normal(0, 1, N_freqs)+1j*np.random.normal(0, 1, N_freqs))

# ## 2) Show the synthetic impedance in a Nyquist plot (cf. Figure 8a)

fig, ax = plt.subplots()
plt.plot(np.real(Z_exact), -np.imag(Z_exact), linewidth=4, color="black", label="exact")
plt.plot(np.real(Z_exp), -np.imag(Z_exp), "o", markersize=10, color="red", label="synth exp")
plt.annotate(r'$10^{-3}$', xy=(np.real(Z_exp[0]), -np.imag(Z_exp[0])),
             xytext=(np.real(Z_exp[0])-15, -np.imag(Z_exp[0])),
             arrowprops=dict(arrowstyle="-", connectionstyle="arc"))
plt.annotate(r'$10^{-2}$', xy=(np.real(Z_exp[10]), -np.imag(Z_exp[10])),
             xytext=(np.real(Z_exp[10])-2, 10-np.imag(Z_exp[10])),
             arrowprops=dict(arrowstyle="-", connectionstyle="arc"))
plt.annotate(r'$10^{-1}$', xy=(np.real(Z_exp[20]), -np.imag(Z_exp[20])),
             xytext=(np.real(Z_exp[20])-2, 6-np.imag(Z_exp[20])),
             arrowprops=dict(arrowstyle="-", connectionstyle="arc"))
plt.annotate(r'$1$', xy=(np.real(Z_exp[30]), -np.imag(Z_exp[30])),
             xytext=(np.real(Z_exp[30]), 10-np.imag(Z_exp[30])),
             arrowprops=dict(arrowstyle="-", connectionstyle="arc"))
plt.annotate(r'$10$', xy=(np.real(Z_exp[40]), -np.imag(Z_exp[40])),
             xytext=(np.real(Z_exp[40])-1, 10-np.imag(Z_exp[40])),
             arrowprops=dict(arrowstyle="-", connectionstyle="arc"))
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=15)
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
plt.legend(frameon=False, fontsize=15)
plt.axis('scaled')
plt.xticks(range(10, 70, 10))
plt.yticks(range(0, 60, 10))
plt.gca().set_aspect('equal', adjustable='box')
plt.xlabel(r'$Z_{\rm re}/\Omega$', fontsize=20)
plt.ylabel(r'$-Z_{\rm im}/\Omega$', fontsize=20)
plt.show()

# ## 3) Compute the optimal hyperparameters
# Global 3D optimization that maximizes the marginal log-likelihood, eq (31).

sigma_n = sigma_n_exp
sigma_f = 5.
ell = 1.
theta_0 = np.array([sigma_n, sigma_f, ell])
seq_theta = np.copy(theta_0)

def print_results(theta):
    """Optimizer callback: record and print each iterate of theta."""
    global seq_theta
    seq_theta = np.vstack((seq_theta, theta))
    print('{0:.7f}  {1:.7f}  {2:.7f}'.format(theta[0], theta[1], theta[2]))

# warm up the objective/gradient once (also validates the inputs)
GP_DRT.NMLL_fct(theta_0, Z_exp, xi_vec)
GP_DRT.grad_NMLL_fct(theta_0, Z_exp, xi_vec)

print('sigma_n, sigma_f, ell')
# minimize the NMLL L(theta) w.r.t. sigma_n, sigma_f, ell using Newton-CG
res = minimize(GP_DRT.NMLL_fct, theta_0, args=(Z_exp, xi_vec), method='Newton-CG',
               jac=GP_DRT.grad_NMLL_fct, callback=print_results, options={'disp': True})

# collect the optimized parameters
sigma_n, sigma_f, ell = res.x

# ## 4) Core of the GP-DRT
# ### 4a) Compute the matrices of eq (18)

K = GP_DRT.matrix_K(xi_vec, xi_vec, sigma_f, ell)
L_im_K = GP_DRT.matrix_L_im_K(xi_vec, xi_vec, sigma_f, ell)
L2_im_K = GP_DRT.matrix_L2_im_K(xi_vec, xi_vec, sigma_f, ell)
Sigma = (sigma_n**2)*np.eye(N_freqs)

# ### 4b) Factorize the matrices and solve the linear equations

# the matrix L^2_im K + sigma_n^2 I whose inverse is needed
K_im_full = L2_im_K + Sigma

# Cholesky factorization; L is lower-triangular
L = np.linalg.cholesky(K_im_full)

# solve for alpha via two triangular solves
alpha = np.linalg.solve(L, Z_exp.imag)
alpha = np.linalg.solve(L.T, alpha)

# estimate gamma of eq (21a); the minus sign (not in L_im_K) refers to eq (65)
gamma_fct_est = -np.dot(L_im_K.T, alpha)

# explicit inverse of K_im_full via the Cholesky factor (needed repeatedly below)
# FIX: removed dead code — a second inverse computed with np.linalg.inv and a
# discarded np.diag(...) sanity check that were never used.
inv_L = np.linalg.inv(L)
inv_K_im_full = np.dot(inv_L.T, inv_L)

# estimate the sigma of gamma for eq (21b)
cov_gamma_fct_est = K - np.dot(L_im_K.T, np.dot(inv_K_im_full, L_im_K))
sigma_gamma_fct_est = np.sqrt(np.diag(cov_gamma_fct_est))

# ### 4c) Predict the imaginary part of the GP-DRT and impedance

Z_im_vec_star = np.empty_like(xi_vec_star)
Sigma_Z_im_vec_star = np.empty_like(xi_vec_star)
gamma_vec_star = np.empty_like(xi_vec_star)
Sigma_gamma_vec_star = np.empty_like(xi_vec_star)

# predict at each new xi point (one point at a time)
for index, val in enumerate(xi_vec_star):
    xi_star = np.array([val])

    # cross-covariance matrices of eq (18) for the new point
    # FIX: removed unused k_star and L_im_k_star_star computations.
    L_im_k_star = GP_DRT.matrix_L_im_K(xi_vec, xi_star, sigma_f, ell)
    L2_im_k_star = GP_DRT.matrix_L2_im_K(xi_star, xi_vec, sigma_f, ell)
    L2_im_k_star = L2_im_k_star.T
    k_star_star = GP_DRT.matrix_K(xi_star, xi_star, sigma_f, ell)
    L2_im_k_star_star = GP_DRT.matrix_L2_im_K(xi_star, xi_star, sigma_f, ell)

    # Z_im_star mean and variance using eq (26)
    Z_im_vec_star[index] = np.dot(L2_im_k_star.T, np.dot(inv_K_im_full, Z_exp.imag))
    Sigma_Z_im_vec_star[index] = L2_im_k_star_star - np.dot(L2_im_k_star.T, np.dot(inv_K_im_full, L2_im_k_star))

    # gamma_star mean and variance
    gamma_vec_star[index] = -np.dot(L_im_k_star.T, np.dot(inv_K_im_full, Z_exp.imag))
    Sigma_gamma_vec_star[index] = k_star_star - np.dot(L_im_k_star.T, np.dot(inv_K_im_full, L_im_k_star))

# ### 4d) Plot the obtained GP-DRT against the analytical DRT
# Note: the prediction credibility interval broadens at low frequencies.

plt.semilogx(freq_vec_plot, gamma_fct_plot, linewidth=4, color="black", label="exact")
plt.semilogx(freq_vec_star, gamma_vec_star, linewidth=4, color="red", label="GP-DRT")
plt.semilogx([1E-3, 1E-3], [-5, 25], ':', linewidth=3, color="black")
plt.fill_between(freq_vec_star,
                 gamma_vec_star-3*np.sqrt(abs(Sigma_gamma_vec_star)),
                 gamma_vec_star+3*np.sqrt(abs(Sigma_gamma_vec_star)),
                 color="0.4", alpha=0.3)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=15)
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
plt.axis([1E-4, 1E4, -5, 25])
plt.legend(frameon=False, fontsize=15)
plt.xlabel(r'$f/{\rm Hz}$', fontsize=20)
plt.ylabel(r'$\gamma/\Omega$', fontsize=20)
plt.show()

# ### 4e) Plot the imaginary part of the GP-DRT impedance together with the
# exact one and the synthetic experiment.

plt.semilogx(freq_vec, -Z_exp.imag, "o", markersize=10, color="black", label="synth exp")
plt.semilogx(freq_vec_star, -Z_im_vec_star, linewidth=4, color="red", label="GP-DRT")
plt.semilogx([1E-3, 1E-3], [-5, 25], ':', linewidth=3, color="black")
plt.fill_between(freq_vec_star,
                 -Z_im_vec_star-3*np.sqrt(abs(Sigma_Z_im_vec_star)),
                 -Z_im_vec_star+3*np.sqrt(abs(Sigma_Z_im_vec_star)),
                 alpha=0.3)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=15)
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
plt.axis([1E-4, 1E4, -5, 25])
plt.legend(frameon=False, fontsize=15)
plt.xlabel(r'$f/{\rm Hz}$', fontsize=20)
plt.ylabel(r'$-Z_{\rm im}/\Omega$', fontsize=20)
plt.show()
tutorial/ex3_truncated_ZARC_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# cLSTM Granger-causality experiment on a multivariate time-series CSV.

import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from synthetic import simulate_lorenz_96
from models.clstm import cLSTM, train_model_ista, train_model_adam

device = torch.device('cuda')

# ## data
df = pd.read_csv('C:/Users/chanyoung/Desktop/TCDF-master/data/gang_csv.csv')
df = df.drop('1210030000', axis=1)
df_ = df.loc[192:203]                       # short window of the series
df_np = df_.values
# shape (1, T, dim): batch of one multivariate series
X = torch.tensor(df_np[np.newaxis], dtype=torch.float32, device=device)

# one LSTM per output variable; hidden=2 keeps the model tiny
clstm = cLSTM(X.shape[-1], hidden=2).cuda(device=device)

# train with ISTA (proximal gradient) — lam controls group-sparsity of inputs
train_loss_adam = train_model_ista(
    clstm, X, context=2, lam=10.0, lam_ridge=1e-2, lr=1e-3, max_iter=20000,
    check_every=50)

a = clstm.explainer
a

def arrange_input(data, context):
    '''
    Arrange a single time series into overlapping short sequences.

    Args:
      data: time series of shape (T, dim).
      context: length of short sequences.
    '''
    assert context >= 1 and isinstance(context, int)
    input = torch.zeros(len(data) - context, context, data.shape[1],
                        dtype=torch.float32, device=data.device)
    target = torch.zeros(len(data) - context, context, data.shape[1],
                         dtype=torch.float32, device=data.device)
    for i in range(context):
        start = i
        end = len(data) - context + i
        input[:, i, :] = data[start:end]
        target[:, i, :] = data[start+1:end+1]
    return input.detach(), target.detach()

# rebuild (input, target) pairs the same way the trainer does
X, Y = zip(*[arrange_input(x, 2) for x in X])
X = torch.cat(X, dim=0)
Y = torch.cat(Y, dim=0)
X

# FIX: iterate over the actual number of per-variable networks instead of the
# hard-coded 57, so this cell works for any number of input series.
pred = [clstm.networks[i](X)[0] for i in range(len(clstm.networks))]
pred[0][:, :, 0]
Y[:, :, 0]

pred, _ = clstm.networks[1](X)
pred

# input-to-hidden weight column norms indicate which inputs each LSTM uses
weight_ = clstm.networks[0].lstm.weight_ih_l0
torch.norm(weight_, dim=0)
torch.norm(clstm.networks[0].lstm.weight_hh_l0, dim=0)

# estimated Granger-causality matrix
GC_est = clstm.GC().cpu().data.numpy()

fig, axarr = plt.subplots(figsize=(10, 5))
axarr.imshow(GC_est, cmap='Blues', vmin=0, vmax=1,
             extent=(0, len(GC_est), len(GC_est), 0))
axarr.set_ylabel('Affected series')
axarr.set_xlabel('Causal series')

print('Estimated variable usage = %.2f%%' % (100 * np.mean(GC_est)))
print('less loss = ', min(train_loss_adam).cpu().numpy())

clstm.weight_causality(1)
cs224w/test2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Speed Chart
#
# Throughput (GB/s) between CPU, caches, RAM, PCIe links and peripherals.
# On a mac: brew install graphviz
from graphviz import Digraph

# graphviz python package docs: https://graphviz.readthedocs.io/en/stable/
dot = Digraph(comment='Throughput')

# interesting realworld numbers: https://asteroidsathome.net/boinc/cpu_list.php
# http://www.sisoftware.eu/2017/06/23/intel-core-i9-skl-x-review-and-benchmarks-cpu-avx512-is-here/
# https://en.wikipedia.org/wiki/List_of_Intel_Core_i9_microprocessors
# Intel i9 7900X - FP32 @ 271 GFLOPS, multiply by four => 1084 GB/s
dot.node('CPU', 'CPU @ 20 x 4.3 GHz')
dot.edge('CPU', 'CPU', label='1,084 GB/s')

# Cache hierarchy (cylinder shape for all storage-like nodes).
# http://www.sisoftware.eu/2017/06/24/intel-core-i9-skl-x-review-and-benchmarks-cache-memory-performance/
# FIX: removed the dead dot.attr('node', shape=...) calls that were immediately
# overridden before any node was created (every 'oval' attr was a no-op), and
# the commented-out penwidth line.
dot.attr('node', shape='cylinder')
dot.node('L1', 'L1 @ 0.3 MB')  # 320 kB I + 320 kB D
dot.edge('CPU', 'L1', label='2,200 GB/s')
dot.node('L2', 'L2 @ 10MB')  # 10 * 1024 KiB
dot.edge('CPU', 'L2', label='1,010 GB/s')
dot.node('L3', 'L3 @ 13.75MB')
dot.edge('CPU', 'L3', label=' 289 GB/s')

# Main memory (still cylinder).
dot.node('RAM', 'RAM @ 128 GB')
dot.edge('CPU', 'RAM', label='69.3 GB/s')

# PCIe lanes.  https://en.wikipedia.org/wiki/PCI_Express
dot.attr('node', shape='rect')
dot.node('PCIe0', 'PCIe x1')
dot.node('PCIe1', 'PCIe x4')
dot.node('PCIe2', 'PCIe x16')
dot.node('PCIe3', 'PCIe x16')
dot.node('PCIe4', 'PCIe x16')
dot.node('PCIe5', 'PCIe x4')
dot.node('PCIe6', 'PCIe x4')
dot.edge('CPU', 'PCIe0', label=' 1.0 GB/s')
dot.edge('CPU', 'PCIe1', label=' 3.94 GB/s')
dot.edge('CPU', 'PCIe2', label='15.8 GB/s')
dot.edge('CPU', 'PCIe3', label='15.8 GB/s')
dot.edge('CPU', 'PCIe4', label='15.8 GB/s')
dot.edge('CPU', 'PCIe5', label=' 3.94 GB/s')
dot.edge('CPU', 'PCIe6', label='15.8 GB/s')

# SATA HDD.  https://www.newegg.com/Product/Product.aspx?Item=N82E16822179129
dot.attr('node', shape='cylinder')
dot.node('SATA', 'SATA 3.0 HDD @ 4096 GB')
dot.attr('node', shape='rect')
dot.edge('PCIe0', 'SATA', label='0.22 GB/s')

# Samsung 1TB PM961 Single Sided 80mm (2280/2280SS) M.2 PCI Express 3.0 x4
# (PCIe Gen3 x4) OEM NVMe SSD - MZVLW1T0HMLH
dot.attr('node', shape='cylinder')
dot.node('NVMe', 'M.2 SSD @ 1024 GB')
dot.attr('node', shape='rect')
dot.edge('PCIe1', 'NVMe', label='1.7 GB/s')  # write
dot.edge('NVMe', 'PCIe1', label='3 GB/s')    # read

# 40 Gb Ethernet (Mellanox MHQH19B-XTR).
dot.node('ETH1', 'Ethernet 40Gb')
dot.edge('PCIe2', 'ETH1', label=' 5 GB/s')

# InfiniBand.  https://en.wikipedia.org/wiki/InfiniBand
# http://www.colfaxdirect.com/store/pc/viewPrd.asp?idproduct=2839
dot.node('IB1', 'InfiniBand 4x')
dot.edge('PCIe6', 'IB1', label=' 12.5 GB/s')

# GPU: 11.3 TFLOPs, multiply by four to get bytes per second => 45.2 TB/s
dot.node('GPU1', 'GPU @ 3584 x 1.5 GHz')
dot.edge('GPU1', 'GPU1', label=' 45,200 GB/s')
dot.attr('node', shape='cylinder')
dot.node('VRAM', 'VRAM @ 11 GB')  # GDDR5X
dot.attr('node', shape='rect')
dot.edge('PCIe3', 'GPU1')
dot.edge('GPU1', 'VRAM', label=' 484 GB/s')

# NVLink.  https://en.wikipedia.org/wiki/NVLink
dot.node('NVLINK1', 'NVLINK 2.0')
dot.edge('GPU1', 'NVLINK1', label=' 300 GB/s')

# USB 3.1 gen 2.
dot.node('USB', 'USB 3.1')
dot.edge('PCIe4', 'USB', label='1.25 GB/s')

# Thunderbolt 3.
# https://en.wikipedia.org/wiki/Thunderbolt_(interface)#Thunderbolt_3
# https://www.asus.com/us/Motherboard-Accessory/ThunderboltEX-3/
# https://arstechnica.com/gadgets/2017/08/laptop-external-graphics-card-review/
dot.node('TB', 'THUNDERBOLT 3')
dot.edge('PCIe5', 'TB', label='5 GB/s')

dot  # render inline in the notebook
Throughput Chart.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.7 64-bit (''base'': conda)' # language: python # name: python3 # --- # default_exp diffdrive # hide from fastcore.all import * # # Differential Drive Chapter # # Some code to create and display maps/likelihoods in Chapter 4. # # export import gtsam import math import PIL import numpy as np import plotly.express as px import plotly.graph_objects as go # ## Pinhole Figures # + # export def axes(length=1.5): """Add axes to a plotly figure""" return [ go.Scatter3d(x=[0,length], y=[0,0], z=[0,0], mode="lines", line=dict(color="red")), go.Scatter3d(x=[0,0], y=[0,length], z=[0,0], mode="lines", line=dict(color="green")), go.Scatter3d(x=[0,0], y=[0,0], z=[0,length], mode="lines", line=dict(color="blue")), go.Scatter3d(x=[0], y=[0], z=[0], mode="markers", marker=dict(size=3, color="cyan")), ] def plane(Z:float): return [ go.Scatter3d(x=[-1.2, 1.2], y=[-1,-1], z=[Z, Z], mode="lines", line=dict(color="blue")), go.Scatter3d(x=[-1.2, 1.2], y=[ 1, 1], z=[Z, Z], mode="lines", line=dict(color="blue")), go.Scatter3d(x=[-1.2,-1.2], y=[-1, 1], z=[Z, Z], mode="lines", line=dict(color="blue")), go.Scatter3d(x=[ 1.2, 1.2], y=[-1, 1], z=[Z, Z], mode="lines", line=dict(color="blue")), ] def ray(point3, F, color="orange"): X,Y,Z = point3 x,y = F*X/Z, F*Y/Z return go.Scatter3d(x=[X,0,x], y=[Y,0,y], z=[Z,0,F], marker=dict(size=3, color=color)) def show_3d(fig): camera = dict(up=dict(x=0, y=-1, z=0), center=dict(x=0, y=0, z=0), eye=dict(x=1.7, y=-0.3, z=-1.1)) fig.update_layout(scene_camera=camera, margin=dict(t=0, r=0, l=0, b=0), title="Pinhole Camera Model", showlegend=False) fig.show() # - feet = gtsam.Point3(-3,0,5) # point at the feet of the person, 5 meters in front of camera, 3 meters to the left head = gtsam.Point3(-3,-2,5) # point at the top of the head (note, Y = *minus* 2 meters) F = 1 # meter 
show_3d(go.Figure(data = plane(-F) + [ray(feet, -F), ray(head, -F)] + axes()))

show_3d(go.Figure(data = plane(F) + [ray(feet, F), ray(head, F)] + axes()))

# ## Reading Images

# +
#export
import PIL
import requests

def read_image(image_name):
    """Read an image from the book repo and return it as a PIL image."""
    url = f"https://raw.githubusercontent.com/gtbook/robotics/main/Figures5/{image_name}"
    # stream=True lets PIL read directly from the raw HTTP response body.
    return PIL.Image.open(requests.get(url, stream=True).raw)
# -

image_name = "LL_color_1201754063.387872.bmp"
image = read_image(image_name)  # locally: PIL.Image.open(image_name)
print(f"resolution = {image.width}x{image.height}")

import matplotlib.pyplot as plt
plt.imshow(image)

grayscale_image = PIL.ImageOps.grayscale(image)
plt.imshow(grayscale_image, cmap="gray")

# ## Easy Convolution

# +
#export
import torch

def conv2(input, filter):
    """Filter an input image of shape (iW,iH) with a filter of shape (kW,kH).

    NOTE(review): torch.conv2d computes cross-correlation, not a flipped-kernel
    convolution; for non-symmetric filters (like the gradient kernel below)
    the result differs in sign/orientation from a true convolution.
    """
    iW, iH = input.shape
    kW, kH = filter.shape
    # Add the singleton batch and channel dimensions that torch.conv2d expects ...
    _input = torch.reshape(input, (1, 1, iW, iH))
    _filter = torch.reshape(filter, (1, 1, kW, kH))
    # padding='same' keeps the output the same size as the input.
    _output = torch.conv2d(_input, _filter, padding='same')
    # ... and strip them again on the way out.
    return torch.reshape(_output, (iW, iH))
# -

grayscale = torch.from_numpy(np.asarray(grayscale_image, dtype=float))
print(f"type={type(grayscale)}, dtype={grayscale.dtype}, shape={grayscale.shape}")

filter = torch.tensor([[-1.0, 0.0, 1.0]], dtype=float)  # 1x3 horizontal gradient kernel
filter.shape

vertical_edges = conv2(grayscale, filter)
plt.imshow(vertical_edges, cmap="RdYlGn")
diffdrive.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Which _apps_ are in my project?
# ### Overview
# Here we focus on listing all apps within a single project. As with any **list**-type call, we will get minimal information about each app. There are two versions of this call:
#
# 1. (default) **paginated** call that will return 50 apps
# 2. **all-records** call that will page through and return all apps.
#
# Note, we can also query all available apps (in all our projects).
#
# ### Prerequisites
# 1. You need to be a member (or owner) of _at least one_ project.
# 2. You need your _authentication token_ and the API needs to know about it. See <a href="Setup_API_environment.ipynb">**Setup_API_environment.ipynb**</a> for details.
# 3. You understand how to <a href="projects_listAll.ipynb" target="_blank">list</a> projects you are a member of (we will just use that call directly and pick one here).
# 4. You have at least one app in your project, maybe from <a href="apps_copyFromPublicApps.ipynb" target="_blank">copying one</a>
#
# ## Imports
# We import the _Api_ class from the official sevenbridges-python bindings below.

import sevenbridges as sbg

# ## Initialize the object
# The _Api_ object needs to know your **auth\_token** and the correct path. Here we assume you are using the .sbgrc file in your home directory. For other options see <a href="Setup_API_environment.ipynb">Setup_API_environment.ipynb</a>

# +
# User input: specify platform {cgc, sbg}
prof = 'cgc'

config_config_file = sbg.Config(profile=prof)
api = sbg.Api(config=config_config_file)
# -

# ## List all apps within a project
# Here we can return the apps in a specific project or all the apps in projects you are a member of. A **list**-call for apps returns the following useful *attributes*:
# * **id** _Unique_ identifier for each app, including the latest version number
# * **name** Name of app, maybe _non-unique_
# * **project** Project the app is in.
# * **href** Address<sup>1</sup> of the app.
#
# <sup>1</sup> This is the address where, by using API you can get this resource

# By passing a **project_id** into the _api.apps.query()_ below, we will get the apps **inside that project**.

# +
# [USER INPUT] Set project name:
project_name = 'Keep on Smiling'

# LIST all projects and check for name match
my_project = [p for p in api.projects.query(limit=100).all() \
              if p.name == project_name]

if not my_project:  # exploit fact that empty list is False, {list, tuple, etc} is True
    print('The project named (%s) does not exist, please check spelling (especially trailing spaces)' \
          % project_name)
    # Stop notebook execution here without a misleading traceback.
    raise KeyboardInterrupt
else:
    my_project = my_project[0]

# list the apps in ONE project
my_apps = api.apps.query(project = my_project.id, limit=100)
print(' In Project (%s), you have %i apps.' % (my_project.name, my_apps.total))

for aa in my_apps.all():
    print('App name is (%s); \t App id is (%s)' % (aa.name, aa.id))
print('\n')
# -

# By **not** passing any **project_id** into the _api.apps.query()_ below, we will get all the apps for **any project** you are a member of.

# +
# list the app in ALL your projects
my_apps_all_projects = api.apps.query(limit=100)

print(' In all your projects, you have %i apps.' % (my_apps_all_projects.total))

for aa in my_apps_all_projects.all():
    print('App name is (%s); \t App id is (%s)' % (aa.name, aa.id))
print('\n')
# -

# ## Additional Information
# Detailed documentation of this particular REST architectural style request is available [here](http://docs.cancergenomicscloud.org/docs/list-all-your-projects)
#
# NOTE(review): the link above points at the *projects* list documentation, although this
# notebook demonstrates the *apps* list call — verify the intended docs page.
Recipes/CGC/apps_listAll.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the read-only "../input/" directory # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) # You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session # - # # Police Shooting Insights # # With the nation divided on the conduct of law enforcement, I decided to look into the Washington Post dataset to investigate trends hidden in the numbers. # # Given the current scenarios with protests breaking out across many cities and states, I decided to focus the analysis and visualizations around the following measures: # # * Geography - Death toll by State and City. 
# * Race - Death toll by race # * Armed Category - If individuals were armed when they were killed # * Flee Category - If individuals were fleeing from the police when they were killed # * Threat Level - If individuals were attacking the police when they were killed # * Time - Death by race analyzed by Month and Year # * Body Camera - If police disproportionately kills more of a specific race with or without body camera # # # Basic assumptions. # # As the rules of society and law enforcement tell us, if you encounter law enforcement agent and comply to all their commands, there will be no reason for law enforcement to use lethal force. On the other hand, if you do not comply with commands and try to attack agents in any way, shape or form, law enforcement may use lethal force in case they feel their life is in danger. # # # Key Findings # # # # 1. Individuals that were 'Unarmed', 'Not Attacking', and 'Not Fleeing' from the police accounted for 1.9% of all deaths in the dataset. This was a total of 103 individuals over the last 5 years. Focusing on racial biases, I found that the % of deaths by race were of: # # White: 44.11%, Black: 30.39%, Hispanic: 18.62%, Other: 2.94%, Asian: 2.94%, Native American: 0.09%. Therefore, 'White' was the most affected race by 'unjustified' shooting situations, followed by 'Black' and 'Hispanic'. # # # 2. Individuals that were 'Armed', 'Attacking', but 'Not Fleeing' from the police showed the highest mortality levels. # # White: 55.44%, Black: 24.52%, Hispanic: 15.53%, Asian: 2.06%, Native American: 1.42%, Other: 1.00%. Therefore, we can see that 'White' leads aggressive encounters with the police by a large margin, followed by 'Black' and 'Hispanic' respectively. # # 3. Individuals that were 'Armed', 'Attacking', and 'Fleeing' from the police was the second highest mortality levels. # # White: 46.43%, Black: 31.83%, Hispanic: 18.33%, Native American: 1.53%, Other 0.98%, Asian: 0.87%. 
In this analysis, we can see that 'White' leads in this category, but there is a substantial increase in the death toll for 'Black'. This is likely because 'Black' has the highest rate of 'Fleeing' from law enforcement. # # 4. There was no substantial variation in death rates throughout the months of the year. The death rates also remained relatively constant from 2015 through 2019, with less than 1% variation. # # # 5. Regarding Body Cameras, 'White' were killed the most when not using body cameras. Only 9.45% of 'White' deaths were caught on a body camera, followed by 11.75% for 'Hispanic' and 15.71% for 'Black'. 'Asian' was the most likely to be recorded on body cameras, with 17.20%. With these numbers in mind, White were 66.24% more likely to be killed without body camera when compared to Black, and 24.33% more likely to be killed without body camera when compared to Hispanic. # # # # **Were people primarily armed?** # # # Considering the entire dataset, the 'Other' race was the lowest armed at 86%, and surprisingly 'Asian' led the armed category at 91.1%, despite the low number of total 'Asian' fatalities. # # **Which race fled the most from the police?** # # # 'Black' led the flee category at 41.18%, while'Asian' was the least likely race to flee from the police at 19.31%. # # **Which race was most likely to attack the Police** # # # In order to address this issue, I analyzed the likelihood of 'Armed' and 'Unarmed' individuals to 'Attack' officers. # # When 'Armed', 'Other' was most likely to attack the police at 74.35%, 'Black' at 72.68% and 'White' at 70.10% # While when 'Unarmed', 'White' led the number of attacks at 43.15% and 'Black' following at 41.46% # # # # **When looking at Geography these were the deadliest States and Cities for White, Black and Hispanic.** # # * City: 'Chicago' was the deadliest city for 'Black', 'Los Angeles' the deadliest city for 'Hispanic' and 'Phoenix' the deadliest city for 'White'. 
# # * State: 'California' was the deadliest state for 'Black', 'Hispanic' and 'White' # # # # Surprisingly, the deadliest states for individuals 'Unarmed' , 'Not Attacking' and 'Not Fleeing' diverged from the states with the highest numbers of total fatalities. These states were: # # 1. NE: 8.33% # 2. DC: 7.69% # 3. MN: 4.91% # 4. CT: 4.76% # 5. OK: 4.26% # # * These numbers account for the percentage of 'Unarmed' , 'Not Attacking' and 'Not Fleeing' killed compared to the total deaths in the state. # # # # Almost half of all individuals killed under these circumstances above were 'White'. # # 1. White: 46.43% # 2. Black: 31.83% # 3. Hispanic: 18.33% # # # # Therefore, considering only this dataset and disregarding external factors, the analysis does not suggest that the police is disproportionately targeting minorities. In most categories, 'White' led the death toll, followed by 'Black' and 'Hispanic'. A small percentage (1.9%) of deaths were from individuals 'Unarmed' , 'Not Attacking' and 'Not Fleeing', suggesting that 'Police Brutality' is not a predominant source of mortality. Within cases of 'Police Brutality', 'White' was the most impacted, being 50% more likely to die compared to 'Black', and 250% compared to 'Hispanic'. 
# # IMPORTING LIBRARIES AND READING DATA

# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import os
import datetime
# %matplotlib inline
# -

df1 = pd.read_csv('../input/data-police-shootings/fatal-police-shootings-data.csv')

# # INITIAL LOOK AT THE DATAFRAME

df1.head()

df1.shape

df1.info()

df1.isnull().sum()

df1.describe()

# +
df1.id = df1.id.astype('category')
df1.armed = df1.armed.astype('category')
df1.gender = df1.gender.astype('category')
df1.city = df1.city.astype('category')
df1.state = df1.state.astype('category')
df1.race = df1.race.astype('category')
df1.threat_level = df1.threat_level.astype('category')
df1.flee = df1.flee.astype('category')
df1.manner_of_death = df1.manner_of_death.astype('category')
#Properly assigning categorical records as a category
# -

df1.info()

# FIX: with the categorical columns created above, corr() over the whole frame raises
# in pandas >= 2.0; numeric_only=True restores the old "numeric columns only" behavior.
df1.corr(numeric_only=True)

# + _kg_hide-input=true
sns.heatmap(df1.corr(numeric_only=True))

# +
# NOTE(review): replace() on categorical columns can raise in newer pandas when the
# new value is not an existing category — cat.rename_categories is the safer API; verify.
df1.replace(to_replace = ['A'], value = ['Asian'], inplace = True)
df1.replace(to_replace = ['B'], value = ['Black'], inplace = True)
df1.replace(to_replace = ['H'], value = ['Hispanic'], inplace = True)
df1.replace(to_replace = ['N'], value = ['Native American'], inplace = True)
df1.replace(to_replace = ['O'], value = ['Other'], inplace = True)
df1.replace(to_replace = ['W'], value = ['White'], inplace = True)
#Properly naming each one of the races, to facilitate analysis and comprehension in visualizations
# -

# Derive month/year columns from the date for the time-based plots later on.
df1['month'] = pd.to_datetime(df1['date']).dt.month
df1['year'] = pd.to_datetime(df1['date']).dt.year
df1.head()

MissingPercentage = (((df1.isna().sum())/df1.shape[0])*100)
MissingPercentage

# +
# Exploratory look at the data. Focus on Manner of Death, Armed, Gender, Race, Threat Level and Flee
# -

df1.manner_of_death.value_counts()
#Majority of individuals were 'just' shot and not tasered and shot.

# +
df1.armed.unique()
# Large variety of armed categories.
# Will have to be categorized in order to improve comprehension

# +
df1.armed.value_counts(normalize=True)
#we can see the majority of the armed categories were gun, knife, toy weapon and undetermined
# -

df1.race.value_counts(normalize=True)
# White, Black and Hispanic accounted for 95.5% of all deaths. Might be worth focusing on them, and contrasting these three races with other races

# +
df1.threat_level.value_counts(normalize=True)
# Majority of individuals killed attacked the Police. One observation is that the 'Other' and 'Undetermined' categories are very subjective.
# -

df1.flee.value_counts(normalize=True)
# We can see that a large part of the individuals don't run from the police.

# # CREATING CATEGORIES - BUCKETING

# In order to facilitate our analysis, and understand if there is racial bias in shootings, we will create categories for the following
# Armed = Will be categorized into Armed and Unarmed
# Fleeing = Will be categorized into Fleeing and Not Fleeing

# **ARMED CATEGORY - BUCKET**

list(df1.armed.unique())

# NOTE(review): 'NaN' here is a literal string key; genuinely missing values (np.nan)
# will not match it in the lookup merge below — confirm how missing `armed` entries
# are meant to be bucketed.
UnavailableUndetermined = ['NaN','undetermined',]

Unarmed = ['unarmed']

# Every remaining raw `armed` label is treated as "Armed".
Armed = ['gun', 'toy weapon', 'nail gun', 'knife', 'shovel', 'hammer', 'hatchet', 'sword', 'machete',
 'box cutter', 'metal object', 'screwdriver', 'lawn mower blade', 'flagpole', 'guns and explosives',
 'cordless drill', 'crossbow', 'metal pole', 'Taser', 'metal pipe', 'metal hand tool', 'blunt object',
 'metal stick', 'sharp object', 'meat cleaver', 'carjack', 'chain', "contractor's level", 'unknown weapon',
 'stapler', 'beer bottle', 'bean-bag gun', 'baseball bat and fireplace poker', 'straight edge razor',
 'gun and knife', 'ax', 'brick', 'baseball bat', 'hand torch', 'chain saw', 'garden tool', 'scissors',
 'pole', 'pick-axe', 'flashlight', 'vehicle', 'baton', 'spear', 'chair', 'pitchfork', 'hatchet and gun',
 'rock', 'piece of wood', 'bayonet', 'pipe', 'glass shard', 'motorcycle', 'pepper spray', 'metal rake',
 'crowbar', 'oar', 'machete and gun', 'tire iron', 'air
conditioner', 'pole and knife', 'baseball bat and bottle', 'fireworks', 'pen', 'chainsaw', 'gun and sword', 'gun and car', 'pellet gun', 'claimed to be armed', 'BB gun', 'incendiary device', 'samurai sword', 'bow and arrow', 'gun and vehicle', 'vehicle and gun', 'wrench', 'walking stick', 'barstool', 'grenade', 'BB gun and vehicle', 'wasp spray', 'air pistol', 'Airsoft pistol', 'baseball bat and knife', 'vehicle and machete', 'ice pick', 'car, knife and mace'] df_UnavailableUndetermined = pd.DataFrame({'armed': UnavailableUndetermined}) df_UnavailableUndetermined ['category'] = 'Unavailable_Undetermined' df_UnavailableUndetermined df_Unarmed = pd.DataFrame({'armed': Unarmed}) df_Unarmed ['category'] = 'Unarmed' df_Unarmed df_Armed = pd.DataFrame({'armed': Armed}) df_Armed ['category'] = 'Armed' df_Armed df_lookup2 = df_Armed df_lookup2 df_lookup1 = df_lookup2.append(df_Unarmed) df_lookup1.shape df_lookup = df_lookup1.append(df_UnavailableUndetermined) df_lookup df2 = pd.merge(df1, df_lookup, on = 'armed', how = 'outer' ) df2 = df2.rename({'category':'armed_category'}, axis = 1) df2.head() df2.armed_category.value_counts(normalize = True) # **FLEE CATEGORY - BUCKET** df2.flee.unique() Fleeing = ['Car', 'Foot', 'Other'] NotFleeing = ['Not fleeing'] FleeLookUp2 = pd.DataFrame({'flee': Fleeing}) FleeLookUp2['flee_category'] = "Fleeing" FleeLookUp1 = pd.DataFrame({'flee': NotFleeing}) FleeLookUp1['flee_category'] = "Not_Fleeing" FleeLookUp = FleeLookUp1.append(FleeLookUp2) FleeLookUp.head() df3 = pd.merge(df2,FleeLookUp,how='outer', on = 'flee') df3.head() df3.flee_category.value_counts(normalize=True) # # INITIAL LOOK AT THE DATA df3.race.value_counts(normalize=True) #As we've seen previously, the majority of crimes are committed by 3 racial groups. 
# White, Black and Hispanic

df3.race.value_counts(normalize=True).plot(kind='pie', figsize = (8,8))
plt.title('Deaths by Race\nNormalized Data')

df3.state.value_counts(normalize=True)[:10]

df3.state.value_counts(normalize=True)[:10].sum()
#we can see that the top 10 states in the US account for 53.32% of all deaths in the US. Might be worth focusing on these states to look for trends

df3.city.value_counts(normalize=True)[:10]
#Interesting topic: For the top 10 states, some capitals were not present in the top 10 cities, or the opposite where the city is in the top 10, but not the state. This is the case for:
# Denver/CO, Kansas City/Kansas,Oklahoma / Oklahoma City, Georgia/ Atlanta, North Carolina / Raleigh, Washington / Seattle

# +
# I will make a few filtered data sets to evaluate only specific sections of the dataset related to race, state and city
# -

RaceList = ['White', 'Black', 'Hispanic']
df3_race = df3[df3.race.isin(RaceList)]
df3_race.race.unique()

# +
# FIX: this cell was commented out, but df3_race_state is used immediately below
# and in later cells — without it the notebook fails with a NameError. Restored.
StateList = ['CA','TX','FL','AZ','CO','GA','OK','NC','OH','WA']
df3_race_state = df3_race[df3_race.state.isin(StateList)]
df3_race_state.state.unique()
# -

CityList = ['Los Angeles','Phoenix','Houston','Las Vegas','San Antonio','Columbus','Chicago','Albuquerque','Kansas City','Jacksonville']
df3_race_city = df3_race[df3_race.city.isin(CityList)]
df3_race_city.city.unique()

# # VISUALIZATIONS

# # VISUALIZATIONS - GENERAL GEOGRAPHY
# # Focusing on the top 10 states and cities, and top 3 races

df3_race_city.groupby('race').city.value_counts(normalize=True).unstack().plot(kind='bar', figsize=(18,8))
plt.title('Deaths Per Race and City')
plt.ylabel('% of Total Deaths per Race')

df3_race_city.groupby('race').city.value_counts(normalize = True).unstack()

df3_race_state.groupby('race').state.value_counts(normalize=True).unstack().plot(kind='bar', figsize=(18,8))
plt.title('Deaths Per Race and State')
plt.ylabel('% of Total Deaths per Race')

df3_race_state.groupby('race').state.value_counts(normalize=True).unstack()
# # VISUALIZATION - THREAT LEVEL, FLEE & ARMED BY RACE

# +
df3.groupby('race').armed_category.value_counts().unstack().plot(kind = 'bar', stacked=True,figsize = (15,6))
plt.title('Total Number of Armed Individuals by Race')

df3.groupby('race').armed_category.value_counts(normalize=True).unstack().plot(kind = 'bar', stacked=True,figsize = (15,6))
plt.title('Percentage of Armed Individuals by Race')
# -

vis1b_df = df3.groupby('race').flee_category.value_counts(normalize=True).unstack()
vis1b_df

vis1b_df.plot(kind = 'bar', stacked = True, figsize=(15,6))
plt.title('Percentage of Individuals by Flee Category')

# +
# FIX: 'Likelyhood' -> 'Likelihood' in the chart titles (user-facing typo).
VIS1D = df3[df3.armed_category == 'Armed'].groupby('race').threat_level.value_counts(normalize=True).unstack().plot(kind='bar', stacked= True, figsize=(18,6))
plt.title('Likelihood of Individual to Attack When Armed')

VIS1E = df3[df3.armed_category == 'Unarmed'].groupby('race').threat_level.value_counts(normalize=True).unstack().plot(kind='bar', stacked= True, figsize=(18,6))
plt.title('Likelihood of Individual to Attack When Unarmed')

# We can see all races are less likely to attack police when unarmed.
# Asians are least likely to attack police overall.
# Black, Other and White are the most likely to attack police both Armed and Unarmed
# -

df3.groupby('race').armed_category.value_counts(normalize = True).unstack()

df3[df3.flee_category == 'Fleeing'].groupby('race').armed_category.value_counts(normalize=True).unstack()
#As a surprise, Asians are the most likely to try to flee in case they are unarmed, followed by Black

df3[df3.flee_category == 'Fleeing'].groupby('race').armed_category.value_counts(normalize=True).unstack().plot(kind = 'bar', stacked=False,figsize = (12,6))
# Likelihood of individual trying to flee in case they are armed or unarmed

# # VISUALIZATION - DEATHS BY RACE AND STATE

df3.state.value_counts(normalize=False)[:10].plot(kind='pie', figsize=(10,10))
plt.title('Percentage of Deaths in Top 10 States')

# +
VIS2A = df3_race_state[df3_race_state.armed_category == 'Armed'].groupby(['state','armed_category']).race.value_counts().unstack().plot(kind = 'bar', stacked=False, figsize = (18,6))
plt.title('Total Number of Individuals Killed When Armed, by State and Race')

# FIX: 'Unrmed' -> 'Unarmed' in the chart title (user-facing typo).
VIS2B = df3_race_state[df3_race_state.armed_category == 'Unarmed'].groupby(['state','armed_category']).race.value_counts().unstack().plot(kind = 'bar', stacked=False, figsize=(18,6))
plt.title('Total Number of Individuals Killed When Unarmed, by State and Race')
# -

# # VISUALS 3 - IS THE POLICE KILLING UNARMED MINORITIES?
df3.groupby(['armed_category','race']).threat_level.value_counts(normalize=True).unstack() # + df3[df3.armed_category == 'Unarmed'].groupby('race').threat_level.value_counts(normalize=False).unstack().plot(kind='bar', figsize=(15,6)) plt.title('Number of Deaths of Unarmed Individuals categorized by Threat Level and Race') #The owner of the dataset probably needs to be more specific on what 'Other' in Threat Level means, given that it was the largest category for all races # - VIS2B = df3_race_state.groupby('race').state.value_counts(normalize=True).unstack().plot(kind = 'bar', figsize = (18,6)) #Where do most races die based on % of total deaths in top 10 states # # VISUALIZATION - DEATHS BY RACE AND CITY # df3.city.value_counts(normalize=False)[:10].plot(kind='pie', figsize=(10,10)) plt.title('Deadliest Cities in the US') VIS3A = df3_race_city.groupby('race').city.value_counts(normalize=False).unstack().plot(kind='bar', figsize=(18,6)) plt.title('Deadliest Cities in the US by Race') VIS3A df3_race_city[df3_race_city.armed_category == 'Unarmed'].groupby(['city','armed_category','threat_level']).race.value_counts(normalize=False).unstack() df3_race_city.groupby(['armed_category','race']).city.value_counts(normalize=False).unstack().plot(kind='bar', stacked=True, figsize=(18,8)) plt.title('Armed Category and Race of Individuals Killed in Deadliest Cities') #trend remains the same in deadliest cities, with the majority individuals killed being armed # # CURIOSITIES # # Finding cases of Police Brutallity by Race and States (df3.groupby('armed_category').flee_category.value_counts().unstack()) # + ((df3.groupby('armed_category').flee_category.value_counts().unstack())/(df3.shape[0]))*100 # "Only" 3.5% of all deaths were related to unarmed civilians that were not fleeing. 
# + ((df3.groupby(['armed_category','threat_level']).flee_category.value_counts().unstack())/(df3.shape[0]))*100 # "Only" 1.9% of all deaths were related to unarmed civilians that were not fleeing and were not attacking the police. # - # Unarmed, Not attacking, Not Fleeing # + ThreatLevelList = ['other', 'undetermined'] df_unarmed_nothreat_notfleeing = df3[(df3.threat_level.isin(ThreatLevelList)) & (df3.armed_category == 'Unarmed') & (df3.flee_category == 'Not_Fleeing')] df_unarmed_nothreat_notfleeing.shape # - df_unarmed_nothreat_notfleeing.race.value_counts(normalize=True) # Armed, Attacking, Not Fleeing # + ThreatLevelList = ['attack'] df_armed_threat_notfleeing = df3[(df3.threat_level.isin(ThreatLevelList)) & (df3.armed_category == 'Armed') & (df3.flee_category == 'Not_Fleeing')] df_armed_threat_notfleeing.shape # - df_armed_threat_notfleeing.race.value_counts(normalize=True) # Armed, Attacking, Fleeing # # + ThreatLevelList = ['attack'] df_armed_threat_fleeing = df3[(df3.threat_level.isin(ThreatLevelList)) & (df3.armed_category == 'Armed') & (df3.flee_category == 'Fleeing')] df_armed_threat_fleeing.shape # - df_armed_threat_fleeing.race.value_counts(normalize=True) # + # Percentage of killings per state, of citiezed that were unarmed, no threat and not fleeing ((df_unarmed_nothreat_notfleeing.state.value_counts(normalize=False)/df3.state.value_counts(normalize=False))*100).sort_values(ascending=False) # + ((df_unarmed_nothreat_notfleeing.state.value_counts()/df3.state.value_counts())*100).sort_values(ascending=False)[:10].plot(kind='pie', figsize=(10,10)) # Despite of low mortality rates in these states, the chance of being shot while unarmed, not posing threat and not fleeing is higher than in the states with higher total killings # - # # Deaths per Year, Month and Race df3.year.value_counts(normalize=True) df3.groupby('month').race.value_counts(normalize=True).unstack().plot(kind='bar', figsize=(18,6)) 
df3.groupby('year').race.value_counts(normalize=True).unstack().plot(kind='bar', figsize=(18,6)) # # DEATH BY RACE WITH BODY CAMERA # + df3.groupby('race').body_camera.value_counts(normalize=False).unstack().plot(kind='bar', figsize=(18,8)) plt.title('Total Number of Fatalities Captured on Body Camera by Race') df3.groupby('race').body_camera.value_counts(normalize=True).unstack().plot(kind='bar', figsize=(18,8)) plt.title('Percentage of Fatalities Captured on Body Camera by Race') # - df3.groupby('race').body_camera.value_counts().unstack() df3.groupby('race').body_camera.value_counts(normalize=True).unstack()
Is the Police Killing Minorities?.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tutorial 2: Reverse non-equilibrium simulation of a MPCD fluid # # In this part of the tutorials, we will use the azplugins implementation of the [reverse perturbation method](https://journals.aps.org/pre/abstract/10.1103/PhysRevE.59.4894) by M&uuml;ller-Plathe to generate shear flow in a simple multi-particle collision dynamics (MPCD) fluid. For details on the general method and setup, refer to the [LJ reverse perturbation tutorial](01_reverse_perturbation_1_LJ.ipynb). # # ### System setup # # First, we need to set up a simple MPCD simulation. # # ```Python # import numpy as np # import sys # import hoomd # from hoomd import md # from hoomd import data # from hoomd import azplugins # from hoomd.azplugins import mpcd # # L = 10 # kT = 1.0 # rho_mpcd = 5 # viscosity_mpcd = 3.955 # # hoomd.context.initialize() # hoomd.context.SimulationContext() # # snapshot = hoomd.data.make_snapshot(N=0,box=data.boxdim(L=L)) # system = hoomd.init.read_snapshot(snapshot) # # N_mpcd = int(rho_mpcd*system.box.get_volume()) # snap = hoomd.mpcd.data.make_snapshot(N=N_mpcd) # # snap.particles.position[:] = np.random.uniform(-L/2.0, L/2.0, (N_mpcd,3)) # snap.particles.velocity[:] = np.random.normal(0, np.sqrt(kT), (N_mpcd,3)) # snap.particles.velocity[:] -= np.average(snap.particles.velocity,axis=0) # # mpcd_sys = hoomd.mpcd.init.read_snapshot(snap) # ``` # # We want to set up a simulation at a reduced temperature of $k_\text{B}T=1.0$ and at a number density of $\rho_\text{mpcd}=5$ in a cubic box with $L = 10$. Both positions and velocities are set up randomly in the box. If you have an idea what flow field to expect, you can make equilibration faster by modifying the velocities accordingly. 
MPCD kinematic viscosities can be calculated from (see this [publication](https://journals.aps.org/pre/pdf/10.1103/PhysRevE.72.016701)):
#
# $\eta = \eta_\text{coll} +\eta_\text{kin}$
#
# $\frac{\eta_\text{coll}}{\sqrt{k_\text{B}T a^2/m}} = \frac{1}{\lambda}\frac{1-\cos{\alpha}}{18}\left(1 -\frac{1}{\rho_\text{mpcd}}\right)$
#
# $\frac{\eta_\text{kin}}{\sqrt{k_\text{B}T a^2/m}} = \lambda\left(\frac{1}{(4-2\cos{\alpha}-2\cos{2\alpha})} \frac{5\rho_\text{mpcd}}{\rho_\text{mpcd}-1} -\frac{1}{2}\right)$
#
# where $m=1$ is the mass of each particle, $a=1$ is the grid size, $\alpha$ is the collision angle, and $\lambda$ is the timestep, defined later. We use the same setup to measure fluid properties as in the first half of the tutorial.
#
# ### Equilibration
#
# For the tutorial the system was equilibrated for 10,000 timesteps with stochastic rotation dynamics (SRD) with an angle $\alpha$= ``angle=130`` at a timestep of $\lambda$= ``dt=0.1``. For a real simulation it may be necessary to equilibrate the system properly/longer before starting the shear flow. More information about the MPCD algorithm and how to couple MPCD fluids to MD particles can be found in the [hoomd-blue documentation](https://hoomd-blue.readthedocs.io/en/stable/package-mpcd.html).
#
# ```Python
# hoomd.mpcd.integrator(dt=0.1)
# mpcd_sys.sorter.set_period(25)
# srd = hoomd.mpcd.collide.srd(seed=512, period=1, angle=130., kT=kT)
# bulk = hoomd.mpcd.stream.bulk(period=1)
#
# hoomd.run(1e5)
# ```
#
# The main difference between the LJ reverse perturbation and the MPCD reverse perturbation is a slight change in the function and its arguments. No group argument is needed, but all the other parameters are the same. The setup for measuring quantities is the same as for the LJ fluid, except that the mpcd doesn't have a gsd snapshot functionality.
# # ```Python # # f = azplugins.mpcd.reverse_perturbation(width=1,Nswap=1,period=1,target_momentum=0.5) # # # log the exchanged momentum during the simulation # log = hoomd.analyze.log(filename="tutorial_reverse_perturbation_2_mpcd.log", # quantities=['rp_momentum'], # period=1e2,overwrite=True) # # measure_vel = azplugins.flow.FlowProfiler(system=system, axis=2, bins=10, range=(-L/2,L/2), area=L**2) # analyze = hoomd.analyze.callback(measure_vel, period=1e2) # # ``` # # ### Run the non-equilibrium simulation # # ```Python # hoomd.run(1e6) # # snap = mpcd_sys.take_snapshot() # pos = snap.particles.position # vel = snap.particles.velocity # np.save('tutorial_reverse_perturbation_2_mpcd_pos.npy',pos) # np.save('tutorial_reverse_perturbation_2_mpcd_vel.npy',vel) # np.savetxt('tutorial_reverse_perturbation_2_mpcd_vx.hist', np.column_stack((measure_vel.centers, measure_vel.number_velocity[:,0]))) # ``` # # # For MPCD it is impractical to write actual snapshots of the system. Instead, configuration is saved at the end in numpy arrays for restarting purposes, which can be read in for the next simulation instead of random positions/velocities as starting point. # # ### azplugins.flow.reverse_perturbation parameters # # Please see the [LJ reverse perturbation tutorial](01_reverse_perturbation_1_LJ.ipynb) for a detailed description of the parameters. # # <div class="alert alert-info"> # # Note # # For MPCD, the speed of sound should be the upper limit for velocites, so it is advisable to not exceed $v_\text{max}=0.5$ in the desired flow field. # # </div> # # Because the viscosity is known, $v_\text{max}$ can be computed from the target Reynolds number $Re$: # # $v_\text{max} = \frac{2\eta_\text{mpcd}Re}{\rho_\text{mpcd}L_z}$. # # You can also estimate $v_\text{max}$ from the known viscosity and the reverse_perturbation parameters: # # $v_\text{max}=\frac{\text{target momentum}\cdot N_\text{swap}/\text{period}}{L_x L_y\lambda\eta_\text{mpcd}}\frac{L_z}{4}$. 
# # This can also be useful to estimate the parameters for the reverse perturbation based on a desired Reynolds number or for setting up the velocity profile close to the expected shape at the beginning of the simulation.
#
# ### Analyzing the results
#
# The simulation can be analyzed the same way as the LJ fluid. Because the mpcd viscosities are known, the pure mpcd fluid can be a useful check for the simulation setup.
#
# ### Future reading
#
# - Original publication: <NAME>. Reversing the perturbation in nonequilibrium
# molecular dynamics: An easy way to calculate the shear viscosity of
# fluids. Phys. Rev. E, 59:4894-4898, May 1999.
# <http://dx.doi.org/10.1103/PhysRevE.59.4894>
# - Follow-up publications, which describe the algorithm in more detail:
#     * <NAME>., & <NAME>. (2004). Reverse Non-equilibrium Molecular Dynamics. Lecture Notes in Physics, 310–326. <http://dx.doi.org/10.1007/978-3-540-39895-0_10>
#     * <NAME>., & <NAME>. (1999). Cause and effect reversed in non-equilibrium molecular dynamics: an easy route to transport coefficients. Computational and Theoretical Polymer Science, 9(3-4), 203–209. <http://dx.doi.org/10.1016/s1089-3156(99)00006-9>
# - Our own publications about the method, which describe some of the observed problems of the algorithm:
#     * Instability of Shear Flows in Spatially Periodic Domains, MP Howard, A Statt, HA Stone, TM Truskett, arXiv preprint arXiv:1907.07086 <https://arxiv.org/pdf/1907.07086.pdf>
#     * Unexpected secondary flows in reverse nonequilibrium shear flow simulations, A Statt, MP Howard, AZ Panagiotopoulos, Physical Review Fluids 4 (4), 043905 <https://arxiv.org/pdf/1811.04097.pdf>
# - This is the documentation page of the lammps implementation of the same algorithm: https://lammps.sandia.gov/doc/fix_viscosity.html Comparing against a different simulation package can be useful for finding bugs.
doc/source/tutorial/01_reverse_perturbation/01_reverse_perturbation_2_mpcd.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.6.13 64-bit (''lssl'': conda)'
#     name: python3613jvsc74a57bd07e2a9fa3d96e0b0167b7c016fb778693fe54466e00460bfa005b3c271472f290
# ---

# Build file markers (clip index files) for TUH EEG seizure detection.
# Each marker line has the form "<edf filename>,<clip index>,<seizure label>".

import os, sys
sys.path.append('../')
sys.path.append('../../')
import numpy as np
import h5py
import pickle
from data_utils import getSeizureTimes

# # File markers for EEG clips

RESAMPLE_DIR = '/media/nvme_data/siyitang/TUH_eeg_seq_v1.5.2/resampled_signal/'
#RESAMPLE_DIR = '/home/siyitang/data/TUH_v1.5.2/TUH_eeg_seq_v1.5.2/resampled_signal'
CLIP_LEN = 12        # clip length in seconds
TIME_STEP_SIZE = 1   # time step in seconds (used for variable-length clips)
STRIDE = CLIP_LEN    # stride == clip length -> non-overlapping clips
FREQUENCY = 200      # sampling rate (Hz) of the resampled signals

# Map split name -> list of EDF file names belonging to that split.
FILES_TO_CONSIDER = {}
for split in ['train', 'dev', 'test']:
    file_to_consider_txt = split+'Set_seizureDetect_files.txt'
    with open(file_to_consider_txt, 'r') as f:
        fstr = f.readlines()
    FILES_TO_CONSIDER[split] = [fstr[i].strip('\n').split(',')[0].split('/')[-1] for i in range(len(fstr))]
print(len(FILES_TO_CONSIDER['train']))
print(len(FILES_TO_CONSIDER['dev']))
print(len(FILES_TO_CONSIDER['test']))
FILES_TO_CONSIDER['train'][:5]

# +
# Index all EDF files in the raw data tree by full path.
RAW_DATA_DIR = "/media/nvme_data/TUH/v1.5.2/edf/"
#RAW_DATA_DIR = "/data/crypt/eegdbs/temple/tuh_eeg_seizure/v1.5.2/edf/"
edf_files = []
for path, subdirs, files in os.walk(RAW_DATA_DIR):
    for name in files:
        if ".edf" in name:
            edf_files.append(os.path.join(path, name))
# -

VARIABLE_LENGTH = False

# +
# Write one file-marker file per split.
np.random.seed(123)
resampled_files = os.listdir(RESAMPLE_DIR)
for split in ['train', 'dev', 'test']:
    physical_clip_len = int(FREQUENCY*CLIP_LEN)
    if VARIABLE_LENGTH:
        filemarker = os.path.join("variable_length", split+"_cliplen"+str(CLIP_LEN)+"_stride"+str(STRIDE)+"_timestep"+str(TIME_STEP_SIZE)+".txt")
    else:
        filemarker = split+"_cliplen"+str(CLIP_LEN)+"_stride"+str(STRIDE)+".txt"
    write_str = []
    for h5_fn in resampled_files:
        edf_fn = h5_fn.split('.h5')[0]+'.edf'
        if edf_fn not in FILES_TO_CONSIDER[split]:
            continue
        # Resolve the h5 file back to its (unique) raw EDF path.
        edf_fn_full = [file for file in edf_files if edf_fn in file]
        if len(edf_fn_full) != 1:
            print("{} found {} times!".format(edf_fn, len(edf_fn_full)))
            print(edf_fn_full)
        edf_fn_full = edf_fn_full[0]
        seizure_times = getSeizureTimes(edf_fn_full.split('.edf')[0])
        h5_fn_full = os.path.join(RESAMPLE_DIR, h5_fn)
        with h5py.File(h5_fn_full, 'r') as hf:
            resampled_sig = hf["resampled_signal"][()]
        # Variable-length mode also keeps a trailing partial clip (+2);
        # otherwise only full-length clips are kept (+1).
        if VARIABLE_LENGTH:
            num_clips = (resampled_sig.shape[-1] - CLIP_LEN * FREQUENCY) // (STRIDE * FREQUENCY) + 2
        else:
            num_clips = (resampled_sig.shape[-1] - CLIP_LEN * FREQUENCY) // (STRIDE * FREQUENCY) + 1
        for i in range(num_clips):
            start_window = i * FREQUENCY * STRIDE
            end_window = np.minimum(start_window + FREQUENCY * CLIP_LEN, resampled_sig.shape[-1])
            # only include the last short clip if it is longer than the time step size
            if VARIABLE_LENGTH:
                if (i == num_clips-1) and (end_window - start_window) < (TIME_STEP_SIZE * FREQUENCY):
                    break
            # A clip is positive if it overlaps any annotated seizure interval.
            is_seizure = 0
            for t in seizure_times:
                start_t = int(t[0] * FREQUENCY)
                end_t = int(t[1] * FREQUENCY)
                if not ((end_window < start_t) or (start_window > end_t)):
                    is_seizure = 1
                    break
            write_str.append(edf_fn + ',' + str(i) + ',' + str(is_seizure) + '\n')
    np.random.shuffle(write_str)
    with open(filemarker, 'w') as f:
        for curr_str in write_str:
            # f.write, not f.writelines: each entry is a single string.
            f.write(curr_str)
# -

# # Get seizure/non-seizure balanced train set

# +
if VARIABLE_LENGTH:
    # BUGFIX: this previously used the loop variable `split` (which is 'test'
    # after the loop above finishes), so the wrong split's file marker was
    # loaded. The balanced set must always be built from the train split.
    train_filemarker = os.path.join("variable_length", "train_cliplen"+str(CLIP_LEN)+"_stride"+str(STRIDE)+"_timestep"+str(TIME_STEP_SIZE)+".txt")
else:
    train_filemarker = os.path.join("train_cliplen"+str(CLIP_LEN)+"_stride"+str(STRIDE)+".txt")
with open(train_filemarker, 'r') as f:
    train_str = f.readlines()
# -

# Partition clips into seizure / non-seizure tuples.
sz_tuples = []
nonsz_tuples = []
for curr_str in train_str:
    file, clip_idx, sz_label = curr_str.strip('\n').split(',')
    sz_label = int(sz_label)
    if sz_label == 1:
        sz_tuples.append((file, clip_idx, sz_label))
    else:
        nonsz_tuples.append((file, clip_idx, sz_label))
print(len(sz_tuples))
print(len(nonsz_tuples))

# ### Keep all the seizure files and undersample non-seizure files...

# +
np.random.seed(123)
np.random.shuffle(nonsz_tuples)
nonsz_tuple_small = nonsz_tuples[:len(sz_tuples)]
len(nonsz_tuple_small)
# -

balanced_files = sz_tuples + nonsz_tuple_small
np.random.shuffle(balanced_files)
balanced_files[:5]

# +
if VARIABLE_LENGTH:
    balanced_train_filemarker = os.path.join(
        "variable_length",
        "train_cliplen"+str(CLIP_LEN)+"_stride"+str(STRIDE)+"_timestep"+str(TIME_STEP_SIZE)+"_balanced.txt")
else:
    balanced_train_filemarker = "train_cliplen"+str(CLIP_LEN)+"_stride"+str(STRIDE)+"_balanced.txt"
with open(balanced_train_filemarker, "w") as f:
    for tup in balanced_files:
        f.write(tup[0] + ',' + str(tup[1]) + ',' + str(tup[2]) + '\n')
# -

# # Get `pos_weight` to weigh the loss function

from data.dataloader import load_dataset
import torch

## on gemini
RAW_DATA_DIR = "/media/nvme_data/TUH/v1.5.2/"
PREPROC_DIR = "/media/nvme_data/siyitang/TUH_eeg_seq_v1.5.2/resampled_signal"
USE_FFT = True

dataloaders = load_dataset(input_dir=PREPROC_DIR,
                           raw_data_dir=RAW_DATA_DIR,
                           train_batch_size=64,
                           test_batch_size=64,
                           clip_len=CLIP_LEN,
                           time_step_size=TIME_STEP_SIZE,
                           stride=STRIDE,
                           standardize=False,
                           num_workers=8,
                           augmentation=True,
                           use_fft=USE_FFT,
                           balance_train=True)

# Materialize the whole (balanced) train split to compute label statistics.
y_train = []
x_train = []
file_name_train = []
for x, y, _, _, _, file_name in dataloaders['train']:
    y_train.append(y)
    x_train.append(x)
    file_name_train.extend(file_name)

x_train = torch.cat(x_train, dim=0)
x_train = x_train.data.cpu().numpy()
x_train.shape

y_train = torch.cat(y_train, dim=0)
y_train = y_train.data.cpu().numpy()
y_train.shape

# Per-clip seizure indicator: non-zero if any time step is labeled positive.
y_single = np.sum(y_train, axis=-1)
y_single.shape

pos_clip_idxs = (y_single != 0)
pos_timesteps = np.sum(y_train[pos_clip_idxs,:] == 1)
pos_timesteps

pos_clip_neg_timesteps = np.sum(y_train[pos_clip_idxs,:] == 0)
pos_clip_neg_timesteps

neg_clip_idxs = (y_single == 0)
neg_clip_neg_timesteps = np.sum(y_train[neg_clip_idxs,:] == 0)
neg_clip_neg_timesteps

print("Total positive time steps:", pos_timesteps)
print("Total negative time steps:", neg_clip_neg_timesteps+pos_clip_neg_timesteps)

# Ratio of negative to positive time steps.
# NOTE(review): presumably consumed as `pos_weight` by a weighted loss
# (e.g. BCEWithLogitsLoss) — confirm at the training call site.
pos_weight = (neg_clip_neg_timesteps+pos_clip_neg_timesteps) / pos_timesteps
pos_weight

# # Compute mean & std of train set

x_train.shape
mean = np.mean(x_train)
std = np.std(x_train)
print("Mean: {:.3f}, Std: {:.3f}".format(mean, std))

# Persist the normalization statistics for later (re)use.
if USE_FFT:
    with open("./mean_cliplen"+str(CLIP_LEN)+"_stride"+str(STRIDE)+"_fft.pkl", "wb") as pf:
        pickle.dump(mean, pf)
    with open("./std_cliplen"+str(CLIP_LEN)+"_stride"+str(STRIDE)+"_fft.pkl", "wb") as pf:
        pickle.dump(std, pf)
else:
    with open("./mean_cliplen"+str(CLIP_LEN)+"_stride"+str(STRIDE)+".pkl", "wb") as pf:
        pickle.dump(mean, pf)
    with open("./std_cliplen"+str(CLIP_LEN)+"_stride"+str(STRIDE)+".pkl", "wb") as pf:
        pickle.dump(std, pf)
src/dataloaders/prepare/eeg/write_filemarkers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Timing Tests of `fastnumbers` Functions Compared to Equivalent Solutions # # In order for you to see the benefit of `fastnumbers`, some timings are collected below for comparison to equivalent python implementations. The numbers may change depending on the machine you are on or the Python version you are using. # # Feel free to download this Jupyter Notebook and run the tests yourself to see how `fastnumbers` performs on your machine (it takes about 1-2 minutes total). # # **This notebook contains timing results for Python 3.6.** # # #### Some notes about the data # # - Each test is the time it takes for the function to run 100,000 times on a given input. # - Each test is repeated either 5 or 100 times, and the mean ± standard deviation is reported. # - The fastest time is shown in **bold** # - The timing results for the pure-Python functions include about 10-15 ms of "function call overhead"; the `fastnumbers` functions do not suffer from as much overhead because they are C-extensions. # - Python version-dependent behaviors: # - **Python 2.7** has a particularly slow `int` function, so the `fastnumbers` speedup is much larger on Python 2.7 than Python 3.x # - **Python >=3.6** is slightly slower in general than previous versions because underscores are now allowed in floats and integers which makes parsing take a bit longer due to the extra logic. # # #### Notes about the `Timing` class below # # The timing runner class is implemented below, and this is used in all the tests to perform the actual timing tests in the sections below. In general you can skip this implementation, but of note is the `THINGS_TO_TIME` tuple, which contains the values that are passed to the functions to type the various input types. 
# +
from __future__ import print_function, division
import re
import math
import timeit
from IPython.display import Markdown, display, clear_output


class Timer(object):
    """Class to time functions and make pretty tables of the output."""

    # This is a list of all the things we will time with an associated label.
    THINGS_TO_TIME = (
        ('not_a_number', 'Non-number String'),
        ('-41053', 'Int String'),
        ('35892482945872302493947939485729', 'Large Int String'),
        ('-41053.543034e34', 'Float String'),
        ('-41053.543028758302e256', 'Large Float String'),
        (-41053, 'Int'),
        (-41053.543028758302e100, 'Float'),
    )

    # Formatting strings.
    FUNCTION_CALL_FMT = '{}({!r})'

    def __init__(self, title):
        display(Markdown('### ' + title))
        self.functions = []

    def add_function(self, func, label, setup='pass'):
        """Add a function to be timed and compared."""
        self.functions.append((func, setup, label))

    def time_functions(self, repeat=5):
        """Time all the given functions against all input then display results."""
        # Collect the function labels to make the header of this table.
        # Show that the units are milliseconds for each.
        function_labels = [label + ' (ms)' for _, _, label in self.functions]

        # Construct the table strings, formatted in Markdown.
        # Store each line as a string element in a list.
        # This portion here is the table header only for now.
        table = Table()
        table.add_header('Input type', *function_labels)

        # For each value, time each function and collect the results.
        for value, value_label in self.THINGS_TO_TIME:
            row = []
            for func, setup, _ in self.functions:
                call = self.FUNCTION_CALL_FMT.format(func, value)
                try:
                    row.append(self._timeit(call, setup, repeat))
                except (ValueError, TypeError):
                    # We might send in some invalid input accidentally.
                    # Ignore those inputs.
                    break
            # Only add this row if the for loop quit without break.
            else:
                # Convert to milliseconds
                row = [(mean * 1000, stddev * 1000) for mean, stddev in row]
                # Make the lowest value bold.
                min_indx = min(enumerate(row), key=lambda x: x[1])[0]
                row = ['{:.3f} ± {:.3f}'.format(*x) for x in row]
                row[min_indx] = self.bold(row[min_indx])
                table.add_row(value_label, *row)

        # Show the results in a table.
        display(Markdown(str(table)))

    @staticmethod
    def mean(x):
        # Arithmetic mean via math.fsum for float accuracy.
        return math.fsum(x) / len(x)

    @staticmethod
    def stddev(x):
        # Sample standard deviation (n - 1 in the denominator).
        mean = Timer.mean(x)
        sum_of_squares = math.fsum((v - mean)**2 for v in x)
        return math.sqrt(sum_of_squares / (len(x) - 1))

    @staticmethod
    def bold(x):
        # Wrap in Markdown bold markers.
        return "**{}**".format(x)

    def _timeit(self, call, setup, repeat=5):
        """Run the timing and return (mean, stddev) of the runtimes in seconds."""
        result = timeit.repeat(call, setup, number=100000, repeat=repeat)
        return self.mean(result), self.stddev(result)


class Table(list):
    """List of strings that can be made into a Markdown table."""

    def add_row(self, *elements):
        self.append('|'.join(elements))

    def add_header(self, *elements):
        # Header row followed by the Markdown alignment/separator row.
        self.add_row(*elements)
        seperators = ['---'] * len(elements)
        seperators = [sep + (':' if i != 0 else '') for i, sep in enumerate(seperators)]
        self.add_row(*seperators)

    def __str__(self):
        return '\n'.join(self)
# -

# ## Built-in Functions Drop-in Replacement Timing Results
# The following timing tests compare the performance of Python's builtin `int` and `float` functions against the implementations from `fastnumbers` for various input types.
# Drop-in replacements: time builtin int/float against fastnumbers' versions.
timer = Timer('Timing comparison of `int` functions')
timer.add_function('int', 'builtin')
timer.add_function('int', 'fastnumbers', 'from fastnumbers import int')
timer.time_functions(repeat=100)

timer = Timer('Timing comparison of `float` functions')
timer.add_function('float', 'builtin')
timer.add_function('float', 'fastnumbers', 'from fastnumbers import float')
timer.time_functions(repeat=100)

# ## Error-Handling Conversion Functions Timing Results
# The following timing tests compare the performance of the `fastnumbers` functions that convert input to numeric types while doing error handling with common equivalent pure-Python implementations.

# +
def int_re(x, int_match=re.compile(r'[-+]?\d+$').match):
    """Function to simulate fast_int but with regular expressions."""
    try:
        if int_match(x):
            return int(x)
        else:
            return x
    except TypeError:
        # Not a string: fall back to the builtin conversion.
        return int(x)


def int_try(x):
    """Function to simulate fast_int but with try/except."""
    try:
        return int(x)
    except ValueError:
        return x


timer = Timer('Timing comparison of `int` functions with error handling')
timer.add_function('int_try', 'try/except', 'from __main__ import int_try')
timer.add_function('int_re', 'regex', 'from __main__ import int_re')
timer.add_function('fast_int', 'fastnumbers', 'from fastnumbers import fast_int')
timer.time_functions()

# +
def float_re(x, float_match=re.compile(r'[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$').match):
    """Function to simulate fast_float but with regular expressions."""
    try:
        if float_match(x):
            return float(x)
        else:
            return x
    except TypeError:
        # Not a string: fall back to the builtin conversion.
        return float(x)


def float_try(x):
    """Function to simulate fast_float but with try/except."""
    try:
        return float(x)
    except ValueError:
        return x


timer = Timer('Timing comparison of `float` functions with error handling')
timer.add_function('float_try', 'try/except', 'from __main__ import float_try')
timer.add_function('float_re', 'regex', 'from __main__ import float_re')
timer.add_function('fast_float', 'fastnumbers', 'from fastnumbers import fast_float')
timer.time_functions()

# +
def real_re(x, int_match=re.compile(r'[-+]?\d+$').match,
            real_match=re.compile(r'[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$').match):
    """Function to simulate fast_real but with regular expressions.

    Int-like strings convert to int, other numeric strings to float,
    and non-numeric strings pass through unchanged; numbers pass through.
    """
    try:
        if int_match(x):
            return int(x)
        elif real_match(x):
            return float(x)
        else:
            return x
    except TypeError:
        # Not a string: pass numbers through, reject everything else.
        if type(x) in (float, int):
            return x
        else:
            raise TypeError


def real_try(x):
    """Function to simulate fast_real but with try/except."""
    try:
        a = float(x)
    except ValueError:
        return x
    else:
        b = int(a)
        # BUGFIX: this previously returned `b` on both branches; a value that
        # is not integral must keep its float form (fast_real only coerces
        # to int when the coercion is lossless).
        return b if a == b else a


timer = Timer('Timing comparison of `float` (but coerce to `int` if possible) functions with error handling')
timer.add_function('real_try', 'try/except', 'from __main__ import real_try')
timer.add_function('real_re', 'regex', 'from __main__ import real_re')
timer.add_function('fast_real', 'fastnumbers', 'from fastnumbers import fast_real')
timer.time_functions()

# +
def forceint_re(x,
                # BUGFIX: the sign must be optional ([-+]?); the old pattern
                # r'[-+]\d+$' failed on unsigned integer strings, sending them
                # through the slower (and precision-losing) float path.
                int_match=re.compile(r'[-+]?\d+$').match,
                float_match=re.compile(r'[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$').match):
    """Function to simulate fast_forceint but with regular expressions."""
    try:
        if int_match(x):
            return int(x)
        elif float_match(x):
            return int(float(x))
        else:
            return x
    except TypeError:
        # Not a string: force the builtin conversion.
        return int(x)


def forceint_try(x):
    """Function to simulate fast_forceint but with try/except."""
    try:
        return int(x)
    except ValueError:
        try:
            return int(float(x))
        except ValueError:
            return x


timer = Timer('Timing comparison of forced `int` functions with error handling')
timer.add_function('forceint_try', 'try/except', 'from __main__ import forceint_try')
timer.add_function('forceint_re', 'regex', 'from __main__ import forceint_re')
timer.add_function('fast_forceint', 'fastnumbers', 'from fastnumbers import fast_forceint')
timer.time_functions()
# -

# ## Checking Functions Timing Results
# The following timing tests compare the performance of the `fastnumbers` functions that check if an input *could* be converted to numeric type with common equivalent pure-Python
# implementations.

# +
def isint_re(x, int_match=re.compile(r'[-+]?\d+$').match):
    """Function to simulate isint but with regular expressions."""
    t = type(x)
    # Numbers: only an actual int counts; strings: match against the int regex.
    return t == int if t in (float, int) else bool(int_match(x))


def isint_try(x):
    """Function to simulate isint but with try/except."""
    try:
        int(x)
    except ValueError:
        return False
    else:
        # int(x) also succeeds for floats, so explicitly reject them.
        return type(x) != float


timer = Timer('Timing comparison to check if value can be converted to `int`')
timer.add_function('isint_try', 'try/except', 'from __main__ import isint_try')
timer.add_function('isint_re', 'regex', 'from __main__ import isint_re')
timer.add_function('isint', 'fastnumbers', 'from fastnumbers import isint')
timer.time_functions()

# +
def isfloat_re(x, float_match=re.compile(r'[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$').match):
    """Function to simulate isfloat but with regular expressions."""
    t = type(x)
    # Numbers: only an actual float counts; strings: match against the float regex.
    return t == float if t in (float, int) else bool(float_match(x))


def isfloat_try(x):
    """Function to simulate isfloat but with try/except."""
    try:
        float(x)
    except ValueError:
        return False
    else:
        # float(x) also succeeds for ints, so explicitly reject them.
        return type(x) != int


timer = Timer('Timing comparison to check if value can be converted to `float`')
timer.add_function('isfloat_try', 'try/except', 'from __main__ import isfloat_try')
timer.add_function('isfloat_re', 'regex', 'from __main__ import isfloat_re')
timer.add_function('isfloat', 'fastnumbers', 'from fastnumbers import isfloat')
timer.time_functions()

# +
def isreal_re(x, real_match=re.compile(r'[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$').match):
    """Function to simulate isreal but with regular expressions."""
    return type(x) in (float, int) or bool(real_match(x))


def isreal_try(x):
    """Function to simulate isreal but with try/except."""
    try:
        float(x)
    except ValueError:
        return False
    else:
        return True


timer = Timer('Timing comparison to check if value can be converted to `float` or `int`')
timer.add_function('isreal_try', 'try/except', 'from __main__ import isreal_try')
timer.add_function('isreal_re', 'regex', 'from __main__ import isreal_re')
timer.add_function('isreal', 'fastnumbers', 'from fastnumbers import isreal')
timer.time_functions()

# +
def isintlike_re(x, int_match=re.compile(r'[-+]?\d+$').match,
                 float_match=re.compile(r'[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$').match):
    """Function to simulate isintlike but with regular expressions."""
    try:
        if int_match(x):
            return True
        elif float_match(x):
            # Float-like string: int-like only if its value is integral.
            return float(x).is_integer()
        else:
            return False
    except TypeError:
        # Not a string: int-like when conversion to int is lossless.
        return int(x) == x


def isintlike_try(x):
    """Function to simulate isintlike but with try/except."""
    try:
        a = int(x)
    except ValueError:
        try:
            a = float(x)
        except ValueError:
            return False
        else:
            return a.is_integer()
    else:
        return a == float(x)


timer = Timer('Timing comparison to check if value can be coerced losslessly to `int`')
timer.add_function('isintlike_try', 'try/except', 'from __main__ import isintlike_try')
timer.add_function('isintlike_re', 'regex', 'from __main__ import isintlike_re')
timer.add_function('isintlike', 'fastnumbers', 'from fastnumbers import isintlike')
timer.time_functions()
# -
TIMING_36.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Collecting data from internet # --- # Web scraping consists of # # - obtaining raw data from source ( requests ) # - filtering out useful data from garbage ( beautifulsoup4 ) # - saving into appropriate format ( csv/json ) # # `scrapy` is scrapping framework which extends above libraries # requests is http library for python # requests makes http easier than builtin urllib import requests url = 'http://www.mfd.gov.np' req = requests.get(url) req dir(req) req.headers req.status_code req.text from bs4 import BeautifulSoup soup = BeautifulSoup(req.text, "html.parser") # *you can use `lxml` instead of `html.parser` which is must faster for large html content* soup type(soup) dir(soup) tables = soup.find_all('table') tables len(tables) tables[0] tables[0].find_all('tr') cities = [] headers = [] for row in tables[0].find_all('tr'): ths = row.find_all('th') if ths: headers = [th.text.strip() for th in ths] else: tds = row.find_all('td') data = {} if tds and len(tds) >= 4: data[headers[0]] = tds[0].text.strip() data[headers[1]] = tds[1].text.strip() data[headers[2]] = tds[2].text.strip() data[headers[3]] = tds[3].text.strip() cities.append(data) print(cities) # **Alternative Method** # *in case of multiple tables within webpage, we can use css selectors* div = soup.find('div', attrs={'class': 'weather-data-table'}) div table = div.find('table') # + # first_table = tables[0] # - table.find_all('th', attrs={'class': 'center'}) data_set = [] for tr in table.find_all('tr'): _data = {} tds = tr.find_all('td') if tds and len(tds) > 3: # _data['Station'] = t # print(tds) _data['Station'] = tds[0].string _data['Maximum'] = tds[1].string _data['Minimum'] = tds[2].string _data['Rainfall'] = tds[3].string data_set.append(_data) print(data_set) 
data_set[0].keys()

# *writing to csv file*

import csv
# newline='' is required by the csv module so that DictWriter controls line
# endings itself (otherwise blank rows appear on Windows).
with open('dataset.csv', 'w', newline='') as csvfile:
    csvdoc = csv.DictWriter(csvfile, fieldnames=data_set[0].keys())
    csvdoc.writeheader()
    csvdoc.writerows(data_set)

data_set[0]

data_set[0].keys()

# *json output*

import json
# BUGFIX: the file handle was previously created inline with open(...) and
# never closed; a context manager flushes and closes it deterministically.
with open('dataset.json', 'w') as jsonfile:
    json.dump(data_set, jsonfile)
json.dumps(data_set)

# **Practice** *Obtain some data from any website available*
DataScience/References/Scraping Basics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # default_exp mgmnt.prep # - # # Main Preprocessing # # > This module comprises preprocessing techniques applied to software artifacts (TODO:cite here the papers employed for this preprocessings): # > # >This is an adapted version of Daniel McCrystal Nov 2019 # > # >This version also includes BPE preprocesing and NLTK. It's the main class to execute conventional pipelines. # # >Author: @danaderp March 2020 # #! pip install dit # #! pip install nltk # #! pip install tokenizers # #! pip install tensorflow_datasets # ! pip install -U tensorflow-gpu # ! pip install tensorflow_datasets #export from typing import List, Set, Callable, Tuple, Dict, Optional import re from nltk.stem.snowball import SnowballStemmer import nltk import pandas as pd import glob import os import pathlib from string import punctuation import csv from nltk.stem.snowball import SnowballStemmer englishStemmer=SnowballStemmer("english") # #! pip install nltk nltk.download('stopwords') #export from tensorflow.keras.preprocessing import text from pathlib import Path import glob from datetime import datetime #export # Imports import pandas as pd import sentencepiece as sp import numpy as np import json from pathlib import Path import sys import sentencepiece as spm from tokenizers import ByteLevelBPETokenizer from tokenizers.processors import BertProcessing #export import logging logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) from zipfile import ZipFile # ! 
# !unzip -qq cisco/CSB-CICDPipelineEdition-master.zip

# ## Setup

#hide
path_data = '../dvc-ds4se/' #dataset path

def libest_params():
    """Return the preprocessing configuration for the LibEST test bed."""
    return {
        'system': 'libest',
        #'path_zip': Path("cisco/sacp-python-common.zip"),
        'saving_path': path_data + 'se-benchmarking/traceability/testbeds/processed/libest_data',
        'language': 'english',
        # BUGFIX: a comma was missing after this entry, which made the dict a
        # syntax error (the next key collapsed into this value).
        'dataset': path_data + '',
        #'model_prefix': path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_128k' #For BPE Analysis
        #'model_prefix': path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_32k'
        'model_prefix': path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_8k'
    }

# Available sentencepiece BPE model prefixes, keyed by vocabulary size.
model_prefix = {
    'bpe8k': path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_8k',
    'bpe32k': path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_32k',
    'bpe128k': path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_128k'
}

#params = default_params()
params = libest_params()

# # Conventional Preprocessing Class

#export
class ConventionalPreprocessing():
    '''NLTK-based conventional preprocessing pipelines, optionally with BPE.'''

    def __init__(self, params, bpe = False):
        self.params = params
        # If a BPE model is provided, BPE preprocessing is enabled alongside
        # the conventional pipeline.
        if bpe:
            self.sp_bpe = spm.SentencePieceProcessor()
            self.sp_bpe.load(params['model_prefix']+'.model')
        else:
            self.sp_bpe = None

    def bpe_pieces_pipeline(self, doc_list):
        '''Computes BPE preprocessing according to params'''
        encoded_str = ''
        if self.sp_bpe is None:
            logging.info('Provide a BPE Model!')
        else:
            encoded_str = [self.sp_bpe.encode_as_pieces(doc) for doc in doc_list]
        return encoded_str

    #ToDo Transform it into a For-Comprehension
    def clean_punctuation(self, token):
        # Replace everything except ASCII letters and whitespace with a space,
        # i.e. !"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~0123456789 etc.
        # BUGFIX: re.I|re.A was previously passed positionally, i.e. as the
        # `count` argument of re.sub, which silently capped the number of
        # replacements; it must be passed as `flags=`.
        return re.sub(r'[^a-zA-Z\s]', ' ', token, flags=re.I|re.A)

    def split_camel_case_token(self, token):
        # Insert a space at lower->upper boundaries ("camelCase" -> "camel Case").
        return re.sub('([a-z])([A-Z])', r'\1 \2', token)

    def remove_terms(self, filtered_tokens):
        # Drop punctuation/digit tokens and tokens of length < 3 or > 20.
        remove_terms = punctuation + '0123456789'
        return [token for token in filtered_tokens
                if token not in remove_terms and len(token) > 2 and len(token) < 21]

    def stemmer(self, filtered_tokens):
        # Snowball (Porter2) stemming of each token.
        return [englishStemmer.stem(token) for token in filtered_tokens]

    def stop_words(self, filtered_tokens):
        # Remove NLTK stop words for the configured language.
        stop_words = nltk.corpus.stopwords.words(self.params['language'])
        return [token for token in filtered_tokens if token not in stop_words]

    def basic_pipeline(self, dict_filenames):
        '''@dict_filenames: {filename: code} -> [(new filename, preprocessed text)]'''
        pre_process = [(key.replace('.txt', '-pre.txt'), self.clean_punctuation(dict_filenames[key][0])) for key in dict_filenames]
        pre_process = [(doc[0], self.split_camel_case_token(doc[1])) for doc in pre_process]
        pre_process = [(doc[0], doc[1].lower()) for doc in pre_process]
        pre_process = [(doc[0], doc[1].strip()) for doc in pre_process]  # Leading whitespace removed
        pre_process_tokens = [(doc[0], nltk.WordPunctTokenizer().tokenize(doc[1])) for doc in pre_process]
        filtered_tokens = [(doc[0], self.stop_words(doc[1])) for doc in pre_process_tokens]  # Stop words
        filtered_tokens = [(doc[0], self.stemmer(doc[1])) for doc in filtered_tokens]        # Stemming
        filtered_tokens = [(doc[0], self.remove_terms(doc[1])) for doc in filtered_tokens]   # Remove-terms filter
        pre_process = [(doc[0], ' '.join(doc[1])) for doc in filtered_tokens]
        return pre_process

    def fromdocs_pipeline(self, docs): #TODO
        """@docs: a list of strings, each representing a document/code; returns joined strings."""
        pre_process = [self.clean_punctuation(doc) for doc in docs]
        logging.info('fromtokens_pipeline: clean punctuation')
        pre_process = [self.split_camel_case_token(doc) for doc in pre_process]
        logging.info('fromtokens_pipeline: camel case')
        pre_process = [doc.lower() for doc in pre_process]
        logging.info('fromtokens_pipeline: lowe case')
        pre_process = [doc.strip() for doc in pre_process]  # Leading whitespace removed
        logging.info('fromtokens_pipeline: white space removed')
        pre_process_tokens = [nltk.WordPunctTokenizer().tokenize(doc) for doc in pre_process]
        logging.info('fromtokens_pipeline: WordPunctTokenizer')
        filtered_tokens = [self.stop_words(doc) for doc in pre_process_tokens]  # Stop words
        logging.info('fromtokens_pipeline: Stop words')
        filtered_tokens = [self.stemmer(doc) for doc in filtered_tokens]        # Stemming
        logging.info('fromtokens_pipeline: Stemmings')
        filtered_tokens = [self.remove_terms(doc) for doc in filtered_tokens]   # Remove-terms filter
        logging.info('fromtokens_pipeline: Removed Special Terns')
        pre_process = [' '.join(doc) for doc in filtered_tokens]
        logging.info('fromtokens_pipeline END')
        return pre_process

    def frombatch_pipeline(self, batch): #TODO
        """@batch: a TensorFlow Dataset batch of utf-8 byte strings; returns token lists."""
        pre_process = [self.clean_punctuation(doc.decode("utf-8")) for doc in batch]
        logging.info('frombatch_pipeline: clean punctuation')
        pre_process = [self.split_camel_case_token(doc) for doc in pre_process]
        logging.info('frombatch_pipeline: camel case')
        pre_process = [doc.lower() for doc in pre_process]
        logging.info('frombatch_pipeline: lowe case')
        pre_process = [doc.strip() for doc in pre_process]  # Leading whitespace removed
        logging.info('frombatch_pipeline: white space removed')
        pre_process_tokens = [nltk.WordPunctTokenizer().tokenize(doc) for doc in pre_process]
        logging.info('frombatch_pipeline: WordPunctTokenizer')
        filtered_tokens = [self.stop_words(doc) for doc in pre_process_tokens]  # Stop words
        logging.info('frombatch_pipeline: Stop words')
        filtered_tokens = [self.stemmer(doc) for doc in filtered_tokens]        # Stemming
        logging.info('frombatch_pipeline: Stemmings')
        filtered_tokens = [self.remove_terms(doc) for doc in filtered_tokens]   # Remove-terms filter
        logging.info('frombatch_pipeline: Removed Special Terns')
        #pre_process = [ ' '.join(doc) for doc in filtered_tokens]
        logging.info('frombatch_pipeline [END]')
        return filtered_tokens

    def fromtensor_pipeline(self, ts_x): #TODO
        """@ts_x: a single tensor element (string); returns the preprocessed string."""
        pre_process = self.clean_punctuation(ts_x)
        pre_process = self.split_camel_case_token(pre_process)
        pre_process = pre_process.lower()
        pre_process = pre_process.strip()
        pre_process = nltk.WordPunctTokenizer().tokenize(pre_process)
        filtered_tokens = self.stop_words(pre_process)
        filtered_tokens = self.stemmer(filtered_tokens)
        filtered_tokens = self.remove_terms(filtered_tokens)
        pre_process = ' '.join(filtered_tokens)
        logging.info('fromtokens_pipeline END')
        return pre_process

    def SaveCorpus(self, df, language='js', sep=',', mode='a'):
        """Persist a corpus DataFrame to a timestamped CSV under saving_path."""
        timestamp = datetime.timestamp(datetime.now())
        path_to_link = self.params['saving_path'] + '['+ self.params['system'] + '-' + language + '-{}].csv'.format(timestamp)
        df.to_csv(path_to_link, header=True, index=True, sep=sep, mode=mode)
        logging.info('Saving in...' + path_to_link)

    def LoadCorpus(self, timestamp, language='js', sep=',', mode='a'):
        """Load a corpus DataFrame previously written by SaveCorpus."""
        path_to_link = self.params['saving_path'] + '['+ self.params['system'] + '-' + language + '-{}].csv'.format(timestamp)
        return pd.read_csv(path_to_link, header=0, index_col=0, sep=sep)

#export
def open_file(f, encoding='utf-8'):
    """Read a whole text file; print (and swallow) any error, returning None."""
    try:
        #return open(filename, 'r', encoding="ISO-8859-1").read()
        # BUGFIX: the handle was previously never closed; use a context manager.
        with open(f, 'r', encoding=encoding) as fh:
            return fh.read()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        print("Exception: ", sys.exc_info()[0])

#export
def get_files(system, ends):
    """Return a DataFrame of (path, basename, content) for files matching *ends*.

    NOTE(review): the `system` argument is currently unused — the search path
    is hard-coded; confirm whether it should be Path(system).
    """
    path = Path("cisco/CSB-CICDPipelineEdition-master/")
    names = [entry for entry in path.glob('**/*' + ends)]
    filenames = [(filename, os.path.basename(filename), open_file(filename)) for filename in names]
    return pd.DataFrame(filenames, columns=['names', 'filenames', 'content'])

# ## 1.
Processing Software Corpora from GitHub # > Cisco Repositories path = Path("cisco/CSB-CICDPipelineEdition-master/") path #hide def sacp_params(model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_8k'): return { 'system': 'sacp-python-common', 'path_zip': Path("/tf/data/cisco/sacp_data/sacp-python-common.zip"), 'dataset': 'tf/data/cisco/sacp_data/', 'saving_path': '../../'+'data/cisco/sacp_data/', 'language': 'english', 'model_prefix':model_prefix #For BPE Analysis } params = sacp_params() params prep = ConventionalPreprocessing(params, bpe = True) # + ######################################################## # - #explore archive = ZipFile(params['path_zip'], 'r') files = archive.namelist() #explore files = [name for name in archive.namelist() if name.endswith('.py')] #recursively finds files len(files) #export def get_file_zip(params, ends): archive = ZipFile( params['path_zip'], 'r') names = [name for name in archive.namelist() if name.endswith(ends)] filenames = [(filename, os.path.basename(filename), archive.read(filename) ) for filename in names] return pd.DataFrame( filenames ,columns = ['names','filenames','content']) #tst df_sampling = get_file_zip(params = params, ends='.py') #tst df_sampling.head() df_sampling[df_sampling['names'].str.contains('auth_utility.py')] #tst prep.SaveCorpus(df_sampling, language='py') #tst df_sampling = prep.LoadCorpus(1595859280.080238, language='py') df_sampling.head() # ### Creating standard dataframe for issues and pull-request (cisco) pr_data = pd.read_csv('cisco/sacp-pullrequest-01.csv', sep = '~', header = 0, encoding='UTF-8') pr_data # Merging all the system artifacts in one file pr_all_sys = pr_data.copy() pr_all_sys = pr_all_sys.replace(np.nan, ' ', regex=True) pr_all_sys['text'] = pr_all_sys['title'].astype(str) + pr_all_sys['labels'].astype(str) + pr_all_sys['body'].astype(str)#merging tree columns for the text pr_all_sys = pr_all_sys[['id-pr','text']] pr_all_sys = pr_all_sys.rename(columns={'id-pr': 
'ids'}) pr_all_sys['type'] = 'pr' #<------- File Type Standard for Target or Source pr_all_sys.head() pr_all_code = df_sampling.copy() #pr_all_code['text'] = pr_all_code.apply(lambda row: row['content'].decode("utf-8"), axis = 1) pr_all_code['content'] = pr_all_code['content'].apply(lambda x: eval(x)) pr_all_code['text'] = pr_all_code['content'].apply(lambda x: x.decode("utf-8")) pr_all_code = pr_all_code[['names','text']] pr_all_code = pr_all_code.rename(columns={'names': 'ids'}) pr_all_code['type'] = 'py' pr_all_sys = pd.concat([pr_all_sys, pr_all_code]) pr_all_sys['conv'] = prep.fromdocs_pipeline( pr_all_sys['text'].values ) #Conventional Preprocessing pr_all_sys prep.SaveCorpus(pr_all_sys, language='all-corpus', sep='~') # Loading for Preprocessing df_sacp = prep.LoadCorpus(1595953540.866044, language='all-corpus', sep='~') df_sacp.head() df_sacp.dropna( inplace = True ) #empty files are not considered #Iterating All Possible BPEs configs for bpe in model_prefix.keys(): mpr = model_prefix[bpe] prep = ConventionalPreprocessing(sacp_params(model_prefix = mpr), bpe = True) df_sacp[bpe] = prep.bpe_pieces_pipeline( df_sacp['text'].values ) #BPE Preprocessing df_sacp.head() prep.SaveCorpus(df_sacp, language='all-corpus', sep='~') # old code down [becareful] #debugging path = Path("cisco/CSB-CICDPipelineEdition-master/") names = [entry for entry in path.glob('**/*.py')] #hide #here looking for a file with encoding problems temp_list=[] for filename in names: print(filename) try: temp_list.append(open(filename, 'r', encoding="ISO-8859-1").read()) except FileNotFoundError as err: print('lookattheerr' + str(err)) except: print('bydefault') len(temp_list) df_java = get_files(system = params['system'], ends='.py') df_java.head() df_java.shape SaveCorpus(df_java, language='py') df_test = LoadCorpus(1592266849.29903,language='py') df_test.head() # # 2. 
Processing Software Corpora from CodeSearchNet #CodeSearchNet Parameters params = { 'system':'codesearchnet', 'saving_path': 'test_data/', 'language': 'english' } #[step1] Create Preprocesser <---------- preprocess_pipeline = ConventionalPreprocessing(params= params) python_files = sorted(Path('codesearch/python/').glob('**/*.gz')) java_files = sorted(Path('codesearch/java/').glob('**/*.gz')) java_files # + columns_long_list = ['repo', 'path', 'url', 'code', 'code_tokens', 'docstring', 'docstring_tokens', 'language', 'partition'] columns_short_list = ['code_tokens', 'docstring_tokens', 'language', 'partition'] def jsonl_list_to_dataframe(file_list, columns=columns_long_list): """Load a list of jsonl.gz files into a pandas DataFrame.""" return pd.concat([pd.read_json(f, orient='records', compression='gzip', lines=True)[columns] for f in file_list], sort=False) # - python_searchnet_df = jsonl_list_to_dataframe(python_files) java_searchnet_df = jsonl_list_to_dataframe(java_files) java_searchnet_df.head() java_searchnet_df.shape pytrain = java_searchnet_df[java_searchnet_df.partition.eq('train')] javatrain = java_searchnet_df[java_searchnet_df.partition.eq('train')].copy() pytrain.shape javatrain.shape preprocess_pipeline.SaveCorpus(javatrain, language='java') #Saving codesearchnet only training samples # ### Testing Preprocessing for CodeSearchNet java_searchnet_df = preprocess_pipeline.LoadCorpus(1592409554.097457, language='java') java_searchnet_df.head() java_searchnet_df['code'].values[0] preprocessed_df = preprocess_pipeline.fromdocs_pipeline(java_searchnet_df['code'].values) preprocessed_df[0] df_preprocessed = java_searchnet_df.copy() df_preprocessed['preprocessed'] = preprocessed_df df_preprocessed.head() SaveCorpus(df_preprocessed, language='preprocessed-java') #Saving codesearchnet # # 3. 
Processing from Wikipedia # >Inspired by [KD](https://www.kdnuggets.com/2017/11/building-wikipedia-text-corpus-nlp.html) # > # >Dump Wiki File [here](https://dumps.wikimedia.org/enwiki/latest/) #export import tensorflow_datasets as tfds #Config description: Wikipedia dataset for en, parsed from 20190301 dump. #Download size: 15.72 GiB #Dataset size: Unknown size #Examples: train 5,824596 dataset_name = 'wikipedia/20200301.en' #'wikipedia/20190301.en' #Download the dataset and create a tf.data.Dataset ds, info = tfds.load(dataset_name, split='train', with_info=True) #Accessing Metadata with DatasetInfo print(info.splits['train'].num_examples) info dataset_wiki = [] #dataset_wiki = ds.map(lambda ex_text, ex_title: preprocess_pipeline.fromtensor_pipeline( ex_text.decode("utf-8") )) dataset_wiki = [preprocess_pipeline.fromtensor_pipeline( ex['text'].decode("utf-8") ) for ex in tfds.as_numpy(ds)] df_dataset_wiki = pd.DataFrame( dataset_wiki ,columns = ['text']) # Build your input pipeline ds = ds.batch(2) # Get Numpy Arrays for ex in tfds.as_numpy(ds): #print( preprocess_pipeline.fromtensor_pipeline( ex['text'].decode("utf-8") ) ) #print("NEXT!!!") #print(ex['text'].decode("utf-8")) #print(ex) #np_text, np_title = ex['text'], ex['title'] print(preprocess_pipeline.frombatch_pipeline( ex['text'] )) ds = ds.unbatch() np_text[90] len(np_text) for ex in ds.take(4): print(ex) ds params = { 'system':'wiki', 'saving_path': 'test_data/', 'language': 'english' } import functools # # 4. 
Processing from Semeru Format and Converting into Mappings # > @danaderp July 29'20 import functools #hide semeru_format = path_data + 'se-benchmarking/traceability/datasets/formatted/semeru_format/' #Setting Up SemeruFormat def libest_params(model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_8k'): return { 'system': 'libest', 'saving_path': path_data+ 'se-benchmarking/traceability/testbeds/processed/', 'language': 'english', 'dataset' : { 'req':pathlib.Path( semeru_format + 'LibEST_semeru_format/requirements'), 'src':pathlib.Path( semeru_format + 'LibEST_semeru_format/source_code'), 'tc':pathlib.Path( semeru_format + 'LibEST_semeru_format/test') }, 'ends': ['.txt','.c','.h'], 'model_prefix':model_prefix, 'encoding':'utf-8' } def ebt_params(model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_8k'): return { 'system': 'ebt', 'saving_path': path_data+ 'se-benchmarking/traceability/testbeds/processed/', 'language': 'english', 'dataset' : { 'req':pathlib.Path( semeru_format + 'EBT_semeru_format/requirements'), 'tc': pathlib.Path( semeru_format + 'EBT_semeru_format/test_cases'), 'src':pathlib.Path( semeru_format + 'EBT_semeru_format/source_code') }, 'ends': ['.txt','.java','.c','.h','.TXT'], 'model_prefix':model_prefix, #'encoding':'ISO-8859-1' 'encoding':'utf-8' #english encoding } def itrust_params(model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_8k'): return { 'system': 'itrust', 'saving_path': path_data+ 'se-benchmarking/traceability/testbeds/processed/', 'language': 'english', 'dataset' : { 'uc':pathlib.Path( semeru_format + 'iTrust_semeru_format/use_cases'), 'src':pathlib.Path( semeru_format + 'iTrust_semeru_format/source_code') }, 'ends': ['.txt','.java','.c','.h','.TXT','.jsp'], 'model_prefix':model_prefix, 'encoding':'ISO-8859-1' #'encoding':'utf-8' #english encoding } def smos_params(model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_8k'): return { 'system': 'smos', 'saving_path': path_data+ 
'se-benchmarking/traceability/testbeds/processed/', 'language': 'italian', 'dataset' : { 'uc':pathlib.Path( semeru_format + 'SMOS_semeru_format/use_cases'), 'src':pathlib.Path( semeru_format + 'SMOS_semeru_format/source_code') }, 'ends': ['.txt','.java','.c','.h','.TXT','.jsp'], 'model_prefix':model_prefix, 'encoding':'ISO-8859-1' #'encoding':'utf-8' #english encoding } #parameters = libest_params(model_prefix=model_prefix['bpe8k']) parameters = smos_params() parameters parameters['dataset'].keys() logging.info("artifacts: " + str(parameters['dataset'].keys()) ) logging.info("artifacts: " + str(parameters['dataset']['uc'] ) ) lst = [entry for entry in parameters['dataset']['uc'].glob('**/*' + ".txt" )] lst[0] tmp = [(filename, os.path.basename(filename), open_file(filename, encoding=parameters['encoding']) ) for filename in lst] tmp[0] #export def loading_artifacts( params ): #Creating the mappings df_sys_g = pd.DataFrame( [] ,columns = ['ids','filenames','text']) #global dataframe for art in parameters['dataset'].keys(): sys_names = [[entry for entry in parameters['dataset'][art].glob('**/*' + ex )] for ex in parameters['ends']] sys_names = functools.reduce(lambda a,b : a+b,sys_names) #Flatting logging.info("artifacts: " + str( len(sys_names) ) ) sys_filenames = [(filename, os.path.basename(filename), open_file(filename, encoding=params['encoding']) ) for filename in sys_names] df_sys_l = pd.DataFrame( sys_filenames ,columns = ['ids','filenames','text']) #local dataframe df_sys_l['type'] = art df_sys_g = pd.concat([df_sys_g, df_sys_l ], ignore_index=True, sort=False) return df_sys_g df_test = loading_artifacts( params = parameters ) df_test[df_test['type']=='src'].head() #export def processing_artifacts( model_prefix, df_sys_all, funct_params ): df_sys_all = df_sys_all.copy() for bpe in model_prefix.keys(): #BPE Preprocessing prep = ConventionalPreprocessing( funct_params( model_prefix[bpe] ) , bpe = True) #Creating the Preprocessing Object df_sys_all[ bpe ] = 
prep.bpe_pieces_pipeline( df_sys_all['text'].values ) df_sys_all['conv'] = prep.fromdocs_pipeline( df_sys_all['text'].values ) #Conventional Preprocessing return df_sys_all, prep df_test_sys,r_prep = processing_artifacts( model_prefix = model_prefix, df_sys_all = df_test, funct_params = smos_params #itrust_params#ebt_params ) df_test_sys.head() df_test_sys['ids'].values[0] df_test_sys[df_test_sys['filenames'].str.contains('.java', regex=False)] r_prep.SaveCorpus(df_test_sys, language='all-corpus', sep='~') r_prep.LoadCorpus(1609221582.171744,language='all-corpus', sep='~') # ### Pre-processing from Semeru Format # + #Special Case EBT To Create Separate Files [Only one implementation] # - #Canonical EBT def ebt_params(model_prefix = path_data+'models/bpe/sentencepiece/wiki_py_java_bpe_8k'): return { 'system': 'ebt', 'saving_path': path_data+ 'se-benchmarking/traceability/testbeds/processed/', 'language': 'english', 'dataset' : { 'req':pathlib.Path( semeru_format + 'EBT_semeru_format/requirements.txt'), 'tc': pathlib.Path( semeru_format + 'EBT_semeru_format/test_cases.txt'), 'src':pathlib.Path( semeru_format + 'EBT_semeru_format/source_code') }, 'ends': ['.txt','.java','.c','.h','.TXT'], 'model_prefix':model_prefix, #'encoding':'ISO-8859-1' 'encoding':'utf-8' #english encoding } params = ebt_params() params['dataset']['req'] pd_ebt = pd.read_fwf(params['dataset']['req'],header=None,sep="/t") with open(params['dataset']['tc']) as fp: Lines = fp.readlines() for line in Lines: print(line.split("\t")) l = line.split("\t") p = semeru_format + 'EBT_semeru_format/test_cases/'+l[0]+'.txt' with open(p, "w") as wp: wp.writelines(l[1]) dict_filenames = {} #creating the function #base_dir = os.path.abspath(os.getcwd()) test_dir = pathlib.Path('test_data/LibEST_semeru_format/test') #path = os.path.join(base_dir, test_dir) #reading all files in a folder for filename in glob.glob(os.path.join(test_dir, '*.txt')): with open(filename, 'r') as f: # open in readonly mode 
dict_filenames[filename] = [f.read()] [os.path.join(test_dir,filename) for filename in os.listdir(test_dir)] #reading all files in a folder for filename in [os.path.join(test_dir,filename) for filename in os.listdir(test_dir)]: with open(filename, 'r') as f: # open in readonly mode dict_filenames[filename] = [f.read()] os.path.basename('test_data/LibEST_semeru_format/requirements/RQ17.txt').replace('.txt', '-pre.txt') dict_filenames re.sub(r'[^a-zA-Z\s]', ' ', "Ho:;<le_C$%&\oMe_estTa?@[\\is34~", re.I|re.A).split() remove_terms(clean_punctuation("their corresponding URIs:\n\n\n +------------------------+-----------------+-------------------+\n | Operation |Operation path | Details |\n +========================+=================+===================+\n | Distribution of CA | /cacerts | Section 4.1 |\n | Certificates (MUST) | | |\n +------------------------+-----------------+-------------------+\n | Enrollment of | /simpleenroll | Section 4.2 |\n | Clients (MUST) | | |\n +------------------------+-----------------+-------------------+\n | Re-enrollment of | /simplereenroll | Section 4.2.2 |\n | Clients (MUST) | | |\n +------------------------+-----------------+-------------------+\n | Full CMC (OPTIONAL) | /fullcmc | Section 4.3 |\n +------------------------+-----------------+-------------------+\n | Server-Side Key | /serverkeygen | Section 4.4 |\n | Generation (OPTIONAL) | | |\n +------------------------+-----------------+-------------------+\n | CSR Attributes | /csrattrs | Section 4.5 |\n | (OPTIONAL) | | |\n +------------------------+-----------------+-------------------+\n\n ")) remove_terms(split_camel_case_token(dict_filenames['test_data/LibEST_semeru_format/requirements/RQ17.txt'][0])) pre_process = preprocess_pipeline.basic_pipeline(dict_filenames) pre_process[0] #Writing Into A File df_pre_processed = pd.DataFrame(pre_process, columns =['filename', 'text']) #/.../benchmarking/traceability/testbeds/nltk df_pre_processed base_dir = os.path.abspath(os.getcwd()) 
# Output paths: preprocessed test-case corpus and its vocabulary.
pre_path = '/tf/main/benchmarking/traceability/testbeds/nltk/[libest-pre-tc].csv'
final_path = '/tf/main/benchmarking/traceability/testbeds/nltk/[libest-vocab-tc].csv'

# Append the preprocessed (filename, text) rows; space-separated, no header/index.
df_pre_processed.to_csv(pre_path, header=None, index=None, sep=' ', mode='a')


def save_dict(a_dict, path):
    """Persist a mapping as a two-column CSV, one ``key,value`` row per entry.

    Args:
        a_dict: mapping to serialize (e.g. the id -> word vocabulary).
        path: destination CSV file path (overwritten).
    """
    # FIX: context manager guarantees the handle is closed even on error,
    # and newline='' is the documented requirement for csv.writer to avoid
    # blank rows on platforms with \r\n line endings.
    with open(path, "w", newline="") as a_file:
        writer = csv.writer(a_file)
        for key, value in a_dict.items():
            writer.writerow([key, value])


# +
# 1 - Building the corpus vocabulary from the preprocessed documents.
tokenizer_corpora = text.Tokenizer()
tokenizer_corpora.fit_on_texts([doc[1] for doc in pre_process])  # doc = (filename, text)
word2id = tokenizer_corpora.word_index
id2word = {v: k for k, v in word2id.items()}
# -

save_dict(id2word, final_path)
id2word

# Merging the vocabularies of all artifact types (test cases, source, requirements).
vocab_path_tc = '/tf/main/benchmarking/traceability/testbeds/nltk/[libest-vocab-tc].csv'
df_read_vocab_tc = pd.read_csv(vocab_path_tc, names=['ids', 'text'], header=None)
df_read_vocab_tc.shape

vocab_path_src = '/tf/main/benchmarking/traceability/testbeds/nltk/[libest-vocab-src].csv'
df_read_vocab_src = pd.read_csv(vocab_path_src, names=['ids', 'text'], header=None)
df_read_vocab_src.shape

vocab_path_req = '/tf/main/benchmarking/traceability/testbeds/nltk/[libest-vocab-req].csv'
df_read_vocab_req = pd.read_csv(vocab_path_req, names=['ids', 'text'], header=None)
df_read_vocab_req.shape

# Set union de-duplicates terms shared across the three artifact vocabularies.
super_vocab_set = set(df_read_vocab_tc['text']) | set(df_read_vocab_src['text']) | set(df_read_vocab_req['text'])
len(super_vocab_set)

df_all_vocab = pd.DataFrame(list(super_vocab_set))
print(df_all_vocab)
df_all_vocab.to_csv('/tf/main/benchmarking/traceability/testbeds/nltk/[libest-vocab-all].csv',
                    header=None, index=None, sep=' ', mode='a')

# +
# Merging all the corpuses (test cases, requirements, source code).
pre_doc_path_tc = '/tf/main/benchmarking/traceability/testbeds/nltk/[libest-pre-tc].csv'
pre_doc_path_req = '/tf/main/benchmarking/traceability/testbeds/nltk/[libest-pre-req].csv'
pre_doc_path_src = '/tf/main/benchmarking/traceability/testbeds/nltk/[libest-pre-src].csv'
# df_read_pre_tc = pd.read_csv(pre_doc_path_tc, header=None, sep=' ')  # Need to include sep
pre_doc_path = [pre_doc_path_tc, pre_doc_path_req, pre_doc_path_src]
# Column 1 holds the preprocessed text (column 0 is the filename).
lis = [list(df_read[1]) for df_read in [pd.read_csv(path, header=None, sep=' ') for path in pre_doc_path]]
print(len(lis[0]), len(lis[1]), len(lis[2]))
# -

lis = functools.reduce(lambda a, b: a + b, lis)  # flatten the three lists into one corpus
lis

df_reduced_pre = pd.DataFrame(lis)
df_reduced_pre
df_reduced_pre.to_csv('/tf/main/benchmarking/traceability/testbeds/nltk/[libest-pre-all].csv')

# #########################################################

# hide
from nbdev.showdoc import *

# ! nbdev_build_docs

from nbdev.export import *

notebook2script()
nbs/0.1_mgmnt.prep.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <b>Calcule a integral dada</b> # $13. \int x^2(x^3 + 1)^{\frac{3}{4}}dx$ # $u = x^3 + 1$ # $du = 3x^2 dx$ # $\frac{du}{3} = x^2dx$ # <b>Aplicando as substituições</b> # $\int x^2(x^3 + 1)^{\frac{3}{4}}dx \rightarrow \frac{1}{3} \cdot \int u^{\frac{3}{4}} du$ # <b>Integrando $\frac{1}{3} \cdot \int u^{\frac{3}{4}}du$</b> # $\frac{1}{3} \cdot \int u^{\frac{3}{4}} du = \frac{1}{3} \cdot \frac{u^{\frac{3}{4} + 1}}{\frac{3}{4} + 1} + C$ # $\frac{1}{3} \cdot \int u^{\frac{3}{4}} du = \frac{1}{3} \cdot \frac{u^{\frac{7}{4}}}{\frac{7}{4}} + C$ # $\frac{1}{3} \cdot \int u^{\frac{3}{4}} du = \frac{4}{3} \cdot \frac{u^{\frac{7}{4}}}{7} + C$ # <b>Desfazendo as substituições</b> # $\frac{1}{3} \cdot \int u^{\frac{3}{4}} du = \frac{4}{21}\cdot\sqrt[4]{(x^3 + 1)^7} + C$
Problemas 5.2/13.ipynb
# ##### Copyright 2021 Google LLC. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # diet1_b # <table align="left"> # <td> # <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/contrib/diet1_b.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a> # </td> # <td> # <a href="https://github.com/google/or-tools/blob/master/examples/contrib/diet1_b.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a> # </td> # </table> # First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab. # !pip install ortools # + # Copyright 2010 <NAME> <EMAIL> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Simple diet problem in Google CP Solver. 
Standard Operations Research example in Minizinc Minimize the cost for the products: Type of Calories Chocolate Sugar Fat Food (ounces) (ounces) (ounces) Chocolate Cake (1 slice) 400 3 2 2 Chocolate ice cream (1 scoop) 200 2 2 4 Cola (1 bottle) 150 0 4 1 Pineapple cheesecake (1 piece) 500 0 4 5 Compare with the following models: * Tailor/Essence': http://hakank.org/tailor/diet1.eprime * MiniZinc: http://hakank.org/minizinc/diet1.mzn * SICStus: http://hakank.org/sicstus/diet1.pl * Zinc: http://hakank.org/minizinc/diet1.zinc * Choco: http://hakank.org/choco/Diet.java * Comet: http://hakank.org/comet/diet.co * ECLiPSe: http://hakank.org/eclipse/diet.ecl * Gecode: http://hakank.org/gecode/diet.cpp * Gecode/R: http://hakank.org/gecode_r/diet.rb * JaCoP: http://hakank.org/JaCoP/Diet.java This version use ScalProd() instead of Sum(). This model was created by <NAME> (<EMAIL>) Also see my other Google CP Solver models: http://www.hakank.org/google_or_tools/ """ from ortools.constraint_solver import pywrapcp # Create the solver. 
solver = pywrapcp.Solver("Diet")

#
# data
#
n = 4
price = [50, 20, 30, 80]  # in cents
limits = [500, 6, 10, 8]  # requirements for each nutrition type

# nutritions for each product
calories = [400, 200, 150, 500]
chocolate = [3, 2, 0, 0]
sugar = [2, 2, 4, 4]
fat = [2, 4, 1, 5]

#
# declare variables
#
x = [solver.IntVar(0, 100, "x%d" % i) for i in range(n)]
cost = solver.IntVar(0, 10000, "cost")

#
# constraints
#
# FIX: tie `cost` to the chosen quantities via the price vector; without this
# constraint the objective variable is unconstrained and the minimization
# trivially returns 0 (note `price` was defined but never used).
solver.Add(cost == solver.ScalProd(x, price))
solver.Add(solver.ScalProd(x, calories) >= limits[0])
solver.Add(solver.ScalProd(x, chocolate) >= limits[1])
solver.Add(solver.ScalProd(x, sugar) >= limits[2])
solver.Add(solver.ScalProd(x, fat) >= limits[3])

# objective
objective = solver.Minimize(cost, 1)

#
# solution
#
solution = solver.Assignment()
solution.AddObjective(cost)
solution.Add(x)

# last solution since it's a minimization problem
collector = solver.LastSolutionCollector(solution)
search_log = solver.SearchLog(100, cost)
solver.Solve(
    solver.Phase(x + [cost], solver.INT_VAR_SIMPLE, solver.ASSIGN_MIN_VALUE),
    [objective, search_log, collector])

# get the first (and only) solution
print("cost:", collector.ObjectiveValue(0))
print([("abcdefghij"[i], collector.Value(0, x[i])) for i in range(n)])
print()
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
print()
examples/notebook/contrib/diet1_b.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### This notebook should be run on **`Google Colab`** <br>
# if you don't have enough performance for heavy computation

# Behavioral-cloning pipeline: balance the steering-angle distribution,
# augment camera frames on the fly, and train the NVIDIA end-to-end CNN.

# +
import numpy as np
from keras.models import Sequential
from keras.optimizers import Adam
from keras.layers import Flatten, Dense, Dropout
import pandas as pd
import matplotlib.pyplot as plt
import cv2, random, os, ntpath
from sklearn.utils import shuffle
from keras.layers import Convolution2D  # check
from sklearn.model_selection import train_test_split
import matplotlib.image as mpimg
from imgaug import augmenters as iaa
# -

# !git clone https://github.com/minlaxz/simulator_track

# +
datadir = 'simulator_track'
columns = ['center', 'left', 'right', 'steering', 'throttle', 'reverse', 'speed']
data = pd.read_csv(os.path.join(datadir, 'driving_log.csv'), names=columns)
pd.set_option('display.max_colwidth', -1)
data.head(3)
# -


def path_leaf(path):
    """Return only the file name of *path* (strip the directory part)."""
    head, tail = ntpath.split(path)
    return tail


data['center'] = data['center'].apply(path_leaf)
data['left'] = data['left'].apply(path_leaf)
data['right'] = data['right'].apply(path_leaf)
data.head(3)

# +
num_bins = 25
samples_per_bin = 250
hist, bins = np.histogram(data['steering'], num_bins)  # steering points split into 25 bins
print(len(bins))  # np.histogram returns num_bins + 1 edges
center = (bins[:-1] + bins[1:]) * 0.5  # bin midpoints for plotting
print(len(center))
plt.bar(center, hist, width=0.05)
plt.plot((-1, 1), (samples_per_bin, samples_per_bin))
# -

# +
# Cap every bin at samples_per_bin so straight-driving (steering ~ 0)
# does not dominate the training set.
print(bins)
print('total data', len(data))
remove_list = []
for j in range(num_bins):
    list_ = []
    for i in range(len(data['steering'])):
        if data['steering'][i] >= bins[j] and data['steering'][i] <= bins[j + 1]:
            list_.append(i)
    list_ = shuffle(list_)               # shuffle so the dropped samples are random
    list_ = list_[samples_per_bin:]      # keep the first cap, drop the rest
    remove_list.extend(list_)
print('removed', len(remove_list))
data.drop(data.index[remove_list], inplace=True)
print('remaining', len(data))
hist, _ = np.histogram(data['steering'], (num_bins))
# -

# plt.bar(center, hist, width=0.05)
# plt.plot((np.min(data['steering']), np.max(data['steering'])), (samples_per_bin, samples_per_bin))


def load_img_steering(datadir, df):
    """Build parallel arrays of center-camera image paths and steering angles.

    FIX: iterate over the *df* argument instead of the global ``data``
    (identical behavior for the existing call site, which passes ``data``).
    """
    image_path = []
    steering = []
    for i in range(len(df)):
        indexed_data = df.iloc[i]
        center, left, right = indexed_data[0], indexed_data[1], indexed_data[2]
        image_path.append(os.path.join(datadir + center.strip()))
        steering.append(float(indexed_data[3]))
    return np.asarray(image_path), np.asarray(steering)


image_paths, steerings = load_img_steering(datadir + '/IMG/', data)

X_train, X_valid, y_train, y_valid = train_test_split(
    image_paths, steerings, test_size=0.2, random_state=6)
print('Training samples', len(X_train))
print('Validation samples', len(X_valid))


def zoom(image):
    """Randomly zoom the image up to 30%."""
    zoom = iaa.Affine(scale=(1, 1.3))
    return zoom.augment_image(image)


def pan(image):
    """Randomly translate the image by up to 10% on each axis."""
    pan = iaa.Affine(translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)})
    return pan.augment_image(image)


def img_random_brightness(image):
    """Randomly scale brightness by a factor in [0.2, 1.2]."""
    brightness = iaa.Multiply((0.2, 1.2))
    return brightness.augment_image(image)


def img_random_flip(image, steering_angle):
    """Mirror the image horizontally and negate the steering angle to match."""
    image = cv2.flip(image, 1)
    steering_angle = -steering_angle
    return image, steering_angle


def random_augment(image, steering_angle):
    """Load the image from its *path* and apply each augmentation with p=0.5.

    FIX: re-enabled ``mpimg.imread`` — the training branch of
    ``batch_generator`` passes a file path, so the frame must be read
    before any imgaug/cv2 operation can run on it.
    """
    image = mpimg.imread(image)
    if np.random.rand() < 0.5:
        image = pan(image)
    if np.random.rand() < 0.5:
        image = zoom(image)
    if np.random.rand() < 0.5:
        image = img_random_brightness(image)
    if np.random.rand() < 0.5:
        image, steering_angle = img_random_flip(image, steering_angle)
    return image, steering_angle


def img_preprocess(img):
    """Crop to the road, convert to YUV, blur, resize to 200x66, normalize."""
    img = img[60:135, :, :]                      # drop sky and hood
    img = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)   # color space used by the NVIDIA paper
    img = cv2.GaussianBlur(img, (3, 3), 0)
    img = cv2.resize(img, (200, 66))             # network input size
    img = img / 255
    return img


def batch_generator(image_paths, steering_ang, batch_size, istraining):
    """Yield (images, steerings) batches forever; augment only when training."""
    while True:
        batch_img = []
        batch_steering = []
        for i in range(batch_size):
            random_index = random.randint(0, len(image_paths) - 1)
            if istraining:
                im, steering = random_augment(image_paths[random_index],
                                              steering_ang[random_index])
            else:
                im = mpimg.imread(image_paths[random_index])
                steering = steering_ang[random_index]
            im = img_preprocess(im)
            batch_img.append(im)
            batch_steering.append(steering)
        yield (np.asarray(batch_img), np.asarray(batch_steering))


def nvidia_model():
    """Build the NVIDIA end-to-end steering CNN (single regression output).

    NOTE(review): ``Convolution2D(n, k, k, subsample=...)`` is the legacy
    Keras 1 signature — confirm the installed Keras version supports it.
    """
    model = Sequential()
    model.add(Convolution2D(24, 5, 5, subsample=(2, 2), input_shape=(66, 200, 3),
                            activation='elu'))
    model.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation='elu'))
    model.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation='elu'))
    model.add(Convolution2D(64, 3, 3, activation='elu'))
    model.add(Convolution2D(64, 3, 3, activation='elu'))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(100, activation='elu'))
    model.add(Dropout(0.5))
    model.add(Dense(50, activation='elu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='elu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))  # steering angle regression
    optimizer = Adam(lr=0.001)
    model.compile(loss='mse', optimizer=optimizer)
    return model


model = nvidia_model()
print(model.summary())

history = model.fit_generator(batch_generator(X_train, y_train, 100, 1),
                              steps_per_epoch=300,
                              epochs=10,
                              validation_data=batch_generator(X_valid, y_valid, 100, 0),
                              validation_steps=200,
                              verbose=1,
                              shuffle=1)

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['loss', 'val_loss'])
plt.title('Loss')
plt.xlabel('epoch')
CNN-DNN/SELF_DRIVING_CAR_MODEL_TRAINING.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Mosviz NIRISS example notebook # Note: We silence most warnings for now. For debugging, you can comment out the next cell and then restart the kernel to re-enable warnings. import warnings warnings.simplefilter('ignore') # Next, start Mosviz. # + from jdaviz.configs.mosviz.helper import Mosviz mosviz = Mosviz() mosviz.app # + # Run this cell if your desired data path is a temporary directory. import tempfile data_dir = tempfile.gettempdir() # + active="" # # *** OR *** # # Enable and run this cell if you have a different data path. # # Replace the value with your real path. # # data_dir = '/path/to/my_mosvis_data_dir' # + # Run this cell if you wish to download and unzip our example data. # This requires the "zipfile" package. from zipfile import ZipFile from astropy.utils.data import download_file import pathlib example_data = 'https://stsci.box.com/shared/static/9lkf5zha6zkf8ujnairy6krobbh038wt.zip' fn = download_file(example_data, cache=True) with ZipFile(fn, 'r') as sample_data_zip: sample_data_zip.extractall(data_dir) level3_path = (pathlib.Path(data_dir) / 'NIRISS_for_parser_p0171') data_dir = level3_path # - # Once loaded below, click on one of the rows in the table to have your data displayed. mosviz.load_data(directory=data_dir, instrument="niriss")
notebooks/MosvizNIRISSExample.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # AWS VPC - NAT vs EndPoint # > Comparison # # - toc: true # - comments: true # - author: <NAME> # - categories: [aws] # ### Comparison # # The following table shows the options you have if you decide to stay in private subnets. # Gateway VPC Endpoint Interface VPC Endpoint NAT Gateway # Supported AWS services S3, DynamoDB some all # Price per hour1 free $0.01 $0.045 # Price per GB1 free $0.01 $0.045 # # As you can see, using Gateway VPC Endpoints is cheaper than using Interface VPC Endpoint which is cheaper than using NAT Gateways # # # Nat Gateway Usage for 15 GB monthly usage for 1 NAT Gateway # # 730 hours in a month x 0.045 USD = 32.85 USD (Gateway usage hourly cost) # 15 GB per month x 0.045 USD = 0.68 USD (NAT Gateway data processing cost) # 32.85 USD + 0.68 USD = 33.53 USD (NAT Gateway processing and month hours) # 1 NAT Gateways x 33.53 USD = 33.53 USD (Total NAT Gateway usage and data processing cost) # # Total NAT Gateway usage and data processing cost (monthly): 33.53 USD # # # InterFace EndPoint Usage for 15 GB Monthly Usage for 1 VPC EndPoint # # 730 hours in a month x 0.01 USD = 7.30 USD (Hourly cost for endpoint ENI) # 15 GB per month x 0.01 USD = 0.15 USD (PrivateLink data processing cost) # 7.30 USD + 0.15 USD = 7.45 USD (Hourly cost and data processing per endpoint ENI) # 1 VPC endpoints x 1 ENIs per VPC endpoint x 7.45 USD = 7.45 USD (Total PrivateLink endpoints and data processing cost) # # Total PrivateLink endpoints and data processing cost (monthly): 7.45 USD # # # Gateway EndPoint Usage for 15 GB Monthly Usage # # Intra region: # (15 GB x 0.01 USD per GB outbound) + (15 GB x 0.01 USD per GB inbound) = 0.30 USD # # Data Transfer cost (monthly): 0.30 USD # ### Question # # We don't need NAT Gateway in Prod to run 24*7 for 
accessing EC2 instances; we can run it only when needed.
# If the endpoint is free, at least data transfer charges will not apply.
#
# ### References
# https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints.html
# https://docs.aws.amazon.com/vpc/latest/userguide/vpce-gateway.html
#
_notebooks/2020-08-15-NAT Gateway vs VPC Gateway End Point.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: percent # format_version: '1.3' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # %% [markdown] slideshow={"slide_type": "slide"} # <h1 style="text-align:center;">Machine Learning for Programmers</h1> # <h2 style="text-align:center;">Setup (on Linux)</h2> # <h3 style="text-align:center;">Dr. <NAME></h3> # %% [markdown] slideshow={"slide_type": "slide"} # # Code for this Course # # `https://github.com/hoelzl/ml-for-programmers` # # ## Important Commits # # - `master` branch # - `starter-kit-v1` tag # %% [markdown] slideshow={"slide_type": "slide"} # # Required Packages # # - numpy # - pandas # - matplotlib, seaborn # - scikit-learn # <hr/> # # - pytorch # - fastai # %% [markdown] slideshow={"slide_type": "slide"} # <h1 style="text-align:center;">Setting up your environment</h1> # # %% [markdown] slideshow={"slide_type": "slide"} # # Pip Install ...? # # - May or may not be what you want... # - Use virtual environment(s) # %% [markdown] slideshow={"slide_type": "slide"} # # Hardware and OS # # - Traditional ML: OS does not matter # - For Deep Learning: Linux, nVidia GPU # - Some libraries provide limited/no support for Windows # - Many DL saved_models are much slower without GPU # - Only CUDA is well supported by all frameworks # - This may change over time (ROCm 4.0 is in beta for PyTorch 1.8) # %% [markdown] slideshow={"slide_type": "subslide"} # # If you use Windows # # - Cloud instances work well # - Don't use a local VM # - Need to configure GPU passthrough # - WSL2 works amazingly well... # - ... 
but right now only with the Insider Program Dev Channel # %% [markdown] slideshow={"slide_type": "slide"} # # Installation Options # # - `pip` + manual installation of libraries # - `conda` (also installs native dependencies) # - Mixed: # - `conda` for basics, # - `pip` for *everything else* # %% [markdown] slideshow={"slide_type": "subslide"} # # Not Recommended # # - `poetry` etc. # - Truly mixed `conda` and `pip` install # %% [markdown] slideshow={"slide_type": "slide"} # # Installing Conda # # - Download Miniconda from <https://docs.conda.io/en/latest/miniconda.html#linux-installers> # - Follow the instructions on <https://docs.conda.io/projects/conda/en/latest/user-guide/install/linux.html> # %% [markdown] slideshow={"slide_type": "slide"} # # Setting up a Conda Environment # # - Download the code from GitHub to get the `environment.yml` file # - Install using `conda env create --file environment.yml` # - Don't update with `conda update --all` (or similar) # - Use `conda env update --file environment.yml --prune` instead # %% [markdown] slideshow={"slide_type": "slide"} #
notebooks/nb040_installation_on_linux.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# + [markdown] colab_type="text" id="view-in-github"
# [View in Colaboratory](https://colab.research.google.com/github/douglaswchung/california-housing-value/blob/master/optimizing_neural_net_performance.ipynb)

# + [markdown] colab_type="text" id="JndnmDMp66FL"
# #### Copyright 2017 Google LLC.

# + colab={} colab_type="code" id="hMqWDc_m6rUC"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# + [markdown] colab_type="text" id="eV16J6oUY-HN"
# # Improving Neural Net Performance

# + [markdown] colab_type="text" id="0Rwl1iXIKxkm"
# **Learning Objective:** Improve the performance of a neural network by normalizing features and applying various optimization algorithms
#
# **NOTE:** The optimization methods described in this exercise are not specific to neural networks; they are effective means to improve most types of models.

# + [markdown] colab_type="text" id="lBPTONWzKxkn"
# ## Setup
#
# First, we'll load the data.

# + colab={} colab_type="code" id="VtYVuONUKxko"
import math

from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset

tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format

california_housing_dataframe = pd.read_csv("https://storage.googleapis.com/mledu-datasets/california_housing_train.csv", sep=",")

# Shuffle the rows so the head/tail train-validation split below is unbiased.
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index))


# + colab={} colab_type="code" id="B8qC-jTIKxkr"
def preprocess_features(california_housing_dataframe):
  """Prepares input features from California housing data set.

  Args:
    california_housing_dataframe: A Pandas DataFrame expected to contain data
      from the California housing data set.
  Returns:
    A DataFrame that contains the features to be used for the model, including
    synthetic features.
  """
  selected_features = california_housing_dataframe[
    ["latitude",
     "longitude",
     "housing_median_age",
     "total_rooms",
     "total_bedrooms",
     "population",
     "households",
     "median_income"]]
  processed_features = selected_features.copy()
  # Create a synthetic feature.
  processed_features["rooms_per_person"] = (
    california_housing_dataframe["total_rooms"] /
    california_housing_dataframe["population"])
  return processed_features


def preprocess_targets(california_housing_dataframe):
  """Prepares target features (i.e., labels) from California housing data set.

  Args:
    california_housing_dataframe: A Pandas DataFrame expected to contain data
      from the California housing data set.
  Returns:
    A DataFrame that contains the target feature.
  """
  output_targets = pd.DataFrame()
  # Scale the target to be in units of thousands of dollars.
  output_targets["median_house_value"] = (
    california_housing_dataframe["median_house_value"] / 1000.0)
  return output_targets


# + id="Ah6LjMIJ2spZ"
# Choose the first 12000 (out of 17000) examples for training.
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_targets = preprocess_targets(california_housing_dataframe.head(12000))

# Choose the last 5000 (out of 17000) examples for validation.
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))

# Double-check that we've done the right thing.
# Fixed: the original used Python-2 `print` statements, which are a
# SyntaxError under the Python 3 kernel this notebook targets.
print("Training examples summary:")
display.display(training_examples.describe())
print("Validation examples summary:")
display.display(validation_examples.describe())

print("Training targets summary:")
display.display(training_targets.describe())
print("Validation targets summary:")
display.display(validation_targets.describe())


# + [markdown] colab_type="text" id="NqIbXxx222ea"
# ## Train the Neural Network
#
# Next, we'll train the neural network.

# + colab={} colab_type="code" id="6k3xYlSg27VB"
def construct_feature_columns(input_features):
  """Construct the TensorFlow Feature Columns.

  Args:
    input_features: The names of the numerical input features to use.
  Returns:
    A set of feature columns
  """
  return set([tf.feature_column.numeric_column(my_feature)
              for my_feature in input_features])


# + colab={} colab_type="code" id="De9jwyy4wTUT"
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
  """Trains a neural network model.

  Args:
    features: pandas DataFrame of features
    targets: pandas DataFrame of targets
    batch_size: Size of batches to be passed to the model
    shuffle: True or False. Whether to shuffle the data.
    num_epochs: Number of epochs for which data should be repeated.
      None = repeat indefinitely
  Returns:
    Tuple of (features, labels) for next data batch
  """
  # Convert pandas data into a dict of np arrays.
  features = {key: np.array(value) for key, value in dict(features).items()}

  # Construct a dataset, and configure batching/repeating.
  ds = Dataset.from_tensor_slices((features, targets))  # warning: 2GB limit
  ds = ds.batch(batch_size).repeat(num_epochs)

  # Shuffle the data, if specified.
  if shuffle:
    ds = ds.shuffle(10000)

  # Return the next batch of data.
  features, labels = ds.make_one_shot_iterator().get_next()
  return features, labels


# + colab={} colab_type="code" id="W-51R3yIKxk4"
def train_nn_regression_model(
    my_optimizer,
    steps,
    batch_size,
    hidden_units,
    training_examples,
    training_targets,
    validation_examples,
    validation_targets):
  """Trains a neural network regression model.

  In addition to training, this function also prints training progress
  information, as well as a plot of the training and validation loss over time.

  Args:
    my_optimizer: An instance of `tf.train.Optimizer`, the optimizer to use.
    steps: A non-zero `int`, the total number of training steps. A training
      step consists of a forward and backward pass using a single batch.
    batch_size: A non-zero `int`, the batch size.
    hidden_units: A `list` of int values, specifying the number of neurons in
      each layer.
    training_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for training.
    training_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for training.
    validation_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for validation.
    validation_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for validation.
  Returns:
    A tuple `(estimator, training_losses, validation_losses)`:
      estimator: the trained `DNNRegressor` object.
      training_losses: a `list` containing the training loss values taken
        during training.
      validation_losses: a `list` containing the validation loss values taken
        during training.
  """
  periods = 10
  steps_per_period = steps / periods

  # Create a DNNRegressor object.
  my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
  dnn_regressor = tf.estimator.DNNRegressor(
      feature_columns=construct_feature_columns(training_examples),
      hidden_units=hidden_units,
      optimizer=my_optimizer
  )

  # Create input functions.
  training_input_fn = lambda: my_input_fn(training_examples,
                                          training_targets["median_house_value"],
                                          batch_size=batch_size)
  predict_training_input_fn = lambda: my_input_fn(training_examples,
                                                  training_targets["median_house_value"],
                                                  num_epochs=1,
                                                  shuffle=False)
  predict_validation_input_fn = lambda: my_input_fn(validation_examples,
                                                    validation_targets["median_house_value"],
                                                    num_epochs=1,
                                                    shuffle=False)

  # Train the model, but do so inside a loop so that we can periodically assess
  # loss metrics.  (Python-2 print statements converted to print() calls.)
  print("Training model...")
  print("RMSE (on training data):")
  training_rmse = []
  validation_rmse = []
  for period in range(0, periods):
    # Train the model, starting from the prior state.
    dnn_regressor.train(
        input_fn=training_input_fn,
        steps=steps_per_period
    )
    # Take a break and compute predictions.
    training_predictions = dnn_regressor.predict(input_fn=predict_training_input_fn)
    training_predictions = np.array([item['predictions'][0] for item in training_predictions])
    validation_predictions = dnn_regressor.predict(input_fn=predict_validation_input_fn)
    validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
    # Compute training and validation loss.
    training_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(training_predictions, training_targets))
    validation_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(validation_predictions, validation_targets))
    # Occasionally print the current loss.
    print(" period %02d : %0.2f" % (period, training_root_mean_squared_error))
    # Add the loss metrics from this period to our list.
    training_rmse.append(training_root_mean_squared_error)
    validation_rmse.append(validation_root_mean_squared_error)
  print("Model training finished.")

  # Output a graph of loss metrics over periods.
  plt.ylabel("RMSE")
  plt.xlabel("Periods")
  plt.title("Root Mean Squared Error vs. Periods")
  plt.tight_layout()
  plt.plot(training_rmse, label="training")
  plt.plot(validation_rmse, label="validation")
  plt.legend()

  print("Final RMSE (on training data): %0.2f" % training_root_mean_squared_error)
  print("Final RMSE (on validation data): %0.2f" % validation_root_mean_squared_error)

  return dnn_regressor, training_rmse, validation_rmse


# + id="KueReMZ9Kxk7"
_ = train_nn_regression_model(
    my_optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.0007),
    steps=5000,
    batch_size=70,
    hidden_units=[10, 10],
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)

# + [markdown] colab_type="text" id="flxmFt0KKxk9"
# ## Linear Scaling
# It can be a good standard practice to normalize the inputs to fall within the range -1, 1. This helps SGD not get stuck taking steps that are too large in one dimension, or too small in another. Fans of numerical optimization may note that there's a connection to the idea of using a preconditioner here.
# + colab={} colab_type="code" id="Dws5rIQjKxk-"
def linear_scale(series):
  """Rescale a pandas Series linearly into the range [-1, 1]."""
  min_val = series.min()
  max_val = series.max()
  scale = (max_val - min_val) / 2.0
  # NOTE(review): `scale` is 0 for a constant series, which would make the
  # division below produce inf/NaN -- confirm inputs are non-constant.
  return series.apply(lambda x:((x - min_val) / scale) - 1.0)

# + [markdown] colab_type="text" id="MVmuHI76N2Sz"
# ## Task 1: Normalize the Features Using Linear Scaling
#
# **Normalize the inputs to the scale -1, 1.**
#
# **Spend about 5 minutes training and evaluating on the newly normalized data. How well can you do?**
#
# As a rule of thumb, NN's train best when the input features are roughly on the same scale.
#
# Sanity check your normalized data. (What would happen if you forgot to normalize one feature?)
#

# + id="yD948ZgAM6Cx"
def normalize_linear_scale(examples_dataframe):
  """Returns a version of the input `DataFrame` that has all its features normalized linearly."""
  #
  # Your code here: normalize the inputs.
  # Applies linear_scale column-wise to the whole DataFrame.
  return examples_dataframe.apply(lambda x:linear_scale(x))
#  pass

normalized_dataframe = normalize_linear_scale(preprocess_features(california_housing_dataframe))
normalized_training_examples = normalized_dataframe.head(12000)
normalized_validation_examples = normalized_dataframe.tail(5000)

_ = train_nn_regression_model(
    my_optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.0007),
    steps=5000,
    batch_size=70,
    hidden_units=[10, 10],
    training_examples=normalized_training_examples,
    training_targets=training_targets,
    validation_examples=normalized_validation_examples,
    validation_targets=validation_targets)

# + id="yMFvTFQ3hJgx"
display.display(normalized_dataframe.describe())

# + [markdown] colab_type="text" id="jFfc3saSxg6t"
# ### Solution
#
# Click below for one possible solution.

# + [markdown] colab_type="text" id="Ax_IIQVRx4gr"
# Since normalization uses min and max, we have to ensure it's done on the entire dataset at once.
#
# We can do that here because all our data is in a single DataFrame. If we had multiple data sets, a good practice would be to derive the normalization parameters from the training set and apply those identically to the test set.

# + id="D-bJBXrJx-U_"
def normalize_linear_scale(examples_dataframe):
  """Returns a version of the input `DataFrame` that has all its features normalized linearly."""
  # Solution version: scales each feature column explicitly.
  processed_features = pd.DataFrame()
  processed_features["latitude"] = linear_scale(examples_dataframe["latitude"])
  processed_features["longitude"] = linear_scale(examples_dataframe["longitude"])
  processed_features["housing_median_age"] = linear_scale(examples_dataframe["housing_median_age"])
  processed_features["total_rooms"] = linear_scale(examples_dataframe["total_rooms"])
  processed_features["total_bedrooms"] = linear_scale(examples_dataframe["total_bedrooms"])
  processed_features["population"] = linear_scale(examples_dataframe["population"])
  processed_features["households"] = linear_scale(examples_dataframe["households"])
  processed_features["median_income"] = linear_scale(examples_dataframe["median_income"])
  processed_features["rooms_per_person"] = linear_scale(examples_dataframe["rooms_per_person"])
  return processed_features

normalized_dataframe = normalize_linear_scale(preprocess_features(california_housing_dataframe))
normalized_training_examples = normalized_dataframe.head(12000)
normalized_validation_examples = normalized_dataframe.tail(5000)
display.display(normalized_dataframe.describe())

# + [markdown] colab_type="text" id="MrwtdStNJ6ZQ"
# ## Task 2: Try a Different Optimizer
#
# ** Use the Adagrad and Adam optimizers and compare performance.**
#
# The Adagrad optimizer is one alternative. The key insight of Adagrad is that it modifies the learning rate adaptively for each coefficient in a model, monotonically lowering the effective learning rate. This works great for convex problems, but isn't always ideal for the non-convex problem Neural Net training. You can use Adagrad by specifying `AdagradOptimizer` instead of `GradientDescentOptimizer`. Note that you may need to use a larger learning rate with Adagrad.
#
# For non-convex optimization problems, Adam is sometimes more efficient than Adagrad. To use Adam, invoke the `tf.train.AdamOptimizer` method. This method takes several optional hyperparameters as arguments, but our solution only specifies one of these (`learning_rate`). In a production setting, you should specify and tune the optional hyperparameters carefully.

# + id="61GSlDvF7-7q"
#
# YOUR CODE HERE: Retrain the network using Adagrad and then Adam.
_ = train_nn_regression_model(
    my_optimizer=tf.train.AdamOptimizer(learning_rate=0.01),
    steps=5000,
    batch_size=70,
    hidden_units=[10, 10],
    training_examples=normalized_training_examples,
    training_targets=training_targets,
    validation_examples=normalized_validation_examples,
    validation_targets=validation_targets)
#

# + [markdown] colab_type="text" id="FSPZIiYgyh93"
# ### Solution
#
# Click below for the solution

# + [markdown] colab_type="text" id="X1QcIeiKyni4"
# First, let's try Adagrad.

# + colab={} colab_type="code" id="Ntn4jJxnypGZ"
_, adagrad_training_losses, adagrad_validation_losses = train_nn_regression_model(
    my_optimizer=tf.train.AdagradOptimizer(learning_rate=0.5),
    steps=500,
    batch_size=100,
    hidden_units=[10, 10],
    training_examples=normalized_training_examples,
    training_targets=training_targets,
    validation_examples=normalized_validation_examples,
    validation_targets=validation_targets)

# + [markdown] colab_type="text" id="5JUsCdRRyso3"
# Now let's try Adam.

# + colab={} colab_type="code" id="lZB8k0upyuY8"
_, adam_training_losses, adam_validation_losses = train_nn_regression_model(
    my_optimizer=tf.train.AdamOptimizer(learning_rate=0.009),
    steps=500,
    batch_size=100,
    hidden_units=[10, 10],
    training_examples=normalized_training_examples,
    training_targets=training_targets,
    validation_examples=normalized_validation_examples,
    validation_targets=validation_targets)

# + [markdown] colab_type="text" id="twYgC8FGyxm6"
# Let's print a graph of loss metrics side by side.

# + colab={} colab_type="code" id="8RHIUEfqyzW0"
plt.ylabel("RMSE")
plt.xlabel("Periods")
plt.title("Root Mean Squared Error vs. Periods")
plt.plot(adagrad_training_losses, label='Adagrad training')
plt.plot(adagrad_validation_losses, label='Adagrad validation')
plt.plot(adam_training_losses, label='Adam training')
plt.plot(adam_validation_losses, label='Adam validation')
_ = plt.legend()

# + [markdown] colab_type="text" id="UySPl7CAQ28C"
# ## Task 3: Explore Alternate Normalization Methods
#
# **Try alternate normalizations for various features to further improve performance.**
#
# If you look closely at summary stats for your transformed data, you may notice that linear scaling some features leaves them clumped close to `-1`.
#
# For example, many features have a median of `-0.8` or so, rather than `0.0`.

# + id="QWmm_6CGKxlH"
_ = normalized_training_examples.hist(bins=20, figsize=(18, 12), xlabelsize=10)

# + id="n8DF2aOTotVR"
# NOTE(review): `clip` is defined in a LATER cell; this cell only works after
# that cell has been executed at least once (fine interactively, but a
# NameError when the notebook is run top-to-bottom).
test=clip(training_examples["rooms_per_person"], 0, 6)
plt.hist(test,bins=20)

# + [markdown] colab_type="text" id="Xx9jgEMHKxlJ"
# We might be able to do better by choosing additional ways to transform these features.
#
# For example, a log scaling might help some features.
# Or clipping extreme values may make the remainder of the scale more informative.

# + colab={} colab_type="code" id="baKZa6MEKxlK"
def log_normalize(series):
  """Compress a heavy-tailed Series with log(x + 1)."""
  return series.apply(lambda x: math.log(x + 1.0))

def clip(series, clip_to_min, clip_to_max):
  """Clamp every value of `series` into [clip_to_min, clip_to_max]."""
  return series.apply(lambda x: (
    min(max(x, clip_to_min), clip_to_max)))

def z_score_normalize(series):
  """Standardize a Series to zero mean and unit standard deviation."""
  mean = series.mean()
  std_dv = series.std()
  return series.apply(lambda x: (x - mean) / std_dv)

def binary_threshold(series, threshold):
  """Map each value to 1 if strictly greater than `threshold`, else 0."""
  return series.apply(lambda x: (1 if x > threshold else 0))

# + [markdown] colab_type="text" id="-wCCq_ClKxlO"
# The block above contains a few additional possible normalization functions. Try some of these, or add your own.
#
# Note that if you normalize the target, you'll need to un-normalize the predictions for loss metrics to be comparable.

# + id="8ToG-mLfMO9P"
def normalize(examples_dataframe):
  """Returns a version of the input `DataFrame` that has all its features normalized."""
  #
  # YOUR CODE HERE: Normalize the inputs.
  processed_dataframe = pd.DataFrame()
  processed_dataframe["latitude"] = linear_scale(examples_dataframe["latitude"])
  processed_dataframe["longitude"] = linear_scale(examples_dataframe["longitude"])
  processed_dataframe["median_income"] = log_normalize(examples_dataframe["median_income"])
  processed_dataframe["rooms_per_person"] = clip(examples_dataframe["rooms_per_person"], 0, 6)
  processed_dataframe["housing_median_age"] = linear_scale(examples_dataframe["housing_median_age"])
  return processed_dataframe
#  pass

normalized_dataframe = normalize(preprocess_features(california_housing_dataframe))
normalized_training_examples = normalized_dataframe.head(12000)
normalized_validation_examples = normalized_dataframe.tail(5000)

_ = train_nn_regression_model(
    my_optimizer=tf.train.AdagradOptimizer(learning_rate=0.15),
    steps=5000,
    batch_size=120,
    hidden_units=[12, 6, 2],
    training_examples=normalized_training_examples,
    training_targets=training_targets,
    validation_examples=normalized_validation_examples,
    validation_targets=validation_targets)

# + [markdown] colab_type="text" id="GhFtWjQRzD2l"
# ### Solution
#
# Click below for one possible solution.

# + [markdown] colab_type="text" id="OMoIsUMmzK9b"
# These are only a few ways in which we could think about the data. Other transformations may work even better!
#
# `households`, `median_income` and `total_bedrooms` all appear normally-distributed in a log space.
#
# `latitude`, `longitude` and `housing_median_age` would probably be better off just scaled linearly, as before.
#
# `population`, `totalRooms` and `rooms_per_person` have a few extreme outliers. They seem too extreme for log normalization to help. So let's clip them instead.

# + id="XDEYkPquzYCH"
def normalize(examples_dataframe):
  """Returns a version of the input `DataFrame` that has all its features normalized."""
  processed_features = pd.DataFrame()
  processed_features["households"] = log_normalize(examples_dataframe["households"])
  processed_features["median_income"] = log_normalize(examples_dataframe["median_income"])
  processed_features["total_bedrooms"] = log_normalize(examples_dataframe["total_bedrooms"])
  processed_features["latitude"] = linear_scale(examples_dataframe["latitude"])
  processed_features["longitude"] = linear_scale(examples_dataframe["longitude"])
  processed_features["housing_median_age"] = linear_scale(examples_dataframe["housing_median_age"])
  processed_features["population"] = linear_scale(clip(examples_dataframe["population"], 0, 5000))
  processed_features["rooms_per_person"] = linear_scale(clip(examples_dataframe["rooms_per_person"], 0, 5))
  processed_features["total_rooms"] = linear_scale(clip(examples_dataframe["total_rooms"], 0, 10000))
  return processed_features

# Bug fix: the original called normalize_linear_scale() here, which silently
# ignored the `normalize` function defined just above (leaving its log/clip
# transforms dead code). Call the solution's normalize() instead.
normalized_dataframe = normalize(preprocess_features(california_housing_dataframe))
normalized_training_examples = normalized_dataframe.head(12000)
normalized_validation_examples = normalized_dataframe.tail(5000)

_ = train_nn_regression_model(
    my_optimizer=tf.train.AdagradOptimizer(learning_rate=0.15),
    steps=1000,
    batch_size=50,
    hidden_units=[10, 10],
    training_examples=normalized_training_examples,
    training_targets=training_targets,
    validation_examples=normalized_validation_examples,
    validation_targets=validation_targets)

# + [markdown] colab_type="text" id="b7atJTbzU9Ca"
# ## Optional Challenge: Use only Latitude and Longitude Features
#
# **Train a NN model that uses only latitude and longitude as features.**
#
# Real estate people are fond of saying that location is the only important feature in housing price.
# Let's see if we can confirm this by training a model that uses only latitude and longitude as features.
#
# This will only work well if our NN can learn complex nonlinearities from latitude and longitude.
#
# **NOTE:** We may need a network structure that has more layers than were useful earlier in the exercise.

# + id="XxsbgENnxnfN"
def normalize(examples_dataframe):
  """Returns a version of the input `DataFrame` that has all its features normalized."""
  #
  # YOUR CODE HERE: Normalize the inputs.
  processed_dataframe = pd.DataFrame()
  processed_dataframe["latitude"] = linear_scale(examples_dataframe["latitude"])
  processed_dataframe["longitude"] = linear_scale(examples_dataframe["longitude"])
  return processed_dataframe
#  pass

normalized_dataframe = normalize(preprocess_features(california_housing_dataframe))
normalized_training_examples = normalized_dataframe.head(12000)
normalized_validation_examples = normalized_dataframe.tail(5000)

_ = train_nn_regression_model(
    my_optimizer=tf.train.AdagradOptimizer(learning_rate=1.0),
    steps=10000,
    batch_size=240,
    hidden_units=[20, 10, 4, 2],
    training_examples=normalized_training_examples,
    training_targets=training_targets,
    validation_examples=normalized_validation_examples,
    validation_targets=validation_targets)

# + [markdown] colab_type="text" id="P8BLQ7T71JWd"
# ### Solution
#
# Click below for a possible solution.

# + [markdown] colab_type="text" id="1hwaFCE71OPZ"
# It's a good idea to keep latitude and longitude normalized:

# + id="djKtt4mz1ZEc"
def location_location_location(examples_dataframe):
  """Returns a version of the input `DataFrame` that keeps only the latitude and longitude."""
  processed_features = pd.DataFrame()
  processed_features["latitude"] = linear_scale(examples_dataframe["latitude"])
  processed_features["longitude"] = linear_scale(examples_dataframe["longitude"])
  return processed_features

lll_dataframe = location_location_location(preprocess_features(california_housing_dataframe))
lll_training_examples = lll_dataframe.head(12000)
lll_validation_examples = lll_dataframe.tail(5000)

_ = train_nn_regression_model(
    my_optimizer=tf.train.AdagradOptimizer(learning_rate=0.05),
    steps=500,
    batch_size=50,
    hidden_units=[10, 10, 5, 5, 5],
    training_examples=lll_training_examples,
    training_targets=training_targets,
    validation_examples=lll_validation_examples,
    validation_targets=validation_targets)

# + [markdown] colab_type="text" id="Dw2Mr9JZ1cRi"
# This isn't too bad for just two features. Of course, property values can still vary significantly within short distances.
optimizing_neural_net_performance.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from netgan.netgan import *
#import tensorflow as tf
import tensorflow.compat.v1 as tf
from netgan import utils
import scipy.sparse as sp
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import roc_auc_score, average_precision_score
import time
# %matplotlib inline
# -

# #### Load the data

_A_obs, _X_obs, _z_obs = utils.load_npz('data/cora_ml.npz')
# Symmetrize the adjacency matrix and binarize (edges are unweighted).
_A_obs = _A_obs + _A_obs.T
_A_obs[_A_obs > 1] = 1
# Restrict the graph to its largest connected component.
lcc = utils.largest_connected_components(_A_obs)
_A_obs = _A_obs[lcc,:][:,lcc]
_N = _A_obs.shape[0]

val_share = 0.1
test_share = 0.05
#seed = 481516234

# #### Load the train, validation, test split from file

# BUG FIX: the split file stores a pickled dict (object array), so
# allow_pickle=True is required — np.load defaults to allow_pickle=False
# since NumPy 1.16.3 and would raise a ValueError here otherwise.
loader = np.load('pretrained/cora_ml/split.npy', allow_pickle=True).item()
train_ones = loader['train_ones']
val_ones = loader['val_ones']
val_zeros = loader['val_zeros']
test_ones = loader['test_ones']
test_zeros = loader['test_zeros']

train_graph = sp.coo_matrix((np.ones(len(train_ones)),(train_ones[:,0], train_ones[:,1]))).tocsr()
# Sanity check: the training graph must be symmetric (undirected).
assert (train_graph.toarray() == train_graph.toarray().T).all()

# #### Parameters

rw_len = 16
batch_size = 128

walker = utils.RandomWalker(train_graph, rw_len, p=1, q=1, batch_size=batch_size)

# #### Create our NetGAN model

netgan = NetGAN(_N, rw_len, walk_generator= walker.walk, gpu_id=0, use_gumbel=True, disc_iters=3,
                W_down_discriminator_size=32, W_down_generator_size=128,
                l2_penalty_generator=1e-7, l2_penalty_discriminator=5e-5,
                generator_layers=[40], discriminator_layers=[30],
                temp_start=5, temperature_decay=0.99998, learning_rate=0.0003,
                legacy_generator=True)

# #### Load pretrained model

saver = tf.train.Saver()
saver.restore(netgan.session, "pretrained/cora_ml/pretrained_gen.ckpt")

# #### Generate random walks on the trained model

sample_many = netgan.generate_discrete(10000, reuse=True, legacy=True)
samples = [] for _ in range(60): if (_+1) % 10 == 0: print(_+1) samples.append(sample_many.eval({netgan.tau: 0.5})) # #### Assemble score matrix from the random walks rws = np.array(samples).reshape([-1, rw_len]) scores_matrix = utils.score_matrix_from_random_walks(rws, _N).tocsr() # #### Compute graph statistics A_select = sp.csr_matrix((np.ones(len(train_ones)), (train_ones[:,0], train_ones[:,1]))) A_select = train_graph sampled_graph = utils.graph_from_scores(scores_matrix, A_select.sum()) plt.spy(sampled_graph, markersize=.2) plt.show() plt.spy(A_select, markersize=.2) plt.show() utils.edge_overlap(A_select.toarray(), sampled_graph)/A_select.sum() utils.compute_graph_statistics(sampled_graph) utils.compute_graph_statistics(A_select.toarray())
src/netgan/graph_generation_pretrained.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Activities with BM25 - Old style # # Steps taken previously: # 1. Select list of activities (we have 87 `feature_profiles.csv`) # 2. Train Word2Vec model on entire Wikivoyage corpus (no filtering!) # 3. For each activity: # - get vector with 50 most similar words # - manually remove words that are not relevant for a topic (we did that in a shared gsheet) # - what is left is the search query for the activity (`feature_terms.csv`) # 4. Get texts for all destinations in scope # 5. Use BM25 to create a score for each place/activity pair. # # + import pandas as pd import numpy as np from matplotlib import pyplot as plt # %matplotlib inline # - source_dir = '../../../../' feature_input_dir = 'src/stairway/sources/wikivoyage/feature_engineering/features_input_data/' feature_terms_file = 'feature_terms.csv' feature_mapping_file = 'feature_profiles.csv' # ## Destinations queries_df = pd.read_csv(source_dir + feature_input_dir + feature_terms_file, header=None, index_col=0) queries = queries_df.apply( # lambda x: ','.join(x.dropna().astype(str)), lambda x: x.dropna().astype(str).tolist(), axis=1 ) queries.head() # Nice texts types = pd.read_csv(source_dir + feature_input_dir + feature_mapping_file) types.head() # ## Place texts # + # %%time path_wiki_in = source_dir + 'data/wikivoyage/raw/enwikivoyage-20191001-pages-articles.xml.bz2' from gensim.corpora import WikiCorpus wiki = WikiCorpus(path_wiki_in, article_min_tokens=0) # - # %%time corpus = list(wiki.get_texts()) print(len(corpus)) # Get index for places in scope df = pd.read_csv(source_dir + 'data/wikivoyage/enriched/wikivoyage_destinations.csv') df.shape df_all = pd.read_csv(source_dir + 'data/wikivoyage/clean/wikivoyage_metadata_all.csv') df_all.shape # Check that df_all matches with corpus size! 
Otherwise indexing wouldn't work. assert len(corpus) == len(df_all) # get indices from df_all that are in scope scope = df_all.loc[lambda row: row['pageid'].isin(df['wiki_id'])][['pageid']] scope.shape # get texts for places in scope corpus_scope = [corpus[i] for i in scope.index] len(corpus_scope) # ## BM25 # # [Explaination of BM25](https://turi.com/learn/userguide/feature-engineering/bm25.html) including a Python example/libary. The transformed output is a column of type float with the BM25 score for each document. # # This implementation seems easiest to use: https://pypi.org/project/rank-bm25/ # + from rank_bm25 import BM25Okapi bm25 = BM25Okapi(corpus_scope) # - # ### Try one query = queries['art galleries'] query # apply bm25 doc_scores = bm25.get_scores(query) # print min, max scores and how many documents got a score bigger than 0 print('min:', min(doc_scores), 'max:', max(doc_scores), '>0:', sum(doc_scores > 0)) top_5 = np.argsort(doc_scores)[-5:] print(top_5) print(doc_scores[top_5]) df.iloc[top_5] # It seems to heavily bias towards places with relatively little text which contains a couple of the required terms. # # **TODO**: investigate how longer documents could still end up high in the ranking? # ### Loop over all queries # %%time scores = np.array([bm25.get_scores(queries[i]) for i in range(0, len(queries))]).T print(scores.shape) df_scores = pd.DataFrame(scores, columns=queries.index) df_scores.shape df_scores.head() # Add proper column names df_scores.columns = types['feature_name'] df_scores.head() # #### Examine a top 5: df.iloc[np.argsort(df_scores['Whale watching'])[-5:]] # ## Compare to old scores # # Note, the old scores had more places in scope so exact counts don't match. Also the BM25 implementation was done manually instead of importing a library. 
df_scores_old = pd.read_csv(source_dir + "data/old-sql-database/destination_scores.csv") df_scores_old.shape # Compare distributions for some features: # + fig, axes = plt.subplots(1, 2, figsize=(16,6)) df_scores['Museums'].hist(bins=30, ax=axes[0]) axes[0].set_title('New scores. Count = {}'.format(sum(df_scores['Museums'] > 0)), size=15) df_scores_old['museums'].hist(bins=30, ax=axes[1]); axes[1].set_title('Old scores. Count = {}'.format(sum(df_scores_old['museums'] > 0)), size=15) fig.tight_layout() # + fig, axes = plt.subplots(1, 2, figsize=(16,6)) df_scores['Islands'].hist(bins=30, ax=axes[0]) axes[0].set_title('New scores. Count = {}'.format(sum(df_scores['Islands'] > 0)), size=15) df_scores_old['islands'].hist(bins=30, ax=axes[1]); axes[1].set_title('Old scores. Count = {}'.format(sum(df_scores_old['islands'] > 0)), size=15) fig.tight_layout() # + fig, axes = plt.subplots(1, 2, figsize=(16,6)) # this time plot only > 0 df_scores['Wineries'].loc[df_scores['Wineries'] > 0].hist(bins=30, ax=axes[0]) axes[0].set_title('New scores. Count = {}'.format(sum(df_scores['Wineries'] > 0)), size=15) df_scores_old['wineries'].loc[df_scores_old['wineries'] > 0].hist(bins=30, ax=axes[1]); axes[1].set_title('Old scores. Count = {}'.format(sum(df_scores_old['wineries'] > 0)), size=15) fig.tight_layout() # - # Distributions are quite different. Possibly reasons: # # * New text data, things might have changed in wikivoyage # * Different sizes and possibly different places in scope # * Different implementations of BM25 (package vs. manual) # * Different hyperparameters for BM25 # # However counts are total counts per category and distributions are enough alike to accept the new scores as feature scores. 
# # ## Write to csv # + output_path = source_dir + 'data/wikivoyage/enriched/wikivoyage_features.csv' df_final = pd.concat([df[['id']], df_scores], axis=1) df_final.to_csv(output_path, index=False) # + api_path = 'api/data/wikivoyage_features.csv' df_final.to_csv(source_dir + api_path, index=False) # + api_path_types = 'api/data/wikivoyage_features_types.csv' types.to_csv(source_dir + api_path_types, index=False) # - # Done.
notebooks/data/wikivoyage/feature-engineering/features-bm25.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# batch correction of HSC dataset

from unioncom import UnionCom
import numpy as np

# Load the two batches and their per-cell type labels.
data1 = np.loadtxt("../hsc/domain1.txt")
data2 = np.loadtxt("../hsc/domain2.txt")
type1 = np.loadtxt("../hsc/type1.txt")
type2 = np.loadtxt("../hsc/type2.txt")
# BUG FIX: np.int was a deprecated alias for the builtin int and was removed
# in NumPy 1.24; use the builtin directly.
type1 = type1.astype(int)
type2 = type2.astype(int)

# Integrate the two batches with UnionCom in batch-correction mode.
uc = UnionCom.UnionCom(integration_type='BatchCorrect', distance_mode='cosine')
integrated_data = uc.fit_transform(dataset=[data1,data2])
# Label-transfer accuracy as a quality check of the integration.
uc.test_LabelTA(integrated_data, [type1,type2])
uc.Visualize([data1,data2], integrated_data, mode='PCA') # without datatype
uc.Visualize([data1,data2], integrated_data, [type1,type2], mode='PCA') # with datatype
Examples/HSC.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np df = pd.read_excel('D:\\testfinal2.xlsx') df.category.unique() lst = [] lst = df.category.unique() type(lst) lst = np.ndarray.tolist(lst) lst df2 = pd.DataFrame({'weight':df.groupby(['name','category'])['ratings'].mean()}).reset_index() df2 cat_ind = dict(list(zip(lst,range(0,len(lst))))) names = df2.name.unique() all_fv = [] for name in names: tfv = [0]*len(lst) temp = df2[df2['name']==name] cat_weights = dict(zip(temp['category'],temp['weight'])) for t in temp['category']: ind = cat_ind[t] tfv[ind] = cat_weights[t] all_fv.append(tfv) multi_v = [] for v in all_fv: l = len([i for i in v if i > 0]) if l > 1: multi_v.append(l) len(names) len(multi_v) # + for u in df2.name: for c in lst: index = l[0:2] #tlst[index] = df2.weight[u] index # - # tlst = [] for j in range(0,len(lst)): tlst.append(0) tlst len(lst) for i in lst: ct = 0 re = 0 rev = 0 for j in range(0, len(df)): if(df.category[j] == i): re = re + df.ratings[j] ct = ct + 1 rev = re*1.0/ct rel.append(rev) totalrev.append(ct) type(df.ratings[0]) rel = [] totalrev = [] rel totalrev place_counts = [] for i in lst: ct = 0 place = [] for j in range(0, len(df)): if(df.category[j] == i): place.append(df.place[j]) place = np.asarray(place) ct = len(np.unique(place)) place_counts.append(ct) place_counts place = df.place.unique() type(place) ls2 = [df.category, df.ratings] dt = {'category':lst, 'avg_reating':rel, 'total_rev':totalrev} dt final_lst = [] for i in range(0, len(lst)): dt = {'category':lst[i], 'total_places':place_counts[i], 'avg_reating':rel[i], 'total_rev':totalrev[i]} final_lst.append(dt) final_lst df1 = pd.DataFrame(final_lst) df1.to_excel('D://features.xlsx') df
Feature vectors/feature_vector_calc_final.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Sandbox for playing around with heading-based segmentation, etc. # - import movingpandas as mpd import geopandas as gpd from matplotlib import pyplot as plt from shapely.geometry import Point import pandas as pd import numpy as np import hvplot.pandas import geojson file = 'ooinhdeep_points.geojson' with open(file) as f: gj = geojson.load(f) features = gj['features'][0] #Extract lats and lons lats = [feature['properties']['latitude'] for feature in gj['features']] lons = [feature['properties']['longitude'] for feature in gj['features']] df = pd.DataFrame({'lat': lats, 'lon': lons}) df.hvplot(x='lon', y='lat') #Create a GeoDataFrame geom = [Point(x,y) for x, y in zip(df['lon'], df['lat'])] gdf = gpd.GeoDataFrame(df, geometry = geom) gdf = gdf.set_crs(epsg=4326) #Get gdf with differences between x and y for calculating angle with arctan. Think about what to do with last point gdf_diff = gdf.diff(periods=-1) gdf['theta'] = np.arctan2(gdf_diff['lat'], gdf_diff['lon']) tol = .3 n = 500 def iterative_segmentation(gdf, criterion, tol): """ gdf is the GeoDataFrame with tracks that you're interested in criterion is a column name in the gdf that you're using to segment — for example, angle if you're looking at changes in heading, or speed tol is the tolerance to a given criterion. For example, a tolerance of .3 means that a change of .3 in criterion value between two rows will be allowed """ trajectory_id = [] j = 0 #i.e., the first trajectory for i, row in gdf.iterrows(): #Assign starting value for the criterion if i == 0: start_value = row[criterion] else: if abs(row[criterion] - start_value) < tol: pass elif abs(row[criterion] - start_value) >= tol: j+=1 #Start a new trajectory label start_value = row[criterion] #Update comparison value. 
This method is more robust to changes in curvature #that may not represent a new trajectory trajectory_id.append(j) gdf['traj_id'] = trajectory_id return gdf def direction_segmentation(gdf, criterion, tol): """ gdf is the GeoDataFrame with tracks that you're interested in criterion is a column name in the gdf that you're using to segment — for example, angle if you're looking at changes in heading, or speed tol is the tolerance to a given criterion. For example, a tolerance of .3 means that a change of .3 in criterion value between two rows will be allowed """ trajectory_label = [] j = 0 #i.e., the first trajectory for i, row in gdf.iterrows(): #Assign starting value for the criterion if i == 0: start_value = row[criterion] else: if abs(row[criterion] - start_value) < tol: pass elif abs(row[criterion] - start_value) >= tol: start_angle = row[criterion] #only change starting angle if we're starting a new trajectory j+=1 #Start a new trajectory label #Update trajectory_label.append(j) gdf['traj_id'] = trajectory_id return gdf #Apply the iterative segmentation approach on angle with a tolerance of 0.3 seg_gdf = iterative_segmentation(gdf, 'theta', .3) traj_collection = mpd.TrajectoryCollection(seg_gdf, 'traj_id') #For comparison with original gdf gdf['traj_id'] = 1 traj_collection1 = mpd.TrajectoryCollection(gdf, 'traj_id') plot = traj_collection.hvplot() hvplot.save(plot, 'gliders.html') # + ## Comparison with original tracks # - traj_collection.trajectories[0].df.drop('geometry', axis = 1).hvplot.line(x='lon', y='lat') * traj_collection.trajectories[0].df.drop('geometry', axis = 1).hvplot.scatter(x='lon', y='lat') traj_collection1.trajectories[0].df.drop('geometry', axis = 1).head(200).hvplot(x='lon', y='lat')
Rule_based_segmentation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="EPoFC1rP0JDQ" colab_type="code" outputId="b7306528-9d34-43a6-cd67-51eb0c232ed0" colab={"base_uri": "https://localhost:8080/", "height": 474} # !pip install mglearn # + id="NaSwVwNR2QnE" colab_type="code" colab={} import pandas as pd import numpy as np from sklearn.model_selection import cross_val_score from sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression # + id="9A3ZJtYn2pwe" colab_type="code" colab={} iris = load_iris() logreg = LogisticRegression() # + id="s-qQaMVl2x4D" colab_type="code" outputId="6d3240aa-555c-455d-8812-9ac4265cd7ae" colab={"base_uri": "https://localhost:8080/", "height": 310} # cross validation takes 3 arguments: model, data, target # by default it does 3 cvs scores = cross_val_score(logreg, iris.data, iris.target) print(f'cross validation scores: {scores}') # + id="jSSGUd7n3F7Z" colab_type="code" outputId="4c35c7e4-05b3-4070-90df-882988e49dfd" colab={"base_uri": "https://localhost:8080/", "height": 419} scores2 = cross_val_score(logreg, iris.data, iris.target, cv=5) print(f'cross validation scores: {scores2}') # + id="AKCvvDYW3jXW" colab_type="code" outputId="edc8d2c6-d569-4a1b-d6b2-ee609c239a43" colab={"base_uri": "https://localhost:8080/", "height": 35} # we expect the model to be about 96% accurate # 100% accurate twice - the model is very dependant on particular folds # or could be because it's a small dataset scores.mean(), scores2.mean() # + [markdown] id="nofOw_0l7RES" colab_type="text" # ## Diagrams # + id="bn0BypOy3rMn" colab_type="code" colab={} from sklearn.model_selection import GridSearchCV import mglearn # + id="BQQ7DXos4qkO" colab_type="code" outputId="056fed2f-7dce-462b-dfb8-2cb01bcf7e4b" colab={"base_uri": "https://localhost:8080/", "height": 439} # Overview of the Process of 
Parameter Selection and Model Evaluation with GridSearchCV mglearn.plots.plot_grid_search_overview() # + id="SmLVrbFa40ND" colab_type="code" outputId="7907c32e-f58f-4d6e-a21b-aed726fc1803" colab={"base_uri": "https://localhost:8080/", "height": 186} # 5 fold cross validation image mglearn.plots.plot_cross_validation() # + id="1p6bbHvV6ORO" colab_type="code" outputId="16f9a01a-9688-43b1-bedb-1c05c1d9b9c4" colab={"base_uri": "https://localhost:8080/", "height": 108} print(iris.target) # + id="wHQBvhlv40SB" colab_type="code" outputId="7c0eec43-1b5d-4934-8857-c71dea288623" colab={"base_uri": "https://localhost:8080/", "height": 365} # stratification used when target is in order (ex: 000,111, 222,...) # a good idea for iris dataset (see above target in order) # using k-fold you get 0% accuracy # Comparison of Standard Cross Validation & Stratified Cross Validation when the Data is Ordered by Class Label mglearn.plots.plot_stratified_cross_validation() # + id="ZbCVMFIm5_pE" colab_type="code" outputId="bd25d756-2aed-49b9-fbc6-db4fc9474a17" colab={"base_uri": "https://localhost:8080/", "height": 126} # Threefold splitting into Training, Validation, and Test Data mglearn.plots.plot_threefold_split() # + id="bBBK_O1N64xE" colab_type="code" outputId="18a2664a-73d7-40dc-cc13-2e71c634149a" colab={"base_uri": "https://localhost:8080/", "height": 379} mglearn.plots.plot_cross_val_selection() # + [markdown] id="qhE8sQqP4bht" colab_type="text" # ## GridSearch with Cross Validation # + id="53p_60_78F6J" colab_type="code" colab={} param_grid = { 'C': [0.001, 0.01, 0.1, 1, 10, 100], 'gamma': [0.001, 0.01, 0.1, 1, 10, 100] } # + id="INRw5ouZ8idd" colab_type="code" colab={} from sklearn.svm import SVC from sklearn.model_selection import train_test_split # + id="_-UFoYdc8nXD" colab_type="code" outputId="25b9fe5e-fffc-4dea-d2c5-c35c83ec2fb4" colab={"base_uri": "https://localhost:8080/", "height": 92} # inside GridSearch = model, param_grid (dict), folds(cv) # GridSearchCV for when you 
want to find best parameters but also wanting to do k-folds grid_search = GridSearchCV(SVC(), param_grid, cv=5) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=0) grid_search.fit(X_train, y_train) grid_search.score(X_test, y_test) # + id="ZquIXyNy95Ql" colab_type="code" outputId="c46c8a4a-df12-4893-a264-8b55f0465f1b" colab={"base_uri": "https://localhost:8080/", "height": 35} # Note: the important thing here we did not use the test set to use paramenters # the parameters found are store in the best_params attribute # best parameters (see image above with red circle) grid_search.best_params_ # + id="LM_fzAWi-Lqe" colab_type="code" outputId="24ad36ea-fae9-45b2-f162-3570c1b43f31" colab={"base_uri": "https://localhost:8080/", "height": 35} # The best cross validation accuracy (the mean accuracy over the diff splits) is stored in best_score_ # be careful not to confuse best score with the score method - which employs a model on whole training test # the best_score_ attribute stores the mean cross validation accuracy with cross validation performed on the training set grid_search.best_score_ # + id="WEpzX5U0-z6w" colab_type="code" outputId="52eab175-48f3-463e-bdff-1dab11a6c2de" colab={"base_uri": "https://localhost:8080/", "height": 90} grid_search.best_estimator_ # + id="EoTITDU9_2Du" colab_type="code" outputId="9cfedb56-7d7e-4d98-fdc2-0d770a61ca76" colab={"base_uri": "https://localhost:8080/", "height": 292} results = pd.DataFrame(grid_search.cv_results_) # + id="VguMFT6vAVzk" colab_type="code" outputId="2de539c1-36fa-4d62-f8d5-b1e424513e2f" colab={"base_uri": "https://localhost:8080/", "height": 506} # each row in results corresponds to 1 particular parameter setting # for each setting the results of all cross validation splits are reported # as well as the mean and standard deviation for all splits display(results.head()) # + id="0XL9UR3EAaBw" colab_type="code" outputId="fbeb0636-af20-4d6e-b06f-b4ea2333075a" colab={"base_uri": 
"https://localhost:8080/", "height": 379} # Comparison of each gamma and each C value # C is the strength of regularization # a higher C value corresponds to less regularization # when you use a high value for C logistic regression tries to fit the training set as best as possible # while low values of C put more emphasis on fiding a coefficient that is closest to zero # high C == more overfitting - tries that each data point individually be classified correctly # low C == more generalization - will cause the algorithim to adjust to majority of data point scores = np.array(results.mean_test_score).reshape(6, 6) mglearn.tools.heatmap(scores, xlabel='gamma', xticklabels=param_grid['gamma'], ylabel='C', yticklabels=param_grid['C'], cmap='viridis') # + id="6AejQSFDBG3H" colab_type="code" outputId="24aac1f5-4dbd-4371-d289-43689df2791a" colab={"base_uri": "https://localhost:8080/", "height": 264} # example --> LogisticRegression(C=10) mglearn.plots.plot_linear_svc_regularization() # + id="2vHxn2qaDc-h" colab_type="code" colab={}
module-3-select-models-and-parameters/LS_DS_243_Select_models_and_parameters_Assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import matplotlib.pyplot as plt import matplotlib.dates as mdates import seaborn as sns from datetime import datetime fig, ax = plt.subplots() x = [ datetime(year=2017, month=3, day=15), datetime(year=2017, month=4, day=1), datetime(year=2017, month=5, day=15)] y = [2, 4, 5] ax.plot(x, y) formatter = mdates.DateFormatter("%d.%m.%Y") ax.xaxis.set_major_formatter(formatter) locator = mdates.MonthLocator() ax.xaxis.set_major_locator(locator) fig.autofmt_xdate() # Stellt die Beschriftungen der X-Achse schräg plt.show() # -
UDEMY_Datavis_Python/10 - datumsfunktionen/Datumswerte + Matplotlib.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Gradient Descent

# +
import numpy as np

alpha = 0.003
termination_threshold = 0.01

# Toy training data for the univariate linear model y = theta0 + theta1 * x
# (generated with theta0 = 1, theta1 = 2), so the fixed stubs below have
# something concrete to optimize.
X = np.array([0.0, 1.0, 2.0, 3.0])
Y = np.array([1.0, 3.0, 5.0, 7.0])
# -

def terminate(th0, th1, first_run):
    """Return True when the two consecutive parameter vectors th0 and th1
    are closer than termination_threshold; never terminate on the first run.
    """
    if first_run:
        return False
    # BUG FIX: original used the undefined names `true`/`false` at the call
    # site and never passed first_run; logic here is unchanged.
    return np.linalg.norm(np.subtract(np.array(th0), np.array(th1))) < termination_threshold


def hypothesis(theta0, theta1, x):
    """Linear hypothesis h(x) = theta0 + theta1 * x (x may be a scalar or array).

    BUG FIX: the original `def hypotesis()` was an empty stub (a syntax
    error); implemented — and the typo in the name corrected — here.
    """
    return theta0 + theta1 * x


# Backward-compatible alias preserving the original (misspelled) name.
hypotesis = hypothesis


def gradient(theta0, theta1):
    """Gradient of the mean-squared-error cost J = mean((h(x) - y)^2) / 2
    over the module-level training data (X, Y).

    Returns np.array([dJ/dtheta0, dJ/dtheta1]).
    (BUG FIX: the original def had an empty body — a syntax error.)
    """
    residuals = hypothesis(theta0, theta1, X) - Y
    return np.array([residuals.mean(), (residuals * X).mean()])


def gradient_descent():
    """Run batch gradient descent from (0, 0) until consecutive parameter
    vectors differ by less than termination_threshold.

    Returns the fitted (theta0, theta1) tuple.

    BUG FIXES vs. the original: both parameters are updated simultaneously
    from the gradient (the original swapped theta0/theta1 through `tmp`,
    which never converged), True/False are spelled correctly, and
    terminate() receives the first_run flag it requires.
    """
    theta = np.zeros(2)
    previous = theta
    first_run = True
    while not terminate(theta, previous, first_run):
        first_run = False
        previous = theta
        theta = theta - alpha * gradient(theta[0], theta[1])
    return tuple(theta)
Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Vectorize Asthma Trial inclusion data # ## Using Google's pre-trained Word2Vec # Pros: Pre-trained # Cons: Likely missing lots of medical terminology important to the meaning of criteria # # Important check: Capture all words that Word2Vec doesn't recognize in the asthma study subset from pymongo import MongoClient from gensim.models import Word2Vec import gensim import re import string from sklearn.feature_extraction.text import CountVectorizer from nltk.tokenize import TreebankWordTokenizer from nltk.stem import PorterStemmer from nltk.corpus import stopwords import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.decomposition import PCA import networkx as nx from itertools import combinations import community from collections import defaultdict import randomcolor from sklearn.cluster import MiniBatchKMeans from sklearn.metrics import silhouette_score from sklearn.preprocessing import StandardScaler import pickle from scipy.spatial.distance import pdist # ## Connect to Mongo clinical_trials DB def connect_to_mongo(database, collection): """ Opens a connection to a specified Mongo DB location Input Parameters: database: name of database to connect to or create (str) collection: name of collection to connect to or create (str) Returns: The connection object for the database without a collection specified The connection object for a specific Mongo location (database & collection) """ client = MongoClient() db = client[database] mongo_loc = db[collection] return db, mongo_loc trials_loc, eligibility_loc = connect_to_mongo('clinical_trials', 'eligibilities') # ## Pre-process inclusion data # + doc_cursor = eligibility_loc.find({"inclusion_criteria": { '$regex' : ".*asthma.*"}}) stoplist = 
stopwords.words('english')

inclusion_texts = []
for study in doc_cursor:
    for crit in study['cleaned_inclusion']:
        # Keep only lowercase alphabetic tokens of length >= 2.
        words = re.findall('[a-z][a-z]+', crit)
        inclusion_tokens = [[word for word in words if word not in stoplist]]
        inclusion_texts += inclusion_tokens

print(inclusion_texts[0:5])
# -

# ## Load Google's pre-trained Word2Vec model

google_vec_file = '/Users/courtney/ds/Word2Vec/GoogleNews-vectors-negative300.bin'
model = gensim.models.KeyedVectors.load_word2vec_format(google_vec_file, binary=True)

# ## Get vector for each criteria

# ### Function to vectorize each inclusion criteria

# +
missing_words = []

def get_doc_vec(words, model):
    """Return the mean word vector for the in-vocabulary words of a
    criterion, or None when no word is in the model's vocabulary.
    Out-of-vocabulary words are accumulated in the module-level
    missing_words list for later inspection."""
    good_words = []
    for word in words:
        # Words not in the original model raise KeyError on lookup.
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and masked unrelated errors.
        try:
            if model.wv[word] is not None:
                good_words.append(word)
        except KeyError:
            if word not in missing_words:
                missing_words.append(word)
            continue
    # If no words are in the original model
    if len(good_words) == 0:
        return None
    # Return the mean of the vectors for all the good words
    return model.wv[good_words].mean(axis=0)
# -

# ### Calculate number of unique keywords in the asthma criteria dataset

# Use a set for O(1) membership tests (the original list membership check
# made this loop quadratic in the number of words).
unique_words = set()
total_word_count = 0
for crit in inclusion_texts:
    for word in crit:
        total_word_count += 1
        unique_words.add(word)
unique_word_count = len(unique_words)
print("Unique words:", unique_word_count, "\nTotal words:", total_word_count)

# ### Vectorize each inclusion criteria

# #### Create empty array to fill with vectorized criteria

inclusion_vectors = np.zeros((len(inclusion_texts), 300))
inclusion_vectors.shape

# #### Vectorize criteria

for i, doc in enumerate(inclusion_texts):
    vec = get_doc_vec(doc, model)
    inclusion_vectors[i, :] = vec

print(f"\nMissing {len(missing_words)} out of {unique_word_count} unique words: {round(len(missing_words)/unique_word_count*100)}% missing\n")
print(vec, '\n')
print(inclusion_texts[-1])

# 18% of unique words are missing in Word2Vec. Most of them are numbers.
# + # print(missing_words) # - print(len(missing_words)) # #### Exploring how well GoogleNews Word2Vec performs on medical words model.most_similar('gene' ,topn=8) model.most_similar('pollen' ,topn=8) model.most_similar('mg_dL' ,topn=8) # ## Pickle vectorized eligibility criteria pickle.dump(inclusion_vectors, open("vectorized_criteria.p", "wb"))
2_vectorize_criteria_Word2Vec_googlenews.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Bluelord/Kaggle_Courses/blob/main/02%20Pandas/05%20Datatypes%20%26%20Missing%20values.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="trBY7XOiJ1g6"
# # Datatypes & Missing values
#
# ---

# + [markdown] id="AYQl8nHNJ3rg"
# ## Tutorial
#
# ---

# + id="FgWoEoIVKGF4" colab={"base_uri": "https://localhost:8080/"} outputId="f2d02491-989d-490c-89e6-bef939809182"
# Mount Google Drive so the wine-reviews dataset stored there is reachable.
from google.colab import drive
drive.mount('/content/gdrive')

# + id="PA6UrakKKPLI" colab={"base_uri": "https://localhost:8080/", "height": 281} outputId="86e2cc4a-750e-4dce-9180-117a1de3363b"
import pandas as pd
# FIX: use the fully-qualified option key. The bare 'max_rows' shorthand is
# ambiguous in modern pandas (it also matches e.g. 'styler.render.max_rows')
# and raises an OptionError instead of setting the display limit.
pd.set_option('display.max_rows', 5)
import numpy as np

reviews = pd.read_csv("/content/gdrive/MyDrive/Colab Notebooks/Kaggle_Courses/02 Pandas/winemag-data-130k-v2.csv", index_col=0)
reviews.head()

# + [markdown] id="ve246I-YJj1-"
# ### Dtypes
#
# The data type for a column in a DataFrame or a Series is known as the **dtype**.
# You can use the `dtype` property to grab the type of a specific column. For instance, we can get the dtype of the `price` column in the `reviews` DataFrame. A DataFrame or Series index has its own `dtype`.

# + id="u0s7wWJmJj2G" colab={"base_uri": "https://localhost:8080/"} outputId="5cfd370a-265b-4414-e0c2-beffef57f312"
reviews.price.dtype

# + [markdown] id="ZUqFb2MaJj2G"
# Alternatively, the `dtypes` property returns the `dtype` of _every_ column in the DataFrame:

# + id="-gzX1HKaJj2G" colab={"base_uri": "https://localhost:8080/"} outputId="02c77f05-46ee-4ca9-d56b-c9d7564d4a0d"
reviews.dtypes

# + [markdown] id="h2-gcNmVJj2H"
# Data types tell us something about how pandas is storing the data internally. `float64` means that it's using a 64-bit floating point number;
# One peculiarity to keep in mind is that columns consisting entirely of strings do not get their own type; they are instead given the `object` type.
# It's possible to convert a column of one type into another wherever such a conversion makes sense by using the `astype()` function.

# + id="esvykOOEJj2H" colab={"base_uri": "https://localhost:8080/"} outputId="354050ff-e64f-4976-e574-d2f1c47411a9"
reviews.points.astype('float64')

# + id="JtCOs61cJj2I" colab={"base_uri": "https://localhost:8080/"} outputId="0c1ee06b-45d6-4d0b-f5f7-79679f636993"
reviews.index.dtype

# + [markdown] id="16tb8Hi5Jj2J"
# ### Missing data
#
# Entries missing values are given the value `NaN`, short for "Not a Number". For technical reasons these `NaN` values are always of the `float64` dtype.
# Pandas provides some methods specific to missing data. To select `NaN` entries you can use `pd.isnull()`, its companion `pd.notnull()`

# + id="qlx5pVMGJj2J" colab={"base_uri": "https://localhost:8080/", "height": 226} outputId="01fe3de3-07e4-44b3-e5a4-1fa558229d56"
reviews[pd.isnull(reviews.country)]

# + [markdown] id="m4w8t5lLJj2K"
# Replacing missing values is a common operation. Pandas provides a really handy method for this problem, `fillna()` which provides a few different strategies for mitigating such data. For example, we can simply replace each `NaN` with an `"Unknown"`.

# + id="jC22JtADJj2K" colab={"base_uri": "https://localhost:8080/"} outputId="7484b94f-bb10-4b58-8a81-32b3978eddc7"
reviews.region_2.fillna("Unknown")

# + [markdown] id="YU_1dt1YJj2K"
# We could fill each missing value with the first non-null value that appears sometime after the given record in the database. This is known as the backfill strategy. Alternatively, we may have a non-null value that we would like to replace. For example, suppose that since this dataset was published, reviewer Kerin O'Keefe has changed her Twitter handle from `@kerinokeefe` to `@kerino`. One way is using the `replace()` method:

# + id="Zv5CkTwRJj2K" colab={"base_uri": "https://localhost:8080/"} outputId="05d8c934-ebb2-410d-b6c5-0c2db00f2e34"
reviews.taster_twitter_handle.replace("@kerinokeefe", "@kerino")

# + [markdown] id="LyNb6jy8Jj2L"
# The `replace()` method is worth mentioning here because it's handy for replacing missing data which is given some kind of sentinel value in the dataset: things like `"Unknown"`, `"Undisclosed"`, `"Invalid"`, and so on.

# + [markdown] id="gs5TR5gWgPxo"
# ## Exercise
#
# ---

# + colab={"base_uri": "https://localhost:8080/"} id="TBkV1kiBgO1p" outputId="f8409a25-49da-46c8-8141-6a9fee4b5e6d"
# What is the data type of the points column in the dataset?
reviews.points.dtypes

# + colab={"base_uri": "https://localhost:8080/"} id="fQPrVwrSixSJ" outputId="aba1baca-2418-48ad-de11-45088a63b0eb"
# Create a Series from entries in the points column, but convert the entries to strings.
reviews.points.astype('str')

# + colab={"base_uri": "https://localhost:8080/"} id="UxgqyjMOi6KS" outputId="e9332483-75d4-487d-ee14-c40f9271e0f8"
# Sometimes the price column is null. How many reviews in the dataset are missing a price?
n_missing_prices = reviews.price.isnull().sum()
n_missing_prices

# + colab={"base_uri": "https://localhost:8080/"} id="QmUEohCRjB_a" outputId="e177aa1a-9ba7-49de-8c19-6474bc331e22"
# What are the most common wine-producing regions? Create a Series counting the
# number of times each value occurs in the region_1 field. This field is often
# missing data, so replace missing values with Unknown. Sort in descending order.
reviews_per_region = reviews.region_1.fillna('Unknown').value_counts().sort_values(ascending=False)
reviews_per_region
02 Pandas/05 Datatypes & Missing values.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lingpri/congenial-sniffle/blob/main/2020.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="rhFSoYNibAjS"
# # Find the product of the numbers, whose sum is equal to 2020.

# FIX: both imports below were missing in the original notebook —
# `combinations` is used by day1_2 and `Set` by the `in1` annotation,
# so the cells crashed with NameError when run from a fresh kernel.
from itertools import combinations
from typing import Set


# + id="Q9Tbd0XqvrTs"
def data(day: int, parser=str, sep='\n') -> list:
    """Split the day's input file into sections separated by `sep`, and apply `parser` to each."""
    # FIX: use a context manager so the file handle is closed deterministically
    # (the original relied on the garbage collector to close it).
    with open(f'../content/sample_data/day{day}') as fh:
        sections = fh.read().rstrip().split(sep)
    return [parser(section) for section in sections]


# + id="XsAXaXJAwKR1"
def first(iterable, default=None) -> object:
    """Return first item in iterable, or default."""
    return next(iter(iterable), default)


def day1_1(nums):
    """Find 2 distinct numbers that sum to 2020, and return their product."""
    # `nums & {2020 - x}` is an O(1) set lookup for x's complement, so the
    # whole search is O(n) over the input set.
    return first(x * y for x in nums for y in nums & {2020 - x} if x != y)


# + id="3w1VsCPUwj4s"
def day1_2(nums):
    """Find 3 distinct numbers that sum to 2020, and return their product."""
    # Enumerate unordered pairs, then look up the third member as a set hit.
    return first(x * y * z
                 for x, y in combinations(nums, 2)
                 for z in nums & {2020 - x - y}
                 if x != z != y)


# FIX: the exploratory cells below read a puzzle-input file and print results;
# guard them behind __main__ so importing this module stays side-effect free.
if __name__ == '__main__':
    # + colab={"base_uri": "https://localhost:8080/"} id="-09EN5ec7qoC" outputId="f35e1587-b140-4a07-f578-5a6a2be9cc57"
    in1: Set[int] = set(data(1, int))
    print(in1)

    # + id="ovFuLs9EUuXj"

    # + id="906F7p9OQzbQ"
    print(day1_1(in1))  # was a bare cell-echo expression in the notebook

    # + id="qWyOknfmRCMH"
    # Manual factorisation of the part-1 answer, used as a sanity check below.
    for i in in1:
        if 889779 % i == 0:
            print(divmod(889779, i))

    # + colab={"base_uri": "https://localhost:8080/"} id="Dlu4UAjcT6MN" outputId="ba70435e-d15e-4ea7-917b-c360d9f7d3c3"
    print(1371 * 649)

    # + id="MQVP4nZceh1h"
    nums = {1732, 1972, 1822, 1371, 649}

    # + colab={"base_uri": "https://localhost:8080/"} id="7OsfvcpIevPL" outputId="54bc555a-c7b7-456d-8981-39b130949ddf"
    print(len(nums))  # was a bare cell-echo expression in the notebook

    # + colab={"base_uri": "https://localhost:8080/"} id="FD6QbUkzezv9" outputId="4ad4cac1-558b-4e76-846f-85fa0d967dca"
    # Brute-force check: print every pair in `nums` that sums to 2020.
    for j in nums:
        for i in nums & {2020 - j}:
            print(i, j, j + i, j * i)

    # + id="QXQRlZdmfGTz"
2020.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Enumerate all symmetry-distinct C/B substitution configurations of a
# primitive cell up to a maximum supercell volume, and write each one out
# as a VASP structure file.

from sagar.crystal.derive import ConfigurationGenerator
from sagar.io.vasp import read_vasp, write_vasp
from sagar.crystal.structure import symbol2number as s2n
import os
import shutil


def _get_sites(atoms, l_sub):
    """For every atom in `atoms`, return the tuple of species allowed on that
    site: the original species plus every substitute in `l_sub`."""
    return [tuple(set([i] + l_sub)) for i in atoms]


# +
# Run parameters.
symprec = 1e-3          # symmetry-detection tolerance
comprec = 1e-3          # comparison tolerance
element = 'C'           # host element
substitutes = 'B'       # substituting element(s)
max_v = 4               # maximum supercell volume (in primitive cells)
dimension = 2           # 2 => layered (2D) expansion
comment = 'CxBy'        # prefix for the output file names
# NOTE(review): 'all_vollume' looks like a typo for 'all_volume', but it is a
# runtime path — kept as-is so existing tooling that expects this directory
# name keeps working.
folder = 'all_vollume'
# -

cell = read_vasp("./primitive_cell.vasp")
cg = ConfigurationGenerator(cell, symprec)
sites = _get_sites(list(cell.atoms), [s2n(i) for i in substitutes])
confs = cg.cons_max_volume(sites, max_volume=max_v, dimension=dimension, symprec=symprec)

# Start from an empty output directory: drop any stale results first.
# (The original duplicated the mkdir call in both branches of an if/else.)
out_dir = './' + folder
if os.path.exists(out_dir):
    shutil.rmtree(out_dir)
os.mkdir(out_dir)

# Write each configuration, reduced to its primitive cell, as CxBy_id<N>.
for idx, c in enumerate(confs):
    prim = c.get_primitive_cell()
    filename = '{:s}_id{:d}'.format(comment, idx)
    write_vasp(prim, os.path.join(out_dir, filename))
examples/ipynb/all_volume.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import requests from bs4 import BeautifulSoup url = "https://dl.acm.org/doi/proceedings/10.1145/2814864" req = requests.get(url) soup = BeautifulSoup(req.content, "html.parser") print(soup.prettify()) soup.find_all("a") for link in soup.find_all("a"): print(link) print("-------") soup.title.string soup.title.parent.name for publication in soup.find_all("h5"): print(publication) print("---------------") for publication in soup.find_all("h5", {"class": "issue-item__title"}): print(publication) print("---------------") for publication in soup.find_all("h5", {"class": "issue-item__title"}): print(f"Title: {publication.string}") print(f'DOI: {publication.a["href"].replace("/doi/", "")}') print("---------------") with open("title_and_doi.tsv", "w") as title_fh: title_fh.write("title\tdoi\n") for publication in soup.find_all("h5", {"class": "issue-item__title"}): # print(f"Title: {publication.string}") # print(f'DOI: {publication.a["href"].replace("/doi/", "")}') # print("---------------") title_fh.write( f'{publication.string}\t{publication.a["href"].replace("/doi/", "")}\n')
code/Web_scraping_with_beautiful_soup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # A Demo of using RDKitMol as intermediate to generate TS by TS-GCN # # A demo to show how RDKitMol can connect RMG and TS-GCN to help predict TS geometry. TS-GCN requires a same atom ordering for the reactant and the product, which is seldomly accessible in practice. RDKitMol + RMG provides an opportunity to match reactant and product atom indexes according to RMG reaction family. <br> # # Some codes are compiled from https://github.com/ReactionMechanismGenerator/TS-GCN # # + import os import sys import subprocess from typing import Iterable # To add this RDMC into PYTHONPATH in case you haven't do it sys.path.append(os.path.dirname(os.path.abspath(''))) import numpy as np from rdkit import Chem from rdmc import RDKitMol from rdmc.forcefield import optimize_mol from rdmc.ts import get_formed_and_broken_bonds, is_DA_rxn_endo from rdmc.utils import reverse_match from rdmc.view import grid_viewer, mol_viewer try: # import RMG dependencies from rdmc.external.rmg import (from_rdkit_mol, find_reaction_family, generate_reaction_complex, load_rmg_database, ) # Load RMG database rmg_db = load_rmg_database(all_families=True) except (ImportError, ModuleNotFoundError): print('You need to install RMG-Py first and run this IPYNB in rmg_env!') # A helper function to generate molecules either from xyz or smiles # It will also note which molecules have 3D information def parse_xyz_or_smiles_list(mol_list, **kwargs): """ A function to parse xyz and smiles and list if the conformational information is provided. 
""" mols, is_3D = [], [] for mol in mol_list: if isinstance(mol, (tuple, list)) and len(mol) == 2: mol, mult = mol else: mult = None try: rd_mol = RDKitMol.FromXYZ(mol, **kwargs) except ValueError: rd_mol = RDKitMol.FromSmiles(mol,) rd_mol.EmbedConformer() is_3D.append(False) else: is_3D.append(True) finally: if mult != None: rd_mol.SaturateMol(multiplicity=mult) mols.append(rd_mol) return mols, is_3D # when doing subgraph match, RDKit will returns a list # that the index corresponds to the reference molecule # and the value corresponds to the probing molecule # This function inverse-transform the index and value relationship. def match_mols_to_complex(mols: list, mol_complex: 'RDKitMol'): """ Generate a list of lists of indexes that each item corresponding to one of the fragments in the complex. """ frags_idx = list(mol_complex.GetMolFrags()) if len(frags_idx[0]) == len(frags_idx[1]): frags = mol_complex.GetMolFrags(asMols=True) match1 = frags[0].GetSubstructMatch(mols[0]) match2 = frags[0].GetSubstructMatch(mols[1]) if match1: # Either two identical molecules # Or the first fragment is indeed the first one assigned pass elif match2: frags_idx = frags_idx[::-1] else: match1 = frags[1].GetSubstructMatch(mols[0]) match2 = frags[1].GetSubstructMatch(mols[1]) if match1: frags_idx = frags_idx[::-1] elif match2: pass else: raise RuntimeError('Have difficulty matching molecules from the complex' 'to the input molecules.') elif len(frags_idx[0]) != mols[0].GetNumAtoms(): frags_idx = frags_idx[::-1] for i in range(len(mols)): frags_idx[i] = mol_complex.GetSubstructMatch(mols[i]) return frags_idx def get_bond_length_list(mol: 'RDKitMol', match: list = [],): """ Get a list whose first element is length-2 set containing the bonded atoms, and the second element is the bond length. Args: mol (RDKitMol): the molecule match (list): A list from the subgraph match result. The atom indexes will be transformed to the matched pattern. 
""" if match: match_dict = {prb_i: ref_i for prb_i, ref_i in enumerate(match)} else: match_dict = {i: i for i in range(mol.GetNumAtoms())} conf = mol.GetConformer() # Get the bond length in the product geometry bond_length = [] for bond in mol.GetBondsAsTuples(): bond_length.append([[match_dict[atom] for atom in bond], conf.GetBondLength(bond)]) return bond_length # Experimental features # Ideally, each reaction family may have a value that works better # and the author is still trying to find those numbers # Ones recorded below are just for reference BOND_CONSTRAINT = {'1,3_Insertion_ROR': 2.5, 'Retroene': 2.5, '1,2_Insertion': 2.5, '2+2_cycloaddition_Cd': 2.5, 'Diels_alder_addition': 3.0, 'Intra_ene_reaction': 4.0, 'H_Abstraction': 3., 'Disproportionation': 3., 'H_Abstraction': 3., 'SubstitutionS': 3., # A + B = C + D 'Substitution_O': 3.,} # There are several conditions that reactants can be more informative. # 1. If a family is only breaking bonds without breaking any, # then the reactant geometry is more informative. One can # infer the geometry of products and the product alignment # from the reactant solely. (Since all reactions are elementary, # We expect stereospecificity is maintained.) # 2. 
For ketoenol, the reactant (enol) may have cis-trans, while # this info may lose in the product # the value is if product is definied as the forward direction REACTANT_MORE_INFORMATIVE_FAMILIES = { '1+2_Cycloaddition': False, # A + B = C Ring in product # '1,2_shiftS': False, # A = B Potentially 2 chiral in product vs 1 chiral center in reactant '1,4_Cyclic_birad_scission': True, # A = C Ring in product '2+2_cycloaddition': False, # A + B = C Ring in product 'Birad_recombination': False, # A = C Ring in product 'Concerted_Intra_Diels_alder_monocyclic_1,2_shiftH': False, # A = C C=CC=C in A can be less constraint # 'Cyclic_Ether_Formation': False, # A = C + D Ring structure is more constraint than single bond # 'Cyclic_Thioether_Formation': False, # A = C + D Ring structure is more constraint 'Cyclopentadiene_scission': True, # A = C Ring in product 'Diels_alder_addition': False, # A + B = C Ring in product 'Intra_2+2_cycloaddition_Cd': False, # A = C Ring in product 'Intra_5_membered_conjugated_C=C_C=C_addition': False, # A = C 'Intra_Diels_alder_monocyclic': False, # A = C Ring in product # 'Intra_RH_Add_Endocyclic': False, # A = C Ring in product # 'Intra_RH_Add_Exocyclic': False, # A = C Ring in product 'Intra_R_Add_Endocyclic': False, # A = C Ring in product 'Intra_R_Add_Exocyclic': False, # A = C Ring in product # 'Intra_R_Add_ExoTetcyclic': False, # A = C Ring in product 'Intra_Retro_Diels_alder_bicyclic': True, # A = C Ring in reactant 'R_Addition_COm': False, # A + B = C 'R_Addition_CSm': False, # A + B = C 'R_Addition_MultipleBond': False, # A + B = C 'R_Recombination': False, # An extra bond formed 'ketoenol': True, # A = B } OWN_REVERSE = [ '1,2_shiftC', # A = C '6_membered_central_C-C_shift', # A = C 'H_Abstraction', # A + B = C + D 'Intra_R_Add_Exo_scission', # A = C 'Intra_ene_reaction', # A = C 'intra_H_migration', # A = C 'SubstitutionS', # A + B = C + D 'Substitution_O', # A + B = C + D ] BIMOLECULAR = [ 'CO_Disproportionation', 
'Disproportionation', 'H_Abstraction', 'SubstitutionS', # A + B = C + D 'Substitution_O', # A + B = C + D ] # # %load_ext autoreload # # %autoreload 2 # - # ## INTPUT FIELDS # #### Forcefield arguments # - `forcefield`: The type of the forcefield to use. Available: `MMFF94s`, `MMFF94`, `UFF` # - `tol`: The convergence tolerance of the optimization # - `max_step`: The max number of step for the optimization to conduct. # # #### XYZ perception arguments # - `backends`: choose the backends for XYZ perception. It has no influence if you are using SMILES. Previously, `openbabel` xyz perception is prefered over `jensen` # - `header`: The xyz files contains a line indicates the number of atoms and a line of title/comments. If your string does not contain those two lines, set `header` to `False`. # # #### TS-GCN arguments # - `TS_GCN_PYTHON`: The path to the python executable to run TS-GCN. If an conda environment is installed # for TS-GCN, then it should be something like `CONDA_HOME_PATH/envs/ENV_NAME/bin/python # - `TS_GCN_DIR`: The path to the directory where TS-GCN is installed. # + ############### Force Field ################### # Force Field force_field_type = "MMFF94s" # Convergence criteria, Step size, Max step tol, max_step = 1e-8, 10000 ############################################### ############### XYZ Perception ################ # Backend perception algorithm backends = ['openbabel', 'jensen'] # If the input XYZ has the first two lines (atom number + title/comments) header = False ################################################ ############ TS-GCN setup ########################### TS_GCN_PYTHON = '~/Apps/anaconda3/envs/ts_gen_v2/bin/python3.7' TS_GCN_DIR = '~/Apps/ts_gen_v2' ####################################################### ############# For DA reaction only. 
################## # Whether specific a certain type of stereoisomer ('endo' or 'exo') da_stereo_specific = 'endo' ####################################################### # NOTE: The following is a testing feature not fully functioning!!!! # This is an option to inform if the user want to use their input 3D geometries # without modifications (excluding necesary alignment). Note, there are multiple cases # that such setting can results in lower TS generation success rate. E.g., the reactant # and the product have great differences in geometries of non-reacting atoms; the # some reactant conformer may not on the IRC path # force_user_input = False # - # ### 1. Input molecule information # You can input SMILEs, XYZs or mix them together. Molecule instances are then generated from the input identifiers.<br> # **RECOMMENDATIONs:** # - **Better define the single species end of the reaction as the reactant.** # - **Better put the heavier product in the first place of the list.** # - **If you need to specify the multiplicity, make the molecule instance a tuple. 
E.g., reactants = [('XYZ_STRING', 1), ('SMILES', 2)] where 1 and 2 are multiplicities.** # # Here, some examples are provided # Example: intra_H_migration # + reactants = [ """C -1.528265 0.117903 -0.48245 C -0.214051 0.632333 0.11045 C 0.185971 2.010727 -0.392941 O 0.428964 2.005838 -1.836634 O 1.53499 1.354342 -2.136876 H -1.470265 0.057863 -1.571456 H -1.761158 -0.879955 -0.103809 H -2.364396 0.775879 -0.226557 H -0.285989 0.690961 1.202293 H 0.605557 -0.056315 -0.113934 H -0.613001 2.746243 -0.275209 H 1.100271 2.372681 0.080302""", ] products = [ """C 1.765475 -0.57351 -0.068971 H 1.474015 -1.391926 -0.715328 H 2.791718 -0.529486 0.272883 C 0.741534 0.368416 0.460793 C -0.510358 0.471107 -0.412585 O -1.168692 -0.776861 -0.612765 O -1.768685 -1.15259 0.660846 H 1.164505 1.37408 0.583524 H 0.417329 0.069625 1.470788 H -1.221189 1.194071 0.001131 H -0.254525 0.771835 -1.433299 H -1.297409 -1.977953 0.837367""", ] # + reactants = [ """CCCO[O]""", ] products = [ """C 1.765475 -0.57351 -0.068971 H 1.474015 -1.391926 -0.715328 H 2.791718 -0.529486 0.272883 C 0.741534 0.368416 0.460793 C -0.510358 0.471107 -0.412585 O -1.168692 -0.776861 -0.612765 O -1.768685 -1.15259 0.660846 H 1.164505 1.37408 0.583524 H 0.417329 0.069625 1.470788 H -1.221189 1.194071 0.001131 H -0.254525 0.771835 -1.433299 H -1.297409 -1.977953 0.837367""", ] # + reactants = [ """CCCO[O]""", ] products = [ """[CH2]CCOO""", ] # - # Example: intra_OH_migration # + reactants = [ """OCCC[O]""", ] products = [ """C 1.765475 -0.57351 -0.068971 H 1.474015 -1.391926 -0.715328 H 2.791718 -0.529486 0.272883 C 0.741534 0.368416 0.460793 C -0.510358 0.471107 -0.412585 O -1.168692 -0.776861 -0.612765 O -1.768685 -1.15259 0.660846 H 1.164505 1.37408 0.583524 H 0.417329 0.069625 1.470788 H -1.221189 1.194071 0.001131 H -0.254525 0.771835 -1.433299 H -1.297409 -1.977953 0.837367""", ] # - # Example: intra_ene_reaction # + reactants = [ """C=CC=CCC""", ] products = [ """CC=CC=CC""", ] # + reactants = [ """C 
2.365139 -0.823066 0.195886 C 1.132133 -0.448278 -0.615530 C 0.601799 0.908821 -0.244634 C -0.469930 1.514001 -0.781599 C -1.310197 0.964460 -1.818982 C -2.368267 1.614877 -2.317239 H 2.724279 -1.814346 -0.098775 H 2.140638 -0.849340 1.267595 H 3.177943 -0.106912 0.034253 H 0.358910 -1.205983 -0.444261 H 1.397321 -0.462750 -1.678915 H 1.139704 1.449320 0.533030 H -0.736243 2.502985 -0.410259 H -1.070361 -0.020969 -2.210454 H -2.973523 1.164170 -3.097508 H -2.658935 2.600563 -1.968362""" ] products = [ """CC=CC=CC""", ] # - # Example: keto-enol # + reactants = [ """O 0.898799 1.722422 0.70012 C 0.293754 -0.475947 -0.083092 C -1.182804 -0.101736 -0.000207 C 1.238805 0.627529 0.330521 H 0.527921 -1.348663 0.542462 H 0.58037 -0.777872 -1.100185 H -1.45745 0.17725 1.018899 H -1.813437 -0.937615 -0.310796 H -1.404454 0.753989 -0.640868 H 2.318497 0.360641 0.272256""", ] products = [ """O 2.136128 0.058786 -0.999372 C -1.347448 0.039725 0.510465 C 0.116046 -0.220125 0.294405 C 0.810093 0.253091 -0.73937 H -1.530204 0.552623 1.461378 H -1.761309 0.662825 -0.286624 H -1.923334 -0.892154 0.536088 H 0.627132 -0.833978 1.035748 H 0.359144 0.869454 -1.510183 H 2.513751 -0.490247 -0.302535""", ] # - # Example: 2+2_cycloaddition # + reactants = [ """O -0.854577 1.055663 -0.58206 O 0.549424 1.357531 -0.196886 C -0.727718 -0.273028 -0.011573 C 0.76774 -0.043476 0.113736 H -1.066903 -1.044054 -0.706048 H -1.263435 -0.349651 0.939354 H 1.374762 -0.530738 -0.655177 H 1.220707 -0.172248 1.098653""" ] products = [ """O 0.0 0.0 0.682161 C 0.0 0.0 -0.517771 H 0.0 0.938619 -1.110195 H 0.0 -0.938619 -1.110195""", """O 0.0 0.0 0.682161 C 0.0 0.0 -0.517771 H 0.0 0.938619 -1.110195 H 0.0 -0.938619 -1.110195""", ] # - # Example: Diels_Alder # + reactants = [ """C 2.788553 0.698686 0.674316 C 2.218817 -1.464988 0.029675 C 2.516823 -0.656661 1.258397 C 2.662208 0.650837 -0.659411 C 2.310059 -0.686509 -1.057857 H 3.046237 1.573804 1.251124 H 1.969545 -2.515127 0.032875 H 1.657462 -0.631845 
1.934608 H 3.393616 -1.044418 1.784949 H 2.798561 1.473331 -1.344992 H 2.148949 -0.993686 -2.080010""", """C -0.567538 -0.593271 -0.685125 C 0.550187 -0.609810 0.305262 C -0.935561 0.922337 -0.812382 C 0.866807 0.620438 0.725116 C -2.448912 0.929590 -0.465129 C -1.921658 -1.240688 -0.288092 C 0.013503 1.688438 0.113822 C -2.886280 -0.387595 -1.126997 C -2.356832 -0.769335 1.082806 C -2.672212 0.530266 0.975717 H -0.801330 1.252767 -1.851145 H -0.225560 -0.979503 -1.653092 H 1.647573 0.834801 1.442595 H 1.040594 -1.515236 0.635305 H -2.000313 -2.316795 -0.448365 H -3.004507 1.811308 -0.786923 H -3.943990 -0.631087 -0.961967 H -2.677424 -0.424904 -2.202421 H 0.641104 2.375974 -0.463365 H -0.510855 2.269595 0.877800 H -2.347672 -1.359962 1.985934 H -2.969955 1.189249 1.776970""", ] # + reactants = [ """C 2.788553 0.698686 0.674316 C 2.218817 -1.464988 0.029675 C 2.516823 -0.656661 1.258397 C 2.662208 0.650837 -0.659411 C 2.310059 -0.686509 -1.057857 H 3.046237 1.573804 1.251124 H 1.969545 -2.515127 0.032875 H 1.657462 -0.631845 1.934608 H 3.393616 -1.044418 1.784949 H 2.798561 1.473331 -1.344992 H 2.148949 -0.993686 -2.080010""", """C1=CC2C3C=CC(C3)C2C1""", ] products = ['C1=CC2CC1C1CC3C4C=CC(C4)C3C21'] # + reactants = [ """C1=CCC=C1""", """C1=CC2C3C=CC(C3)C2C1""", ] products = ['C1=CC2CC1C1CC3C4C=CC(C4)C3C21'] # - # Example: Intra_R_Add_Endocyclic (A = B) # + reactants = ["""C=CCCO[O]""", ] products = ["""[CH2]C1CCOO1""", ] # - # Example: Retroene # + reactants = [ """CCC1C=CC=C1""", ] products = [ """C1C=CC=C1""", """C=C""", ] # - # Example: HO2_elimination # + reactants = [ """C -1.890664 -0.709255 -0.271996 C -0.601182 0.078056 -0.018811 C 0.586457 -0.545096 -0.777924 C -0.292203 0.188974 1.451901 H -0.683164 -0.56844 2.124827 C 0.477032 1.332664 2.012529 O -0.367239 2.493656 2.288335 O -0.679966 1.393013 -0.618968 O -1.811606 2.119506 -0.074789 H -1.819659 -1.711353 0.159844 H -2.063907 -0.801665 -1.346104 H -2.739557 -0.190076 0.171835 H 0.374452 -0.548385 
-1.849706 H 1.501209 0.026135 -0.608139 H 0.747239 -1.572318 -0.444379 H 1.209047 1.707778 1.296557 H 0.998836 1.047896 2.931789 H -0.994076 2.235514 2.974109 H -1.392774 2.537261 0.704151""" ] products = [ """C -1.395681 1.528483 -0.00216 C -0.402668 0.411601 -0.210813 C -0.997629 -0.972081 -0.127641 C 0.890607 0.678979 -0.433435 C 2.015631 -0.28316 -0.676721 O 2.741986 0.043989 -1.867415 H -0.923699 2.509933 -0.072949 H -2.200649 1.479183 -0.744922 H -1.873843 1.44886 0.981238 H -1.839799 -1.068706 -0.822233 H -0.283424 -1.765173 -0.346167 H -1.400492 -1.154354 0.875459 H 1.201336 1.7219 -0.466637 H 2.754241 -0.212398 0.127575 H 1.667906 -1.32225 -0.7073 H 2.101868 0.079395 -2.5857""", """O -0.168488 0.443026 0.0 O 1.006323 -0.176508 0.0 H -0.837834 -0.266518 0.0""", ] # - # Example: H abstraction # + reactants = [ """CCC[O]""", """CC(C)=C(C)C""",] products = [ """CCCO""", """[CH2]C(C)=C(C)C""",] # - # Example: Subsitution_O # This family currently have issue matching templates # + # reactants = [ # """CCCOCC""", # """[CH3]""",] # products = [ # """CCCOC""", # """[CH2]C""",] # - # Example: 1+2_Cycloaddition # + reactants = [ ("""[CH2]""", 1), """C=C""",] products = [ """C1CC1""",] # - # ## 2. Find RMG reaction and generate reactant/product complex # Check if this reaction matches RMG templates. If the reaction matches at least one RMG family, the result will be shown, and complexes will be generated. Otherwise, this notebook is not helpful to you. 
# + # Generate reactant and product complex if len(reactants) == 2 and len(products) == 1: reactants, products = products, reactants print('Warning: the reactants and the products are inverted for convenience!') for backend in backends: print(f'Using \"{backend}\" method as the XYZ perception backend.') try: # Convert XYZ to rdkit mol r_mols, r_is_3D = parse_xyz_or_smiles_list(reactants, backend=backend, header=False) p_mols, p_is_3D = parse_xyz_or_smiles_list(products, backend=backend, header=False) # Detect 3D information r_all_3D, p_all_3D = all(r_is_3D), all(p_is_3D) r_any_3D, p_any_3D = r_all_3D or any(r_is_3D), p_all_3D or any(p_is_3D) # Convert rdkit mol to RMG mol r_rmg_mols = [from_rdkit_mol(r.ToRWMol()) for r in r_mols] p_rmg_mols = [from_rdkit_mol(p.ToRWMol()) for p in p_mols] except Exception as e: print(e) print(f'Cannot generate molecule instances using {backend}...') continue else: # A product complex with the same atom indexing as the reactant is generated family_label, forward = find_reaction_family(rmg_db, r_rmg_mols, p_rmg_mols, verbose=False) r_complex, p_complex = generate_reaction_complex(rmg_db, r_rmg_mols, p_rmg_mols, only_families=[family_label], verbose=False) if not r_complex: # Cannot find the reaction continue try: # Convert complexes back from their RMG molecule forms to RDKitMol form r_complex, p_complex = RDKitMol.FromRMGMol(r_complex), RDKitMol.FromRMGMol(p_complex) except Exception as e: # There can be some problem converting RMG mol back to RDKit print(e); continue else: print('Find a match!\n'); break else: print('No matched RMG reaction is found for the given reactants and products.') if r_complex: if (# CONDITION 1: for families whose product contains more information family_label in REACTANT_MORE_INFORMATIVE_FAMILIES and \ REACTANT_MORE_INFORMATIVE_FAMILIES[family_label] != forward) or \ (# CONDITION 2: own_reverse family and information is inbalanced (family_label in OWN_REVERSE or family_label in BIMOLECULAR) and \ 
((p_all_3D and not r_all_3D) or (p_any_3D and not r_any_3D))): # For convenience, revert the sequence of reactants and products reactants, products, r_mols, p_mols, r_rmg_mols, p_rmg_mols, r_complex, p_complex = \ products, reactants, p_mols, r_mols, p_rmg_mols, r_rmg_mols, p_complex, r_complex r_is_3D, p_is_3D, r_any_3D, p_any_3D, r_all_3D, p_all_3D = \ p_is_3D, r_is_3D, p_any_3D, r_any_3D, p_all_3D, r_all_3D forward = not forward print('Warning: the reactants and the products are inverted for convenience!') reaction_type = '+'.join(['A', 'B'][:len(r_mols)]) + '=' + '+'.join(['C', 'D'][:len(p_mols)]) print(' + '.join([s.ToSmiles() for s in r_mols]) + \ ' <=> ' + \ ' + '.join([s.ToSmiles() for s in p_mols])) print(f'RMG family: {family_label}\nIs forward reaction: {forward}') print(f'This is a {reaction_type} reaction\n') # Find formed and broken bonds formed_bonds, broken_bonds = get_formed_and_broken_bonds(r_complex, p_complex) print(f'Bonds are FORMED: {formed_bonds}\nBonds are BROKEN: {broken_bonds}') only_break_bonds = not any(formed_bonds) if only_break_bonds: print(f'This is a reaction that only breaks bonds!') to_print = {(True, True): 'all of', (False, True): 'part of', (False, False): 'none of'} print(f'{to_print[(r_all_3D, r_any_3D)].capitalize()} the reactant geometries and ' f'{to_print[(p_all_3D, p_any_3D)]} the products geometries are provided.') # A state variable of the script FINISHED = False # - # ## 3. Complexes generation # ### 3.1 A = C reactions if reaction_type == 'A=C': ############################ ## 1. Geometry initialize ## ############################ # 1.1 Set the r_complex to the given geometry, otherwise, embed one. 
r_bond_length = [] if r_all_3D: r_match = r_complex.GetSubstructMatch(r_mols[0]) r_bond_length.extend(get_bond_length_list(mol=r_mols[0], match=r_match)) r_complex.SetPositions(r_mols[0].GetPositions()[reverse_match(r_match), :]) else: # TODO: May embed several times until get the desired conformers r_complex.EmbedConformer() r_complex = optimize_mol(r_complex, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in formed_bonds]) # 1.2 Grab the product information; Initialize p_complex to the same coordinates # as the r_complex unless no reactant geometry given and reactant not necessary # has more information than the product. p_bond_length = [] p_complex.SetPositions(r_complex.GetPositions()) if p_all_3D: # Match the p_complex to the product geometry p_match = p_complex.GetSubstructMatch(p_mols[0]) p_bond_length.extend(get_bond_length_list(mol=p_mols[0], match=p_match)) if not r_all_3D and family_label not in REACTANT_MORE_INFORMATIVE_FAMILIES: p_complex.SetPositions(p_mols[0].GetPositions()[reverse_match(p_match), :]) r_complex.SetPositions(p_complex.GetPositions()) r_complex = optimize_mol(r_complex, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in formed_bonds]) ############################## ## 2. Geometry optimization ## ############################## # 2.1 Optimize the product. Initial guess is the geometry r_complex, which makes sure # non-reacting coordinates won't change too much from the reactants. 
p_complex = optimize_mol(p_complex, frozen_bonds=p_bond_length, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in broken_bonds]) # 2.2 Optimize the reactant geometry again, if it is not more informative than the product, # Step 2.2 may introduce more constraints to the reactant geometry if family_label not in REACTANT_MORE_INFORMATIVE_FAMILIES: r_complex.SetPositions(p_complex.GetPositions()) r_complex = optimize_mol(r_complex, frozen_bonds=r_bond_length, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in formed_bonds]) # ### 3.2 A = C + D reactions if reaction_type == 'A=C+D': ############################ ## 1. Geometry initialize ## ############################ # 1.1 Set the r_complex to the given geometry, otherwise, embed one. r_bond_length = [] if r_all_3D: r_match = r_complex.GetSubstructMatch(r_mols[0]) r_bond_length.extend(get_bond_length_list(mol=r_mols[0], match=r_match)) r_complex.SetPositions(r_mols[0].GetPositions()[reverse_match(r_match), :]) # Check DA reaction # Only check DA now if family_label == 'Diels_alder_addition' and da_stereo_specific and \ (is_DA_rxn_endo(r_complex, p_complex, embed=True) != (da_stereo_specific == 'endo')): raise ValueError('The provided DA product doesn\'t match the stereotype ' 'required. 
You have to provide another DA product geometry!') else: # TODO: May embed several times until get the desired conformers r_complex.EmbedConformer() if family_label == 'Diels_alder_addition' and da_stereo_specific: is_endo = da_stereo_specific == 'endo' max_num_try = 100 for i in range(max_num_try): rxn_is_endo = is_DA_rxn_endo(r_complex, p_complex, embed=True) if rxn_is_endo == 'none' or rxn_is_endo == is_endo: # This reaction may not distinguish endo or exo break r_complex.EmbedConformer() else: raise RuntimeError('Have trouble to find a conformer with the desired stereo type.') r_complex = optimize_mol(r_complex, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in formed_bonds]) # 1.2 Grab the product information; Initialize p_complex to the same coordinates # as the r_complex unless no reactant geometry given and reactant not necessary # has more information than the product. p_bond_length = [] p_complex.SetPositions(r_complex.GetPositions()) if p_any_3D: new_xyz = np.zeros((p_complex.GetNumAtoms(), 3)) p_frags_idx = match_mols_to_complex(mol_complex=p_complex, mols=p_mols) for i, is_3D in enumerate(p_is_3D): if not is_3D: new_xyz[p_frags_idx[i], :] = p_complex.GetPositions()[p_frags_idx[i], :] else: # Align the geometry atom_map = list(enumerate(p_frags_idx[i])) p_mols[i].AlignMol(refMol=p_complex, atomMap=atom_map) new_xyz[p_frags_idx[i], :] = p_mols[i].GetPositions() p_bond_length.extend(get_bond_length_list(mol=p_mols[i], match=p_frags_idx[i])) p_complex.SetPositions(new_xyz) ############################## ## 2. Geometry optimization ## ############################## # 2.1 Optimize the product. Initial guess is the geometry r_complex, which makes sure # non-reacting coordinates won't change too much from the reactants. 
p_complex = optimize_mol(p_complex, frozen_bonds=p_bond_length, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in broken_bonds]) # 2.3 Optimize the reactant geometry again, if it is not more informative than the product # Step 2.3 may introduce more constraints to the reactant geometry if family_label not in REACTANT_MORE_INFORMATIVE_FAMILIES: r_complex.SetPositions(p_complex.GetPositions()) r_complex = optimize_mol(r_complex, frozen_bonds=r_bond_length, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in formed_bonds]) # ### 3.3 A + B = C + D reactions # + if reaction_type == 'A+B=C+D': # There is always a fragment being transfered between the two things # H abstraction, Disproportionation, CO_Disproportionation: H # Substitution_O, SubstitutionS: RO / RS group # And there will be one bond forms and one bond breaks # Implementation is based on this observation # Get the transfered atom transfered_atom = list(formed_bonds[0] & broken_bonds[0])[0] # Find the flux pair r_frags_idx = match_mols_to_complex(mol_complex=r_complex, mols=r_mols) p_frags_idx = match_mols_to_complex(mol_complex=p_complex, mols=p_mols) pairs = {0: 0, 1: 1} # True for both reactant to product and product to reactant if transfered_atom in r_frags_idx[0] == transfered_atom in p_frags_idx[0]: pairs = {0: 1, 1: 0} # Re-analyze xyz based on the pairs. In the pair, if both reactant and the product is provided, # Then only use the reactant one. 
# Possible cases: # - r_all_3D: use 3D geometries of reactants # - p_all_3D and not r_all_3D: not possible, due to the reactant, product switch in previous step # - r_any_3D and p_any_3D: if same pair: use 3D geometries of the reactant # if different pair: use both geometries # - r_any_3D: Use the geometry anyway # - p_any_3D and not r_any_3D: not possible, due to the reactant, product switch in previous step # - non geometry: embed r_complex if not r_all_3D and not p_all_3D and r_any_3D and p_any_3D: if (r_is_3D[0] and p_is_3D[pairs[0]]) or \ (r_is_3D[1] and p_is_3D[pairs[1]]): p_is_3D, p_any_3D = [False, False], False # First, create complexes that stores alignment information # After embed, molecules are overlapping # forcefield optimization helps de-overlapping r_complex.EmbedConformer() r_complex = optimize_mol(r_complex, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in formed_bonds]) p_complex.SetPositions(r_complex.GetPositions()) p_complex = optimize_mol(p_complex, # This is a experimental arguments # It seems that ignore interfrag_interaction helps # generate better A + B = C + D reactions ignore_interfrag_interaction=True, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in broken_bonds]) r_bond_length = [] p_bond_length = [] if r_all_3D: new_xyz = np.zeros((p_complex.GetNumAtoms(), 3)) for i in range(len(r_mols)): # Align the geometry atom_map = list(enumerate(r_frags_idx[i])) r_mols[i].AlignMol(refMol=r_complex, atomMap=atom_map) new_xyz[r_frags_idx[i], :] = r_mols[i].GetPositions() r_bond_length.extend(get_bond_length_list(mol=r_mols[i], match=r_frags_idx[i])) r_complex.SetPositions(new_xyz) r_complex = optimize_mol(r_complex, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in formed_bonds]) if p_any_3D: for i in range(len(p_mols)): if p_is_3D[i]: p_bond_length.extend(get_bond_length_list(mol=p_mols[i], match=p_frags_idx[i])) 
p_complex.SetPositions(r_complex.GetPositions()) p_complex = optimize_mol(p_complex, frozen_bonds=p_bond_length, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in broken_bonds]) elif r_any_3D and not p_any_3D: new_xyz = np.zeros((p_complex.GetNumAtoms(), 3)) for i in range(len(r_mols)): if r_is_3D[i]: # Align the geometry atom_map = list(enumerate(r_frags_idx[i])) r_mols[i].AlignMol(refMol=r_complex, atomMap=atom_map) new_xyz[r_frags_idx[i], :] = r_mols[i].GetPositions() r_bond_length.extend(get_bond_length_list(mol=r_mols[i], match=r_frags_idx[i])) else: new_xyz[r_frags_idx[i], :] = r_complex.GetPositions()[r_frags_idx[i], :] r_complex.SetPositions(new_xyz) r_complex = optimize_mol(r_complex, frozen_bonds=r_bond_length, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in formed_bonds]) p_complex.SetPositions(r_complex.GetPositions()) p_complex = optimize_mol(p_complex, frozen_bonds=p_bond_length, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in broken_bonds]) elif r_any_3D and p_any_3D: new_xyz = np.zeros((p_complex.GetNumAtoms(), 3)) for i in range(len(r_mols)): if r_is_3D[i]: # Align the geometry atom_map = list(enumerate(r_frags_idx[i])) r_mols[i].AlignMol(refMol=r_complex, atomMap=atom_map) new_xyz[r_frags_idx[i], :] = r_mols[i].GetPositions() r_bond_length.extend(get_bond_length_list(mol=r_mols[i], match=r_frags_idx[i])) else: trucated_p_frag_idx, undeteremined_idx = [], [] for i in p_frags_idx[pairs[i]]: if i in r_frags_idx[i]: tructated_p_frag_idx.append(i) else: undetermined_idx.append(i) atom_map = list(enumerate(trucated_p_frag_idx)) p_mols[pairs[i]].AlignMol(refMol=r_complex, atomMap=atom_map) new_xyz[trucated_p_frag_idx, :] = p_mols[pairs[i]].GetPositions() p_bond_length.extend(get_bond_length_list(mol=p_mols[i], match=p_frags_idx[i])) new_xyz[undetermined_idx, :] = r_complex.GetPositions()[undetermined_idx, :] r_complex.SetPositions(new_xyz) r_complex = 
optimize_mol(r_complex, frozen_bonds=r_bond_length, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in formed_bonds]) p_complex.SetPositions(r_complex.GetPositions()) p_complex = optimize_mol(p_complex, frozen_bonds=p_bond_length, frozen_non_bondings=[[bond, BOND_CONSTRAINT.get(family_label, 3.0)] for bond in broken_bonds]) if p_any_3D: new_xyz = np.zeros((p_complex.GetNumAtoms(), 3)) p_frags_idx = match_mols_to_complex(mol_complex=p_complex, mols=p_mols) for i, is_3D in enumerate(p_is_3D): if not is_3D: new_xyz[p_frags_idx[i], :] = p_complex.GetPositions()[p_frags_idx[i], :] else: # Align the geometry atom_map = list(enumerate(p_frags_idx[i])) p_mols[i].AlignMol(refMol=p_complex, atomMap=atom_map) new_xyz[p_frags_idx[i], :] = p_mols[i].GetPositions() p_bond_length.extend(get_bond_length_list(mol=p_mols[i], match=p_frags_idx[i])) p_complex.SetPositions(new_xyz) # - # ### Find the best atom mapping by RMSD. # At this point, all heavy atoms are mapped, but some H atoms may be no longer mapped, for example due to a rotation in the methyl rotor during the optimization. We recommend you to do this step, but it is not a requirement though # # NOTE: # 1. this can perform relatively poorly if the reactant and the product are in different stereotype (cis/trans). or most rotors are significantly different oriented. However, previous step (match according to RMG reaction) makes sure that all heavy atoms and reacting H atoms are consistent, so only H atoms that are more trivial are influenced by this. # 2. AlignMol can yields wrong numbers, we switch to `GetBestRMS` and `CalcRMS`. 
# Whether to find better matches by reflecting the molecule (resulting in mirror image) reflect = False # + # Generate substructure matches # There is no difference using `p_combine` or `p_complex` as the argument # Since both of them have the same connectivity information matches = p_complex.GetSubstructMatches(p_complex, uniquify=False) # Make a copy of p_combine to preserve its original information p_align = p_complex.Copy() weights = [atom.GetMass() for atom in p_align.GetAtoms()] rmsds = [] # Align the combined complex to the rmg generated complex # According to different mapping and find the best one. for i, match in enumerate(matches): atom_map = [list(enumerate(match))] rmsd1 = Chem.rdMolAlign.CalcRMS(prbMol=p_align.ToRWMol(), refMol=r_complex.ToRWMol(), map=atom_map, weights=weights) if reflect: p_align.Reflect() rmsd2 = Chem.rdMolAlign.CalcRMS(prbMol=p_align.ToRWMol(), refMol=r_complex.ToRWMol(), map=atom_map, weights=weights) p_align.Reflect() else: rmsd2 = 1e10 if rmsd1 > rmsd2: rmsds.append((i, True, rmsd2,)) else: rmsds.append((i, False, rmsd1,)) best = sorted(rmsds, key=lambda x: x[2])[0] print('Match index: {0}, Reflect Conformation: {1}, RMSD: {2}'.format(*best)) # Realign and reorder atom indexes according to the best match best_match = matches[best[0]] p_align.AlignMol(refMol=r_complex, atomMap=list(enumerate(best_match)), weights=weights) if best[1]: p_align.Reflect() new_atom_indexes = [best_match.index(i) for i in range(len(best_match))] p_align = p_align.RenumberAtoms(new_atom_indexes) # - # ### 4. View Complexes # + mols_to_view = [r_complex, p_align] entries = len(mols_to_view) viewer = grid_viewer(viewer_grid=(1, entries), viewer_size=(240 * entries, 300),) for i in range(entries): mol_viewer(mols_to_view[i], viewer=viewer, viewer_loc=(0, i)) print('reactant complex product complex') viewer.show() # - # ### 5. 
Export to SDF file and run ts_gen r_complex.ToSDFFile('reactant.sdf') p_align.ToSDFFile('product.sdf') # #### 5.1 TS GCN try: subprocess.run(f'export PYTHONPATH=$PYTHONPATH:{TS_GCN_DIR};' f'{TS_GCN_PYTHON} {TS_GCN_DIR}/inference.py ' f'--r_sdf_path reactant.sdf ' f'--p_sdf_path product.sdf ' f'--ts_xyz_path TS.xyz', check=True, shell=True) except subprocess.CalledProcessError as e: print(e) else: with open('TS.xyz', 'r') as f: ts_xyz=f.read() ts = RDKitMol.FromXYZ(ts_xyz) # ### 6. Visualize TS # + # Align the TS to make visualization more convenient atom_map = [(i, i) for i in range(r_complex.GetNumAtoms())] ts.GetBestAlign(refMol=r_complex, atomMap=atom_map, keepBestConformer=True) # View results in 3D geometries mols_to_view = [r_complex, ts, p_align,] entries = len(mols_to_view) viewer = grid_viewer(viewer_grid=(1, entries), viewer_size=(300 * entries, 300),) for i in range(entries): mol_viewer(mols_to_view[i], viewer=viewer, viewer_loc=(0, i)) print('reactant TS product') viewer.show() # -
ipython/TS-GCN+RDMC_v5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# RSNA intracranial-haemorrhage notebook: trains an fp16 resnet34 with
# progressive resizing, builds a Kaggle submission, and searches for a
# logit-scaling post-process.  Several helpers (get_data, get_loss, do_fit,
# opt_func, metrics, avg_lbls, df_tst, htypes, fn2label, ...) presumably come
# from rsnautils -- its import is commented out below, so they must already be
# in scope when this runs.  TODO confirm.

import torch
from fastai2.basics import *
from fastai2.vision.all import *
from fastai2.medical.imaging import *
from fastai2.callback.tracker import *
from fastai2.callback.all import *

torch.cuda.set_device(3)

path = Path('~/data/rsna').expanduser()
path_meta = path/'meta'
# Per-slice metadata keyed by DICOM SOPInstanceUID.
df_comb = pd.read_feather(path_meta/'df_trn2_comb.fth').set_index('SOPInstanceUID')
df_comb.shape

df_comb.head()

# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# from rsnautils import *
nw = 8             # dataloader worker processes
pre = 'rn34_repl'  # checkpoint-name prefix
# -

# ## Pretrain on sample

df_comb.fname

def filename(o):
    """Return the basename of path *o* without its extension."""
    return os.path.splitext(os.path.basename(o))[0]

fns = L(list(df_comb.fname)).map(filename)
fn = fns[0]
fn

# SOP UIDs reserved for the validation split.
sops = set(Path('val_sops.pkl').load())

def split_data(df):
    """Return (train_idx, valid_idx): rows whose index is in `sops` validate."""
    idx = L.range(df)
    #mask = [o in sops for o in fns]
    #pgrp = patient_cv(cv_idx)
    mask = df.index.isin(sops)
    return idx[~mask],idx[mask]

splits = split_data(df_comb)
fns = L(list(df_comb.index))

# +
def get_data_gen(fns, bs, img_tfm, mean, std, splits, sz=None, nw=8, wgts=None,
                 batch_xtra=None, after_item=None, with_aug=True, **kwargs):
    """Build (optionally weighted) DataLoaders.

    Items go through `img_tfm`; targets are multi-label haemorrhage types.
    Batches are normalised with (mean, std), optionally augmented
    (**kwargs go to aug_transforms) and random-resize-cropped to `sz`.
    """
    tfms = [[img_tfm], [fn2label,EncodedMultiCategorize(htypes)]]
    dsets = Datasets(fns, tfms, splits=splits)
    nrm = Normalize(tensor(mean),tensor(std))
    batch_tfms = L(nrm, Cuda()) + L(batch_xtra)
    if with_aug: batch_tfms += aug_transforms(**kwargs)
    if sz is not None:
        batch_tfms = batch_tfms+[RandomResizedCropGPU(sz, min_scale=0.7, ratio=(1.,1.), valid_scale=0.9)]
    if wgts is None:
        return dsets.dataloaders(bs=bs, num_workers=nw, after_item=after_item, after_batch=batch_tfms)
    else:
        return dsets.weighted_dataloaders(wgts, bs=bs, num_workers=nw, after_item=after_item, after_batch=batch_tfms)

# def get_data_pil(fns, bs, splits, sz=None, use_hist=True, nw=8, path=None,
#                  wgts=None, mean=None, std=None, with_aug=True, batch_xtra=None):
#     remover = [] if path is not None else [[remove_hist,remove_soft][use_hist]]
#     if path is None: path = [path_jpg256,path_jpg512][sz is None or sz>256]
#     if mean is None: mean = [0.2,0.2,0.4] if use_hist else [0.2]
#     if std is None: std = [0.3]
#     batch_xtra = L(batch_xtra) + [IntToFloatTensor()] + remover
#     return get_data_gen(fns, bs, get_pil_fn(path), mean, std, splits=splits, sz=sz, wgts=wgts, with_aug=with_aug,
#                         batch_xtra=batch_xtra, after_item=[ToTensor], nw=nw, max_rotate=30.)
# -

# NOTE(review): the original line here was
#   dbch = get_data_gen(fns, bs=512, sz=128, splits)
# which is a SyntaxError (positional argument follows keyword arguments) and is
# also missing the required img_tfm/mean/std arguments; its result was in any
# case immediately overwritten by the get_data call below, so it is disabled.
# dbch = get_data_gen(fns, bs=512, sz=128, splits=splits)
dbch = get_data(512, sz=128)

xb,yb = to_cpu(dbch.one_batch())
# dbch.show_batch(max_n=4, figsize=(9,6))
xb.mean(),xb.std(),xb.shape,len(dbch.train)

show_images(xb[0])

loss_func = get_loss()

def get_learner(lf=loss_func, fp16=True):
    """cnn_learner on the current `dbch`, optionally in mixed precision."""
    learn = cnn_learner(dbch, resnet34, pretrained=False, loss_func=lf, opt_func=opt_func, metrics=metrics)
    if fp16: learn = learn.to_fp16()
    return learn

def fit_tune(bs, sz, epochs, lr):
    """Swap the learner's data to (bs, sz) and fine-tune for `epochs` at `lr`."""
    learn.dls = get_data(bs, sz)
    do_fit(learn, epochs, lr, freeze=False)

learn = get_learner()

#TODO same thing for single label clas
# Initialise the head bias to logit(avg_lbls) -- presumably the label base
# rates, so the net starts at the prior; confirm against rsnautils.
learn.model[1][8].bias.data = to_device(logit(avg_lbls))

# Progressive resizing schedule: 128 -> 224 -> 352 -> 512 px.
fit_tune(256, 128, 20, 3e-2)
learn.save(f'{pre}a1')
fit_tune(512, 224, 10, 6e-3)
learn.save(f'{pre}a2')
fit_tune(256, 352, 4, 2e-3)
learn.save(f'{pre}a3')
fit_tune(96, 512, 3, 1e-4)
learn.save(f'{pre}a4')

# ## Prepare for submission

# Predict with the 352px checkpoint.
learn.dls = get_data(128, 352)
learn.load(f'{pre}a3')

path_tst = path/'tst_jpg'
test_fns = [(path_tst/f'{filename(o)}.jpg').absolute() for o in df_tst.fname.values]
fn = test_fns[0]

tst = test_dl(learn.dls, test_fns)
x = tst.one_batch()[0]
x.min(),x.max(),x.mean(),x.std(),x.shape

show_images(x[0])

preds,targs = learn.get_preds(dl=tst)
# Temper the probabilities by shrinking the logits (x0.88), then clamp away 0/1.
preds_scl = (logit(preds)*0.88).sigmoid()
probs = preds_scl.clamp(1e-4,1-1e-4)

def submission(df_tst, preds):
    """Build the Kaggle submission frame: one '<sop>_<htype>' row per
    (image, haemorrhage-type) pair, with 10-decimal probabilities."""
    ids,labels = [],[]
    for idx,pred in zip(df_tst.index, preds):
        for i,label in enumerate(htypes):
            ids.append(f"{idx}_{label}")
            labels.append('{0:1.10f}'.format(pred[i].item()))
    df_csv = pd.DataFrame({'ID': ids, 'Label': labels})
    return df_csv

pred_csv = submission(df_tst, probs)

# +
# Sanity-check against a previous submission: correlation of the two label sets.
prev_csv = pd.read_csv('subm/subm_avg.csv')
pred2_df = pred_csv.merge(prev_csv, on='ID')
pred2_samp = pred2_df.sample(10000)
np.corrcoef(pred2_samp.Label_x.astype(np.float64),pred2_samp.Label_y, rowvar=False)

x,y = tensor(pred2_samp.Label_x.astype(np.float32)),tensor(pred2_samp.Label_y.astype(np.float32))
plt.figure(figsize=(7,7))
plt.scatter(logit(x),logit(y), s=1, alpha=0.3);
# -

nm = 'replace_ashaw_refactor_scl_088'
pred_csv.to_csv(f'subm/{nm}.csv', index=False)

from IPython.display import FileLink, FileLinks
FileLink(f'subm/{nm}.csv')

from kaggle import api
api.competition_submit(f'subm/{nm}.csv', '0.0744,rn34_repl (scl 0.88)', 'rsna-intracranial-hemorrhage-detection')

# Search for the logit scale that minimises a reweighted BCE on the default
# get_preds() predictions (presumably the validation set -- confirm).
t = to_cpu(learn.get_preds(act=noop))
preds,targs = t

lw2 = tensor(2.0, 1, 1, 1, 1, 1)
lf2 = BaseLoss(nn.BCEWithLogitsLoss, pos_weight=lw2, floatify=True, flatten=False, is_2d=False, activation=torch.sigmoid)
lf2(preds, targs)

def f(x):
    """Reweighted BCE of the logits scaled by x."""
    return lf2(preds*x,targs)

scipy.optimize.minimize_scalar(f, bounds=(0.2,2), method='Bounded', options={'xatol':0.001})

pre
orig_files/04_replace_ashaw_refactor.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Z- and X-magnetisation of a small transverse-field Ising chain under
# Trotterised time evolution.  `trotter_evolve` and `mag` come from the
# project modules imported below.

from expectation_value import *
from trotter_evolve import *

# +
import numpy as np
from qiskit import Aer
from qiskit.utils import QuantumInstance

# Exact statevector backend: expectation values carry no sampling noise.
backend = Aer.get_backend('statevector_simulator')
instance = QuantumInstance(backend)

tfinal = 2.
ntimes = 50
dt = 0.005
times = np.linspace(0., tfinal, ntimes)
# Skip t=0; hoisted once instead of re-slicing `times[1:len(times)]` at every use.
eval_times = times[1:]

n_spins = 3
J = 1./4.
h = 1.

magnetisations_z = []
magnetisations_x = []
for time in eval_times:
    # NOTE(review): the last argument is time*dt, so the step passed to
    # trotter_evolve grows with t -- confirm against trotter_evolve's
    # signature that this is intended rather than a plain dt.
    qc = trotter_evolve(n_spins, J, h, time, time*dt)
    magnetisations_z.append(mag(qc, instance, 'Z'))
    magnetisations_x.append(mag(qc, instance, 'X'))

# +
from matplotlib import pyplot as plt

plt.plot(eval_times, magnetisations_z)
# -

plt.plot(eval_times, magnetisations_x)

print(magnetisations_x)
print(magnetisations_z)
Guided_exercises/p-VQD/Trotter/magnetizations_trotter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Compare k-means (TensorFlow) against two GPU mean-shift variants --
# LSH-approximated neighbours vs. random candidate draws -- on synthetic
# Gaussian clusters, then plot their convergence rates.

# %matplotlib inline
import math, numpy as np, tensorflow as tf, matplotlib.pyplot as plt, operator
from importlib import reload
import kmeans; reload(kmeans)
from kmeans import Kmeans

n_clusters=6
n_samples =25000

# Synthetic data: n_clusters isotropic 2-D Gaussians (variance 5) around
# uniformly drawn centres in [-35, 35]^2.
centroids = np.random.uniform(-35, 35, (n_clusters, 2))
slices = [np.random.multivariate_normal(centroids[i], np.diag([5., 5.]), n_samples)
          for i in range(n_clusters)]
data = np.concatenate(slices).astype(np.float32)

kmeans.plot_data(centroids, data, n_samples)

# # Kmeans

k = Kmeans(data, n_clusters)
with tf.Session().as_default():
    # NOTE(review): `%time` is an IPython magic; in plain-Python form this
    # leaves the `with` body empty -- runnable only as a notebook.
    # %time new_centroids = k.run()

kmeans.plot_data(new_centroids, data, n_samples)

# # Mean shift with LSH

def gaussian(d, bw):
    # Gaussian kernel weight for distance d at bandwidth bw.
    return torch.exp(-0.5*((d/bw))**2) / (bw*math.sqrt(2*math.pi))

def dist_b(a,b):
    # Batched pairwise Euclidean distances between rows of a and rows of b.
    # NOTE(review): `sub` comes from torch_utils (imported below) -- presumably
    # a broadcasting subtraction; verify its argument order there.
    return torch.sqrt((sub(a.unsqueeze(0),b.unsqueeze(1))**2).sum(2))

# Sum over `axis`, then drop that (now size-1) axis.
def sum_sqz(a,axis): return a.sum(axis).squeeze(axis)

# +
import torch
from torch_utils import *
from pytorch_lshash import PyTorchLSHash

def meanshift_lsh(data, bs=500):
    """Mean shift on the GPU: for each mini-batch of `bs` points, candidate
    neighbours come from a locality-sensitive hash; 5 iterations."""
    n = len(data)
    X = torch.FloatTensor(np.copy(data)).cuda()
    for it in range(5):
        # Re-hash every iteration because the points have moved.
        pylsh = PyTorchLSHash(6, 2)
        pylsh.index(X)
        for i in range(0,n,bs):
            s = slice(i,min(n, i+bs))
            candidates = pylsh.query(s, X, bs)
            # Kernel-weighted mean of the candidates shifts each point uphill.
            weight = gaussian(dist_b(candidates, X[s]), 2)
            num = sum_sqz(mul(weight, candidates), 1)
            X[s] = div(num, sum_sqz(weight, 1))
    return X
# -

# %time X=meanshift_lsh(data)

kmeans.plot_data(new_centroids, X.cpu().numpy(), n_samples)

# # Meanshift with random draw

def meanshift_random(data, bs=500):
    """Baseline mean shift: candidates are `bs` randomly drawn points rather
    than LSH-selected neighbours; 5 iterations."""
    n = len(data)
    X = torch.FloatTensor(np.copy(data)).cuda()
    for it in range(5):
        for i in range(0,n,bs):
            s = slice(i,min(n,i+bs))
            candidates = X[torch.randperm(len(X))[slice(0,bs)].long().cuda()]
            weight = gaussian(dist_b(candidates, X[s]), 2)
            num = sum_sqz(mul(weight, candidates), 1)
            X[s] = div(num, sum_sqz(weight, 1))
    return X

# %time X=meanshift_random(data)

kmeans.plot_data(new_centroids, X.cpu().numpy(), n_samples)

# # Comparison in Convergence

# +
def meanshift_lsh_record_conv(data, bs=500):
    """Like meanshift_lsh, but also records the total per-iteration point
    displacement in `conv` as a convergence measure."""
    n = len(data)
    X = torch.FloatTensor(np.copy(data)).cuda()
    conv = []
    # NOTE(review): this runs 10 iterations while the random-draw variant
    # below runs 5 -- confirm the comparison plot is meant to mix lengths.
    for it in range(10):
        pylsh = PyTorchLSHash(6, 2)
        pylsh.index(X)
        Y = torch.zeros(X.size()).cuda()
        for i in range(0,n,bs):
            s = slice(i,min(n, i+bs))
            candidates = pylsh.query(s, X, bs)
            weight = gaussian(dist_b(candidates, X[s]), 2)
            num = sum_sqz(mul(weight, candidates), 1)
            Y[s] = div(num, sum_sqz(weight, 1))
        # Total movement this iteration: sum of per-point shift magnitudes.
        conv.append(torch.sqrt(((Y-X)**2).sum(1)).sum())
        X = Y
    return X, conv

def meanshift_random_record_conv(data, bs=500):
    """Random-draw mean shift with the same per-iteration displacement record."""
    n = len(data)
    X = torch.FloatTensor(np.copy(data)).cuda()
    conv = []
    for it in range(5):
        Y = torch.zeros(X.size()).cuda()
        for i in range(0,n,bs):
            s = slice(i,min(n,i+bs))
            candidates = X[torch.randperm(len(X))[slice(0,bs)].long().cuda()]
            weight = gaussian(dist_b(candidates, X[s]), 2)
            num = sum_sqz(mul(weight, candidates), 1)
            Y[s] = div(num, sum_sqz(weight, 1))
        conv.append(torch.sqrt(((Y-X)**2).sum(1)).sum())
        X = Y
    return X, conv

def plot_convergence():
    """Plot total displacement per iteration for both mean-shift variants."""
    X1, conv1 = meanshift_lsh_record_conv(data)
    X2, conv2 = meanshift_random_record_conv(data)
    # `colour` is computed but never used -- presumably a leftover from a
    # per-cluster colouring; TODO remove or use.
    colour = plt.cm.rainbow(np.linspace(0,1,len(centroids)))
    fig = plt.figure(figsize=(10, 6))
    ax = fig.add_subplot(1, 1, 1)
    plt.plot(conv1, label='LSH approximation')
    plt.plot(conv2,'g--', label='random draw')
    ax.set_title('Convergence Rate', size=20)
    ax.set_xlabel('Iteration', size=16)
    plt.legend(loc="best")

plot_convergence()
# -
meanshift_test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: readToMe
#     language: python
#     name: readtome
# ---

# ## SSD ReadToMe Model Training for DeepLens
#
# #### You can run the following steps to train the model for the ReadToMeProject or you can replace the custom data with your own custom data set and train your own object detection model

# #### First, let's install some dependencies. I am running Cuda 9.1 on my system so I am grabbing the MXNet version which is built for Cuda 9.1, you may need to adjust accordingly depending on what is installed on your machine.

# + language="bash"
#
# pip install mxnet-cu91
# pip install numpy
# pip install opencv-python
# pip install matplotlib
# -

# #### Next, we need to grab the MXNet repo from Github

# + language="bash"
#
# echo checking for incubator-mxnet
# DIR=incubator-mxnet
# if [[ -d $DIR ]]; then
#     echo found existing git repo
#     echo deleting incubator-mxnet.git
#     rm -rf incubator-mxnet
# fi
#
# git clone --recursive https://github.com/apache/incubator-mxnet.git
# -

# #### This is where we deviate from the example instructions on Github. The example tells us to grab a model from this [list](https://github.com/apache/incubator-mxnet/tree/master/example/ssd#map) however, these models do not work with the current version of MXNet, at least not without modifying the symbol names. So let's grab a pretrained model that does work.

# + language="bash"
#
# rm -rf incubator-mxnet/example/ssd/model/*
# cd incubator-mxnet/example/ssd/model/
# wget https://github.com/zhreshold/mxnet-ssd/releases/download/v0.2-beta/vgg16_reduced.zip
# unzip vgg16_reduced.zip
# rm vgg16_reduced.zip
# mv vgg16_reduced-symbol.json ssd_vgg16_reduced_300-symbol.json
# mv vgg16_reduced-0001.params ssd_vgg16_reduced_300-0001.params
# -

# #### Now we need to organize our data into directories so that the example scripts will work. You can read more about this structure by Googling Pascal VOC. There are other Pascal VOC datasets available online that you can use to train models with. If you plan to train a model on your own dataset, you should first check to see if someone else has already made it available online.

# +
import os
import zipfile
import shutil
from pathlib import Path
import random

# Remove any training directory left over from a previous run.
if os.path.exists("incubator-mxnet/example/ssd/data/VOCdevkit/VOC2018"):
    shutil.rmtree('incubator-mxnet/example/ssd/data/VOCdevkit/VOC2018')

# Lay out the Pascal-VOC directory structure for the custom text-block dataset.
os.makedirs('incubator-mxnet/example/ssd/data/VOCdevkit/VOC2018/JPEGImages')
os.makedirs('incubator-mxnet/example/ssd/data/VOCdevkit/VOC2018/Annotations')

destination_dir = 'incubator-mxnet/example/ssd/data/VOCdevkit/VOC2018/'
# Images go to JPEGImages/, Pascal-VOC annotation XMLs to Annotations/.
for file in os.listdir('2018'):
    if file.endswith('.jpg'):
        shutil.copy(os.path.join('2018', file), destination_dir + 'JPEGImages/' + file)
    elif file.endswith('.xml'):
        shutil.copy(os.path.join('2018', file), destination_dir + 'Annotations/' + file)

# Collect the extension-less basenames of every copied image.
files = [Path(filename).stem
         for filename in os.listdir('incubator-mxnet/example/ssd/data/VOCdevkit/VOC2018/JPEGImages/')
         if filename.endswith('.jpg')]

# Take 10% of the data for validation; the rest goes to training.
training = []
validation = []
validationPercent = 10
k = len(files) * validationPercent // 100
# A set gives O(1) membership tests below (the original list made the split
# loop O(n*k)); random.sample is still called identically.
indices = set(random.sample(range(len(files)), k))

for index, file in enumerate(files):
    if index in indices:
        validation.append(file)
    else:
        training.append(file)

print(len(files))
print(len(training))
print(len(validation))

os.makedirs('incubator-mxnet/example/ssd/data/VOCdevkit/VOC2018/ImageSets/Main/')

# One basename per line, as expected by the im2rec tooling.
with open('incubator-mxnet/example/ssd/data/VOCdevkit/VOC2018/ImageSets/Main/trainval.txt', 'w') as training_list:
    for row in training:
        training_list.write('{}\n'.format(row))

with open('incubator-mxnet/example/ssd/data/VOCdevkit/VOC2018/ImageSets/Main/test.txt', 'w') as validation_list:
    for row in validation:
        validation_list.write('{}\n'.format(row))
# -

# #### It is recommended that we turn our dataset into .rec files so that MXNet can iterate over the data much more efficiently.
#
# #### This next step calls a wrapper script which ultimately calls im2rec.py. This generates a ".rec" file for our training dataset and one for our validation dataset.

# + language="bash"
#
# # change directories to the example/ssd directory
# cd incubator-mxnet/example/ssd
# # update the names list to only include our single class name 'text_block'
# echo text_block > dataset/names/pascal_voc.names
#
# # generate the .rec files we will use to train with
# python tools/prepare_dataset.py --dataset pascal --year 2018 --set test --target ./data/test.lst --root data/VOCdevkit/
# python tools/prepare_dataset.py --dataset pascal --year 2018 --set trainval --target ./data/train.lst --root data/VOCdevkit/
# -

# #### Finally we can call train on our data. This will run for 250 epochs by default, but you can change that by passing in the "--end-epoch" flag and specifying an epoch you would like to stop at.
#
# #### You can view the progress of this training by switching to a terminal and tailing the train.log file
#
# ##### i.e. "tail -f train.log"
#
# #### Once the validation is acceptable, you can stop training.

# + language="bash"
#
# # change directories to the example/ssd directory
# cd incubator-mxnet/example/ssd
#
# #finetune the model using our custom data set
# python train.py --train-path data/train.rec --val-path data/test.rec --class-names text_block --num-class 1 --finetune 1 --gpus 0
# -

# #### You can test the model by calling the following command from the terminal
#
# **(Note: You will need to change the path of the image to point to a sample image in your dataset.)**
#
# `python demo.py --epoch 2 --network vgg16_reduced --images ./data/VOCdevkit/VOC2018/JPEGImages/MVIMG_20180129_210518.jpg --thresh 0.5 --data-shape 300 --class-names text_block --gpu 0`
#
# #### You should also evaluate the model against the test dataset using the following command
#
# **(Note: You will need to change the epoch flag to point to the epoch you would like to evaluate.)**
#
# `python evaluate.py --gpus 0 --network vgg16_reduced --epoch 184 --class-names text_block --num-class 1 --rec-path data/test.rec`
#
#
# #### Once you are finished you will need to deploy the model
# **Pick the best epoch and deploy the model using the following command**
#
# `python deploy.py --network vgg16_reduced --epoch 172 --num-class 1 --data-shape 300`

# #### In order to deploy your model to the deeplens, you will need to tar.gz up your ".params" and your ".json" deployed model files and create a new model in the DeepLens Service inside the AWS Console online. You can then refer to the model using the model name in your project's lambda file. To optimize the model using the DeepLens Model Optimizer package, follow the instructions [here](https://docs.aws.amazon.com/deeplens/latest/dg/deeplens-model-optimizer-api-methods.html)
ReadToMe Model Training.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## LIST INTRODUCTION # + ### ----- LIST ----- ### #Sequence of numbers/strings/any other data type # - # ## 1). Creating an empty list: # There are two ways to create an empty list: # #Empty List l1 = [] l2 = list() print(l1) print(l2) # ## 2). Indexing and slicing: a = [1,5,2,5,2,6,3,6,2,3,5] #Indexing b = a[5] print(b) #Slicing (start,stop,step) c = a[3:5:2] d = a[3:] e = a[:-2] print(c) print(d) print(e) # ## 3). Built-in-functions: #dir() a = [1,2,3] dir(a) #1. append() a = [1,2,3] a.append(4) print(a) #2. insert(),remove() a = [1,2,3,1,1,1] a.insert(2,10) a.remove(1) print(a) #3. sort() [ Both A.O and D.O ] & copy() function a = [6,7,2,5,3,9,7,11,45,31] a.sort() b = a.copy() b.sort(reverse = True) print(a,b) #4. clear() a = [1,2,3,4,5] b = a.copy() print(b) b.clear() print(b) #5. count() a = [1,2,2,3,3,3,4,4,4,4] a.count(3) #6. index() a = [4,2,5,2,1,3,6,8,3,3,3,2] a.index(3) #7. pop() a = [1,2,3,4,5,6,7,8,9,10] c = a.pop() b = a.copy() print(c,'\n',b) b.pop(6) print(b) #8. reverse() a = [5,3,1,6,3,2,5,32,1] print(a) b = a.copy() b.reverse() print(b) #9. extend() a = [1,2,3] b = [4,5,6] a.extend(b) print(a) b = a.copy() b.extend([7,8,9]) print(b) #10. copy() a = [1,2,3] b = [] b = a.copy() print(b) # ## 4). Nested Lists (with indexing and slicing): # + #NESTED LIST #one can have many lists in a list, here are a few types.... 
#type 1
l1 = [1,2,3,4]
l2 = [5,6,7,8,9,10]
matrix1 = [l1,l2]
print(matrix1)

#indexing and slicing for type1
#indexing
print(matrix1[0][1])
#slicing
# ----- forward order -----
print(matrix1[1][1:-2])
# ----- reverse order -----
print(matrix1[1][-3:-6:-1])
print('-------------------------------------')

#type2
l1 = [1,2,3]
l2 = [4,5,6]
l3 = [7,8,9]
matrix2 = [l1,l2,l3]
print(matrix2)

#indexing and slicing for type2
#indexing
print(matrix2[2][1])
#slicing
# ----- forward order -----
print(matrix2[0][0:2])
# ----- reverse order -----
print(matrix2[0][-2:-4:-1])
# -

# ## 5). List comprehension:

# +
#LIST COMPREHENSION
#Syntax: [expression for item in list if condition]

l1 = [1,2,3]
print([t**2 for t in l1])
# NOTE: in Python 3 the loop variable t exists only inside the comprehension's
# own scope, so referencing it afterwards (e.g. type(t)) raises NameError.
# The original cell did exactly that and crashed.
# +
l1 = [1,2,3]
l2 = [2,3,4]
l3 = [4,5,6]
matrix = [l1,l2,l3]
print(matrix)

first_col = [row[0] for row in matrix]
print(first_col)

first_col_op = [row[0]*2 for row in matrix]
print(first_col_op)
# -

# # CONVERSIONS:

# ## Simple lists to other data types:

li = [21,53,4,23,5,46,7,23,3,4,6,57]

# ### List -> Tuple
tup = tuple(li)
print(tup)

# ### List->Set
se = set(li)
print(se)

# ### List->Dictionary
# dict() needs an iterable of key-value PAIRS, so a flat list of numbers cannot
# be converted directly -- dict(li) raises TypeError. Pair up consecutive
# elements instead: (21: 53), (4: 23), ...
di = dict(zip(li[::2], li[1::2]))
print(di)

# ## Nested List to other data types:

li = [[23,125,32],232,32,56,[325,52],64,[34,43,12,23]]

# ### Nested list -> Tuple
tup = tuple(li)
print(tup)

# ### Nested list -> Set
# set elements must be hashable; the inner LISTS are not (set(li) raises
# TypeError), so convert them to tuples first.
se = set(tuple(x) if isinstance(x, list) else x for x in li)
print(se)

# ### Nested list -> Dictionary
# again dict() needs pairs; only the two-element inner lists qualify as
# (key, value) pairs, so filter down to those.
di = dict(x for x in li if isinstance(x, list) and len(x) == 2)
print(di)
Data-Science-HYD-2k19/Topic-Wise/.ipynb_checkpoints/LIST-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import matplotlib.pyplot as plt import numpy as np import pandas as pd import netCDF4 as nc import seaborn as sns import matplotlib.colors as mcolors from matplotlib.colors import LinearSegmentedColormap from mpl_toolkits.axes_grid1 import make_axes_locatable import matplotlib.patches as mpatches import cmocean as cm import glob import os import xarray as xr import datetime from salishsea_tools import viz_tools, tidetools, geo_tools, gsw_calls, wind_tools import pickle from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) from sys import platform # %matplotlib inline # + ## User inputs. As set up now, darwin -> paths on Rachael's "shuga" laptop ## The other paths are for Vicky's Ocean setup if platform == 'darwin': bathy_dir = '/Users/rmueller/Data/SalishSeaCast/grid/' results_dir = '/Users/rmueller/Projects/' out_dir = '/Users/rmueller/Documents/Presentations/2020/OceanSciences2020/' else: display('Update file paths for oceans machine') # Set to [1] if you want to print to file and [0] if not print_to_file = 0 # - ## colourmap for the land/water. 
vmax = 1 cmap2 = LinearSegmentedColormap.from_list('mycmap', [(0 / vmax, 'lightskyblue'), #change this if you want a different colour for water (0.85 / vmax, 'lightskyblue'), #change this if you want a different colour for water (0.850001 / vmax, 'papayawhip'), #change this if you want a different colour for land (1 / vmax, 'papayawhip')] #change this if you want a different colour for land ) # ### Compare 2D concentrations for early beaching case of 01 Aug, 2017, with more delayed beaching case of 15 Jan, 2018 # create labels for plotting and for file names date_labels = ['Aug 01, 2017', 'Jan 15, 2018'] # for plotting date_ranges = ['01aug17-08aug17', '15jan18-22jan18'] # for file naming oil_types = ['bunker','akns','diesel'] # create variables bunker_0_thick, bunker_1_thick, etc. where numeric vals. reflect date ndates = len(date_ranges) for oils in oil_types: # set index for dates corresponding to date_labels # define input paths for different oil types if oils == 'bunker': for ii in range(ndates): locals()["inpath_" + str(ii)] = ["MIDOSS/results/OS2020/bunkerc/SB/Lagrangian_BunkerC_crude_SB_" + date_ranges[ii] + "_BunkerC.nc"] elif oils == 'akns': for ii in range(ndates): locals()["inpath_" + str(ii)] = ["MIDOSS/results/OS2020/akns/SB/Lagrangian_AKNS_crude_" + date_ranges[ii] + "_Diesel.nc"] elif oils == 'diesel': for ii in range(ndates): locals()["inpath_" + str(ii)] = ["MIDOSS/results/OS2020/diesel/SB/Lagrangian_DieselFuel_refined_SB_" + date_ranges[ii] + "_Diesel.nc"] for ii in range(ndates): print(locals()['calculating integrated thickness for ' oils + date_ranges(ii)]) locals()[oils + "_" + str(ii)] = xr.open_dataset(results_dir + locals()['inpath_' + str(ii)][0]) locals()[oils + "_" + str(ii) + "_thick"] = locals()[oils + "_" + str(ii)].Thickness_2D locals()[oils + "_" + str(ii) + "_thick_t"] = locals()[oils + "_" + str(ii) + "_thick"].sum(dim='time') # ### plot up 2D thickness # + fs = 20 fig = plt.figure(figsize=(20,20)) # plot oil types ax1 = 
fig.add_subplot(231) ax2 = fig.add_subplot(232) ax3 = fig.add_subplot(233) ax4 = fig.add_subplot(234) ax5 = fig.add_subplot(235) ax6 = fig.add_subplot(236) # convert xarray into numpy using ".values" in order to gain access to different visualization tools mappable = ax1.pcolormesh(akns_0_thick_t.values, vmin = 0, vmax = 40, cmap = cm.cm.balance) mappable = ax2.pcolormesh(bunker_0_thick_t.values, vmin = 0, vmax = 40, cmap = cm.cm.balance) mappable = ax3.pcolormesh(diesel_0_thick_t.values, vmin = 0, vmax = 40, cmap = cm.cm.balance) mappable = ax4.pcolormesh(akns_1_thick_t.values, vmin = 0, vmax = 40, cmap = cm.cm.balance) mappable = ax5.pcolormesh(bunker_1_thick_t.values, vmin = 0, vmax = 40, cmap = cm.cm.balance) mappable = ax6.pcolormesh(diesel_1_thick_t.values, vmin = 0, vmax = 40, cmap = cm.cm.balance) # add land mask to ax1 and ax2 for ii in range(6): axis = locals()['ax' + str(ii+1)] viz_tools.plot_land_mask(axis,'/Users/rmueller/Projects/MIDOSS/MIDOSS-MOHID-grid/AfterNEMOBathy201702.nc', color = 'burlywood') # adjust colorbar for AKNS graphic divider = make_axes_locatable(ax6) c1 = divider.append_axes("right", size = '10%') cb = plt.colorbar(mappable, cax = c1) cb.set_label('Thickness (microns) summed over full run', fontsize=fs) cb.ax.tick_params(labelsize = fs) cb.remove() divider = make_axes_locatable(ax3) c1 = divider.append_axes("right", size = '10%') cb = plt.colorbar(mappable, cax = c1) cb.set_label('Thickness (microns) summed over full run', fontsize=fs) cb.ax.tick_params(labelsize = fs) cb.remove() ax1.set_title(oil_types[0], fontsize = fs) ax1.set_ylabel(date_ranges[0], fontsize = fs) ax2.set_title(oil_types[1], fontsize = fs) ax3.set_title(oil_types[2], fontsize = fs) ax4.set_ylabel(date_ranges[1], fontsize = fs) plt.tight_layout() fig.savefig(results_dir + 'SB_2date_oiltype_comparison.png', bbox_inches='tight', dpi = 600); # - # ### plot up spill area comparison # + #### Load header information with 
open('/Users/rmueller/Projects/MIDOSS/results/OS2020/bunkerc/SB/resOilOutput_01aug2017.sro', 'r') as the_file: all_data = [line.strip() for line in the_file.readlines()] header = all_data[4] # Order header into list array by splitting up string header_arr = [] header_arr = header.split(' ') # Remove emtpy entries from list header_arr = np.asarray([x for x in header_arr if x != '']) header_arr # - # load mass balance area values aug01_17_sro_in = '/Users/rmueller/Projects/MIDOSS/results/OS2020/bunkerc/SB/resOilOutput_01aug2017.sro' jan15_18_sro_in = '/Users/rmueller/Projects/MIDOSS/results/OS2020/bunkerc/SB/resOilOutput_15jan2018.sro' data_aug01_17 = np.genfromtxt(aug01_17_sro_in, skip_header=6, skip_footer=4) data_jan15_18 = np.genfromtxt(jan15_18_sro_in, skip_header=6, skip_footer=4) data_aug01_17.shape data_jan15_18.shape plot_data = 12 plt.plot(data_aug01_17[range(191),12],'b') plt.plot(data_jan15_18[range(129),12]) plt.ylabel('Area (m^2)') plt.xlabel('Hours after spill ') plt.legend(['01 Aug, 2017', '15 Jan, 2018']) plt.title('Dispersed Mass Balance Comparison')
notebooks/bird_meeting/SB_BunkerDieselAKNS.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Git과 Github 시작 _ _ GIT 교과서 # # #### 2장 저장소(로컬/원격) # # echo : 출력 명령어 # # cat : 파일내용 출력 # # git init . 저장소 초기화 # # git remote -v : 연결되어 있는 원격 저장소 정보 확인 # # # #### 3장 커밋 및 전송하기 # Git status : 현재의 깃 상태를 출력 # <깃의 상태 3가지 # ㅁ 작업하고 있는 곳 : 워킹 디렉토리 # - 등록(관리인지) tracted : git add 명령어 # ㅁ 스테이지 영역 : 둘 사이의 임시 저장소 # - 기록(커밋) : commit 명렁어 # ㅁ .git 저장소 # # # Git log: 로그 확인 # Push 명령어 : 원격 저장소로 push # Git push -u origin master # # # #### 4장 저장소 복제 및 수정 작업 # 저장소 1) init 생성 -> 폴더 init=> git 저장소 새로이 생성 # 2) clone 복제 ->clone 명령을 이용하여 복사하여 작업 # # - 푸시 # Git push -u origin master # # #### 5장 브랜치 생성 및 병합 # 브랜치 생성 : Git branch <브랜치명> # 브랜치 이동(checkout) : Git checkout <브랜치명> # 브랜치 확인 : Git branch -v # 깃 상태 확인 : git status # 스테이지 영역에 등록 : git add 파일명.확장자 //커밋 준비 상태가됨 # 커밋 : git commit -m “메시지” # # # #### 7장 리셋 및 리버트 # Git 사용 이유 : # - 코드 변경 상태(안정상태 유지) # - 테스트 하면서 코드의 안정 상태를 커밋으로 저장하면서 완성해 나감 # - 진행중 오류 동작이 나면 이전의 코드상태로 돌아가서 다시 새로운 설계 모델로 돌아감 # - 프로그램 작성할때마다 새로운 커밋을 만들면서 프로그램을 완성시켜나감 # # Git reset <돌아갈 지점 커밋 ID> # 이전 커밋 : HEAD ^ # 이이전 커밋 지점 : HEAD ^^ # 깃은 3가지 리셋모드 존재 # soft모드 : 커밋 삭제, 작업내용은 유지 # mixed모드 : 커밋 삭제, 작업내용은 유지 (default 값) # hard모드 : 커밋과 작업내용 모두 삭제 # # 리셋은 local 저장소에서만 해야함 # github에는 모든 커밋의 기록이 남아져 있음 # 원격 저장소에 공개되어 있을 경우 reset으로 커밋 X # 취소 커밋 : revert # # # #### 추가) GIT repository merge # git remote add <병합할 저장소 이름> <병합할 저장소 주소> # git fetch <병합할 저장소 이름> # git merge --allow-unrelated-histories <병합할 저장소 이름>/<병합하고 싶은 branch 이름> # git remote remove <병합할 저장소 이름> # git commit -m "Merge : <병합할 저장소 이름> into <유지할 저장소 이름>" #
opensourceSW_DeveloperContest/GIT_note.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns test_df=pd.read_csv('test.csv') test_df.shape test_df.head() #check null values test_df.isnull().sum() # + ## Fill Missing Values test_df['LotFrontage']=test_df['LotFrontage'].fillna(test_df['LotFrontage'].mean()) # - test_df['MSZoning']=test_df['MSZoning'].fillna(test_df['MSZoning'].mode()[0]) test_df.shape test_df.drop(['Alley'],axis=1,inplace=True) test_df.shape test_df['BsmtCond']=test_df['BsmtCond'].fillna(test_df['BsmtCond'].mode()[0]) test_df['BsmtQual']=test_df['BsmtQual'].fillna(test_df['BsmtQual'].mode()[0]) test_df['FireplaceQu']=test_df['FireplaceQu'].fillna(test_df['FireplaceQu'].mode()[0]) test_df['GarageType']=test_df['GarageType'].fillna(test_df['GarageType'].mode()[0]) test_df.drop(['GarageYrBlt'],axis=1,inplace=True) test_df.shape # + test_df['GarageFinish']=test_df['GarageFinish'].fillna(test_df['GarageFinish'].mode()[0]) test_df['GarageQual']=test_df['GarageQual'].fillna(test_df['GarageQual'].mode()[0]) test_df['GarageCond']=test_df['GarageCond'].fillna(test_df['GarageCond'].mode()[0]) test_df.drop(['PoolQC','Fence','MiscFeature'],axis=1,inplace=True) # - test_df.shape test_df.drop(['Id'],axis=1,inplace=True) test_df['MasVnrType']=test_df['MasVnrType'].fillna(test_df['MasVnrType'].mode()[0]) test_df['MasVnrArea']=test_df['MasVnrArea'].fillna(test_df['MasVnrArea'].mode()[0]) test_df['BsmtExposure']=test_df['BsmtExposure'].fillna(test_df['BsmtExposure'].mode()[0]) test_df['BsmtFinType2']=test_df['BsmtFinType2'].fillna(test_df['BsmtFinType2'].mode()[0]) sns.heatmap(test_df.isnull(),yticklabels=False,cbar=False,cmap='viridis') test_df.loc[:, test_df.isnull().any()].head() test_df.shape 
# Fill the remaining missing values and write the cleaned test set out.
# Categorical / discrete columns get the most frequent value (mode);
# continuous columns get the column mean. Each fill is independent of the
# others, so looping preserves the original cell-by-cell behavior exactly.
mode_cols = ['Utilities', 'Exterior1st', 'Exterior2nd', 'BsmtFinType1',
             'BsmtFullBath', 'BsmtHalfBath', 'KitchenQual', 'Functional',
             'SaleType']
for col in mode_cols:
    test_df[col] = test_df[col].fillna(test_df[col].mode()[0])

mean_cols = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF',
             'GarageCars', 'GarageArea']
for col in mean_cols:
    test_df[col] = test_df[col].fillna(test_df[col].mean())

# persist the fully-imputed test set for the modeling notebook
test_df.to_csv('formulatedtest.csv',index=False)
Handle_test_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Read three integers from stdin, one per line: base a, exponent b, modulus m.
a, b, m = [int(input()) for _ in range(3)]

print(a ** b)        # plain power a^b
print(pow(a, b, m))  # (a^b) mod m via efficient three-argument pow,
                     # e.g. 3^4 = 81, 81 mod 5 = 1
Python/6. Math/53. power mod power.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 함수 1부 # _[Think Python의 3장](http://greenteapress.com/thinkpython2/html/thinkpython2004.html) # 내용을 요약 및 수정한 내용입니다._ # ## 함수 정의하기 # 파이썬에서 함수의 정의는 아래 형식을 이용해야 한다. # # ```python # def 함수이름(매개변수1, 매개변수2, ..., 매개변수n): # 명령문 # return 표현식 # ``` # # **주의:** 함수 본체에 사용되는 명령문은 들여쓴다. # # 함수를 정의할 때 사용되는 **매개변수**는 인자로 들어오는 값들을 # 함수 본체 명령문에 전달하는 역할을 수행한다. # ### 예제 # 절댓값을 계산하는 함수 `myAbs`와 # 두 숫자의 합을 계산하는 함수 `myAdd`를 아래와 같이 직접 정의할 수 있다. # # `myAbs`는 한 개의 값을 입력받아야 하기에 한 개의 매개변수가 필요하고, # 반면에 `myAdd`는 더해야 할 두 개의 값을 입력받아야 하기에 두 개의 매개변수가 필요하다. def myAbs(num): if num < 0: num = -num return num def myAdd(left, right): z = left + right return z myAbs(-3) myAdd(-3, myAbs(-3)) # ## 매개변수와 인자 # `myAbs`와 `myAdd`의 정의헤서 사용된 # `num`, `left`, `right` 등은 함수의 인자를 받아들이는데 사용되는 **매개변수**이다. # 그리고 매개변수를 통해 함수에게 전달되는 값들을 **인자**라고 부른다. # 사용되는 인자의 개수는 매개변수의 개수와 일치해야 한다. # # 함수와 매개변수들의 이름은 각각의 역할에 맞게 정하는 것을 권유한다. # 그러면 함수와 매개변수들의 이름을 보고 함수와 각 매개변수들의 의미와 # 역할을 파악하는 데에 보다 유리하다. # ### 키워드 인자 # `print` 함수를 이용하여 화면에 여러 개의 인자를 출력할 수 있다. print('Hello,', 'Python', '!') # 그런데 여러 개의 인자를 각각 다른 줄에 출력하려면 아래와 같이 하면 된다. print('Hello,', 'Python', '!', sep='\n') # 위에서 사용된 `sep`은 `print` 함수의 키워드 인자이다. # `sep`과 같은 키워드 인자는 인자를 지정하지 않으면 기본값을 사용한다. # `sep`의 기본값은 한 칸 띄어쓰기를 의미하는 `' '`이다. # 즉, `print` 함수의 인자들을 한 칸씩 띄어서 화면에 보여준다. # 그리고 위에서는 `sep`에 대한 인자를 띄어쓰기 대신 줄바꿈(`'\n'`)을 # 사용하였다. # # 이렇듯 특정 함수들은 키워드 인자를 사용할 수 있으며, 그런 함수는 # 매개변수에 기본값을 지정하는 식으로 정의된다. # # 예를 들어, `print`함수에 사용되는 매개변수 중에 # `sep` 이외에 `end`, `file`, `flush` 등이 기본값을 갖는다. # 실제로 `help(print)` 명령문을 실행하면 # 아래와 같이 확인된다. # # ```python # print(value, ..., sep=' ', end='\n', file=sys.stdout, flush=False) # ``` # # * `sep` 매개변수: 출력할 인자들 사이에 대한 기준 지정. 기본값은 띄어쓰기. # * `end` 매개변수: 값들을 화면에 출력한 후 사용할 추가 문자열 지정. 기본값은 줄바꿈. # * `file` 매개변수: 출력 장치 지정. 기본값은 터미널 화면. 
# * `flush` 매개변수: 여러 개의 출력값들을 하나씩 차례대로 출력 장치에 보낼지 말지를 지정. 기본값은 하나씩 바로바로 보내기. # # 위 옵션 매개변수 중에서 `sep`과 `end`는 여러 모로 유용하게 활용된다. help(print) # ### 예제 # 키워드 인자를 사용하는 함수를 정의할 수 있다. # 아래 함수는 두 개의 숫자를 입력받아 합을 구한다. def myAdd10(left, right=10): add10 = left + right return add10 # 그런데, 둘째 매개변수의 기본값이 10으로 지정되었다. # 따라서 둘째 인자를 반드시 입력하지 않아도 되며, # 그럴 경우 둘째 인자는 10으로 처리된다. myAdd10(5) # 물론 둘째 인자를 지정할 수도 있다. myAdd10(5, 20) # 키워드 인자를 지정할 경우 매개변수 이름을 언급하는 것이 좋다. myAdd10(5, right=20) # ## 수학과 프로그래밍에서의 함수 이해 # ### 수학에서의 함수 # 함수라는 표현이 수학에서 많이 사용된다. # 수학에서 함수는 두 집합 사이의 관계이며, # 첫째 집합의 원소를 둘째 집합의 원소와 대응시킨다. # # 아래 그림은 집합 $X$의 원수 $x$와 집합 $Y$의 원소 $f(x)$를 대응시키는 함수를 보여준다. # <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/mle/master/notebooks/images/fun_math1.png" width="300"/></div> # # <그림 출처: [함수, 위키백과](https://ko.wikipedia.org/wiki/함수)> # 이때 $X$와 $Y$를 각각 함수 $f$의 **정의역**(domain)과 # **공역**(codomain)이라 부른다. # ### 프로그래밍에서의 함수 # 프로그래밍에서 함수가 하는 역할은 다음과 같다. # # > 어떤 값이 입력(input)되면 지정된 명령문에 따라 입력된 값을 조작하거나 이용하여 계산된 값을 내준다(output). # # 아래 그림은 함수의 입력(input)과 내주기(output)의 관계를 보여준다. # <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/mle/master/notebooks/images/fun_prog2.png" width="300"/></div> # # <그림 출처: [함수, 위키백과](https://ko.wikipedia.org/wiki/함수)> # 함수의 입력값으로 사용될 수 있는 값들의 집합이 정의역에 해당하고, # 내주는 값들의 집합이 공역에 해당한다. # 함수가 내주는 값을 **반환값**(return)이라 부르기도 한다. # 이유는 함수가 내주는 값을 `return` 예약어로 지정하기 때문이다. # ### 차이점 # 입력값을 특정 집합의 원소로 보고, 반환값을 다른 집합의 원소로 보고, # 그리고 # 두 집합 사이의 대응관계를 "지정된 명령문에 따라 조작하거나 계산한다"라 이해한다면 # 수학과 프로그래밍에서의 함수 개념은 기본적으로 동일하다. # # * 정의역: 함수의 입력값들로 구성된 집합 # * 공역: 함수의 반환값들로 구성된 집합 # # 실제로, 많은 수학 함수를 프로그래밍 함수로 다룰 수 있다. # 예를 들어, 아래 소개되는 `abs()`는 실수에서 실수로 가는 함수이며, # 절댓값을 반환한다. # # __참고:__ 위에서 정의한 `myAbs()` 함수와 동일한 기능을 수행한다. abs(-3.3) # 하지만 앞으로 이어서 보겠지만 프로그래밍에서의 함수가 수학의 함수와 다른 기능도 갖는다. # ### 프로그래밍 함수 예제 # 프로그래밍에서는 다루는 함수들의 유형을 살펴본다. # #### 수학 함수와 유사한 경우 # # * 수를 입력하면 계산결과를 돌려준다. 
# # $$y = f(x) = x^2 + 1$$ # + def func1(x): y = x**2 + 1 return y print(func1(3)) # - # * 아래 함수는 변수이름 `y`를 두 군데서 사용한다. # 하나는 함수 밖에서, 다른 하나는 함수 안에서. # 하지만 서로 관계가 없기 때문에 일반 수학 함수처럼 작동한다. # + y = -1 def func2(x): y = x**2 + 1 # 함수 밖의 y와 아무 상관 없음. return y print(func2(3), y) # - # 하지만 오해를 방지하기 위해 서로 다른 기능을 수행하는 변수는 이름도 다르게 줄 것을 권장한다. # + y = -1 def func3(x): z = x**2+1 return z print(func3(3), y) # - # 이렇게 다른 변수를 사용하면 아래와 같이 함수 밖에서 선언된 변수를 함수 안에서 사용해도 별 혼란이 # 발생하지 않는다. # + y = -1 def func4(x): z = x**2 - y # 함수 밖의 y 사용 가능 return z print(func4(3), y) # - # #### 수학 함수와 조금 다른 경우 # * 함수 밖에서 선언된 변수의 값을 변경할 수는 없다. # + y = -1 def func5(x): y = 10 # 함수 밖의 y가 가리키는 값을 변경할 수 없음. return y print(func5(3), y) # - # 하지만 함수 밖에서 선언된 변수의 값을 변경할 수는 없다. # + y = -1 def func5(x): y = 10 # 함수 밖의 y가 가리키는 값을 변경할 수 없음. return y print(func5(3), y) # - # * 하지만 `global` 예약어를 사용하면 함수 밖에서 선언된 변수를 # 함수 내부에서 수정할 수 있게 된다. # + y = -1 def func6(x): global y y = x**2+1 return y # func 함수가 실행되면 y 가 업데이트 됨. print(func6(3), y) # - # #### 수학 함수와 많이 다른 경우 # 함수의 반환값이 지정되지 않으면 아무런 의미가 없는 값인 `None`을 반환한다. # 또한 아래 함수처럼 반환값은 없지만 함수 외부에서 선언된 변수 `y`의 값을 업데이트할 수도 있다. # + y = -1 def func7(x): global y y = x**2+1 print(func7(3), y) # - # ## 함수의 반환값<a id='funReturn'></a> # ### 반환값이 명시되지 않은 함수 # `func7()`은 `return` 예약어를 사용하지 않았다. # 이런 경우 파이썬은 암묵적으로 `return None`을 지정한다. # 즉, 반환값이 항상 `None` 값인 것이다. # # 파이선 내장함수(built-in functions)들 중에 # `print()` 함수가 반환값이 명시되지 않은 대표적인 함수이다. # 예를 들어, 아래 코드에서 `print()` 함수의 반환값 `None`임을 확인할 수 있다. x = print(1) print(x) # **주의:** 위 코드를 실행한 결과의 첫째 줄에서 보이는 `1`은 `print` 함수의 반환값이 아니라, # `print(1)`을 실행하여 모니터에 숫자 1을 출력하는 `print` 함수의 # **부수 효과**(side effect)에 불과하다. # 함수의 부수 효과에 대해서는 아래에서 좀 더 자세히 다룬다. # ### 함수의 반환값은 단 하나 # 함수 본체 코드에 `return` 지정자가 여러 번 사용되더라도 # 함수가 실행되어 멈출 때까지 반환되는 값은 무조건 하나이다. # 사실, 함수의 반환값이 지정되는 순간에 함수의 실행이 멈춰진다. # # 예를 들어, 아래 `login` 함수는 # `members` 리스트에 아이디가 포함되어 있는지 여부를 # 판단한다. # # `login` 함수 본체에 `return` 지정자가 두 번 사용되었다. 
# 하지만, `members`에 포함된 항목별로 회원여부를 판단할 때 # 회원이 확인되면 '누구누구님 환영합니다'를 리턴하고 # 바로 함수의 실행을 종료한다. # 즉, 더이상 `for` 반복문을 돌리지 않는다. # + language = 'python' def check_char(char): for item in language: if item == char: return item.upper() return '해당 알파벳 없음' # - check_char('o') check_char('n') check_char('k') # ## 함수호출 # 앞서 살펴 보았듯이 함수의 반환값을 저장하거나 다른 함수의 인자로 전달할 수 있다. # 즉, 하나의 값으로 다룰 수 있으며, # 이렇게 함수를 이용하여 표현된 값을 **함수 표현식**이라 부른다. # # 예를 들어, 절댓값을 생성하는 함수인 `abs`에 부동소수점 `-3.3`을 # 인자로 사용하여 표현된 값은 아래와 같다. # # ```python # abs(-3.3) # ``` # # 또한, 실수 `abs(-3.3)`를 -3.3과 더해주려면 아래와 표현식처럼 # 함수의 합성을 이용할 수 있다. # # ```python # myAdd(abs(-3.3), -3.3) # ``` # 하지만 함수 표현식이 가리키는 값을 실제로 확인하려면 # 함수를 해당 인자와 함께 실행해야 한다. # 이렇게 함수 표현식을 실행하는 것을 **함수호출**이라 부른다. # # 예를 들어, 앞서 언급한 두 표현식의 호출해서 결과를 확인하려면 아래와 같이 할 수 있다. check_char('liga') abs(-3.3) sumZero = myAdd(abs(-3.3), -3.3) sumZero # ### 함수호출 실행과정 # 함수호출 과정을 좀 더 자세히 살펴보자. # 예를 들어, 아래 함수 표현식이 가리키는 값이 어떤 순서대로 계산되는가를 확인하려 한다. # + from operator import add, mul mul(add(2, mul(4, 6)), add(3, 5)) # - # 실제로 계산이 이루지는 과정은 다음과 같다. # ```python # mul(add(2, mul(4, 6)), add(3, 5)) => mul(add(2, 24), add(3, 5)) # => mul(26, add(3, 5)) # => mul(26, 8) # => 208 # ``` # __참고:__ [PythonTutor: 함수호출 실행과정](http://pythontutor.com/visualize.html#code=def%20myAdd%28left,%20right%29%3A%0A%20%20%20%20return%20left%2Bright%0A%20%20%20%20%0Adef%20myMul%28left,%20right%29%3A%0A%20%20%20%20return%20left*right%0A%0AmyMul%28myAdd%282,%20myMul%284,%206%29%29,%20myAdd%283,%205%29%29&cumulative=false&curInstr=0&heapPrimitives=nevernest&mode=display&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false)에서 # 앞서 사용한 표현식과 동일하게 작동하는 표현식이 계산되는 과정을 살펴볼 수 있다. # `add`, `mul`과 같이 파이썬에서 이미 정의된 내장함수들의 계산과정은 시각화해서 보여지지 않는다. # 그래서 동일하게 작동하는 `myAdd`와 `myMul`을 새로 정의하였다. # 함수의 이름만 다를 뿐 동일한 표현식이다. # ## 함수호출의 부수 효과 # 프로그램에서 사용되는 함수를 구분하는 다양한 기준이 있다. # 여기서는 부수기능의 존재여부에 따른 함수 분류 기준을 살펴본다. # ### 부수 효과가 없는 함수 (Pure functions) # 함수가 호출되어 반환값을 지정하는 일 이외에 다른 어떤 일도 하지 않는다면 # 그 함수를 부수 효과가없는 함수라 부른다. 
# 예를 들어 절대값을 계산하는 `abs` 함수와 덧셈을 행하는 `add` 함수 등이 해당된다. # 아래 그림에서 보듯이 인자를 입력받은 후에 각각 절대값과 덧셈을 실행한 결과를 # 반환하는 일 이외에는 다른 일을 행하지 않는다. # <div align="center"><img src="https://raw.githubusercontent.com/codingalzi/mle/master/notebooks/images/fun_pure.png" width="400"/></div> # ### 부수 효과가 있는 함수 (Impure functions) # 부수 효과가 있는 함수는 부수 효과가 없는 함수와는 달리 반환값을 지정하는 일 이외에 부수적인 일을 한다. # 대표적으로 `print()` 함수가 부수 효과를 갖는 함수이다. # # 그런데 `print()` 함수가 부수 효과를 갖는 함수라는 것을 확인하려면 아래 두 가지를 알아야 한다. # # * 리턴하는 값이 무엇인가? # * 부수적으로 어떤 일을 하는가? # # 앞서 살펴 보았듯이 `print()`가 리턴하는 값은 `None`이다. # 기타 언어에서는 보통 널(null) 값이라 부른다. # # 다음으로, `print()` 함수가 부수적으로 하는 일은 예를 들어 터미널 창에 어떤 문자열을 출력하는 것이다. # `print("Hello Python")` 방식으로 `print()` 함수를 호출하면 아래와 같이 `'Hello Python'` 이란 문자열을 출력한다. print("Hello Python") # ### 예제: 부수 효과를 갖는 함수의 호출과정 # 부수 효과를 갖는 함수호출의 실행과정은 부수 효과가 없는 함수의 호출과정과 기본적으로 동일하다. # 다만 함수의 본문에 지정된 코드의 실행 중간중간에 부수적인 일도 함께 한다는 점만 다르다. # # 먼저 아래 코드의 실행결과가 어떻게 도출되었는가를 잘 생각해보자. print(print(1), print(2)) # 아래의 그림은 위 코드를 호출하는 과정을 설명한다. # 아래 그림에서 &#9312; ~ &#9319;번 사이의 번호가 배정된 네모상자로 둘러싸인 부분이 # 현재 실행중인 함수호출 또는 함수호출의 결과값 또는 부수 효과(화면 출력)를 나타낸다. # ```python # print(print(1), print(2)) => print(None, print(2)), 추가로 1 출력 # => print(None, None), 추가로 2 출력 # => None, 추가로 None None 출력 # ``` # __참고:__ [PythonTutor: 함수호출 부수 효과](http://pythontutor.com/visualize.html#code=def%20myPrint%28*args,%20**kwargs%29%3A%0A%20%20%20%20print%28*args,%20**kwargs%29%0A%0AmyPrint%28myPrint%281%29,%20myPrint%282%29%29&cumulative=false&curInstr=0&heapPrimitives=nevernest&mode=display&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false)에서 # 앞서 사용한 표현식과 동일하게 작동하는 표현식이 계산되는 과정을 살펴볼 수 있다. # `print()` 함수와 동일하게 작동하는 `myPrint()` 함수를 사용했다. # 함수의 이름만 다를 뿐 동일한 표현식이다. # ## 지역변수와 전역변수 # 함수를 선언할 때 사용되는 매개변수와 함수 본체에서 선언되는 변수는 함수가 실행되는 동안에만 의미를 갖는 변수들이며, # 이런 변수들을 **지역변수**라 부른다. # 지역변수가 아닌 변수들은 **전역변수**라 부른다. # # 예를 들어, `hour_to_min()` 함수를 정의할 때 사용된 # 매개 변수 `hour`와 본체에서 선언된 `minutes` 변수는 모두 지역함수이며, # `two_hour` 는 함수 밖에서 선언된 전역변수이다. 
def hour_to_min(hour): minutes = hour * 60 return minutes # 지역변수들은 함수 밖에서는 어떤 의미도 갖지 않는다. 예를 들어, 아래 코드를 실행하면 오류가 발생한다. two_hour = hour_to_min(2) print(minutes) # 물론 아래의 경우도 오류가 발생한다. two_hour = hour_to_min(2) print(hour) # 위에서 오류가 발생하는 이유는 `hour_to_min` 함수가 인자 2와 함께 실행되어 종료가 되면 # 실행도중에 선언되어 사용된 `hour`와 `minutes` 변수의 의미도 완전히 사라지기 때문이다. # # **참고:** # [PythonTutor:지역변수 전역변수](http://pythontutor.com/visualize.html#code=def%20hour_to_min%28hour%29%3A%0A%20%20%20%20minutes%20%3D%20hour%20*%2060%0A%20%20%20%20return%20minutes%0A%0Atwo_hour%20%3D%20hour_to_min%282%29%0Aprint%28minutes%29&cumulative=false&curInstr=0&heapPrimitives=nevernest&mode=display&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false)에서 # `hour`와 `minutes`의 생존주기, 즉, 언제 생성되고 언제 사라지는지를 확인할 수 있다. # ## 연습문제 # 3장 마지막 예제에서 숫자 맞히기 게임을 아래와 같이 구현하였다. # + import random # 컴퓨터가 1에서 100사이의 값을 무작위로 선택 secret = random.randint(1, 100) # secret을 맞출 때까지 반복해서 guess함. while True: guess = int(input("맞춰보세요: ")) if secret == guess: print(f"정답입니다.") break # 정답이면 게임 종료 else: print(f"틀렸습니다.") if guess < secret: # 오답이면 크기 비교 알려주기 print("너무 작아!") else: print("너무 커!") print("종료합니다.") # - # 이제 위 코드를 함수로 선언해보자. # 다만 예측 횟수를 제한하는 역할을 수행하는 값을 인자로 받도록 한다. # 즉, 함수가 적절한 인자와 함께 호출되면 게임이 제한된 횟수 내에 `secret`을 맞혀야 하며 # 그렇지 않으면 게임이 자동으로 종료되도록 한다. # # 함수 이름, 인자, 반환값에 대한 정보는 아래와 같이 정해보자. # # - 함수 이름: `guessing` # - 인자: 허용된 예측 횟수를 나타내는 `count_limit` # - 반환값: 제한 횟수 내에 맞혔을 경우엔 추측 횟수, 그렇지 않은 경우엔 제한 횟수 내에 맞히지 못했다는 정보. # # 매개변수 `cout_limit`는 게임을 진행할 때 예측 횟수를 제한하는 역할을 수행하는 값을 인자로 받는다. # 예를 들어 5가 인자로 사용되면 게임 참여자는 최대 5번 이내에 답을 맞혀야 하며, # 그렇지 않으면 게임이 자동 종료된다. # __업그레이드 1 단계__ # 함수를 선언하기 전에 먼저 위 게임을 특정 값을 이용하여 예측 횟수를 제한하도록 해보자. # 그러기 위해서는 예를 들어 아래 두 개의 변수를 사용할 수 있다. # # - `count_limit`: 예측 제한 횟수 # - `count`: 예측한 횟수 # # 즉, 게임 참여자가 예측할 때마다 `count`가 가리키는 값을 1씩 키워준 후 그 값이 `count_limit`가 # 가리키는 값과 같아지면 게임을 멈추게 하면 된다. # # 예를 들어, 아래 코드는 예측을 최대 5번까지만 허용한다. 
# + import random secret = random.randint(1, 100) count_limit = 5 # 예측 제한 횟수 지정 count = 0 # 게임 시작할 때 예측한 횟수를 0으로 초기화하기 # count 값이 count_limit 보다 작은 동안 게임 진행 while count != count_limit: guess = int(input("맞춰보세요: ")) count += 1 # 예측할 때마다 1씩 키워주기 if secret == guess: print(f"정답입니다.") break # 게임 종료 else: print(f"틀렸습니다.") if guess < secret: print("너무 작아!") else: print("너무 커!") print("종료합니다.") # - # 예측 횟수를 4번으로 제한하고 싶으면 아래와 같이 `count_limit` 변수가 가리키는 값을 4로 수정하면 된다. # + import random secret = random.randint(1, 100) count_limit = 4 # 예측 제한 횟수 = 4 count = 0 while count != count_limit: guess = int(input("맞춰보세요: ")) count += 1 if secret == guess: print(f"정답입니다.") break else: print(f"틀렸습니다.") if guess < secret: print("너무 작아!") else: print("너무 커!") print("종료합니다.") # - # __업그레이드 2 단계__ # 그런데 위와 같이 예측 횟수를 바꾸기 위해 코드 자체를 수정하는 것은 바람직하지 않다. # 이런 문제를 함수를 이용하면 매우 간단하게 해결할 수 있다. # # 위 코드를 함수화하기 위해 생각해야 할 것은 하나이다. 무엇을 함수의 매개변수로 사용할 것인가? # 바로 예측 횟수, 즉 `count_limit` 변수를 함수의 매개변수로 지정하면 다른 것은 전혀 바꿀 필요가 없다. # 아래와 같이 함수의 이름을 `guessing()`로 지정하고, `count_limit`를 매개변수로 지정하고, # 나머지 코드는 그냥 들여쓰기만 하면 된다. # 단, `import random`은 함수 밖에 그대로 둔다. # 그리고 `count_limit=4` 명령문은 삭제한다. # 이유는 `count_limit`의 값이 매개변수를 통해 지정되기 때문이다. # + import random def guessing(count_limit): secret = random.randint(1, 100) # count_limit = 4 # 더 이상 필요없음. 매개변수로 대체됨. count = 0 while count != count_limit: guess = int(input("맞춰보세요: ")) count += 1 if secret == guess: print(f"정답입니다.") break else: print(f"틀렸습니다.") if guess < secret: print("너무 작아!") else: print("너무 커!") print("종료합니다.") # - # 이제 `guessing()` 함수를 적절한 인자와 함께 호출하면 게임이 시작된다. # 예를 들어 예측횟수를 5회로 제한하려면 5와 함께 호출한다. guessing(5) # 예측 횟수를 4로 제한하려면 4를 인자로 사용한다. guessing(4) # __업그레이드 3 단계__ # 게임이 종료될 때 몇 번 예측했는지, 아니면 몇 번까지 예측한 후에 게임이 종료되었는가를 반환하도록 함수를 살짝 # 업그레이드해 보자. # # 먼저 어떤 경우에 게임이 종료되는가를 확인해야 한다. # # - 경우 1: 제한 횟수 내에 정답을 맞히는 경우 # - 경우 2: 제한 횟수 내에 정답을 맞히지 못하는 경우 # # 첫째 경우엔 예측한 횟수를, 둘째 경우엔 제한 횟수를 반환하도록 할 수 있다. # 따라서 각각의 경우에 모두 `return` 예약어를 이용하여 반환값을 지정해야 한다. 
# 경우에 따라 `return` 예약어의 위치가 달라짐에 주의하라. 특히 들여쓰기 수준에 주의해야 한다. # + import random def guessing(count_limit): secret = random.randint(1, 100) count = 0 while count != count_limit: guess = int(input("맞춰보세요: ")) count += 1 if secret == guess: print(f"정답입니다.") return count # 경우 1: 제한 횟수 이전에 예측한 횟수 # break # 더 이상 필요 없음. return 이후에는 무조건 함수의 실행이 종료됨. else: print(f"틀렸습니다.") if guess < secret: print("너무 작아!") else: print("너무 커!") print("종료합니다.") return count_limit # 경우 2: 제한 횟수에 도달했을 때 # - # 이제는 게임이 종료될 때마다 예측한 횟수를 알려준다. guessing(5) # __업그레이드 4 단계__ # 마지막으로 한 번 업그레이드한다. # 이번엔 `count_limit` 매개변수를 키워드 인자로 사용한다. # 1부터 100까지의 정수를 예측하는 게임이고 이진탐색 방식을 이용하면 최대 7번이면 답을 맞힐 수 있다. # 따라서 `count_limit=7`로 지정하도록 하자. # + import random def guessing(count_limit=7): # 키워드 인자를 7로 지정. secret = random.randint(1, 100) count = 0 while count != count_limit: guess = int(input("맞춰보세요: ")) count += 1 if secret == guess: print(f"정답입니다.") return count # break else: print(f"틀렸습니다.") if guess < secret: print("너무 작아!") else: print("너무 커!") print("종료합니다.") return count_limit # - # 이제 `guessing()` 함수를 호출할 때 인자를 지정하지 않아도 되며, 그럴 때는 자동으로 # 예측 횟수가 7로 제한된다. guessing() # 물론 원하는 값으로 예측 횟수를 제한할 수 있다. guessing(3)
notebooks/python04.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] tags=["papermill-error-cell-tag"] # <span style="color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;">An Exception was encountered at '<a href="#papermill-error-cell">In [14]</a>'.</span> # + [markdown] papermill={"duration": 0.012939, "end_time": "2021-05-03T13:25:29.051683", "exception": false, "start_time": "2021-05-03T13:25:29.038744", "status": "completed"} tags=[] # # Papermill Report Generator # + papermill={"duration": 1.933535, "end_time": "2021-05-03T13:25:30.995370", "exception": false, "start_time": "2021-05-03T13:25:29.061835", "status": "completed"} tags=[] import os import pandas as pd import numpy as np import plotnine as pn import seaborn as sns import datetime as dt import matplotlib.pyplot as plt import pdfkit # + papermill={"duration": 0.016387, "end_time": "2021-05-03T13:25:31.022024", "exception": false, "start_time": "2021-05-03T13:25:31.005637", "status": "completed"} tags=[] #Check Dataframe Utility function def check_df(dataframe, sample=False): print(f"Dataframe Shape: {dataframe.shape} with rows: {dataframe.shape[0]} and columns: {dataframe.shape[1]}") print(f"\nDF Columns: \n{list(dataframe.columns)}") if sample == True: print(f"\nData:\n{dataframe.head(5)}") return None # + papermill={"duration": 0.01883, "end_time": "2021-05-03T13:25:31.050300", "exception": false, "start_time": "2021-05-03T13:25:31.031470", "status": "completed"} tags=["parameters"] #Define the default parameters analysis = "listings" # + papermill={"duration": 0.015024, "end_time": "2021-05-03T13:25:31.075460", "exception": false, "start_time": "2021-05-03T13:25:31.060436", "status": "completed"} tags=["injected-parameters"] # Parameters analysis = "listings" # + papermill={"duration": 0.016267, 
"end_time": "2021-05-03T13:25:31.100757", "exception": false, "start_time": "2021-05-03T13:25:31.084490", "status": "completed"} tags=[] #Import the data def import_data(analysis, folder_path=None): if not folder_path: folder_path = os.path.abspath(".") data_dir = 'data' folder_path = os.path.join(folder_path, data_dir) if analysis == 'listings': filename = 'listings.csv' elif analysis == 'reviews': filename = 'reviews.csv' elif analysis == 'calendar': filename = 'calendar.csv' filepath = os.path.join(folder_path, filename) df = pd.read_csv(filepath) check_df(df) return df # + papermill={"duration": 1.080136, "end_time": "2021-05-03T13:25:32.190201", "exception": false, "start_time": "2021-05-03T13:25:31.110065", "status": "completed"} tags=[] ## Data cleaning Listings @np.vectorize def remove_dollar(label: str): return float(label.replace('$','').replace(',','')) if analysis == 'listings': #Import dei dati df = import_data(analysis) # Selezioniamo solo alcune delle colonne listings = df[[ 'id','name','longitude','latitude', 'listing_url', 'instant_bookable', 'host_response_time', 'review_scores_rating', 'property_type', 'room_type','accommodates', 'bathrooms','bedrooms','beds','reviews_per_month','amenities', 'number_of_reviews', 'price' ]] #listings['price'] = remove_dollar(listings['price']) listings = listings.assign(price = remove_dollar(listings.price)) listings[['price']] print("Listings dataset readed and parsed") df_clean = listings.copy() # + papermill={"duration": 0.01706, "end_time": "2021-05-03T13:25:32.217154", "exception": false, "start_time": "2021-05-03T13:25:32.200094", "status": "completed"} tags=[] ## Data cleaning Reviews if analysis == 'reviews': #Import dei dati df = import_data(analysis) #Date to datetime reviews = df.assign(date = pd.to_datetime(df['date'])) reviews['year'] = reviews['date'].dt.year reviews['month'] = reviews['date'].dt.month reviews = reviews.sort_values(['year', 'month'], ascending=False) print("Reviews dataset readed and 
parsed") df_clean = reviews.copy() # + papermill={"duration": 0.019474, "end_time": "2021-05-03T13:25:32.246411", "exception": false, "start_time": "2021-05-03T13:25:32.226937", "status": "completed"} tags=[] ## Data cleaning Calendar if analysis == 'calendar': # Import dei dati df = import_data(analysis) calendar = df.assign(date = pd.to_datetime(df['date'])) calendar = calendar.assign( price = pd.to_numeric(calendar.price.str.replace('$','').str.replace(',','')), # adjusted_price = pd.to_numeric(calendar.adjusted_price.str.replace('$','').str.replace(',','')), ) calendar['year'] = pd.DatetimeIndex(calendar['date']).year calendar['month'] = pd.DatetimeIndex(calendar['date']).month calendar = calendar.sort_values(['year', 'month'], ascending=False) calendar['available'] = calendar.available.map({ 't': True, 'f': False }) print("Calendar dataset readed and parsed") df_clean = calendar.copy() # + [markdown] papermill={"duration": 0.009021, "end_time": "2021-05-03T13:25:32.265404", "exception": false, "start_time": "2021-05-03T13:25:32.256383", "status": "completed"} tags=[] # # 2. 
Generate analysis and plots # + papermill={"duration": 0.025642, "end_time": "2021-05-03T13:25:32.301033", "exception": false, "start_time": "2021-05-03T13:25:32.275391", "status": "completed"} tags=[] # Simple Analysis Generation if analysis == 'listings': room_type_count = ( df_clean.groupby("room_type", dropna=False) .id.count() .reset_index() .rename(columns={"id": "listing_count"}) ) night_price = df_clean.agg({"price": [np.mean]}) night_price_room = df_clean.groupby("room_type").agg( {"price": [np.mean]} ) elif analysis == 'reviews': pass elif analysis == 'calendar': pass # + papermill={"duration": 0.895681, "end_time": "2021-05-03T13:25:33.206637", "exception": false, "start_time": "2021-05-03T13:25:32.310956", "status": "completed"} tags=[] # Simply Plot Generation if analysis == 'listings': fig1 = ( pn.ggplot(df_clean) + pn.aes(x='room_type', fill='room_type') + pn.geom_bar() + pn.theme(axis_text_x=pn.element_text(angle=45, hjust=1)) ) fig1_path = os.path.join(os.path.abspath('.'),'plot1.png') fig1.save(filename=fig1_path) fig2 = ( pn.ggplot(df_clean) + pn.aes(x="price") + pn.geom_histogram(fill="blue", colour="black", bins=30) + pn.xlim(0, 200) ) fig2_path = os.path.join(os.path.abspath('.'),'plot2.png') fig2.save(filename=fig2_path) elif analysis == 'reviews': pass elif analysis == 'calendar': pass # + [markdown] papermill={"duration": 0.011628, "end_time": "2021-05-03T13:25:33.228868", "exception": false, "start_time": "2021-05-03T13:25:33.217240", "status": "completed"} tags=[] # # 3. 
Creating the final PDF Report # + papermill={"duration": 0.019191, "end_time": "2021-05-03T13:25:33.260736", "exception": false, "start_time": "2021-05-03T13:25:33.241545", "status": "completed"} tags=[] # Defining start and send date for the analysis today = str(dt.date.today()).replace('-', '/') # + papermill={"duration": 0.019797, "end_time": "2021-05-03T13:25:33.291713", "exception": false, "start_time": "2021-05-03T13:25:33.271916", "status": "completed"} tags=[] # HTML template to add our data and plots report_template = f''' <!DOCTYPE html> <html> <head> <meta charset='utf-8'> <title>PythonBiellaGroup Report Example</title> <link rel='stylesheet' href='report.css'> <style> h1 {{ font-family: Arial; font-size: 300%; }} h2 {{ font-family: Arial; font-size: 200%; }} @page {{ size: 7in 9.25in; margin: 27mm 16mm 27mm 16mm; }} </style> </head> <h1 align="center">Analysis for: {analysis}</h1> <h2 align="center">Report date: {today}</h2> <figure> <img src="{fig1_path}" width="1200" height="600"> </figure> <figure> <img src="{fig2_path}" width="1200" height="600"> </figure> </html> ''' # + papermill={"duration": 0.019164, "end_time": "2021-05-03T13:25:33.322656", "exception": false, "start_time": "2021-05-03T13:25:33.303492", "status": "completed"} tags=[] # Save HTML string to file html_report = os.path.join(os.path.abspath("."),f"{analysis.split(',')[0].replace(' ','_')}_report.html") with open(html_report, "w") as r: r.write(report_template) # + [markdown] papermill={"duration": 0.011527, "end_time": "2021-05-03T13:25:33.345444", "exception": false, "start_time": "2021-05-03T13:25:33.333917", "status": "completed"} tags=[] # Be carefull! 
To use pdfkit with html report export to pdf you need to install on your machine: `wkhtmltopdf` # - https://stackoverflow.com/questions/27673870/cant-create-pdf-using-python-pdfkit-error-no-wkhtmltopdf-executable-found # + [markdown] tags=["papermill-error-cell-tag"] # <span id="papermill-error-cell" style="color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;">Execution using papermill encountered an exception here and stopped:</span> # + papermill={"duration": 4.808131, "end_time": "2021-05-03T13:25:38.166347", "exception": true, "start_time": "2021-05-03T13:25:33.358216", "status": "failed"} tags=[] # Use pdfkit to create the pdf report from the pdfkit.from_file(html_report, os.path.join(os.path.abspath("."),f"{analysis.split(',')[0].replace(' ', '_')}_report.pdf"))
PercorsoDati/Lab3-4/notebooks/generate_report_output.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/valogonor/DS-Unit-2-Sprint-4-Model-Validation/blob/master/LS_DS_242_Validate_classification_problems_LIVE_LESSON.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="KMI2k-oBsS08" colab_type="text" # _Lambda School Data Science — Model Validation_ # # # Validate classification problems # # Objectives # - Imbalanced Classes # - Confusion Matrix # - ROC AUC # # + [markdown] id="SUZCkblZYN60" colab_type="text" # Reading # - [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) # - [Precision and Recall](https://en.wikipedia.org/wiki/Precision_and_recall) # + [markdown] id="rU7RuVcjWdcp" colab_type="text" # ## Preliminary # + [markdown] id="jes2WnwV072n" colab_type="text" # We'll use [mlxtend](http://rasbt.github.io/mlxtend/) and [yellowbrick](http://www.scikit-yb.org/en/latest/) for visualizations. These libraries are already installed on Google Colab. But if you are running locally with Anaconda Python, you'll probably need to install them: # # ``` # conda install -c conda-forge mlxtend # conda install -c districtdatalabs yellowbrick # ``` # + [markdown] id="nQYGb3HgEp8b" colab_type="text" # We'll reuse the `train_validation_test_split` function from yesterday's lesson. 
# + id="PMTjC3vQ7ZNV" colab_type="code" colab={}
from sklearn.model_selection import train_test_split


def train_validation_test_split(
        X, y, train_size=0.8, val_size=0.1, test_size=0.1,
        random_state=None, shuffle=True):
    """Split X and y three ways into train, validation and test sets.

    Performs two chained calls to sklearn's ``train_test_split``: first the
    test portion is carved off, then the remainder is split into train and
    validation portions.

    Parameters
    ----------
    X, y : array-likes of equal length.
    train_size, val_size, test_size : float
        Fractions that must sum to 1.
    random_state, shuffle :
        Forwarded to both ``train_test_split`` calls.

    Returns
    -------
    X_train, X_val, X_test, y_train, y_val, y_test

    Raises
    ------
    ValueError
        If the three size fractions do not sum to 1.
    """
    # Validate with a tolerance instead of `assert ... == 1`: exact float
    # equality rejects perfectly valid splits (e.g. 0.7 + 0.15 + 0.15 != 1.0
    # in binary floating point), and asserts vanish under `python -O`.
    if abs(train_size + val_size + test_size - 1.0) > 1e-9:
        raise ValueError("train_size + val_size + test_size must sum to 1")
    X_train_val, X_test, y_train_val, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state, shuffle=shuffle)
    # val_size is rescaled because the second split only sees the
    # train+val portion of the data.
    X_train, X_val, y_train, y_val = train_test_split(
        X_train_val, y_train_val, test_size=val_size/(train_size+val_size),
        random_state=random_state, shuffle=shuffle)
    return X_train, X_val, X_test, y_train, y_val, y_test


# + [markdown] id="OWLBlu5K5kJR" colab_type="text"
# ## Fun demo!
#
# The next code cell does five things:
#
# #### 1. Generate data
#
# We use scikit-learn's [make_classification](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html) function to generate fake data for a binary classification problem, based on several parameters, including:
# - Number of samples
# - Weights, meaning "the proportions of samples assigned to each class."
# - Class separation: "Larger values spread out the clusters/classes and make the classification task easier."
#
# (We are generating fake data so it is easy to visualize.)
#
# #### 2. Split data
#
# We split the data three ways, into train, validation, and test sets. (For this toy example, it's not really necessary to do a three-way split. A two-way split, or even no split, would be ok. But I'm trying to demonstrate good habits, even in toy examples, to avoid confusion.)
#
# #### 3. Fit model
#
# We use scikit-learn to fit a [Logistic Regression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) on the training data.
#
# We use this model parameter:
#
# > **class_weight : _dict or ‘balanced’, default: None_**
#
# > Weights associated with classes in the form `{class_label: weight}`. If not given, all classes are supposed to have weight one.
# # > The “balanced” mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as `n_samples / (n_classes * np.bincount(y))`. # # # #### 4. Evaluate model # # We use our Logistic Regression model, which was fit on the training data, to generate predictions for the validation data. # # Then we print [scikit-learn's Classification Report](https://scikit-learn.org/stable/modules/model_evaluation.html#classification-report), with many metrics, and also the accuracy score. We are comparing the correct labels to the Logistic Regression's predicted labels, for the validation set. # # #### 5. Visualize decision function # # Based on these examples # - https://imbalanced-learn.readthedocs.io/en/stable/auto_examples/combine/plot_comparison_combine.html # - http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/#example-1-decision-regions-in-2d # + id="TcpoWCUq5xNV" colab_type="code" outputId="7a70a23f-a718-4830-fcf3-ed63f4dcfd04" colab={"base_uri": "https://localhost:8080/", "height": 556} # %matplotlib inline import matplotlib.pyplot as plt from sklearn.datasets import make_classification from sklearn.metrics import accuracy_score, classification_report from sklearn.linear_model import LogisticRegression from mlxtend.plotting import plot_decision_regions #1. Generate data # Try re-running the cell with different values for these parameters n_samples = 1000 weights = (0.95, 0.05) class_sep = .5 X, y = make_classification(n_samples=n_samples, n_features=2, n_informative=2, n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1, weights=weights, class_sep=class_sep, random_state=0) # 2. Split data # Uses our custom train_validation_test_split function X_train, X_val, X_test, y_train, y_val, y_test = train_validation_test_split( X, y, train_size=0.8, val_size=0.1, test_size=0.1, random_state=1) # 3. 
Fit model # Try re-running the cell with different values for this parameter class_weight = {0:1, 1:15} model = LogisticRegression(solver='lbfgs', class_weight=class_weight) model.fit(X_train, y_train) # 4. Evaluate model y_pred = model.predict(X_val) print(classification_report(y_val, y_pred)) print('accuracy', accuracy_score(y_val, y_pred)) # 5. Visualize decision regions plt.figure(figsize=(10, 6)) plot_decision_regions(X_val, y_val, model, legend=0); # + [markdown] id="zrllN3yECsEN" colab_type="text" # Try re-running the cell above with different values for these four parameters: # - `n_samples` # - `weights` # - `class_sep` # - `class_balance` # # For example, with a 50% / 50% class distribution: # ``` # n_samples = 1000 # weights = (0.50, 0.50) # class_sep = 0.8 # class_balance = None # ``` # # With a 95% / 5% class distribution: # ``` # n_samples = 1000 # weights = (0.95, 0.05) # class_sep = 0.8 # class_balance = None # ``` # # With the same 95% / 5% class distribution, but changing the Logistic Regression's `class_balance` parameter to `'balanced'` (instead of its default `None`) # ``` # n_samples = 1000 # weights = (0.95, 0.05) # class_sep = 0.8 # class_balance = 'balanced' # ``` # # With the same 95% / 5% class distribution, but with different values for `class_balance`: # - `{0: 1, 1: 1}` _(equivalent to `None`)_ # - `{0: 1, 1: 2}` # - `{0: 1, 1: 10}` _(roughly equivalent to `'balanced'` for this dataset)_ # - `{0: 1, 1: 100}` # - `{0: 1, 1: 10000}` # # How do the evaluation metrics and decision region plots change? # + [markdown] id="5-3MS-jANssN" colab_type="text" # ## What you can do about imbalanced classes # + [markdown] id="2KwgStd-yUUr" colab_type="text" # [Learning from Imbalanced Classes](https://www.svds.com/tbt-learning-imbalanced-classes/) gives "a rough outline of useful approaches" : # # - Do nothing. Sometimes you get lucky and nothing needs to be done. 
You can train on the so-called natural (or stratified) distribution and sometimes it works without need for modification. # - Balance the training set in some way: # - Oversample the minority class. # - Undersample the majority class. # - Synthesize new minority classes. # - Throw away minority examples and switch to an anomaly detection framework. # - At the algorithm level, or after it: # - Adjust the class weight (misclassification costs). # - Adjust the decision threshold. # - Modify an existing algorithm to be more sensitive to rare classes. # - Construct an entirely new algorithm to perform well on imbalanced data. # # + [markdown] id="iO7kOZ2HN0EA" colab_type="text" # We demonstrated just one of these options: many scikit-learn classifiers have a `class_balance` parameter, which we can use to "adjust the class weight (misclassification costs)." # # The [imbalance-learn](https://github.com/scikit-learn-contrib/imbalanced-learn) library can be used to "oversample the minority class, undersample the majority class, or synthesize new minority classes." # # You can see how to "adjust the decision threshold" in a great blog post, [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415). # + [markdown] id="Xhh5TiW_X1_Q" colab_type="text" # ## Bank Marketing — getting started # # https://archive.ics.uci.edu/ml/datasets/Bank+Marketing # # The data is related with direct marketing campaigns of a Portuguese banking institution. The marketing campaigns were based on phone calls. Often, more than one contact to the same client was required, in order to access if the product (bank term deposit) would be ('yes') or not ('no') subscribed. 
# # bank-additional-full.csv with all examples (41188) and 20 inputs, **ordered by date (from May 2008 to November 2010)** # + [markdown] id="n18wVnuxY-xl" colab_type="text" # ### Download data # + id="S-oHbkK1X1h2" colab_type="code" outputId="70b06873-52d2-49b7-ea48-bd41992647f1" colab={"base_uri": "https://localhost:8080/", "height": 217} # !wget https://archive.ics.uci.edu/ml/machine-learning-databases/00222/bank-additional.zip # + id="1INLmiipZA-y" colab_type="code" cellView="both" outputId="9c185f54-d20d-40a4-93e1-9ffe7241fcd4" colab={"base_uri": "https://localhost:8080/", "height": 217} # !unzip bank-additional.zip # + id="HwWCY5XrZCWk" colab_type="code" outputId="71cc179d-4521-435c-a3c4-6361a1c5b971" colab={"base_uri": "https://localhost:8080/", "height": 35} # %cd bank-additional # + [markdown] id="Zf49DcHTZPdE" colab_type="text" # ### Load data, assign to X and y # + id="OwhVgENcZEwo" colab_type="code" colab={} import pandas as pd bank = pd.read_csv('bank-additional-full.csv', sep=';') X = bank.drop(columns='y') y = bank['y'] == 'yes' # + [markdown] id="lq1it0dnZlX3" colab_type="text" # ### Split data # + [markdown] id="P-FgY4pIaEXo" colab_type="text" # We want to do "model selection (hyperparameter optimization) and performance estimation" so we'll choose a validation method from the diagram's green box. # # There is no one "right" choice here, but I'll choose "3-way holdout method (train/validation/test split)." 
# # <img src="https://sebastianraschka.com/images/blog/2018/model-evaluation-selection-part4/model-eval-conclusions.jpg" width="600"> # # Source: https://sebastianraschka.com/blog/2018/model-evaluation-selection-part4.html # + [markdown] id="V74i3GDcZnkm" colab_type="text" # There's no one "right" choice here, but I'll choose to split by time, not with a random shuffle, based on this advice by [<NAME>]( # https://www.fast.ai/2017/11/13/validation-sets/): # > If your data is a time series, choosing a random subset of the data will be both too easy (you can look at the data both before and after the dates your are trying to predict) and not representative of most business use cases (where you are using historical data to build a model for use in the future). # # [According to UCI](https://archive.ics.uci.edu/ml/datasets/Bank+Marketing), this data is "ordered by date (from May 2008 to November 2010)" so if I don't shuffle it when splitting, then it will be split by time. # + id="-xnw-vfOamHH" colab_type="code" colab={} X_train, X_val, X_test, y_train, y_val, y_test = train_validation_test_split( X, y, shuffle=False) # + [markdown] id="12dWJxXabDxt" colab_type="text" # ## Bank Marketing — live coding! 
# + id="qAPOJu3uamrU" colab_type="code" colab={} import numpy as np majority_class = y_train.mode()[0] y_pred = np.full(shape=y_val.shape, fill_value=majority_class) # + id="JOhxFsbh19Fm" colab_type="code" outputId="afe67abe-1525-4dc4-a0cc-6eb393b73d73" colab={"base_uri": "https://localhost:8080/", "height": 35} y_val.shape, y_pred.shape # + id="e9UTw4DZ2BLp" colab_type="code" outputId="6a6e2d52-faff-4641-d759-2438c71965a6" colab={"base_uri": "https://localhost:8080/", "height": 35} from sklearn.metrics import accuracy_score accuracy_score(y_val, y_pred) # + id="TsxqI78B2SJ_" colab_type="code" outputId="f7a53fe3-63f6-4c82-cf51-43c8c42d646f" colab={"base_uri": "https://localhost:8080/", "height": 72} y_val.value_counts(normalize=True) # + id="FJkjkznO2aXq" colab_type="code" outputId="c4a1ccbd-8759-406d-d580-0dde0f811660" colab={"base_uri": "https://localhost:8080/", "height": 237} print(classification_report(y_val, y_pred)) # + id="0Xy4bVUu23yO" colab_type="code" outputId="f8b8dcf4-2a76-45cf-a3ae-c1428dc85ca8" colab={"base_uri": "https://localhost:8080/", "height": 35} from sklearn.metrics import roc_auc_score roc_auc_score(y_val, y_pred) # + id="FXrRxHec47rK" colab_type="code" outputId="55251434-a5e4-4ae4-e972-cae7af0f1a38" colab={"base_uri": "https://localhost:8080/", "height": 291} # !pip install category_encoders # + id="SpHY2Rxp4uw5" colab_type="code" outputId="08f3d09c-bdab-44bb-951f-06b308d90489" colab={"base_uri": "https://localhost:8080/", "height": 182} import category_encoders as ce from sklearn.preprocessing import StandardScaler from sklearn.pipeline import make_pipeline pipeline = make_pipeline( ce.OneHotEncoder(use_cat_names=True), StandardScaler(), LogisticRegression(solver='lbfgs') ) pipeline.fit(X_train, y_train) y_pred = pipeline.predict(X_val) # + id="t3ZBO81Y5iVi" colab_type="code" outputId="b58866ee-6a99-4641-b964-0ca3a22bdd03" colab={"base_uri": "https://localhost:8080/", "height": 35} accuracy_score(y_val, y_pred) # + id="-F_Ka_sF5pTd" 
colab_type="code" outputId="df35d485-1fd1-45e7-8c9e-ba23ffb082fe" colab={"base_uri": "https://localhost:8080/", "height": 35} roc_auc_score(y_val, y_pred) # + id="WFOrCPnh6_c2" colab_type="code" outputId="be968690-8a3e-43b0-fb61-cdf9bd2dd735" colab={"base_uri": "https://localhost:8080/", "height": 53} from sklearn.metrics import confusion_matrix confusion_matrix(y_val, y_pred) # + id="D8b-r1pn7Y03" colab_type="code" outputId="7c86deaa-e23d-4e03-bf9f-fab3407c3bd3" colab={"base_uri": "https://localhost:8080/", "height": 277} # %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns def confusion_viz(y_true, y_pred): matrix = confusion_matrix(y_true, y_pred) return sns.heatmap(matrix, annot=True, fmt=',', linewidths=1, linecolor='grey', square=True, xticklabels=['Predicted\nNO', 'Predicted\nYES'], yticklabels=['Actual\nNO', 'Actual\nYES']) confusion_viz(y_val, y_pred); # + id="KMTvDI4l9dkJ" colab_type="code" outputId="d2ed709f-0377-40d6-d32f-759d7c6202c8" colab={"base_uri": "https://localhost:8080/", "height": 146} pipeline = make_pipeline( ce.OneHotEncoder(use_cat_names=True), StandardScaler(), LogisticRegression(solver='lbfgs', class_weight='balanced') ) pipeline.fit(X_train, y_train) y_pred = pipeline.predict(X_val) # + id="caEUzcQP9oKz" colab_type="code" outputId="c7829f08-3bf7-428c-c53f-ddc464bc6540" colab={"base_uri": "https://localhost:8080/", "height": 277} confusion_viz(y_val, y_pred); # + [markdown] id="3Av6q6o0A3x8" colab_type="text" # Recall score from confusion matrix # + id="gmUiasQvA6Cl" colab_type="code" outputId="88d47026-59fb-48c1-9011-a4bfc30953bc" colab={"base_uri": "https://localhost:8080/", "height": 35} 408 / (408 + 240) # + id="1VvpvoBFBPcy" colab_type="code" outputId="d3652dd0-cdfc-467b-d6c2-a3b98ca39182" colab={"base_uri": "https://localhost:8080/", "height": 35} from sklearn.metrics import recall_score recall_score(y_val, y_pred) # + [markdown] id="r1VRJ1CFA0dD" colab_type="text" # Precision score from confusion matrix # + 
id="g38-Tv7MAlwv" colab_type="code" outputId="a600e630-3f4e-4528-9751-f5acf45ee2ef" colab={"base_uri": "https://localhost:8080/", "height": 35} 408 / (408 + 395) # + id="ZlDqHeJkAeCr" colab_type="code" outputId="61548693-38e1-43e0-9fbf-35478888c0c7" colab={"base_uri": "https://localhost:8080/", "height": 35} from sklearn.metrics import precision_score precision_score(y_val, y_pred) # + [markdown] id="yHrF3zYfAauM" colab_type="text" # Accuracy score from confusion matrix # + id="EBkfXkbDAPfu" colab_type="code" outputId="893b3f80-0038-41d4-c9f6-aee9edd95f3e" colab={"base_uri": "https://localhost:8080/", "height": 35} (408 + 3076) / (3076 + 395 + 240 + 408) # + id="DGyviEFq-nj_" colab_type="code" outputId="01dceb8a-57af-4a01-af47-50c87962b654" colab={"base_uri": "https://localhost:8080/", "height": 35} accuracy_score(y_val, y_pred) # + [markdown] id="5XNSCdRzAH-L" colab_type="text" # Class (im)balance can be calculated from the confusion matrix # + id="kCgjUPdM_KfD" colab_type="code" outputId="c9e27f89-009c-4058-ff24-ff29035e1e52" colab={"base_uri": "https://localhost:8080/", "height": 35} (3076 + 395) / (3076 + 395 + 240 + 408) # + id="JePUQuYS_8gK" colab_type="code" outputId="9b5beb4e-ed7b-4aaf-d38c-a5aa15169928" colab={"base_uri": "https://localhost:8080/", "height": 72} y_val.value_counts(normalize=True) # + id="moM1jQxD-2HH" colab_type="code" outputId="6435ba58-4830-472c-e672-cd8bf41e075f" colab={"base_uri": "https://localhost:8080/", "height": 35} roc_auc_score(y_val, y_pred) # + [markdown] id="P_XjBTW5SBwZ" colab_type="text" # # ASSIGNMENT options # # Replicate code from the lesson or other examples. 
[Do it "the hard way" or with the "Benjamin Franklin method."](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit) # # Work with one of these datasets # - [Bank Marketing](https://archive.ics.uci.edu/ml/datasets/Bank+Marketing) # - [Synthetic Financial Dataset For Fraud Detection](https://www.kaggle.com/ntnu-testimon/paysim1) # - Any imbalanced binary classification dataset # # Continue improving your model. Measure validation performance with a variety of classification metrics, which could include: # - Accuracy # - Precision # - Recall # - F1 # - ROC AUC # # Try one of the other options mentioned for imbalanced classes # - The [imbalance-learn](https://github.com/scikit-learn-contrib/imbalanced-learn) library can be used to "oversample the minority class, undersample the majority class, or synthesize new minority classes." # - You can see how to "adjust the decision threshold" in a great blog post, [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415). 
# + id="89ZPlDeZwl01" colab_type="code" outputId="7365b5cc-f705-44d8-bafc-78710d3371c6" colab={"base_uri": "https://localhost:8080/", "height": 217} # !wget https://archive.ics.uci.edu/ml/machine-learning-databases/00445/Absenteeism_at_work_AAA.zip # + id="VP0zhzOJwrSZ" colab_type="code" outputId="99924dd1-7f11-4b30-c301-ea5f3b5c0e21" colab={"base_uri": "https://localhost:8080/", "height": 126} # !unzip Absenteeism_at_work_AAA.zip # + id="9MZrI6YDwvyo" colab_type="code" outputId="392aa4a8-4094-49b7-b851-ac43937541d2" colab={"base_uri": "https://localhost:8080/", "height": 278} df = pd.read_csv('Absenteeism_at_work.csv', sep=';') pd.set_option('max_columns', 100) df = df.drop('ID', axis=1) df.head() # + id="ZsER1gBhw7Ng" colab_type="code" outputId="51de3998-e450-4d68-ef8f-8055518e4769" colab={"base_uri": "https://localhost:8080/", "height": 372} df = df.rename(columns={'Son': 'Children'}) df.describe() # + id="T_bNq9wZxpXe" colab_type="code" outputId="6ef76452-74dc-45d1-d362-0cf561fbdda7" colab={"base_uri": "https://localhost:8080/", "height": 35} X = df.drop(columns='Absenteeism time in hours') y = df['Absenteeism time in hours'] # Uses our custom train_validation_test_split function X_train, X_val, X_test, y_train, y_val, y_test = train_validation_test_split( X, y, train_size=0.8, val_size=0.1, test_size=0.1, random_state=42) X_train.shape, X_val.shape, X_test.shape, y_train.shape, y_val.shape, y_test.shape # + id="YoGPNu7w7F-Z" colab_type="code" outputId="38b5d13f-bb6a-4f7c-eee3-9aa26c8c88b4" colab={"base_uri": "https://localhost:8080/", "height": 53} median = y_train.median() mode = y_train.mode()[0] y_pred = np.full(shape=y_val.shape, fill_value=mode) y_train.mode() # + id="zNQS304P7NLi" colab_type="code" outputId="d9b9c98e-d434-491a-a321-3dbbc6bbcb73" colab={"base_uri": "https://localhost:8080/", "height": 35} y_pred.shape, y_val.shape # + id="3b7wWl2N7n2K" colab_type="code" outputId="549f5afa-b53b-46de-fa3b-1510e66eff01" colab={"base_uri": 
"https://localhost:8080/", "height": 35} all(y_pred == mode) # + id="KDbvQ-kY7th7" colab_type="code" outputId="fa42537a-b9ee-44f2-d20f-0355bafc4a30" colab={"base_uri": "https://localhost:8080/", "height": 400} from sklearn.metrics import accuracy_score print(classification_report(y_val, y_pred)) print('accuracy', accuracy_score(y_val, y_pred)) # + id="pvOE9JVd71S7" colab_type="code" outputId="234c0f5f-6987-4819-88b8-18d5a345b651" colab={"base_uri": "https://localhost:8080/", "height": 217} y_val.value_counts(normalize=True) # + id="IFG3ZZzC9IpR" colab_type="code" outputId="443f2d50-d92e-422e-c811-aaf829ff2fd7" colab={"base_uri": "https://localhost:8080/", "height": 473} from sklearn.linear_model import LogisticRegression model = LogisticRegression() model.fit(X_train, y_train) y_pred = model.predict(X_val) print(classification_report(y_val, y_pred)) print('accuracy', accuracy_score(y_val, y_pred)) # + [markdown] id="I2c34OEwDPUP" colab_type="text" # F1 score went from a baseline of 10% guessing the mode, to 39% with logistic regression.
LS_DS_242_Validate_classification_problems_LIVE_LESSON.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Building the prediction model
#

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

pwd

df = pd.read_csv("../../../sentimentClean.csv")
df.drop(["Unnamed: 0"], axis=1, inplace=True)
df.head(4)

# Encode the sentiment labels as integers: positive -> 1, negative -> -1.
df["sentimiento"] = df["sentimiento"].replace({"positivo": 1})
df["sentimiento"] = df["sentimiento"].replace({"negativo": -1})

df.head(2)

# For this analysis the reviews are used as the input. With this we try to
# see whether the model classifies them as positive or negative. A logistic
# regression model will be used.


# +
# Remove punctuation marks from the text.
def quita_puntuacion(texto):
    """Return *texto* with ?, ., ;, :, ! and double-quote characters removed."""
    final = "".join(u for u in texto if u not in ("?", ".", ";", ":", "!", '"'))
    return final


df["Text"] = df["Text"].apply(quita_puntuacion)
# Drop rows with a missing summary *before* cleaning that column, so
# quita_puntuacion never receives NaN.
df = df.dropna(subset=["Summary"])
df["Summary"] = df["Summary"].apply(quita_puntuacion)
# -

df_nuevo = df[["Summary", "sentimiento"]]
df_nuevo

# +
# Splitting the sets: 80% train, 20% test.
# BUG FIX: the original drew from np.random.randn (standard normal), whose
# values are unbounded and for which the 0.8 threshold does not correspond
# to an 80/20 split. np.random.rand draws uniformly from [0, 1), so the 0.8
# cut-off yields the intended proportions. The test mask also used >= 0.8,
# which would place rows equal to exactly 0.8 in *both* sets; > 0.8 keeps
# the two sets disjoint.
index = df.index
df["random"] = np.random.rand(len(index))
# -

train = df[df["random"] <= 0.8]
test = df[df["random"] > 0.8]

# ## Creating a bag of words
#
# Next, we use a count vectorizer from the Scikit-learn library.
#
# This transforms the text of our dataframe into a bag-of-words model,
# holding a sparse matrix of integers. The number of occurrences of each
# word is counted and printed.
#
# We need to convert the text into a bag-of-words model because the logistic
# regression algorithm cannot understand raw text.
#

# +
from sklearn.feature_extraction.text import CountVectorizer

# Regular expression defining what counts as a token.
vectorizer = CountVectorizer(token_pattern=r'\b\w+\b')
train_matrix = vectorizer.fit_transform(train["Summary"])
test_matrix = vectorizer.transform(test["Summary"])
# -

X_train = train_matrix
X_test = test_matrix
y_train = train['sentimiento']
y_test = test['sentimiento']

# Importing logistic regression.
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(max_iter=1000)

X_train.shape, X_test.shape

# Training
lr.fit(X_train, y_train)

# Creating the predictions
predictions = lr.predict(X_test)

# Checking the accuracy
from sklearn.metrics import confusion_matrix, classification_report
new = np.asarray(y_test)
confusion_matrix(predictions, y_test)

# NOTE(review): plot_confusion_matrix was deprecated in scikit-learn 1.0 and
# removed in 1.2; on newer versions use
# ConfusionMatrixDisplay.from_estimator(lr, X_test, y_test) instead —
# confirm the environment's sklearn version.
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(lr, X_test, y_test)
plt.show()

print(classification_report(predictions, y_test))
Supervisados/SentimentAmazon/SentimentAnalisis-Modelo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import pyspark
from pyspark import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql import Window
import pyspark.sql.functions as F
from pyspark.ml.feature import VectorAssembler, StandardScaler, OneHotEncoder, StringIndexer
from pyspark.ml.classification import LogisticRegression, GBTClassifier, RandomForestClassifier
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.ml import Pipeline
from mmlspark.lightgbm import LightGBMClassifier

# Local Spark session with the MMLSpark jar on the classpath.
spark = SparkSession\
    .builder\
    .appName("Antifraud")\
    .master("local[*]")\
    .config("spark.jars", "/mnt/d/git/learning_spark/spark_ml/mmlspark_2.11-0.18.1.jar")\
    .getOrCreate()

# +
file_path = "/mnt/d/git/learning_spark/spark_ml/data/creditcard.csv"

# CSV options
infer_schema = "true"
first_row_is_header = "true"
delimiter = ","

# BUG FIX: the original call only passed header=True and silently ignored
# the infer_schema and delimiter options declared above, leaving every
# column typed as string. Pass all three explicitly.
df = spark.read.csv(
    file_path,
    header=first_row_is_header,
    inferSchema=infer_schema,
    sep=delimiter,
)
# -

df.count()

# BUG FIX: `sc` was never defined in this notebook — there is no implicit
# SparkContext outside the pyspark shell. Take it from the active session.
sc = spark.sparkContext
words = sc.parallelize(
    ["This", "SPARK", "cluster", "seems", "to", "work", "just", "fine!"]
)


def f(x):
    # Print one RDD element. NOTE(review): with foreach the print runs on
    # the executors, so on a real cluster the output appears in worker
    # logs, not in this notebook — confirm that is the intent.
    print(x)


fore = words.foreach(f)
spark_ml/antifraud.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
from pathlib import Path
import sys
sys.path.append('../src')

import numpy as np
from matplotlib import pyplot as plt

from nlkda.data.loader import get_data, DatasetEnum
from nlkda.models.base import MonotonicityWrapper
from nlkda.models.utils import load_model_from_directory
from nlkda.settings import K_MAX
# -


def plot_bounds(lower, upper, lower_mono, upper_mono, real, i, f_name):
    """Plot original vs. improved distance bounds for one data point.

    Draws, over k = 1..K_MAX, the original and the monotonicity-improved
    lower/upper bound curves for row *i* together with the real values,
    shades the area gained by the improvement, and writes the figure to
    ``<f_name>.pdf``.
    """
    ks = np.arange(1, K_MAX + 1)
    line_lower, = plt.plot(ks, lower[i, :], label='original lower',
                           ls="dashed", color="deepskyblue", zorder=99)
    line_upper, = plt.plot(ks, upper[i, :], label='original upper',
                           ls="dashed", color="orange", zorder=99)
    plt.plot(ks, lower_mono[i, :], label='improved lower', color="blue", zorder=99)
    plt.plot(ks, upper_mono[i, :], label='improved upper', color="red", zorder=99)
    plt.plot(ks, real[i, :], label='Real', color="black", zorder=99, ls="dotted")
    # Shade the gap between original and improved bounds.
    plt.fill_between(ks, lower[i, :], lower_mono[i, :],
                     color=line_lower.get_color(), alpha=.4)
    plt.fill_between(ks, upper[i, :], upper_mono[i, :],
                     color=line_upper.get_color(), alpha=.4)
    plt.xlim(1, K_MAX)
    plt.ylim(0, None)
    plt.grid(which='minor', alpha=0.4)
    plt.grid(which='major', alpha=0.5)
    plt.xlabel('k')
    plt.ylabel('distance')
    plt.legend()
    plt.tight_layout()
    plt.savefig(f'{f_name}.pdf')


# +
data_root = Path("/tmp/data/")
exp = data_root / 'experiments'
ds = "OL"
run_id = ''  # TODO: fill in run_id
out_dir = exp / run_id

x, y, dist = get_data(DatasetEnum(ds), data_root)

# Bounds model: reload the trained model and its cached predictions.
model = load_model_from_directory(out_dir)
pred = np.load(out_dir / 'pred_k_dist.npy')

# First bounds variant (flags True, True — presumably the per-point '_p'
# variant, judging by the suffixes; confirm against the model API),
# before and after applying the monotonicity wrapper.
model.set_min_max(pred, y, True, True)
lower_p, upper_p = model.predict_bounds(pred, True, True)
mono_wrapper = MonotonicityWrapper(base=model)
mono_lower_p, mono_upper_p = mono_wrapper.predict_bounds(pred, True, True)

# Second bounds variant (flags True, False — the '_k' variant),
# before and after applying the monotonicity wrapper.
model.set_min_max(pred, y, True, False)
lower_k, upper_k = model.predict_bounds(pred, True, False)
mono_wrapper_p = MonotonicityWrapper(base=model)
mono_lower_k, mono_upper_k = mono_wrapper_p.predict_bounds(pred, True, False)

# Combine both variants: keep the tightest lower and tightest upper bound.
lower_p_k = np.maximum(mono_lower_k, mono_lower_p)
upper_p_k = np.minimum(mono_upper_k, mono_upper_p)
# -

plot_bounds(
    lower=lower_k,
    upper=upper_k,
    lower_mono=mono_lower_k,
    upper_mono=mono_upper_k,
    real=y,
    i=0,
    f_name='improve_k',
)
notebooks/01_Evaluate_Bounds.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Working with code cells # # In this notebook you'll get some experience working with code cells. # # First, run the cell below. As I mentioned before, you can run the cell by selecting it the click the "run cell" button above. However, it's easier to run it by pressing **Shift + Enter** so you don't have to take your hands away from the keyboard. # Select the cell, then press Shift + Enter 3**2 # Shift + Enter runs the cell then selects the next cell or creates a new one if necessary. You can run a cell without changing the selected cell by pressing **Control + Enter**. # # The output shows up below the cell. It's printing out the result just like in a normal Python shell. Only the very last result in a cell will be printed though. Otherwise, you'll need to use `print()` print out any variables. # # > **Exercise:** Run the next two cells to test this out. Think about what you expect to happen, then try it. 3**2 4**2 print(3**2) 4**2 # Now try assigning a value to a variable. mindset = 'growth' # There is no output, `'growth'` has been assigned to the variable `mindset`. All variables, functions, and classes created in a cell are available in every other cell in the notebook. # # What do you think the output will be when you run the next cell? Feel free to play around with this a bit to get used to how it works. mindset[:4] # ## Code completion # # When you're writing code, you'll often be using a variable or function often and can save time by using code completion. That is, you only need to type part of the name, then press **tab**. # # > **Exercise:** Place the cursor at the end of `mind` in the next cell and press **tab** mindset # Here, completing `mind` writes out the full variable name `mindset`. 
If there are multiple names that start the same, you'll get a menu, see below. # Run this cell mindful = True # Complete the name here again, choose one from the menu mindset # Remember that variables assigned in one cell are available in all cells. This includes cells that you've previously run and cells that are above where the variable was assigned. Try doing the code completion on the cell third up from here. # # Code completion also comes in handy if you're using a module but don't quite remember which function you're looking for or what the available functions are. I'll show you how this works with the [random](https://docs.python.org/3/library/random.html) module. This module provides functions for generating random numbers, often useful for making fake data or picking random items from lists. # Run this import random # > **Exercise:** In the cell below, place the cursor after `random.` then press **tab** to bring up the code completion menu for the module. Choose `random.randint` from the list, you can move through the menu with the up and down arrow keys. random.randint # Above you should have seen all the functions available from the random module. Maybe you're looking to draw random numbers from a [Gaussian distribution](https://en.wikipedia.org/wiki/Normal_distribution), also known as the normal distribution or the "bell curve". # # ## Tooltips # # You see there is the function `random.gauss` but how do you use it? You could check out the [documentation](https://docs.python.org/3/library/random.html), or just look up the documentation in the notebook itself. # # > **Exercise:** In the cell below, place the cursor after `random.gauss` the press **shift + tab** to bring up the tooltip. random.gauss(mu sigma) # You should have seen some simple documentation like this: # # Signature: random.gauss(mu, sigma) # Docstring: # Gaussian distribution. # # The function takes two arguments, `mu` and `sigma`. 
These are the standard symbols for the mean and the standard deviation, respectively, of the Gaussian distribution. Maybe you're not familiar with this though, and you need to know what the parameters actually mean. This will happen often, you'll find some function, but you need more information. You can show more information by pressing **shift + tab** twice. # # > **Exercise:** In the cell below, show the full help documentation by pressing **shift + tab** twice. random.gauss # You should see more help text like this: # # mu is the mean, and sigma is the standard deviation. This is # slightly faster than the normalvariate() function.
play/working-with-code-cells.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## _*H2 excited states from NumPyEigensolver*_
#
# This notebook demonstrates using Qiskit Chemistry to plot graphs of the ground state and excited state energies of the Hydrogen (H2) molecule over a range of inter-atomic distances. This notebook utilizes the fact that when two_qubit_reduction is used with the parity mapping on H2 the resultant hamiltonian solely contains the 4 states we are looking for.
#
# This notebook has been written to use the PYSCF chemistry driver.

# +
import numpy as np
import pylab
from qiskit.aqua.algorithms import NumPyEigensolver
from qiskit.chemistry.drivers import PySCFDriver
from qiskit.chemistry.core import Hamiltonian, QubitMappingType

# TODO: Ignore warnings for now until the algorithm result handling is changed.
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)

molecule = 'H .0 .0 -{0}; H .0 .0 {0}'
start = 0.5  # Start distance
by = 0.5  # How much to increase distance by
steps = 20  # Number of steps to increase by
energies = np.empty([4, steps + 1])
distances = np.empty(steps + 1)

print('Processing step __', end='')
for i in range(steps + 1):
    print('\b\b{:2d}'.format(i), end='', flush=True)
    d = start + i * by / steps
    # The molecule string places the two H atoms at +/- d/2 on the z axis,
    # so the inter-atomic distance is d.
    driver = PySCFDriver(molecule.format(d / 2), basis='sto3g')
    qmolecule = driver.run()
    operator = Hamiltonian(qubit_mapping=QubitMappingType.PARITY,
                           two_qubit_reduction=True)
    qubit_op, aux_ops = operator.run(qmolecule)
    # k=4: ground state plus the three excited states present after reduction.
    result = NumPyEigensolver(qubit_op, k=4).run()
    _, result = operator.process_algorithm_result(result)
    energies[:, i] = result['energies']
    distances[i] = d
print(' --- complete')

print('Distances: ', distances)
print('Energies:', energies)
# -

pylab.rcParams['figure.figsize'] = (12, 8)
for j in range(energies.shape[0]):
    label = 'Ground state' if j == 0 else 'Excited state {}'.format(j)
    pylab.plot(distances, energies[j], label=label)
pylab.xlabel('Interatomic distance')
pylab.ylabel('Energy')
pylab.title('H2 Ground and Excited States')
pylab.legend(loc='upper right')
pylab.show()

# The above plot has all the states. Below we plot them individually. With each plot having its own y-axis scale the energy change over distance change is more evident, particularly the ground state curve which is very flattened above by the scale.

pylab.rcParams['figure.figsize'] = (6, 4)
prop_cycle = pylab.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
for j in range(energies.shape[0]):
    # Open a fresh figure per state so each plot really is individual;
    # without this, all curves accumulate into a single axes when the
    # notebook is run as a plain script.
    pylab.figure()
    label = 'Ground state' if j == 0 else 'Excited state {}'.format(j)
    pylab.plot(distances, energies[j], color=colors[j], label=label)
    pylab.xlabel('Interatomic distance')
    pylab.ylabel('Energy')
    pylab.title('H2 {}'.format(label))
    pylab.legend(loc='upper right')
    pylab.show()
chemistry/h2_excited_states.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Head Pose Image Database # # http://www-prima.inrialpes.fr/perso/Gourier/Faces/HPDatabase.html # ## このデータベースを利用する目的: # # さまざまな顔の向きで顔を検出できるかどうかを評価する。 # # 各pitch, yaw の組み合わせに対して、30枚の画像があり、 # 顔向きごとの検出率を評価できる。 # # ## 評価上の注意点: # # - 背景がフラットな画像になっているので、背景が込み入っている時の検出率を評価できない。 # - 被験者が欧米人に偏っている。 # - 照明条件の多様性がない。 # - 表情の変化が少ない(口を開けたりはしていない) # # %matplotlib inline import pandas as pd # + import glob dataset = "headPose" names = glob.glob("headPose/Person*/*.jpg") # - names.sort() import HaarFrontal as faceDetector faceDetector.processDatabase(dataset, names) # # headPose dataset の検出処理後のデータ解析 # + import pandas as pd import readheadPose df = pd.read_csv("log_headPose_0.csv") pitches = [] yaws = [] angles = [] for index, rows in df.iterrows(): # print index, rows["name"] pitch, yaw = readheadPose.getAngles(rows["name"]) pitches.append(float(pitch)) yaws.append(float(yaw)) angles.append("%s_%s" % (pitch, yaw)) df["angles"] = angles df["pitch"] = pitches df["yaws"] = yaws print df.groupby("num").count() # - print "検出数の分布(比率)" print df.groupby("num").count()/float(df.shape[0]) print "検出数のヒストグラム" ax = df["num"].hist(bins=11) ax.set_xlabel("number of detections") ax.set_ylabel("Frequency") # ## HaaR Cascade の検出器のついて # haarcascade_frontalface_default.xml # を用いているので、正面向き以外に対しては検出性能に限界がある。 # # ## 検出数の分布を角度の組み合わせごとに集約する dfSum = df.groupby("angles").sum(); dfSum = dfSum.drop("pitch", axis=1); dfSum = dfSum.drop("yaws", axis=1); dfSum dfMean = df.groupby("angles").mean() dfMean dfMean = dfMean.drop("pitch", axis=1); dfMean = dfMean.drop("yaws", axis=1); dfMean # 降べきの順に表示させる。 # そうすると、どの角度範囲までが検出率が高いのかがわかる。 dfMean.sort_values("truePositives", ascending=False) df["num"].mean() print df[df["num"]==0] # pitch が 90 のデータは検出されていないものが多いことがわかる。 print df[df["pitch"]==90] print 
df[df["pitch"]==90].groupby("num").count() print "num 0で表示されたcount だけ未検出だったことがわかる" # # print df[df["num"]==2] print "検出数が2つ以上あるものは、誤検出を含んでいることがわかる" df["truePositives"].hist() print "顔がひとつしかない画像を入力にしているので、検出された時は1、未検出の時は0になる" df.shape print df.groupby("truePositives").count() df[df["truePositives"]==0]
HaarProfile/haarCascade_headPose.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.7 64-bit
#     name: python3
# ---

import numpy as np
import pandas as pd

# Load the movie metadata and ratings files.
movies = pd.read_csv("movies.csv")
ratings = pd.read_csv("ratings.csv")

ratings.drop(['timestamp'], axis=1, inplace=True)
ratings.head()


# +
# Build the movieId -> title mapping once: O(#movies) total, instead of
# scanning the whole `movies` frame for every single rating row.
# Assumes movieId is unique in movies.csv (true for MovieLens exports) —
# with duplicates the original lookup kept the first title, a dict keeps
# the last.
_title_by_id = dict(zip(movies['movieId'], movies['title']))


def replace_name(x):
    """Return the title of the movie whose ``movieId`` equals ``x``.

    Raises ``KeyError`` for an unknown id (the original frame scan raised
    ``IndexError`` in that case).
    """
    return _title_by_id[x]


ratings.movieId = ratings.movieId.map(replace_name)
# -

ratings

# user x movie matrix of ratings (NaN where a user did not rate a movie)
M = ratings.pivot_table(index=['userId'], columns=['movieId'], values='rating')

M.shape

M

# Pearson's R
projeto.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] papermill={"duration": 0.013366, "end_time": "2020-04-28T12:13:02.514451", "exception": false, "start_time": "2020-04-28T12:13:02.501085", "status": "completed"} tags=[] # # COVID-19 Deaths, Cases & Recovery Per Capita # > Compare deaths and total cases adjusting for population size. # # - comments: true # - author: <NAME>. <NAME> & <NAME> # - categories: [growth, compare, interactive] # - hide: false # - image: images/covid-permillion-trajectories.png # - image: images/highest-recovery-and-death-rates.png # - permalink: /covid-compare-permillion/ # + papermill={"duration": 0.676784, "end_time": "2020-04-28T12:13:03.201819", "exception": false, "start_time": "2020-04-28T12:13:02.525035", "status": "completed"} tags=[] #hide import numpy as np import pandas as pd import matplotlib.pyplot as plt import altair as alt from load_covid_data import load_individual_timeseries # %config InlineBackend.figure_format = 'retina' chart_width = 550 chart_height= 400 # + [markdown] papermill={"duration": 0.011934, "end_time": "2020-04-28T12:13:03.223945", "exception": false, "start_time": "2020-04-28T12:13:03.212011", "status": "completed"} tags=[] # ## Deaths Per Million Of Inhabitants # + [markdown] papermill={"duration": 0.009394, "end_time": "2020-04-28T12:13:03.242807", "exception": false, "start_time": "2020-04-28T12:13:03.233413", "status": "completed"} tags=[] # Since reaching at least 1 death per million # # > Tip: Click (Shift+ for multiple) on countries in the legend to filter the visualization. 
# + papermill={"duration": 5.709519, "end_time": "2020-04-28T12:13:08.961828", "exception": false, "start_time": "2020-04-28T12:13:03.252309", "status": "completed"} tags=[] #hide data = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv", error_bad_lines=False) data = data.drop(columns=["Lat", "Long"]) data = data.melt(id_vars= ["Province/State", "Country/Region"]) data = pd.DataFrame(data.groupby(['Country/Region', "variable"]).sum()) data.reset_index(inplace=True) data = data.rename(columns={"Country/Region": "location", "variable": "date", "value": "total_cases"}) data['date'] =pd.to_datetime(data.date) data = data.sort_values(by = "date") data.loc[data.location == "US","location"] = "United States" data.loc[data.location == "Korea, South","location"] = "South Korea" data_pwt = pd.read_stata("https://www.rug.nl/ggdc/docs/pwt91.dta") filter1 = data_pwt["year"] == 2017 data_pop = data_pwt[filter1] data_pop = data_pop[["country","pop"]] data_pop.loc[data_pop.country == "Republic of Korea","country"] = "South Korea" data_pop.loc[data_pop.country == "Iran (Islamic Republic of)","country"] = "Iran" # per habitant data_pc = data.copy() countries = ["Italy", "Spain", "France", "United Kingdom", "Germany", "Portugal", "United States", "Singapore", "South Korea", "Japan", "Brazil", "Iran", 'Netherlands', 'Belgium', 'Sweden', 'Switzerland', 'Norway', 'Denmark', 'Austria', 'Slovenia', 'Greece'] data_countries = [] data_countries_pc = [] # compute per habitant for i in countries: data_pc.loc[data_pc.location == i,"total_cases"] = data_pc.loc[data_pc.location == i,"total_cases"]/float(data_pop.loc[data_pop.country == i, "pop"]) # get each country time series filter1 = data_pc["total_cases"] > 1 for i in countries: filter_country = data_pc["location"]== i data_countries_pc.append(data_pc[filter_country & filter1]) # + papermill={"duration": 4.059687, "end_time": 
"2020-04-28T12:13:13.031976", "exception": false, "start_time": "2020-04-28T12:13:08.972289", "status": "completed"} tags=[] #hide_input # Stack data to get it to Altair dataframe format data_countries_pc2 = data_countries_pc.copy() for i in range(0,len(countries)): data_countries_pc2[i] = data_countries_pc2[i].reset_index() data_countries_pc2[i]['n_days'] = data_countries_pc2[i].index data_countries_pc2[i]['log_cases'] = np.log(data_countries_pc2[i]["total_cases"]) data_plot = data_countries_pc2[0] for i in range(1, len(countries)): data_plot = pd.concat([data_plot, data_countries_pc2[i]], axis=0) data_plot["trend_2days"] = np.log(2)/2*data_plot["n_days"] data_plot["trend_4days"] = np.log(2)/4*data_plot["n_days"] data_plot["trend_12days"] = np.log(2)/12*data_plot["n_days"] data_plot["trend_2days_label"] = "Doubles every 2 days" data_plot["trend_4days_label"] = "Doubles evey 4 days" data_plot["trend_12days_label"] = "Doubles every 12 days" # Plot it using Altair source = data_plot scales = alt.selection_interval(bind='scales', zoom=False) selection = alt.selection_multi(fields=['location'], bind='legend') base = alt.Chart(source, title = "COVID-19 Deaths Per Million of Inhabitants").encode( x = alt.X('n_days:Q', title = "Days passed since reaching 1 death per million"), y = alt.Y("log_cases:Q",title = "Log of deaths per million"), color = alt.Color('location:N', legend=alt.Legend(title="Country", labelFontSize=15, titleFontSize=17), scale=alt.Scale(scheme='tableau20')), opacity = alt.condition(selection, alt.value(1), alt.value(0.1)) ) lines = base.mark_line().add_selection( scales ).add_selection( selection ).properties( width=chart_width, height=chart_height ) trend_2d = alt.Chart(source).encode( x = "n_days:Q", y = alt.Y("trend_2days:Q", scale=alt.Scale(domain=(0, max(data_plot["log_cases"])))), ).mark_line(color="grey", strokeDash=[3,3]) labels = pd.DataFrame([{'label': 'Doubles every 2 days', 'x_coord': 6, 'y_coord': 4}, {'label': 'Doubles every 4 days', 
'x_coord': 16, 'y_coord': 3.5}, {'label': 'Doubles every 12 days', 'x_coord': 25, 'y_coord': 1.8}, ]) trend_label = (alt.Chart(labels) .mark_text(align='left', dx=-55, dy=-15, fontSize=12, color="grey") .encode(x='x_coord:Q', y='y_coord:Q', text='label:N') ) trend_4d = alt.Chart(source).mark_line(color="grey", strokeDash=[3,3]).encode( x = "n_days:Q", y = alt.Y("trend_4days:Q", scale=alt.Scale(domain=(0, max(data_plot["log_cases"])))), ) trend_12d = alt.Chart(source).mark_line(color="grey", strokeDash=[3,3]).encode( x = "n_days:Q", y = alt.Y("trend_12days:Q", scale=alt.Scale(domain=(0, max(data_plot["log_cases"])))), ) plot1= ( (trend_2d + trend_4d + trend_12d + trend_label + lines) .configure_title(fontSize=20) .configure_axis(labelFontSize=15,titleFontSize=18) ) #plot1.save(("../images/covid-permillion-trajectories.png")) plot1 # + [markdown] papermill={"duration": 0.014971, "end_time": "2020-04-28T12:13:13.062500", "exception": false, "start_time": "2020-04-28T12:13:13.047529", "status": "completed"} tags=[] # Last Available Total Deaths By Country: # + papermill={"duration": 0.056931, "end_time": "2020-04-28T12:13:13.134308", "exception": false, "start_time": "2020-04-28T12:13:13.077377", "status": "completed"} tags=[] #hide_input label = 'Deaths' temp = pd.concat([x.copy() for x in data_countries_pc]).loc[lambda x: x.date >= '3/1/2020'] metric_name = f'{label} per Million' temp.columns = ['Country', 'date', metric_name] # temp.loc[:, 'month'] = temp.date.dt.strftime('%Y-%m') temp.loc[:, f'Log of {label} per Million'] = temp[f'{label} per Million'].apply(lambda x: np.log(x)) temp.groupby('Country').last() # + papermill={"duration": 4.959798, "end_time": "2020-04-28T12:13:18.110520", "exception": false, "start_time": "2020-04-28T12:13:13.150722", "status": "completed"} tags=[] #hide # Get data and clean it data = 
pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv", error_bad_lines=False) data = data.drop(columns=["Lat", "Long"]) data = data.melt(id_vars= ["Province/State", "Country/Region"]) data = pd.DataFrame(data.groupby(['Country/Region', "variable"]).sum()) data.reset_index(inplace=True) data = data.rename(columns={"Country/Region": "location", "variable": "date", "value": "total_cases"}) data['date'] =pd.to_datetime(data.date) data = data.sort_values(by = "date") data.loc[data.location == "US","location"] = "United States" data.loc[data.location == "Korea, South","location"] = "South Korea" # Population data (last year is 2017 which is what we use) data_pwt = pd.read_stata("https://www.rug.nl/ggdc/docs/pwt91.dta") filter1 = data_pwt["year"] == 2017 data_pop = data_pwt[filter1] data_pop = data_pop[["country","pop"]] data_pop.loc[data_pop.country == "Republic of Korea","country"] = "South Korea" data_pop.loc[data_pop.country == "Iran (Islamic Republic of)","country"] = "Iran" # per habitant data_pc = data.copy() # I can add more countries if needed countries = ["China", "Italy", "Spain", "France", "United Kingdom", "Germany", "Portugal", "United States", "Singapore","South Korea", "Japan", "Brazil","Iran"] data_countries = [] data_countries_pc = [] # compute per habitant for i in countries: data_pc.loc[data_pc.location == i,"total_cases"] = data_pc.loc[data_pc.location == i,"total_cases"]/float(data_pop.loc[data_pop.country == i, "pop"]) # get each country time series filter1 = data_pc["total_cases"] > 1 for i in countries: filter_country = data_pc["location"]== i data_countries_pc.append(data_pc[filter_country & filter1]) # + [markdown] papermill={"duration": 0.017036, "end_time": "2020-04-28T12:13:18.143953", "exception": false, "start_time": "2020-04-28T12:13:18.126917", "status": "completed"} tags=[] # ## Cases Per Million of Habitants # # Since reaching 
at least 1 case per million # # > Note: The following chart, "Cases Per Million of Habitants" is biased depending on how widely a country administers tests. Please read with caution. # # > Tip: Click (Shift+ for multiple) on countries in the legend to filter the visualization. # + papermill={"duration": 0.287105, "end_time": "2020-04-28T12:13:18.446815", "exception": false, "start_time": "2020-04-28T12:13:18.159710", "status": "completed"} tags=[] #hide_input # Stack data to get it to Altair dataframe format data_countries_pc2 = data_countries_pc.copy() for i in range(0,len(countries)): data_countries_pc2[i] = data_countries_pc2[i].reset_index() data_countries_pc2[i]['n_days'] = data_countries_pc2[i].index data_countries_pc2[i]['log_cases'] = np.log(data_countries_pc2[i]["total_cases"]) data_plot = data_countries_pc2[0] for i in range(1, len(countries)): data_plot = pd.concat([data_plot, data_countries_pc2[i]], axis=0) data_plot["trend_2days"] = np.log(2)/2*data_plot["n_days"] data_plot["trend_4days"] = np.log(2)/4*data_plot["n_days"] data_plot["trend_12days"] = np.log(2)/12*data_plot["n_days"] data_plot["trend_2days_label"] = "Doubles every 2 days" data_plot["trend_4days_label"] = "Doubles evey 4 days" data_plot["trend_12days_label"] = "Doubles every 12 days" # Plot it using Altair source = data_plot scales = alt.selection_interval(bind='scales', zoom=False) selection = alt.selection_multi(fields=['location'], bind='legend') base = alt.Chart(source, title = "COVID-19 Confirmed Cases Per Million of Inhabitants").encode( x = alt.X('n_days:Q', title = "Days passed since reaching 1 case per million"), y = alt.Y("log_cases:Q",title = "Log of confirmed cases per million"), color = alt.Color('location:N', legend=alt.Legend(title="Country", labelFontSize=15, titleFontSize=17), scale=alt.Scale(scheme='tableau20')), opacity = alt.condition(selection, alt.value(1), alt.value(0.1)) ).properties( width=chart_width, height=chart_height ) lines = base.mark_line().add_selection( 
scales ).add_selection( selection ) trend_2d = alt.Chart(source).encode( x = "n_days:Q", y = alt.Y("trend_2days:Q", scale=alt.Scale(domain=(0, max(data_plot["log_cases"])))), ).mark_line( strokeDash=[3,3], color="grey") labels = pd.DataFrame([{'label': 'Doubles every 2 days', 'x_coord': 10, 'y_coord': 6}, {'label': 'Doubles every 4 days', 'x_coord': 28, 'y_coord': 6}, {'label': 'Doubles every 12 days', 'x_coord': 45, 'y_coord': 3}, ]) trend_label = (alt.Chart(labels) .mark_text(align='left', dx=-55, dy=-15, fontSize=12, color="grey") .encode(x='x_coord:Q', y='y_coord:Q', text='label:N') ) trend_4d = alt.Chart(source).mark_line(color="grey", strokeDash=[3,3]).encode( x = "n_days:Q", y = alt.Y("trend_4days:Q", scale=alt.Scale(domain=(0, max(data_plot["log_cases"])))), ) trend_12d = alt.Chart(source).mark_line(color="grey", strokeDash=[3,3]).encode( x = "n_days:Q", y = alt.Y("trend_12days:Q", scale=alt.Scale(domain=(0, max(data_plot["log_cases"])))), ) ( (trend_2d + trend_4d + trend_12d + trend_label + lines) .configure_title(fontSize=20) .configure_axis(labelFontSize=15,titleFontSize=18) ) # + #hide_input #Added by <NAME> for Recovery and Death Rates # load data for infections, deaths, and recovered df_confirmed = load_individual_timeseries('confirmed') df_death = load_individual_timeseries('deaths') df_recovered = load_individual_timeseries('recovered') # clean the data df_confirmed = df_confirmed[~df_confirmed['country'].str.contains(' \(total\)')].drop(['state', 'type'], axis=1, ).reset_index() df_death = df_death[~df_death['country'].str.contains(' \(total\)')].drop(['state', 'type'], axis=1, ).reset_index() df_recovered = df_recovered[~df_recovered['country'].str.contains(' \(total\)')].drop(['state', 'type'], axis=1, ).reset_index() # get the data only for the latest date Latest_Date = df_confirmed['date'].max() df_confirmed = df_confirmed.loc[df_confirmed['date'] == Latest_Date] df_death = df_death.loc[df_death['date'] == Latest_Date] df_recovered = 
df_recovered.loc[df_recovered['date'] == Latest_Date] # remove column 'date' as it is no more required df_confirmed = df_confirmed.drop('date', axis=1) df_death = df_death.drop('date', axis=1) df_recovered = df_recovered.drop('date', axis=1) # aggregate data df_confirmed = (df_confirmed.sort_values(by=['country']) .groupby(['country']) .agg(sum)).reset_index() df_death = (df_death.sort_values(by=['country']) .groupby(['country']) .agg(sum)).reset_index() df_recovered = (df_recovered.sort_values(by=['country']) .groupby(['country']) .agg(sum)).reset_index() # rename the column for 'cases' in respective datasets df_confirmed = df_confirmed.rename(columns={"cases": "infections"}) df_death = df_death.rename(columns={"cases": "deaths"}) df_recovered = df_recovered.rename(columns={"cases": "recovered"}) # (inner)join the datasets for 'confirmed' and 'death' on 'country' df_Master = pd.merge(df_confirmed, df_death, how='inner', on='country', left_on=None, right_on=None, left_index=False, right_index=False, sort=True) df_Master = pd.merge(df_Master, df_recovered, how='inner', on='country', left_on=None, right_on=None, left_index=False, right_index=False, sort=True) # rename the Countries df_Master['country'] = df_Master['country'].replace({'Bosnia and Herzegovina':'Bosnia Herzegovina', 'Timor-Leste' :'East Timor', "Cote d'Ivoire" :'Ivory Coast', 'Burma' :'Myanmar', 'Korea, South' :'South Korea', 'Taiwan*' :'Taiwan', 'US' :'United States of America', 'Holy See' :'Vatican City'}) ## remove the countries that have less than 200 confirmed cases case_threshold = 200 keep_countries = df_Master.loc[(df_Master['infections'] > case_threshold)].country df_Master = df_Master.loc[df_Master['country'].isin(keep_countries)] # calculate 'deaths' and 'recovery' per 1000 infections df_Master['Deaths_per_1000'] = round(1000 * (df_Master['deaths']/df_Master['infections']), 2) df_Master['Recovered_per_1000'] = round(1000 * (df_Master['recovered']/df_Master['infections']), 2) # get countries 
with top 50 death rates df_top_death = df_Master.sort_values(by=['Deaths_per_1000'], ascending=False) df_top_death = df_top_death.head(50) # get countries with top 50 recovery rates df_top_recovered = df_Master.sort_values(by=['Recovered_per_1000'], ascending=False) df_top_recovered = df_top_recovered.head(50) # get rest of the countries df_ROW = df_Master.loc[~df_Master['country'].isin(df_top_death.country)] df_ROW = df_ROW.loc[~df_ROW['country'].isin(df_top_recovered.country)] # - # This graph shows the 50 countries that are facing highest rate of deaths per 1000 infections, across the world. As it is not so intuitive that the developed countries such as Belgium, France, Netherlands are among the top countries having highest death rate. #hide_input # make the bar-chart for countries on "deaths per 1000 infections" alt.Chart(df_top_death).mark_bar(color='Orange').encode( x= alt.Y('country:N', sort='-y', title="Countries"), y=alt.Y('Deaths_per_1000', title="Deaths per 1000 infected people") ).properties( title='Countries with top death rates') # Further, we move on to the countries with best recovery rate. The graph below shows 50 countries with highest rate of recovery. #hide_input # make the bar-chart for countries on "recovery per 1000 infections" alt.Chart(df_top_recovered).mark_bar(color='#00CC66').encode( x= alt.Y('country:N', sort='-y', title="Countries"), y=alt.Y('Recovered_per_1000', title="Recovered per 1000 infected people") ).properties( title='Countries with top recovery rates') # Finally we project the data from above graphs on to a scatter plot. The Orange and Green bubbles are from the above two bar charts. Additionally, the bubbles in Grey color are the rest of the countries. 
# + #hide_input # make the scatter plot for "Death Rates by Population Density" chart_high_recovery = alt.Chart(df_top_recovered).mark_circle(size=150, color='#00CC66').encode( x=alt.Y('Recovered_per_1000', title="Recovery per 1000 infections"), y=alt.Y('Deaths_per_1000', title="Deaths per 1000 infections"), tooltip=['country', 'Recovered_per_1000', 'Deaths_per_1000'] ).properties( width=700, height=450) chart_high_death = alt.Chart(df_top_death).mark_circle(size=150, color='Orange').encode( x=alt.Y('Recovered_per_1000', title="Recovery per 1000 infections"), y=alt.Y('Deaths_per_1000', title="Deaths per 1000 infections"), tooltip=['country', 'Recovered_per_1000', 'Deaths_per_1000'] ).properties( width=700, height=450) chart_rest = alt.Chart(df_ROW).mark_circle(size=150, color='#C3C3C3').encode( x=alt.Y('Recovered_per_1000'), y=alt.Y('Deaths_per_1000'), tooltip=['country', 'Recovered_per_1000', 'Deaths_per_1000'] ).properties( width=700, height=450) (chart_high_recovery + chart_high_death + chart_rest).interactive() # + [markdown] papermill={"duration": 0.021479, "end_time": "2020-04-28T12:13:18.489956", "exception": false, "start_time": "2020-04-28T12:13:18.468477", "status": "completed"} tags=[] # ## Appendix # + [markdown] papermill={"duration": 0.021776, "end_time": "2020-04-28T12:13:18.534057", "exception": false, "start_time": "2020-04-28T12:13:18.512281", "status": "completed"} tags=[] # Last Available Cases Per Million By Country: # + papermill={"duration": 0.057634, "end_time": "2020-04-28T12:13:18.613360", "exception": false, "start_time": "2020-04-28T12:13:18.555726", "status": "completed"} tags=[] #hide_input label = 'Cases' temp = pd.concat([x.copy() for x in data_countries_pc]).loc[lambda x: x.date >= '3/1/2020'] metric_name = f'{label} per Million' temp.columns = ['Country', 'date', metric_name] # temp.loc[:, 'month'] = temp.date.dt.strftime('%Y-%m') temp.loc[:, f'Log of {label} per Million'] = temp[f'{label} per Million'].apply(lambda x: np.log(x)) 
temp.groupby('Country').last()

# + [markdown] papermill={"duration": 0.02317, "end_time": "2020-04-28T12:13:18.660449", "exception": false, "start_time": "2020-04-28T12:13:18.637279", "status": "completed"} tags=[]
# This analysis was conducted by [Joao <NAME>](http://jbduarte.com). Assistance with creating the visualizations was provided by [<NAME>](https://twitter.com/HamelHusain).
# The highest Recovery and Death rates were added by [<NAME>](https://www.linkedin.com/in/arungupta21/).
# Relevant sources are listed below:
#
#
# 1. ["2019 Novel Coronavirus COVID-19 (2019-nCoV) Data Repository by Johns Hopkins CSSE"](https://systems.jhu.edu/research/public-health/ncov/) [GitHub repository](https://github.com/CSSEGISandData/COVID-19).
#
# 2. [Feenstra, <NAME>., <NAME> and <NAME> (2015), "The Next Generation of the Penn World Table" American Economic Review, 105(10), 3150-3182](https://www.rug.nl/ggdc/productivity/pwt/related-research)
_notebooks/2020-03-19-cases-and-deaths-per-million.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %pylab inline import numpy as np from sigvisa import Sigvisa from sigvisa.models.ttime import tt_predict from sigvisa.utils.geog import dist_km # + s = Sigvisa() slon, slat, selev = s.earthmodel.site_info("MKAR", 0)[:3] print slon, slat, selev print s.phaseids["Pg"] # + lons = np.linspace(0, 40.0, 500) ds = [dist_km((slon, slat), (slon+l, slat)) for l in lons] depth = 0 #tts = np.array([s.sigmodel.mean_travel_time(slon+l, slat, depth, 0, "MKAR", 3) for l in lons]) tts = np.zeros(lons.shape) tts1 = np.zeros(lons.shape) tts2 = np.zeros(lons.shape) tts3 = np.zeros(lons.shape) for i, l in enumerate(lons): try: tts[i] = s.sigmodel.mean_travel_time(slon+l, slat, depth, 0, "MKAR", 0) except: pass try: tts1[i] = s.sigmodel.mean_travel_time(slon+l, slat, depth, 0, "MKAR", 1) except: pass try: tts2[i] = s.sigmodel.mean_travel_time(slon+l, slat, depth, 0, "MKAR", 12) except: pass # - plt.plot(ds, tts) plt.plot(ds, tts1) plt.plot(ds, tts2) #plt.plot(ds, tts3-tts) #plt.ylim([-100, 100]) plt.xlim([0, 12000])
notebooks/phase_distances_from_tt.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Scratch notebook: parse the KSRTC timestamp format, load the cleaned CSV,
# and append it to a local SQLite database.

import datetime as dt
import dateutil as du

# Verify dateutil can parse the raw 'DD-MON-YYYY HH:MM:SS' format used in
# the source data.
p = du.parser.parse('01-NOV-2016 10:38:26')
p.isoformat()

import pandas as pd

# Round-trip the parsed timestamp through pandas and back through dateutil.
g = pd.Timestamp(p)
g

du.parser.parse(p.isoformat())

# Read the cleaned data in chunks, combining the separate date and time
# columns into a single parsed ETD_DATETIME column.
reader = pd.read_csv('../data/clean_data/full_data.csv',
                     parse_dates={'ETD_DATETIME': ['ETD_DATE', 'ETD_TD_TIME']},
                     chunksize=10)
# BUG FIX: `df` was used below (head/loc/info/to_sql) but never defined --
# read_csv with chunksize returns a TextFileReader, not a DataFrame.
# Materialize the chunks into a single DataFrame before using them.
df = pd.concat(reader)

df.head(5)

df.loc[:, 'ETD_CUR_STOP_CODE':'ETD_TICKET_TYPE2'].head(5)

import sqlite3 as sql
import os, sys

# Only open the database if its directory exists.
if os.path.exists('../data/db/'):
    conn = sql.connect('../data/db/ksrctc.db')
    print('connection established : {}'.format(conn))
else:
    print("path does not exists")

df.info()

# Append the full table into SQLite, using the row index as an ID column.
df.to_sql('KSRTC_BUS', con=conn, if_exists='append', index_label='ID')

conn.commit()

conn.close()

# Sanity check that the reader object was created ("thik hai" = "it's fine").
if reader:
    print("thik hai")
else:
    print("nahi thik hai")
notebooks/.ipynb_checkpoints/DBModule-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Demo - `ipython_magic_canvas` # # # Install the magic: `pip install git+https://github.com/psychemedia/ipython_magic_canvas.git` # ## Load in the magic # %load_ext canvas_magic # ## Block Magic with Explicit Context # + # %%canvas -c ctx ctx.beginPath(); ctx.fillStyle = "#F9A520"; ctx.moveTo(0, 0); ctx.lineTo(200, 0); ctx.lineTo(100, 200); ctx.fill(); // triangle 2, top center ctx.moveTo(300, 0); // pick up "pen," reposition at 300 (horiz), 0 (vert) ctx.lineTo(300, 200); // draw straight down (from 300,0) to 200px ctx.lineTo(500, 100); // draw up toward right (100 half of 200) ctx.fill(); // connect and fill # - # ## Block Magic with Implicit Context # + # %%canvas beginPath(); fillStyle = "#F9A520"; moveTo(0, 0); lineTo(200, 0); lineTo(100, 200); fill(); beginPath(); fillStyle = "#F00"; moveTo(300, 200); lineTo(300, 400); // draw straight down by 200px (200 + 200) lineTo(100, 300); // draw up toward left (100 less than 300, so left) fill(); // connect and fill # - # ## Cell Magic with Explicit Context txt=''' ctx.beginPath(); // note usage below // triangle 1, at left ctx.fillStyle = "#F9A520"; ctx.moveTo(0, 0); // start at top left corner of canvas ctx.lineTo(200, 0); // go 200px to right (x), straight line from 0 to 0 ctx.lineTo(100, 200); // go to horizontal 100 (x) and vertical 200 (y) ctx.fill(); // connect and fill // triangle 2, top center ctx.moveTo(300, 0); // pick up "pen," reposition at 300 (horiz), 0 (vert) ctx.lineTo(300, 200); // draw straight down (from 300,0) to 200px ctx.lineTo(500, 100); // draw up toward right (100 half of 200) ctx.fill(); // connect and fill // triangle 3, bottom center ctx.beginPath(); // note: w/o this, color does not work as expected ctx.fillStyle = "#F00"; ctx.moveTo(300, 200); // pick up "pen," 
reposition at 300 (horiz), 200 (vert) ctx.lineTo(300, 400); // draw straight down by 200px (200 + 200) ctx.lineTo(100, 300); // draw up toward left (100 less than 300, so left) ctx.fill(); // connect and fill ''' # %canvas -v txt --context ctx # %canvas -v txt --wrap # ## Cell Magic with Implicit Context txt2=''' beginPath(); // note usage below // triangle 1, at left fillStyle = "#F9A520"; moveTo(0, 0); // start at top left corner of canvas lineTo(200, 0); // go 200px to right (x), straight line from 0 to 0 lineTo(100, 200); // go to horizontal 100 (x) and vertical 200 (y) fill(); // connect and fill // triangle 3, bottom center beginPath(); // note: w/o this, color does not work as expected fillStyle = "#F00"; moveTo(300, 200); // pick up "pen," reposition at 300 (horiz), 200 (vert) lineTo(300, 400); // draw straight down by 200px (200 + 200) lineTo(100, 300); // draw up toward left (100 less than 300, so left) fill(); // connect and fill ''' # %canvas -v txt2
magic_canvas_demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="_AznB3KjOqRF"
import matplotlib.pyplot as plt
import math
import numpy as np


# + id="SjPk-64-QbFC"
def show_plot(x, y):
    """
    Given x and y values, this function will plot the graph

    Args:
        x(int): first point
        y(int): second point
    Returns:
        None
    """
    plt.scatter(x, y)
    plt.show()


# +
show_plot(4, 3)


# +
def show_plot_points_in_list(x, y):
    """
    Given x and y lists, this function will plot the graph

    Args:
        x(list): list of x points
        y(list): list of y points
    Returns:
        None
    """
    plt.scatter(x, y)
    plt.show()


show_plot_points_in_list([1, 2, 3, 4, 5], [1, 4, 9, 16, 25])


# + id="oFU_9HRomppm"
def plot_function(k):
    """
    Plot a graph of the kx function.
    x values are from 0 to 10 (step 1), y = kx.

    Args:
        k(int): integer value
    Returns:
        None
    """
    # FIX: removed a redundant function-local `import numpy as np`;
    # numpy is already imported at the top of the notebook.
    x = np.arange(0, 10, 1)
    y = k * x
    plt.scatter(x, y)
    plt.show()


# +
plot_function(-2)


# + id="sUuyq8H5qHZF"
def plot_function_1(k):
    """
    Plot a graph of the kx^2 function.
    x values are from -10 to 10 (step 1), y = kx^2.

    Args:
        k(int): integer value
    Returns:
        None
    """
    x = np.arange(-10, 11, 1)
    y = k * x**2
    plt.scatter(x, y)
    plt.show()


# +
plot_function_1(5)


# + id="1QftZHMksPV3"
def plot_sin_function():
    """
    Draw a graph of the sin function.
    x values are from -500 to 500 (step 0.1), y = sin(x/50).

    Returns:
        None
    """
    x = np.arange(-500, 501, 0.1)
    y = np.sin(x/50)
    plt.scatter(x, y, s=1)
    plt.show()


# +
plot_sin_function()


# + id="NZNBkys-0M4y"
def plot_cos_function():
    """
    Draw a graph of the cos function.
    x values are from -500 to 500 (step 1), y = cos(x/40).

    Returns:
        None
    """
    # FIX: docstring previously said "sin function" although this plots cos.
    x = list(range(-500, 501, 1))
    y = [math.cos(i/40) for i in x]
    plt.scatter(x, y, s=2)
    plt.show()


# +
plot_cos_function()

# + id="sgsiCHDF2vZ-"
Python/matplotlib/scatter_plot_Hometask.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# We used three datasets in this paper (one for each growth cycle - 2017/18/19).
#
# The code described below was applied to each dataset separately, but here we show just one to avoid repetition.

# + code_folding=[]
# Import libraries for tabular data processing
import pandas as pd
import tabula as tab
import numpy as np
import os

# + [markdown] heading_collapsed=true
# # Convert pdf to csv
#
# The datasets made available by the Genomes to Fields initiative (G2F) contained some tables in pdf instead of csv format. So to use the tables we are converting them from pdf to csv using the tabula library; the conversion was performed page by page so it would be easier to check the values' accuracy.

# + code_folding=[] hidden=true
# For a document with 18 pages
pages = []
for i in range(18):
    # FIX: the module is imported as `tab`, but this call referenced the
    # undefined name `tabula`, which raised NameError on the first iteration.
    each_page = tab.read_pdf('/data/fieldnotes.pdf', pages=i+1,
                             output_format='dataframe')
    print(f"Doing page {i+1}")  # print the current page to notice if the function got stuck
    pages.append(each_page)

# + code_folding=[] hidden=true
# Clean and tidy each page (read_pdf returns a list; keep its first dataframe)
for i in range(18):
    pages[i] = pages[i][0]

# Export this to csv and check if the values between pdf and csv version are matching
for i in range(18):
    pages[i].to_csv(f"/data/fieldnotes_page{i}.csv", index=False)

# + [markdown] hidden=true
# Check whether the csv file matches the pdf version manually, then merge the pages together in a file called "/data/fieldnotes_mergedpages.csv".

# + [markdown] heading_collapsed=true
# # Clean csv
# The tabular dataset contains missing values, and other inconsistencies that must be resolved before using it as input for a machine learning model.
#

# + [markdown] hidden=true
# ```Check for NAs```

# + code_folding=[0] hidden=true
def na_stats(df):
    '''
    Print, for every column, the number of unique values and the NA count.

    Args:
        df (pd.DataFrame): dataframe to inspect.
    Returns:
        None -- results are printed.
    '''
    list_of_columns = df.columns
    print(f"This dataframe has {len(df)} rows")
    # Check each column for na values
    for column in list_of_columns:
        df_clean = df[df[column].notna()]
        nans = len(df[df[column].isna()])
        unique_values = df_clean[column].unique()
        print(f"{column} has {len(unique_values)} unique values, and {nans} NAs ")

# + code_folding=[0] hidden=true
# Read the csv file, each dataframe is separated by Year already
df = pd.read_csv("/data/fieldnotes_mergedpages.csv")
na_stats(df)

# + code_folding=[0] hidden=true
# Fill Yield NAs in the dataframe using its replicate values
df['Yield'] = df['Yield'].fillna(
    df.groupby(by=['Test', 'Pedigree'])['Yield'].transform(
        lambda s: s.loc[s.first_valid_index()]))  # takes the first valid number in the group and fills in the NA

# + code_folding=[0] hidden=true
# Drop samples that didn't have a replicate Yield value
df = df.dropna(axis=0)

# + code_folding=[0] hidden=true
# Replace NaN in Stock column with 'unknown'
df['Stock'] = df['Stock'].fillna('unknown')

# + [markdown] hidden=true
# ```Clean Typos```
#
# Converting pdf to csv may have created some typos like extra blank spaces at the end of a word

# + code_folding=[0] hidden=true
# Clean typos in the string columns
columns = ['Barcode', 'Test', 'Stock', 'Pedigree']
for col in columns:
    df[col] = df[col].str.strip('!? \n\t"')
    df[col] = df[col].str.strip("'")

# + [markdown] hidden=true
# ```Separate Hybrids into parental lines```

# + code_folding=[0] hidden=true
# Repeat this for each df used
df['Parental 1'] = df['Pedigree'].str.split('/').str.get(0)
df['Parental 2'] = df['Pedigree'].str.split('/').str.get(1)

# + [markdown] hidden=true
# ```Add columns to describe treatment```

# + code_folding=[0] hidden=true
# G2LA, G2FE and DG2F are the original treatment names as used in the Genomes to Field Initiative.
# In the paper they are replaced by more intuitive names P2F1, P1F1 and P1F2
# FIX: take explicit copies of the row subsets -- assigning new columns to a
# .loc slice view triggers pandas' SettingWithCopyWarning and may silently
# fail to write the new columns.
g2la = df.loc[df['Test'] == 'G2LA'].copy()
g2la['Planting'] = 'late'
g2la['Fertilizer'] = 'optimal'

g2fe = df.loc[df['Test'] == 'G2FE'].copy()
g2fe['Planting'] = 'optimal'
g2fe['Fertilizer'] = 'optimal'

dg2f = df.loc[df['Test'] == 'DG2F'].copy()
dg2f['Planting'] = 'optimal'
dg2f['Fertilizer'] = 'reduced'

df = g2la.append(g2fe).append(dg2f)

# + code_folding=[0] hidden=true
# save csv
df.to_csv('/data/fielddata/df_2017.csv')
# -

# # Prepare input data

# Import libraries for preparing input data
from fastai.tabular.all import *

# +
# Load the cleaned dataframes
df_2017 = pd.read_csv('/data/fielddata/df_2017.csv')
df_2018 = pd.read_csv('/data/fielddata/df_2018.csv')
df_2019 = pd.read_csv('/data/fielddata/df_2019.csv')

# Blend the datasets together
mixed = df_2017.append(df_2018)
mixed_df = mixed.append(df_2019)

# + code_folding=[]
# Divide between train/validation and test (90:10%)
# the sample function uses an equal probability of getting any row in the dataset
df_test = mixed_df.sample(frac=0.1, random_state=32)
df_train_val = mixed_df.drop(df_test.index)

df_test.to_csv('/data/fielddata/df_test.csv', index=False)
df_train_val.to_csv('/data/fielddata/df_train_val.csv', index=False)

# + code_folding=[]
# Random splitter function from fastai
splitter = RandomSplitter(seed=42)
# FIX: the dataframe is named `df_train_val`; the original referenced the
# undefined name `df_train_validation` (NameError).
splits = splitter(range_of(df_train_val))
splits

# +
procs = [Categorify, Normalize, FillMissing]
cat_names = ['Parental 1', 'Parental 2', 'Planting', 'Stock', 'Fertilizer']
cont_names = ['Days_after_sowing']

dls = TabularDataLoaders.from_df(df_train_val, y_names="Yield",
                                 cat_names=cat_names,
                                 cont_names=cont_names,
                                 procs=procs,
                                 splits=splits)

# Prepare the train/val data for XGBoost and Random Forest
X_train, y_train = dls.train.xs, dls.train.ys.values.ravel()
X_val, y_val = dls.valid.xs, dls.valid.ys.values.ravel()
X = X_train.append(X_val)
Y = np.append(y_train, y_val)

X.to_csv('/data/fielddata/X_ordinal.csv')
# FIX: np.append returns a plain ndarray, which has no .to_csv method;
# wrap it in a Series before writing.
pd.Series(Y, name='Yield').to_csv('/data/fielddata/Y.csv')

# Prepare the holdout data for XGBoost and Random Forest
dl = dls.test_dl(df_test)
Xtest = dl.xs
Ytest = Xtest.pop('Yield')

Xtest.to_csv('/data/fielddata/Xtest_ordinal.csv')
Ytest.to_csv('/data/fielddata/Ytest.csv')

# +
# Prepare One-hot encoded dataset

# One hot encode
categorical_cols = ['Stock', 'Parental 1', 'Parental 2', 'Planting',
                    'Fertilizer', 'Days_after_sowing']
# Add the test set to the training dataset to dummy them together, so they match
superX = X.append(Xtest)
superX = pd.get_dummies(superX, columns=categorical_cols)
X_ohe = superX[:3878]
Xtest_ohe = superX[3878:]

# Save the one-hot encoded Xs
# No need to export the target values as they are the same as above
X_ohe.to_csv('/data/fielddata/X_ohe.csv')
Xtest_ohe.to_csv('/data/fielddata/Xtest_ohe.csv')
tab_processing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- # We show that linear_model.Lasso provides the same results for dense and sparse data and that in the case of sparse data the speed is improved. # #### New to Plotly? # Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/). # <br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online). # <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! 
# ### Version import sklearn sklearn.__version__ # ### Imports # + print(__doc__) from time import time from scipy import sparse from scipy import linalg from sklearn.datasets.samples_generator import make_regression from sklearn.linear_model import Lasso # - # ### The two Lasso implementations on Dense data # + print("--- Dense matrices") X, y = make_regression(n_samples=200, n_features=5000, random_state=0) X_sp = sparse.coo_matrix(X) alpha = 1 sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000) dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000) t0 = time() sparse_lasso.fit(X_sp, y) print("Sparse Lasso done in %fs" % (time() - t0)) t0 = time() dense_lasso.fit(X, y) print("Dense Lasso done in %fs" % (time() - t0)) print("Distance between coefficients : %s" % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_)) # - # ### The two Lasso implementations on Sparse data # + print("--- Sparse matrices") Xs = X.copy() Xs[Xs < 2.5] = 0.0 Xs = sparse.coo_matrix(Xs) Xs = Xs.tocsc() print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100)) alpha = 0.1 sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000) dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000) t0 = time() sparse_lasso.fit(Xs, y) print("Sparse Lasso done in %fs" % (time() - t0)) t0 = time() dense_lasso.fit(Xs.toarray(), y) print("Dense Lasso done in %fs" % (time() - t0)) print("Distance between coefficients : %s" % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_)) # + from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) # ! 
pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'Lasso on Dense and Sparse Data.ipynb', 'scikit-learn/lasso-dense-vs-sparse-data/', 'Lasso on Dense and Sparse Data | plotly', ' ', title = 'Lasso on Dense and Sparse Data | plotly', name = 'Lasso on Dense and Sparse Data', has_thumbnail='true', thumbnail='thumbnail/scikit-default.jpg', language='scikit-learn', page_type='example_index', display_as='linear_models', order=13, ipynb= '~Diksha_Gabha/3202') # -
_posts/scikit/lasso-on-dense-and-sparse-data/Lasso on Dense and Sparse Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os

# %env MODEL=/opt/intel/openvino/deployment_tools/tools/model_downloader/intel/person-detection-retail-0013/FP32/person-detection-retail-0013.xml
# %env DEVICE=CPU
# %env INPUT=Pedestrain_Detect_2_1_1.mp4
# %env PERF_COUNTS=False
# %env PROB_THRESHOLD=0.6

# +
"""People Counter."""
"""
Copyright (c) 2018 Intel Corporation.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit person to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import sys
import time
import socket
import json
import cv2

import logging as log
import paho.mqtt.client as mqtt

from argparse import ArgumentParser
from inference import Network
import subprocess

# MQTT server environment variables
HOSTNAME = socket.gethostname()
IPADDRESS = socket.gethostbyname(HOSTNAME)
TOPIC = "people_counter_python"
MQTT_HOST = IPADDRESS
MQTT_PORT = 1884
MQTT_KEEPALIVE_INTERVAL = 60
CONFIG_FILE = '../resources/config.json'


def performance_counts(perf_count):
    """
    print information about layers of the model.

    :param perf_count: Dictionary consists of status of the layers.
    :return: None
    """
    print("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type',
                                                      'exec_type', 'status',
                                                      'real_time, us'))
    for layer, stats in perf_count.items():
        print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer,
                                                          stats['layer_type'],
                                                          stats['exec_type'],
                                                          stats['status'],
                                                          stats['real_time']))


def ssd_out(frame, result):
    """
    Parse SSD output.

    :param frame: frame from camera/video
    :param result: list contains the data to parse ssd
    :return: person count and frame
    """
    current_count = 0
    for obj in result[0][0]:
        # Draw bounding box for object when its probability is more than
        # the specified threshold
        if obj[2] > prob_threshold:
            xmin = int(obj[3] * initial_w)
            ymin = int(obj[4] * initial_h)
            xmax = int(obj[5] * initial_w)
            ymax = int(obj[6] * initial_h)
            cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 55, 255), 1)
            current_count = current_count + 1
    return frame, current_count


def main():
    """
    Load the network and parse the SSD output.

    :return: None
    """
    # Connect to the MQTT server
    client = mqtt.Client()
    client.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)
    client.subscribe(TOPIC)

    #args = build_argparser().parse_args()
    # Flag for the input image
    single_image_mode = False

    cur_request_id = 0
    last_count = 0
    total_count = 0
    start_time = 0

    # Initialise the class
    infer_network = Network()
    model = os.environ['MODEL']
    device = os.environ['DEVICE']
    # Load the network to IE plugin to get shape of input layer
    n, c, h, w = infer_network.load_model(model, device, 1, 1, cur_request_id)[1]

    assert os.path.isfile(CONFIG_FILE), "{} file doesn't exist".format(CONFIG_FILE)
    config = json.loads(open(CONFIG_FILE).read())
    for idx, item in enumerate(config['inputs']):
        if item['video'].isdigit():
            input_stream = int(item['video'])
        # BUG FIX: the original wrapped this condition in a list literal
        # (`elif [cond]:`); a non-empty list is always truthy, so every
        # non-numeric input -- including video files -- was treated as a
        # single image.
        elif item['video'].endswith('.jpg') or item['video'].endswith('.bmp'):
            single_image_mode = True
            input_stream = item['video']
        else:
            input_stream = item['video']

    cap = cv2.VideoCapture(input_stream)

    if input_stream:
        cap.open(input_stream)

    if not cap.isOpened():
        log.error("ERROR! Unable to open video source")

    global initial_w, initial_h, prob_threshold
    prob_threshold = float(os.environ['PROB_THRESHOLD'])
    initial_w = cap.get(3)
    initial_h = cap.get(4)
    fps = cap.get(cv2.CAP_PROP_FPS)

    cmdstring = ('ffmpeg',
                 '-y', '-r', '%d' % (fps),  # overwrite, 60fps
                 '-s', '%dx%d' % (initial_w, initial_h),  # size of image string
                 '-pixel_format', 'bgr24',  # format
                 '-f', 'rawvideo', '-i', '-',  # tell ffmpeg to expect raw video from the pipe
                 'http://localhost:8090/fac.ffm')  # output encoding
    p = subprocess.Popen(cmdstring, stdin=subprocess.PIPE)

    while cap.isOpened():
        flag, frame = cap.read()
        if not flag:
            break
        key_pressed = cv2.waitKey(1)

        # Start async inference
        image = cv2.resize(frame, (w, h))
        # Change data layout from HWC to CHW
        image = image.transpose((2, 0, 1))
        image = image.reshape((n, c, h, w))
        # Start asynchronous inference for specified request.
        inf_start = time.time()
        infer_network.exec_net(cur_request_id, image)
        # Wait for the result
        if infer_network.wait(cur_request_id) == 0:
            det_time = time.time() - inf_start
            # Results of the output layer of the network
            result = infer_network.get_output(cur_request_id)
            # BUG FIX: os.environ values are always strings, so the original
            # comparison `== True` could never succeed and performance counts
            # were never printed even when requested via PERF_COUNTS=True.
            if os.environ['PERF_COUNTS'] == 'True':
                perf_count = infer_network.performance_counter(cur_request_id)
                performance_counts(perf_count)

            frame, current_count = ssd_out(frame, result)
            inf_time_message = "Inference time: {:.3f}ms"\
                               .format(det_time * 1000)
            cv2.putText(frame, inf_time_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)

            # When new person enters the video
            if current_count > last_count:
                start_time = time.time()
                total_count = total_count + current_count - last_count
                # client.publish("person", json.dumps({"total": total_count}))

            # Person duration in the video is calculated
            if current_count < last_count:
                duration = int(time.time() - start_time)
                # Publish messages to the MQTT server
                client.publish("person/duration",
                               json.dumps({"duration": duration}))

            client.publish("person", json.dumps({"count": current_count}))
            last_count = current_count

            if key_pressed == 27:
                break

        # Send frame to the ffmpeg server
        p.stdin.write(frame.tobytes())
        if single_image_mode:
            cv2.imwrite('output_image.jpg', frame)
    cap.release()
    cv2.destroyAllWindows()
    client.disconnect()
    infer_network.clean()


if __name__ == '__main__':
    main()
    exit(0)
# -
Jupyter/people_counter_jupyter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # Useful for debugging # %load_ext autoreload # %autoreload 2 # # Writer examples from pmd_beamphysics import ParticleGroup, particle_paths, pmd_init from h5py import File # + # Pick one: #H5File = 'data/bmad_particles2.h5' H5FILE = 'data/distgen_particles.h5' #H5FILE = 'data/astra_particles.h5' P = ParticleGroup(H5FILE) # - # # openPMD # The regular write routine writes in a proper openPMD format P.write('openpmd_particles.h5') # An open h5 hande can also be used, but it needs to be properly initialized with File('openpmd_particles.h5', 'w') as h5: pmd_init(h5, basePath='/', particlesPath='/' ) P.write(h5) # This can be read in by another ParticleGroup P2 = ParticleGroup('openpmd_particles.h5') # Check that these are the same all(P2.x == P.x) # # Astra P.write_astra('astra_particles.txt') # !head astra_particles.txt # # Bmad ASCII P.write_bmad('bmad_particles.txt') # !head bmad_particles.txt # # elegant P.write_elegant('elegant_particles.txt', verbose=True) # !head -n 20 elegant_particles.txt # # Genesis 1.3 v2 P.write_genesis2_beam_file('genesis2.beam', n_slice=50, verbose=True) # !head genesis2.beam # # Genesis 1.3 v4 P.write_genesis4_distribution('genesis4_distribution.h5', verbose=True) # These are written with File('genesis4_distribution.h5', 'r') as h5: for g in h5: print(g, len(h5[g])) # # GPT ASCII # ASCII Particles P.write_gpt('gpt_particles.txt', verbose=True) # ASCII Particles P.write_gpt('gpt_particles.gdf', verbose=True, asci2gdf_bin='$ASCI2GDF_BIN') # !head gpt_particles.txt # # Impact-T # Impact-T particles must all be a the same time P.drift_to_t(P['mean_t']) # This will return settings for Impact-T to use. 
P.write_impact('impact_particles.txt') # !head impact_particles.txt # # LiTrack # LiTrack particles must be at the same z P.drift_to_z() # This will return settings for Impact-T to use. P.write_litrack('litrack.zd', verbose=True) # !head -n 20 litrack.zd # # Lucretia P.write_lucretia('lucretia.mat', ele_name='BEGINNING', t_ref=0, stop_ix=None, verbose=True) # + # Read back from pmd_beamphysics.interfaces.lucretia import lucretia_to_data, list_element_names ParticleGroup(data=lucretia_to_data('lucretia.mat', verbose=True)) # - # Helpher function to list the available elements list_element_names('lucretia.mat') # # OPAL # + # Injected particled must be at the same time\ P.drift_to_t() P.write_opal('opal_injected.txt', dist_type='injected') # - # !head opal_injected.txt # Emitted particles must be at the same z P.drift_to_z(P['mean_z']) P.write_opal('opal_emitted.txt', dist_type='emitted') # !head opal_emitted.txt # # Cleanup # !rm astra_particles.txt bmad_particles.txt elegant_particles.txt gpt_particles.txt impact_particles.txt opal_injected.txt opal_emitted.txt openpmd_particles.h5 # !rm genesis4_distribution.h5 genesis2.beam litrack.zd # !rm gpt_particles.gdf # !rm lucretia.mat
examples/write_examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 테스트할 데이터와 훈련할 데이터로 분할하기 # 데이터 모델을 학습할 때 데이터를 테스트할 데이터셋과 학습할 데이터셋으로 나눌 필요가 있습니다. # # 이 작업을 수행하기 위해 [scikit-learn](https://scikit-learn.org/stable/) 라이브러리를 사용해야합니다. # # scikit-learn은 오픈 소스로 전처리 및 학습 모델을 위한 데이터 과학용 BSD 라이센스 라이브러리입니다. # 데이터를 테스트용 학습용으로 나누기 전에 우리는 데이터 준비를 해야합니다. import pandas as pd # 항공편 및 항공편 지연에 대한 정보가 포함 된 csv 파일을 불러옵니다. # # **shape**를 사용하여 원본 DataFrame에있는 행과 열 수를 확인합니다. delays_df = pd.read_csv('Data/Lots_of_flight_data.csv') delays_df.shape # ## 데이터를 기능 및 레이블로 분할 # 모델 학습에 사용할 기능만 포함하는 X라는 DataFrame을 만듭니다. # # **참고** 숫자 값을 특징으로만 사용할 수 있으며, 숫자가 아닌 값이 있는 경우 핫 인코딩과 같은 다른 기술을 적용하여 모델을 학습하기위한 특징으로 사용하기 전에 이를 숫자 값으로 변환해야합니다. 이러한 기술에 대한 자세한 내용은 데이터 과학 과정을 확인하십시오! X = delays_df.loc[:,['DISTANCE', 'CRS_ELAPSED_TIME']] X.head() # 모델로 예측하려는 값만 포함하는 y라는 DataFrame을 만듭니다. # # 우리의 경우 비행기가 몇 분 늦게 도착할지 예측하려고합니다. 이 정보는 ARR_DELAY 열에 있습니다. y = delays_df.loc[:,['ARR_DELAY']] y.head() # ## 테스트할 데이터와 훈련할 데이터로 분할하기 # Use **scikitlearn train_test_split**를 사용하여 Test DataFrames로 30%의 행을 옮깁니다. # # DataFrame의 나머지 70% 행은 우리의 모델을 학습하는데 사용합니다. # # 참고: *random_state* 값을 지정하여 코드를 다시 실행하면 동일한 행이 테스트 DataFrame으로 이동됩니다. 이것은 결과를 반복적으로 생성합니다. # + from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) # - # 이제 우리는 행의 70%를 포함하는 **X_train** DataFrame을 가지고 있습니다. # # 우리는 모델을 학습하는데 이 DataFrame을 이용합니다. X_train.shape # DataFrame **X_test**은 30%의 행을 포함하고 있습니다. # # 우리는 이 DataFrame으로 훈련된 모델을 테스트하는 용도로 사용해 정확도를 측정할 수 있습니다. X_test.shape # **X_train**와 **X_test**는 특징을 포함합니다. # # 그 특징은 항공편이 얼마나 늦게 도착할지 예측하는 데 도움이 될 수 있다고 생각하는 열입니다: **DISTANCE**, **CRS_ELAPSED_TIME** X_train.head() # DataFrame **y_train**은 70%의 행을 포함합니다. # # 우리는 이 DataFrame을 이용하여 우리의 모델을 학습할 것입니다. 
# 원본 DataFrame을 유지할 필요가없는 경우 새 DataFrame을 만드는 대신 기존 DataFrame 내의 행을 삭제하기 만하면됩니다. # **inplace=*True*** 는 지정된 DataFrame에서 행을 삭제하려고 함을 나타냅니다. y_train.shape # DataFrame **y_test**에는 행의 나머지 30 %가 포함됩니다. # # 이 DataFrame을 사용하여 훈련된 모델을 테스트하므로 정확성을 확인할 수 있습니다. y_test.shape # **y_train**과 **y_test**은 우리의 레이블을 포함합니다. # # 레이블은 학습된 모델로 예측하려는 열입니다 : ** ARR_DELAY ** # # **참고:** ARR_DELAY 값이 음수이면 항공편이 일찍 도착했음을 나타냅니다. y_train.head()
even-more-python-for-beginners-data-tools/10 - Splitting test and training data with scikit-learn/10 - Train Test split.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

# # Matrix and Covariance
#
# Demo of pyemu's `Matrix` class (overloaded math operators with "auto-align"
# row/col matching) and its `Cov` covariance subclass.

from __future__ import print_function
import os
import numpy as np
from pyemu import Matrix, Cov

# Most basic instantiation: an empty Matrix.
m = Matrix()

# Build a Matrix from a random 5x5 ndarray with explicit row/col names.
a = np.random.random((5, 5))
row_names = []
# NOTE(review): list-comprehension used purely for the append side effect;
# kept as-is since this is a doc-only pass.
[row_names.append("row_{0:02d}".format(i)) for i in range(5)]
col_names = []
[col_names.append("col_{0:02d}".format(i)) for i in range(5)]
m = Matrix(x=a, row_names=row_names, col_names=col_names)
print(m)

# # File I/O with `Matrix`
# PEST-compatible ASCII round trip.
ascii_name = "mat_test.mat"
m.to_ascii(ascii_name)
m2 = Matrix.from_ascii(ascii_name)
print(m2)

# Binary round trip.
bin_name = "mat_test.bin"
m.to_binary(bin_name)
m3 = Matrix.from_binary(bin_name)
print(m3)

# Conversions: pandas DataFrame and scipy.sparse (CSR) views.
print(type(m.to_dataframe()))
print(type(m.to_sparse()))
m.to_dataframe()  # renders nicely in the notebook

# # Convenience methods of `Matrix`
# SVD components are exposed as properties; the SVD is computed on demand
# and each component comes back as a Matrix object.
print(m.s)                       # singular values, as a Matrix
m.s.to_ascii("test_sv.mat")      # save singular values (PEST ASCII)
m.v.to_ascii("test_v.mat")       # right singular vectors
m.u.to_dataframe()               # left singular vectors as a DataFrame

# The inverse is accessed the same way (requires a square matrix).
m.inv.to_dataframe()

# # Manipulating `Matrix` shape
# Submatrix extraction by row/col names.
print(m.get(row_names="row_00",col_names=["col_01","col_03"]))

# `extract()` = `get()` followed by `drop()`; deep-copy first so `m` survives.
from copy import deepcopy
m_copy = deepcopy(m)
sub_m = m_copy.extract(row_names="row_00",col_names=["col_01","col_03"])
m_copy.to_dataframe()
sub_m.to_dataframe()

# # Operator overloading
# Auto-align: the "inner join" of row/col names is found before the op,
# so deliberately misaligned matrices still combine.
row_names = ["row_03","row_02","row_00"]
col_names = ["col_01","col_10","col_100"]
m_mix = Matrix(x=np.random.random((3,3)),row_names=row_names,col_names=col_names)
m_mix.to_dataframe()
m.to_dataframe()

prod = m * m_mix.T
prod.to_dataframe()
prod2 = m_mix.T * m
prod2.to_dataframe()
(m_mix + m).to_dataframe()

# # The `Cov` derived type
# Covariance matrices: assumed symmetric, so row_names == col_names.
c = Cov(m.newx,m.row_names)

# PEST uncertainty-file (.unc) round trip.
c.to_uncfile("test.unc")
c1 = Cov.from_uncfile("test.unc")
print(c1)

# Cov objects implied by a PEST control file: parameter bounds and obs weights.
parcov = Cov.from_parbounds(os.path.join("henry","pest.pst"))
obscov = Cov.from_obsweights(os.path.join("henry","pest.pst"))

# to_dataframe on diagonal types materializes the full matrix - can be costly.
parcov.to_dataframe().head()
# Zero-weight observations get a very large implied uncertainty.
obscov.to_dataframe().head()
examples/MatrixCovariance_demo.ipynb
# -*- coding: utf-8 -*- # # 📝 Exercise M4.03 # # In all previous notebooks, we only used a single feature in `data`. But we # have already shown that we could add new features to make the model more # expressive by deriving new features, based on the original feature. # # The aim of this notebook is to train a linear regression algorithm on a # dataset with more than a single feature. # # We will load a dataset about house prices in California. # The dataset consists of 8 features regarding the demography and geography of # districts in California and the aim is to predict the median house price of # each district. We will use all 8 features to predict the target, the median # house price. # <div class="admonition note alert alert-info"> # <p class="first admonition-title" style="font-weight: bold;">Note</p> # <p class="last">If you want a deeper overview regarding this dataset, you can refer to the # Appendix - Datasets description section at the end of this MOOC.</p> # </div> # + from sklearn.datasets import fetch_california_housing data, target = fetch_california_housing(as_frame=True, return_X_y=True) target *= 100 # rescale the target in k$ data.head() # - # Now it is your turn to train a linear regression model on this dataset. # First, create a linear regression model. # + # Write your code here. # - # Execute a cross-validation with 10 folds and use the mean absolute error # (MAE) as metric. Be sure to *return* the fitted *estimators*. # + # Write your code here. # - # Compute the mean and std of the MAE in thousands of dollars (k$). # + # Write your code here. # - # Inspect the fitted model using a box plot to show the distribution of values # for the coefficients returned from the cross-validation. Hint: # use the function # [`df.plot.box()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.box.html) # to create a box plot. # + # Write your code here.
notebooks/linear_models_ex_03.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # %%time from IPython.display import clear_output clear_output() # 저니크 모멘트 계산 from AZernike import * # 차수에 따른 연산 속도 print_curr_time() use_gpus = [ 1, 0 ] use_threads = [ 0 ] use_hashs = [ 0, 1 ] Ks = numpy.arange( 0.5, 3.1, 0.5 ) Ks = 1 T_MAX = 20 max_mem = max_gpu_memory() if False else max_cpu_memory() if max_mem < 8 : T_MAX = 18 elif max_mem < 25 : T_MAX = 20 pass T = T_MAX #T_MAX = 2 #T = [ T//2, T ] T = numpy.arange( 5, 31, 5 ) datas = { } debug = 0 test_zernike_moments( datas, use_gpus, use_threads, use_hashs, Ks, T, debug=debug ) plot_moment_calc_times( datas ) print( "Done." )
AZernike_04_MomentCalcTime.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Two-stage "where/what" attention experiment on synthetic blob data:
# a `where` net assigns attention weights over 9 sub-images of a mosaic,
# a `what` net classifies the attention-weighted average. Both are trained
# simultaneously on CUDA.

import numpy as np
import pandas as pd
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from matplotlib import pyplot as plt
# %matplotlib inline

# # Create blob Train and Test data
#
# NOTE(review): the original notebook carried ~10 commented-out cells here that
# generated the synthetic data now loaded from .npy files below: 10 Gaussian
# blob classes (multivariate normals, 500 samples each, 5-D), a scatter plot,
# a `SyntheticDataset` definition, the foreground(0,1,2)/background split,
# `create_mosaic_img` (9-tile mosaics with one foreground tile at a random
# position), 3000 train / 2000 test mosaics with fixed per-sample seeds, and
# the np.save calls producing train_blob_data.npy / test_blob_data.npy /
# blob_data.npy. They are retained in version history.

# # load mosaic data

class MosaicDataset1(Dataset):
    """Mosaic dataset: 9-tile images with label and foreground-tile index."""

    def __init__(self, mosaic_list, mosaic_label, fore_idx):
        """
        Args:
            mosaic_list: sequence of mosaics, each a stack of 9 sub-images.
            mosaic_label: class label of the foreground tile per mosaic.
            fore_idx: position (0-8) of the foreground tile per mosaic.
        """
        self.mosaic = mosaic_list
        self.label = mosaic_label
        self.fore_idx = fore_idx

    def __len__(self):
        return len(self.label)

    def __getitem__(self, idx):
        # (mosaic, label, foreground-position) triple for one sample.
        return self.mosaic[idx] , self.label[idx] , self.fore_idx[idx]

class SyntheticDataset(Dataset):
    """Plain (x, y) dataset over the raw blob samples (no mosaic structure)."""

    def __init__(self, x, y):
        """
        Args:
            x: feature array (presumably N x 5 blob samples — TODO confirm).
            y: integer class labels.
        """
        self.x = x
        self.y = y
        #self.fore_idx = fore_idx

    def __len__(self):
        return len(self.y)

    def __getitem__(self, idx):
        return self.x[idx] , self.y[idx] #, self.fore_idx[idx]

# Load the pre-generated mosaics and raw blob data (see note above).
train_data = np.load("train_blob_data.npy",allow_pickle=True)
test_data = np.load("test_blob_data.npy",allow_pickle=True)
data = np.load("blob_data.npy",allow_pickle=True)

# Each .npy holds a one-element list of dicts (see the commented save cells).
train_mosaic_list_of_images = train_data[0]["mosaic_list"]
train_mosaic_label = train_data[0]["mosaic_label"]
train_fore_idx = train_data[0]["fore_idx"]

test_mosaic_list_of_images = test_data[0]["mosaic_list"]
test_mosaic_label = test_data[0]["mosaic_label"]
test_fore_idx = test_data[0]["fore_idx"]

X = data[0]["X"]
Y = data[0]["Y"]

# Batch size is also read by Module1.forward below (module-level coupling);
# all loaders must therefore produce full batches of this size.
batch = 250
tr_msd = MosaicDataset1(train_mosaic_list_of_images, train_mosaic_label, train_fore_idx)
train_loader = DataLoader( tr_msd,batch_size= batch ,shuffle=True)

batch = 250
tst_msd = MosaicDataset1(test_mosaic_list_of_images, test_mosaic_label, test_fore_idx)
test_loader = DataLoader( tst_msd,batch_size= batch ,shuffle=True)

dset = SyntheticDataset(X,Y)
dtloader = DataLoader(dset,batch_size =batch,shuffle=True )

# # models

class Module1(nn.Module):
    """'Where' net: scores each of the 9 tiles, softmaxes into attention
    weights, and returns the attention-weighted average tile plus the weights.
    """

    def __init__(self):
        super(Module1, self).__init__()
        self.fc1 = nn.Linear(5, 100)
        self.fc2 = nn.Linear(100, 1)  # one scalar score per tile

    def forward(self, z):
        # NOTE(review): uses the module-level `batch` constant, so the last
        # (possibly partial) batch of a loader would break here — works only
        # because dataset sizes are multiples of 250. Verify.
        x = torch.zeros([batch,9],dtype=torch.float64)
        y = torch.zeros([batch,5], dtype=torch.float64)
        x,y = x.to("cuda"),y.to("cuda")
        # Score each of the 9 tiles independently with the shared MLP.
        for i in range(9):
            x[:,i] = self.helper(z[:,i])[:,0]
        x = F.softmax(x,dim=1)  # alphas (attention weights over tiles)
        # Attention-weighted average of the tiles.
        for i in range(9):
            x1 = x[:,i]
            y = y + torch.mul(x1[:,None],z[:,i])
        return y , x

    def helper(self,x):
        # Shared per-tile scoring MLP: 5 -> 100 -> 1.
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

class Module2(nn.Module):
    """'What' net: classifies the averaged tile into the 3 foreground classes."""

    def __init__(self):
        super(Module2, self).__init__()
        self.fc1 = nn.Linear(5, 100)
        self.fc2 = nn.Linear(100, 3)

    def forward(self,y):
        y = F.relu(self.fc1(y))
        y = self.fc2(y)  # raw logits; CrossEntropyLoss applies softmax
        return y

# Same seed for both nets so runs are reproducible.
torch.manual_seed(1234)
where_net = Module1().double()
where_net = where_net.to("cuda")
# print(net.parameters)
torch.manual_seed(1234)
what_net = Module2().double()
what_net = what_net.to("cuda")

def calculate_attn_loss(dataloader,what,where,criter):
    """Evaluate the where+what pipeline over a loader.

    Returns (mean loss, analyse_data stats, #correct, #total, accuracy).
    NOTE(review): the final `r_loss/i` divides by the last enumerate index,
    i.e. len(dataloader)-1, not the batch count — slight off-by-one in the
    reported mean loss. Left as-is in this doc-only pass.
    """
    what.eval()
    where.eval()
    r_loss = 0
    alphas = []   # per-sample attention weights
    lbls = []     # true labels
    pred = []     # predicted labels
    fidices = []  # true foreground-tile positions
    correct = 0
    tot = 0
    with torch.no_grad():
        for i, data in enumerate(dataloader, 0):
            inputs, labels,fidx = data
            lbls.append(labels)
            fidices.append(fidx)
            inputs = inputs.double()
            inputs, labels = inputs.to("cuda"),labels.to("cuda")
            avg,alpha = where(inputs)
            outputs = what(avg)
            _, predicted = torch.max(outputs.data, 1)
            correct += sum(predicted == labels)
            tot += len(predicted)
            pred.append(predicted.cpu().numpy())
            alphas.append(alpha.cpu().numpy())
            loss = criter(outputs, labels)
            r_loss += loss.item()
    alphas = np.concatenate(alphas,axis=0)
    pred = np.concatenate(pred,axis=0)
    lbls = np.concatenate(lbls,axis=0)
    fidices = np.concatenate(fidices,axis=0)
    #print(alphas.shape,pred.shape,lbls.shape,fidices.shape)
    analysis = analyse_data(alphas,lbls,pred,fidices)
    return r_loss/i,analysis,correct.item(),tot,correct.item()/tot

def analyse_data(alphas,lbls,predicted,f_idx):
    """Count the four focus/prediction outcomes plus attention-peak stats.

    Returns [ftpt, ffpt, ftpf, ffpf, amth, alth]:
    focus-true/pred-true, focus-false/pred-true, focus-true/pred-false,
    focus-false/pred-false, and counts of max-alpha >= 0.5 vs < 0.5.
    """
    batch = len(predicted)
    amth,alth,ftpt,ffpt,ftpf,ffpf = 0,0,0,0,0,0
    for j in range (batch):
        focus = np.argmax(alphas[j])  # tile the where-net attends to most
        if(alphas[j][focus] >= 0.5):
            amth +=1
        else:
            alth +=1
        if(focus == f_idx[j] and predicted[j] == lbls[j]):
            ftpt += 1
        elif(focus != f_idx[j] and predicted[j] == lbls[j]):
            ffpt +=1
        elif(focus == f_idx[j] and predicted[j] != lbls[j]):
            ftpf +=1
        elif(focus != f_idx[j] and predicted[j] != lbls[j]):
            ffpf +=1
    #print(sum(predicted==lbls),ftpt+ffpt)
    return [ftpt,ffpt,ftpf,ffpf,amth,alth]

def cal_blob_accuracy(dat_loader):
    """Load a saved what-net checkpoint ("Net.pt") and report its accuracy on
    raw blob samples restricted to the foreground classes 0/1/2.
    """
    net = Module2().double()
    net = net.to("cuda")
    net.load_state_dict(torch.load("Net.pt"))
    pred1 = []
    lbls1 = []
    net.eval()
    with torch.no_grad():
        for i1, data1 in enumerate(dat_loader, 0):
            inputs1, labels1= data1
            inputs1 = inputs1.double()
            inputs1, labels1 = inputs1.to("cuda"),labels1.to("cuda")
            lbls1.append(labels1.cpu().numpy())
            outputs1 = net(inputs1)
            _, predicted1 = torch.max(outputs1.data, 1)
            pred1.append(predicted1.cpu().numpy())
    #print(np.shape(lbls))
    lbls1 = np.concatenate(lbls1,axis=0)
    #lbls = lbls[:,0]
    #print(lbls.shape)
    pred1 = np.concatenate(pred1,axis=0)
    # Only foreground classes count toward accuracy.
    idxs1 = np.logical_or(np.logical_or(lbls1==0,lbls1==1),lbls1 ==2)
    #print(idxs)
    acc1 = (np.sum(lbls1[idxs1] == pred1[idxs1]) / (len(lbls1[idxs1])) )*100
    print("Accuracy on blob 0,1,2 classes is ",acc1 )
    return acc1

# # training

# Simultaneous (not alternating) optimization of both nets; the commented-out
# `every_what_epoch` freeze/alternate schedule from earlier experiments has
# been summarized out of this copy (kept in version history).
optimizer_where = optim.RMSprop(where_net.parameters(),lr =0.001)#,nesterov=True)
optimizer_what = optim.RMSprop(what_net.parameters(), lr=0.001)#,nesterov=True)
criterion = nn.CrossEntropyLoss()

acti = []
analysis_data_tr = []
analysis_data_tst = []
loss_curi_tst = []
loss_curi_tr = []
blob_classify_acc = []
epochs = 450
every_what_epoch = 20  # unused in this simultaneous-training variant

# Epoch-0 (pre-training) loss and focus/prediction stats.
running_loss,anlys_data,correct,total,accuracy = calculate_attn_loss(train_loader,what_net,where_net,criterion)
print('training epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(0,running_loss,correct,total,accuracy))
loss_curi_tr.append(running_loss)
analysis_data_tr.append(anlys_data)

running_loss,anlys_data,correct,total,accuracy = calculate_attn_loss(test_loader,what_net,where_net,criterion)
print('test epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(0,running_loss,correct,total,accuracy))
loss_curi_tst.append(running_loss)
analysis_data_tst.append(anlys_data)

# Training loop: both optimizers step every batch.
for epoch in range(epochs):
    ep_lossi = []
    running_loss = 0.0
    what_net.train()
    where_net.train()
    for i, data in enumerate(train_loader, 0):
        inputs, labels,_ = data
        inputs = inputs.double()
        inputs, labels = inputs.to("cuda"),labels.to("cuda")
        optimizer_where.zero_grad()
        optimizer_what.zero_grad()
        # forward + backward + optimize
        avg, alpha = where_net(inputs)
        outputs = what_net(avg)
        loss = criterion(outputs, labels)
        running_loss += loss.item()
        loss.backward()
        optimizer_what.step()
        optimizer_where.step()

    # End-of-epoch evaluation on train and test sets.
    running_loss_tr,anls_data,correct,total,accuracy = calculate_attn_loss(train_loader,what_net,where_net,criterion)
    analysis_data_tr.append(anls_data)
    loss_curi_tr.append(running_loss_tr)
    print('training epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(epoch+1,running_loss_tr,correct,total,accuracy))

    running_loss_tst,anls_data,correct,total,accuracy = calculate_attn_loss(test_loader,what_net,where_net,criterion)
    analysis_data_tst.append(anls_data)
    loss_curi_tst.append(running_loss_tst)
    print('test epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(epoch+1,running_loss_tst,correct,total,accuracy))

    #torch.save(what_net.state_dict(),"Net.pt")
    #blob_classify_acc.append(cal_blob_accuracy(dtloader))

    # Early stop once training loss is essentially zero.
    if running_loss_tr<=0.05:
        break

print('Finished Training run ')
analysis_data_tr = np.array(analysis_data_tr)
analysis_data_tst = np.array(analysis_data_tst)
# colab cell-metadata comments from the original export are condensed here.

# Training-loss curve over epochs (epoch 0 = pre-training evaluation, hence
# epoch+2 points for epoch+1 completed epochs).
fig = plt.figure(figsize = (8,8))
epoch_list = np.arange(0, epoch+2)
plt.plot(epoch_list,loss_curi_tr, label='train_loss')
#plt.plot(epoch_list,loss_curi_tst, label='test_loss')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("CE Loss")
plt.title("train loss")
plt.show()
fig.savefig("train_loss_plot.pdf")
fig.savefig("train_loss_plot.png")

# Raw per-epoch [ftpt, ffpt, ftpf, ffpf, amth, alth] counts (see analyse_data).
analysis_data_tr

analysis_data_tst

# Tabulate the focus/prediction outcome counts as percentages.
# Divisors: 30 = 3000 train samples / 100, 20 = 2000 test samples / 100.
columns = ["epochs", "argmax > 0.5" ,"argmax < 0.5", "focus_true_pred_true", "focus_false_pred_true", "focus_true_pred_false", "focus_false_pred_false" ]

df_train = pd.DataFrame()
df_test = pd.DataFrame()

df_train[columns[0]] = np.arange(0,epoch+2)
df_train[columns[1]] = analysis_data_tr[:,-2]
df_train[columns[2]] = analysis_data_tr[:,-1]
df_train[columns[3]] = analysis_data_tr[:,0]/30
df_train[columns[4]] = analysis_data_tr[:,1]/30
df_train[columns[5]] = analysis_data_tr[:,2]/30
df_train[columns[6]] = analysis_data_tr[:,3]/30

df_test[columns[0]] = np.arange(0,epoch+2)
df_test[columns[1]] = analysis_data_tst[:,-2]
df_test[columns[2]] = analysis_data_tst[:,-1]
df_test[columns[3]] = analysis_data_tst[:,0]/20
df_test[columns[4]] = analysis_data_tst[:,1]/20
df_test[columns[5]] = analysis_data_tst[:,2]/20
df_test[columns[6]] = analysis_data_tst[:,3]/20

# Focus/prediction outcome percentages over epochs — training set.
# (A commented-out argmax>0.5 / argmax<0.5 plot from the original is omitted.)
fig=plt.figure(figsize=(8,6))
plt.plot(df_train[columns[0]],df_train[columns[3]], label ="focus_true_pred_true ")
plt.plot(df_train[columns[0]],df_train[columns[4]], label ="focus_false_pred_true ")
plt.plot(df_train[columns[0]],df_train[columns[5]], label ="focus_true_pred_false ")
plt.plot(df_train[columns[0]],df_train[columns[6]], label ="focus_false_pred_false ")
plt.title("On Train set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.show()
fig.savefig("train_analysis.pdf")
fig.savefig("train_analysis.png")

# Same four curves — test set.
fig=plt.figure(figsize=(8,6))
plt.plot(df_test[columns[0]],df_test[columns[3]], label ="focus_true_pred_true ")
plt.plot(df_test[columns[0]],df_test[columns[4]], label ="focus_false_pred_true ")
plt.plot(df_test[columns[0]],df_test[columns[5]], label ="focus_true_pred_false ")
plt.plot(df_test[columns[0]],df_test[columns[6]], label ="focus_false_pred_false ")
plt.title("On Test set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("testing data")
plt.show()
fig.savefig("test_analysis.pdf")
fig.savefig("test_analysis.png")

# NOTE(review): a commented-out blob-classification-accuracy-vs-epoch plot
# (driven by the also-commented cal_blob_accuracy calls in the training loop)
# followed here in the original; retained in version history.
1_mosaic_data_attention_experiments/3_stage_wise_training/alternate_minimization/Synthetic_Data/Simultaneous_blob_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Tour of Python exception handling: bare try/except, capturing the exception
# object, multiple handlers, `else`, `finally`, and silent suppression.

# +
# Uncaught, this would terminate the cell:
# print(1 / 0)  # ZeroDivisionError: division by zero
# -

# Catch without inspecting the exception object.
try:
    print(1 / 0)
except ZeroDivisionError:
    print('Error')

# Bind the exception with `as` to inspect its message and type.
try:
    print(1 / 0)
except ZeroDivisionError as err:
    print(err)
    print(type(err))

# The handler also ends the loop: values after the failing one never print.
try:
    for divisor in (-2, -1, 0, 1, 2):
        print(1 / divisor)
except ZeroDivisionError as err:
    print(err)

def divide(a, b):
    """Print a / b, reporting a ZeroDivisionError instead of raising."""
    try:
        print(a / b)
    except ZeroDivisionError as err:
        print('catch ZeroDivisionError:', err)

divide(1, 0)

# +
# A TypeError would escape divide(), since only ZeroDivisionError is handled:
# divide('a', 'b')
# TypeError: unsupported operand type(s) for /: 'str' and 'str'
# -

def divide_each(a, b):
    """Print a / b with a dedicated handler per exception type."""
    try:
        print(a / b)
    except ZeroDivisionError as err:
        print('catch ZeroDivisionError:', err)
    except TypeError as err:
        print('catch TypeError:', err)

divide_each(1, 0)
divide_each('a', 'b')

def divide_same(a, b):
    """Print a / b, handling several exception types with one clause."""
    try:
        print(a / b)
    except (ZeroDivisionError, TypeError) as err:
        print(err)

divide_same(1, 0)
divide_same('a', 'b')

def divide_wildcard(a, b):
    """Print a / b; a bare except catches anything (discouraged in practice)."""
    try:
        print(a / b)
    except:
        print('Error')

divide_wildcard(1, 0)
divide_wildcard('a', 'b')

def divide_exception(a, b):
    """Print a / b, catching any Exception subclass and printing its message."""
    try:
        print(a / b)
    except Exception as err:
        print(err)

divide_exception(1, 0)
divide_exception('a', 'b')

def divide_else(a, b):
    """Print a / b; the else clause runs only when no exception occurred."""
    try:
        print(a / b)
    except ZeroDivisionError as err:
        print('catch ZeroDivisionError:', err)
    else:
        print('finish (no error)')

divide_else(1, 2)
divide_else(1, 0)

def divide_finally(a, b):
    """Print a / b; the finally clause runs whether or not an error occurred."""
    try:
        print(a / b)
    except ZeroDivisionError as err:
        print('catch ZeroDivisionError:', err)
    finally:
        print('all finish')

divide_finally(1, 2)
divide_finally(1, 0)

def divide_else_finally(a, b):
    """Print a / b, combining else (success-only) and finally (always)."""
    try:
        print(a / b)
    except ZeroDivisionError as err:
        print('catch ZeroDivisionError:', err)
    else:
        print('finish (no error)')
    finally:
        print('all finish')

divide_else_finally(1, 2)
divide_else_finally(1, 0)

def divide_pass(a, b):
    """Print a / b, silently ignoring division by zero."""
    try:
        print(a / b)
    except ZeroDivisionError:
        pass

divide_pass(1, 0)
notebook/exception_handling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Growing classes
#
# Python insists classes be defined in one block, which complicates iterative
# notebook development. The `growing` decorator below lets a class be defined
# one method at a time, across multiple code cells.

# %load_ext pycodestyle_magic
# %flake8_on --max_line_length 120 --ignore W293,E302

from contextlib import contextmanager
from dask.delayed import Delayed
import dask
from functools import reduce
import inspect
import operator as op
import re
from typing import Callable, Sequence, Optional, cast, Set, Union

from jupytest import Suite, Report, Magic, summarize_results, assert_, eq, belong_to, is_any_of, not_

suite = Suite()
if __name__ == "__main__":
    suite |= Report()
    suite |= Magic()

# +
Decorator = Callable[[Callable], Callable]


def growing(klass: type) -> type:
    """Class decorator that attaches `method` and `classmethod` registration
    hooks to *klass*, so new methods can be added in later cells with
    `@Klass.method` / `@Klass.classmethod`, optionally wrapped in one or more
    decorators via `wrapped_in=`.
    """

    def add_method(
        fn_: Optional[Callable] = None,
        name: str = "",
        wrapped_in: Union[Decorator, Sequence[Decorator]] = []
    ) -> Callable:
        """Register *fn_* on the class (usable bare or with keyword args)."""

        def add_to_class(fn: Callable):
            # Explicit `name=` wins; otherwise use the function's own name.
            name_method = name or fn.__name__
            # Apply wrappers innermost-first; a single decorator is accepted
            # as well as any sequence of them.
            method_new = reduce(lambda f, w: w(f),
                                wrapped_in if hasattr(wrapped_in, "__iter__") else [wrapped_in],
                                fn)
            setattr(klass, name_method, method_new)
            return getattr(klass, name_method)

        if fn_ is None:
            # Called with arguments: act as a decorator factory.
            return add_to_class
        return add_to_class(cast(Callable, fn_))

    def add_class_method(
        fn_: Optional[Callable] = None,
        name: str = "",
        wrapped_in: Union[Decorator, Sequence[Decorator]] = []
    ) -> Callable:
        """Like `add_method`, but the result is installed as a classmethod."""
        # BUGFIX: normalize to a list before concatenating. The original kept
        # `wrapped_in` as-is when it was iterable, so a *tuple* of wrappers
        # made `wrappers + [classmethod]` raise TypeError (tuple + list).
        wrappers = list(wrapped_in) if hasattr(wrapped_in, "__iter__") else [wrapped_in]
        return add_method(fn_, name, wrappers + [classmethod])

    setattr(klass, "method", staticmethod(add_method))
    setattr(klass, "classmethod", staticmethod(add_class_method))
    return klass
# -

# ## Tests

def user_members(klass) -> Set[str]:
    """Names defined on *klass* excluding dunder members."""
    return {m for m in dir(klass) if not re.match(r"^__.*__$", m)}

# +
# %%test Add method
@growing
class MyClass:
    def f(self):
        return 5

assert_(op.le, {"f", "method"}, user_members(MyClass), msg="User members before adding method g")
assert_(not_(belong_to(user_members(MyClass))), "g")

@MyClass.method
def g(self, x):
    return self.f() + x

assert_(op.le, {"f", "g", "method"}, user_members(MyClass), msg="User members after adding method g")
assert_(eq, obtained=MyClass().g(3), expected=8)
# -

# +
# %%test Add Dask Delayed method
@growing
class MyClass:
    def f(self):
        return 5

@MyClass.method(wrapped_in=dask.delayed(pure=True))
def h(self, x, y):
    return self.f() * x + y

assert_(belong_to(user_members(MyClass)), "h")
assert_(is_any_of(Delayed), MyClass().h(4, 5))
assert_(eq, expected=25, obtained=MyClass().h(4, 5).compute(scheduler="single-threaded"))
# -

# +
# %%test Multiple method wrappers
@growing
class MyClass:
    def f(self):
        return 5

def wrapper1(fn):
    return lambda self, x: fn(self, x) + x

def wrapper2(fn):
    return lambda self, x: fn(self, x) * x

@MyClass.method(wrapped_in=[wrapper1, wrapper2])
def double_wrapped(self, x):
    return x / 3 + self.f()

assert_(belong_to(user_members(MyClass)), "double_wrapped")
assert_(eq, expected=153.0, obtained=MyClass().double_wrapped(9))
# -

# +
# %%test Add class method, inelegant
@growing
class MyClass:
    C = 34
    def f(self):
        return 5

# Wrapping with @classmethod *before* @MyClass.method fails because the
# classmethod object lacks the attributes add_to_class needs.
try:
    @MyClass.method
    @classmethod
    def cm(cls):
        return cls.C
    # NOTE(review): `fail` is presumably a jupytest helper, but it is not in
    # the import list above — confirm it is injected by the %%test magic.
    fail()
except AttributeError:
    pass

@MyClass.method(wrapped_in=classmethod)
def cm(cls):
    return cls.C

assert_(eq, expected=MyClass.C, obtained=MyClass.cm())
# -

# +
# %%test Add class method, preferred approach
@growing
class MyClass:
    C = 34
    def f(self):
        return 5

@MyClass.classmethod
def cm(cls):
    return cls.C

assert_(eq, expected=MyClass.C, obtained=MyClass.cm())
# -

# +
# %%test Add class method that acts as context manager
@growing
class MyClass:
    C = 34
    def f(self):
        return 5

@MyClass.classmethod(wrapped_in=contextmanager)
def changing_C(cls, num: int):
    old = cls.C
    try:
        cls.C = num
        yield
    finally:
        cls.C = old

assert_(eq, expected=34, obtained=MyClass.C)
with MyClass.changing_C(45):
    assert_(eq, expected=45, obtained=MyClass.C)
assert_(eq, expected=34, obtained=MyClass.C)
# -

# +
# %%test Add method, then redefine it
@growing
class C:
    def f(self):
        return 56

assert_(eq, expected=56, obtained=C().f())

@C.method
def f(self):
    return 890

assert_(eq, expected=890, obtained=C().f())
# -

if __name__ == "__main__":
    _ = summarize_results(suite)
growing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # REP (Reproducible Experiment Platform) # # # # # Tutorial for Flavours of Physics Challenge # # an almost real-life example of physics analysis with [yandex/REP](https://github.com/yandex/rep). # # # %pylab inline # ## Loading datasets import os.path if not os.path.exists('tau_data/check_agreement.root'): # !wget https://www.dropbox.com/s/yxyw00os79cg19n/training.root?dl=0 -O tau_data/training.root # !wget https://www.dropbox.com/s/gfefr8036dmmbsr/check_agreement.root?dl=0 -O tau_data/check_agreement.root # ## First, open the files # # in the challenge we have two datasets, each corresponding to particular decay: # - $\tau \to \mu\mu\mu$ - signal channel (MC signal and background from mass sidebands) # - $D_s \to \phi \pi $ - normalization channel (MC and RD) # # In this notebook we're playing only with the first one # + import numpy import root_numpy import pandas tau_data = pandas.DataFrame(root_numpy.root2array('tau_data/training.root')) tau_labels = tau_data.signal.values tau_data = tau_data.drop('signal', axis=1) ds_data = pandas.DataFrame(root_numpy.root2array('tau_data/check_agreement.root')) ds_labels = ds_data.signal.values ds_weights = ds_data.weight.values ds_data = ds_data.drop(['signal', 'weight'], axis=1) # - tau_data.head() # ## Check which features disagree # # Some features are not well modelled in simulation, let's find them by computing Kolmogorov-Smirnov. 
# # + from hep_ml.metrics_utils import ks_2samp_weighted ks_values = [] for feature in ds_data.columns: ks_value = ks_2samp_weighted(ds_data[feature][ds_labels == 0], ds_data[feature][ds_labels == 1], ds_weights[ds_labels == 0], ds_weights[ds_labels == 1]) ks_values.append(ks_value) # - # ### Print only 5 features with the greatest KS values pandas.DataFrame({'feature': ds_data.columns, 'KS': ks_values}).sort_values(by='KS', ascending=False).head() # # Simple training of different models # # For this pupose `REP` contains `ClassifiersFactory` (class with `sklearn` interface to train different models parallelly and simultaneously on the same dataset and to compare quality) and wrappers for different libraries: # # * __scikit-learn__ # * __TMVA__ # * __XGBoost__ # * estimators from __hep_ml__ # * __theanets__ # * __PyBrain__ # * __Neurolab__ # # (and any `sklearn`-compatible classifiers may be used). # # ### Prepare train and test # + from sklearn.cross_validation import train_test_split trainX, testX, train_labels, test_labels = train_test_split(tau_data, tau_labels, train_size=0.5, random_state=11) # - variables = list(set(trainX.columns) - {'SPDhits', 'production', 'mass', 'min_ANNmuon'}) variables # ## Defining classification model # # Models are defined in the same manner as in scikit-learn. # # Parameters taken to take few time on training. 
from rep.metaml import ClassifiersFactory
from rep.estimators import TMVAClassifier, SklearnClassifier, XGBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier

# +
# TMVA gradient-boosted decision tree, restricted to the selected features.
tmva_clf = TMVAClassifier(method='kBDT', NTrees=100, MaxDepth=6, BoostType='Grad',
                          BaggedSampleFraction=0.4, Shrinkage=0.1, UseRandomisedTrees=True,
                          UseNvars=20, NCuts=-1, features=variables)

# scikit-learn GBDT wrapped so that it only sees the selected features.
gb_clf = SklearnClassifier(GradientBoostingClassifier(
    n_estimators=100, max_features=0.7, learning_rate=0.1, min_samples_leaf=50,
    subsample=0.7, max_depth=6, random_state=11), features=variables)

# model which uses the mass
gb_mass_clf = SklearnClassifier(GradientBoostingClassifier(
    n_estimators=100, max_features=0.7, learning_rate=0.1, min_samples_leaf=50,
    subsample=0.7, max_depth=6, random_state=11), features=variables + ['mass'])

# define model with disagreement features
gb_with_spd = SklearnClassifier(GradientBoostingClassifier(
    n_estimators=100, max_features=0.7, learning_rate=0.1, min_samples_leaf=50,
    subsample=0.7, max_depth=6, random_state=11), features=variables + ['SPDhits'])
# -

# ### Define factory

# NOTE(fix): a stray second definition of `gb_mass_clf` used to live here and
# silently overrode the tuned model above (dropping min_samples_leaf, subsample,
# max_depth and the fixed random_state); it has been removed.

factory = ClassifiersFactory()
factory.add_classifier('tmva', tmva_clf)
factory.add_classifier('gb', gb_clf)
factory['xgb'] = XGBoostClassifier(n_estimators=100, colsample=0.7, eta=0.1, nthreads=8,
                                   subsample=0.7, max_depth=6, features=variables)
factory.add_classifier('gb with mass', gb_mass_clf)
factory['gb with SPDHits'] = gb_with_spd

factory.fit(trainX, train_labels, parallel_profile='threads-2')
pass

# ## Copying and cloning

import cPickle as pickle

string_repr = pickle.dumps(tmva_clf)
tmva_clf2 = pickle.loads(string_repr)

# ## Everybody loves plots!
# ### Visualizing result of training with factory
#
# `ClassificationReport` class provides the possibility to get classification description to compare different models. <br />Below you can find available functions which can help you to analyze result on arbitrary dataset.
#
# There are different plotting backends supported:
# * __matplotlib__ (default, de-facto standard plotting library, mpld3 allows turning this into interactive plots),
# * __plotly__ (proprietary package with interactive plots, information is kept on the server),
# * __ROOT__ (the library used by CERN people),
# * __bokeh__ (open-source package with interactive plots)

# ### Get ClassificationReport object

# report has some useful methods!
report = factory.test_on(testX, test_labels)

# ### Plot importances of features
# Only the features used in training are compared

features_importances = report.feature_importance()
features_importances.plot(new_plot=True, figsize=(20, 15))

# ### ROC curves (receiver operating characteristic)
# Plot roc curve for train, test data (it's the same as BackgroundRejection vs Signal Efficiency plot)

# NOTE(fix): the original passed `figsize(14, 5)` positionally, relying on the
# deprecated %pylab `figsize` helper; the keyword form is the supported API.
plt.figure(figsize=(14, 5))
plt.subplot(1, 2, 1)
report.roc().plot(xlim=(0.5, 1))
plt.subplot(1, 2, 2)
report.roc(physical_notion=False).plot(ylim=(0.5, 1))

# +
# report.roc()
# -

# ### Plot learning curves to see possible overfitting of trained classifier
# Learning curves are powerful and simple tool to analyze the behaviour of your model.

# +
from rep.report.metrics import RocAuc

learning_curve = report.learning_curve(RocAuc(), metric_label='ROC AUC', steps=1)
learning_curve.plot(new_plot=True, figsize=(8, 4), ylim=(0.86, 1.01))
# -

# ### But our challenge metric is weighted AUC!
#
# Metric in simplest case may be defined as a function with interface:
# ```
# def function(labels, probabilities, sample_weight=None)
#
# ```
#
# Let's use it:

from evaluation import roc_auc_truncated

learning_curve = report.learning_curve(roc_auc_truncated, metric_label='Challenge ROC AUC', steps=1)
learning_curve.plot(new_plot=True, figsize=(8, 4))

# ### Compute final score

report.compute_metric(roc_auc_truncated)

# ### Plot 'flatness' of classifier prediction
# (this is dependence of efficiency on variables of dataset)

efficiencies = report.efficiencies(['mass'], ignored_sideband=0.01, labels_dict={0: 'bck'})
efficiencies.plot(figsize=(18, 26), fontsize=12, show_legend=False, )

# ### Let's test models on the $D_s$ data
#
# Remove bad models from factory

for discarded in ('gb with mass', 'gb', 'tmva'):
    factory.pop(discarded)
pass

report_ds = factory.test_on(ds_data, ds_labels, ds_weights)

report_ds.prediction_pdf(plot_type='bar', log=True)

# +
def ks_metric(y_true, y_pred, sample_weight=None):
    """Weighted KS distance between the signal-probability distributions of
    the two classes (the challenge's agreement check)."""
    background = y_true == 0
    signal = y_true == 1
    return ks_2samp_weighted(y_pred[background, 1], y_pred[signal, 1],
                             sample_weight[background], sample_weight[signal])


report_ds.compute_metric(ks_metric)
# -

# # Advantages of common interface
#
# As one can see above, all the classifiers implement the same interface,
# this simplifies work, simplifies comparison of different classifiers,
# but this is not the only profit.
#
# `Sklearn` provides different tools to combine different classifiers and transformers.
# One of this tools is `AdaBoost`, which is abstract metaclassifier built on the top of some other classifier (usually, decision tree). Also `bagging` is other frequently used ensembling meta-algorithm.
#
# Let's show that now you can run Bagging over classifiers from other libraries! <br />
# _(isn't boosting over neural network what you were dreaming of all your life?)_

# +
from sklearn.ensemble import BaggingClassifier

# define base estimators for Bagging
tmva_base = TMVAClassifier(method='kBDT', NTrees=100, MaxDepth=6, BoostType='Grad',
                           BaggedSampleFraction=0.4, Shrinkage=0.1, UseRandomisedTrees=True,
                           UseNvars=20, NCuts=-1)
bagging_tmva = SklearnClassifier(BaggingClassifier(base_estimator=tmva_base, n_estimators=5,
                                                   random_state=11), features=variables)
# -

bagging_tmva.fit(trainX, train_labels)

report = bagging_tmva.test_on(testX, test_labels)
report.compute_metric(roc_auc_truncated)

# # Other advantages of common interface
# There are many things you can do with classifiers now:
# * cloning
# * getting / setting parameters as dictionaries
# * automatic hyperparameter optimization
# * build pipelines (`sklearn.pipeline`)
# * use hierarchical training, training on subsets
# * passing over internet / train classifiers on other machines
#
# And you can replace classifiers at any moment.

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.random_projection import GaussianRandomProjection

pipeline_clf = make_pipeline(StandardScaler(), GaussianRandomProjection(n_components=40), GradientBoostingClassifier())

# # Simple grid search
# In this example we are optimizing
# * parameters of GradientBoostingClassifier
# * we maximize Challenge RocAuc (= area under the ROC curve)
# * using 4 threads (each time we train 4 classifiers)
# * we use 3-Folding to estimate quality.
# * we use only 30 trees to make examples run fast

from rep.metaml import GridOptimalSearchCV, FoldingScorer, RandomParameterOptimizer

# +
# define grid parameters
grid_param = {
    'learning_rate': [0.2, 0.1, 0.05, 0.02, 0.01],
    'max_depth': [2, 3, 4, 5],
}

# use random hyperparameter optimization algorithm
param_opt = RandomParameterOptimizer(grid_param, random_state=11)

# define folding scorer with the Challenge metric
scorer = FoldingScorer(roc_auc_truncated, folds=3, fold_checks=3)
# -

# %%time
estimator = SklearnClassifier(GradientBoostingClassifier(n_estimators=30, subsample=0.5), features=variables)
grid_finder = GridOptimalSearchCV(estimator, param_opt, scorer, parallel_profile='threads-4')
grid_finder.fit(tau_data, tau_labels)

# ## Looking at the results

grid_finder.params_generator.print_results()

# # Optimizing models + using custom scorer
#
# __REP__ supports sklearn-way of combining classifiers and getting/setting their parameters.
#
# So you can tune complex models using the same approach.
#
# In this example:
# * we will create new scorer, which test everything on special part of dataset
# * we are optimizing Challenge AUC score (+ some penalties for breaking restrictions)
# * tuning parameters of TMVA's GBDT
# * using RandomForest to make good guesses about next points to check

# +
from sklearn.base import clone
from evaluation import compute_cvm


class ChallengeScorer(object):
    """Scorer for hyperparameter search: challenge AUC on a held-out sample,
    penalised when the CvM (mass correlation) or KS (data/MC agreement)
    restrictions of the challenge are violated."""

    def __init__(self, test_data, test_labels, ds_data, ds_labels, ds_weights):
        self.ds_data = ds_data
        self.ds_labels = ds_labels
        self.ds_weights = ds_weights
        self.test_data = test_data
        self.test_labels = test_labels
        # background part of the test sample, used for the mass-correlation check
        self.correlation_data = test_data[test_labels == 0]

    def __call__(self, base_estimator, params, X, y, sample_weight=None):
        model = clone(base_estimator)
        model.set_params(**params)
        model.fit(X, y)

        # CvM statistic: correlation of the prediction with the mass.
        cvm_value = compute_cvm(model.predict_proba(self.correlation_data)[:, 1],
                                self.correlation_data['mass'])

        # KS statistic: data/MC agreement on the normalization channel.
        ds_proba = model.predict_proba(self.ds_data)[:, 1]
        real = self.ds_labels == 0
        simulated = self.ds_labels == 1
        ks_value = ks_2samp_weighted(ds_proba[real], ds_proba[simulated],
                                     self.ds_weights[real], self.ds_weights[simulated])

        # Small penalty below the challenge thresholds, full penalty above.
        cvm_loss = (1 if cvm_value > 0.002 else 0.01) * (cvm_value / 0.002)
        ks_loss = (1 if ks_value > 0.09 else 0.01) * (ks_value / 0.09)

        # NOTE(review): the full predict_proba matrix (not column 1) is passed
        # to roc_auc_truncated here — confirm the metric accepts a 2D array.
        return roc_auc_truncated(self.test_labels, model.predict_proba(self.test_data)) - ks_loss - cvm_loss
# -

# +
# %%time
from rep.metaml import RegressionParameterOptimizer
from sklearn.ensemble import RandomForestRegressor

# define grid parameters
grid_param = {'Shrinkage': [0.4, 0.2, 0.1, 0.05, 0.02, 0.01],
              'NTrees': [5, 10, 15, 20, 30],
              # you can pass different sets of features to be compared
              'features': [variables[:10], variables[:10] + ['SPDhits']],
              }

param_opt = RegressionParameterOptimizer(grid_param, n_evaluations=10,
                                         regressor=RandomForestRegressor(),
                                         n_attempts=15, random_state=11)

# define folding scorer
scorer = ChallengeScorer(testX, test_labels, ds_data, ds_labels, ds_weights)

grid_finder = GridOptimalSearchCV(TMVAClassifier(method='kBDT', BoostType='Grad',),
                                  param_opt, scorer, parallel_profile='threads-3')
grid_finder.fit(trainX, train_labels)
# -

grid_finder.params_generator.print_results()

plot(grid_finder.params_generator.grid_scores_.values())

# # Summary
# Grid search in __REP__ extends sklearn grid search, uses optimization techniques to avoid complete search of estimator parameters.
#
# __REP__ has predefined scorers, metric functions, optimization techniques. Each component is replaceable and you can optimize complex models and pipelines (Folders/Bagging/Boosting and so on).
#
# ## Structure together
# * _ParameterOptimizer_ is responsible for generating new set of parameters which will be checked
#     * RandomParameterOptimizer
#     * AnnealingParameterOptimizer
#     * SubgridParameterOptimizer
#     * RegressionParameterOptimizer (this one can use any regression model, like GaussianProcesses)
#
# * _Scorer_ is responsible for training and evaluating metrics
#     * Folding scorer (uses metrics with __REP__ interface), uses averaging quality after kFolding
#
# * _GridOptimalSearchCV_ makes all of this work together and sends tasks to IPython cluster or separate threads.

# # Folding Strategy
#
# REP implements folding strategy as one more metaestimator.
#
# When we don't have enough data to split data on train/test, we're stick to k-folding cross-validation scheme.
# Folding becomes the only way when you use some multi-staged stacking algorithm.
#
# Usually we split training data into folds manually, but this is annoying (and not reliable). REP has FoldingClassifier and FoldingRegressor, which do this automatically.
from hep_ml.nnet import MLPClassifier
from rep.metaml import FoldingClassifier

# %%time
# train NN on the full data to obtain it as a feature for further training
folder = FoldingClassifier(MLPClassifier(layers=(30, 10), epochs=100, random_state=11),
                           n_folds=3, features=variables)
folder.fit(tau_data, tau_labels)

# ## Default prediction (predict i_th_ fold by i_th_ classifier)
#
# In this case each sample will be predict by estimator that was not using this particular sample in training.
#
# When you apply this prediction to some new data (not the same was passed in training), it will predict each sample by random estimator.

roc_auc_truncated(tau_labels, folder.predict_proba(tau_data))

# ## Voting prediction
#
# When predicting new data, we can predict each fold by *all classifiers* and take value, which is calculated by `vote_function`.
#
# It makes sense to use all classifier to predict new data, because averaging makes predictions more stable.

# definition of mean function, which combines all predictions
def mean_vote(x):
    """Average the per-fold predictions along the fold axis."""
    return numpy.asarray(x).mean(axis=0)


ds_proba = folder.predict_proba(ds_data, vote_function=mean_vote)[:, 1]

ks_2samp_weighted(ds_proba[ds_labels == 0], ds_proba[ds_labels == 1],
                  ds_weights[ds_labels == 0], ds_weights[ds_labels == 1])

# # Links about REP
#
# * [documentation](http://yandex.github.io/rep/)
# * [howto](http://nbviewer.ipython.org/github/yandex/rep/tree/master/howto/)
# * [bugtracker](https://github.com/yandex/rep/issues)
# * [API, contributing new estimator](https://github.com/yandex/rep/wiki/Contributing-new-estimator)
# * [API, contributing new metric](https://github.com/yandex/rep/wiki/Contributing-new-metrics)
#
1-REP_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # + [markdown] colab_type="text" id="D7tqLMoKF6uq" # Deep Learning # ============= # # Assignment 5 # ------------ # # The goal of this assignment is to train a skip-gram model over [Text8](http://mattmahoney.net/dc/textdata) data. # + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="0K1ZyLn04QZf" # %matplotlib inline # These are all the modules we'll be using later. Make sure you can import them # before proceeding further. from __future__ import print_function import collections import math import numpy as np import os import random import sys import tensorflow as tf import zipfile from matplotlib import pylab from six.moves import range from six.moves.urllib.request import urlretrieve from sklearn.manifold import TSNE import outputer import convnet # + [markdown] colab_type="text" id="aCjPJE944bkV" # Download the data from the source website if necessary. 
# +
url = 'http://mattmahoney.net/dc/'


def maybe_download(filename, expected_bytes):
    """Download a file if not present, and make sure it's the right size.

    Raises an Exception when the on-disk size does not match `expected_bytes`
    (e.g. a truncated download).  Returns the local filename.
    """
    if not os.path.exists(filename):
        # FIX: `filename` may carry a local directory prefix ('temp/...');
        # only its basename exists on the remote server, so strip the prefix
        # when forming the URL (the original used `url + filename`, which
        # produced the non-existent remote path .../dc/temp/text8.zip).
        filename, _ = urlretrieve(url + os.path.basename(filename), filename)
    statinfo = os.stat(filename)
    if statinfo.st_size == expected_bytes:
        print('Found and verified %s' % filename)
    else:
        print(statinfo.st_size)
        raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?')
    return filename


filename = maybe_download('temp/text8.zip', 31344016)
# -

# + [markdown]
# Read the data into a string.
# -
# +
def read_data(filename):
    """Extract the first file inside the zip archive as a list of words.

    FIX: the original called f.close() after `return` inside the loop, so the
    statement was unreachable and the handle leaked; a context manager closes
    the archive deterministically.
    """
    with zipfile.ZipFile(filename) as f:
        for name in f.namelist():
            return tf.compat.as_str(f.read(name)).split()


words = read_data(filename)
print('Data size %d' % len(words))
print("Words[:5]=", words[:5])
# -

# + [markdown]
# Build the dictionary and replace rare words with UNK token.
# -

# +
class Document(object):
    """Given a sequence of words builds a two vocabulary mapping between words
    and numeric IDs, And also tracks word frequency."""

    def __init__(self, words, vocabulary_size):
        total_counts = collections.Counter(words)
        self.vocabulary_total_size = len(total_counts)
        # word_counts[0] is the UNK bucket; its count is filled in below once
        # we know how many words fell outside the vocabulary.
        self.word_counts = [['UNK', -1]]
        self.word_counts.extend(total_counts.most_common(vocabulary_size - 1))
        self.word_IDs = dict()
        for word, _ in self.word_counts:
            self.word_IDs[word] = len(self.word_IDs)
        self.sequence = list()
        unknowns = 0
        for word in words:
            if word in self.word_IDs:
                index = self.word_IDs[word]
            else:
                index = 0  # dictionary['UNK']
                unknowns = unknowns + 1
            self.sequence.append(index)
        self.word_counts[0][1] = unknowns
        self.ID_to_word = dict(zip(self.word_IDs.values(), self.word_IDs.keys()))

    def vocabulary_size(self):
        """Number of distinct IDs actually assigned (including UNK)."""
        return len(self.word_IDs)

    def print_stats(self):
        """Print vocabulary coverage and a small sample of the encoding."""
        print("Vocabulary", self.vocabulary_size(), "of", self.vocabulary_total_size)
        print('Most common words (+UNK)', self.word_counts[:5])
        print('Sample data', self.sequence[:10])


document = Document(words, 50000)
document.print_stats()
# -

del words  # Hint to reduce memory.

# + [markdown]
# Function to generate a training batch for the skip-gram model.
# -

def generate_batch(batch_size, skip_count, skip_window, sequence, index):
    """Produce one skip-gram batch: `skip_count` (center, context) pairs per
    center word, sampled inside a window of `skip_window` on each side.

    Returns (batch, labels, index) where `index` is the position to resume
    from on the next call (wraps around the sequence).
    """
    assert batch_size % skip_count == 0
    assert skip_count <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    buffer = collections.deque(maxlen=span)
    for _ in range(span):
        buffer.append(sequence[index])
        index = (index + 1) % len(sequence)
    for i in range(batch_size // skip_count):
        target = skip_window  # target label at the center of the buffer
        targets_to_avoid = [skip_window]
        for j in range(skip_count):
            # sample distinct context positions around the center
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * skip_count + j] = buffer[skip_window]
            labels[i * skip_count + j, 0] = buffer[target]
        buffer.append(sequence[index])
        index = (index + 1) % len(sequence)
    return batch, labels, index


# +
print('sequence:', [document.ID_to_word[di] for di in document.sequence[:8]])

for skip_window in [1, 2]:
    data_index = 0
    batch, labels, data_index = generate_batch(
        8, 2 * skip_window, skip_window, document.sequence, data_index
    )
    print('\nwith skip_window = %d:' % (skip_window))
    print(' batch:', [document.ID_to_word[bi] for bi in batch])
    print(' labels:', [document.ID_to_word[li] for li in labels.reshape(8)])
# -

# + [markdown]
# Train a skip-gram model.
# -

def setup_graph(vocab_size, batch_size, embedding_size, sample_count, valid_examples, cbow_skips=None):
    """Build the word2vec TensorFlow graph.

    When `cbow_skips` is None the graph is a skip-gram model (one word in,
    one context word out); otherwise each input row holds `cbow_skips`
    context IDs whose embeddings are summed (CBOW).  Returns a dict with the
    graph and the handles needed by `run_graph`.
    """
    graph = tf.Graph()
    with graph.as_default():
        # Input data.
        input_shape = [batch_size, cbow_skips] if cbow_skips else [batch_size]
        train_dataset = tf.placeholder(tf.int32, shape=input_shape)
        train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
        valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
        # Variables.
        embeddings = tf.Variable(
            tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0)
        )
        weights = tf.Variable(
            tf.truncated_normal([vocab_size, embedding_size],
                                stddev=1.0 / math.sqrt(embedding_size))
        )
        biases = tf.Variable(tf.zeros([vocab_size]))
        # Model.
        # Look up embeddings for inputs; CBOW sums the context embeddings.
        embed = tf.nn.embedding_lookup(embeddings, train_dataset)
        if cbow_skips:
            embed = tf.reduce_sum(embed, 1)
        # Compute the softmax loss, using a sample of the negative labels each
        # time.  NOTE(review): the positional argument order follows the
        # pre-1.0 TF API (weights, biases, inputs, labels, ...) — confirm
        # against the TF version in use.
        sampled_softmax = tf.nn.sampled_softmax_loss(
            weights, biases, embed, train_labels, sample_count, vocab_size
        )
        loss = tf.reduce_mean(sampled_softmax)
        # Optimizer.
        optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
        # Normalize the embeddings:
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / norm
        # Compute the similarity between validation examples and all embeddings
        # using cosine distance:
        valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
        similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
    return {
        "graph": graph,
        "batch_size": batch_size,
        "train": train_dataset,
        "labels": train_labels,
        "optimizer": optimizer,
        "loss": loss,
        "normalized_embeddings": normalized_embeddings,
        "similarity": similarity,
        "valid_examples": valid_examples
    }


def run_graph(graph_data, document, batcher, skip_window, skip_count, step_count):
    """Train the graph built by `setup_graph` for `step_count` steps and
    return the final (normalized) embedding matrix.

    `batcher` is either `generate_batch` or `generate_cbow_batch`; loss is
    reported every 2000 steps and nearest neighbours every 10000 steps.
    """
    with tf.Session(graph=graph_data["graph"]) as session:
        tf.initialize_all_variables().run()
        print('Initialized')
        average_loss = 0
        data_index = 0
        for step in range(step_count + 1):
            batch_data, batch_labels, data_index = batcher(
                graph_data["batch_size"], skip_count, skip_window, document.sequence, data_index
            )
            feed_dict = {graph_data["train"]: batch_data, graph_data["labels"]: batch_labels}
            inputs = [graph_data["optimizer"], graph_data["loss"]]
            _, l = session.run(inputs, feed_dict=feed_dict)
            average_loss += l
            if step % 2000 == 0:
                if step > 0:
                    average_loss = average_loss / 2000
                # The average loss is an estimate of the loss over the last 2000 batches.
                print('Average loss at step %d: %f' % (step, average_loss))
                average_loss = 0
            # note that this is expensive (~20% slowdown if computed every 500 steps)
            if step % 10000 == 0:
                sim = graph_data["similarity"].eval()
                for i in range(len(graph_data["valid_examples"])):
                    valid_word = document.ID_to_word[graph_data["valid_examples"][i]]
                    top_k = 8  # number of nearest neighbors
                    nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                    log = 'Nearest to %s:' % valid_word
                    for k in range(top_k):
                        close_word = document.ID_to_word[nearest[k]]
                        log = '%s %s,' % (log, close_word)
                    print(log)
        return graph_data["normalized_embeddings"].eval()


# +
# We pick a random validation set to sample nearest neighbors. here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16  # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))

graph_data = setup_graph(
    document.vocabulary_size(),
    batch_size=128,
    embedding_size=128,
    sample_count=64,  # Number of negative examples to sample.
    valid_examples=valid_examples
)

final_embeddings = run_graph(
    graph_data,
    document,
    generate_batch,
    skip_window=1,  # How many words to consider left and right.
    skip_count=2,
    step_count=100000
)
# -

def apply_tsne(embeddings, points):
    """Project the first `points` embeddings (skipping UNK at row 0) to 2D."""
    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
    return tsne.fit_transform(embeddings[1:points + 1, :])


def plot(embeddings, labels):
    """Scatter-plot 2D embeddings with one text label per point."""
    assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
    pylab.figure(figsize=(15, 15))  # in inches
    for i, label in enumerate(labels):
        x, y = embeddings[i, :]
        pylab.scatter(x, y)
        pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',
                       ha='right', va='bottom')
    pylab.show()


example_words = [document.ID_to_word[i] for i in range(1, 401)]

plot(apply_tsne(final_embeddings, len(example_words)), example_words)

# + [markdown]
# ---
#
# Problem
# -------
#
# An alternative to Word2Vec is called [CBOW](http://arxiv.org/abs/1301.3781) (Continuous Bag of Words). In the CBOW model, instead of predicting a context word from a word vector, you predict a word from the sum of all the word vectors in its context. Implement and evaluate a CBOW model trained on the text8 dataset.
#
# ---

# +
def generate_cbow_batch(batch_size, skip_count, skip_window, sequence, index):
    """CBOW variant of `generate_batch`: every row of `batch` holds
    `skip_count` context-word IDs and the matching label is the center word.

    Returns (batch, labels, index) where `index` is the position to resume
    from on the next call (wraps around the sequence).
    """
    assert batch_size % skip_count == 0
    assert skip_count <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size, skip_count), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    buffer = collections.deque(maxlen=span)
    for _ in range(span):
        buffer.append(sequence[index])
        index = (index + 1) % len(sequence)
    for i in range(batch_size):
        target = skip_window  # target label at the center of the buffer
        targets_to_avoid = [skip_window]
        labels[i] = buffer[skip_window]
        for j in range(skip_count):
            # sample distinct context positions around the center
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i][j] = buffer[target]
        buffer.append(sequence[index])
        index = (index + 1) % len(sequence)
    return batch, labels, index


print('data:', [document.ID_to_word[di] for di in document.sequence[:12]])

for skip_window in [1, 2]:
    cbow_index = 0
    cbow_batch, cbow_labels, cbow_index = generate_cbow_batch(
        8, 2 * skip_window, skip_window, document.sequence, cbow_index
    )
    print('\nwith skip_window = %d:' % (skip_window))
    print(' batch:', [[document.ID_to_word[bi] for bi in skips] for skips in cbow_batch])
    print(' labels:', [document.ID_to_word[li] for li in cbow_labels.reshape(8)])
# -

# +
cbow_skip_count = 2

cbow_graph = graph_data = setup_graph(
    document.vocabulary_size(),
    batch_size=128,
    embedding_size=128,
    sample_count=64,  # Number of negative examples to sample.
    valid_examples=valid_examples,
    cbow_skips=cbow_skip_count
)

cbow_embeddings = run_graph(
    graph_data,
    document,
    generate_cbow_batch,
    skip_window=1,  # How many words to consider left and right.
    skip_count=cbow_skip_count,
    step_count=100000
)
# -

# FIX: the original re-plotted `final_embeddings` (the skip-gram result);
# evaluating the CBOW model requires plotting the embeddings trained above.
plot(apply_tsne(cbow_embeddings, len(example_words)), example_words)
word2vec.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # (3) Manual Feature Scaling, Selection, and Encoding
#
# **Feature scaling** and **feature selection** are two components of a classical data science pipeline that are not fully addressed in either `featuretools` or `h2o`. `featuretools` has no mention of these, and `h2o` includes them in some models as hyperparameters to learn and doesn't include them in others. Whereas **data encoding** for categorical variables is handled intrinsically in `h2o`, as we shall discuss in the next notebook, it does so by applying different encodings for different algorithms. For example, XGBoost models perform an internal *one-hot encoding* and Gradient Boosting Machine (GBM) models perform *enum encoding*. For this reason, we will demonstrate both the case where we handle encoding ourselves and the case where we completely leave it to `h2o`.
# +
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
from sklearn.feature_selection import SelectKBest, chi2, SelectFromModel, RFE
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler
from utils import categorical_to_onehot_columns

SEED = 42
pd.options.mode.chained_assignment = None  # suppress SettingWithCopyWarning() for chained assignments

import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
sns.set_style("white", {'ytick.major.size': 8.0})
sns.set_context("poster", font_scale=0.8)
# -

# ## Reading Data

X_train = pd.read_csv('(2)data_automated_ops/train_users.csv')
Y_train = X_train['country_destination']
X_train = X_train.drop('country_destination', axis=1)
X_train.head()

X_test = pd.read_csv('(2)data_automated_ops/test_users.csv')
X_test.head()

# ## Feature Scaling

# **Feature scaling** is notoriously effective in linear models and neural networks. As it is a rather conventional aspect of the data processing pipeline, we decided to include it here no matter what. An important thing to note is that we have to apply scaling (sometimes also referred to as **normalization**) only to *originally numerical* columns at first. Later in the notebook when we encode categorical variables, we will apply another scaling there as well. Let's first observe the different numeric and categorical variables we have.

numeric_variables = X_train.select_dtypes(include=[np.number]).columns.tolist()
categorical_variables = [column for column in X_train.columns.tolist() if column not in numeric_variables]
print("NUMERIC VARIABLES: ", numeric_variables)
print("CATEGORICAL VARIABLES: ", categorical_variables)

# ### Scaler Choice

# You can check the most common feature scaling methods from [here](https://en.wikipedia.org/wiki/Feature_scaling). The most effective algorithms are **Min-Max Scaling**, **Mean Normalization**, **Gaussian (Standard) Scaling**, **Unit-Lenth Scaling**, **Robust Scaling**, **Logarithmic Scaling**, and **Exponential Scaling** depending on the application. Here, we went with a fairly safe method: Gaussian (Standard) scaling based on mean and standard deviation of the variable sample.

# Fit on the training sample only, then apply the same transform to both sets.
scaler = StandardScaler().fit(X_train[numeric_variables])
X_train[numeric_variables] = scaler.transform(X_train[numeric_variables])
X_test[numeric_variables] = scaler.transform(X_test[numeric_variables])

# Observe scaling effect
X_train.head()

# ## Feature Selection

# Not only that feature selection yields smaller training & test sets and decreases training time vastly, it also prevents models from overfitting and allows them to generalize better. Moreover, as different models react to high numbers of feature spaces differently, we decided that feature selection might allow us to compare these models in a more fair way.

# ### Setting Back Unknown Categorical Levels & Setting 0's for Numeric NaNs

# In the previous notebook, we have converted the newly introduced missing values (from automated feature engineering) in all columns to NaNs for uniformity in representation. However, NaNs for numeric variables tell us that the corresponding users didn't have any related session information. For the majority of the numeric features we have generated, plugging in 0s as missing values seems to be logical here. On the other hand, NaNs for categorical variables tell us that corresponding users have untracked information or they used a tool/utility/method that is not recognized. Defining in a new categorical level of 'UNKNOWN' seems to be logical here. We also need these operations to eliminate missing values before applying feature selection algorithms.
X_train[categorical_variables] = X_train[categorical_variables].fillna('UNKNOWN') X_test[categorical_variables] = X_test[categorical_variables].fillna('UNKNOWN') X_train[numeric_variables] = X_train[numeric_variables].fillna(0.0) X_test[numeric_variables] = X_test[numeric_variables].fillna(0.0) # ### One-Hot Encoding Categorical Variables # Before we proceed any further, we will have to *temporarily* one-hot encode our categorical variables so that they fit in with our feature selection methods. (NOTE: Particulary, we are referring to the estimation of *chi-squared test statistic* and the utilization of *logistic regression* models.) # Transform training set to one-hot encoded representation & get encoded columns X_train_onehot_encoded = categorical_to_onehot_columns(df=X_train) encoded_columns = X_train_onehot_encoded.columns.values.tolist() # Transform test set to one-hot encoded representation X_test_onehot_encoded = categorical_to_onehot_columns(df=X_test) # Add categorical levels that exist in training set to test set for fitted_column in encoded_columns: if fitted_column not in X_test_onehot_encoded.columns.values.tolist(): X_test_onehot_encoded[fitted_column] = 0 # Drop categorical levels that don't exist in training set from test set for column in X_test_onehot_encoded.columns.values.tolist(): if column not in encoded_columns: X_test_onehot_encoded.drop(column, axis=1, inplace=True) # Ensure that training and test sets have the same column-wise order X_test_onehot_encoded = X_test_onehot_encoded[encoded_columns] assert len(X_train_onehot_encoded.columns) == len(X_test_onehot_encoded.columns) # ### Feature Selection Algorithms # Common feature selection algorithms include: # * **Pearson Correlation Coefficient**: Perphaps the most common and easiest way to perform feature selection. However, we will have to try different methods as our response variables are categorical rather than numerical. 
# * **Chi-Squared Test Statistic**: The chi-square test is a statistical test of independence to determine the dependency of two variables. If the target variable is independent of the feature variable, we can discard that feature variable. If they are dependent, the feature variable is very important. It's a pretty conventional method. # * **Recursive Feature Elimination (RFE)**: Feature ranking with recursive feature elimination. # * **Variable Importances from a Baseline Model**: Applies common algorithms such as Random Forests, Logistic Regression, and XGBoost. # # For the problem at hand, we have focused on the last three algorithms. # #### Chi-Squared Test Statistic # The chi-squared test does not apply to negative values, because it assumes a distribution of frequencies. Hence, we will first check if any negative values exist in the data, and normalize values to [0, 1] if so. # + # Normalize values to [0, 1] interval, column-wise if len(X_train_onehot_encoded[X_train_onehot_encoded < 0]) > 0: min_max_scaler = MinMaxScaler() normalized_values = min_max_scaler.fit_transform(X_train_onehot_encoded.T) X_train_onehot_encoded_normalized = pd.DataFrame(normalized_values.T, columns=X_train_onehot_encoded.columns, index=X_train_onehot_encoded.index) else: X_train_onehot_encoded_normalized = X_train_onehot_encoded chi_model = SelectKBest(chi2, k=200) chi_model.fit(X_train_onehot_encoded_normalized, Y_train) chi_support = chi_model.get_support() chi_selected_columns = X_train_onehot_encoded.loc[:, chi_support].columns.tolist() # - # #### Recursive Feature Elimination (RFE) by Logistic Regression (L2) rfe_model = RFE(estimator=LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=150, penalty='l2', random_state=SEED, n_jobs=-1), n_features_to_select=200, step=100, verbose=5) rfe_model.fit(X_train_onehot_encoded, Y_train) rfe_support = rfe_model.get_support() rfe_selected_columns = X_train_onehot_encoded.loc[:, rfe_support].columns.tolist() # #### Variable 
Importances by Logistic Regression (L2) lr_model = SelectFromModel(LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=150, penalty='l2', random_state=SEED, n_jobs=-1), threshold='1.25*median') lr_model.fit(X_train_onehot_encoded, Y_train) lr_support = lr_model.get_support() lr_selected_columns = X_train_onehot_encoded.loc[:, lr_support].columns.tolist() # ### Combining All Methods # By observing the *union* and *intersection* of the different features we have selected with the given methods above, we have decided that limiting the training & test sets to the *intersection* features seems like best option. This was partly because the *union* features were still large in number. It should be noted that the feature selection process adopted here is not optimized in any way. We could have used different *models*, *parameters*, and *numbers of independent features to select* in each method. # Get the union of all features all_selected_columns = list(set(chi_selected_columns + rfe_selected_columns + lr_selected_columns)) print("TOTAL NUMBER OF FEATURES SELECTED: ", len(all_selected_columns)) print("ALL SELECTED FEATURES: ", all_selected_columns) # Get the intersection of all features common_selected_columns = list(set(chi_selected_columns).intersection(set(rfe_selected_columns), set(lr_selected_columns))) print("NUMBER OF COMMON FEATURES SELECTED: ", len(common_selected_columns)) print("COMMON SELECTED FEATURES: ", common_selected_columns) # The columns mentioned above are selected from the one-hot encoded representation of our training & test sets. We can convert them to base features corresponding to the original columns in our data when we read it in the beginning of this notebook. 
all_base_selected_columns = list(set([column.split('=')[0] for column in all_selected_columns])) common_base_selected_columns = list(set([column.split('=')[0] for column in common_selected_columns])) print("NUMBER OF BASE COLUMNS SELECTED: ", len(common_base_selected_columns)) print("BASE COLUMNS SELECTED: ", common_base_selected_columns) # Now, let's create the trimmed versions of our data frames. X_train_trimmed = X_train[common_base_selected_columns] X_test_trimmed = X_test[common_base_selected_columns] assert X_train_trimmed.shape[1] == X_test_trimmed.shape[1] X_train_trimmed.head() # ## Save Progress with Raw & Trimmed Data # Add back the response variable to sets & save progress X_train_trimmed.loc[:, 'country_destination'] = Y_train.loc[:] X_train_trimmed.to_csv('(3)data_trimmed/raw/train_users.csv', index=None) X_test_trimmed.to_csv('(3)data_trimmed/raw/test_users.csv', index=None) # Drop response variable again, as we will continue processing features X_train_trimmed.drop('country_destination', axis=1, inplace=True) # ## Encoding Categorical Variables # Although we had chosen **one-hot encoding** as our strategy previously, those encodings were solely used for the purposes of feature selection and abondoned afterwards. Besides, there exists other and more efficient strategies in the literature that can be explored: # # * **Labeled Encoding**: Interprets categories as ordered integers. The extracted ordinality is almost always wrong, hence this is not really a preferred method. # * **Frequency Encoding**: Encodes categorical levels of each feature to values between 0.0 and 1.0 based on their relative frequincy. This method especially works when there is a high number of categorical levels that are somewhat imbalanced in distribution. # * **Target Mean Encoding**: Encodes categorical levels of each feature to the mean of the response. This method works best with binary classification, but it often yields *data leakage*. 
# # It should be noted that one-hot encoding variables increases the feature space vastly, and this may decrease the potential performance of the *tree-based models*. This is why we don't want to use this type of encoding to get our final data form. Instead, we have chosen to go with labeled encoding as an experiment. As previously discussed, `h2o`'s automated modelling method applies different kind of encodings, but labeled encoding was one strategy that wasn't automatically applied. remaining_categorical_vars = [] for column in X_train_trimmed.columns.values.tolist(): if not is_numeric_dtype(X_train_trimmed[column]): print("Currently encoding column: ", column) remaining_categorical_vars.append(column) encoder = LabelEncoder() encoder.fit(X_train_trimmed[column]) available_levels = list(encoder.classes_) for test_level in set(X_test_trimmed[column].values.tolist()): if test_level not in available_levels: X_test_trimmed.loc[X_test_trimmed[column] == test_level, column] = X_train_trimmed[column].mode()[0] X_train_trimmed.loc[:, column] = encoder.transform(X_train_trimmed[column]) X_test_trimmed.loc[:, column] = encoder.transform(X_test_trimmed[column]) X_train_trimmed.head() # ## Feature Scaling for Remaining Categorical Variables # # After this step, both our training & test sets are fully scaled and ready for being passed on to predictive models. 
# + scaler = StandardScaler() scaler = scaler.fit(X_train_trimmed[remaining_categorical_vars]) X_train_trimmed.loc[:, remaining_categorical_vars] = scaler.transform(X_train_trimmed.loc[:, remaining_categorical_vars]) X_test_trimmed.loc[:, remaining_categorical_vars] = scaler.transform(X_test_trimmed.loc[:, remaining_categorical_vars]) X_train_trimmed.head() # - # ## Save Progress with Label Encoded & Trimmed Data # Add back the response variable to sets & save progress X_train_trimmed.loc[:, 'country_destination'] = Y_train.loc[:] X_train_trimmed.to_csv('(3)data_trimmed/label_encoded/train_users.csv', index=None) X_test_trimmed.to_csv('(3)data_trimmed/label_encoded/test_users.csv', index=None)
(3)manual_feature_scaling_selection_encoding.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Polynomial Smoothing/Regression # # We will show how to use the `PolynomialSmoother` class in the StatTools `smoothing` module to fit a polynomial curve through a 2D scatterplot. # + import numpy as np import matplotlib.pyplot as plt import seaborn as sns sns.set(style="ticks", palette="muted", color_codes=True) from stattools.smoothing import PolynomialSmoother from stattools.visualization import func_plot # - # Set NumPy random number generator seed for replicability np.random.seed(100) # ## Create Some Artificial Data # + n = 100 # Actual model coefficients a = 5 b = -3 c = 1 poly = np.poly1d((c, b, a)) x = np.random.uniform(0, 10, n) y = a + b * x + c * (x ** 2) + np.random.normal(0, 20, n) # Scatter plot of the raw data plt.figure() plt.scatter(x, y, c="b", alpha=0.7, edgecolor="k") # Plot of the true curve func_plot(poly, num=100, c="k") plt.title("True Curve: $y = 5 - 3 x + x^2$") plt.show() plt.close() # - # ## Linear Model # %time model = PolynomialSmoother(deg=1).fit(x, y) # + plt.figure() # Scatter plot of the observations plt.scatter(x, y, c="b", alpha=0.7, edgecolor="k") # Plot of the true curve func_plot(poly, num=100, c="k", label="True Curve") # Plot the regression line model.fit_plot(lw=3, c="r", label="Prediction") plt.legend(loc="best", frameon=True, shadow=True) plt.title(f"{model.poly_str()}") plt.show() plt.close() # - # ## Quadratic Model # %time model = PolynomialSmoother(deg=2).fit(x, y) # + plt.figure() # Scatter plot of the observations plt.scatter(x, y, c="b", alpha=0.7, edgecolor="k") # Plot of the true curve func_plot(poly, num=100, c="k", label="True Curve") # Plot the regression curve model.fit_plot(lw=3, c="r", label="Prediction") plt.legend(loc="best", frameon=True, shadow=True) plt.title(f"{model.poly_str()}") 
plt.show() plt.close() # - # ## Cubic Model # %time model = PolynomialSmoother(deg=3).fit(x, y) # + plt.figure() # Scatter plot of the observations plt.scatter(x, y, c="b", alpha=0.7, edgecolor="k") # Plot of the true curve func_plot(poly, num=100, c="k", label="True Curve") # Plot the regression curve model.fit_plot(lw=3, c="r", label="Prediction") plt.legend(loc="best", frameon=True, shadow=True) plt.title(f"{model.poly_str()}") plt.show() plt.close() # - # ## Quartic Model # %time model = PolynomialSmoother(deg=4).fit(x, y) # + plt.figure() # Scatter plot of the observations plt.scatter(x, y, c="b", alpha=0.7, edgecolor="k") # Plot of the true curve func_plot(poly, num=100, c="k", label="True Curve") # Plot the regression curve model.fit_plot(lw=3, c="r", label="Prediction") plt.legend(loc="best", frameon=True, shadow=True) plt.title(f"{model.poly_str()}") plt.show() plt.close() # - # ## Quintic Model # %time model = PolynomialSmoother(deg=5).fit(x, y) # + plt.figure() # Scatter plot of the observations plt.scatter(x, y, c="b", alpha=0.7, edgecolor="k") # Plot of the true curve func_plot(poly, num=100, c="k", label="True Curve") # Plot the regression curve model.fit_plot(lw=3, c="r", label="Prediction") plt.legend(loc="best", frameon=True, shadow=True) plt.title(f"{model.poly_str(precision=2)}") plt.show() plt.close() # - # ## Sextic Model # %time model = PolynomialSmoother(deg=6).fit(x, y) # + plt.figure() # Scatter plot of the observations plt.scatter(x, y, c="b", alpha=0.7, edgecolor="k") # Plot of the true curve func_plot(poly, num=100, c="k", label="True Curve") # Plot the regression curve model.fit_plot(lw=3, c="r", label="Prediction") plt.legend(loc="best", frameon=True, shadow=True) plt.title(f"{model.poly_str(precision=1)}") plt.show() plt.close() # - # ## Septic Model # %time model = PolynomialSmoother(deg=7).fit(x, y) # + plt.figure() # Scatter plot of the observations plt.scatter(x, y, c="b", alpha=0.7, edgecolor="k") # Plot of the true curve 
func_plot(poly, num=100, c="k", label="True Curve") # Plot the regression curve model.fit_plot(lw=3, c="r", label="Prediction") plt.legend(loc="best", frameon=True, shadow=True) plt.title(f"{model.poly_str(precision=1)}") plt.show() plt.close()
examples/Polynomial Smoothing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Visual Question Answering in gluon # This is a notebook for implementing visual question answering in gluon. from __future__ import print_function import numpy as np import mxnet as mx import mxnet.ndarray as F import mxnet.contrib.ndarray as C import mxnet.gluon as gluon from mxnet.gluon import nn from mxnet import autograd import bisect from IPython.core.display import display, HTML import logging logging.basicConfig(level=logging.INFO) import os from mxnet.test_utils import download import json from IPython.display import HTML, display # ## The VQA dataset # In the VQA dataset, for each sample, there is one image and one question. The label is the answer for the question regarding the image. You can download the VQA1.0 dataset from <a href="http://www.visualqa.org/vqa_v1_download.html">VQA</a> website. # # ![](../img/pizza.png ) # # You need to preprocess the data: # # (1) Extract the samples from original json files. # # (2) Filter the samples giving top k answers(k can be 1000, 2000...). This will make the prediction easier. # ## Pretrained Models # Usually people use pretrained models to extract features from the image and question. # # __Image pretrained model__: # # <a href="https://arxiv.org/abs/1409.1556">VGG</a>: A key aspect of VGG was to use many convolutional blocks with relatively narrow kernels, followed by a max-pooling step and to repeat this block multiple times. # # <a href="https://arxiv.org/abs/1512.03385">Resnet</a>: It is a residual learning framework to ease the training of networks that are substantially deep. It reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. 
# # __Question pretrained model__: # # <a href="https://code.google.com/archive/p/word2vec/">Word2Vec</a>: The word2vec tool takes a text corpus as input and produces the word vectors as output. It first constructs a vocabulary from the training text data and then learns vector representation of words. The model contains 300-dimensional vectors for 3 million words and phrases. # # <a href="https://nlp.stanford.edu/projects/glove/">Glove</a>: Similar to Word2Vec, it is a word embedding dataset. It contains 100/200/300-dimensional vectors for 2 million words. # # <a href="https://arxiv.org/abs/1506.06726">skipthought</a>: This is an encoder-decoder model that tries to reconstruct the surrounding sentences of an encoded passage. Sentences that share semantic and syntactic properties are thus mapped to similar vector representations. Different from the previous two model, this is a sentence based model. # # <a href="https://research.google.com/pubs/pub45610.html">GNMT encoder</a>: We propose using the encoder of google neural machine translation system to extract the question features. # # __We will discuss about how to extract the features <a href="https://github.com/shiyangdaisy23/vqa-mxnet-gluon/blob/master/extract-feature.ipynb">here</a> in details.__ # ## Define the model # We define out model with gluon. gluon.Block is the basic building block of models. If any operator is not defined under gluon, you can use mxnet.ndarray operators to subsititude. # Some parameters we are going to use batch_size = 64 ctx = mx.cpu() compute_size = batch_size out_dim = 10000 gpus = 1 # In the __first model__, we will concatenate the image and question features and use multilayer perception(MLP) to predict the answer. class Net1(gluon.Block): def __init__(self, **kwargs): super(Net1, self).__init__(**kwargs) with self.name_scope(): # layers created in name_scope will inherit name space # from parent layer. 
self.bn = nn.BatchNorm() self.dropout = nn.Dropout(0.3) self.fc1 = nn.Dense(8192,activation="relu") self.fc2 = nn.Dense(1000) def forward(self, x): x1 = F.L2Normalization(x[0]) x2 = F.L2Normalization(x[1]) z = F.concat(x1,x2,dim=1) z = self.fc1(z) z = self.bn(z) z = self.dropout(z) z = self.fc2(z) return z # In the __second model__, instead of linearly combine the image and text features, we use count sketch to estimate the outer product of the image and question features. It is also named as multimodel compact bilinear pooling(MCB). # # This method was proposed in <a href="https://arxiv.org/abs/1606.01847">Multimodal Compact Bilinear Pooling for VQA</a>. The key idea is: # # $\quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \psi(x \otimes y, h,s) = \psi(x,h,s) \star \psi(y,h,s)$ # # where $\psi$ is the count sketch operator, $x,y$ are the inputs, $h, s$ are the hash tables, $\otimes$ defines outer product and $\star$ is the convolution operator. This can further be simplified by using FFT properties: convolution in time domain equals to elementwise product in frequency domain. # # One improvement we made is adding ones vectors to each features before count sketch. The intuition is: given input vectors $x,y$, estimating outer product between $[x,1s]$ and $[y, 1s]$ gives us information more than just $x \otimes y$. It also contains information of $x$ and $y$. class Net2(gluon.Block): def __init__(self, **kwargs): super(Net2, self).__init__(**kwargs) with self.name_scope(): # layers created in name_scope will inherit name space # from parent layer. 
self.bn = nn.BatchNorm() self.dropout = nn.Dropout(0.3) self.fc1 = nn.Dense(8192,activation="relu") self.fc2 = nn.Dense(1000) def forward(self, x): x1 = F.L2Normalization(x[0]) x2 = F.L2Normalization(x[1]) text_ones = F.ones((batch_size/gpus, 2048),ctx = ctx) img_ones = F.ones((batch_size/gpus, 1024),ctx = ctx) text_data = F.Concat(x1, text_ones,dim = 1) image_data = F.Concat(x2,img_ones,dim = 1) # Initialize hash tables S1 = F.array(np.random.randint(0, 2, (1,3072))*2-1,ctx = ctx) H1 = F.array(np.random.randint(0, out_dim,(1,3072)),ctx = ctx) S2 = F.array(np.random.randint(0, 2, (1,3072))*2-1,ctx = ctx) H2 = F.array(np.random.randint(0, out_dim,(1,3072)),ctx = ctx) # Count sketch cs1 = C.count_sketch( data = image_data, s=S1, h = H1 ,name='cs1',out_dim = out_dim) cs2 = C.count_sketch( data = text_data, s=S2, h = H2 ,name='cs2',out_dim = out_dim) fft1 = C.fft(data = cs1, name='fft1', compute_size = compute_size) fft2 = C.fft(data = cs2, name='fft2', compute_size = compute_size) c = fft1 * fft2 ifft1 = C.ifft(data = c, name='ifft1', compute_size = compute_size) # MLP z = self.fc1(ifft1) z = self.bn(z) z = self.dropout(z) z = self.fc2(z) return z # __We will introduce attention model in this <a href="https://github.com/shiyangdaisy23/vqa-mxnet-gluon/blob/master/Attention-VQA-gluon.ipynb">notebook</a>.__ # ## Data Iterator # The inputs of the data iterator are extracted image and question features. At each step, the data iterator will return a data batch list: question data batch and image data batch. # # We need to seperate the data batches by the length of the input data because the input questions are in different lengths. The $buckets$ parameter defines the max length you want to keep in the data iterator. Here since we already used pretrained model to extract the question feature, the question length is fixed as the output of the pretrained model. # # The $layout$ parameter defines the layout of the data iterator output. 
"N" specify where is the data batch dimension is. # # $reset()$ function is called after every epoch. $next()$ function is call after each batch. class VQAtrainIter(mx.io.DataIter): def __init__(self, img, sentences, answer, batch_size, buckets=None, invalid_label=-1, text_name='text', img_name = 'image', label_name='softmax_label', dtype='float32', layout='NTC'): super(VQAtrainIter, self).__init__() if not buckets: buckets = [i for i, j in enumerate(np.bincount([len(s) for s in sentences])) if j >= batch_size] buckets.sort() ndiscard = 0 self.data = [[] for _ in buckets] for i in range(len(sentences)): buck = bisect.bisect_left(buckets, len(sentences[i])) if buck == len(buckets): ndiscard += 1 continue buff = np.full((buckets[buck],), invalid_label, dtype=dtype) buff[:len(sentences[i])] = sentences[i] self.data[buck].append(buff) self.data = [np.asarray(i, dtype=dtype) for i in self.data] self.answer = answer self.img = img print("WARNING: discarded %d sentences longer than the largest bucket."%ndiscard) self.batch_size = batch_size self.buckets = buckets self.text_name = text_name self.img_name = img_name self.label_name = label_name self.dtype = dtype self.invalid_label = invalid_label self.nd_text = [] self.nd_img = [] self.ndlabel = [] self.major_axis = layout.find('N') self.default_bucket_key = max(buckets) if self.major_axis == 0: self.provide_data = [(text_name, (batch_size, self.default_bucket_key)), (img_name, (batch_size, self.default_bucket_key))] self.provide_label = [(label_name, (batch_size, self.default_bucket_key))] elif self.major_axis == 1: self.provide_data = [(text_name, (self.default_bucket_key, batch_size)), (img_name, (self.default_bucket_key, batch_size))] self.provide_label = [(label_name, (self.default_bucket_key, batch_size))] else: raise ValueError("Invalid layout %s: Must by NT (batch major) or TN (time major)") self.idx = [] for i, buck in enumerate(self.data): self.idx.extend([(i, j) for j in range(0, len(buck) - batch_size + 1, 
batch_size)]) self.curr_idx = 0 self.reset() def reset(self): self.curr_idx = 0 self.nd_text = [] self.nd_img = [] self.ndlabel = [] for buck in self.data: label = np.empty_like(buck.shape[0]) label = self.answer self.nd_text.append(mx.ndarray.array(buck, dtype=self.dtype)) self.nd_img.append(mx.ndarray.array(self.img, dtype=self.dtype)) self.ndlabel.append(mx.ndarray.array(label, dtype=self.dtype)) def next(self): if self.curr_idx == len(self.idx): raise StopIteration i, j = self.idx[self.curr_idx] self.curr_idx += 1 if self.major_axis == 1: img = self.nd_img[i][j:j + self.batch_size].T text = self.nd_text[i][j:j + self.batch_size].T label = self.ndlabel[i][j:j+self.batch_size] else: img = self.nd_img[i][j:j + self.batch_size] text = self.nd_text[i][j:j + self.batch_size] label = self.ndlabel[i][j:j+self.batch_size] data = [text, img] return mx.io.DataBatch(data, [label], bucket_key=self.buckets[i], provide_data=[(self.text_name, text.shape),(self.img_name, img.shape)], provide_label=[(self.label_name, label.shape)]) # ## Load the data # Here we will use subset of VQA dataset in this tutorial. We extract the image feature from ResNet-152, text feature from GNMT encoder. In first two model, we have 21537 training samples and 1044 validation samples in this tutorial. Image feature is a 2048-dim vector. Question feature is a 1048-dim vector. 
# +
# Download the dataset
dataset_files = {'train': ('train_question.npz', 'train_img.npz', 'train_ans.npz'),
                 'validation': ('val_question.npz', 'val_img.npz', 'val_ans.npz'),
                 'test': ('test_question_id.npz', 'test_question.npz', 'test_img_id.npz',
                          'test_img.npz', 'atoi.json', 'test_question_txt.json')}

train_q, train_i, train_a = dataset_files['train']
val_q, val_i, val_a = dataset_files['validation']

url_format = 'https://apache-mxnet.s3-accelerate.amazonaws.com/gluon/dataset/VQA-notebook/{}'

if not os.path.exists(train_q):
    logging.info('Downloading training dataset.')
    download(url_format.format(train_q), overwrite=True)
    download(url_format.format(train_i), overwrite=True)
    download(url_format.format(train_a), overwrite=True)
if not os.path.exists(val_q):
    logging.info('Downloading validation dataset.')
    download(url_format.format(val_q), overwrite=True)
    download(url_format.format(val_i), overwrite=True)
    download(url_format.format(val_a), overwrite=True)

# +
layout = 'NT'
bucket = [1024]

train_question = np.load(train_q)['x']
val_question = np.load(val_q)['x']
train_ans = np.load(train_a)['x']
val_ans = np.load(val_a)['x']
train_img = np.load(train_i)['x']
val_img = np.load(val_i)['x']

print("Total training sample:", train_ans.shape[0])
print("Total validation sample:", val_ans.shape[0])

data_train = VQAtrainIter(train_img, train_question, train_ans, batch_size,
                          buckets=bucket, layout=layout)
data_eva = VQAtrainIter(val_img, val_question, val_ans, batch_size,
                        buckets=bucket, layout=layout)
# -

# ## Initialize the Parameters

net = Net1()
#net = Net2()
net.collect_params().initialize(mx.init.Xavier(), ctx=ctx)

# ## Loss and Evaluation Metrics

# +
loss = gluon.loss.SoftmaxCrossEntropyLoss()
metric = mx.metric.Accuracy()


def evaluate_accuracy(data_iterator, net):
    """Compute classification accuracy of `net` over all batches in `data_iterator`.

    The shared `metric` accumulator is reset on entry so that repeated calls
    (e.g. validation accuracy then training accuracy each epoch) do not bleed
    into each other.
    """
    # BUGFIX: without this reset the global metric accumulated over every call
    # ever made, so each reported accuracy mixed in all previous evaluations.
    metric.reset()
    data_iterator.reset()
    for i, batch in enumerate(data_iterator):
        # No gradients are needed during evaluation, so autograd.record() is
        # intentionally not used here (saves memory and time).
        data1 = batch.data[0].as_in_context(ctx)
        data2 = batch.data[1].as_in_context(ctx)
        data = [data1, data2]
        label = batch.label[0].as_in_context(ctx)
        output = net(data)
        metric.update([label], [output])
    return metric.get()[1]
# -

# ## Optimizer

trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01})

# ## Training loop

epochs = 10
moving_loss = 0.
best_eva = 0
for e in range(epochs):
    data_train.reset()
    for i, batch in enumerate(data_train):
        data1 = batch.data[0].as_in_context(ctx)
        data2 = batch.data[1].as_in_context(ctx)
        data = [data1, data2]
        label = batch.label[0].as_in_context(ctx)
        with autograd.record():
            output = net(data)
            cross_entropy = loss(output, label)
        cross_entropy.backward()
        trainer.step(data[0].shape[0])

        ##########################
        #  Keep a moving average of the losses
        ##########################
        # BUGFIX: average the per-sample losses over the whole batch instead of
        # looking only at the first sample (the old `asnumpy()[0]`).
        batch_loss = np.mean(cross_entropy.asnumpy())
        if i == 0:
            moving_loss = batch_loss
        else:
            moving_loss = .99 * moving_loss + .01 * batch_loss
        #if i % 200 == 0:
        #    print("Epoch %s, batch %s. Moving avg of loss: %s" % (e, i, moving_loss))

    eva_accuracy = evaluate_accuracy(data_eva, net)
    train_accuracy = evaluate_accuracy(data_train, net)
    print("Epoch %s. Loss: %s, Train_acc %s, Eval_acc %s" % (e, moving_loss, train_accuracy, eva_accuracy))
    if eva_accuracy > best_eva:
        best_eva = eva_accuracy
        logging.info('Best validation acc found. Checkpointing...')
        net.save_params('vqa-mlp-%d.params'%(e))

# ## Try it out!
# Currently we have test data for the first two models we mentioned above. After the
# training loop over Net1 or Net2, we can try it on test data. Here we have 10 test
# samples.
# + test = True if test: test_q_id, test_q, test_i_id, test_i, atoi,text = dataset_files['test'] if test and not os.path.exists(test_q): logging.info('Downloading test dataset.') download(url_format.format(test_q_id),overwrite=True) download(url_format.format(test_q),overwrite=True) download(url_format.format(test_i_id),overwrite=True) download(url_format.format(test_i),overwrite=True) download(url_format.format(atoi),overwrite=True) download(url_format.format(text),overwrite=True) if test: test_question = np.load("test_question.npz")['x'] test_img = np.load("test_img.npz")['x'] test_question_id = np.load("test_question_id.npz")['x'] test_img_id = np.load("test_img_id.npz")['x'] #atoi = np.load("atoi.json")['x'] # - # We pass the test data iterator to the trained model. data_test = VQAtrainIter(test_img, test_question, np.zeros((test_img.shape[0],1)), 10, buckets = bucket,layout=layout) for i, batch in enumerate(data_test): with autograd.record(): data1 = batch.data[0].as_in_context(ctx) data2 = batch.data[1].as_in_context(ctx) data = [data1,data2] #label = batch.label[0].as_in_context(ctx) #label_one_hot = nd.one_hot(label, 10) output = net(data) output = np.argmax(output.asnumpy(), axis = 1) idx = np.random.randint(10) print(idx) question = json.load(open(text)) print("Question:", question[idx]) # + image_name = 'COCO_test2015_' + str(int(test_img_id[idx])).zfill(12)+'.jpg' if not os.path.exists(image_name): logging.info('Downloading training dataset.') download(url_format.format('test_images/'+image_name),overwrite=True) from IPython.display import Image Image(filename=image_name) # - dataset = json.load(open('atoi.json')) ans = dataset['ix_to_ans'][str(output[idx]+1)] print("Answer:", ans) # For whinges or inquiries, [open an issue on GitHub.](https://github.com/zackchase/mxnet-the-straight-dope)
chapter08_computer-vision/visual-question-answer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: dldiy # language: python # name: dldiy # --- # + [markdown] colab_type="text" id="8TOhzssTBZAb" # # Finding Synonyms and Analogies # # This notebook is taken from a [PyTorch NLP tutorial](https://github.com/joosthub/pytorch-nlp-tutorial-ny2018/blob/master/day_1/0_Using_Pretrained_Embeddings.ipynb) source: [repository for the training tutorial as the 2018 O'Reilly AI Conference in NYC on April 29 and 30, 2018](https://github.com/joosthub/pytorch-nlp-tutorial-ny2018) # + ## Colab SETUP # #!pip install annoy # + colab={} colab_type="code" id="HhJ0hUC5BZAe" from annoy import AnnoyIndex import numpy as np import torch from tqdm import notebook import os from pathlib import Path # + [markdown] colab_type="text" id="dhHjwm2JBZAl" # Glove embeddings can be downloaded from [GloVe webpage](https://nlp.stanford.edu/projects/glove/). 
# # You need to uncomment the appropriate part in the following cell

# + colab={} colab_type="code" id="f76c_EM1Ch3q"
## Colab SETUP
# #!mkdir data
# #%cd data
# #!wget http://downloads.cs.stanford.edu/nlp/data/glove.6B.zip
# #!unzip glove.6B.zip
#ROOT_DIR = 'content'

## local SETUP download glove in ~/data/ with the commands wget http://downloads.cs.stanford.edu/nlp/data/glove.6B.zip
## and unzip glove.6B.zip
# BUGFIX: ROOT_DIR was only ever assigned in commented-out lines, so the next
# cell raised a NameError. Default to the local setup; Colab users should set
# ROOT_DIR = 'content' instead (see above).
ROOT_DIR = Path.home()

# + colab={} colab_type="code" id="NWUdh2BxBZAn"
data_path = os.path.join(ROOT_DIR, 'data/')
file = 'glove.6B.100d.txt'
glove_filename = data_path + file


# + colab={} colab_type="code" id="4GX0hecoBZAr"
def load_word_vectors(filename):
    """Load GloVe vectors from a whitespace-separated text file.

    Each line is a word followed by its vector components.

    Returns:
        word_to_index: dict mapping each word to its (insertion-order) index.
        word_vectors: list of 1-D numpy arrays, parallel to word_to_index.
    """
    word_to_index = {}
    word_vectors = []
    # GloVe releases are UTF-8; be explicit so this also works on platforms
    # whose default encoding differs (e.g. Windows cp1252).
    with open(filename, encoding='utf-8') as fp:
        # Iterate the file object directly instead of fp.readlines() so the
        # whole (multi-hundred-MB) file is never held in memory twice.
        for line in notebook.tqdm(fp, leave=False):
            line = line.split(" ")
            word = line[0]
            word_to_index[word] = len(word_to_index)
            vec = np.array([float(x) for x in line[1:]])
            word_vectors.append(vec)
    return word_to_index, word_vectors


# + colab={} colab_type="code" id="lDUMIr66BZAz"
word_to_index, word_vectors = load_word_vectors(glove_filename)

# + colab={} colab_type="code" id="jvIOgxSRBZA8"
len(word_vectors)

# + colab={} colab_type="code" id="3iaYd15JBZBB"
word_vectors[0].shape

# + colab={} colab_type="code" id="IlkJErGvBZBG"
word_to_index['beautiful']


# + colab={} colab_type="code" id="Y3K8WuWEBZBL"
class PreTrainedEmbeddings(object):
    """Wraps GloVe vectors with an Annoy index for nearest-neighbour queries."""

    def __init__(self, glove_filename):
        self.word_to_index, self.word_vectors = load_word_vectors(glove_filename)
        self.word_vector_size = len(self.word_vectors[0])
        self.index_to_word = {v: k for k, v in self.word_to_index.items()}
        self.index = AnnoyIndex(self.word_vector_size, metric='euclidean')
        print('Building Index')
        for _, i in notebook.tqdm(self.word_to_index.items(), leave=False):
            self.index.add_item(i, self.word_vectors[i])
        # 50 trees: more trees give better recall at the cost of a larger index.
        self.index.build(50)
        print('Finished!')

    def get_embedding(self, word):
        """Return the embedding vector of `word` (KeyError if out of vocabulary)."""
        return self.word_vectors[self.word_to_index[word]]

    def closest(self, word, n=1):
        """Return the `n` vocabulary words nearest to `word` in embedding space."""
        vector = self.get_embedding(word)
        nn_indices = self.index.get_nns_by_vector(vector, n)
        return [self.index_to_word[neighbor] for neighbor in nn_indices]

    def closest_v(self, vector, n=1):
        """Return the `n` vocabulary words nearest to an arbitrary `vector`."""
        nn_indices = self.index.get_nns_by_vector(vector, n)
        return [self.index_to_word[neighbor] for neighbor in nn_indices]

    def sim(self, w1, w2):
        """Dot-product similarity between the (unnormalised) embeddings of w1 and w2."""
        return np.dot(self.get_embedding(w1), self.get_embedding(w2))


# + colab={} colab_type="code" id="bZzQW7pyBZBP"
glove = PreTrainedEmbeddings(glove_filename)

# + colab={} colab_type="code" id="Stpp4FNaBZBT"
glove.closest('apple', n=5)

# + colab={} colab_type="code" id="ARYYKOwcBZBa"
glove.closest('chip', n=5)

# + colab={} colab_type="code" id="e0-ywu08BZBe"
glove.closest('baby', n=5)

# + colab={} colab_type="code" id="AtS8hOuPBZBm"
glove.closest('beautiful', n=5)


# + colab={} colab_type="code" id="jhPAEA5lBZBs"
def SAT_analogy(w1, w2, w3):
    '''
    Solves problems of the type:
    w1 : w2 :: w3 : __
    '''
    closest_words = []
    try:
        w1v = glove.get_embedding(w1)
        w2v = glove.get_embedding(w2)
        w3v = glove.get_embedding(w3)
        w4v = w3v + (w2v - w1v)
        closest_words = glove.closest_v(w4v, n=5)
        closest_words = [w for w in closest_words if w not in [w1, w2, w3]]
    except KeyError:
        # BUGFIX: only out-of-vocabulary words should be swallowed; the old
        # bare `except` also hid genuine programming errors (and even
        # KeyboardInterrupt).
        pass
    if len(closest_words) == 0:
        print(':-(')
    else:
        print('{} : {} :: {} : {}'.format(w1, w2, w3, closest_words[0]))


# + colab={} colab_type="code" id="rlxpUPIhBZBw"
SAT_analogy('man', 'he', 'woman')

# + colab={} colab_type="code" id="07UCvv3fBZB4"
SAT_analogy('fly', 'plane', 'sail')

# + colab={} colab_type="code" id="gOHAs9gIBZB9"
SAT_analogy('beijing', 'china', 'tokyo')

# + colab={} colab_type="code" id="VdpkkLgNBZCC"
SAT_analogy('man', 'woman', 'son')

# + colab={} colab_type="code" id="M019lyRbBZCH"
SAT_analogy('man', 'doctor', 'woman')

# + colab={} colab_type="code" id="Dmuw3B3zBZCL"
SAT_analogy('woman', 'leader', 'man')

# + colab={} colab_type="code" id="qeWK5ASVBZCP"
Module8/08_Playing_with_word_embedding.ipynb
def one_dict(list_dict):
    """Transpose a list of dictionaries into a single dictionary of lists.

    Every entry is assumed to share the keys of the first one; each key ends
    up mapped to the list of its values collected across the entries, in
    the original order.
    """
    transposed = {key: [] for key in list_dict[0].keys()}
    for record in list_dict:
        for key, value in record.items():
            transposed[key].append(value)
    return transposed
# # + jupyter={"outputs_hidden": false} import pandas as pd import matplotlib.pyplot as plt # - # You create a dictionary, this is just data. # + jupyter={"outputs_hidden": false} dict_={'a':[11,21,31],'b':[12,22,32]} # - # When you create a Pandas object with the Dataframe constructor in API lingo, this is an "instance". The data in the dictionary is passed along to the pandas API. You then use the dataframe to communicate with the API. # + jupyter={"outputs_hidden": false} df=pd.DataFrame(dict_) type(df) # - # <img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%206/images/pandas_api.png" width = 800, align = "center" alt="logistic regression block diagram" /> # When you call the method head the dataframe communicates with the API displaying the first few rows of the dataframe. # # # # + jupyter={"outputs_hidden": false} df.head() # - # When you call the method mean,the API will calculate the mean and return the value. # + jupyter={"outputs_hidden": false} df.mean() # - # <h2 id="ref1">REST APIs</h2> # <p>Rest API’s function by sending a <b>request</b>, the request is communicated via HTTP message. The HTTP message usually contains a JSON file. This contains instructions for what operation we would like the service or <b>resource</b> to perform. In a similar manner, API returns a <b>response</b>, via an HTTP message, this response is usually contained within a JSON.</p> # <p>In this lab, we will use the <a href=https://pypi.org/project/nba-api/>NBA API</a> to determine how well the Golden State Warriors performed against the Toronto Raptors. We will use the API do the determined number of points the Golden State Warriors won or lost by for each game. So if the value is three, the Golden State Warriors won by three points. Similarly it the Golden State Warriors lost by two points the result will be negative two. 
The API is relatively simple and will handle a lot of the details, such as endpoints and authentication
# The information requested is provided and is transmitted via an HTTP response this is assigned to the object <code>gamefinder</code>. # + jupyter={"outputs_hidden": false} # Since https://stats.nba.com does lot allow api calls from Cloud IPs and Skills Network Labs uses a Cloud IP. # The following code is comment out, you can run it on jupyter labs on your own computer. # gamefinder = leaguegamefinder.LeagueGameFinder(team_id_nullable=id_warriors) # - # we can see the json file by running the following line of code. # + # Since https://stats.nba.com does lot allow api calls from Cloud IPs and Skills Network Labs uses a Cloud IP. # The following code is comment out, you can run it on jupyter labs on your own computer. # gamefinder.get_json() # - # The game finder object has a method <code>get_data_frames()</code>, that returns a dataframe. If we view the dataframe, we can see it contains information about all the games the Warriors played. The <code>PLUS_MINUS</code> column contains information on the score, if the value is negative the Warriors lost by that many points, if the value is positive, the warriors one by that amount of points. The column <code>MATCHUP </code>had the team the Warriors were playing, GSW stands for golden state and TOR means Toronto Raptors; <code>vs</code> signifies it was a home game and the <code>@ </code>symbol means an away game. # + # Since https://stats.nba.com does lot allow api calls from Cloud IPs and Skills Network Labs uses a Cloud IP. # The following code is comment out, you can run it on jupyter labs on your own computer. # games = gamefinder.get_data_frames()[0] # games.head() # - # you can download the dataframe from the API call for Golden State and run the rest like a video. # ! 
wget https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%205/Labs/Golden_State.pkl file_name = "Golden_State.pkl" games = pd.read_pickle(file_name) games.head() # We can create two dataframes, one for the games that the Warriors faced the raptors at home and the second for away games. # + jupyter={"outputs_hidden": false} games_home=games [games ['MATCHUP']=='GSW vs. TOR'] games_away=games [games ['MATCHUP']=='GSW @ TOR'] # - # We can calculate the mean for the column <code>PLUS_MINUS</code> for the dataframes <code>games_home</code> and <code> games_away</code>: # + jupyter={"outputs_hidden": false} games_home.mean()['PLUS_MINUS'] # + jupyter={"outputs_hidden": false} games_away.mean()['PLUS_MINUS'] # - # We can plot out the <code>PLUS MINUS</code> column for for the dataframes <code>games_home</code> and <code> games_away</code>. # We see the warriors played better at home. # + jupyter={"outputs_hidden": false} fig, ax = plt.subplots() games_away.plot(x='GAME_DATE',y='PLUS_MINUS', ax=ax) games_home.plot(x='GAME_DATE',y='PLUS_MINUS', ax=ax) ax.legend(["away", "home"]) plt.show() # - # <a href="http://cocl.us/NotebooksPython101bottom"><img src = "https://ibm.box.com/shared/static/irypdxea2q4th88zu1o1tsd06dya10go.png" width = 750, align = "center"></a> # # #### About the Authors: # # [<NAME>]( https://www.linkedin.com/in/joseph-s-50398b136/) has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD. # # Copyright &copy; 2017 [cognitiveclass.ai](https:cognitiveclass.ai). This notebook and its source code are released under the terms of the [MIT License](cognitiveclass.ai).
4-Python_for_Data_Science_and_AI/4 Read, Write, Pandas, Numpy, APIs/.ipynb_checkpoints/PY0101EN-5.1_Intro_API-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: kagglevil # language: python # name: kagglevil # --- # --- # title: "f1 Score" # author: "Vaishnavi" # date: 2020-08-09 # description: "-" # type: technical_note # draft: false # # # --- from sklearn.metrics import f1_score true = [0, 1, 2, 0, 1, 2] pred = [0, 2, 1, 0, 0, 1] f1_score(true, pred, average='macro') f1_score(true, pred, average='micro') f1_score(true, pred, average='weighted')
docs/python/basics/f1_score.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # Greyscale ℓ1-TV Denoising # ========================= # # This example demonstrates the use of class [tvl1.TVL1Denoise](http://sporco.rtfd.org/en/latest/modules/sporco.admm.tvl1.html#sporco.admm.tvl1.TVL1Denoise) for removing salt & pepper noise from a greyscale image using Total Variation regularization with an ℓ1 data fidelity term (ℓ1-TV denoising). # + from __future__ import print_function from builtins import input import numpy as np from sporco.admm import tvl1 from sporco import util from sporco import signal from sporco import metric from sporco import plot plot.config_notebook_plotting() # - # Load reference image. img = util.ExampleImages().image('monarch.png', scaled=True, idxexp=np.s_[:,160:672], gray=True) # Construct test image corrupted by 20% salt & pepper noise. np.random.seed(12345) imgn = signal.spnoise(img, 0.2) # Set regularization parameter and options for ℓ1-TV denoising solver. The regularization parameter used here has been manually selected for good performance. lmbda = 8e-1 opt = tvl1.TVL1Denoise.Options({'Verbose': True, 'MaxMainIter': 200, 'RelStopTol': 5e-3, 'gEvalY': False, 'AutoRho': {'Enabled': True}}) # Create solver object and solve, returning the the denoised image ``imgr``. b = tvl1.TVL1Denoise(imgn, lmbda, opt) imgr = b.solve() # Display solve time and denoising performance. print("TVL1Denoise solve time: %5.2f s" % b.timer.elapsed('solve')) print("Noisy image PSNR: %5.2f dB" % metric.psnr(img, imgn)) print("Denoised image PSNR: %5.2f dB" % metric.psnr(img, imgr)) # Display reference, corrupted, and denoised images. 
fig = plot.figure(figsize=(20, 5)) plot.subplot(1, 3, 1) plot.imview(img, title='Reference', fig=fig) plot.subplot(1, 3, 2) plot.imview(imgn, title='Corrupted', fig=fig) plot.subplot(1, 3, 3) plot.imview(imgr, title=r'Restored ($\ell_1$-TV)', fig=fig) fig.show() # Get iterations statistics from solver object and plot functional value, ADMM primary and dual residuals, and automatically adjusted ADMM penalty parameter against the iteration number. its = b.getitstat() fig = plot.figure(figsize=(20, 5)) plot.subplot(1, 3, 1) plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig) plot.subplot(1, 3, 2) plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, ptyp='semilogy', xlbl='Iterations', ylbl='Residual', lgnd=['Primal', 'Dual'], fig=fig) plot.subplot(1, 3, 3) plot.plot(its.Rho, xlbl='Iterations', ylbl='Penalty Parameter', fig=fig) fig.show()
tv/tvl1den_gry.ipynb
def espaco_vetorial(n):
    """Return the full vector space {0,1}^n as a (2**n, n) numpy array.

    Rows are enumerated in lexicographic order (0...0 first, 1...1 last).
    """
    return np.asarray([list(bits) for bits in itertools.product([0, 1], repeat=n)])


def subespaco_vetorial(n, k):
    """Draw a random subset of 2**k vectors from the space {0,1}^n.

    Index 0 of the returned list is always the zero vector; the remaining
    2**k - 1 entries are sampled without replacement from the non-zero rows
    of espaco_vetorial(n).  Returns a list of 1-D numpy arrays.

    NOTE(review): despite the name, the sampled set is generally not closed
    under addition, i.e. not an actual subspace -- confirm intent.
    """
    # Build the full space once (was rebuilt on every loop iteration, an
    # O(2**n) cost per sampled vector); this also guarantees the zero vector
    # is available even when k == 0 and the sampling loop is empty.
    espaco = espaco_vetorial(n)
    indices = random.sample(range(1, 2 ** n), 2 ** k - 1)  # distinct non-zero rows
    subespaco = [espaco[i] for i in indices]
    subespaco.insert(0, espaco[0])  # zero vector always first
    return subespaco


def base_subespaco_vetorial(lista, k):
    """Pick k distinct random vectors from `lista` (a 2**k-element subset).

    Returns a list of 1-D numpy arrays.

    NOTE(review): the k sampled vectors are not checked for linear
    independence, so they may fail to form a basis.
    """
    copia = np.array(lista)  # convert once instead of once per iteration
    indices = random.sample(range(2 ** k), k)
    return [copia[i] for i in indices]


def imprime_matriz(lista):
    """Print a matrix (sequence of rows): entries separated by one space,
    with a blank line after each row (matching the original layout)."""
    for linha in lista:
        print(" ".join("%s" % elemento for elemento in linha))
        print('\n')
outputId="c4a3092d-211b-4ac8-a2aa-9d375d385651" sub
Codigo_de_hamming.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys sys.path.append("/mnt/home/TF_NEW/tf-transformers/src/") # + # Install tf-transformers from github # + import datasets import json import glob import tensorflow as tf import numpy as np from tf_transformers.data import TFWriter, TFReader, TFProcessor from tf_transformers.models import AlbertModel from tf_transformers.tasks import Classification_Model from tf_transformers.core import optimization, SimpleTrainer from tf_transformers.losses import cross_entropy_loss from transformers import AlbertTokenizer # - # ### Load Tokenizer # Load HuggingFace Tokenizer tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2") # ### Load MNLI dataset from Huggingface datasets examples = datasets.load_from_disk("/mnt/home/PRE_MODELS/HuggingFace_models/datasets/glue/mnli/") train_examples = examples["train"] for item in train_examples: print(item) break # + max_seq_length=64 def parse_train(): result = {} for f in train_examples: input_ids_s1 = [tokenizer.cls_token] + tokenizer.tokenize(f['hypothesis'])[: max_seq_length-2] + [tokenizer.sep_token] # -2 to add CLS and SEP input_ids_s1 = tokenizer.convert_tokens_to_ids(input_ids_s1) input_type_ids_s1 = [0] * len(input_ids_s1) # 0 for s1 input_ids_s2 = tokenizer.tokenize(f['premise'])[: max_seq_length-1] + [tokenizer.sep_token] # -1 to add SEP input_ids_s2 = tokenizer.convert_tokens_to_ids(input_ids_s2) input_type_ids_s2 = [1] * len(input_ids_s2) input_ids = input_ids_s1 + input_ids_s2 input_type_ids = input_type_ids_s1 + input_type_ids_s2 input_mask = [1] * len(input_ids) # 1 for s2 result = {} result['input_ids'] = input_ids result['input_mask'] = input_mask result['input_type_ids'] = input_type_ids result['labels'] = f['label'] yield result # Lets write using TF Writer # Use TFProcessor for smalled 
data schema = { "input_ids": ("var_len", "int"), "input_mask": ("var_len", "int"), "input_type_ids": ("var_len", "int"), "labels": ("var_len", "int"), } tfrecord_train_dir = '../../OFFICIAL_TFRECORDS/glue/alberta/mnli/train' tfrecord_filename = 'mnli' tfwriter = TFWriter(schema=schema, file_name=tfrecord_filename, model_dir=tfrecord_train_dir, tag='train', overwrite=True ) tfwriter.process(parse_fn=parse_train()) # - # ### Read TFRecords using TFReader # + # Read Data schema = json.load(open("{}/schema.json".format(tfrecord_train_dir))) all_files = glob.glob("{}/*.tfrecord".format(tfrecord_train_dir)) tf_reader = TFReader(schema=schema, tfrecord_files=all_files) x_keys = ['input_ids', 'input_type_ids', 'input_mask'] y_keys = ['labels'] batch_size = 32 train_dataset = tf_reader.read_record(auto_batch=True, keys=x_keys, batch_size=batch_size, x_keys = x_keys, y_keys = y_keys, shuffle=True, drop_remainder=True ) # - for (batch_inputs, batch_labels) in train_dataset.take(1): print(batch_inputs, batch_labels) # ### Load Albert V2 Model # + # Lets load Albert Model model_layer, model, config = AlbertModel(model_name='albert_base_v2', is_training=True, use_dropout=False ) model.load_checkpoint("/mnt/home/PRE_MODELS/LegacyAI_models/checkpoints/albert-base-v2/") # model_layer -> Legacylayer inherited from tf.keras.Layer # model -> legacyModel inherited from tf.keras.Model # - # ### Load Classification Model # + classification_layer = Classification_Model(model=model, num_classes=3, use_all_layers=True, is_training=True) classification_model = classification_layer.get_model() # + # Delete to save up memory del model del model_layer del classification_layer # - # ### Define Loss # # Loss function is simple. # * labels: 1D (batch_size) # class indices # * logits: 2D (batch_size x num_classes) # # **Joint loss** - We minimze loss over each hidden layer . 
def loss_fn(labels, logits):
    """Mean sparse softmax cross-entropy for one classification head.

    labels: (batch, 1) int tensor of class indices.
    logits: (batch, num_classes) float tensor.
    """
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=tf.squeeze(labels, axis=1))
    return tf.reduce_mean(per_example)


def joint_loss(y_true_dict, y_pred_dict):
    """Average loss_fn over the classification logits of every hidden layer,
    so the joint loss is minimised across all layers simultaneously."""
    per_layer = [loss_fn(y_true_dict['labels'], logits)
                 for logits in y_pred_dict['class_logits']]
    return tf.reduce_mean(per_layer)
# # Compile keras_loss_fn = {'class_logits': joint_loss} classification_model.compile2(optimizer=optimizer, loss=None, custom_loss=keras_loss_fn) # Change steps per epoch to large value/ ignore it completely to train # on full dataset history = classification_model.fit(train_dataset, epochs=2, steps_per_epoch=10) # ### Train using SimpleTrainer (part of tf-transformers) history = SimpleTrainer(model = classification_model, optimizer = optimizer, loss_fn = joint_loss, dataset = train_dataset.repeat(EPOCHS+1), # This is important epochs = EPOCHS, num_train_examples = train_data_size, batch_size = batch_size, steps_per_call=100, gradient_accumulation_steps=None) # ### Save Models # # You can save models as checkpoints using ```.save_checkpoint``` attribute, which is a part of all ```LegacyModels``` model_save_dir = "../../OFFICIAL_MODELS/glue/mnli/albert" classification_model.save_checkpoint(model_save_dir) # ### Parse validation data # # We use ```TFProcessor``` to create validation data, because dev data is small # + dev_examples = examples['validation_matched'] def parse_dev(): result = {} for f in dev_examples: input_ids_s1 = [tokenizer.cls_token] + tokenizer.tokenize(f['hypothesis'])[: max_seq_length-2] + [tokenizer.sep_token] # -2 to add CLS and SEP input_ids_s1 = tokenizer.convert_tokens_to_ids(input_ids_s1) input_type_ids_s1 = [0] * len(input_ids_s1) # 0 for s1 input_ids_s2 = tokenizer.tokenize(f['premise'])[: max_seq_length-1] + [tokenizer.sep_token] # -1 to add SEP input_ids_s2 = tokenizer.convert_tokens_to_ids(input_ids_s2) input_type_ids_s2 = [1] * len(input_ids_s2) input_ids = input_ids_s1 + input_ids_s2 input_type_ids = input_type_ids_s1 + input_type_ids_s2 input_mask = [1] * len(input_ids) # 1 for s2 result = {} result['input_ids'] = input_ids result['input_mask'] = input_mask result['input_type_ids'] = input_type_ids result['labels'] = f['label'] yield result tf_processor = TFProcessor() dev_dataset = tf_processor.process(parse_fn=parse_dev()) x_keys = 
def parse_dev():
    """Tokenize the MNLI validation-mismatched split into model features.

    Generator over `dev_examples` (a notebook global); each yielded dict has
    `input_ids`, `input_mask`, `input_type_ids` and `labels` for one
    hypothesis/premise pair, built with the `tokenizer` and `max_seq_length`
    notebook globals.
    """
    result = {}
    for f in dev_examples:
        # Segment 1: [CLS] hypothesis [SEP]; -2 leaves room for CLS and SEP.
        input_ids_s1 = [tokenizer.cls_token] + tokenizer.tokenize(f['hypothesis'])[: max_seq_length-2] + [tokenizer.sep_token]
        input_ids_s1 = tokenizer.convert_tokens_to_ids(input_ids_s1)
        input_type_ids_s1 = [0] * len(input_ids_s1)  # segment id 0 for sentence 1
        # Segment 2: premise [SEP]; -1 leaves room for the trailing SEP.
        input_ids_s2 = tokenizer.tokenize(f['premise'])[: max_seq_length-1] + [tokenizer.sep_token]
        input_ids_s2 = tokenizer.convert_tokens_to_ids(input_ids_s2)
        input_type_ids_s2 = [1] * len(input_ids_s2)  # segment id 1 for sentence 2
        # NOTE(review): the two segments are truncated independently, so the
        # combined sequence can reach 2*max_seq_length - 3 tokens -- confirm
        # the model / TFRecord pipeline tolerates sequences longer than
        # max_seq_length.
        input_ids = input_ids_s1 + input_ids_s2
        input_type_ids = input_type_ids_s1 + input_type_ids_s2
        input_mask = [1] * len(input_ids)  # attention mask: 1 for every real token
        result = {}
        result['input_ids'] = input_ids
        result['input_mask'] = input_mask
        result['input_type_ids'] = input_type_ids
        result['labels'] = f['label']
        yield result
y_keys = ['labels'] dev_dataset = tf_processor.auto_batch(dev_dataset, shuffle=False, x_keys=x_keys, y_keys=y_keys, batch_size=32, drop_remainder=False) # - # ### Evaluate dev dataset MNLI mismatch - Accuracy # + num_hidden_layers = 12 predictions_per_layer = {i:[] for i in range(num_hidden_layers)} original_labels = [] for (batch_inputs, batch_labels) in dev_dataset: model_outputs = classification_model(batch_inputs)['class_logits'] for i in range(num_hidden_layers): predictions_per_layer[i].append(tf.argmax(model_outputs[i], axis=1).numpy()) original_labels.append(batch_labels['labels'].numpy()) from sklearn.metrics import accuracy_score eval_metrics = {} for i in range(num_hidden_layers): eval_metrics[i] = accuracy_score(np.hstack(predictions_per_layer[i]), np.hstack(original_labels)) print(i, eval_metrics[i]) with open('eval_mnli_mismatched.json', 'w') as f: json.dump(eval_metrics, f) # -
src/tf_transformers/notebooks/tutorials/joint_loss_experiments/glue/mnli.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Initialization # + from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import re from chemdataextractor import Document from chemdataextractor.model import Compound, BaseModel, \ StringType, ListType, ModelType from chemdataextractor.doc import Paragraph, Heading, Sentence from chemdataextractor.parse.actions import join from chemdataextractor.parse import R, I, W, Optional, merge, ZeroOrMore from chemdataextractor.parse.base import BaseParser from chemdataextractor.utils import first # - # ### Glossary for Regular Expression Parsing # `.parse`: # # * `R(pattern)`: match token text with regular expression # * `I(match)`: case-insensitive match token text # * `W(match)`: match token text exactly # * `T(match)`: match tag exactly # * `Optional(expr)`: abstract class for combining and post-processing parsed tokens # * `merge(tokens, start, result)`: join tokens into a single string with no spaces # * `ZeroOrMore(expr)`: optional repetition of zero or more of the given expression # * `OneOrMore(expr)`: repetition of one or more of the given expression # * `Not(expr):` check ahead to disallow a match with the given parse expression # # `.parse.actions`: # # * `strip_stop`: removes trailing full stop from tokens # * `join`: join tokens into a single string with spaces between # ### Outlining Spincoat Parameter Extraction # # {'Synthesis' : { # 'spincoat' : { # 'time' : { # 'value' : (number), # 'units' : 's', # }, # }, # 'anneal' : { # 'time' : { # 'value' : (number), # 'units' : 's', # }, # 'temperature' : { # 'value' : (number), # 'units' : 'K', # }, # etc... 
# } # # Structuring my code to fit the format: # # create 'spincoat' class to parse spincoat spds # spincoat: (value) (units) # create 'time' subclass in the spincoat class # time: (value) (units) # create 'anneal' class to parse annealing conditions # create 'time' subclass in the anneal class # time: (value) (units) # create 'temperature' subclass in the anneal class # temperature: (value) (units) # #### Data Structure for ir.py (CDE) vs my preferred implementation # # IR data structure: # # ir_spectra: # solvent # units # \ # \ # pk # pkunits # strength # bond # \ # \ # pk # pkunits # strength # bond # ...etc. # # My proposed data structure: # # spin_coat: # solvent # units # \ # \ # spinspd # spdunits # time # timeunits # \ # \ # spinspd # spdunits # time # timeunits # + # Creating SpinStep and SpinCoat class with various properties: speed, time, temperature, and respective units. class SpinSpd(BaseModel): """ Class for each spin-coating speed in a spin-coating process. """ spdvalue = StringType() spdunits = StringType(contextual=True) class SpinTime(BaseModel): """ Class for each spin-coating time in a spin-coating process. """ timevalue = StringType() timeunits = StringType(contextual=True) class SpinCoat(BaseModel): """ Class for full list of spin-coating step parameters for full process. 
""" #solvent = StringType(contextual=True) spds = ListType(ModelType(SpinSpd)) times = ListType(ModelType(SpinTime)) # - # ### Associating Parameters with a Chemical #Compound.synthesis = ListType(ModelType(Synthesis)) Compound.spin_coat = ListType(ModelType(SpinCoat)) #Compound.anneal = ListType(ModelType(Anneal)) # ### Defining Parameters for the Parser # + # Adding GBL to the solvents list #gbl = (I('GBL') | R('^γ-?[bB]?utyrolactone$')) #solvent = (gbl | chemical_name)('solvent').add_action(join) # + #def extract_units(tokens, start, result): # """Extract units from bracketed after nu""" # for e in result: # for child in e.iter(): # if 'cm−1' in child.text: # return [E('units', 'cm−1')] # return [] # + # Deliminator delim = R('^[;:,\./]$').hide() # Defining formats for spin-coating value and units spdunits = (R(u'^r(\.)?p(\.)?m(\.)?$') | R(u'^r(\.)?c(\.)?f(\.)?$') | R(u'^([x×]?)(\s?)?g$'))('spdunits').add_action(join) + ZeroOrMore(delim) spdvalue = Optional(W('(')).hide() + R(u'^\d+(,\d+)[0][0]$')('spdvalue') + Optional(W(')')).hide() # Defining formats for spin-coating time and time units timeprefix = I('for').hide() timeunits = (R('^s?(ec|econds)?$') | R('^m?(in|inute)?(s)?$') | R('^h?(ou)?(r)?(s)?$'))('timeunits').add_action(join) + Optional(delim) timevalue = R('^\d{,3}$')('timevalue') + Optional(delim) # Putting everything together spdprefix = I('at').hide() spd = (spdvalue)('spd') spds = (spd + ZeroOrMore(ZeroOrMore(spdunits | delim | W('and')).hide() + spd))('spds') time = (timevalue)('time') times = (time + ZeroOrMore(ZeroOrMore(timeunits | delim | W('and')).hide() + time))('times') spincoat = (Optional(delim) + Optional(spdprefix) + spds + Optional(delim) + Optional(spdunits) + Optional(delim) + Optional(timeprefix) + Optional(delim) + times + Optional(delim) + Optional(timeunits) + Optional(delim))('spincoat') # - class SpinCoatParser(BaseParser): root = spincoat def interpret(self, result, start, end): c = Compound() s = SpinCoat( # 
solvent=first(result.xpath('./solvent/text()')) ) spdunits = first(result.xpath('./spdunits/text()')) timeunits = first(result.xpath('./timeunits/text()')) for spd in result.xpath('./spds/spd'): spin_spd = SpinSpd( spdvalue=first(spd.xpath('./spdvalue/text()')), spdunits=spdunits ) s.spds.append(spin_spd) for time in result.xpath('./times/time'): spin_time = SpinTime( timevalue=first(time.xpath('./timevalue/text()')), timeunits=timeunits ) s.times.append(spin_time) c.spin_coat.append(s) yield c # ### Parsers Paragraph.parsers = [SpinCoatParser()] Sentence.parsers = [SpinCoatParser()] { 'spin_coat': [ { 'spds': [ {'spdvalue': '1,000', 'spdunits': 'r.p.m'}, {'spdvalue': '5,000', 'spdunits': 'r.p.m'} ], 'times': [ {'timevalue': '10', 'timeunits': 's'}, {'timevalue': '20', 'timeunits': 's'} ] } ] } # ## Testing Outputs # + d = Sentence(u'The resulting solution was coated onto the mp-TiO2/bl-TiO2/FTO substrate by a consecutive two-step spin-coating process at 1,000 and 5,000 r.p.m for 10 and 20 s, respectively.') d.records.serialize() # - def parse_spincoat(spincoat_str): """ Given a string as input, converts the string into a ChemDrawExtractor Paragraph and returns a list of spin-coating parameters (speeds and times) found via parsing the string. """ return Sentence(spincoat_str).records.serialize() sp = 'The resulting solution was coated onto the mp-TiO2/bl-TiO2/FTO substrate by a consecutive two-step spin-coating process at 1,000 and 5,000 r.p.m for 10 and 20 s, respectively.' parse_spincoat(sp) sp2 = "The mesoporous TiO2 films were then infiltrated with CH3NH3SnI3−xBrx by spin coating at 4,000 r.p.m. for 45 s and dried at 125 °C for 30 min to remove the solvent." parse_spincoat(sp2) sp3 = "The mesoporous TiO2 films were then infiltrated with PbI2 by spin coating at 6,500 r.p.m. for 90 s and dried at 70 °C for 30 min." 
parse_spincoat(sp3) sp4 = "The inorganic–organic lead halide perovskite solutions were then coated onto the mesoporous-TiO2/blocking-TiO2/FTO substrate by two consecutive spin-coating steps, at 1,000 rpm and 5,000 rpm for 40 s and 20 s, respectively." parse_spincoat(sp4) sp5 = "A PbI2 solution (dissolved in N,N-dimethylformamide at a concentration of 460 mg ml−1) was then spin coated on top of the ZnO layer at 3,000 r.p.m. for 15 s." parse_spincoat(sp5) # **In summary:** Totally works for all 5 test papers in the repo! :) # ### Test with Linnette/Neel's Output # # The output of the function written by Neel and Linnette gives a list of identified synthesis sentences. Synthesis_sentence = ['A 200–300-nm-thick mesoporous TiO2 (particle size: about 50 nm, crystalline phase: anatase) film was spin-coated onto the bl-TiO2/FTO substrate using home-made pastes14 and calcining at 500 °C for 1 h in air to remove organic components.', 'CH3NH3I (MAI) and CH3NH3Br (MABr) were first synthesized by reacting 27.86 ml CH3NH2 (40% in methanol, Junsei Chemical) and 30 ml HI (57 wt% in water, Aldrich) or 44 ml HBr (48 wt% in water, Aldrich) in a 250 ml round-bottom flask at 0 °C for 4 h with stirring, respectively.', 'The precipitate was recovered by evaporation at 55 °C for 1 h. 
MAI and MABr were dissolved in ethanol, recrystallized from diethyl ether, and dried at 60 °C in a vacuum oven for 24 h.', 'The resulting solution was coated onto the mp-TiO2/bl-TiO2/FTO substrate by a consecutive two-step spin-coating process at 1,000 and 5,000 r.p.m for 10 and 20 s, respectively.', 'During the second spin-coating step, the substrate (around 1 cm × 1 cm) was treated with toluene drop-casting.', 'The substrate was dried on a hot plate at 100 °C for 10 min.', 'Furthermore, it was reported that the uniformity of the perovskite films depended on the thickness of the TiO2 compact layer, and modification of the spinning conditions could not achieve 100% surface coverage20.', 'We see that the formation of the perovskite phase is accompanied by the complete transformation of the MAI–PbI2–DMSO at 130 °C, whereas both MAI–PbI2–DMSO and perovskite phases coexist at 100 °C.', 'As shown in Fig.\xa02d, at the initial stage during spinning, the film is composed of MAI and PbI2 dissolved in the DMSO/GBL solvent mixture, whereas in the intermediate stage, the composition of the film is concentrated by the evaporation of GBL.'] [parse_spincoat(sentence) for sentence in Synthesis_sentence] # My function integrates very well with the above format :) # ## Unit Testing # Proposed unit tests for `spincoat.py`: # # 1. Input must be a string -- Linnette/Neel's function will output a string! # 2. `Sentence(str)` must not raise an error # 3. Output must be a list of dictionary of lists # 4. Output should not be empty # 5. Output should have the correct number of elements (hopefully) # # Must also write test classes! 
# # `class TestClassName(object)`

# +
# NOTE(review): parse_spincoat (defined above) expects a plain string and
# wraps it in a Sentence itself, so the fixture is kept as a unicode string.
# The original cell passed a Sentence object and called the function through
# a non-existent `spincoat` module, and ended with a dangling `try:` that
# had no `except`/`finally` clause -- a SyntaxError. Both are fixed here.
test_s = u'The resulting solution was coated onto the mp-TiO2/bl-TiO2/FTO substrate by a consecutive two-step spin-coating process at 1,000 and 5,000 r.p.m for 10 and 20 s, respectively.'
test_s_output = parse_spincoat(test_s)

def test_parse_spincoat():
    """
    Test function for parse_spincoat.

    Checks the fixture's type, the output container type, that parsing
    actually extracted at least one record, and that records are dicts.
    """
    assert isinstance(test_s, str), 'Incorrect input type: string required'
    assert isinstance(test_s_output, list), 'Error: incorrect output type (list expected)'
    assert test_s_output, 'Error: no parameters detected (parsing error)'
    assert isinstance(test_s_output[0], dict), 'Error: incorrect element type (dict expected)'
# -

# # Trash

# ## Importing test synthesis paragraph and sentence

# Nature Materials
ptext = u"A dense blocking layer of TiO2 (bl-TiO2, ∼70 nm in thickness) was deposited onto a F-doped SnO2 (FTO, Pilkington, TEC8) substrate by spray pyrolysis, using a 20 mM titanium diisopropoxide bis(acetylacetonate) solution (Aldrich) at 450 °C to prevent direct contact between the FTO and the hole-conducting layer. A 200–300-nm-thick mesoporous TiO2 (particle size: about 50 nm, crystalline phase: anatase) film was spin-coated onto the bl-TiO2/FTO substrate using home-made pastes14 and calcining at 500 °C for 1 h in air to remove organic components. CH3NH3I (MAI) and CH3NH3Br (MABr) were first synthesized by reacting 27.86 ml CH3NH2 (40% in methanol, Junsei Chemical) and 30 ml HI (57 wt% in water, Aldrich) or 44 ml HBr (48 wt% in water, Aldrich) in a 250 ml round-bottom flask at 0 °C for 4 h with stirring, respectively. The precipitate was recovered by evaporation at 55 °C for 1 h. MAI and MABr were dissolved in ethanol, recrystallized from diethyl ether, and dried at 60 °C in a vacuum oven for 24 h. The prepared MAI and MABr powders, PbI2 (Aldrich) and PbBr2 (Aldrich) for 0.8 M MAPb(I1 − xBrx)3 (x = 0.1–0.15) solution were stirred in a mixture of GBL and DMSO (7:3 v/v) at 60 °C for 12 h. The resulting solution was coated onto the mp-TiO2/bl-TiO2/FTO substrate by a consecutive two-step spin-coating process at 1,000 and 5,000 r.p.m for 10 and 20 s, respectively. During the second spin-coating step, the substrate (around 1 cm × 1 cm) was treated with toluene drop-casting. A detailed time-rotation profile for the spin-coating is represented in Supplementary Fig. 1c. The substrate was dried on a hot plate at 100 °C for 10 min. A solution of poly(triarylamine) (15 mg, PTAA, EM Index, Mw = 17,500 g mol−1) in toluene (1.5 ml) was mixed with 15 μl of a solution of lithium bistrifluoromethanesulphonimidate (170 mg) in acetonitrile (1 ml) and 7.5 μl 4-tert-butylpyridine and spin-coated on the MAPb(I1 − xBrx)3 (x = 0.1–0.15)/mp-TiO2/bl-TiO2/FTO substrate at 3,000 r.p.m for 30 s. Finally, a Au counterelectrode was deposited by thermal evaporation. The active area of this electrode was fixed at 0.16 cm2."

# Formatting the Nature Nanotech paragraph according to ChemDataExtractor
p = Document(Heading(u'Solar cell fabrication'), Paragraph(ptext))
p

# Sentence version
sp = 'The resulting solution was coated onto the mp-TiO2/bl-TiO2/FTO substrate by a consecutive two-step spin-coating process at 1,000 and 5,000 r.p.m for 10 and 20 s, respectively.'
s = Document(u'The resulting solution was coated onto the mp-TiO2/bl-TiO2/FTO substrate by a consecutive two-step spin-coating process at 1,000 and 5,000 r.p.m for 10 and 20 s, respectively.')

p.records.serialize()

s
development_notebooks/dev_synthesis_parsers/Spincoating.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Have the function FirstReverse(str) take the str parameter being passed and
# return the string in reversed order. For example: if the input is "Hello World
# and Coders" then your program should return "sredoC dna dlroW olleH".
# (The original markdown described FirstFactorial -- a copy-paste from a
# different challenge; this file implements First Reverse.)
#
# Use the Parameter Testing feature in the box below to test your code with
# different arguments.

# + colab={} colab_type="code" id="ciBm9fNWfy7W"
def FirstReverse(str):  # parameter name fixed by the Coderbyte harness (shadows builtin)
    """Return *str* with its characters in reverse order."""
    # A negative-step slice reverses the whole sequence in one pass.
    return str[::-1]


# keep this function call here
# Guarded so the module can be imported (e.g. by tests) without blocking on
# stdin; running the script directly behaves exactly as before.
if __name__ == "__main__":
    print(FirstReverse(input()))
coderbyte/20191212_3/First Reverse.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.10 64-bit (''gv2'': conda)' # name: python3 # --- # + # Examen 2 de Análisis de datos atmosféricos. # CCA - UNAM - maestría. # 28 de septiembre de 2021. import pandas as pd import numpy as np from scipy import stats from matplotlib import pyplot as plt # + # Ejercicio 1 print( "Ejercicio 1" ) print( f"{stats.binom.pmf( 2, 18, 0.1 ):.4f}" ) print() # Ejercicio 2 print( "Ejercicio 2" ) print( f"{stats.uniform.sf( ( 8 - 0 ) / ( 20 - 0 ) ):.4f}" ) print() # Ejercicio 3 print( "Ejercicio 3" ) print( f"a. {stats.poisson.pmf( 2, 2.3 ):.4f}" ) print( f"b. {stats.poisson.pmf( 10, 2.3 * 5 ):.4f}" ) print( f"c. {stats.poisson.sf( 0, 2.3 * 2 ):.4f}" ) print() # Ejercicio 4 print( "Ejercicio 4" ) print( f"{stats.expon.ppf( 0.9, scale = 140 / np.log(2) ):.2f}" ) print() # + # Ejercicio 5 mu = 65 sigma = 8 print( "Ejercicio 5" ) print( f"a. {stats.norm.sf( 61, mu, sigma ):.4f}" ) a = ( stats.norm.cdf( 69, mu, sigma ) - stats.norm.cdf( 63, mu, sigma ) ) print( f"b. {a:.4f}" ) print( f"c. {stats.norm.cdf( 70, mu, sigma ):.4f}" ) print( f"d. {stats.norm.sf( 75, mu, sigma ):.4f}" ) print() # Gráfica inciso a. fig, ax = plt.subplots() x1 = np.linspace(mu - 3 * sigma, mu + 3 * sigma, 1000) y1 = stats.norm.pdf(x1, mu, sigma) x2 = np.linspace(61, mu + 3 * sigma, 1000) y2 = stats.norm.pdf(x2, mu, sigma) ax.plot(x1, y1) ax.fill_between(x2, y2) ax.set_title("P{X > 61}", fontsize = 16) ax.set_xlabel("Peso [kg]") ax.set_ylabel("P") ax.set_xlim(mu - 3 * sigma, mu + 3 * sigma) ax.set_ylim(0) # Gráfica inciso b. 
fig, ax = plt.subplots() x1 = np.linspace(mu - 3 * sigma, mu + 3 * sigma, 1000) y1 = stats.norm.pdf(x1, mu, sigma) x2 = np.linspace(63, 69, 1000) y2 = stats.norm.pdf(x2, mu, sigma) ax.plot(x1, y1) ax.fill_between(x2, y2) ax.set_title("P{63 < X < 69}", fontsize = 16) ax.set_xlabel("Peso [kg]") ax.set_ylabel("P") ax.set_xlim(mu - 3 * sigma, mu + 3 * sigma) ax.set_ylim(0) # Gráfica inciso c. fig, ax = plt.subplots() x1 = np.linspace(mu - 3 * sigma, mu + 3 * sigma, 1000) y1 = stats.norm.pdf(x1, mu, sigma) x2 = np.linspace(mu - 3 * sigma, 70, 1000) y2 = stats.norm.pdf(x2, mu, sigma) ax.plot(x1, y1) ax.fill_between(x2, y2) ax.set_title("P{X < 70}", fontsize = 16) ax.set_xlabel("Peso [kg]") ax.set_ylabel("P") ax.set_xlim(mu - 3 * sigma, mu + 3 * sigma) ax.set_ylim(0) # Gráfica inciso d. fig, ax = plt.subplots() x1 = np.linspace(mu - 3 * sigma, mu + 3 * sigma, 1000) y1 = stats.norm.pdf(x1, mu, sigma) x2 = np.linspace(75, mu + 3 * sigma, 1000) y2 = stats.norm.pdf(x2, mu, sigma) ax.plot(x1, y1) ax.fill_between(x2, y2) ax.set_title("P{X > 75}", fontsize = 16) ax.set_xlabel("Peso [kg]") ax.set_ylabel("P") ax.set_xlim(mu - 3 * sigma, mu + 3 * sigma) ax.set_ylim(0) # + # Ejercicio 6 print( "Ejercicio 6" ) print( f"a. {stats.binom.pmf( 0, 50, .02 ):.4f}" ) print( f"b. {stats.binom.pmf( 1, 50, .02 ):.4f}" ) print( f"{stats.binom.pmf( 2, 50, .02 ):.4f}" ) print( f"c. {stats.binom.sf( 2, 50, .02 ):.4f}" ) print( f"d. {50 * 0.02:.0f}" ) print() # Ejercicio 7 print( "Ejercicio 7" ) a = stats.expon.sf( 21, 20, 0.5 ) print( f"a. {a:.4f}" ) b = stats.binom.pmf( 0, 15, a ) c = stats.binom.pmf( 1, 15, a ) d = stats.binom.pmf( 2, 15, a ) e = b + c + d print( f"b. {b:.4f} + {c:.4f} " f" + {d:.4f} = {e:.4f}" ) print() # Ejercicio 4.3 print( "Ejercicio 4.3" ) print( f"b. {stats.poisson.sf( 0, 1 / 18 ):.4f}" ) print( f"c. 
{stats.poisson.sf( 0, 13 / 23 ):.4f}" ) print() # + # Ejercicio 4.7 path_d = "../datos/" fname = "A.3_Wilks.csv" df = pd.read_csv(path_d + fname, index_col = "Year") # Ajuste de distribución. mu, sigma = stats.norm.fit(df["Temperature"]) print("Ejercicio 4.7") print("a.") print(f"mu: {mu:.2f} °C") print(f"sigma: {sigma:.2f} °C") print(f"max : {df['Temperature'].min():.2f}") print(f"min : {df['Temperature'].max():.2f}") print("b.") print(f"mu: {mu * 9 / 5 + 32:.2f} °F") print(f"sigma: {sigma * 9 / 5:.2f} °F") # Gráfica de histograma y distribución. fig = plt.figure() min = 23 max = 27 delta = 0.5 ax = df["Temperature"].hist( bins = np.arange(min, max + delta, delta), density = True ) x = np.linspace( min, max, 1000 ) y = stats.norm.pdf(x, mu, sigma) ax.plot(x, y) ax.set_title("Temperatura durante junio en Guayaquil", fontsize = 16) ax.legend(["Distribución", "Muestra"]) ax.set_xlabel("Temperatura [°C]") ax.set_ylabel("P") ax.set_xlim( min, max) ax.set_ylim(0) df.head() # + # Ejercicio 4.10 path_d = "../datos/" fname = "Table 4.8.csv" df = pd.read_csv(path_d + fname, index_col = "Year") # Ajuste de distribución. alpha, loc, beta = stats.gamma.fit( df["Precipitation"], floc = 0 ) print( "Ejercicio 4.10" ) print("a.") print(f"alpha: {alpha:.2f}") print(f"loc : {loc:.2f}") print(f"beta : {beta:.2f} in") print(f"max : {df['Precipitation'].min():.2f}") print(f"min : {df['Precipitation'].max():.2f}") print("b.") print(f"alpha: {alpha:.2f}") print(f"beta : {beta * 25.4:.2f} mm") print() # Gráfica de histograma y distribución. 
fig = plt.figure() min = 0.5 max = 8.5 delta = 1 ax = df["Precipitation"].hist( bins = np.arange(min, max + delta, delta), density = True ) x = np.linspace( 0, max, 1000 ) y = stats.gamma.pdf(x, alpha, loc, beta) ax.plot(x, y) ax.set_title("Precipitación durante julio en Ithaca", fontsize = 16) ax.legend(["Distribución", "Muestra"]) ax.set_xlabel("Precipitación [in]") ax.set_ylabel("P") ax.set_xlim( 0, max) ax.set_ylim(0) # Ejercicio 4.11 print( "Ejercicio 4.11" ) print("a.") print(f"p_30: {stats.gamma.ppf(0.3, alpha, loc, beta):.2f}") print(f"p_70: {stats.gamma.ppf(0.7, alpha, loc, beta):.2f}") print("b.") median = stats.gamma.ppf(0.5, alpha, loc, beta) mean_s = df["Precipitation"].mean() print(f"median : {median:.2f}") print(f"sample mean : {mean_s:.2f}") print(f"mean - median: {mean_s - median:.2f}") print("c.") print(f"{stats.gamma.sf(7, alpha, loc, beta):.2f}") df.head() # + # Ejercicio 4.16 path = "../datos/" fname = "A.1_Wilks.csv" temp = ["Canandaigua - Min Temp", "Canandaigua - Max Temp"] df = pd.read_csv(path + fname, index_col = "Date") # Normal bivariada. # Se obtienen los parámetros. mu_x = df[temp[0]].mean() mu_y = df[temp[1]].mean() sigma_x = df[temp[0]].std() sigma_y = df[temp[1]].std() rho = df[temp].corr() print("Ejercicio 4.16") print("a.") print("mu_x = " f"{mu_x:.1f}") print("mu_y = " f"{mu_y:.1f}") print("sigma_x = " f"{sigma_x:.2f}") print("sigma_y = " f"{sigma_y:.2f}") print("rho = " f"{rho.iat[1, 0]:.4f}") # Distribución condicional. x = 0 y = 20 # Parámetros condicionales. 
mu_y_x = ( mu_y + ( rho.iat[1, 0] * sigma_y * ( x - mu_x ) ) / sigma_x ) sigma_y_x = sigma_y * np.sqrt( 1 - rho.iat[1, 0] ** 2 ) print("b.") print("mu_y_x = " f"{mu_y_x:.2f}") print("sigma_y_x = " f"{sigma_y_x:.2f}") p_cond = stats.norm.cdf(y, mu_y_x, sigma_y_x) print(f"{p_cond:.4f}") df.head() # - # Ejercicio 4.19 print( "Ejercicio 4.19" ) a = stats.weibull_min.cdf( 10, 1.2, scale = 7.4 ) b = stats.weibull_min.cdf( 20, 1.2, scale = 7.4 ) print( f"{b:.4f} - {a:.4f} = {b - a:.4f}" ) print()
code/Examen 2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from jqdatasdk import * from datetime import datetime,timedelta import matplotlib.pyplot as plt import tushare as ts import mplfinance as mpf list=['300480.XSHE', '600209.XSHG', '600803.XSHG', '603090.XSHG', '603599.XSHG'] date = '2020-09-11' if len(list)!=0: d = {'code': list} edf = pd.DataFrame(d) date = datetime.strptime(date, "%Y-%m-%d") edf["time"]=date edf # + def drawing(code,start,end,time): df = ts.get_k_data(code,start,end) df.set_index('date',inplace=True) df.index = pd.DatetimeIndex(df.index) # print(df.head(5)) df = df[['open','high','low','close','volume']] df.columns = ['Open','High','Low','Close','Volume'] close = df.loc[time,'Close'] # print(close) kwargs = dict(type='candle',mav=(5,10,20),volume=True,figratio=(19,8),figscale=0.85) mc = mpf.make_marketcolors(up = 'r',down = 'g')#00ff00 my_style = mpf.make_mpf_style(marketcolors=mc) signal = [time,close] apd = mpf.make_addplot(signal,type='scatter') mpf.plot(df,**kwargs,style=my_style,title='Candle:'+str(time)+' | '+code, ylabel='price', ylabel_lower='date',vlines=dict(vlines=[time],linewidths=(1)))#, savefig='my_image.png' # mpf.plot(time,close,'o',color='r',markersize=10) # mpf.plot(df.index,figscale=0.7,fill_between=dict(y1=0,y2=close)) # mpf.plot(daily,type='candle',vlines=dict(vlines=[time],linewidths=(1))) # mpf.plot(tdf,addplot=apd) plt.show() # - for i in edf.index: code = edf.loc[i,'code'][:-5] time = edf.loc[i,'time'] # time = datetime.strptime(time, "%Y-%m-%d").date() # 获取 K线图的起始时间 start = time-timedelta(days=90) # 将起始时间转为字符串 start = datetime.strftime(start, "%Y-%m-%d") # 获取 K线图的终点时间 end = time+timedelta(days=20) # 将终点时间转为字符串 end = datetime.strftime(end, "%Y-%m-%d") # name = get_security_info(code).display_name print(code,time) 
drawing(code,start,end,time)
其他应用/1.2 中间画图.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import functools
import operator

import tensorflow as tf
import tensorflow.sparse as sparse


# +
def flatten(inputs, dims_to_flatten):
    """Collapse the last ``dims_to_flatten`` axes of ``inputs`` into one axis.

    Leading (batch) axes are preserved.
    """
    input_shape = inputs.shape
    rank = input_shape.rank
    batch_dims = input_shape[:rank-dims_to_flatten]
    non_batch_dims = input_shape[-dims_to_flatten:]
    if tf.executing_eagerly():
        # Full static shape is guaranteed to be available.
        # Performance: Using `constant_op` is much faster than passing a list.
        flattened_shape = tf.concat([batch_dims, [-1]], 0)
        return tf.reshape(inputs, flattened_shape)
    else:
        last_dim = int(functools.reduce(operator.mul, non_batch_dims))
        # BUG FIX: tf.concat requires the `axis` argument; it was missing
        # here, so this (graph-mode) branch raised a TypeError.
        flattened_shape = tf.concat([[-1], batch_dims[1:], [last_dim]], 0)
        return tf.reshape(inputs, flattened_shape)


def get_conv_fun(input_shape, kernel_shape, strides):
    """Build a convolution closure that applies a kernel via sparse matmul.

    The convolution is expressed as a sparse (result_len x input_len) matrix
    multiplying the flattened input, built once from the static shapes.
    (Removed an unused inner helper `get_result_spat_shape`; the spatial
    shape is computed inline below.)
    """

    # Generator over the (row, col) indices of the sparse matrix entries
    # that fall inside the kernel's receptive field for each output cell.
    def iterate_sparsed_indices(sparsed_shape, input_shape, kernel_shape, result_shape, strides):
        for i in range(sparsed_shape[-2]):
            for j in range(sparsed_shape[-1]):
                channel_num = i % result_shape[-1]
                col_num = (i // result_shape[-1]) % result_shape[-2]
                row_num = (i // result_shape[-1]) // result_shape[-2]
                offset = (row_num * input_shape[-2] + col_num) * input_shape[-1] * strides + channel_num
                if j >= offset and \
                        ((j - offset) % input_shape[-1]) < kernel_shape[-1] and \
                        (((j - offset) // input_shape[-1]) % input_shape[-2]) < kernel_shape[-2] and \
                        (((j - offset) // input_shape[-1]) // input_shape[-2]) < kernel_shape[-3]:
                    yield [i, j]

    # Get height and width of result tensor
    result_spat_shape = ((tf.constant(input_shape[-3:-1]) - tf.constant(kernel_shape[-3:-1])) // strides) + 1
    # Get depth of result tensor (for pooling filter, strides=1, kernel depth (supposely) =1)
    result_depth_shape = (tf.constant(input_shape[-1:]) - tf.constant(kernel_shape[-1:])) + 1
    result_shape = tf.concat([
        input_shape[:-3],
        result_spat_shape,
        result_depth_shape
    ], 0)

    # Flat lengths of one input sample and one output sample.
    input_flat_len = tf.reduce_prod(tf.constant(input_shape[-3:]))
    result_flat_len = tf.reduce_prod(tf.constant(result_shape[-3:]))
    # NOTE(review): concatenating two scalar tensors along axis 0 looks
    # version-dependent -- confirm against the TF version this ran on.
    sparsed_shape = tf.concat([result_flat_len, input_flat_len], axis=0)
    sparsed_shape = tf.cast(sparsed_shape, tf.int64)
    sparsed_indices = tf.constant(
        list(iterate_sparsed_indices(sparsed_shape, input_shape, kernel_shape, result_shape, strides)),
        dtype=tf.int64
    )

    def conv_fun(inputs, kernel, bias):
        nonlocal sparsed_shape
        nonlocal sparsed_indices
        # Tile the kernel values once per output row of the sparse matrix.
        sparsed_values = tf.reshape(kernel, [-1])
        sparsed_values = tf.tile(sparsed_values, sparsed_shape[:1])
        sparsed_kernel = sparse.SparseTensor(sparsed_indices, sparsed_values, sparsed_shape)
        input_flat = tf.expand_dims(flatten(inputs, tf.constant(3)), -1)
        return tf.sparse.sparse_dense_matmul(sparsed_kernel, input_flat) + bias

    return conv_fun
# -

x = tf.reshape(tf.range([3*3*2], dtype=tf.float32), shape=(3,3,2))
# kernel = tf.constant([[[1,2], [3,4]], [[5,6], [7,8]]], dtype=tf.float32)
# kernel = tf.constant([[[1,2],[3,4],[5,6]], [[7,8],[9,10],[11,12]]], dtype=tf.float32)
kernel = tf.constant([[[1], [2]], [[3], [4]]], dtype=tf.float32)
strides = 1
bias = 1

conv_fun = get_conv_fun(x.shape, kernel.shape, strides)

conv_fun(x, kernel, bias)
VNN/notebooks/conv_test2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Airbnb NYC # Kaggle's Airbnb NYC data. More info about the data is <a href="https://www.kaggle.com/dgomonov/new-york-city-airbnb-open-data">here.</a> EDA only. # ### Table of Contents # # 1. [Imports](#Imports) # 2. [Data](#Data) # 3. [EDA](#EDA) # ### Imports # + # main import numpy as np import pandas as pd import scipy.stats as stats import pickle # datetimes import datetime as dt from datetime import datetime import time # dataviz # %matplotlib notebook import matplotlib.pyplot as plt import seaborn as sns import graphviz # plot style # %config InteractiveShellApp.pylab_import_all = False # %pylab inline pylab.rcParams['figure.figsize'] = (10, 7) plt.style.use('seaborn-colorblind') # outline from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" # %precision %.3f # pandas display pd.set_option('display.max_rows', 200) pd.set_option('display.max_columns', 100) # warnings import warnings warnings.filterwarnings('ignore') # - # ### Data df = pd.read_csv('../data/AB_NYC_2019.csv') df.head() len(df) # # %missing values df.isna().mean() # + # get rid of unnecessary columns df = df.drop(['id','host_name','last_review'], axis=1) # fill nan values df['reviews_per_month'].fillna(0, inplace=True) # - len(df[df['name'].isnull()]) # ### EDA # Neighbourhood Group df.neighbourhood_group.value_counts() sns.boxplot(x='neighbourhood_group',y='price',data=df) plt.ylim(-10,1000) # lots of outliers # Neighbourhood df.neighbourhood.value_counts().head(10) # Room type df.room_type.value_counts() # Room types & Neighbourhood groups g = 
sns.FacetGrid(data=df,col='neighbourhood_group',margin_titles=True,ylim=(0,600)) g.map(sns.boxplot,'room_type','price',palette='colorblind') # Prices sns.distplot(df.price,bins=100) # Prices (<=600) across NYC plt.figure(figsize=(10,10)) sns.scatterplot(x='latitude',y='longitude',data=df[df.price<=600],hue='price') # Prices in Manhattan (<=1000) plt.figure(figsize=(8,8)) sns.scatterplot(x='latitude',y='longitude',data=df[(df.neighbourhood_group=='Manhattan')&(df.price<=1000)],hue='price') # Reviews histogram sns.distplot(df.reviews_per_month,bins=30) # Top 10 hosts df.host_id.value_counts().head(10) # Price vs Num of Reviews sns.scatterplot(x='number_of_reviews',y='price',data=df,hue='neighbourhood_group')
notebooks/Airbnb_NYC.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6.9 64-bit # name: python369jvsc74a57bd031f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6 # --- # + import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set_theme(style="darkgrid") dataframe = pd.read_csv('Nodes.csv', delimiter=';', header=0, index_col=0) dataframe=dataframe.astype(float) # - ax = plt.gca() ax.ticklabel_format(style='plain') dataframe.plot(kind='line', y='Soma de MEMORY (bytes)', label='SUM Memory (bytes)',ax=ax) dataframe.plot(kind='line', y='Soma de CPU (m)', label='SUM CPU (millicpu)',ax=ax) plt.xlabel("Time (s)") #plt.ylabel("Memory (%)") plt.legend() plt.savefig('out/Nodes_MEM_CPU.pdf', bbox_inches='tight') plt.savefig('out/Nodes_MEM_CPU.png', dpi=300, bbox_inches='tight')
performance-analysis/article/_old/nodes-resources-consume-by-time/cluster_nodes_mem_cpu.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os

# Map note names (e.g. "C5") to their frequency, read from notes.tabs.
# Each line has tab-separated note / frequency / wavelength fields; notes
# that share a frequency are written "A/B" and every alias gets an entry.
notesDict = {}
with open('notes.tabs', 'rt') as f:
    for line in f:
        names, freq, wave = (line.replace('\n','').replace(' ', '').split('\t'))
        for note in names.split('/'):
            notesDict[note] = freq


def appendToFile(path, data):
    """Append a line of data to the specified file, followed by a newline.

    Args:
        path (str): the file path
        data (str): the data to be written
    """
    with open(path, 'a') as file:
        file.write(data + '\n')


def writeSong(path, song, notesDict, metronome):
    """Write a song to ``path`` as one ``frequency,duration_ms`` line per note.

    Args:
        path (str): output file path (lines are appended)
        song (list): [note_name, fraction_of_whole_note] pairs
        notesDict (dict): note name -> frequency string
        metronome (int): tempo in beats per minute
    """
    for note, fraction in song:
        # Duration in ms: a whole note spans 4 beats of 60000/metronome ms.
        # BUG FIX: the metronome parameter was previously ignored -- the
        # tempo was hard-coded to 80 even though 140 is passed in below.
        duration = int(1000 * 60 * 4 * fraction / metronome)
        data = ("{},{}".format(int(float(notesDict[note])), duration))
        appendToFile(path, data)


metronome = 140
# Short rest inserted between consecutive notes (D2 used as a low filler tone).
pause = ['D2', 1/8]

# +
twinkleTwinkle = [
    ['C5', 1/4], ['C5', 1/4], ['G5', 1/4], ['G5', 1/4],
    ['A5', 1/4], ['A5', 1/4], ['G5', 1/2],
    ['F5', 1/4], ['F5', 1/4], ['E5', 1/4], ['E5', 1/4],
    ['D5', 1/4], ['D5', 1/4], ['C5', 1/2],
#     ['G6', 1/4], ['G6', 1/4], ['F6', 1/4], ['F6', 1/4],
#     ['E6', 1/4], ['E6', 1/4], ['D6', 1/2],
#     ['G6', 1/4], ['G6', 1/4], ['F6', 1/4], ['F6', 1/4],
#     ['E6', 1/4], ['E6', 1/4], ['D6', 1/2],
    ['G5', 1/4], ['G5', 1/4], ['F5', 1/4], ['F5', 1/4],
    ['E5', 1/4], ['E5', 1/4], ['D5', 1/2],
    ['G5', 1/4], ['G5', 1/4], ['F5', 1/4], ['F5', 1/4],
    ['E5', 1/4], ['E5', 1/4], ['D5', 1/2],
    ['C5', 1/4], ['C5', 1/4], ['G5', 1/4], ['G5', 1/4],
    ['A5', 1/4], ['A5', 1/4], ['G5', 1/2],
    ['F5', 1/4], ['F5', 1/4], ['E5', 1/4], ['E5', 1/4],
    ['D5', 1/4], ['D5', 1/4], ['C5', 1/2]
]

# +
# twinkleTwinkle = [['D2', 1/8],['D2', 1/8],['D2', 1/8],['D2', 1/8],['D2', 1/8],['D2', 1/8]
# ]
# -

# Interleave a short pause after every note of the melody.
song = []
for note in twinkleTwinkle:
    song.append(note)
    song.append(pause)

# Remove any previous output first, since writeSong always appends.
# BUG FIX: guarded so a missing file no longer raises FileNotFoundError.
if os.path.exists('twinkleTwinkle.csv'):
    os.remove('twinkleTwinkle.csv')
writeSong('twinkleTwinkle.csv', song, notesDict, metronome)

# ##
code/songFileCreation/makeSongs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %pylab inline # %load_ext autoreload # %autoreload 2 from pylab import rcParams rcParams['figure.figsize'] = 15, 5 LENGTH = 250 BATCH_SIZE = 16 def data_generator(): """ Unbounded generation of new data samples, 1 at a time """ while True: y = np.zeros(LENGTH) i = random.randint(30, LENGTH-30) direction = random.choice([False, True]) if direction: y[i:] = 1 else: y[:i] = 1 x = y * 0.5 + 0.2 if direction: x[i:i+10] = 2 else: x[i-10:i] = 2 x = np.sin(np.cumsum(x)) / 2 + 0.5 # f1 = np.sin(np.array(range(100)) / 2.0) # f2 = np.sin(np.array(range(100)) / 4.0) # x = f1 * (y-1) + f2 * y yield np.stack([x, 1-x], axis=-1), np.stack([y, 1 - y], axis=-1) x, y = next(data_generator()) plot(x[:,0], label='input') plot(y[:,0], label='output') legend() grid() title('Example Sample') ylim(-0.1, 1.1) show() def batch(iterator, batch_size): """ Given an `iterator` which generates 1 sample at a time, batch them up and return `batch_size` samples on each yield. Batch axis will be the first axis. 
""" while True: X = [] Y = [] for _ in range(batch_size): x, y = next(iterator) X.append(x) Y.append(y) yield np.array(X), np.array(Y) from tensorflow_hmm.hmm_layer import HMMLayer # + # Create your first MLP in Keras from keras.models import Sequential from keras.layers import Dense, LSTM, Conv1D, InputLayer, Activation from keras import optimizers data = batch(data_generator(), BATCH_SIZE) # - def make_model(encoder, decoder, loss): # create model model = Sequential() model.add(InputLayer(input_shape=(LENGTH,2))) if encoder == 'conv': model.add(Conv1D(16, 5, activation='elu', padding='same', strides=1)) model.add(Conv1D(16, 5, activation='elu', padding='same', strides=1)) elif encoder == 'lstm': model.add(LSTM(16, return_sequences=True)) model.add(LSTM(16, return_sequences=True)) else: raise ValueError('invalid encoder type: {}'.format(encoder)) if decoder == 'conv': model.add(Conv1D(2, 1, activation='elu', padding='same', strides=1)) elif decoder == 'lstm': model.add(LSTM(2, return_sequences=True)) else: raise ValueError('invalid decoder type: {}'.format(encoder)) if loss == 'hmm': model.add(HMMLayer(states=2, length=LENGTH)) model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.9)) elif loss == 'softmax': model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam') else: raise ValueError('invalid loss type: {}'.format(encoder)) return model def plot_sample_prediction(model, data): from tensorflow_hmm import hmm X, Y = data.next() prediction = model.predict(X) # TODO: change keras layer to use viterbi during inference # NOTE: this isn't exactly right because if there is an hmm layer, the output prediction has already factored in # the transition probability priors hmm_np = hmm.HMMNumpy(np.array([[0.99, 0.01], [0.01, 0.99]]), p0=np.array([0.5, 0.5])) viterbi, _ = hmm_np.viterbi_decode(prediction[0,:,:]) print 'accuracy: {0:.2f}%'.format(100.0 * sum(Y[0,:,1] == viterbi) / 
Y.shape[1]) print 'mean accuracy over mini-batch: {0:.2f}%'.format(np.mean([ 100.0 * sum(Y[i,:,1] == hmm_np.viterbi_decode(prediction[i,:,:])[0]) / Y.shape[1] for i in range(Y.shape[0]) ])) plot(viterbi, label='viterbi') plot(prediction[0,:,1], label='prediction') plot(X[0,:,1], label='input') plot(Y[0,:,1], label='truth') ylim(-0.1, 1.1) grid() legend() show() # ## HMM Loss model = make_model(encoder='conv', decoder='conv', loss='hmm') model.fit_generator(data, epochs=5, steps_per_epoch=128) plot_sample_prediction(model, data) # ## Softmax + Categorial Cross Entropy Loss model = make_model(encoder='conv', decoder='conv', loss='softmax') model.fit_generator(data, epochs=5, steps_per_epoch=128) plot_sample_prediction(model, data)
tensorflow/tensorflow_hmm/notebooks/keras_example.ipynb
# ---
# jupytext light-format notebook (cell markers "# +"; Colab cell metadata trimmed).
# ---

# # Part of shape defense
#
# Trains a LeNet-style CNN on MNIST, then runs FGSM adversarial attacks with and
# without background masking ("shape defense") and compares robustness curves.

# %matplotlib inline

from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
import os


# FGSM attack code
def fgsm_attack(image, epsilon, data_grad):
    """Return `image` perturbed by one FGSM step of size `epsilon`, clamped to [0, 1]."""
    # Collect the element-wise sign of the data gradient
    sign_data_grad = data_grad.sign()
    # Create the perturbed image by adjusting each pixel of the input image
    perturbed_image = image + epsilon*sign_data_grad
    # Adding clipping to maintain [0, 1] range
    perturbed_image = torch.clamp(perturbed_image, 0, 1)
    # Return the perturbed image
    return perturbed_image


def load_model(net, model_path):
    """Load a state dict from `model_path` into `net`, mapping onto GPU if available."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    load_weights = torch.load(model_path, map_location=device)
    net.load_state_dict(load_weights)


from skimage import io, color, feature


def detect_edge(data):
    """Canny edge map (sigma=1) of channel 0 for every image in `data` (N, C, H, W array)."""
    edge_maps = np.zeros_like(data)
    for idx, img in enumerate(data):
        edge_maps[idx, 0] = feature.canny(np.array(img[0], dtype=np.float64), sigma=1)
    return edge_maps


def test(model, device, test_loader, epsilon, do_seg=False):
    """FGSM-attack every test sample and measure post-attack accuracy.

    Assumes batch_size == 1 (uses .item() on predictions/targets).
    If `do_seg` is True, the perturbation is multiplied by a foreground mask
    (pixels where the original image > 0), i.e. background noise is removed.
    Returns (final_acc, adv_examples) where adv_examples holds up to 5
    (init_pred, final_pred, image) tuples for visualization.
    NOTE: samples the clean model already misclassifies are skipped but still
    count against final_acc (denominator is len(test_loader)).
    """
    # Accuracy counter
    correct = 0
    adv_examples = []
    # Loop over all examples in test set
    for data, target in test_loader:
        data_orig = torch.clone(data)
        # data = add_noise(np.array(np.copy(data)))  # optional noise variant
        # Round-trip through a Python list: makes a detached leaf tensor copy.
        data = torch.Tensor(data.tolist())
        # send to device
        data, target = data.to(device), target.to(device)
        # Set requires_grad attribute of tensor. Important for Attack
        data.requires_grad = True
        # Forward pass the data through the model
        output = model(data)
        init_pred = output.max(1, keepdim=True)[1]  # index of the max log-probability
        # If the initial prediction is wrong, dont bother attacking, just move on
        if init_pred.item() != target.item():
            continue
        # Calculate the loss
        loss = F.nll_loss(output, target)
        # Zero all existing gradients
        model.zero_grad()
        # Calculate gradients of model in backward pass
        loss.backward()
        # Collect datagrad
        data_grad = data.grad.data
        # Call FGSM Attack
        perturbed_data = fgsm_attack(data, epsilon, data_grad)
        # MULTIPLY THE ATTACK BY THE MASK; THAT IS REMOVE THE PERTURBATION ON BACKGROUND
        if do_seg:
            mask = np.zeros_like(data_orig.detach())
            mask[data_orig.detach() > 0] = 1
            perturbed_data = perturbed_data.cpu().detach() * mask
            perturbed_data = perturbed_data.to(device)
        # Re-classify the perturbed image
        output = model(perturbed_data)
        # Check for success
        final_pred = output.max(1, keepdim=True)[1]  # index of the max log-probability
        if final_pred.item() == target.item():
            correct += 1
            # Special case for saving 0 epsilon examples
            if (epsilon == 0) and (len(adv_examples) < 5):
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))
        else:
            # Save some adv examples for visualization later
            if len(adv_examples) < 5:
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))
    # Calculate final accuracy for this epsilon
    final_acc = correct/float(len(test_loader))
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))
    # Return the accuracy and an adversarial example
    return final_acc, adv_examples


# Colab drive mount (disabled outside Colab):
# from google.colab import drive
# drive.mount('/gdrive')

# epsilons = [0, .05, .1, .15, .2, .25, .3]
import os
# Attack strengths expressed as fractions of the 8-bit pixel range.
epsilons = [0, 8/255, 16/255, 32/255, 64/255, 128/255]
# pretrained_model = "/gdrive/My Drive/Tmp/lenet_mnist_model.pth"
# pretrained_model = '/gdrive/My Drive/Tmp/cifar_net.pth'
# pretrained_model = "lenet_mnist_model.pth"
# path = '/gdrive/My Drive/Tmp/slope_models/'


# LeNet Model definition
class Net(nn.Module):
    # Two conv layers (with dropout on the second) followed by two FC layers;
    # outputs log-probabilities over the 10 MNIST classes.
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)  # 20 channels * 4 * 4 after two conv+pool stages
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


# MNIST train dataset and dataloader declaration
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                   ])),
    batch_size=100, shuffle=True)
# MNIST Test dataset and dataloader declaration
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                   ])),
    batch_size=100, shuffle=True)

# Define what device we are using
# print("CUDA Available: ", torch.cuda.is_available())
use_cuda = False
device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")
model = Net().to(device)  # training with slope 1 for now

# # Training a model first

from torchsummary import summary
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# vgg = models.vgg16().to(device)
summary(model, (1, 28, 28))


def add_noise(data):
    """Replace background (zero) pixels with uniform noise; keep foreground pixels."""
    mask = data > 0
    # noise_data = np.random.rand(data.shape[0], data.shape[1], data.shape[2], data.shape[3])
    noise_data = np.random.rand(*data.shape)
    noise_data[mask] = data[mask]
    return noise_data


# plt.imshow(d[40,0])
# plt.show()

# Visualize one noised sample.
# NOTE(review): `data` here is whatever leaked from a previous interactive cell
# (e.g. the last training batch) — undefined in a fresh top-to-bottom run.
xx = add_noise(data)
plt.imshow(xx[40, 0])
plt.show()

# Training a model (optionally over noised data — noise lines are commented out).
EPOCHS = 10
losses = []  # NOTE: appends tensors (loss.cpu().data), not plain floats
optimizer = optim.Adadelta(model.parameters())  # , lr=(1e-3))
model.train()
for epoch in range(EPOCHS):
    for batch_idx, (data, target) in enumerate(train_loader):
        # add noise (disabled):
        # data = add_noise(np.array(np.copy(data)))
        # data = torch.Tensor(data.tolist())
        # send to device
        data, target = data.to(device), target.to(device)
        # Init
        optimizer.zero_grad()
        # Predict
        y_pred = model(data)
        # Calculate loss
        # loss = F.cross_entropy(y_pred, target)
        loss = F.nll_loss(y_pred, target)  # model emits log-probs, so NLL loss
        losses.append(loss.cpu().data)
        # Backpropagation
        loss.backward()
        optimizer.step()
        # Display
        if batch_idx % 100 == 1:
            print('\r Train Epoch: {}/{} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch+1, EPOCHS, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.cpu().data), end='')
    # Eval on the full test set after each epoch.
    # NOTE(review): indentation reconstructed from a flattened source — this eval
    # appears to run once per epoch; confirm against the original notebook.
    # NOTE: model.eval() is never reset to model.train(), so epochs after the
    # first train with dropout disabled.
    evaluate_x = test_loader.dataset.data.type_as(torch.FloatTensor())
    evaluate_y = test_loader.dataset.targets
    evaluate_x, evaluate_y = evaluate_x.to(device), evaluate_y.to(device)
    model.eval()
    output = model(evaluate_x[:, None, ...])
    pred = output.data.max(1)[1]
    d = pred.eq(evaluate_y.data).cpu()
    accuracy = d.sum().type(dtype=torch.float64)/d.size()[0]
    # NOTE: end='' is (mis)placed inside .format()'s arguments, where it is a
    # harmless ignored kwarg of format, not of print.
    print('\r Train Epoch: {}/{} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t Test Accuracy: {:.4f}%'.format(
        epoch+1, EPOCHS, len(train_loader.dataset), len(train_loader.dataset),
        100. * batch_idx / len(train_loader), loss.cpu().data, accuracy*100, end=''))

# torch.save(model.state_dict(), 'models/lenet_mnist_model_noise.pth')
torch.save(model.state_dict(), 'models/lenet_mnist_model.pth')

# Evaluate on noised test data.
evaluate_x = test_loader.dataset.data.type_as(torch.FloatTensor())
evaluate_x = evaluate_x / 255.0
evaluate_y = test_loader.dataset.targets.type_as(torch.FloatTensor())
evaluate_x = add_noise(np.array(evaluate_x))  # [:,None,...])))
evaluate_x = torch.Tensor(evaluate_x)
evaluate_x, evaluate_y = evaluate_x.to(device), evaluate_y.to(device)
model.eval()
output = model(evaluate_x[:, None, ...])
pred = output.data.max(1)[1]
d = pred.eq(evaluate_y.data).cpu()
accuracy = d.sum().type(dtype=torch.float64)/d.size()[0]
print('Accuracy:', accuracy*100)

# # On clean data
evaluate_x = test_loader.dataset.data.type_as(torch.FloatTensor())
evaluate_x = evaluate_x / 255.0
evaluate_y = test_loader.dataset.targets.type_as(torch.FloatTensor())
evaluate_x, evaluate_y = evaluate_x.to(device), evaluate_y.to(device)
model.eval()
output = model(evaluate_x[:, None, ...])
pred = output.data.max(1)[1]
d = pred.eq(evaluate_y.data).cpu()
accuracy = d.sum().type(dtype=torch.float64)/d.size()[0]
print('Accuracy:', accuracy*100)

plt.imshow(evaluate_x[10].cpu());
plt.show()

# NOTE(review): `mask` is a leftover from an earlier interactive cell; undefined
# in a fresh run.
mask.shape

# # Now perform adversarial attack

# batch_size=1 — required by test()'s .item() calls.
test_loader_new = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                   ])),
    batch_size=1, shuffle=True)

model = Net().to(device)  # training with slope 1 for now
orig_model = 'models/lenet_mnist_model.pth'
# use_cuda=True
if os.path.exists(orig_model):
    load_model(model, orig_model)
    print('model loaded')
model.eval()

accuracies = []
examples = []
accuracies_orig = []
examples_orig = []
# epsilons = [.2, .25, .3]
# Run test for each epsilon, with and without background masking.
for eps in epsilons:
    do_seg = True
    acc, ex = test(model, device, test_loader_new, eps, do_seg)
    accuracies.append(acc)
    examples.append(ex)
    do_seg = False
    acc_orig, ex_orig = test(model, device, test_loader_new, eps, do_seg)
    accuracies_orig.append(acc_orig)
    examples_orig.append(ex_orig)

# Plot several examples of adversarial samples at each epsilon (masked run).
cnt = 0
plt.figure(figsize=(8, 10))
for i in range(len(epsilons)):
    for j in range(len(examples[i])):
        cnt += 1
        plt.subplot(len(epsilons), len(examples[0]), cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j == 0:
            plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
        orig, adv, ex = examples[i][j]
        plt.title("{} -> {}".format(orig, adv))
        plt.imshow(ex, cmap="gray")
plt.tight_layout()
plt.show()

# Plot several examples of adversarial samples at each epsilon (unmasked run).
cnt = 0
plt.figure(figsize=(8, 10))
for i in range(len(epsilons)):
    for j in range(len(examples_orig[i])):
        cnt += 1
        plt.subplot(len(epsilons), len(examples_orig[0]), cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j == 0:
            plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
        orig, adv, ex = examples_orig[i][j]
        plt.title("{} -> {}".format(orig, adv))
        plt.imshow(ex, cmap="gray")
plt.tight_layout()
plt.show()

# Earlier accuracy-vs-epsilon plot (superseded by the combined plot below):
# plt.figure(figsize=(5,5))
# plt.plot(epsilons, accuracies, "*-")
# plt.plot(epsilons, accuracies_orig, "o-")
# plt.yticks(np.arange(0, 1.1, step=0.1))
# plt.xticks(np.arange(0, .35, step=0.05))
# plt.title("Accuracy vs Epsilon")
# plt.xlabel("Epsilon")
# plt.ylabel("Accuracy")
# plt.legend(['with bg masking', 'w/o bg masking'])
# plt.show()

# Accuracies from a previous clean-data run, hard-coded for comparison.
accuracies_clean = [0.981, 0.9634, 0.9391, 0.8973, 0.8509, 0.7917, 0.7217]
accuracies_clean_orig = [0.981, 0.9426, 0.851, 0.6826, 0.4301, 0.2082, 0.0869]

# Combined accuracy-vs-epsilon plot (x ticks rescaled to 0-255 pixel units).
# NOTE(review): accuracies_clean has 7 entries while epsilons has 6 — plotting
# will raise a shape mismatch; epsilons list likely changed after that run.
plt.figure(figsize=(5, 5))
plt.plot(epsilons, accuracies_clean, "*-")
plt.plot(epsilons, accuracies_clean_orig, "o-")
plt.plot(epsilons, accuracies, "*--")
plt.plot(epsilons, accuracies_orig, "o--")
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xticks(np.arange(0, .35, step=0.05)*255)
plt.title("Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.legend(['with bg masking (clean)', 'w/o bg masking (clean)',
            'with bg masking (noise)', 'w/o bg masking (noise)'], loc='center left')
plt.show()
Foreground/forground_orig.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dashboard # # This "dashboard" for now only shows the state of a vanilla [Redis](https://redis.io) server (on/off) and allows to toggle it by pressing on the button. It's recommended to opn this notebook as a [Voilà](https://voila.readthedocs.io) dashboard by clicking on the Voilà icon in the notebook's toolbar. # + from dashboard import make_redis_button redis_button = make_redis_button() redis_button
examples/dashboard.ipynb
# ---
# jupytext light-format notebook.
# ---

# FortiGate REST API practice: cookie-based login, CRUD on firewall address
# objects, then logout. Mutating requests (POST/PUT/DELETE) must carry the
# CSRF token extracted from the 'ccsrftoken' session cookie.

import requests
# FIX: was `import urllib`, which only exposed urllib.parse because requests
# happens to import it transitively; import the submodule explicitly.
import urllib.parse
import json

# Generate a session so login cookies persist across requests.
session = requests.session()

# Login: FortiGate expects form-encoded credentials at /logincheck.
url = 'http://10.210.201.5/logincheck'
username = 'admin'
password = '<PASSWORD>'  # placeholder — never commit a real secret
data = f'username={urllib.parse.quote(username)}&secretkey={urllib.parse.quote(password)}'
response = session.post(url=url, data=data)
# Keep the cookies
cookies = response.cookies

# Alternative: login with an API token instead of a session cookie —
# https://kb.fortinet.com/kb/documentLink.do?externalID=FD45595
# https://github.com/fortinet-solutions-cse/fortiosapi/blob/f3ac6561de5588c600a7ade8ff6c742c81b8dfcf/fortiosapi/fortiosapi.py#L231

# Request - get: list all firewall address objects.
url = 'http://10.210.201.5/api/v2/cmdb/firewall/address'
response = session.get(url=url)
print(response.text)

# Request - get (format=name|subnet): restrict returned fields.
url = 'http://10.210.201.5/api/v2/cmdb/firewall/address?format=name|subnet'
response = session.get(url=url)
print(response.text)

# Request - get (action=default): fetch the schema defaults.
url = 'http://10.210.201.5/api/v2/cmdb/firewall/address?action=default'
response = session.get(url=url)
print(response.text)

# Cast data as a dict to read/use it.
data = json.loads(response.text)

# Request - post: create an address object. The stored ccsrftoken value is
# surrounded by quotes, hence the [1:-1] strip.
url = 'http://10.210.201.5/api/v2/cmdb/firewall/address'
data = {
    'name': 'address jimmy lin 10.210.201.168/32',
    'type': 'ipmask',
    'subnet': '10.210.201.168 255.255.255.255',
}
headers = {
    'X-CSRFTOKEN': session.cookies['ccsrftoken'][1:-1]
}
response = session.post(url=url, data=json.dumps(data), headers=headers)
print(response.text)

# Request - put: rename/update. The mkey (object name) must be fully
# URL-escaped, including '/', so quote with safe=''.
mkey = 'address jimmy lin 10.210.201.168/32'
url = 'http://10.210.201.5/api/v2/cmdb/firewall/address/' + urllib.parse.quote(mkey, safe='')
data = {
    'name': 'address__jimmy_lin__10.210.201.168/32',
    'type': 'ipmask',
    'subnet': '10.210.201.168 255.255.255.255',
}
headers = {
    'X-CSRFTOKEN': session.cookies['ccsrftoken'][1:-1]
}
response = session.put(url=url, data=json.dumps(data), headers=headers)
print(response.text)

# Request - delete
mkey = 'address jimmy lin 10.210.201.168/32'
url = 'http://10.210.201.5/api/v2/cmdb/firewall/address/' + urllib.parse.quote(mkey, safe='')
headers = {
    'X-CSRFTOKEN': session.cookies['ccsrftoken'][1:-1]
}
response = session.delete(url=url, headers=headers)
print(response.text)

# Logout
url = 'http://10.210.201.5/logout'
session.post(url=url)
ipynb/Practice - 5 - Put.ipynb
# ---
# jupytext light-format notebook (Colab cell metadata and badge markdown trimmed).
# ---

# # Getting Started with TensorFlow Hub
#
# Copyright 2019 The TensorFlow Authors. Licensed under the Apache License,
# Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0).
#
# Covers: loading TF Hub modules and running inference, using modules with
# Keras, feature vectors for transfer learning, saving/running a module
# locally, and changing the module download (cache) location.

# ## Setup
# NOTE(review): jupytext commented out the `%tensorflow_version 2.x` Colab
# magic, leaving this try-body empty — valid inside the notebook, but not as a
# plain .py script.
try:
    # %tensorflow_version 2.x
except:
    pass

import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_hub as hub
from PIL import Image

print("\u2022 Using TensorFlow Version:", tf.__version__)

# ## Download a test image (puppy) for the Hub modules.
# !wget -O dog.jpeg https://cdn.pixabay.com/photo/2016/12/13/05/15/puppy-1903313_960_720.jpg
original_image = Image.open('./dog.jpeg')

# Show the raw downloaded image.
plt.figure(figsize=(6,6))
plt.imshow(original_image)
plt.show()

# ## Format image: MobileNet expects 224x224 RGB with values in [0, 1].
IMAGE_SIZE = (224, 224)
img = original_image.resize(IMAGE_SIZE)
img = np.array(img) / 255.0

# Show the resized/normalized image.
plt.figure(figsize=(5,5))
plt.imshow(img)
plt.title('New Image Size: {}'.format(img.shape), fontdict={'size': 16}, color='green')
plt.show()

# ## Get the 1001 ImageNet class labels.
# !wget -O labels.txt --quiet https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt
with open('labels.txt', 'r') as f:
    labels = [l.strip() for l in f.readlines()]

# get number of labels
num_classes = len(labels)
print('There are a total of {0} labels representing {0} classes.\n'.format(num_classes))

# Peek at the first 5 labels.
for label in labels[0:5]:
    print(label)

# ## Load a TensorFlow Hub module by its handle URL.
MODULE_HANDLE = 'https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/4'
module = hub.load(MODULE_HANDLE)

# ## Inference: the module outputs raw logits, so apply softmax, then
# report the top-5 predictions with their scores.
predictions = tf.nn.softmax(module([img]))[0]
top_k_pred_values, top_k_indices = tf.math.top_k(predictions, k=5)
top_k_pred_values = top_k_pred_values.numpy()
top_k_indices = top_k_indices.numpy()
for value, i in zip(top_k_pred_values, top_k_indices):
    print('{}: {:.3}'.format(labels[i], value))

# ## Using a Hub module inside Keras via hub.KerasLayer + softmax activation.
model = tf.keras.Sequential([
    hub.KerasLayer(MODULE_HANDLE, input_shape=IMAGE_SIZE + (3,)),
    tf.keras.layers.Activation('softmax')
])

# Add batch dimension — Keras expects (batch, H, W, C).
img_arr = np.expand_dims(img, axis=0)

# Top-5 predictions from the Keras-wrapped model.
predictions = model.predict(img_arr)[0]
top_k_pred_values, top_k_indices = tf.math.top_k(predictions, k=5)
top_k_pred_values = top_k_pred_values.numpy()
top_k_indices = top_k_indices.numpy()
for value, i in zip(top_k_pred_values, top_k_indices):
    print('{}: {:.3}'.format(labels[i], value))

# ## Feature vectors for transfer learning: same network minus its
# classification head, plus a fresh Dense classifier for the new dataset.
MODULE_HANDLE ="https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"

# Number of classes in the new dataset
NUM_CLASSES = 20

model = tf.keras.Sequential([
    hub.KerasLayer(MODULE_HANDLE, input_shape=IMAGE_SIZE + (3,)),
    tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')
])

# ## Saving a module for local/offline use: append ?tf-hub-format=compressed
# to download a SavedModel tarball, then untar it.
MODULE_HANDLE = 'https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/4?tf-hub-format=compressed'
# !wget -O ./saved_model.tar.gz $MODULE_HANDLE

# Untar the tarball
# !mkdir -p ./saved_model
# !tar xvzf ./saved_model.tar.gz -C ./saved_model

# ## Running the local SavedModel with hub.load.
module = hub.load('./saved_model')

# Same top-5 inference against the locally loaded module.
predictions = tf.nn.softmax(module([img]))[0]
top_k_pred_values, top_k_indices = tf.math.top_k(predictions, k=5)
top_k_pred_values = top_k_pred_values.numpy()
top_k_indices = top_k_indices.numpy()
for value, i in zip(top_k_pred_values, top_k_indices):
    print('{}: {:.3}'.format(labels[i], value))

# ## Changing the download location of TF Hub modules: set TFHUB_CACHE_DIR;
# subsequent hub.load calls download into that directory.
new_dir = './hub_cache_dir'
os.environ['TFHUB_CACHE_DIR'] = new_dir

MODULE_HANDLE = 'https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/4'
module = hub.load(MODULE_HANDLE)

# Inspect the new cache directory recursively.
# !ls -R {new_dir}
tfhub_basic_examples.ipynb
# ---
# jupytext light-format notebook (kernel: env_ecobici).
# ---

# # Índice de calidad de aire por AGEB
#
# From the per-AGEB pollution models for each pollutant (O3, PM10, PM2.5),
# compute a single air-quality index per AGEB.
#
# Input: contaminacion_{O3,PM10,PM2.5}_ageb.csv produced by interpolacion*.ipynb.

import datetime

import pandas as pd

df_O3 = pd.read_csv('../data/contaminacion/contaminacion_por_ageb/contaminacion_O3_ageb.csv')
df_PM10 = pd.read_csv('../data/contaminacion/contaminacion_por_ageb/contaminacion_PM10_ageb.csv')
df_PM25 = pd.read_csv('../data/contaminacion/contaminacion_por_ageb/contaminacion_PM2.5_ageb.csv')

# Fill missing readings with each column's mean so every AGEB gets an index.
df_O3.fillna(df_O3.mean(), inplace=True)
df_PM10.fillna(df_PM10.mean(), inplace=True)
df_PM25.fillna(df_PM25.mean(), inplace=True)

df_O3.dropna().shape
df_PM10.dropna().shape
df_PM25.dropna().shape

df_O3.head()

# ## Convert concentrations to index values


def asignar_fecha(row, columna):
    """Return the datetime in `row[columna]` shifted forward 24 hours."""
    return row[columna] + datetime.timedelta(hours=24)


def convertir_ppb(row, columna):
    """Convert `row[columna]` from micrograms/m^3 to ppb (parts per billion)."""
    return row[columna] / 1.96


def indice_PM10(row):
    """Piecewise-linear air-quality index for a PM10 value (micrograms/m^3).

    FIX: the original used disjoint integer bands (``row <= 40`` then
    ``row >= 41``), so any fractional value inside a gap (e.g. 40.5) and any
    value outside [0, 604] silently fell through and returned None. Bands are
    now continuous; the per-band formulas are unchanged, so every input the
    original handled yields the same result. Out-of-range input still returns
    None, now explicitly.
    """
    if row < 0:
        return None
    if row <= 40:
        return round(1.2500 * row)
    if row <= 75:
        return round((1.4412 * (row - 41)) + 51)
    if row <= 214:
        return round((0.3551 * (row - 76)) + 101)
    if row <= 354:
        return round((0.3525 * (row - 215)) + 151)
    if row <= 424:
        return round((1.4348 * (row - 355)) + 201)
    if row <= 504:
        return round((1.2532 * (row - 425)) + 301)
    if row <= 604:
        return round((1.0000 * (row - 505)) + 401)
    return None  # above the scale


def indice_O3(row):
    """Piecewise-linear air-quality index for an O3 value (ppb).

    FIX: same continuous-band repair as `indice_PM10` — values inside the old
    integer gaps (e.g. 70.5) no longer return None; in-band results unchanged.
    """
    if row < 0:
        return None
    if row <= 70:
        return round(0.7143 * row)
    if row <= 95:
        return round((2.0417 * (row - 71)) + 51)
    if row <= 154:
        return round((2.4138 * (row - 96)) + 101)
    if row <= 204:
        return round((1.0000 * (row - 155)) + 151)
    if row <= 404:
        return round((0.4975 * (row - 205)) + 201)
    if row <= 504:
        return round((1.000 * (row - 405)) + 301)
    if row <= 604:
        return round((1.0000 * (row - 505)) + 401)
    return None  # above the scale


def convertir_unidades(df, columna_fecha):
    """Sort `df` by `columna_fecha` (descending) and add a PM10-in-ppb column.

    FIX: the original wrote the converted column as 'PM10mean_max_ppb' but then
    selected 'PM10mean_y_ppb', so any call raised KeyError; the names now agree.
    NOTE(review): never invoked in this notebook — confirm the expected columns
    ('fecha', 'O3_y', 'PM10mean_y', 'TMP'...) against the intended caller.
    """
    df = df.sort_values([columna_fecha], ascending=[0])
    df['PM10mean_y_ppb'] = df.apply(lambda row: convertir_ppb(row, 'PM10mean_y'), axis=1)
    df_concentraciones = df[['fecha', 'O3_y', 'PM10mean_y_ppb', 'TMP', 'TMP_x', 'TMP_y']]
    return df_concentraciones


# Columns 4+ hold the hourly per-AGEB pollution values.
columnas_a_iterar = df_O3.columns.tolist()[4:]
columnas_a_iterar

# Map each pollutant's concentrations to index values in place.
for columna in columnas_a_iterar:
    df_O3.loc[:, columna] = df_O3.loc[:, columna].apply(indice_O3)

for columna in columnas_a_iterar:
    df_PM10.loc[:, columna] = df_PM10.loc[:, columna].apply(indice_PM10)

# PM2.5 reuses the PM10 index bands (no dedicated PM2.5 table was provided here).
for columna in columnas_a_iterar:
    df_PM25.loc[:, columna] = df_PM25.loc[:, columna].apply(indice_PM10)

# Average the three pollutant indices per AGEB/hour.
# FIX: .copy() so the assignments below write to an independent frame instead
# of a slice of df_O3 (avoids pandas SettingWithCopy ambiguity).
df_contaminacion_mean = df_O3[['CVE_AGEB', 'O3_por_ageb_0', 'O3_por_ageb_1',
                               'O3_por_ageb_7', 'O3_por_ageb_8', 'O3_por_ageb_9',
                               'O3_por_ageb_10', 'O3_por_ageb_11', 'O3_por_ageb_12',
                               'O3_por_ageb_13', 'O3_por_ageb_14', 'O3_por_ageb_15',
                               'O3_por_ageb_16', 'O3_por_ageb_17', 'O3_por_ageb_18',
                               'O3_por_ageb_19', 'O3_por_ageb_20', 'O3_por_ageb_21',
                               'O3_por_ageb_22', 'O3_por_ageb_23']].copy()
df_contaminacion_mean.head()

for column in columnas_a_iterar:
    df_contaminacion_mean[column] = (df_O3.loc[:, column] + df_PM10.loc[:, column] + df_PM25.loc[:, column]) / 3

df_contaminacion_mean.fillna(df_contaminacion_mean.mean(), inplace=True)
df_contaminacion_mean.head()

# ### Normalize the AGEB keys: keep digits only, then cast to int.
df_contaminacion_mean['CVE_AGEB'] = [''.join(filter(lambda x: x.isdigit(), row))
                                     for row in df_contaminacion_mean['CVE_AGEB']]
df_contaminacion_mean['CVE_AGEB'] = df_contaminacion_mean['CVE_AGEB'].astype(int)

df_contaminacion_mean.to_csv("../data/production_data/contaminacion/contaminacion_indice_ageb.csv", index=False)
contaminacion_por_ageb/indice_de_calidad_de_aire_por_ageb.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Preprocess the IMDB movie-metadata dump into the compact CSV used by the
# movies recommender system.

import numpy as np
import pandas as pd

data = pd.read_csv('movie_metadata.csv')

data.columns

# Keep only the columns that are useful in the recommender.
data = data.loc[:, ['actor_1_name', 'actor_2_name', 'actor_3_name', 'director_name', 'genres', 'movie_title']]

data.head()

# Inspect null values per column.
data.isnull().sum(axis=0)

# Replace null values in all name columns with the string 'unknown'.
data['actor_1_name'] = data['actor_1_name'].replace(np.nan, 'unknown')
data['actor_2_name'] = data['actor_2_name'].replace(np.nan, 'unknown')
data['actor_3_name'] = data['actor_3_name'].replace(np.nan, 'unknown')
data['director_name'] = data['director_name'].replace(np.nan, 'unknown')

# In the 'genres' column, replace the '|' separators with whitespace so the
# genres are treated as separate words.
# FIX: Series.replace('|', ' ') only replaces *whole cell values* exactly
# equal to '|', so the original left the separators in place. Substring
# replacement needs the .str accessor; regex=False because '|' is a regex
# metacharacter (alternation) and would otherwise match everywhere.
data['genres'] = data['genres'].str.replace('|', ' ', regex=False)

# Lower-case the titles for searching simplicity.
data['movie_title'] = data['movie_title'].str.lower()

# Every movie_title ends with a stray special character appended by the
# source dump; inspect one example, then strip the last character.
data['movie_title'][0]

data['movie_title'] = data['movie_title'].str[:-1]

data.to_csv('data.csv', index=False)
movies.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: default:Python
#     language: python
#     name: conda-env-default-py
# ---

# # Use Julia with Studio Lab
#
# [![Open In Studio Lab](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/aws/studio-lab-examples/blob/main/custom-environments/julia/1-install-julia.ipynb)
#
# This notebook demonstrates installing a recent version of Julia (>=1.7.1)
# and an associated Julia kernel.

# ## 1. Downloading and installing Julia
#
# Julia is available either via direct download or via the Conda package
# manager. Here we demonstrate installing the official Julia release. It is
# self-contained: download, decompress, then link the Julia executable into a
# path recognized by our Anaconda environment.

# + tags=[]
# %system curl -OL https://julialang-s3.julialang.org/bin/linux/x64/1.7/julia-1.7.1-linux-x86_64.tar.gz
# %system tar xzvf julia-1.7.1-linux-x86_64.tar.gz -C $HOME
# %system ln -s $HOME/julia-1.7.1/bin/julia $CONDA_PREFIX/bin/julia
# %system rm julia-1.7.1-linux-x86_64.tar.gz
# -

# ## 2. Install the IJulia kernel
#
# Invoke the Julia executable from the previous step and use the Julia
# package manager to install the IJulia kernel.

# %system julia -e 'using Pkg; Pkg.add(["IJulia"]);'

# Register two kernel profiles (single-threaded and multi-threaded) with the
# default Anaconda environment so they show up in the SageMaker Studio Lab
# launcher.

# %system JUPYTER_DATA_DIR=$CONDA_PREFIX/share/jupyter/ \
#     julia -e 'using IJulia; installkernel("Julia (single-threaded)")'

# %system JUPYTER_DATA_DIR=$CONDA_PREFIX/share/jupyter/ \
#     julia -e 'using IJulia; installkernel("Julia (4 threads)", env=Dict("JULIA_NUM_THREADS"=>"4"))'

# ## 3. Using Julia
#
# With Julia installed and the IJulia kernel registered, you can create a new
# Julia notebook from the SageMaker Studio Lab launcher. You can also follow
# along with the example Julia notebook in this folder.
custom-environments/julia/1-install-julia.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Anaconda (Tensorflow)
#     language: python
#     name: tensorflow
# ---

# # Tensorflow MNIST Experts Tutorial
#
# Follows the Tensorflow "MNIST for experts" tutorial
# (https://www.tensorflow.org/tutorials/mnist/pros/), TF 1.x API.

# ## Setup

# +
import numpy as np
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data
# -

# ### Load MNIST Data

# +
FLAGS = 'MNIST_data/'

mnist = input_data.read_data_sets(FLAGS, one_hot=True)
# -

# ### Start Tensorflow InteractiveSession

sess = tf.InteractiveSession()

# ## Build a Softmax Regression Model

# ### Placeholders: flattened 28x28 images and one-hot labels.

x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

# ### Variables

# +
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

sess.run(tf.global_variables_initializer())
# -

# ### Predicted Class and Loss Function

y = tf.matmul(x, W) + b

# FIX: since TF 1.0, softmax_cross_entropy_with_logits must be called with
# named arguments (a positional call raises). The pre-1.0 positional order
# was (logits, labels), so the original (y, y_) maps to logits=y, labels=y_.
# The op applies softmax to the unnormalized prediction internally and sums
# across classes; tf.reduce_mean averages over the batch.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))

# ## Train the Model

# +
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

for i in range(1000):
    batch = mnist.train.next_batch(100)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
# -

# ### Evaluate the Model

# +
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
# -

# ## Build a Multilayer Convolutional Network

# ### Weight Initialization
# Small noise for symmetry breaking; slightly positive bias to avoid
# "dead" ReLU neurons.

# +
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
# -

# ### Convolution and Pooling
# Stride-1 SAME convolutions (output size == input size) and 2x2 max pooling.

# +
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
# -

# ### First Convolutional Layer
# 32 features per 5x5 patch; weight shape is [patch_h, patch_w, in_ch, out_ch].

W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])

# Reshape x to a 4-D tensor: [batch, height, width, channels].
x_image = tf.reshape(x, [-1, 28, 28, 1])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)  # 28x28 -> 14x14

# ### Second Convolutional Layer: 64 features per 5x5 patch.

# +
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)  # 14x14 -> 7x7
# -

# ### Densely Connected Layer
# 1024 neurons over the flattened 7x7x64 feature maps.

# +
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# -

# #### Dropout
# keep_prob placeholder lets us enable dropout during training and disable
# it during testing; tf.nn.dropout scales kept outputs automatically.

keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# ### Readout Layer

# +
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])

y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# -

# ### Train and Evaluate the Model
# Differences from the simple model: ADAM optimizer, keep_prob in feed_dict
# to control dropout, and logging every 100th iteration. 20,000 iterations
# may take a while (possibly up to half an hour).

# +
# FIX: keyword arguments, as above.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess.run(tf.global_variables_initializer())

for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x: batch[0], y_: batch[1], keep_prob: 1.0
        })
        print("step {0:d}, training accuracy {1:g}".format(i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

# Evaluate on the test set in batches to bound memory use.
batch_size = 100
test_accuracy = np.zeros((int(mnist.test.num_examples / batch_size), 1), dtype=np.float32)
for i in range(int(mnist.test.num_examples / batch_size)):
    batch = mnist.test.next_batch(batch_size)
    test_accuracy[i] = accuracy.eval(feed_dict={
        x: batch[0], y_: batch[1], keep_prob: 1.0
    })

print("test accuracy {0:g}".format(np.mean(test_accuracy)))
# -
mnist_experts_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Logistic-regression baselines (L2, L1, elastic net) over averaged Word2Vec
# review embeddings, with SMOTE oversampling of the minority rating classes.

# +
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score

df_train = pd.read_csv('../input/train.csv')
train = df_train['review'].to_numpy()
df_y_train = pd.read_csv('../input/y_train.csv')
y_train = df_y_train['rate'].to_numpy()
df_val = pd.read_csv('../input/val.csv')
val = df_val['review'].to_numpy()
df_y_val = pd.read_csv('../input/y_val.csv')
y_val = df_y_val['rate'].to_numpy()

import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
from gensim.test.utils import datapath
from gensim import utils
from pathlib import Path
import gensim

model = gensim.models.Word2Vec.load('../embedding/word2vec.model')

# Represent each review as the mean of its in-vocabulary 50-d word vectors.
ave_vec = np.zeros((train.shape[0], 50), dtype='float')
word_vectors = model.wv
len(word_vectors.vocab)
for k, seq in enumerate(train):
    tokens = gensim.utils.simple_preprocess(seq)
    for i in tokens:
        if i in word_vectors.vocab:
            ave_vec[k] += model.wv[i]
    # FIX: guard against reviews whose preprocessing yields no tokens --
    # the original divided by len(tokens) unconditionally (ZeroDivisionError).
    if tokens:
        ave_vec[k] /= len(tokens)
print(ave_vec.shape)
# -

word_vectors = model.wv
len(word_vectors.vocab)

# Same bag-of-embeddings representation for the validation set.
ave_vec_val = np.zeros((val.shape[0], 50), dtype='float')
for k, seq in enumerate(val):
    tokens = gensim.utils.simple_preprocess(seq)
    for i in tokens:
        if i in word_vectors.vocab:
            ave_vec_val[k] += model.wv[i]
    if tokens:  # FIX: same zero-token guard as above
        ave_vec_val[k] /= len(tokens)
print(ave_vec_val.shape)

# + 
# L2-penalized logistic regression over a log-spaced C grid, trained on
# SMOTE-resampled data; scored with macro F1 on the original train/val sets.
from imblearn.over_sampling import SMOTE  # doctest: +NORMALIZE_WHITESPACE
from collections import Counter

n_alphas = 36
C = np.exp(np.linspace(-5, 10, n_alphas))
print(Counter(y_train))
print(Counter(y_val))

stra = {5.0: 15214, 4.0: 10000, 3.0: 2000, 2.0: 1000, 1.0: 1000}
sm = SMOTE(random_state=42, sampling_strategy=stra)
X_res, y_res = sm.fit_resample(ave_vec, y_train)
print(Counter(y_res))

train_score = []
val_score = []
for j, i in enumerate(C):
    m = LogisticRegression(penalty='l2', C=i, class_weight='balanced', n_jobs=-1).fit(X_res, y_res)
    train_pre = m.predict(ave_vec)
    val_pre = m.predict(ave_vec_val)
    train_score.append(f1_score(y_train, train_pre, average='macro'))
    val_score.append(f1_score(y_val, val_pre, average='macro'))
# -

from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix
# FIX: sklearn.externals.joblib was deprecated in 0.21 and removed in 0.23;
# import the standalone joblib package instead.
import joblib

ax = plt.gca()
ax.plot(C, train_score, label='train')
ax.plot(C, val_score, label='val')
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim())
plt.xlabel('C')
plt.ylabel('weights')
plt.title('logistic regression with L2 penalty(Word2Vec)')
plt.axis('tight')
plt.legend()
plt.show()

# Refit at the C with the best validation macro-F1 and report diagnostics.
C_opt = C[np.argmax(val_score)]
m_opt = LogisticRegression(penalty='l2', C=C_opt, class_weight='balanced', n_jobs=-1).fit(X_res, y_res)
print(f1_score(y_val, m_opt.predict(ave_vec_val), average='macro'))
mat = confusion_matrix(y_val, m_opt.predict(ave_vec_val))
print('confusion matrix is: \n', mat)
print('f1 of each class:\n', f1_score(y_val, m_opt.predict(ave_vec_val), average=None))
joblib.dump(m_opt, '../input/model_w2v/logistic_L2.pkl')

# +
# Same sweep with the L1 penalty (saga solver).
from imblearn.over_sampling import SMOTE  # doctest: +NORMALIZE_WHITESPACE
from collections import Counter

n_alphas = 36
C = np.exp(np.linspace(-5, 10, n_alphas))
print(Counter(y_train))
print(Counter(y_val))

stra = {5.0: 15214, 4.0: 10000, 3.0: 2000, 2.0: 1000, 1.0: 1000}
sm = SMOTE(random_state=42, sampling_strategy=stra)
X_res, y_res = sm.fit_resample(ave_vec, y_train)
print(Counter(y_res))

train_score = []
val_score = []
for j, i in enumerate(C):
    m = LogisticRegression(penalty='l1', C=i, class_weight='balanced', n_jobs=-1,
                           solver='saga', tol=0.001).fit(X_res, y_res)
    train_pre = m.predict(ave_vec)
    val_pre = m.predict(ave_vec_val)
    train_score.append(f1_score(y_train, train_pre, average='macro'))
    val_score.append(f1_score(y_val, val_pre, average='macro'))
# -

# SMOTE with L1
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix
import joblib  # FIX: see note above on sklearn.externals.joblib

ax = plt.gca()
ax.plot(C, train_score, label='train')
ax.plot(C, val_score, label='val')
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim())
plt.xlabel('C')
plt.ylabel('F1-macro')
plt.title('macro score of training set and validation set')
plt.axis('tight')
plt.legend()
plt.show()

C_opt = C[np.argmax(val_score)]
m_opt = LogisticRegression(penalty='l1', C=C_opt, class_weight='balanced', n_jobs=-1,
                           solver='saga', tol=0.001).fit(X_res, y_res)
print(f1_score(y_val, m_opt.predict(ave_vec_val), average='macro'))
mat = confusion_matrix(y_val, m_opt.predict(ave_vec_val))
print('confusion matrix is: \n', mat)
print('F1 score of each class:\n', f1_score(y_val, m_opt.predict(ave_vec_val), average=None))
joblib.dump(m_opt, '../input/model_w2v/logistic_L1.pkl')

# elastic net: 2-D grid over l1_ratio (rows) x C (columns).
from imblearn.over_sampling import SMOTE  # doctest: +NORMALIZE_WHITESPACE
from collections import Counter

n_alphas = 36
C = np.exp(np.linspace(-5, 10, n_alphas))
print(Counter(y_train))
print(Counter(y_val))

stra = {5.0: 15214, 4.0: 10000, 3.0: 2000, 2.0: 1000, 1.0: 1000}
sm = SMOTE(random_state=42, sampling_strategy=stra)
X_res, y_res = sm.fit_resample(ave_vec, y_train)
print(Counter(y_res))

a = np.linspace(0.2, 0.7, 6)
train_score = [[], [], [], [], [], []]
val_score = [[], [], [], [], [], []]
for n, u in enumerate(a):
    for j, i in enumerate(C):
        m = LogisticRegression(penalty='elasticnet', C=i, class_weight='balanced', n_jobs=-1,
                               l1_ratio=u, solver='saga').fit(X_res, y_res)
        train_pre = m.predict(ave_vec)
        val_pre = m.predict(ave_vec_val)
        train_score[n].append(f1_score(y_train, train_pre, average='macro'))
        val_score[n].append(f1_score(y_val, val_pre, average='macro'))

# Flattened argmax over the 6x36 grid: //36 -> l1_ratio row, %36 -> C column.
elastic_opt = LogisticRegression(penalty='elasticnet', C=C[(np.argmax(val_score)) % 36],
                                 class_weight='balanced', n_jobs=-1,
                                 l1_ratio=a[(np.argmax(val_score)) // 36], solver='saga')
elastic_opt.fit(X_res, y_res)

print('the best macro score on validation set:',
      f1_score(y_val, elastic_opt.predict(ave_vec_val), average='macro'))
mat = confusion_matrix(y_val, elastic_opt.predict(ave_vec_val))
print('confusion matrix is: \n', mat)
print('accuracy of each class:\n', f1_score(y_val, elastic_opt.predict(ave_vec_val), average=None))

train_score = np.array(train_score)
val_score = np.array(val_score)

ax1 = plt.gca()
# Plot the score curves along C for the best l1_ratio row.
ax1.plot(C, train_score[(np.argmax(val_score)) // 36], label='train')
ax1.plot(C, val_score[(np.argmax(val_score)) // 36], label='val')
ax1.set_xscale('log')
plt.xlabel('C')
plt.ylabel('macro score')
plt.title('macro score of training set and validation set')
plt.axis('tight')
plt.legend()
plt.show()

val_score[(np.argmax(val_score)) // 36][(np.argmax(val_score)) % 36]

joblib.dump(elastic_opt, '../input/model_w2v/logistic_elastic.pkl')
w2v_train/word2vec1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"is_executing": true} import time from pprint import pprint from work import work from cloudmesh.common.StopWatch import StopWatch from cloudmesh.common.debug import VERBOSE a = {"value": 2, "name": "John", "name1": "Carlos", "name2": "John", "name3": "John", "name4": "John", "name5": "John"} StopWatch.start("Hello World") print("Hello World") time.sleep(0.2) StopWatch.stop("Hello World") StopWatch.start("xyz") time.sleep(0.1) print("xyz") StopWatch.stop("xyz") StopWatch.benchmark() print(a) pprint(a) # VERBOSE(a) work("hallo")
docs/report/su21-reu-369/project/code/a.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Pymaceuticals drug-trial analysis: tumor response, metastatic spread,
# survival, and summary percent-change bar chart for four treatments.

# +
# Dependencies and Setup
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy.stats import sem

# Hide warning messages in notebook
import warnings
warnings.filterwarnings('ignore')

# Files to load
mouse_drug_data_to_load = "data/mouse_drug_data.csv"
clinical_trial_data_to_load = "data/clinicaltrial_data.csv"

# Read the mouse/drug data and the clinical trial data
mouse_data_df = pd.read_csv(mouse_drug_data_to_load)
clinical_trial_df = pd.read_csv(clinical_trial_data_to_load)

# Combine the data into a single dataset keyed on Mouse ID
combined_data_df = pd.merge(mouse_data_df, clinical_trial_df, how="outer", on="Mouse ID")

combined_data_df.head()
# -

clinical_trial_df.head()

mouse_data_df.head()

# ## Tumor Response to Treatment

# +
# Mean tumor volume grouped by drug and timepoint
mean_tumor_volume = combined_data_df.groupby(['Drug', 'Timepoint']).mean()["Tumor Volume (mm3)"]

mean_df = pd.DataFrame({"Tumor Volume (mm3)": mean_tumor_volume})
mean_df = mean_df.reset_index()
mean_df.head()
# -

# +
# Standard error of tumor volumes grouped by drug and timepoint
sem_volume = combined_data_df.groupby(['Drug', 'Timepoint']).sem()["Tumor Volume (mm3)"]

sem_df = pd.DataFrame({"Tumor Volume (mm3)": sem_volume})
sem_df = sem_df.reset_index()
sem_df.head(60)
# -

# +
# Pivot to Timepoint index with one column per drug
mean_pivot = mean_df.pivot(index='Timepoint', columns="Drug")["Tumor Volume (mm3)"]
mean_pivot.head()
# -

# +
sem_pivot = sem_df.pivot(index='Timepoint', columns="Drug")["Tumor Volume (mm3)"]
sem_pivot.head()
# -

# +
# Tumor response plot with error bars -- one line per drug of interest
plt.errorbar(mean_pivot.index, mean_pivot["Capomulin"], yerr=sem_pivot["Capomulin"],
             fmt='-', marker='o', color="r", alpha=0.5, label="Capomulin")
plt.errorbar(mean_pivot.index, mean_pivot["Infubinol"], yerr=sem_pivot["Infubinol"],
             fmt='-', marker='o', color="b", alpha=0.5, label="Infubinol")
plt.errorbar(mean_pivot.index, mean_pivot["Ketapril"], yerr=sem_pivot["Ketapril"],
             fmt='-', marker='o', color="g", alpha=0.5, label="Ketapril")
plt.errorbar(mean_pivot.index, mean_pivot["Placebo"], yerr=sem_pivot["Placebo"],
             fmt='-', marker='o', color="k", alpha=0.5, label="Placebo")

plt.xlabel("Time (Days)")
plt.ylabel("Tumor Volume (mm3)")
plt.title("Tumor Response To Treatment")
plt.legend(loc="best", fontsize="small", fancybox=True)
plt.grid()

# FIX: save *before* plt.show() -- show() finalizes/clears the current
# figure, so the original savefig-after-show wrote an empty image.
plt.savefig("Images/Treatment.png")
plt.show()
# -

# ## Metastatic Response to Treatment

# +
# Mean metastatic-site count grouped by drug and timepoint
mean_met = combined_data_df.groupby(["Drug", "Timepoint"]).mean()["Metastatic Sites"]

mean_met_df = pd.DataFrame({"Metastatic Sites": mean_met})
mean_met_df = mean_met_df.reset_index()  # reset index so pivot works
mean_met_df.head()
# -

# +
# Standard error of metastatic sites grouped by drug and timepoint
sem_met = combined_data_df.groupby(['Drug', 'Timepoint']).sem()["Metastatic Sites"]

sem_met_df = pd.DataFrame({"Metastatic Sites": sem_met})
sem_met_df = sem_met_df.reset_index()
sem_met_df.head()
# -

# +
mean_met_pivot = mean_met_df.pivot(index='Timepoint', columns="Drug")["Metastatic Sites"]
mean_met_pivot.head()
# -

# +
sem_met_pivot = sem_met_df.pivot(index='Timepoint', columns="Drug")["Metastatic Sites"]
sem_met_pivot.head()
# -

# +
# FIX: the original legend labelled all four lines "Capomulin"; each line
# now carries its own drug name.
plt.errorbar(mean_met_pivot.index, mean_met_pivot["Capomulin"], yerr=sem_met_pivot["Capomulin"],
             fmt='-', marker='o', color="r", alpha=0.5, label="Capomulin")
plt.errorbar(mean_met_pivot.index, mean_met_pivot["Infubinol"], yerr=sem_met_pivot["Infubinol"],
             fmt='-', marker='o', color="b", alpha=0.5, label="Infubinol")
plt.errorbar(mean_met_pivot.index, mean_met_pivot["Ketapril"], yerr=sem_met_pivot["Ketapril"],
             fmt='-', marker='o', color="g", alpha=0.5, label="Ketapril")
plt.errorbar(mean_met_pivot.index, mean_met_pivot["Placebo"], yerr=sem_met_pivot["Placebo"],
             fmt='-', marker='o', color="k", alpha=0.5, label="Placebo")

plt.xlabel("Treatment Duration (Days)")
plt.ylabel("Met. Sites")
plt.title("Metastatic Spread During Treatment")
plt.legend(loc="best", fontsize="small", fancybox=True)
plt.grid()

plt.savefig("Images/Spread.png")
plt.show()
# -

# ![Metastatic Spread During Treatment](../Images/spread.png)

# ## Survival Rates

# Count of surviving mice grouped by drug and timepoint
count_mouse = combined_data_df.groupby(["Drug", "Timepoint"]).count()["Mouse ID"]
count_df = pd.DataFrame({"Mouse Count": count_mouse})
count_df = count_df.reset_index()
count_df.head()

count_pivot = count_df.pivot(index='Timepoint', columns="Drug")["Mouse Count"]
count_pivot.head()

# +
# NOTE(review): the y-axis says "Survival Rate (%)" but raw mouse counts are
# plotted, as in the original; divide by the timepoint-0 count if a true
# percentage is wanted.
plt.plot(count_pivot.index, count_pivot["Capomulin"], marker='o', color="r", alpha=0.5, label="Capomulin")
plt.plot(count_pivot.index, count_pivot["Infubinol"], marker='o', color="b", alpha=0.5, label="Infubinol")
plt.plot(count_pivot.index, count_pivot["Ketapril"], marker='o', color="g", alpha=0.5, label="Ketapril")
plt.plot(count_pivot.index, count_pivot["Placebo"], marker='o', color="k", alpha=0.5, label="Placebo")

plt.xlabel("Time (Days)")
plt.ylabel("Survival Rate (%)")
plt.title("Survival Duration Time")
plt.legend(loc="best", fontsize="small", fancybox=True)
plt.grid()

plt.savefig("Images/Survival.png")
plt.show()
# -

# ![Metastatic Spread During Treatment](../Images/survival.png)

# ## Summary Bar Graph

# +
# Percent change of mean tumor volume between the first and last timepoint
Capomulin_percent = (mean_pivot["Capomulin"].iloc[9] - mean_pivot["Capomulin"].iloc[0]) / mean_pivot["Capomulin"].iloc[0] * 100
capomulin_df = Capomulin_percent
capomulin_df
# -

# +
infubinol_percent = (mean_pivot["Infubinol"].iloc[9] - mean_pivot["Infubinol"].iloc[0]) / mean_pivot["Infubinol"].iloc[0] * 100
infubinol_df = infubinol_percent
infubinol_df
# -

# +
ketapril_percent = (mean_pivot["Ketapril"].iloc[9] - mean_pivot["Ketapril"].iloc[0]) / mean_pivot["Ketapril"].iloc[0] * 100
ketapril_df = ketapril_percent
ketapril_df
# -

# +
placebo_percent = (mean_pivot["Placebo"].iloc[9] - mean_pivot["Placebo"].iloc[0]) / mean_pivot["Placebo"].iloc[0] * 100
placebo_df = placebo_percent
placebo_df
# -

# +
# Collect the four percent changes into one Series for the bar chart
percent_tuple = {'Capomulin': capomulin_df,
                 'Infubinol': infubinol_df,
                 'Ketapril': ketapril_df,
                 'Placebo': placebo_df}
percentchange_tumor_volume = pd.Series(percent_tuple)

drugs_index = percentchange_tumor_volume.keys()

summary_bar = plt.subplot()
x_axis = np.arange(0, len(drugs_index))

# Center the tick labels under the edge-aligned bars
tick_locations = []
for x in x_axis:
    tick_locations.append(x + 0.5)
plt.xticks(tick_locations, drugs_index)

# Red = tumor grew (>= 0%), green = tumor shrank
colors = []
for value in percentchange_tumor_volume:
    if value >= 0:
        colors.append('r')
    else:
        colors.append('g')

percent_change = summary_bar.bar(x_axis, percentchange_tumor_volume, color=colors, align='edge')

plt.title("Tumor Change Over 45 Days Treatment")
plt.ylabel("% Tumor Volume Change")
plt.xlim(-0.25, len(drugs_index))
plt.ylim(-30, max(percentchange_tumor_volume) + 20)
plt.grid()

plt.savefig("Images/change.png")

# FIX: the original called fig.show(), but no variable named `fig` was ever
# defined (NameError); use plt.show() on the current figure instead.
plt.show()
# -

# ![Metastatic Spread During Treatment](../Images/change.png)

percentchange_tumor_volume

drugs_index
Pymaceuticals/pymaceuticals_starter.ipynb
# ## Setting Up Kinesis Stream # Before starting this lab first you need to set up a kinesis stream in your amazon account and push bitcoin data from coindesk api. Do the following steps to set it up: # 1. Login to your AWS account and launch an EC2 instance. The smallest unit is fine as we will launch a very small task. If you are eligable for free tier you can also use it. # 2. Create a kinesis stream with the name "bitcoin-exchange-rate" # 3. Get an aws access key and secret key and make sure it has access to kinesis # 4. Configure your EC2's aws cli with these keys. To do so you can just type aws configure at the EC2 terminal and fill in the keys # 4. Launch the push_data_to_kinesis.py script (from this folder) as a process or via screen (so it always runs). Solve the required pip dependencies if required (i.e. install boto3) from pyspark.sql.types import * from pyspark.sql.functions import from_json, to_timestamp from pyspark.sql.functions import window from pyspark.sql.functions import avg # ### Reading From Kinesis Stream # We will start our streaming application buy reading the data from Kinesis. Kinesis is a message queue, similar to Kafka, provided by AWS. You can read from Kinesis stream in the following way: kinesisDF = spark \ .readStream \ .format("kinesis") \ .option("streamName", "bitcoin-exchange-rate") \ .option("initialPosition", "earliest") \ .option("region", "us-west-2") \ .option("awsAccessKey", 'awsAccessKey') \ .option("awsSecretKey", 'awsSecretKey') \ .load() # Its a good idea to clear up old files, streaming application produces a lot of temp files, and there is a limit to how many temp files you can have for a free Databricks account. dbutils.fs.rm('dbfs:/SOME_CHECKPOINT_DIRECTORY/', True) dbutils.fs.rm(('dbfs:/tmp/'), True) # We will enforce a schema as it is more efficient, if we leave it blank Spark can figure out the Schema as well! 
# Explicit schema for the JSON payload pushed onto the stream: a timestamp
# string plus one float exchange rate per currency.
pythonSchema = (
    StructType()
    .add("timestamp", StringType())
    .add("EUR", FloatType())
    .add("USD", FloatType())
    .add("GBP", FloatType())
)

# Now we will read from the stream into our streaming dataframe!
# Step 1: the Kinesis `data` column is raw bytes — cast it to a string.
rawJsonDF = kinesisDF.selectExpr("cast (data as STRING) jsonData")
# Step 2: parse the JSON string against our schema into a struct column.
parsedDF = rawJsonDF.select(from_json("jsonData", pythonSchema).alias("bitcoin"))
# Step 3: flatten the struct so each field becomes a top-level column.
bitcoinDF = parsedDF.select("bitcoin.*")

# We will also convert the timestamp column to the timestamp type so we can
# query with datetime objects in Python.
bitcoinDF = bitcoinDF.withColumn('timestamp', to_timestamp(bitcoinDF.timestamp, "yyyy-MM-dd HH:mm:ss"))

display(bitcoinDF)

# ### Querying!
# Now you can use all the things you learnt previously from Spark SQL! For example, you can groupBy certain attributes and aggregate, filter, or select as you wish! <br/>
# We haven't been introduced to the concept of windowing, which we will briefly zoom in on now.

# A window function can also be applied to bucketize rows into one or more time windows given a timestamp-specifying column. For that we will use the window groupBy function (pyspark.sql.functions.window) <br/>
# You can call the window groupBy function in the following way: __window(timeColumn, windowDuration, slideDuration=None, startTime=None)__.
# The definition of slide interval and window interval are as follows:
# * Window Duration: how far back in time the windowed transformation goes
# * Slide Duration: how often a windowed transformation is computed

# Average EUR rate over 10-minute windows, recomputed every 5 minutes
# (overlapping windows, since slide < window duration).
windowedCounts = bitcoinDF.groupBy(
    window(bitcoinDF.timestamp, "10 minutes", "5 minutes").alias('time_window')
).agg(avg(bitcoinDF.EUR).alias('window_avg_euro_rate'))

display(windowedCounts)

# To read the stream from memory we will set up a table in our memory called
# bitcoin_window.  "complete" output mode re-emits the full aggregate table
# on every trigger.
query = windowedCounts.writeStream.format("memory").queryName("bitcoin_window").outputMode("complete").start()

# You can then take your stream to a dataframe using SQL queries in the following way:
df = spark.sql('select time_window.start, time_window.end, window_avg_euro_rate from bitcoin_window')

# We can query on top of live data for the average euro rate for the last hour.
from datetime import datetime, timedelta
from pyspark.sql.functions import avg

# BUG FIX (naming): the original called this `timedelta_ten_mins` even though
# it is a 60-minute cutoff — renamed to match what it actually is.
one_hour_ago = datetime.now() - timedelta(minutes=60)

last_hour_rate_query = df.filter(df.start > one_hour_ago).select('window_avg_euro_rate').agg(avg('window_avg_euro_rate').alias('rate')).collect()

print(last_hour_rate_query[0].rate)

# ## Exercise
# Try writing a query to window the maximum bitcoin rate per ten minutes for USD, EUR, and GBP. Show them all in a line chart.

# ## Challenge
# Write a smart algorithm to trade bitcoin automatically. Start with a hypothetical amount (e.g. ten bitcoin) and trade it between currencies and see if you can automatically increase your net worth by trading.
Modules/04 Stream Processing/Labs/Lab 8 - Structured Streaming Kinesis.ipynb