code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RajamannarAanjaram/TSAI-Assignment/blob/master/%2012%20Dawn%20of%20Transformers/%20SpatialTransformers.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="LL7CHiUUEBcV"
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
plt.ion() # interactive mode
# + id="8ho58yDAPB7R"
import warnings
warnings.filterwarnings('ignore')  # keep torch/torchvision deprecation noise out of the notebook output
# + colab={"base_uri": "https://localhost:8080/", "height": 122, "referenced_widgets": ["20cea94988f541e1bc9c983d262bae57", "3aa414248e8942a0aba53a9099c44bd3", "9eed6fa1e53e4cef86820d5d1468446f", "4274f8bedaab422086a8c48791e7ca51", "a80d33235054411a98b56c1306cb7b96", "4d62dbdb0c89451eb9a1a62cd5a379d0", "ad839d9792264b888d74a2b1c73e7c5c", "e90a778b534b4e08a33c1e4c131ffd7f"]} id="IKYwQV3UHNdF" outputId="e9021909-ca99-4f15-ab01-c55dd8ea7e92"
# Compute the per-channel mean/std of the CIFAR-10 training images (0-255
# scale) and print them rescaled to [0, 1] for use in transforms.Normalize.
train_data = datasets.CIFAR10('./cifar10_data', train=True, download=True)
images = []
for idx in range(len(train_data)):
    images.append(np.asarray(train_data[idx][0]))
x = np.concatenate(images)
train_mean = x.mean(axis=(0, 1))
train_std = x.std(axis=(0, 1))
print(train_mean / 255, train_std / 255)
# + colab={"base_uri": "https://localhost:8080/", "height": 104, "referenced_widgets": ["871c7e777bb74cddabe570c1933f7d60", "3ff1323fa90149f1b2bf04170c29b93f", "e3ba7ee786274ac991e23dc4f21a9647", "886167cc82cf4e7eb85f57a2d8b08a0f", "<KEY>", "240c611a9bf8489a9df1efa90015f267", "0e812513e50b40aa8319be70dc60f4eb", "86f4cef8833544b5aca5eb03fb625eaa"]} id="lrU73MxqF2pZ" outputId="87f9ad4c-2f98-42af-d7b0-a14fb566a5b0"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Shared preprocessing: tensor conversion + normalization with the CIFAR-10
# per-channel statistics computed above.
_cifar_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.49139968, 0.48215841, 0.44653091),
                         (0.24703223, 0.24348513, 0.26158784)),
])

# Training dataset
train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10(root='.', train=True, download=True,
                     transform=_cifar_transform),
    batch_size=64, shuffle=True, num_workers=4)
# Test dataset
test_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10(root='.', train=False, transform=_cifar_transform),
    batch_size=64, shuffle=True, num_workers=4)
# + id="s_Eb6TT0IR-N"
class Net(nn.Module):
    """CNN classifier with a spatial-transformer (STN) front end for 32x32 RGB input."""

    def __init__(self):
        super(Net, self).__init__()
        # Classification backbone.
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)   # 32x32x3 -> 28x28x10
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)  # 14x14x10 -> 10x10x20
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(500, 50)                  # 20 * 5 * 5 = 500 after pooling
        self.fc2 = nn.Linear(50, 10)
        # Spatial transformer localization network.
        self.localization = nn.Sequential(
            nn.Conv2d(3, 8, kernel_size=7),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True),
            nn.Conv2d(8, 10, kernel_size=5),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True)
        )
        # Regressor for the 2x3 affine matrix.
        self.fc_loc = nn.Sequential(
            nn.Linear(10 * 4 * 4, 32),
            nn.ReLU(True),
            nn.Linear(32, 3 * 2)
        )
        # Start from the identity transform so training begins with the
        # input image untouched.
        self.fc_loc[2].weight.data.zero_()
        self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))

    def stn(self, x):
        """Warp the input batch with the affine transform predicted by fc_loc."""
        feats = self.localization(x)
        feats = feats.view(-1, 10 * 4 * 4)
        theta = self.fc_loc(feats).view(-1, 2, 3)
        grid = F.affine_grid(theta, x.size())
        return F.grid_sample(x, grid)

    def forward(self, x):
        # Transform the input first, then run the usual conv/fc pipeline.
        x = self.stn(x)
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 500)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc2(x), dim=1)
# + colab={"base_uri": "https://localhost:8080/"} id="znXCy75FKJRR" outputId="7f6d46f8-dfd4-4013-bd84-7e199e02f627"
model = Net().to(device)
# `summary` comes from the third-party `torchsummary` package, which this
# notebook never imports, so this line raised NameError as written.  Guard it
# and fall back to the plain module repr instead of aborting the run.
try:
    summary(model, input_size=(3, 32, 32))
except NameError:
    print(model)
# + colab={"base_uri": "https://localhost:8080/"} id="HTEvcXJXPxNM" outputId="eacb20b1-5d13-4154-8840-10af525ef4a3"
model
# + id="EIqF3uVeI-19"
optimizer = optim.SGD(model.parameters(), lr=0.01)


def train(epoch):
    """Run one training epoch over train_loader, logging the loss every 500 batches."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % 500 == 0:
            seen = batch_idx * len(data)
            total = len(train_loader.dataset)
            pct = 100. * batch_idx / len(train_loader)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, seen, total, pct, loss.item()))
#
# A simple test procedure to measure the STN model's performance on the
# CIFAR-10 test set (the original comment said MNIST, but the loaders above
# serve CIFAR-10).
#
def test():
    """Evaluate the model on test_loader and print average loss and accuracy."""
    with torch.no_grad():
        model.eval()
        test_loss = 0
        correct = 0
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # Sum the batch losses so dividing by the dataset size below gives
            # the true mean.  `size_average=False` is deprecated in modern
            # PyTorch; `reduction='sum'` is the supported equivalent.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            # Index of the max log-probability = predicted class.
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
        test_loss /= len(test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'
              .format(test_loss, correct, len(test_loader.dataset),
                      100. * correct / len(test_loader.dataset)))
# + id="cVeOVmyiJJeS"
def convert_image_np(inp):
    """Convert a normalized CHW Tensor to a de-normalized HWC numpy image.

    Undoes the transforms.Normalize applied by the data loaders and clips the
    result into [0, 1] for display.
    """
    inp = inp.numpy().transpose((1, 2, 0))
    # Use the CIFAR-10 statistics the loaders normalize with; the previous
    # ImageNet constants (0.485/0.456/0.406, 0.229/0.224/0.225) de-normalized
    # these images with the wrong mean/std.
    mean = np.array([0.49139968, 0.48215841, 0.44653091])
    std = np.array([0.24703223, 0.24348513, 0.26158784])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    return inp
# Visualize a batch of inputs next to their STN-transformed versions so the
# effect of the learned transform can be inspected after training.
def visualize_stn():
    with torch.no_grad():
        # Grab one batch from the test loader.
        batch = next(iter(test_loader))[0].to(device)
        raw = batch.cpu()
        warped = model.stn(batch).cpu()

        in_grid = convert_image_np(torchvision.utils.make_grid(raw))
        out_grid = convert_image_np(torchvision.utils.make_grid(warped))

        # Plot the results side-by-side.
        fig, axes = plt.subplots(1, 2)
        axes[0].imshow(in_grid)
        axes[0].set_title('Dataset Images')
        axes[1].imshow(out_grid)
        axes[1].set_title('Transformed Images')
# + colab={"base_uri": "https://localhost:8080/", "height": 654} id="sGlfCagJJSaD" outputId="733c6a43-bec5-4a73-9956-76faf3aa54df"
# Train for 50 epochs, evaluating on the test set after each one.
for epoch in range(1, 50 + 1):
    train(epoch)
    test()
# + id="auQ8aeTXJWx3"
| assignment_12/Saptial Transformers/experiments/ SpatialTransformers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="bhHMDGAF8WP-" colab_type="text"
# # IMDB
# + id="h2KNQiHo8WQA" colab_type="code" colab={}
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')  # silence fastai/torch deprecation chatter in the notebook output
# + id="Ts7Lfckk8WQI" colab_type="code" colab={}
from fastai.text import *
# + [markdown] id="tNJtLxac8WQN" colab_type="text"
# ## Preparing the data
# + [markdown] id="SdFa7Zsa8WQP" colab_type="text"
# First let's download the dataset we are going to study. The [dataset](http://ai.stanford.edu/~amaas/data/sentiment/) has been curated by <NAME> et al. and contains a total of 100,000 reviews on IMDB. 25,000 of them are labelled as positive and negative for training, another 25,000 are labelled for testing (in both cases they are highly polarized). The remaining 50,000 is additional unlabelled data (but we will find a use for it nonetheless).
#
# We'll begin with a sample we've prepared for you, so that things run quickly before going over the full dataset.
# + id="JIi7kShqhM7b" colab_type="code" outputId="72f42998-e88b-47d9-b102-f21935555db9" executionInfo={"status": "ok", "timestamp": 1571746865991, "user_tz": -60, "elapsed": 323, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00279122503348686172"}} colab={"base_uri": "https://localhost:8080/", "height": 53}
# Download the small IMDB sample bundled with fastai and list its contents.
path = untar_data(URLs.IMDB_SAMPLE)
path.ls()
# + [markdown] id="MRrnDP6j8WQZ" colab_type="text"
# It only contains one csv file, let's have a look at it.
# + id="HjMSI6r28WQc" colab_type="code" outputId="3d589692-755c-4f7f-dbc6-d0b81a774824" executionInfo={"status": "ok", "timestamp": 1571746873901, "user_tz": -60, "elapsed": 482, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00279122503348686172"}} colab={"base_uri": "https://localhost:8080/", "height": 197}
df = pd.read_csv(path/'texts.csv')
df.head()
# + id="BKoe84pH8WQi" colab_type="code" outputId="b72a33dd-b789-42d6-d84a-f1a164a334c3" colab={"base_uri": "https://localhost:8080/", "height": 55} executionInfo={"status": "ok", "timestamp": 1571746873903, "user_tz": -60, "elapsed": 306, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00279122503348686172"}}
df['text'][1]
# + [markdown] id="07NDSEER8WQo" colab_type="text"
# It contains one line per review, with the label ('negative' or 'positive'), the text and a flag to determine if it should be part of the validation set or the training set. If we ignore this flag, we can create a DataBunch containing this data in one line of code:
# + id="WFSa-tDD8WQp" colab_type="code" colab={}
data_lm = TextDataBunch.from_csv(path, 'texts.csv')
# + [markdown] id="6wzGIOSW8WQv" colab_type="text"
# By executing this line a process was launched that took a bit of time. Let's dig a bit into it. Images could be fed (almost) directly into a model because they're just a big array of pixel values that are floats between 0 and 1. A text is composed of words, and we can't apply mathematical functions to them directly. We first have to convert them to numbers. This is done in two different steps: tokenization and numericalization. A `TextDataBunch` does all of that behind the scenes for you.
#
# Before we delve into the explanations, let's take the time to save the things that were calculated.
# + id="hSxbbcQ08WQw" colab_type="code" colab={}
# Cache the tokenized/numericalized data to disk (defaults to data_save.pkl).
data_lm.save()
# + [markdown] id="c93jSHZO8WQ1" colab_type="text"
# Next time we launch this notebook, we can skip the cell above that took a bit of time (and that will take a lot more when you get to the full dataset) and load those results like this:
# + id="o6ITDnfD8WQ2" colab_type="code" colab={}
data = load_data(path)
# + [markdown] id="m8GNWdFb8WQ7" colab_type="text"
# ### Tokenization
# + [markdown] id="AYiYT3Nj8WQ8" colab_type="text"
# The first step of processing we make the texts go through is to split the raw sentences into words, or more exactly tokens. The easiest way to do this would be to split the string on spaces, but we can be smarter:
#
# - we need to take care of punctuation
# - some words are contractions of two different words, like isn't or don't
# - we may need to clean some parts of our texts, if there's HTML code for instance
#
# To see what the tokenizer had done behind the scenes, let's have a look at a few texts in a batch.
# + id="E77e1RUu8WQ-" colab_type="code" outputId="a117e2e9-bf38-4dcc-a9b2-456490e24a61" colab={"base_uri": "https://localhost:8080/", "height": 283} executionInfo={"status": "ok", "timestamp": 1571746927688, "user_tz": -60, "elapsed": 216, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00279122503348686172"}}
# Rebuild as a classification DataBunch and preview a few tokenized examples.
data = TextClasDataBunch.from_csv(path, 'texts.csv')
data.show_batch()
# + [markdown] id="mzXKvw5T8WRF" colab_type="text"
# The texts are truncated at 100 tokens for more readability. We can see that it did more than just split on space and punctuation symbols:
# - the "'s" are grouped together in one token
# - the contractions are separated like this: "did", "n't"
# - content has been cleaned for any HTML symbol and lower cased
# - there are several special tokens (all those that begin by xx), to replace unknown tokens (see below) or to introduce different text fields (here we only have one).
# + [markdown] id="7b0xB2zN8WRG" colab_type="text"
# ### Numericalization
# + [markdown] id="jvn35V3y8WRI" colab_type="text"
# Once we have extracted tokens from our texts, we convert to integers by creating a list of all the words used. We only keep the ones that appear at least twice with a maximum vocabulary size of 60,000 (by default) and replace the ones that don't make the cut by the unknown token `UNK`.
#
# The correspondance from ids to tokens is stored in the `vocab` attribute of our datasets, in a dictionary called `itos` (for int to string).
# + id="PjSVg0Q28WRK" colab_type="code" outputId="c3762d67-daed-4be5-96c5-70137c457d19" colab={"base_uri": "https://localhost:8080/", "height": 198} executionInfo={"status": "ok", "timestamp": 1571746928203, "user_tz": -60, "elapsed": 501, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00279122503348686172"}}
data.vocab.itos[:10]
# + [markdown] id="v_Ox6Tcg8WRQ" colab_type="text"
# And if we look at what's in our datasets, we'll see the tokenized text as a representation:
# + id="rbdkUWfj8WRS" colab_type="code" outputId="457cff4c-8d9c-4859-acb2-28337132c309" colab={"base_uri": "https://localhost:8080/", "height": 164} executionInfo={"status": "ok", "timestamp": 1571746962491, "user_tz": -60, "elapsed": 412, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00279122503348686172"}}
data.train_ds[0][0]
# + [markdown] id="FtqU6rT08WRY" colab_type="text"
# But the underlying data is all numbers
# + id="JB5r_h8q8WRd" colab_type="code" outputId="d6581be6-ac4a-4b5b-a428-646aa784ad16" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1571746975019, "user_tz": -60, "elapsed": 509, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00279122503348686172"}}
data.train_ds[0][0].data[:10]
# + [markdown] id="_8zRTZM98WRn" colab_type="text"
# ### With the data block API
# + [markdown] id="6-5WdjBA8WRp" colab_type="text"
# We can use the data block API with NLP and have a lot more flexibility than what the default factory methods offer. In the previous example for instance, the data was randomly split between train and validation instead of reading the third column of the csv.
#
# With the data block API though, we have to manually call the tokenize and numericalize steps. This allows more flexibility, and if you're not using the defaults from fastai, the various arguments to pass will appear in the step where they're relevant, so it'll be more readable.
# + id="KjE9uK8-8WRr" colab_type="code" colab={}
# Same data via the data block API: read the text column, split train/valid
# using the flag in column 2, and take labels from column 0.
data = (TextList.from_csv(path, 'texts.csv', cols='text')
        .split_from_df(col=2)
        .label_from_df(cols=0)
        .databunch())
# + [markdown] id="_DI0ULQx8WRw" colab_type="text"
# ## Language model
# + [markdown] id="A5ELTq0c8WRy" colab_type="text"
# Note that language models can use a lot of GPU, so you may need to decrease batchsize here.
# + id="QqE-iAS08WR0" colab_type="code" colab={}
bs=48
# + [markdown] id="hd8Ye-tD8WR4" colab_type="text"
# Now let's grab the full dataset for what follows.
# + id="8-LZN5Ht8WR5" colab_type="code" outputId="c1e172c5-0abd-4c45-cd71-ae18e9549528" colab={"base_uri": "https://localhost:8080/", "height": 144} executionInfo={"status": "ok", "timestamp": 1571747002619, "user_tz": -60, "elapsed": 420, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00279122503348686172"}}
path = untar_data(URLs.IMDB)
path.ls()
# + id="dPeDceJu8WSA" colab_type="code" outputId="622a170a-850a-4c5b-bbc2-ae831e5ccd09" colab={"base_uri": "https://localhost:8080/", "height": 90} executionInfo={"status": "ok", "timestamp": 1571747006538, "user_tz": -60, "elapsed": 415, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00279122503348686172"}}
(path/'train').ls()
# + [markdown] id="bcqEJJ0m8WSI" colab_type="text"
# The reviews are in a training and test set following an imagenet structure. The only difference is that there is an `unsup` folder on top of `train` and `test` that contains the unlabelled data.
#
# We're not going to train a model that classifies the reviews from scratch. Like in computer vision, we'll use a model pretrained on a bigger dataset (a cleaned subset of wikipedia called [wikitext-103](https://einstein.ai/research/blog/the-wikitext-long-term-dependency-language-modeling-dataset)). That model has been trained to guess what the next word, its input being all the previous words. It has a recurrent structure and a hidden state that is updated each time it sees a new word. This hidden state thus contains information about the sentence up to that point.
#
# We are going to use that 'knowledge' of the English language to build our classifier, but first, like for computer vision, we need to fine-tune the pretrained model to our particular dataset. Because the English of the reviews left by people on IMDB isn't the same as the English of wikipedia, we'll need to adjust the parameters of our model by a little bit. Plus there might be some words that would be extremely common in the reviews dataset but would be barely present in wikipedia, and therefore might not be part of the vocabulary the model was trained on.
# + [markdown] id="xJxlKvOn8WSJ" colab_type="text"
# This is where the unlabelled data is going to be useful to us, as we can use it to fine-tune our model. Let's create our data object with the data block API (next line takes a few minutes).
# + id="SRQp-yA38WSK" colab_type="code" colab={}
data_lm = (TextList.from_folder(path)
           # Inputs: all the text files in path
           .filter_by_folder(include=['train', 'test', 'unsup'])
           # We may have other temp folders that contain text files so we only keep what's in train, test and unsup
           .random_split_by_pct(0.1)
           # NOTE(review): `random_split_by_pct` was renamed `split_by_rand_pct`
           # in later fastai v1 releases — confirm the pinned fastai version
           # still accepts this name.
           # We randomly split and keep 10% (10,000 reviews) for validation
           .label_for_lm()
           # We want to do a language model so we label accordingly
           .databunch(bs=bs))
data_lm.save('data_lm.pkl')
# + [markdown] id="L0C4mJMK8WSO" colab_type="text"
# We have to use a special kind of `TextDataBunch` for the language model, that ignores the labels (that's why we put 0 everywhere), will shuffle the texts at each epoch before concatenating them all together (only for training, we don't shuffle for the validation set) and will send batches that read that text in order with targets that are the next word in the sentence.
#
# The line before being a bit long, we want to load quickly the final ids by using the following cell.
# + id="FVUTQ0PX8WSP" colab_type="code" colab={}
data_lm = load_data(path, 'data_lm.pkl', bs=bs)
# + id="W2ZUirwc8WST" colab_type="code" outputId="bef29fab-371d-4bcf-8907-2d95c534e70f" colab={"base_uri": "https://localhost:8080/", "height": 283} executionInfo={"status": "ok", "timestamp": 1571747253168, "user_tz": -60, "elapsed": 1871, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00279122503348686172"}}
data_lm.show_batch()
# + [markdown] id="Y2kRYu7k8WSZ" colab_type="text"
# We can then put this in a learner object very easily with a model loaded with the pretrained weights. They'll be downloaded the first time you'll execute the following line and stored in `~/.fastai/models/` (or elsewhere if you specified different paths in your config file).
# + id="eyzYQTc28WSa" colab_type="code" colab={}
learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3)
# + id="O0b-wU4m8WSg" colab_type="code" outputId="ad0eb94e-2c10-4dd1-fc8e-7720b4b297f0" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1571747325173, "user_tz": -60, "elapsed": 73854, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00279122503348686172"}}
learn.lr_find()
# + id="Hs-95qJN8WSo" colab_type="code" outputId="0c8ff580-8baf-43c6-9b3a-6104588dca56" colab={"base_uri": "https://localhost:8080/", "height": 283} executionInfo={"status": "ok", "timestamp": 1571747325905, "user_tz": -60, "elapsed": 74568, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00279122503348686172"}}
learn.recorder.plot(skip_end=15)
# + id="saHNNGk48WSx" colab_type="code" outputId="340ae634-d45c-4143-f5e1-96c9ccd28e2c" colab={"base_uri": "https://localhost:8080/", "height": 94}
learn.fit_one_cycle(1, 1e-2, moms=(0.8,0.7))
# + id="SEGvUCT48WS3" colab_type="code" colab={}
learn.save('fit_head')
# + id="ZdR7CtHn8WS6" colab_type="code" colab={}
learn.load('fit_head');
# + [markdown] id="r8B1zhxr8WS8" colab_type="text"
# To complete the fine-tuning, we can then unfreeze and launch a new training.
# + id="2yBDFtWr8WS9" colab_type="code" colab={}
learn.unfreeze()
# + id="0J-TEM4l8WTA" colab_type="code" outputId="e98ba82f-aa9b-4424-b9b0-a1014be86a5e" colab={}
learn.fit_one_cycle(10, 1e-3, moms=(0.8,0.7))
# + id="R0YTECJz8WTE" colab_type="code" colab={}
learn.save('fine_tuned')
# + [markdown] id="gw_TFjrS8WTJ" colab_type="text"
# How good is our model? Well let's try to see what it predicts after a few given words.
# + id="cZ0HLHHX8WTK" colab_type="code" colab={}
learn.load('fine_tuned');
# + id="nQXTQhpl8WTT" colab_type="code" colab={}
TEXT = "I liked this movie because"
N_WORDS = 40
N_SENTENCES = 2
# + id="DEwmi5os8WTa" colab_type="code" outputId="d66dd06e-cfed-4036-b8b2-a98a42539c3c" colab={}
print("\n".join(learn.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES)))
# + [markdown] id="xulurVOz8WTg" colab_type="text"
# We have to save not only the model, but also its encoder, the part that's responsible for creating and updating the hidden state. For the next part, we don't care about the part that tries to guess the next word.
# + id="Isv7EldX8WTi" colab_type="code" colab={}
learn.save_encoder('fine_tuned_enc')
# + [markdown] id="1n08LUOS8WTl" colab_type="text"
# ## Classifier
# + [markdown] id="s9SBRIn68WTm" colab_type="text"
# Now, we'll create a new data object that only grabs the labelled data and keeps those labels. Again, this line takes a bit of time.
# + id="rptSjHOH8WTn" colab_type="code" colab={}
path = untar_data(URLs.IMDB)
# + id="v9a38W8o8WTq" colab_type="code" colab={}
data_clas = (TextList.from_folder(path, vocab=data_lm.vocab)
             # Grab all the text files in path, reusing the LM vocabulary so
             # the fine-tuned encoder weights line up with the classifier ids.
             .split_by_folder(valid='test')
             # Split by train and valid folder (that only keeps 'train' and 'test' so no need to filter)
             .label_from_folder(classes=['neg', 'pos'])
             # Label each review with its folder name
             .databunch(bs=bs))
data_clas.save('data_clas.pkl')
# + id="IxJhiIJz8WTs" colab_type="code" colab={}
data_clas = load_data(path, 'data_clas.pkl', bs=bs)
# + id="Pj4as5-c8WTv" colab_type="code" outputId="ffeb3b32-a741-4675-a684-1824f4e4537a" colab={}
data_clas.show_batch()
# + [markdown] id="l8AHAeB48WTz" colab_type="text"
# We can then create a model to classify those reviews and load the encoder we saved before.
# + id="xUCJ41Wb8WT2" colab_type="code" colab={}
learn = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5)
learn.load_encoder('fine_tuned_enc')
# + id="GxtfyBm58WT5" colab_type="code" colab={}
learn.lr_find()
# + id="YYJMfmi38WUC" colab_type="code" colab={}
learn.recorder.plot()
# + id="tba5m1-68WUH" colab_type="code" outputId="14ac749e-7c24-458b-a1c9-21f97dcd9797" colab={}
learn.fit_one_cycle(1, 2e-2, moms=(0.8,0.7))
# + id="KxWP2S638WUL" colab_type="code" colab={}
learn.save('first')
# + id="gjpS_nb08WUO" colab_type="code" colab={}
learn.load('first');
# + id="SMWzOy7W8WUU" colab_type="code" outputId="c452a818-8c04-46e9-eaa3-c90dac8e0ac7" colab={}
learn.freeze_to(-2)
learn.fit_one_cycle(1, slice(1e-2/(2.6**4),1e-2), moms=(0.8,0.7))
# + id="6_WEgwyB8WUa" colab_type="code" colab={}
learn.save('second')
# + id="-c8erqOA8WUe" colab_type="code" colab={}
learn.load('second');
# + id="qSnrItiS8WUh" colab_type="code" outputId="75c79a86-53c1-434e-f97b-445c14060028" colab={}
learn.freeze_to(-3)
learn.fit_one_cycle(1, slice(5e-3/(2.6**4),5e-3), moms=(0.8,0.7))
# + id="NMTXwjUN8WUn" colab_type="code" colab={}
learn.save('third')
# + id="bdu2PHzX8WUp" colab_type="code" colab={}
learn.load('third');
# + id="zfN2No9W8WUr" colab_type="code" outputId="e5dd48ce-e1cd-4bda-c154-7a5e8e901ef5" colab={}
learn.unfreeze()
learn.fit_one_cycle(2, slice(1e-3/(2.6**4),1e-3), moms=(0.8,0.7))
# + id="kBf1CRsk8WUx" colab_type="code" outputId="302d71aa-634e-4ef6-fc3e-b42aed5287bf" colab={}
learn.predict("I really loved that movie, it was awesome!")
# + id="NoTqoZ2U8WU3" colab_type="code" colab={}
| fast.Ai/dl1/FastAi3_imdb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd

df = pd.read_csv('Output.csv')
df
# Frequency of each value in the `followers` column.
count = df['followers'].value_counts()
count
# Keep only rows whose `followers` value occurs exactly twice.
# NOTE(review): presumably `followers` holds account ids — confirm upstream.
new = df[df['followers'].isin(count[count==2].index)]
new
new.to_csv('3295_rows.csv')
# Re-count to verify every kept value now appears exactly twice.
count = new['followers'].value_counts()
count
| Instagram/Gephi/ig_nodes_reduction-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Visualizing Fourier Transforms
#
# ## 1D Discrete Fourier Transform
#
# $$
# \text{Given a function } f(n) \text{, the 1D Discrete Fourier Transform is} \\
# F(k) = \sum_{n=0}^{N-1} f(n)\, e^{-2 \pi j \, n k / N}
# $$
#
# ### DFT of a Point Source
#
# +
import numpy as np
import matplotlib.pyplot as plt

# A single impulse at index 5: plot the magnitude of its DFT, which is
# constant (1) across all frequencies — only the phase varies.
n = 100
point_f = np.zeros(n)
point_f[5] = 1
point_ft = np.fft.fft(point_f)
x = np.arange(0, n)
plt.plot(x, np.absolute(point_ft))
# -
# The Fourier transform is just a summation of sin waves, if we plot the real and imaginary part, we will get a sin wave
# +
# Plot the real part, imaginary part, phase, and amplitude of the impulse's
# DFT in a 2x2 grid.
fig, ax = plt.subplots(2, 2)
fig.subplots_adjust(wspace=0.5, hspace=0.5)
ax[0,0].plot(x, point_ft.real)
ax[0,0].set_title("Real")
ax[0,1].plot(x, point_ft.imag)
ax[0,1].set_title("Imag")
ax[1,0].plot(np.angle(point_ft))
ax[1,0].set_title("Phase")
ax[1,1].plot(np.abs(point_ft))
ax[1,1].set_title("Amplitude")
# -
# If we plot the phase and amplitude
# ### DFT of multiple point sources
# +
# Two impulses of different heights: plot the magnitude of the combined DFT.
n = 100
multiple_point_f = np.zeros(n)
multiple_point_f[5] = 1
multiple_point_f[8] = 10
multiple_point_ft = np.fft.fft(multiple_point_f)
x = np.arange(0, n)
plt.plot(x, np.absolute(multiple_point_ft))
# -
# ### FT of a Gaussian
# +
mu = 0
sigma = 0.1
# NOTE(review): this draws Gaussian *noise* (random samples), not a sampled
# Gaussian curve, so its FT will not look like the textbook Gaussian —
# confirm which was intended.
gauss_f = np.random.normal(mu, sigma, n)
gauss_Ft = np.fft.fft(gauss_f)
fig, ax = plt.subplots(2)
fig.subplots_adjust(wspace=0.5, hspace=0.5)
ax[0].plot(x, np.absolute(gauss_f))
ax[0].set_title("fn")
ax[1].plot(x, np.absolute(gauss_Ft))
# BUG FIX: this second title was set on ax[0], which clobbered the "fn"
# title and left the transform subplot unlabelled.
ax[1].set_title("Fk")
# -
# ## 2D Fourier Transform
#
# TODO
#
| notebooks/Effects of Fourier Transform.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Integrating a simple ODE
#
#
# Solve the ODE dy/dt = -2y between t = 0..4, with the initial condition
# y(t=0) = 1.
#
#
# +
import numpy as np
from scipy.integrate import odeint
from matplotlib import pyplot as plt


def calc_derivative(ypos, time):
    """Right-hand side of dy/dt = -2*y; `time` is required by odeint but unused."""
    return -2*ypos


# Integrate from t=0 to t=4 with initial condition y(0)=1 and plot the decay.
time_vec = np.linspace(0, 4, 40)
yvec = odeint(calc_derivative, 1, time_vec)

plt.figure(figsize=(4, 3))
plt.plot(time_vec, yvec)
plt.xlabel('t: Time')
plt.ylabel('y: Position')
plt.tight_layout()
| _downloads/plot_odeint_simple.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Processes XES and XANES .dat files into spectra
import numpy as np
import os
import shutil
import subprocess
import timeit
# # Must have .dat files in correct directory!
# ### See below.
# +
# Category labels and the directory layout for the raw TDDFT .dat files.
Types = np.array([1,2,3,4,5])
ROOTDIR = os.getcwd()
TYPEdir = "Categories/"
TEST_XESdir = "Data/dat_files/TESTdatfiles/XES/"
TEST_XANESdir = "Data/dat_files/TESTdatfiles/XANES/"
XESdir = "Data/dat_files/TRAINdatfiles/XES/"
# BUG FIX: this previously pointed at ".../TRAINdatfiles/XES/", so XANES
# training data would have been read from the XES directory.
XANESdir = "Data/dat_files/TRAINdatfiles/XANES/"

## Run this once at the very beginning:
# for t in Types:
#     os.makedirs(f"Type{t}")
# -
# ## Get lists of compounds to process
# Compounds that have a test-set XES .dat file, with the extension stripped.
# Use a suffix slice rather than str.replace, which would also strip a
# '.dat' appearing in the middle of a file name.
test_list = [f[:-4] for f in os.listdir(TEST_XESdir) if f.endswith('.dat')]
print(len(test_list))
# +
# xes_list = [f[:-4] for f in os.listdir(XESdir) if f.endswith('.dat')]
# xanes_list = [f[:-4] for f in os.listdir(XANESdir) if f.endswith('.dat')]
# -
# # Test data processing
# +
def make_test_directories(c):
    """Create Data/TEST/<c>/XANES and Data/TEST/<c>/XES, then restore the cwd.

    Assumes the current working directory is ROOTDIR on entry, matching how
    the processing functions below call it.
    """
    target = os.path.join('Data/TEST', c)
    os.makedirs(target)
    os.chdir(target)
    os.makedirs('XANES')
    os.makedirs('XES')
    os.chdir(ROOTDIR)
def process_TEST(process_list=test_list, mode='XES'):
    """Run tddftoutputparser.py over each TEST-set compound's .dat file.

    Copies each raw .dat file into the working directory, invokes the parser
    in the requested mode, and files the resulting .processedspectrum (and the
    .dat copy) under Data/TEST/<compound>/<mode>/.
    """
    # grab the appropriate source directory for the requested mode
    if mode == 'XES':
        directory = TEST_XESdir
    elif mode == 'XANES':
        directory = TEST_XANESdir
    else:
        print(f"Invalid mode {mode}. Must be 'XES' or 'XANES'.")
        return
    i = 1
    for c in process_list:
        # we don't want to process everything
        process = False
        # if .dat file exists
        if os.path.exists(f'{directory}{c}.dat'):
            # make directories in TEST folder
            if not os.path.exists(f'Data/TEST/{c}'):
                make_test_directories(c)
            else:
                # directory already exists
                pass
            # stage a working copy of the raw data next to the parser script
            shutil.copyfile(f'{directory}{c}.dat', f'{c}.dat')
            process = True
        else:
            print(f"Cannot locate {directory}{c}.dat")
            return
        # only process if .dat file available
        if process:
            if mode == 'XES':
                subprocess.call(['python', 'tddftoutputparser.py', '-f', f'{c}.dat',
                                 '-l', '0.6', '-g', '0.3', '-emin', '2445', '-emax', '2480', '-eshift',
                                 '-mode', 'XES'])
            elif mode == 'XANES':
                # the parser also needs the XES spectrum (to normalize by K-alpha)
                shutil.copyfile(f'{TEST_XESdir}{c}.dat', f'xes_{c}.dat')
                subprocess.call(['python', 'tddftoutputparser.py', '-f', f'{c}.dat', '-eshift', '-lb',
                                 '-mode', 'XANES'])
                os.remove(f'xes_{c}.dat')
            # check the spectrum was correctly processed
            if os.path.exists(f'{c}.processedspectrum'):
                # if an old processed spectrum exists, remove it first before replacing
                if os.path.exists(f'Data/TEST/{c}/{mode}/{c}.processedspectrum'):
                    os.remove(f'Data/TEST/{c}/{mode}/{c}.processedspectrum')
                # move processed spectrum file
                shutil.move(f'{c}.processedspectrum', f'Data/TEST/{c}/{mode}/{c}.processedspectrum')
            else:
                print("\t ! No processed spectrum file")
            # check if the .dat file is already in the data directory
            if os.path.exists(f'Data/TEST/{c}/{mode}/{c}.dat'):
                os.remove(f'Data/TEST/{c}/{mode}/{c}.dat')
            # now move copied .dat file over
            shutil.move(f'{c}.dat', f'Data/TEST/{c}/{mode}/{c}.dat')
        # simple progress counter (overwrites the same console line)
        print(f'{i}\r', end="")
        i += 1
# +
# Time the XES and XANES test-set processing passes.
start = timeit.default_timer()
process_TEST(process_list=test_list, mode='XES')
stop = timeit.default_timer()
print(f"Runtime: {(stop - start)/60} min")
# +
start = timeit.default_timer()
process_TEST(process_list=test_list, mode='XANES')
stop = timeit.default_timer()
print(f"Runtime: {(stop - start)/60} min")
# -
# # Training Data Processing
# +
def make_train_directories(c, t):
    """Create Data/Type<t>/<c>/XANES and .../XES, then restore the cwd.

    NOTE(review): process_spectra below calls this as make_train_directories(c)
    without the `t` argument, which will raise TypeError — confirm and fix
    that call site.
    """
    os.chdir(f'Data/Type{t}')
    os.makedirs(c)
    os.chdir(f'{c}')
    os.makedirs('XANES')
    os.makedirs('XES')
    os.chdir(ROOTDIR)
def process_spectra(process_list=None, mode='XES'):
    """Run tddftoutputparser.py on every compound's raw .dat output and
    file the resulting .processedspectrum under Data/Type{t}/{c}/{mode}/.

    Parameters
    ----------
    process_list : list of str, optional
        Compound names to process; if None, every compound listed in the
        Type*.txt index files is processed.
    mode : str
        'XES' or 'XANES'; selects the source directory and parser options.

    Relies on module-level globals: XESdir, XANESdir, Types, TYPEdir.
    """
    # grab the appropriate source directory for this mode
    if mode == 'XES':
        directory = XESdir
    elif mode == 'XANES':
        directory = XANESdir
    else:
        print(f"Invalid mode {mode}. Must be 'XES' or 'XANES'.")
        return
    i = 1
    for t in Types:
        file_name = f"{TYPEdir}Type{t}/Type{t}.txt"
        # context manager guarantees the index file is closed (the original
        # opened it and never closed it)
        with open(file_name, 'r') as file:
            for line in file:
                c = line.replace('\n', '')
                # process everything if no list was given
                if process_list is None or c in process_list:
                    # locate the .dat file: either still unprocessed in the
                    # source directory, or already filed under Data/Type*/
                    if os.path.exists(f'{directory}{c}.dat'):
                        # directories need to be created
                        if not os.path.exists(f'Data/Type{t}/{c}'):
                            # BUG FIX: make_train_directories requires the
                            # type index as well as the compound name
                            make_train_directories(c, t)
                        shutil.copyfile(f'{directory}{c}.dat', f'{c}.dat')
                        processed = False
                    # grab the already-processed dat file
                    elif os.path.exists(f'Data/Type{t}/{c}/{mode}/{c}.dat'):
                        shutil.copyfile(f'Data/Type{t}/{c}/{mode}/{c}.dat', f'{c}.dat')
                        processed = True
                    # data file not in either of the two expected locations
                    else:
                        print(f"Cannot find {directory}{c}.dat")
                        return
                    if mode == 'XES':
                        subprocess.call(['python', 'tddftoutputparser.py', '-f', f'{c}.dat',
                                         '-l', '0.6', '-g', '0.3', '-emin', '2445', '-emax', '2480', '-eshift',
                                         '-mode', 'XES'])
                    elif mode == 'XANES':
                        # copy the XES spectrum over as well to normalize by K-alpha
                        if processed:
                            xes_dir = f'Data/Type{t}/{c}/XES/'
                        else:
                            xes_dir = XESdir
                        shutil.copyfile(f'{xes_dir}{c}.dat', f'xes_{c}.dat')
                        subprocess.call(['python', 'tddftoutputparser.py', '-f', f'{c}.dat', '-eshift', '-lb',
                                         '-mode', 'XANES'])
                        os.remove(f'xes_{c}.dat')
                    # check the spectrum was correctly processed
                    if os.path.exists(f'{c}.processedspectrum'):
                        # if an old processed spectrum exists, remove it before replacing
                        if os.path.exists(f'Data/Type{t}/{c}/{mode}/{c}.processedspectrum'):
                            os.remove(f'Data/Type{t}/{c}/{mode}/{c}.processedspectrum')
                        # move the processed spectrum file into place
                        shutil.move(f'{c}.processedspectrum', f'Data/Type{t}/{c}/{mode}/{c}.processedspectrum')
                    else:
                        print("\t ! No processed spectrum file")
                    # remove any stale .dat copy already in the data directory
                    if os.path.exists(f'Data/Type{t}/{c}/{mode}/{c}.dat'):
                        os.remove(f'Data/Type{t}/{c}/{mode}/{c}.dat')
                    # now move the copied .dat file over
                    shutil.move(f'{c}.dat', f'Data/Type{t}/{c}/{mode}/{c}.dat')
                    print(f'{i}\r', end="")
                    i += 1
# +
# Process every training compound's XES spectrum and report elapsed minutes.
start = timeit.default_timer()
process_spectra(mode='XES')
# process_spectra(process_list=['2-Fluorothiophenol'], mode='XES')
stop = timeit.default_timer()
print(f"Runtime: {(stop - start)/60} min")
# +
# Same for XANES (which normalizes against the matching XES spectra).
start = timeit.default_timer()
# process_spectra(process_list=['2-Fluorothiophenol'], mode='XANES')
process_spectra(mode='XANES')
stop = timeit.default_timer()
print(f"Runtime: {(stop - start)/60} min")
# -
| Process_Dat_to_Spectra.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Yln-wzDzE2XB"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
from sklearn.ensemble import RandomForestClassifier
from sklearn import datasets
from sklearn import metrics
from sklearn.model_selection import train_test_split
import seaborn as sns
# + colab={"base_uri": "https://localhost:8080/"} id="kUIfTBwC7a81" outputId="01fc16f4-0e85-4da5-f95c-c853bffa98a4"
# !gdown --id '1e0T6Lc0PUwsCO4VAfmd4ADDk0FaCq8_m' --output level-6.csv
# + colab={"base_uri": "https://localhost:8080/", "height": 353} id="d6PvPMcktCEk" outputId="6b75069a-0bf9-49a9-f2e3-9a9b744a6c67"
# Load the level-6 taxonomy abundance table downloaded above.
train = pd.read_csv("level-6.csv")
train.head()
# + colab={"base_uri": "https://localhost:8080/"} id="ZMQfRvhgJSQM" outputId="b7a6f6f9-5ab3-4613-c695-c3753b859937"
# Optional filter that dropped short sequencing runs (currently disabled).
# shortrun = train[train['Merged'] < 920].index
# train = train.drop(shortrun,axis=0)
train.info()
# + id="ef5oh2zU6cHw"
# Split the columns into categorical identifiers and numerical features.
CAT_COL = ["index", "Cancer"]
# Every remaining column is a numerical feature; a comprehension replaces
# the original index loop plus two remove() calls (same order, same result).
NUM_COL = [col for col in train.columns if col not in CAT_COL]
# Partition the frame's columns, preserving their original order.
cat_col = [col for col in train if col in CAT_COL]
num_col = [col for col in train if col in NUM_COL]
# Categorical columns are cast to str before one-hot encoding.
for col in cat_col:
    train[col] = train[col].astype(str)
df_cat = train.loc[:, cat_col]   # take all the categorical columns
df_cat = pd.get_dummies(df_cat)  # one hot encoding
df_num = train.loc[:, num_col]   # take all the numerical columns
df_final = pd.concat([df_cat, df_num], axis=1)  # concat categorical/numerical data
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="4rsZbO2Dbb31" outputId="c44145e8-6642-4b76-bccd-4cbe85d3e800"
df_final.head()
# + colab={"base_uri": "https://localhost:8080/"} id="ClBkPVtMq9hp" outputId="df341dda-efea-46e6-f28a-294df1e886fc"
# Drop identifier, target, and read-count columns before modeling.
not_select = ["index", "Cancer", "Merged"]
train_select = train.drop(not_select,axis=1)
train_select.info()
# + id="Dv4LE3iTv2oJ"
# Partition the selected frame's columns into categorical and numerical
# lists, keeping their original column order (CAT_COL and NUM_COL are
# disjoint by construction, so the two passes match the original if/elif).
cat_col = [col for col in train_select if col in CAT_COL]
num_col = [col for col in train_select if col in NUM_COL]
# Cast any non-object categorical column to string.
for col in cat_col:
    if train_select[col].dtype != "O":
        train_select[col] = train_select[col].astype(str)
df_cat_select = train_select.loc[:, cat_col]  # categorical columns
#df_cat_select = pd.get_dummies(df_cat_select) # one hot encoding
df_num_select = train_select.loc[:, num_col]  # numerical columns
# Reassemble: categorical columns first, then numerical.
df_final_select = pd.concat([df_cat_select, df_num_select], axis=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="6uX8WzkkbOKO" outputId="cd296a8b-edd9-48ea-9905-fa7c215460f4"
df_final_select.head()
# + [markdown] id="WlXuzo6BHkIq"
# #Random Forest Classifier
# + colab={"base_uri": "https://localhost:8080/"} id="oT7bA9UMwVDo" outputId="a250fbd5-b402-49e9-956e-135fbb0ac0bd"
# Train a RandomForest ten times and report per-run and average accuracy.
# The split uses a fixed random_state, so it is loop-invariant: build the
# data and the split ONCE instead of rebuilding them every iteration
# (only the forest's internal randomness varies between runs).
train_accuracy = []
val_accuracy = []
x = df_final_select
y = train["Cancer"]
# y = np.array(y,dtype=int)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
for i in range(10):
    # Use RandomForestClassifier to predict Cancer.
    rfc = RandomForestClassifier(n_estimators=1000)
    rfc.fit(X_train, y_train)
    y_predict = rfc.predict(X_test)  # last run feeds the confusion matrix
    score_rfc = rfc.score(X_test, y_test)
    score_rfc_train = rfc.score(X_train, y_train)
    print("train_accuracy = ", score_rfc_train*100, " %")
    print("val_accuracy = ", score_rfc*100, " %")
    train_accuracy.append(score_rfc_train)
    val_accuracy.append(score_rfc)
# Compute the averages once, after the loop (the original recomputed the
# running average on every iteration; the final values are identical).
avg_val_accuracy = sum(val_accuracy) / len(val_accuracy)
avg_train_accuracy = sum(train_accuracy) / len(train_accuracy)
print('avg_val_accuracy = ', round(avg_val_accuracy, 4))
print('avg_train_accuracy = ', round(avg_train_accuracy, 4))
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="8M5PS7kiCB0Z" outputId="ec1038b5-2ba2-4839-e444-2ae341f0e25d"
from sklearn.metrics import confusion_matrix
# Confusion matrix for the final RandomForest run; plotting mat.T puts
# the true label on the x-axis and the predicted label on the y-axis.
mat = confusion_matrix(y_test, y_predict)
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False)
plt.xlabel('true label')
plt.ylabel('predicted label')
# + [markdown] id="9c86bBBnHotC"
# #SVM
# + colab={"base_uri": "https://localhost:8080/"} id="-Qzl52nywn9w" outputId="490089f3-9133-4318-d241-a15e0b919825"
from sklearn import svm
# Fit an SVM classifier ten times on the same fixed 80/20 split
# (random_state=0) and report per-run plus average accuracies.
# NOTE(review): with a fixed split and a deterministic SVC fit, the ten
# runs should be identical -- confirm the repetition is intended.
train_accuracy = []
val_accuracy = []
for run in range(10):
    # Predict Cancer with a support-vector classifier.
    x = df_final_select
    y = train["Cancer"]
    # y = np.array(y,dtype=int)
    X_train, X_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, random_state=0)
    clf = svm.SVC()
    clf.fit(X_train, y_train)
    y_predict = clf.predict(X_test)
    score_clf = clf.score(X_test, y_test)
    score_clf_train = clf.score(X_train, y_train)
    print("train_accuracy = ", score_clf_train * 100, " %")
    print("val_accuracy = ", score_clf * 100, " %")
    train_accuracy.append(score_clf_train)
    val_accuracy.append(score_clf)
# Averages over all ten runs (equal to the final running average).
avg_train_accuracy = sum(train_accuracy) / len(train_accuracy)
avg_val_accuracy = sum(val_accuracy) / len(val_accuracy)
print('avg_train_accuracy = ', round(avg_train_accuracy, 4))
print('avg_val_accuracy = ', round(avg_val_accuracy, 4))
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="MaOb5CBaavU1" outputId="c631c521-e05e-418f-e5fa-b9cd469b90f8"
from sklearn.metrics import confusion_matrix
# Confusion matrix for the final SVM run (transposed: x = true label).
mat = confusion_matrix(y_test, y_predict)
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False)
plt.xlabel('true label')
plt.ylabel('predicted label')
# + [markdown] id="lARu7KV5HrH0"
# #Neural network MLPClassifier
# + colab={"base_uri": "https://localhost:8080/"} id="8vWodBUQwvoX" outputId="88daba36-885e-48ac-b34c-de7d8dc3ce0e"
from sklearn.neural_network import MLPClassifier
# Fit an MLP classifier ten times on the same fixed 80/20 split and
# report per-run plus average accuracies.
# NOTE(review): both the split (random_state=0) and the network
# (random_state=1) are seeded, so all ten runs are identical -- confirm
# that the repetition is intended.
train_accuracy = []
val_accuracy = []
for trial in range(10):
    # Predict Cancer with a two-hidden-layer neural network.
    x = df_final_select
    y = train["Cancer"]
    # y = np.array(y,dtype=int)
    X_train, X_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, random_state=0)
    nnclf = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(50, 30), random_state=1, max_iter=2000)
    nnclf.fit(X_train, y_train)
    y_predict = nnclf.predict(X_test)
    score_nnclf = nnclf.score(X_test, y_test)
    score_nnclf_train = nnclf.score(X_train, y_train)
    print("train_accuracy = ", score_nnclf_train * 100, " %")
    print("val_accuracy = ", score_nnclf * 100, " %")
    train_accuracy.append(score_nnclf_train)
    val_accuracy.append(score_nnclf)
# Averages over all ten trials (equal to the final running average).
avg_train_accuracy = sum(train_accuracy) / len(train_accuracy)
avg_val_accuracy = sum(val_accuracy) / len(val_accuracy)
print('avg_train_accuracy = ', round(avg_train_accuracy, 4))
print('avg_val_accuracy = ', round(avg_val_accuracy, 4))
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="MayY0h2WbP8g" outputId="36c631b6-77e6-4532-f379-8f1f86f7ec0d"
from sklearn.metrics import confusion_matrix
# Confusion matrix for the final MLP run (transposed: x = true label).
mat = confusion_matrix(y_test, y_predict)
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False)
plt.xlabel('true label')
plt.ylabel('predicted label')
# + [markdown] id="dG7iRLaQHxpa"
# #Logistic Regression
# + colab={"base_uri": "https://localhost:8080/"} id="2a8ZSpJW06uP" outputId="73bb9119-9e19-4353-85a1-945985bdaad6"
from sklearn.linear_model import LogisticRegression
# Fit a logistic-regression classifier ten times on the same fixed 80/20
# split and report per-run plus average accuracies.
# NOTE(review): the split and the model are both seeded, so the ten runs
# are identical -- confirm the repetition is intended.
train_accuracy = []
val_accuracy = []
for trial in range(10):
    # Predict Cancer with logistic regression.
    x = df_final_select
    y = train["Cancer"]
    # y = np.array(y,dtype=int)
    X_train, X_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, random_state=0)
    logclf = LogisticRegression(random_state=0).fit(X_train, y_train)
    y_predict = logclf.predict(X_test)
    score_logclf = logclf.score(X_test, y_test)
    score_logclf_train = logclf.score(X_train, y_train)
    print("train_accuracy = ", score_logclf_train * 100, " %")
    print("val_accuracy = ", score_logclf * 100, " %")
    train_accuracy.append(score_logclf_train)
    val_accuracy.append(score_logclf)
# Averages over all ten trials (equal to the final running average).
avg_train_accuracy = sum(train_accuracy) / len(train_accuracy)
avg_val_accuracy = sum(val_accuracy) / len(val_accuracy)
print('avg_train_accuracy = ', round(avg_train_accuracy, 4))
print('avg_val_accuracy = ', round(avg_val_accuracy, 4))
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="-S0YWa-ybrns" outputId="14fe25d4-a75b-4dde-b114-b48ab18bdee2"
from sklearn.metrics import confusion_matrix
# Confusion matrix for the final logistic-regression run (x = true label).
mat = confusion_matrix(y_test, y_predict)
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False)
plt.xlabel('true label')
plt.ylabel('predicted label')
# + [markdown] id="N3JUGSM38aJn"
# #DNN
# + id="RpZzOe8b8bxF"
import os
import numpy as np
import keras as K
import tensorflow as tf
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
os.environ['TF_CPP_MIN_LOG_LEVEL']= '2'
# + id="UWd3yzzD9OrV"
# One-hot encode the Cancer target and split into train/test sets.
target_var = 'Cancer'
features = list(df_final_select.columns)
# Distinct classes of the target variable.
Class = train[target_var].unique()
# Map each class label to an integer code.
Class_dict = dict(zip(Class, range(len(Class))))
# Add a 'target' column holding the integer-encoded label.
train['target'] = train[target_var].apply(lambda x: Class_dict[x])
# One-hot (0/1) encode the target variable.
lb = LabelBinarizer()
lb.fit(list(Class_dict.values()))
transformed_labels = lb.transform(train['target'])
# LabelBinarizer emits a single 0/1 column for a binary target; build the
# complementary column to obtain a full two-column one-hot matrix.
# BUG FIX: np.int() was removed in NumPy 1.24 -- use plain ints; the row
# count is taken from the data instead of being hard-coded to 123.
label2 = np.zeros((len(transformed_labels), 1))
for i in range(len(transformed_labels)):
    if transformed_labels[i][0] == 0:
        label2[i][0] = 1
    elif transformed_labels[i][0] == 1:
        label2[i][0] = 0
transformed_labels = np.concatenate((transformed_labels, label2), axis=1)
y_bin_labels = []  # names of the one-hot label columns ('y0', 'y1', ...)
for i in range(transformed_labels.shape[1]):
    y_bin_labels.append('y' + str(i))
    train['y' + str(i)] = transformed_labels[:, i]
# Split the dataset into training and test sets.
train_x, test_x, train_y, test_y = train_test_split(df_final_select[features], train[y_bin_labels], train_size=0.8, test_size=0.2, random_state=0)
# + id="IPJ1Ti0kHPZq"
np.random.seed(5)
# model: a small fully-connected network, 30 hidden units -> 2 outputs.
init = K.initializers.glorot_uniform(seed=1)
model = K.models.Sequential()
# input_dim=587 -- presumably the feature count of df_final_select; confirm.
model.add(K.layers.Dense(units=30, input_dim=587, kernel_initializer=init, activation='relu'))
# model.add(K.layers.Dense(units=5, input_dim=30, kernel_initializer=init, activation='relu'))
# NOTE(review): a 'relu' output layer with categorical_crossentropy is
# unusual -- 'softmax' is the conventional choice; confirm intent.
model.add(K.layers.Dense(units=2, kernel_initializer=init, activation='relu'))
# + colab={"base_uri": "https://localhost:8080/"} id="PxjE56odqjf9" outputId="d5f4b6ab-31f9-440b-b261-ec9a56d285b1"
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="t7iv-JC0HCjC" outputId="4d973a29-aa00-4e8a-f4be-5184c5d21c3e"
# 訓練模型
opt = tf.keras.optimizers.Adam(learning_rate=0.0005)
model.compile(optimizer=opt, loss='categorical_crossentropy',
metrics=['accuracy'])
h = model.fit(train_x, train_y, epochs=10, batch_size=4,
validation_data=(test_x, test_y))
# + [markdown] id="F5xB5pClkVkc"
# |train_accuracy|val_accuracy|
# |-----|--------|
# |0.6531| 0.6000|
# |0.6327| 0.6400|
# |0.5510| 0.4400|
# |0.6531| 0.6400|
# |0.6837| 0.7200|
# |0.6939| 0.7600|
# |0.6735| 0.6800|
# |0.7857| 0.8000|
# |0.5816| 0.7200|
# |0.6837| 0.6400|
# + colab={"base_uri": "https://localhost:8080/"} id="dnx3I41Bhkkf" outputId="27b67824-b5fe-4bfa-e013-8c814b2216d1"
# Accuracies transcribed by hand from the ten DNN runs tabulated above.
train_accuracy = [0.6531,0.6327,0.5510,0.6531,0.6837,0.6939,0.6735,0.7857,0.5816,0.6837]
val_accuracy = [0.6000,0.6400,0.4400,0.6400,0.7200,0.7600,0.6800,0.8000,0.7200,0.6400]
print('avg_train_accuracy = ',round(sum(train_accuracy)/10,4))
print('avg_val_accuracy = ',round(sum(val_accuracy)/10,4))
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="znlqV1t6JlW5" outputId="d5474e88-5a5c-43c6-c01f-e596ec126951"
#0.6531/0.6400
# summarize history for accuracy
plt.plot(h.history['accuracy'])
plt.plot(h.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="V1w93MIPDfcA" outputId="2c16475f-8560-40ed-8bc6-36f1f0ce10df"
#0.6939/0.7600
# summarize history for accuracy
plt.plot(h.history['accuracy'])
plt.plot(h.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + [markdown] id="68kXEzSY2jzk"
# #PH-CNN
# ##reference:
# ###https://github.com/dfioravanti/phylogenetic-cnn
# ###https://www.tensorflow.org/tutorials/images/cnn
# + id="VRqcZJgbQjqA"
import os
from tensorflow.keras import datasets, layers, models
# + [markdown] id="m7mjcXdwIkhd"
# ##Import coordinate data
# + colab={"base_uri": "https://localhost:8080/"} id="Eq5PomusU5Hs" outputId="b3d0f86a-c7cb-449a-ccc8-dff3d4ffa5b7"
#reshape
# !gdown --id '1HgYWrJfHoQ4wnQFbvN7FTk4_hmsIBsQ4' --output ordinate.csv
C = pd.read_csv("ordinate.csv")
# Xs = df_final_select.values
# + colab={"base_uri": "https://localhost:8080/", "height": 307} id="j44B6B1bWuPE" outputId="6dc21c8c-40e8-475d-bf6d-0d0d9c504f9c"
# C = C.drop(shortrun,axis=0)
C.head()
# + colab={"base_uri": "https://localhost:8080/"} id="pnBWgtjrZucz" outputId="6978b762-3ed6-4bdb-e5f3-8c5367a7f2f0"
not_select=['index', 'Merged','Cancer']
C_final = C.drop(not_select,axis=1)
C_final.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 307} id="NAMmWg9oaTgL" outputId="a61fa00c-4df5-45f2-a4be-7a546d58588f"
C_final.head()
# + colab={"base_uri": "https://localhost:8080/"} id="wJ2Lg_O2Hw0P" outputId="e2f80321-64ae-4e5d-807a-e70f772e074a"
X_final = pd.concat([df_final_select, C_final], axis=1)
X_final.shape
# + id="F8OPpkS4I9Je"
# One-hot encode the Cancer target and split the PH-CNN feature matrix
# (abundances + coordinates) into train/test sets.
target_var = 'Cancer'
features = list(X_final.columns)
# Distinct classes of the target variable.
Class = train[target_var].unique()
# Map each class label to an integer code.
Class_dict = dict(zip(Class, range(len(Class))))
# Add a 'target' column holding the integer-encoded label.
train['target'] = train[target_var].apply(lambda x: Class_dict[x])
# One-hot (0/1) encode the target variable.
lb = LabelBinarizer()
lb.fit(list(Class_dict.values()))
transformed_labels = lb.transform(train['target'])
# BUG FIX: np.int() was removed in NumPy 1.24 -- use plain ints; the row
# count is taken from the data instead of being hard-coded to 123.
label2 = np.zeros((len(transformed_labels), 1))
for i in range(len(transformed_labels)):
    if transformed_labels[i][0] == 0:
        label2[i][0] = 1
    elif transformed_labels[i][0] == 1:
        label2[i][0] = 0
transformed_labels = np.concatenate((transformed_labels, label2), axis=1)
y_bin_labels = []  # names of the one-hot label columns ('y0', 'y1', ...)
for i in range(transformed_labels.shape[1]):
    y_bin_labels.append('y' + str(i))
    train['y' + str(i)] = transformed_labels[:, i]
# Split the dataset into training and test sets.
train_x, test_x, train_y, test_y = train_test_split(X_final[features], train[y_bin_labels], train_size=0.8, test_size=0.2, random_state=0)
# + colab={"base_uri": "https://localhost:8080/"} id="9FkdmWzEmSbA" outputId="6053d97a-54a1-4c05-c4d5-e7e090840a6a"
print(train_x.shape)
print(test_x.shape)
# + id="tviObpLcYPhy"
# Reshape each flattened feature vector into a 4x4x40 volume for the CNN.
# The second chained .reshape() to the identical shape was a no-op and is
# removed. NOTE(review): the 98/25 sample counts and 4*4*40=640 feature
# count are hard-coded to this dataset -- confirm against X_final.shape.
train_x = train_x.values.reshape(98, 4, 4, 40)
test_x = test_x.values.reshape(25, 4, 4, 40)
# + [markdown] id="t8juxTg7Inxt"
# ## Model
# + id="DyW6_hRmTrar"
# Small CNN over the 4x4x40 inputs: two conv layers, pooling, dense head.
model = K.models.Sequential()
model.add(K.layers.Conv2D(40, (3, 3), activation='relu', input_shape=train_x.shape[1:]))
# NOTE(review): input_shape on a non-first layer is ignored by Keras.
model.add(K.layers.Conv2D(20, (1, 1), activation='relu', input_shape=train_x.shape[1:]))
model.add(K.layers.MaxPooling2D((2, 2)))
model.add(K.layers.Flatten())
model.add(K.layers.Dense(64, activation= 'relu'))
model.add(K.layers.Dropout(0.25))
model.add(K.layers.Dense(2,activation='sigmoid'))
# + colab={"base_uri": "https://localhost:8080/"} id="66Juh5bhTr5z" outputId="295157fe-55e8-48a7-a312-4de4199f5424"
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="SOrKcw-kUMaj" outputId="eb624cdc-bd2b-445c-b3e3-ea8049beb470"
# Compile and train the PH-CNN; history is kept in h2 for the plots below.
opt = tf.keras.optimizers.Adam(learning_rate=0.0005)
# opt = K.optimizers.Adam(lr=0.0005)
model.compile(optimizer=opt, loss='categorical_crossentropy',
metrics=['accuracy'])
h2 = model.fit(train_x, train_y, epochs=30, batch_size=4,
validation_data=(test_x, test_y))
# + [markdown] id="YAmumX4iBmnb"
# |train_accuracy|val_accuracy|
# |-----|--------|
# |0.9490| 0.8800|
# |0.9592| 0.8400|
# |0.9490| 0.8000|
# |0.9694| 0.8000|
# |0.9490| 0.8000|
# |0.9490| 0.8800|
# |0.9490| 0.8000|
# |0.9796| 0.8400|
# |0.9694| 0.8400|
# |0.9592| 0.8400|
# + colab={"base_uri": "https://localhost:8080/"} id="r_oWoZKvBmnc" outputId="a297d144-b333-40e1-efc0-fc859c098ba8"
# Accuracies transcribed by hand from the ten PH-CNN runs tabulated above.
train_accuracy = [0.9490,0.9592,0.9490,0.9694,0.9490,0.9490,0.9490,0.9796,0.9694,0.9592]
val_accuracy = [0.8800,0.8400,0.8000,0.8000,0.8000,0.8800,0.8000,0.8400,0.8400,0.8400]
print('avg_train_accuracy = ',round(sum(train_accuracy)/10,4))
print('avg_val_accuracy = ',round(sum(val_accuracy)/10,4))
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="BPQCJOI6-nuF" outputId="ac1c5a65-7998-4ec6-82e6-338a6177b717"
#0.9490/0.8800
# summarize history for accuracy
plt.plot(h2.history['accuracy'])
plt.plot(h2.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="UoVOEwk6E9_w" outputId="2c177aba-35e5-4001-efd9-28ac77204b14"
#0.9694/0.8400
# summarize history for accuracy
plt.plot(h2.history['accuracy'])
plt.plot(h2.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + [markdown] id="ssRe8cRAGM2_"
# #GCN
# + id="tjKI-lbdNrCq"
import os
import numpy as np
import keras as K
import tensorflow as tf
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
os.environ['TF_CPP_MIN_LOG_LEVEL']= '2'
# + colab={"base_uri": "https://localhost:8080/"} id="JauagBlRME5R" outputId="6e5d3d1b-f284-4540-821b-4ba62c5b66b8"
# !gdown --id '1IQF5ywTp5Yy9WRUSkKcl1YeYulqBz_up' --output A.csv
# !gdown --id '1w2qIqBvvk-M2RdQ78ojfL891FMY5AwUx' --output D.csv
A = pd.read_csv("A.csv",header=None)
D = pd.read_csv("D.csv",header=None)
# + colab={"base_uri": "https://localhost:8080/", "height": 353} id="X4tLlS7TPlLR" outputId="f5066df7-7b03-48af-c76a-6e8d2d0cd5e9"
train = pd.read_csv("level-6.csv")
train.head()
# + id="Ry2bJ8F0OFf6"
A = A.to_numpy()
D = D.to_numpy()
# df_final_select = df_final_select.to_numpy()
# + colab={"base_uri": "https://localhost:8080/"} id="NE8t3AcBaY_a" outputId="8d0f6701-8d10-4b25-8e93-dbfcbc68b4f5"
print(A)
# + colab={"base_uri": "https://localhost:8080/"} id="8MJPtoTjP4lP" outputId="f0b31cb1-5627-453d-aa8f-28eaa4f290fa"
print(D)
# + [markdown] id="CZ6fLKBAOqkY"
# 
# + colab={"base_uri": "https://localhost:8080/"} id="oH_CNUQFOllj" outputId="dd7c8328-3a04-4989-cc0d-54ed2fa01bf5"
x = df_final_select.to_numpy()
# Identity matrix sized to the 123 samples -- TODO confirm len(x) == 123.
I = np.eye(123)
# Graph-convolution-style propagation: g_theta = (I + D^0.5 A D^0.5) x.
# NOTE(review): `*` here is elementwise, not a matrix product, and the
# standard GCN normalization uses D^-1/2 -- confirm this matches the
# intended formula from the figure above.
g_theta = np.matmul((I + ((D**0.5)*A*(D**0.5))),x)
print(g_theta)
# + id="rD557AU3MDnR"
# One-hot encode the Cancer target and split the propagated GCN features.
target_var = 'Cancer'
features = list(df_final_select.columns)
# Distinct classes of the target variable.
Class = train[target_var].unique()
# Map each class label to an integer code.
Class_dict = dict(zip(Class, range(len(Class))))
# Add a 'target' column holding the integer-encoded label.
train['target'] = train[target_var].apply(lambda x: Class_dict[x])
# One-hot (0/1) encode the target variable.
lb = LabelBinarizer()
lb.fit(list(Class_dict.values()))
transformed_labels = lb.transform(train['target'])
# BUG FIX: np.int() was removed in NumPy 1.24 -- use plain ints; the row
# count is taken from the data instead of being hard-coded to 123.
label2 = np.zeros((len(transformed_labels), 1))
for i in range(len(transformed_labels)):
    if transformed_labels[i][0] == 0:
        label2[i][0] = 1
    elif transformed_labels[i][0] == 1:
        label2[i][0] = 0
transformed_labels = np.concatenate((transformed_labels, label2), axis=1)
y_bin_labels = []  # names of the one-hot label columns ('y0', 'y1', ...)
for i in range(transformed_labels.shape[1]):
    y_bin_labels.append('y' + str(i))
    train['y' + str(i)] = transformed_labels[:, i]
# Split the dataset into training and test sets.
train_x, test_x, train_y, test_y = train_test_split(g_theta, train[y_bin_labels], train_size=0.8, test_size=0.2, random_state=0)
# + colab={"base_uri": "https://localhost:8080/"} id="gMRfPqaeY_1G" outputId="b02f4099-8257-4968-8c3e-dc98f2939c1b"
train_x.shape
# + id="LFo1ZouPYkOM"
# g_theta is a NumPy array, so reshape directly (no .values accessor).
# train_x = train_x.values.reshape(98, 1, 1, 587)
train_x = train_x.reshape(98, 1, 1, 587)
# test_x = test_x.values.reshape(25, 1, 1, 587)
test_x = test_x.reshape(25, 1, 1, 587)
# + [markdown] id="7Bjht2zfYrvM"
# ## Model
# + id="l_kwkPAxYrvM"
# 1x1 convolution over the 587 propagated features, then a small MLP head.
model = K.models.Sequential()
model.add(K.layers.Conv2D(587, (1, 1), activation='relu', input_shape=train_x.shape[1:]))
# model.add(K.layers.Conv2D(30, (1, 1), input_shape=train_x.shape[1:]))
model.add(K.layers.Flatten())
model.add(K.layers.Dense(32, activation= 'relu'))
model.add(K.layers.Dropout(0.15))
model.add(K.layers.Dense(2,activation='sigmoid'))
# + colab={"base_uri": "https://localhost:8080/"} id="XUx3JejlYrvM" outputId="f5b04cd8-09ae-4ec1-c4f6-edabd7c891d7"
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="Rqh2dJmgYrvM" outputId="e2aaaabc-7802-47ab-b568-7e6e000d8b42"
# Compile and train the GCN-style model; history kept in h2 for the plots.
opt = tf.keras.optimizers.Adam(learning_rate=0.00002)
# opt = K.optimizers.Adam(lr=0.0005)
model.compile(optimizer=opt, loss='categorical_crossentropy',
metrics=['accuracy'])
h2 = model.fit(train_x, train_y, epochs=50, batch_size=4,
validation_data=(test_x, test_y))
# + [markdown] id="YIYLhSTJnAQN"
# |train_accuracy|val_accuracy|
# |-----|--------|
# |0.6837| 0.7200|
# |0.6633| 0.7200|
# |0.6327| 0.6400|
# |0.6939| 0.6800|
# |0.7551| 0.6400|
# |0.6735| 0.6400|
# |0.7143| 0.5600|
# |0.6531| 0.6000|
# |0.7245| 0.7200|
# |0.7551| 0.7200|
# + colab={"base_uri": "https://localhost:8080/"} id="MKbKBWx2nAQN" outputId="d36dc2c0-9e3d-4ee3-d2a7-ae7bd21d61b9"
# Accuracies transcribed by hand from the ten GCN runs tabulated above.
train_accuracy = [0.6837,0.6633,0.6327,0.6939,0.7551,0.6735,0.7143,0.6531,0.7245,0.7551]
val_accuracy = [0.7200,0.7200,0.6400,0.6800,0.6400,0.6400,0.5600,0.6000,0.7200,0.7200]
print('avg_train_accuracy = ',round(sum(train_accuracy)/10,4))
print('avg_val_accuracy = ',round(sum(val_accuracy)/10,4))
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="TfL1rQTHnAQN" outputId="f24ac5d6-6795-428c-e3bd-c3e1f244c0db"
#0.6837/0.7200
# summarize history for accuracy
plt.plot(h2.history['accuracy'])
plt.plot(h2.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="baz-S1esnAQN" outputId="4a2a78aa-fbe2-4799-ff2b-d9b4027c0210"
#0.6939/0.6800
# summarize history for accuracy
plt.plot(h2.history['accuracy'])
plt.plot(h2.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
| Gastric_Cancer/Qiime2_Taxonomy_GastricCancer_dataset2(123).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests
import json
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# +
# Read the list of show titles to query against the TVmaze API.
csv_path = "show list.csv"
shows = pd.read_csv(csv_path)
shows = shows["Title"]
# -
# Search endpoint; the show title is appended as the query string.
base_url = "http://api.tvmaze.com/search/shows?q="
# +
# Look up each show title on TVmaze and collect the top hit's IDs.
responses = []   # kept for interface compatibility (never populated here)
show_info = []   # kept for interface compatibility (never populated here)
show_name = []
show_id = []
tvrage = []
thetvdb = []
imdb = []
for show in shows:
    mod_url = base_url + show
    # print(json.dumps(response_json, indent=4, sort_keys=True))
    try:
        show_data = requests.get(mod_url)
        response_json = show_data.json()
        show_name.append(response_json[0]['show']['name'])
        show_id.append(response_json[0]['show']['id'])
        tvrage.append(response_json[0]['show']['externals']['tvrage'])
        thetvdb.append(response_json[0]['show']['externals']['thetvdb'])
        imdb.append(response_json[0]['show']['externals']['imdb'])
    except Exception:
        # BUG FIX: the original bare `except:` also swallowed SystemExit
        # and KeyboardInterrupt; skip only ordinary failures (no search
        # hit, missing keys, request/JSON errors).
        pass
# -
show_data_df = pd.DataFrame({"Show Name": show_name, "Show ID": show_id})
show_data_df.head()
# +
# Fetch each show's embedded cast list and flatten it into columns.
cast_response = []
character_name = []
character_id = []
show_id_cast = []
for ids in show_id:
    cast_url = (f"http://api.tvmaze.com/shows/{ids}?embed[]=episodes&embed[]=cast")
    try:
        cast_data = requests.get(cast_url).json()
        cast_response.append(cast_data)
        # Iterate the actual cast list (capped at 150) instead of indexing
        # blindly with range(150) and relying on IndexError to stop.
        for member in cast_data['_embedded']['cast'][:150]:
            character_name.append(member['character']['name'])
            character_id.append(member['character']['id'])
            show_id_cast.append(cast_data["id"])
    except Exception:
        # BUG FIX: narrowed from a bare `except:`; skip shows whose lookup
        # fails or whose payload lacks the expected keys.
        pass
# -
character_data_df = pd.DataFrame({"Show ID": show_id_cast, "Character Name": character_name, "Character ID": character_id})
character_data_df
# +
# Fetch each show's embedded episode list and flatten it into columns.
episode_id = []
episode_season = []
episode_num = []
episode_air = []
episode_name = []
show_id_ep = []
for ids in show_id:
    cast_url = (f"http://api.tvmaze.com/shows/{ids}?embed[]=episodes&embed[]=cast")
    try:
        cast_data = requests.get(cast_url).json()
        # Iterate the actual episode list (capped at 150) instead of
        # indexing blindly and relying on IndexError to stop.
        for ep in cast_data['_embedded']['episodes'][:150]:
            episode_id.append(ep['id'])
            episode_season.append(ep['season'])
            episode_num.append(ep['number'])
            episode_air.append(ep['airstamp'])
            episode_name.append(ep['name'])
            show_id_ep.append(cast_data["id"])
    except Exception:
        # BUG FIX: narrowed from a bare `except:`; skip shows whose lookup
        # fails or whose payload lacks the expected keys.
        pass
# -
# BUG FIX: the original dict literal listed "Episode Name" twice; Python
# silently collapses duplicate keys, so it appears once here (same order).
episode_data_df = pd.DataFrame({"Show ID": show_id_ep, "Episode Name": episode_name, "Episode ID": episode_id, "Episode Season": episode_season, "Episode Number": episode_num, "Episode Air Date": episode_air})
episode_data_df = episode_data_df.groupby(["Show ID"])
episode_data_df.head()
# Per-show episode counts, then join counts and show names onto characters.
episode_count_df = pd.DataFrame({"Total Episode Count": (episode_data_df["Episode ID"].count())})
merged_df = pd.merge(character_data_df, episode_count_df, how='inner', on="Show ID")
name_merge_df = pd.merge(merged_df, show_data_df, how='inner', on="Show ID")
name_merge_df = name_merge_df[["Show ID", "Show Name", "Total Episode Count", "Character ID", "Character Name"]]
name_merge_df = name_merge_df.drop_duplicates()
name_merge_df
# Cleaning Character Names Data
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({'James "Rhodey" Rhodes / War Machine': 'James Rhodes / War Machine', 'Peggy Carter': 'Agent Peggy Carter', "P<NAME>er/Spider-Man": "Peter Parker / Spider-Man"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME> / Ironheart": "Ironheart"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME> / Scarlet Witch": "Scarlet Witch", "<NAME> / Scarlet Witch": "Scarlet Witch"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Sue Richards / Invisible Girl": "Invisible Woman / Invisible Girl", "Sue Storm / Invisible Woman": "Invisible Woman / Invisible Girl", "Susan Richards / Invisible Woman": "Invisible Woman / Invisible Girl"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"T'Challa/Black Panther": "T'Challa / Black Panther", "T'Challa / The Black Panther": "T'Challa / Black Panther", "Black Panther": "T'Challa / Black Panther"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"T<NAME>ark/Ironman": "Tony Stark / Iron Man", "Iron Man": "<NAME> / Iron Man"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME>": "<NAME> / Spider-Man"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Ant-Man": "Scott Lang / Ant-Man", "Ant Man": "Scott Lang / Ant-Man"})
# Normalize character-name aliases so each character groups under one
# canonical spelling before aggregating appearances by name.
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME>ers/Captain America": "Captain America", "Steve Rogers / Captain America": "Captain America"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME>": "<NAME> / Quake"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Wasp": "Janet van Dyne / The Wasp", "Janet van Dyne/Wasp": "Janet van Dyne / The Wasp"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME>": "Bruce Banner / Hulk", "Hulk": "Bruce Banner / Hulk", "The Hulk": "Bruce Banner / Hulk", "Bruce Banner/Hulk": "Bruce Banner / Hulk", "David Banner": "Bruce Banner / Hulk"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"The Beast": "Dr. Hank McCoy / Beast", "Beast": "Dr. Hank McCoy / Beast"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME>": "Thor"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Storm": "Ororo Munroe / Storm", "Ororo Monroe/Storm": "Ororo Munroe / Storm"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Invisible Woman": "Invisible Woman / Invisible Girl"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Hope van Dyne / The Wasp": "Janet van Dyne/Wasp"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Hawkeye": "<NAME> / Hawkeye"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Gwen Stacy": "Gwen Stacy / Ghost Spider", "Ghost-Spider": "Gwen Stacy / Ghost Spider"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Shadowcat": "Kitty Pryde / Shadowcat"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME> / Spider-Man": "<NAME> / Spider-Man"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"The Thing": "<NAME> / The Thing", "<NAME> / The Thing": "<NAME> / The Thing"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME> / Iron Fist": "<NAME> / Iron Fist"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME>": "Agent <NAME>"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Rocket": "Rocket Raccoon"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME>": "Agent <NAME>"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME> / Ms. Marvel": "Captain Marvel", "Ms. Marvel": "Captain Marvel", "<NAME> / Ms. Marvel": "Captain Marvel", "Carol Danvers / Captain Marvel": "Captain Marvel"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME>": "Agent <NAME>"})
# BUG FIX: this call originally passed a set literal ({a, b}) instead of a
# dict, which is not a valid alias -> canonical mapping for Series.replace.
# NOTE(review): mapping direction assumed to match the neighbouring lines
# (alias -> canonical) — confirm the intended canonical name.
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Norrin Radd / Silver Surfer": "Silver Surfer / Dark Surfer"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Falcon": "<NAME> / Falcon"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Nightcrawler": "<NAME> / Nightcrawler"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Wolverine": "Logan / Wolverine"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME> / Spider Woman": "Spider-Woman", "<NAME> / Spider-Woman": "Spider-Woman"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"Jonah": "<NAME>"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME>": "Prof. <NAME> / M<NAME>"})
name_merge_df['Character Name'] = name_merge_df['Character Name'].replace({"<NAME>": "Agent <NAME>"})
# Per-character aggregates: number of distinct shows and total episode count.
character_appearances = name_merge_df.groupby(["Character Name"])[["Show ID"]].count()
character_appearances["Sum of Episodes"] = name_merge_df.groupby(["Character Name"])[["Total Episode Count"]].sum()
character_appearances = character_appearances.sort_values(by='Show ID', ascending=False)
character_appearances.head(10)
character_appearances = character_appearances.rename(columns={"Show ID":"Count of Shows"})
character_appearances.head(10)
character_appearances["Count of Shows"].describe()
num_ep_appearances = character_appearances.sort_values(by='Sum of Episodes', ascending=False)
num_ep_appearances.head(10)
num_ep_appearances["Sum of Episodes"].describe()
# Top-5 values transcribed by hand from the tables above.
episodes_sca = ["Hulk", "Spider-Man", "Captain Marvel", "Iron Man","Thor"]
sum_ep_info = [911, 643, 396, 378, 313]
plt.title("Top 5 Characters with the Most Popular TV Shows")
plt.xlabel("Marvel Character")
plt.ylabel("Sum of Episodes in All Character TV Shows")
plt.scatter(episodes_sca, sum_ep_info, marker="o", facecolors="red", edgecolors="black")
ten_tv = character_appearances.head(10)
ten_tv
ten_tv[['Count of Shows']].describe()
# FIX: corrected misspelled chart labels ("Captian" -> "Captain",
# "Appearences" -> "Appearances").
episodes_sca = ["Spider-Man", "Hulk", "Captain Marvel", "Iron-Man", "Thor", "Falcon", "The Wasp", "Wolverine", "Storm", "Vision"]
sum_ep_info = [11, 9, 7, 7, 6, 5, 5, 5, 4, 4]
plt.title("Top 10 Characters with the Most Appearances in TV Shows")
plt.xlabel("Marvel Character")
plt.ylabel("Total Number of TV Show Appearances")
plt.xticks(rotation = 90)
plt.scatter(episodes_sca, sum_ep_info, marker="o", facecolors="red", edgecolors="black")
plt.tight_layout()
plt.savefig('TVplot')
name_merge_df.to_csv("TV_DF.csv", index = False)
# MCU movie-appearance data, prepared earlier and saved to CSV.
movie_csv_path = "MoviesDF.csv"
movies = pd.read_csv(movie_csv_path)
movies.head(10)
ten_movies = movies.head(10)
ten_movies.describe()
# FIX: corrected misspelled chart labels ("War Machina" -> "War Machine",
# "Appearences" -> "Appearances").
movie_sca = ["Captain America", "Black Widow", "Fury", "Hulk", "<NAME>", "Iron-Man", "Thanos", "Falcon", "War Machine", "Thor"]
sum_mcu_info = [10, 8, 7, 7, 7, 6, 6, 6, 6, 6]
plt.title("Top 10 Characters with the Most Appearances in MCU")
plt.xlabel("Marvel Character")
plt.ylabel("Total Number of MCU Appearances")
plt.xticks(rotation = 90)
plt.scatter(movie_sca, sum_mcu_info, marker="o", facecolors="red", edgecolors="black")
plt.tight_layout()
plt.savefig('Movieplot')
| tv_maze_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Machine Learning for Engineers: [ImbalancedData](https://www.apmonitor.com/pds/index.php/Main/ImbalancedData)
# - [Imbalanced Data and Learning](https://www.apmonitor.com/pds/index.php/Main/ImbalancedData)
# - Source Blocks: 6
# - Description: Identify imbalanced data and use undersampling or oversampling to improve the machine learning classification results.
# - [Course Overview](https://apmonitor.com/pds)
# - [Course Schedule](https://apmonitor.com/pds/index.php/Main/CourseSchedule)
#
# <img width=400px align=left src='https://apmonitor.com/pds/uploads/Main/imbalanced_data.png'>
# +
#pip install imblearn
# +
#pip install xgboost
# -
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import xgboost as xgb
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline
from imblearn.under_sampling import RandomUnderSampler as RUS
from sklearn.metrics import ConfusionMatrixDisplay, f1_score
from sklearn.model_selection import train_test_split as tts
from sklearn.datasets import make_blobs as MB
# ### Import Data with Weighted Classes
# Load the train/test splits of the class-imbalanced dataset.
data = pd.read_csv('http://apmonitor.com/pds/uploads/Main/imbalanced_train.csv')
datat = pd.read_csv('http://apmonitor.com/pds/uploads/Main/imbalanced_test.csv')
# Five numbered feature columns plus the class 'label' column.
data.columns = [0,1,2,3,4,'label']
datat.columns = [0,1,2,3,4,'label']
# Label histogram makes the class imbalance visible.
data['label'].hist(bins=5)
plt.show()
data.head()
# ### Visualize Data
sns.pairplot(data,hue='label')
plt.show()
# ### Find and drop redundant columns
# Heatmap of pairwise feature correlations.
sns.heatmap(data.corr())
plt.show()
# ### Create Models
# #### Make One Model Weighted, and One Unweighted
#
# In order to "weight" a model, there are several approaches one can take. Oversampling the minority classes or undersampling the majority classes is one way to try to "fill" imbalanced data. When oversampling minority classes, typically methods such as SMOTE are used. SMOTE creates artificial samples of a class with features within the vector space of the other class samples. Another approach, which is not shown here, is to use the "weights" kwarg that many model classes have. These accept some coefficient array that tells the model to adjust itself to compensate for imbalances. Reproportion the data to be the same quantity for each class by oversampling the minority and undersampling the majority.
# +
# Feature/label split for the train and held-out test sets.
Xtrain = data[[0,1,2,3,4]]
ytrain = data['label']
Xtest = datat[[0,1,2,3,4]]
ytest = datat['label']
# Resampling target: twice the size of the smallest class (label 2).
fa = np.count_nonzero(ytrain.values==2)*2
ov = {0:int(np.count_nonzero(ytrain.values==0)),\
     1:int(np.count_nonzero(ytrain.values==1)),\
     2:int(fa)} #oversample the smallest class
un = {0:fa,1:fa,2:fa} #sample the same number of samples for each class
                      # so the population is roughly equal
# SMOTE synthesizes minority samples; RandomUnderSampler then trims every
# class down to `fa` so all classes end up the same size.
over = SMOTE(sampling_strategy=ov, random_state=4, k_neighbors = 4)
under = RUS(sampling_strategy=un, random_state=4)
model_weighted = xgb.XGBClassifier(use_label_encoder=False)
model_imb = xgb.XGBClassifier(use_label_encoder=False)
# model_sampled =
# Baseline: fit directly on the imbalanced data.
model_imb.fit(Xtrain,ytrain)
# Resampled model: oversample -> undersample -> fit, in one pipeline.
steps = [ ('o',over),('u',under),('xgboost',model_weighted)] #
pipeline = Pipeline(steps)
pipeline.fit(Xtrain,ytrain)
# yp = pipline.predict(Xtest)   # NOTE(review): typo "pipline" if re-enabled
pass
# -
# ### Visualize Results
# Confusion matrix + weighted F1 for the model trained on imbalanced data.
ConfusionMatrixDisplay.from_estimator(model_imb,Xtest,ytest)
plt.title(f'No resampling, F1={f1_score(ytest,model_imb.predict(Xtest),average="weighted"):.3g}')
plt.show()
# BUG FIX: the continuation strings were missing the f prefix, so the
# {np.count_nonzero(...)} expressions were printed literally.
print(f'True quantities: 0:{np.count_nonzero(ytest.values==0)} ' + \
      f'1:{np.count_nonzero(ytest.values==1)} ' + \
      f'2:{np.count_nonzero(ytest.values==2)}')
# Same report for the resampled pipeline.
ConfusionMatrixDisplay.from_estimator(pipeline,Xtest,ytest)
plt.title(f'Resampled, F1={f1_score(ytest,pipeline.predict(Xtest),average="weighted"):.3g}')
plt.show()
# ### Conclusion
#
# The model trained on the resampled data predicted the smaller classes more often than the model trained on the skewed data. This leads to more misclassification of the majority class, but improves the classification of the minority class.
| Imbalanced_Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python3 (fastai)
# language: python
# name: fastai
# ---
r = {"4":5}
def countCharacters(words, chars: str) -> int:
    """Return the summed length of the words that can be built from the
    letters in *chars*, each letter usable at most as often as it occurs.

    words: list of candidate words; chars: the available letters.
    Returns 0 for an empty word list or empty chars.
    """
    # Counter replaces the hand-rolled per-character counting dict.
    from collections import Counter
    if words == [] or chars == "":
        return 0

    chars_count = Counter(chars)

    def _is_buildable(word):
        # Every letter of the word must be available often enough in chars.
        return all(chars_count[ch] >= cnt for ch, cnt in Counter(word).items())

    return sum(len(word) for word in words if _is_buildable(word))

words = ["cat","bt","hat","tree"]
chars = "atach"
countCharacters(words, chars)
def countCharacters(self, words: list, chars: str) -> int:
    """LeetCode-style method variant: sum of lengths of words formable
    from *chars*.

    BUG FIX: the original annotated words as typing.List[str] without
    importing List, which raises NameError at definition time; the
    builtin `list` is used instead.
    """
    total = 0
    # Available count per distinct letter in chars.
    chars_dict = {c: chars.count(c) for c in set(chars)}
    for w in words:
        for i, c in enumerate(w):
            if c not in chars_dict or w.count(c) > chars_dict[c]:
                break
            # Reached the last letter without breaking: word is formable.
            if i + 1 == len(w):
                total += len(w)
    return total
def countCharacters(words, chars: str) -> int:
    """Compact variant: a word counts iff each of its letters occurs at
    least as often in chars as in the word itself."""
    def fits(word):
        return all(0 < word.count(letter) <= chars.count(letter) for letter in word)
    return sum(len(word) for word in words if fits(word))
| lt1160_Find Words That Can Be Formed by Characters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # %load 'validate_poi.py'
# +
"""
Starter code for the validation mini-project.
The first step toward building your POI identifier!
Start by loading/formatting the data
After that, it's not our code anymore--it's yours!
"""
import pickle
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
### BUG FIX: the original used pickle.load(open(...)), leaking the file
### handle; a context manager closes it deterministically.
with open("../final_project/final_project_dataset.pkl", "rb") as dataset_file:
    data_dict = pickle.load(dataset_file)

### first element is our labels, any added elements are predictor
### features. Keep this the same for the mini-project, but you'll
### have a different feature list when you do the final project.
features_list = ["poi", "salary"]

data = featureFormat(data_dict, features_list)
labels, features = targetFeatureSplit(data)
# -
# ## types van datasets
#print een Python dictionary (=JSON object uit JSON-array)
data_dict["<NAME>"]
type(data_dict)
type(data)
len(data_dict)
# +
#data_dict
# -
# convert dict to dataframe
import pandas as pd
hond = pd.DataFrame.from_dict(data)
hond.shape
type(hond)
hond.info()
# example: df.columns = [‘A’, ‘B’, ‘C’, ‘D’]
hond.columns = ['POI?', 'salary']
hond
type(features)
len(features)
# hiermee maak je van 'dict' een 'lists'
zoet = list(features)[0:5]
zoet
type(zoet)
joop = ['a', 'b'] # hiermee creeer je een 'list'
joop
list(features)[0] ==> hiermee pak je de 1e list uit het object
#list(vals)[0].shape # hier creeer je een tuple
koekje = list(features)[0].shape #waarvan je shape kan krijgen
koekje
type(koekje)
list? # to view the docstring
len(labels)
sum(labels) # dus 17 van 95 zijn een PersonOfInterest
# Model selection / evaluation imports.
from sklearn.model_selection import cross_validate #from sklearn import cross_validation
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
# ## Your first overfit POI identifier
# Did you run a decision tree classifier with the default parameters on the full data? Remember that we're looking at the performance of the overfit tree first.
# Fit and score on the SAME data — deliberately overfit baseline.
clf_overfit = DecisionTreeClassifier()
clf_overfit = clf_overfit.fit(features, labels)
prediction_overfit = clf_overfit.predict(features)
print ("Accuracy : ", accuracy_score(prediction_overfit, labels))
# Pretty high accuracy, huh? Yet another case where testing on the training data would make you think you were doing amazingly well, but as you already know, that's exactly what holdout test data is for...
# ## Deploying a Training/Testing Regime
# Tried using splitting data into test/train
# Proper holdout: 70/30 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size = 0.3, random_state=42)
len(X_train)
len(X_test)
clf = DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
prediction = clf.predict(X_test)
print ("Accuracy : ", accuracy_score(prediction, y_test))
# Aaaand the testing data brings us back down to earth after that 99% accuracy in the last quiz.
prediction
| 14_validate_poi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Visualization Utilities
# Because I should probably start standardizing my data exploration.
# +
import numpy as np
import sys
import os
import pandas as pd
from nltk import word_tokenize
from collections import Counter
from itertools import chain
import matplotlib.pyplot as plt, mpld3
# %matplotlib inline
import re
sys.path.append('..')
from imp import reload
from data import reddit_preprocessor, DataHelper
from data.reddit_preprocessor import *
import json
from pprint import pprint
from jupyterthemes import jtplot
jtplot.style('onedork', ticks=True, fscale=1.5)
jtplot.figsize(x=11., y=8.)
DATA_ROOT = '/home/brandon/Datasets/test_data'
FROM = os.path.join(DATA_ROOT, 'train_from.txt')
TO = os.path.join(DATA_ROOT, 'train_to.txt')
# +
COL_NAMES = ['inp_sentence', 'resp_sentence']

def make_dataframe(data_dir):
    """Build a two-column DataFrame pairing each prompt line in
    train_from.txt with the corresponding response line in train_to.txt.

    data_dir: directory containing train_from.txt and train_to.txt.
    """
    from_path = os.path.join(data_dir, 'train_from.txt')
    to_path = os.path.join(data_dir, 'train_to.txt')
    with open(from_path, 'r') as src, open(to_path, 'r') as dst:
        # zip stops at the shorter file, matching the original
        # read-until-either-EOF loop; strip removes the trailing newline.
        pairs = [(a.strip(), b.strip()) for a, b in zip(src, dst)]
    return pd.DataFrame(pairs, columns=COL_NAMES)
def word_tokenize(df):
    """Tokenize both DataFrame columns and return per-column word counts.

    Returns {'from': Counter, 'to': Counter} over all words in each column.
    NOTE(review): this shadows nltk's word_tokenize imported at the top of
    the file — confirm the nltk version is not needed after this point.
    """
    word_freq = {}
    # I know. I KNOW.
    # Tokenize each 'from' sentence; squeeze collapses the singleton axis
    # added by expand_dims. NOTE(review): np.squeeze would also drop the
    # sentence axis for a one-row DataFrame — confirm inputs are >1 row.
    sentences = np.squeeze(list(((map(
        DataHelper.word_tokenizer,
        list(np.expand_dims(df[COL_NAMES[0]].values, 1)))))))
    word_freq['from'] = Counter(chain.from_iterable(sentences))
    # Stop judging me.
    sentences = np.squeeze(list(((map(
        DataHelper.word_tokenizer,
        list(np.expand_dims(df[COL_NAMES[1]].values, 1)))))))
    word_freq['to'] = Counter(chain.from_iterable(sentences))
    return word_freq
def plot_freq_dist(word_freq, n):
    """Plot the n most common words of each distribution in word_freq
    ('from'/'to' Counters), then a grouped bar chart of the words both
    distributions share. Returns the set of shared words.
    """
    words_dict = {}
    for dist in word_freq:
        most_comm = word_freq[dist].most_common(n)
        words, counts = zip(*most_comm)
        words_dict[dist] = words
        # FIX: pd.Series.from_array was removed in pandas 1.0; the plain
        # Series constructor is the drop-in replacement.
        counts_series = pd.Series(counts)
        plt.figure(figsize=(8, 5))
        ax = counts_series.plot(kind='bar')
        ax.set_title('Frequency Distribution: ' + dist)
        ax.set_ylabel('Counts')
        ax.set_xlabel('Words')
        ax.set_xticklabels(words_dict[dist])

    # Grouped comparison for words common to both distributions.
    from_words = set(words_dict['from'])
    to_words = set(words_dict['to'])
    common_words = from_words.intersection(to_words)
    common_word_freqs = [
        [word_freq['from'][w] for w in common_words],
        [word_freq['to'][w] for w in common_words]]
    ind = np.arange(len(common_words))
    plt.figure(figsize=(8, 5))
    p1 = plt.bar(ind, common_word_freqs[0], width=0.5, color='b')
    p2 = plt.bar(ind, common_word_freqs[1], width=0.5, color='r')
    plt.xticks(ind, common_words)
    plt.legend((p1[0], p2[0]), ('From', 'To'))
    return common_words
# -
# Allow very long sentences to render without truncation.
pd.set_option('display.max_colwidth', 10000)
df = make_dataframe(DATA_ROOT)
# NOTE(review): this set_option repeats the one above — likely redundant.
pd.set_option('display.max_colwidth', 10000)
df.head(len(df.index))
word_freq = word_tokenize(df)
common_words = plot_freq_dist(word_freq, 5)
common_words
# # From TensorBoard to JSON to Matplotlib
# +
import os
import numpy as np
import pandas as pd
import yaml
import re
import matplotlib.pyplot as plt
# %matplotlib inline
from jupyterthemes import jtplot
from scipy.interpolate import spline
jtplot.style('onedork', ticks=True, fscale=1.5)
jtplot.figsize(x=11., y=8.)
pd.set_option('display.max_colwidth', 1000)
# --------------------------------------------------------
# Globals
# --------------------------------------------------------
# SEQ/STATIC are machine-specific roots supplied via environment variables
# (None if unset).
SEQ = os.getenv('SEQ')
STATIC = os.getenv('STATIC')
# Directory layout of the exported tensorboard JSON plots.
BASE_DIR = os.path.join(os.getcwd(), 'individual_tb_plots')
ACC_DIR = os.path.join(BASE_DIR, 'accuracy')
TRAIN_DIR = os.path.join(BASE_DIR, 'train')
VALID_DIR = os.path.join(BASE_DIR, 'valid')
COL_NAMES = ['wall_time', 'iteration']
# -
# Format the dictionary configuration dictionaries
# +
from copy import deepcopy

omfg = {}
configs = {}
run_keys = set()
# Run keys are parsed out of the tensorboard export filenames,
# e.g. "run-<key>-tag-...".
for fname in os.listdir(ACC_DIR):
    name = re.search(r'(?:-)(\w+)(?:-)', fname).group(0)[1:-1]
    run_keys.add(name)
run_keys = list(run_keys)

for k in run_keys:
    fname = os.path.join(SEQ, 'out/cornell', k, 'config.yml')
    with open(fname, 'r') as config_file:
        # FIX: yaml.load without an explicit Loader is deprecated and can
        # construct arbitrary Python objects; safe_load is the right call
        # for plain config files.
        configs[k] = yaml.safe_load(config_file)
def filtered_configs(configs):
    """Return a copy of *configs* with keys that are identical across all
    runs stripped out, leaving only the hyper-parameters that differ.

    NOTE(review): the list(filter(...)) calls are used for their side
    effect — the lambda pops a key and the truthy popped value keeps the
    entry — so a run whose popped value is falsy would silently be
    dropped from _configs. Confirm that is intended before refactoring.
    """
    _configs = [(k, deepcopy(configs[k])) for k in configs]
    # Remove dataset name (assuming they're all cornell)
    _configs = list(filter(lambda c: c[1].pop('dataset'), _configs))
    # Remove model name (all are DynamicBot)
    _configs = list(filter(lambda c: c[1].pop('model'), _configs))
    # misc.
    _configs = list(filter(lambda c: c[1]['model_params'].pop('ckpt_dir'), _configs))
    for k in ['model_params', 'dataset_params']:
        kk_list = list(_configs[0][1][k])
        for kk in kk_list:
            vals = set()
            for conf in _configs:
                conf = conf[1]
                vals.add(conf[k].get(kk))
            if len(vals) == 1 and 'attention' not in kk:
                # Remove items that are all the same.
                _wtf = list(filter(lambda c: c[1][k].pop(kk), _configs))
                if _wtf: _configs = _wtf
    return {k: v for k, v in _configs}
def renamed(name):
    """Prettify a run key for display and record the mapping in the
    module-level `omfg` dict (original key -> pretty name)."""
    _omfg = name
    if 'idi' in name:
        # NOTE(review): 'basic' is stripped first, so the
        # 'basic' -> 'Basic' replace below can never match; confirm
        # whether that ordering is intended.
        name = name.replace('basic', '')
        name = name.replace('bidi', 'Bidi')
        name = name.replace('basic', 'Basic')
    # Runs that don't name a cell type are GRU runs.
    if 'LSTM' not in name:
        name += 'GRU'
    omfg[_omfg] = name
    return name

# Strip invariant keys, then re-key the configs by their pretty names.
f_configs = filtered_configs(configs)
f_configs = {renamed(n): v for n, v in f_configs.items()}
pprint(f_configs)
pprint(omfg)
# +
# Load the exported tensorboard curves (accuracy, train loss, valid loss)
# into one DataFrame per run, keyed by the pretty run name.
df_acc = {}
df_train = {}
df_valid = {}
for k in run_keys:
    fname = 'run-'+k+'-tag-evaluation_accuracy.json'
    df_acc[omfg[k]] = pd.read_json(os.path.join(ACC_DIR, fname))
    fname = 'run-'+k+'-tag-evaluation_loss_train.json'
    df_train[omfg[k]] = pd.read_json(os.path.join(TRAIN_DIR, fname))
    fname = 'run-'+k+'-tag-evaluation_loss_valid.json'
    df_valid[omfg[k]] = pd.read_json(os.path.join(VALID_DIR, fname))
    # Tensorboard JSON columns are [wall_time, step, value].
    df_acc[omfg[k]].columns = COL_NAMES + ['accuracy']
    df_train[omfg[k]].columns = COL_NAMES + ['training loss']
    df_valid[omfg[k]].columns = COL_NAMES + ['validation loss']
# +
run_keys = list(f_configs.keys())
def plot_df(df, y_name, run_key, n_points=25, use_spline=True, plot_perp=False):
    """Plot one tensorboard-exported run onto the current figure.

    df: DataFrame with wall_time/iteration/y_name columns.
    y_name: column to plot ('accuracy', 'training loss', ...).
    run_key: legend label for this run.
    n_points: number of interpolation points when use_spline is True.
    plot_perp: plot exp(loss) (perplexity) instead of the raw loss.
    """
    if plot_perp:
        # Skip the first rows (loss still huge) and exponentiate.
        df = df.loc[2:]
        df[y_name] = np.exp(df[y_name])
    if use_spline:
        # FIX: scipy.interpolate.spline was removed (scipy >= 1.2);
        # make_interp_spline is the supported replacement (cubic default).
        from scipy.interpolate import make_interp_spline
        iters = df.iteration
        iters_new = np.linspace(iters.min(), iters.max(), n_points)
        smooth_y = make_interp_spline(iters, df[y_name])(iters_new)
        plt.plot(iters_new, smooth_y, label=run_key)
    else:
        plt.plot(df['iteration'], df[y_name], label=run_key)
    plt.title(y_name.title())
    plt.ylabel(y_name)
    if y_name == 'accuracy':
        plt.ylim([0., 1.])
        plt.yticks(list(np.arange(
            0., float(plt.yticks()[0][-1])+0.1, step=0.1)))
        leg_kwargs = dict(fontsize='x-small',
                          loc='upper left')
    else:
        plt.yticks(list(np.arange(
            0., float(plt.yticks()[0][-1])+1., step=1.)))
        leg_kwargs = dict(fontsize='small',
                          loc='upper right')
    plt.xlim([0., 1.0e4])
    plt.xlabel('Iteration')
    plt.legend(**leg_kwargs)
def plot_runs(df_dict, y_name, n_points=25,
              use_spline=True, plot_perp=False,
              figsize=(10,7)):
    """Draw every run in df_dict onto one new figure and return it.

    Each run's DataFrame is deep-copied so plot_df's in-place perplexity
    transform cannot corrupt the caller's data.
    """
    fig = plt.figure(figsize=figsize)
    for run_key in run_keys:
        df_copy = deepcopy(df_dict[run_key])
        plot_df(df_copy, y_name, run_key,
                n_points=n_points,
                use_spline=use_spline,
                plot_perp=plot_perp)
    return fig
# -
# Render accuracy / loss figures for all runs with the notebook theme.
jtplot.style('onedork', ticks=True, context='talk', fscale=1.5)
acc_fig = plot_runs(df_acc, 'accuracy')
train_fig = plot_runs(df_train, 'training loss')
valid_fig = plot_runs(df_valid, 'validation loss')
plt.show()
# +
# Export the figures as mpld3 JSON, plus the filtered configs, into the
# static-site assets directory under $STATIC.
html_dir = os.path.join(STATIC, 'assets/plots')
for fname, fig in {'accuracy': acc_fig, 'training': train_fig, 'validation': valid_fig}.items():
    with open(os.path.join(html_dir, fname+'.json'), 'w') as f:
        mpld3.save_json(fig, f)
with open(os.path.join(html_dir, 'configs.json'), 'w') as f:
    json.dump(f_configs, f)
# -
os.listdir(html_dir)
html_dir
| notebooks/DataVizUtils.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Filter By RFree Demo
#
# Example of reading an MMTF Hadoop Sequence file, filtering the entries by resolution, and counting the number of entries. This example shows how methods can be chained together.
#
# [R Free](http://pdb101.rcsb.org/learn/guide-to-understanding-pdb-data/r-value-and-r-free)
#
# ## Imports
from pyspark import SparkConf, SparkContext
from mmtfPyspark.io import mmtfReader
from mmtfPyspark.filters import RFree
from mmtfPyspark.structureViewer import view_structure
# ## Configure Spark
# local[*] runs Spark locally with one worker thread per available core.
conf = SparkConf().setMaster("local[*]") \
                  .setAppName("FilterByRFreeDate")
sc = SparkContext(conf = conf)
# ## Read in MMTF Files, filter by RFree and count
# +
path = "../../resources/mmtf_reduced_sample/"

# Keep only structures whose R-free falls in [0.15, 0.25].
structures = mmtfReader.read_sequence_file(path, sc) \
                       .filter(RFree(0.15,0.25))

print(f"Number of structures : {structures.count()}")
# -
# ## Visualize Structures
structure_names = structures.keys().collect()
view_structure(structure_names, style='stick')
# ## Terminate Spark
# Release the SparkContext so the JVM worker shuts down cleanly.
sc.stop()
| docs/_static/demos/filters/FilterByRFreeDemo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Document for the problem
# ### The Warehouse Location Problem
#
# We use the warehouse location problem throughout this chapter, which considers
# the optimal locations to build warehouses to meet delivery demands. Let `N` be a
# set of candidate warehouse locations, and let `M` be a set of customer locations. For
# each warehouse `n`, the cost of delivering product to customer `m` is given by $ d_{n,m} $.
# We wish to determine the optimal warehouse locations that will minimize the total
# cost of product delivery. The binary variables $y_n$ are used to define whether or not a
# warehouse should be built, where $y_n$ is 1 if warehouse `n` is selected and 0 otherwise.
# The variable $x_{n,m}$ indicates the fraction of demand for customer m that is served by
# warehouse `n`.
#
# The variables `x` and `y` are determined by the optimizer, and all other quantities are
# known inputs or parameters in the problem. This problem is a particular description
# of the p-median problem, and it has the interesting property that the `x` variables will
# converge to {0, 1} even though they are not specified as binary variables.
# The complete problem formulation is:
#
# Objective:
# $$
# min \sum_{n \in N}{\sum_{m \in M}{d_{n,m}x_{n,m}}}
# $$
#
# s.t.:
# $$
# \sum_{n \in N}{x_{n,m}} = 1, \forall m \in M
# $$
#
# $$
# x_{n,m} \leq y_n, \forall n \in N, m \in M
# $$
#
# $$
# \sum_{n \in N}{y_n} \leq P
# $$
#
# $$
# 0 \leq x \leq 1
# $$
#
# $$
# y \in {0, 1}
# $$
# Here, the objective is to minimize the total cost associated with delivering products to all the customers. The first constraint ensures that each customer's demand is fully met, and the second ensures that a warehouse can deliver product to customers only if that warehouse is selected to be built. Finally, the last constraint limits the number of warehouses that can be built to `P`.
#
# For our example, we will assume that $P = 2$, with the following data for warehouse and customer locations,
# ```
# Customer locations M = {‘NYC’, ‘LA’, ‘Chicago’, ‘Houston’}
# Candidate warehouse locations N = {‘Harlingen’, ‘Memphis’, ‘Ashland’}
# ```
# with the costs $d_{n,m}$ as given in the following table:
#
#
# | | NYC | LA | Chicago | Houston |
# | --------- | ---- | ---- | ------- | ------- |
# | Harlingen | 1956 | 1606 | 1410 | 330 |
# | Memphis | 1096 | 1792 | 531 | 567 |
# | Ashland | 485 | 2322 | 324 | 1236 |
# +
import os
import pandas as pd
import pyomo.environ as pyo
# import ipymaterialui as mui
import ipywidgets as widgets
from IPython.display import display
# import ipyvuetify as v
# -
# ## Read the data from csv using pandas
#
# ---
# Radio toggle: take the CSV from an upload or pick one from disk.
data_select = widgets.RadioButtons(
    options=['Upload', 'Select'],
    value='Select'
)
# Dropdown populated later with the .csv files in the working directory.
data_source = widgets.Dropdown(description='Data file:')
uploader = widgets.FileUpload(
    accept='.csv',  # Accepted file extension e.g. '.txt', '.pdf', 'image/*', 'image/*,.pdf'
    multiple=False  # True to accept multiple files upload else False
)
def create_data_file_dd(data_source):
    """Fill the data-file dropdown with every .csv in the current working
    directory, preselect the first one found, and display the widget."""
    cwd = os.getcwd()
    csv_files = [
        entry for entry in os.listdir(cwd)
        if os.path.isfile(os.path.join(cwd, entry))
        and entry.lower().endswith(".csv")
    ]
    data_source.options = csv_files
    data_source.value = csv_files[0] if csv_files else None
    display(data_source)
# +
out = widgets.Output()

@out.capture(clear_output=True, wait=True)
def display_data_source(*args):
    """Re-render the appropriate input widget whenever the radio toggle
    changes: the file dropdown for 'Select', the uploader for 'Upload'."""
    if data_select.value == "Select":
        create_data_file_dd(data_source)
    else:
        display(uploader)
# -

# Initial render, then re-render on every toggle change.
display_data_source()
data_select.observe(display_data_source, 'value')
display(widgets.HBox([data_select, out]))
# +
import io
def load_data(csv_file_path) -> pd.DataFrame:
    """Read the warehouse-cost table: from the upload widget when the
    toggle says 'Upload', otherwise from csv_file_path. The first CSV
    column becomes the index (warehouse names)."""
    if data_select.value == 'Upload':
        raw = uploader.get_state()['data'][0]
        return pd.read_csv(io.BytesIO(raw), index_col=0)
    return pd.read_csv(csv_file_path, index_col=0)
# -
# ### Show the data
#
# +
sd_button = widgets.Button(description='Show the data')
out_data = widgets.Output()
display(sd_button, out_data)

# Loaded DataFrame cache; filled on button click and reused by the
# model-building handlers below.
data = None

def on_button_click(b):
    """Load the cost table and print it into the output area."""
    global data
    with out_data:
        data = load_data(data_source.value)
        print(data)

sd_button.on_click(on_button_click)
# +
def create_model(df: pd.DataFrame) -> pyo.ConcreteModel:
    """Build the warehouse-location MIP from a cost table whose rows are
    candidate warehouses and whose columns are customers."""
    warehouses = list(df.index.map(str))
    customers = list(df.columns.map(str))
    cost = {(w, c): df.loc[w, c] for w in warehouses for c in customers}
    max_warehouses = 2  # the "P" limit from the problem statement

    model = pyo.ConcreteModel(name="(WL)")
    model.N = pyo.Set(initialize=warehouses)
    model.M = pyo.Set(initialize=customers)
    # x[w, c]: fraction of customer c's demand served by warehouse w.
    model.x = pyo.Var(warehouses, customers, bounds=(0, 1))
    # y[w]: 1 iff warehouse w gets built.
    model.y = pyo.Var(warehouses, within=pyo.Binary)

    def total_cost(model):
        return sum(cost[w, c] * model.x[w, c]
                   for w in warehouses for c in customers)
    model.obj = pyo.Objective(rule=total_cost)

    def demand_met(model, c):
        # Each customer's demand must be fully covered.
        return sum(model.x[w, c] for w in warehouses) == 1
    model.one_per_cust = pyo.Constraint(customers, rule=demand_met)

    def only_if_built(model, w, c):
        # A warehouse can only serve customers if it is built.
        return model.x[w, c] <= model.y[w]
    model.warehouse_active = pyo.Constraint(warehouses, customers,
                                            rule=only_if_built)

    def warehouse_cap(model):
        return sum(model.y[w] for w in warehouses) <= max_warehouses
    model.num_warehouses = pyo.Constraint(rule=warehouse_cap)

    return model
# -
# ## Create the model
#
# ---
# +
# Pyomo model cache, built on button click.
model = None
cm_button = widgets.Button(description='Create the model')
out_model = widgets.Output()
display(cm_button, out_model)

def on_m_button_click(b):
    """Build the Pyomo model from the loaded data (loading it first if
    needed) and pretty-print it into the output area."""
    global model
    global data
    if data is None:
        data = load_data(data_source.value)
    with out_model:
        model = create_model(data)
        model.pprint()

cm_button.on_click(on_m_button_click)
# -
# ## Solve the model
#
# ---
# +
sm_button = widgets.Button(description='Solve the model')
out_res = widgets.Output()
display(sm_button, out_res)

def on_sm_button_click(b):
    """Solve the MIP with GLPK and print which warehouses to build and
    which customers each built warehouse serves."""
    global model
    global data
    if data is None:
        data = load_data(data_source.value)
    with out_res:
        solver = pyo.SolverFactory('glpk')
        res = solver.solve(model)
        print(res)
        # post process: produce nicely formatted output
        for wl in model.N:
            if pyo.value(model.y[wl]) > 0.5:
                # BUG FIX: the threshold comparison belongs outside
                # pyo.value(); the original evaluated pyo.value(expr > 0.5).
                customers = [str(cl) for cl in model.M
                             if pyo.value(model.x[wl, cl]) > 0.5]
                # BUG FIX: removed the stray "+" that appeared verbatim in
                # the printed output.
                print(f"{wl} serves customers: {customers}")
            else:
                print(str(wl) + ": do not build")

sm_button.on_click(on_sm_button_click)
| wl_csv_ui.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="BWIyC9Ip_bcq"
import numpy as np
import pandas as pd
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from matplotlib import pyplot as plt
from myrmsprop import MyRmsprop
from utils import plot_decision_boundary,attn_avg,plot_analysis
from synthetic_dataset import MosaicDataset1
from eval_model import calculate_attn_loss,analyse_data
# %matplotlib inline
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# + id="lGVy-1EllAc_"
# Pre-generated mosaic datasets (object arrays of dicts, hence allow_pickle).
train_data = np.load("train_type4_data.npy",allow_pickle=True)
test_data = np.load("test_type4_data.npy",allow_pickle=True)

# + id="uL771xuGZC5Q"
# Unpack images, labels, and the index of the foreground tile per mosaic.
mosaic_list_of_images = train_data[0]["mosaic_list"]
mosaic_label = train_data[0]["mosaic_label"]
fore_idx = train_data[0]["fore_idx"]
test_mosaic_list_of_images = test_data[0]["mosaic_list"]
test_mosaic_label = test_data[0]["mosaic_label"]
test_fore_idx = test_data[0]["fore_idx"]
# + id="uf76JwkxZCT0"
# One big batch covering the whole 3000-sample training set (no shuffling,
# so batch i always maps to the same attention tensor bg[i] below).
batch = 3000
train_dataset = MosaicDataset1(mosaic_list_of_images, mosaic_label, fore_idx)
train_loader = DataLoader( train_dataset,batch_size= batch ,shuffle=False)
#batch = 2000
#test_dataset = MosaicDataset1(test_mosaic_list_of_images, test_mosaic_label, test_fore_idx)
#test_loader = DataLoader(test_dataset,batch_size= batch ,shuffle=False)

# + id="DOpZfj1bq7wN"
# Learnable attention logits: one 9-way logit vector per mosaic, one
# tensor per batch, seeded per batch for reproducibility.
n_batches = 3000//batch
bg = []
for i in range(n_batches):
    torch.manual_seed(i)
    betag = torch.randn(3000,9)#torch.ones((250,9))/9
    bg.append( betag.requires_grad_() )

# + colab={"base_uri": "https://localhost:8080/"} id="fzb3ii4drXpu" outputId="7ce5daa6-8972-414c-c0a0-2be3e23b7046"
len(bg)
# + id="Am8LcQm1Og-f"
# Build the 27000x27000 indicator matrix H marking the precomputed
# nearest-neighbour pairs; it preconditions the custom RMSprop below.
data = np.load("dist_top1_type4.npy",allow_pickle=True)
indices = data[0]['indices'].numpy()
rows,cols = np.unravel_index(indices,shape=(27000,27000))
H = torch.zeros((27000,27000))
H[rows,cols] =1
#H = np.zeros((27000,27000))
# for i, data in enumerate(train_loader, 0):
#     print(i) # only one batch
#     inputs,_,_ = data
#     inputs = torch.reshape(inputs,(27000,2))
#     dis = (torch.cdist(inputs,inputs)**2).to(dtype=torch.float32).flatten()

# + colab={"base_uri": "https://localhost:8080/"} id="vXCBGTnkosew" outputId="cb7ec9a3-73aa-49f8-a5e4-4a0383c6b059"
print(indices,rows[0],cols[0])
# Sanity check on the number of nonzero entries in H.
# NOTE(review): the original comment claimed 7290000; torch.sum(H) counts
# len(indices) set entries — confirm the intended count.
torch.sum(H)
# + id="HbrMidFCla6h"
class Module2(nn.Module):
    """Two-layer MLP classification head: 2 -> 100 -> 3 with a ReLU in
    between. Attribute names (linear1/linear2) are kept so previously
    saved state_dicts still load."""

    def __init__(self):
        super(Module2, self).__init__()
        self.linear1 = nn.Linear(2, 100)
        self.linear2 = nn.Linear(100, 3)

    def forward(self, x):
        hidden = torch.relu(self.linear1(x))
        return self.linear2(hidden)
# + id="rRqj2VELllkX"
torch.manual_seed(1234)
what_net = Module2().double()
# Warm-start from a previously trained "what" network, then move to GPU.
what_net.load_state_dict(torch.load("type4_what_net.pt"))
what_net = what_net.to("cuda")

# + id="sAY-x6UAwrwE"
# for param in what_net.parameters():
#     param.requires_grad = False

# + id="S633XgMToeN3"
# One custom RMSprop (preconditioned by the neighbour mask H) per batch
# of attention logits; H is kept on CPU alongside the bg tensors.
optim1 = []
H= H.to("cpu")
for i in range(n_batches):
    optim1.append(MyRmsprop([bg[i]],H=H,lr=1))
# + colab={"base_uri": "https://localhost:8080/"} id="qPaYaojinMTA" outputId="91a37744-c532-4331-834d-aeea92453b4a"
# instantiate optimizer for the classifier; the attention logits are
# updated separately by the per-batch MyRmsprop optimizers (optim1).
optimizer_what = optim.RMSprop(what_net.parameters(), lr=0.001)#, momentum=0.9)#,nesterov=True)
criterion = nn.CrossEntropyLoss()
acti = []
analysis_data_tr = []
analysis_data_tst = []
loss_curi_tr = []
loss_curi_tst = []
epochs = 200

# calculate zeroth epoch loss and FTPT values
running_loss,anlys_data,correct,total,accuracy = calculate_attn_loss(train_loader,bg,what_net,criterion)
print('training epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(0,running_loss,correct,total,accuracy))
loss_curi_tr.append(running_loss)
analysis_data_tr.append(anlys_data)

# training starts
for epoch in range(epochs): # loop over the dataset multiple times
    ep_lossi = []
    running_loss = 0.0
    what_net.train()
    for i, data in enumerate(train_loader, 0):
        # get the inputs
        inputs, labels,_ = data
        inputs = inputs.double()
        beta = bg[i] # alpha for ith batch
        #print(labels)
        # NOTE(review): .to("cuda") makes `beta` a non-leaf copy; gradients
        # still flow back to the CPU-resident bg[i] — confirm intended.
        inputs, labels,beta = inputs.to("cuda"),labels.to("cuda"),beta.to("cuda")

        # zero the parameter gradients
        optimizer_what.zero_grad()
        optim1[i].zero_grad()

        # forward + backward + optimize:
        # attention-weighted average of the tiles, then classify.
        avg,alpha = attn_avg(inputs,beta)
        outputs = what_net(avg)
        loss = criterion(outputs, labels)

        # print statistics
        running_loss += loss.item()
        #alpha.retain_grad()
        loss.backward(retain_graph=False)
        # Update both the classifier and this batch's attention logits.
        optimizer_what.step()
        optim1[i].step()

    # Re-evaluate on the full training set after each epoch.
    running_loss_tr,anls_data,correct,total,accuracy = calculate_attn_loss(train_loader,bg,what_net,criterion)
    analysis_data_tr.append(anls_data)
    loss_curi_tr.append(running_loss_tr) #loss per epoch
    print('training epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(epoch+1,running_loss_tr,correct,total,accuracy))
    # Early stop once the training loss is low enough.
    if running_loss_tr<=0.08:
        break

print('Finished Training run ')
analysis_data_tr = np.array(analysis_data_tr)
# + id="AciJnAh5nfug"
# FTPT analysis table: columns track where the attention focused vs. whether
# the prediction was right (focus_true/false x pred_true/false).
columns = ["epochs", "argmax > 0.5" ,"argmax < 0.5", "focus_true_pred_true", "focus_false_pred_true", "focus_true_pred_false", "focus_false_pred_false" ]
df_train = pd.DataFrame()
df_test = pd.DataFrame()
# epoch 0 .. last epoch actually reached (training may early-stop).
df_train[columns[0]] = np.arange(0,epoch+2)
# Division by 30 presumably converts raw counts into percentages (i.e. 3000
# training samples) -- TODO confirm the dataset size.
df_train[columns[1]] = analysis_data_tr[:,-2]/30
df_train[columns[2]] = analysis_data_tr[:,-1]/30
df_train[columns[3]] = analysis_data_tr[:,0]/30
df_train[columns[4]] = analysis_data_tr[:,1]/30
df_train[columns[5]] = analysis_data_tr[:,2]/30
df_train[columns[6]] = analysis_data_tr[:,3]/30
# + colab={"base_uri": "https://localhost:8080/", "height": 272} id="NoQpS_6scRsC" outputId="e0fe7a68-b33b-4663-9a77-f8d14eee5d23"
df_train
# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="EY_j8B274vuH" outputId="883a7858-c052-4a0a-e4c3-4dfa196aa068"
# Plot selected FTPT columns over epochs (helper defined elsewhere in the repo).
plot_analysis(df_train,columns,[0,3,6])
# + id="VCnS6r2_3WdU"
# Convert the learned attention logits to probabilities for inspection/saving.
aph = []
for i in bg:
    aph.append(F.softmax(i,dim=1).detach().numpy())
aph = np.concatenate(aph,axis=0)
# (review) Disabled checkpointing of the full experiment state.
# torch.save({
# 'epoch': 500,
# 'model_state_dict': what_net.state_dict(),
# #'optimizer_state_dict': optimizer_what.state_dict(),
# "optimizer_alpha":optim1,
# "FTPT_analysis":analysis_data_tr,
# "alpha":aph
# }, "type4_what_net_500.pt")
# + colab={"base_uri": "https://localhost:8080/"} id="KVzrDOGS4UxU" outputId="4126054d-c052-4ce5-9f91-ddf67e4e7f02"
aph[0]
# + id="7Ut6ZTAXbvqx"
# Recompute the attention-weighted averages for every batch (no grads needed)
# so the decision boundary can be drawn in the averaged 2-d space.
avrg = []
with torch.no_grad():
    for i, data1 in enumerate(train_loader):
        inputs , labels , fore_idx = data1
        inputs = inputs.double()
        inputs, labels = inputs.to("cuda"),labels.to("cuda")
        beta = bg[i]
        beta = beta.to("cuda")
        avg,alpha = attn_avg(inputs,beta)
        avrg.append(avg.detach().cpu().numpy())
avrg= np.concatenate(avrg,axis=0)
# + id="2KQFYlmTLG0N" colab={"base_uri": "https://localhost:8080/", "height": 374} outputId="8e8c3c1d-79e3-4057-d04a-08f5dd1954ee"
# Overlay the classifier's decision boundary, the raw mosaic data and the
# attention-averaged points (helper defined elsewhere in the repo).
data = np.load("type_4_data.npy",allow_pickle=True)
plot_decision_boundary(what_net,[1,8,2,9],data,bg,avrg)
# + id="0TlpHSOC0hfk"
| 1_mosaic_data_attention_experiments/3_stage_wise_training/Attention_weights_for_every_data/type4_data/init_1/distance_kernel/both_pretrained_what/lr_1/type4_attn_ewts_distance_kernel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #!/usr/bin/env python
# coding: utf-8
# -
import codecs
from datetime import datetime as dt
from datetime import timedelta as td
import json
import sys
import numpy as np
import os
import pandas as pd
import plotly
from plotly import subplots
import plotly.express as px
import plotly.tools as tls
import plotly.graph_objects as go
import plotly.io as pio
import plotly.offline as offline
import sys
if "ipy" in sys.argv[0]:
offline.init_notebook_mode()
from cov19utils import create_basic_plot_figure, \
show_and_clear, moving_average, \
blank2zero, csv2array, \
get_twitter, tweet_with_image, \
get_gpr_predict, FONT_NAME, DT_OFFSET, \
download_if_needed, json2nparr, code2int, age2int, \
get_populations, get_os_idx_of_arr, dump_val_in_arr, \
calc_last1w2w_dif, create_basic_scatter_figure, \
show_and_save_plotly
import re
import requests
def get_gr_name(x):
    """Map a variant code ('a', 'd', 'd+') to its Japanese display label.

    Unknown codes fall back to the generic "variant strain" label.
    """
    labels = {
        'a': "(アルファ株)",
        'd': "(デルタ株)",
        'd+': "(デルタ+株)",
    }
    return labels.get(x, "(変異株)")
# Facility categories for the 4x4 subplot grid; the 1-based list position
# matches the 'id' column of maps.csv.
cats = [
    "飲食店",
    "接待飲食",
    "事業所",
    "福祉施設",
    "医療機関",
    "小学校",
    "中学校",
    "高校",
    "大学",
    "専門学校",
    "警察",
    "自衛隊",
    "教育機関",
    "消防署",
    "運動施設",
    "保育園/幼稚園"]
def get_template_fig(title, subtitles, today_str):
    """Build a dark-themed 4x4 subplot figure with a shared x-axis.

    `subtitles` supplies the 16 per-panel titles; `today_str` is appended to
    the overall figure title. Returns the plotly figure.
    """
    fig = subplots.make_subplots(
        rows=4, cols=4,
        shared_xaxes=True,
        subplot_titles=subtitles,
        horizontal_spacing=0.03,
        vertical_spacing=0.07)
    fig.update_layout(
        title=title + " " + today_str,
        width=800, height=800,
        template='plotly_dark',
        margin={"r": 10, "t": 50, "l": 10, "b": 10},
        showlegend=False)
    return fig
# Load the per-facility case table (Shift-JIS CSV exported from the map data).
df = pd.read_csv("maps.csv", encoding='shift-jis', header=0)
# 'opened' is YYYY/MM/DD; normalize to YYYY-MM-DD for display.
latest_date = df['opened'].iloc[-1].replace('/', '-')
start_date = df['opened'].iloc[0].replace('/', '-')
print("start:{} last:{}".format(start_date, latest_date))
tw_body = "北海道 職域別感染者数 全期間({}~{})".format(start_date, latest_date)
tw_body
fig = get_template_fig(tw_body, cats, "")
xrange = pd.to_datetime([df['opened'].iloc[0], df['opened'].iloc[-1]], format='%Y/%m/%d')
xrange
# Invisible (width 0) trace on the first subplot pinning the shared x-range.
fig.add_trace(go.Scatter(x=xrange, y=[0, 0], mode='lines', line=dict(width=0, color='red')), 1, 1)
cnt = 0
# One subplot per category id (1..16) laid out on the 4x4 grid.
for y in np.arange(4):
    for x in np.arange(4):
        cnt += 1
        # Daily patient totals for this category.
        sub_df = df[df['id'] == cnt].groupby('opened').sum()
        sub_df['opened'] = sub_df.index.values
        sub_df.index.name = None
        sub_df['opened'] = pd.to_datetime(sub_df['opened'], format='%Y/%m/%d')
        sub_df = sub_df.sort_values('opened')
        trace = go.Scatter(
            x=sub_df['opened'], y=sub_df['patients'], mode='lines+markers',
            marker=dict(color='green', size=3),
            line=dict(width=.5, color='red')
        )
        fig.add_trace(trace, y+1, x+1)
fig.update_xaxes(
    range=xrange,
    showgrid=False, showticklabels=False, ticks="",
    zeroline=True)
fig.update_yaxes(rangemode="tozero")
# Render + save, then tweet the image with a link to the hosted page.
show_and_save_plotly(fig, "hokkaido-work.jpg", js=False, show=True, image=True, html=True)
tw = get_twitter()
tw_body += " https://geneasyura.github.io/cov19-hm/hokkaido-work.html "
tweet_with_image(tw, "../docs/images/hokkaido-work.jpg", tw_body)
| csv/hokkaido-work.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="ca510898c048cb93f90e168d10ce75c9ea6bd721"
# # 異常検知基礎講座 通し課題
# + [markdown] _uuid="40533713bd22099ba0ffcb6f23dec8d98ea29fdb"
# ### ハワイには年に数回、例外的に大きな波が来るという。その異常値ともいうべき波を、「外れ値検出」を用い、どの程度の頻度・大きさで発生するかを確かめる。
# ### この大波を事前に予測できれば、波を狙ったサーファーをハワイに呼び寄せることが出来て、ハワイの観光業も潤い、サーファーの満足度を高めることも出来る。
# ### 予測にあたっては、ハワイの波が生まれる場所の一つと言われるオホーツク海の気象情報を用いる。オホーツク海の異常気象がハワイの大波を引き起こすとの仮定に基づき、「外れ値検出」を用いオホーツク海の異常を検出し、ハワイの大波を予測する。
# + [markdown] _uuid="f4291067a53c34f91ed2db88a15558b365620451"
# ### ハワイの波の情報を取得する
# #### • USのNational Weather Serviceより、オアフ島北部ワイメアビーチの波の情報を取得する。(2018年1月初ー11月末の1時間ごとの情報)
# #### ftp://polar.ncep.noaa.gov/pub/history/waves/multi_1/
# + _uuid="856bf4dafd47831714333c47e550125a7d9b6e33"
# import required libraries
import pandas as pd
import pandas.tseries.offsets as offsets
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import scipy.stats as stats #統計に関するライブラリ
from IPython.core.display import display
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')
# Data is downloaded and ready to use!
# 51201はワイメアビーチの観測地点を指す記号
df_data = pd.read_csv("./data/51201/connected.tsv", sep=' ')
print("Row lengths of imported data: ", len(df_data))
# + _uuid="7fe8d1d0db26b5ffb9044249b34c4f55457db8d6"
#まずはHeaderを確認
display(df_data.head())
df_data = df_data.rename(columns={'Unnamed: 0': 'dummy', '2018': 'year', '1': 'month', '1.1': 'day', '0': 'hour',
'3.8': 'wind_speed','75': 'wind_direction','1.5': 'wave_height','13.9': 'peak_period'})
df_data = df_data.drop(['dummy', 'peak_period'], axis=1)
display(df_data.head())
df_data.describe()
print('波の高さは平均で1.4m、95%(±2σ)の波は2.54m以下であるが、最大は5.8mとかなり大きい!')
# + [markdown] _uuid="57c73c6a4caa264ace4d06b9a13aa4e61d64a96b"
# #### 日付の編集
#
#
# +
# Build proper datetime/date columns from the separate year/month/day/hour
# fields (zero-padded so e.g. 2018/1/1 parses as 20180101).
df_data_wave = df_data.copy()
df_data_wave['datetime_str'] = df_data_wave['year'].astype(str) \
    + df_data_wave['month'].astype(str).str.zfill(2) \
    + df_data_wave['day'].astype(str).str.zfill(2) \
    + ' ' + df_data_wave['hour'].astype(str).str.zfill(2)
df_data_wave['datetime_'] = pd.to_datetime(df_data_wave['datetime_str'], format='%Y%m%d %H')
df_data_wave['date_str'] = df_data_wave['year'].astype(str) \
    + df_data_wave['month'].astype(str).str.zfill(2) \
    + df_data_wave['day'].astype(str).str.zfill(2)
df_data_wave['date_'] = pd.to_datetime(df_data_wave['date_str'], format='%Y%m%d')
df_data_wave = df_data_wave.drop(['year','month','day','hour','datetime_str','date_str','wind_direction'], axis=1)
display(df_data_wave.head())
# -
# Plot wave height against time.
df_data_wave.plot(x=df_data_wave.columns[2], y=df_data_wave.columns[1], kind='line', figsize=(15,5))
print("2018年は4-5回、3m超の波が来たようだ")
# Outlier score 1: probability density under a fitted normal distribution
# (low density = unusual wave height).
muhat = df_data_wave["wave_height"].mean()
sigmahat = df_data_wave["wave_height"].std(ddof=0)
df_data_wave['score'] = stats.norm.pdf(df_data_wave["wave_height"], loc=muhat, scale=sigmahat)
display(df_data_wave.head())
df_data_wave.plot(x=df_data_wave.columns[2], y=df_data_wave.columns[4], kind='line', figsize=(15,5))
print('確率密度では異常度があまり際立たない')
# Outlier score 2: information content (negative log density); anomalies
# stand out much more on this scale.
df_data_wave['ascore'] = - np.log(df_data_wave["score"])
display(df_data_wave.head())
df_data_wave.plot(x=df_data_wave.columns[2], y=df_data_wave.columns[5], kind='line', figsize=(15,5))
print('異常値が見やすくなった、キリが良いので5以上を異常値とする')
# Extract the rows whose information content exceeds 5 as anomalies.
print(df_data_wave[df_data_wave['ascore'] > 5])
print('データ数が多すぎて、よく分からない')
# Too many hourly anomalies; aggregate them per day for readability.
print(df_data_wave[df_data_wave['ascore'] > 5].groupby('date_').mean())
print('改めてではあるが、年4回 3~6mの波が来ていることが分かった')
# ### オホーツク海の気象情報を取得する
# #### ・気象庁より、オホーツク海を代表し根室の気象情報を取得する。(2018年1月初ー11月末の1時間ごとの情報)
# #### https://www.data.jma.go.jp/risk/obsdl/index.php#!table
# Load the Nemuro (Hokkaido) weather table as a proxy for the Sea of Okhotsk.
df_data_2 = pd.read_csv("./data/nemuro/connected.csv")
print("Row lengths of imported data: ", len(df_data_2))
# Parse timestamps and derive a per-day date column.
df_data_2['datetime_'] = pd.to_datetime(df_data_2['datetime_'], format='%Y/%m/%d %H:00')
df_data_2['date_'] = df_data_2['datetime_'].dt.date
display(df_data_2.head())
df_data_2.describe()
print('異常値として幾つか取れそうであるが、まずは気圧を用いる')
# Keep only the pressure-related columns and plot them over time.
df_data_nemuro = df_data_2.copy()
df_data_nemuro = df_data_nemuro.drop(['Temperature','rain','wind_direction','vapor_presure'], axis=1)
df_data_nemuro.plot(x=df_data_nemuro.columns[0], y=df_data_nemuro.columns[2], kind='line', figsize=(15,5), rot=90)
print('年に数回、大きく低気圧もしくは高気圧に変化しているのが見て取れる')
# Outlier score 1: normal probability density of the air pressure.
muhat = df_data_nemuro["air_pressure"].mean()
sigmahat = df_data_nemuro["air_pressure"].std(ddof=0)
df_data_nemuro['score'] = stats.norm.pdf(df_data_nemuro["air_pressure"], loc=muhat, scale=sigmahat)
display(df_data_nemuro.head())
df_data_nemuro.plot(x=df_data_nemuro.columns[0], y=df_data_nemuro.columns[5], kind='line', figsize=(15,5), rot=90)
print('やはり確率密度では異常度があまり際立たない')
# Outlier score 2: information content (negative log density).
df_data_nemuro['ascore'] = - np.log(df_data_nemuro["score"])
display(df_data_nemuro.head())
df_data_nemuro.plot(x=df_data_nemuro.columns[0], y=df_data_nemuro.columns[6], kind='line', figsize=(15,5))
print('異常値が見やすくなった、キリがいいので6以上を異常値とする')
# Extract rows whose information content exceeds 6 as anomalies.
print(df_data_nemuro[df_data_nemuro['ascore'] > 6])
print('データ数が多すぎて、よく分からない')
# Aggregate the anomalous hours per day for readability.
print(df_data_nemuro[df_data_nemuro['ascore'] > 6].groupby('date_').mean())
print('年9回ほど基本的には低気圧に大きく変動していることが分かった')
# ### ハワイの波とオホーツク海の関連を見る
# #### • 関連性が見れれば、オホーツク海の異常気象により、ハワイの波の異常値を観測できる
# Join the two hourly series on timestamp.
df_merged = pd.merge(df_data_wave, df_data_nemuro, on='datetime_')
display(df_merged.head())
# Plot both anomaly-score series on the same axes.
df_data_nemuro_tmp = df_data_nemuro.copy()
df_merged = pd.merge(df_data_wave, df_data_nemuro_tmp, on='datetime_')
df_merged = df_merged.drop(['wind_speed','wave_height','date__x','score_x','wind','air_pressure','sea_preasure','date__y','score_y'], axis=1)
df_merged = df_merged.rename(columns={'ascore_x': 'Hawaii Wave Height', 'ascore_y': 'Nemuro Air Pressure'})
df_merged.plot(x=df_merged.columns[0], kind='line', figsize=(15,5))
print('ハワイもオホーツク海も1月に大きなピークがあり、数日の誤差があるように見える。オホーツク海の異常気象の数日後にハワイに大波が来るのではないかと仮定。')
# Quick test: how to read a single correlation coefficient out of .corr().
df_merged = pd.merge(df_data_wave, df_data_nemuro, on='datetime_')
print(df_merged[['ascore_x','ascore_y']].corr())
print(df_merged[['ascore_x','ascore_y']].corr().at['ascore_x','ascore_y'])
# Quick test: shifting the timestamps by one hour.
df_data_nemuro_tmp = df_data_nemuro.copy()
display(df_data_nemuro_tmp.head())
df_data_nemuro_tmp['datetime_'] = df_data_nemuro_tmp['datetime_'] + offsets.Hour(1)
display(df_data_nemuro_tmp.head())
# Lag search: shift Nemuro's series forward 0..199 hours and record the
# correlation between the two anomaly scores at each lag.
df_data_nemuro_tmp = df_data_nemuro.copy()
hs_ = []
corrs_ = []
max_corr = 0
max_h = 0
for h in range(0,200,1):
    df_merged = pd.merge(df_data_wave, df_data_nemuro_tmp, on='datetime_')
    corr_ = df_merged[['ascore_x','ascore_y']].corr().at['ascore_x','ascore_y']
    hs_.append(h)
    corrs_.append(corr_)
    if(corr_>max_corr):
        max_corr=corr_
        max_h=h
    # Shift AFTER measuring, so iteration h corresponds to a lag of h hours.
    df_data_nemuro_tmp['datetime_'] = df_data_nemuro_tmp['datetime_'] + offsets.Hour(1)
# Show the lag-vs-correlation curve and the best lag found.
plt.plot(hs_, corrs_)
print('相関が最大となるのは', max_h,'時間後、値は',max_corr)
print('オホーツク海の異常気象の4日と11時間後に、ハワイに大波が出現している')
# Overlay the two series at the best lag (max_h; 107 hours in the authors' run).
df_data_nemuro_tmp = df_data_nemuro.copy()
df_data_nemuro_tmp['datetime_'] = df_data_nemuro_tmp['datetime_'] + offsets.Hour(max_h)
df_merged = pd.merge(df_data_wave, df_data_nemuro_tmp, on='datetime_')
df_merged = df_merged.drop(['wind_speed','wave_height','date__x','score_x','wind','air_pressure','sea_preasure','date__y','score_y'], axis=1)
df_merged = df_merged.rename(columns={'ascore_x': 'Hawaii Wave Height', 'ascore_y': 'Nemuro Air Pressure'})
df_merged.plot(x=df_merged.columns[0], kind='line', figsize=(15,5))
print('オホーツク海の気象情報量を107時間シフトすると、ハワイの波の情報量がいくつかのポイントで重なった')
# ### 課題
# #### ・ピークが107時間の前後で現れているケースがある。前後する要因は何だろうか?
# #### ・オホーツク海の異常気象がハワイの大波とならないケース、オホーツク海の異常気象無しにハワイに大波となるケースがある。要因は何だろうか?
| Day1_work_Yam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="P9bJCDjdlgG6" colab_type="text"
# # **Spit some [tensor] flow**
#
# We need to learn the intricacies of tensorflow to master deep learning
#
# `This is how we write code`
# + id="aQwc0re5mFld" colab_type="code" outputId="c48a10df-b9c3-4500-9b09-681c6701caa3" colab={"base_uri": "https://localhost:8080/", "height": 35}
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
print(tf.__version__)
# + id="qyw8HvOuBEZm" colab_type="code" outputId="6975a348-9010-46b7-fd39-61313736ea7f" colab={"base_uri": "https://localhost:8080/", "height": 35}
from sklearn.datasets import load_breast_cancer
# Binary classification dataset: 569 samples is the sklearn default; exact
# shape is printed below rather than assumed.
data = load_breast_cancer()
# NOTE(review): bare `type(data)` is a no-op here (not the last expression of
# the cell), so its value is never shown.
type(data)
print(data.keys())
# + id="41JwZflmB4qr" colab_type="code" outputId="6bf6d970-d413-47d4-8ea6-eb8f74dcef6e" colab={"base_uri": "https://localhost:8080/", "height": 208}
print(data.target_names)
print("============================================")
print(data.feature_names)
# + id="7hQGKjLvBT6z" colab_type="code" colab={}
X = data.data
# + id="ovyPUHaZBqu8" colab_type="code" colab={}
y = data.target
# + id="BSOcVLhKCGOa" colab_type="code" colab={}
# TRAIN TEST SPLIT (80/20, unseeded so each run differs)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
# + id="bqh-eNkNCb7D" colab_type="code" colab={}
N, D = X_train.shape
# + id="WA8o1DP8CvGT" colab_type="code" colab={}
# Standardize features: fit on the training split only, then apply the same
# transform to the test split (avoids leakage).
from sklearn.preprocessing import StandardScaler
scaleObj = StandardScaler()
X_train = scaleObj.fit_transform(X_train)
X_test = scaleObj.transform(X_test)
# + id="mIv1FdhRDRrc" colab_type="code" outputId="62513536-1337-437f-9576-83fd16f09790" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Single-neuron sigmoid model over D standardized features, i.e. logistic
# regression trained with Adam.
model = tf.keras.models.Sequential([
    tf.keras.layers.Input(shape=(D,)),
    tf.keras.layers.Dense(1,activation='sigmoid')
])
model.compile(
    optimizer = 'adam',
    loss = 'binary_crossentropy',
    metrics = ['accuracy']
)
report = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=100)
# + id="pQE2TTKnh75m" colab_type="code" colab={}
# Predicted probabilities in [0, 1].
y_pred = model.predict(X_test)
# + id="WO32VafQiDMO" colab_type="code" colab={}
# Round to hard 0/1 labels and flatten the (n, 1) output.
y_pred = np.round(y_pred).flatten()
# + id="qy3CyzCFiKiN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="11a6221d-bae4-4d86-fcac-c08cf321bff5"
print("Accuracy: ", np.mean(y_pred == y_test))
# + id="LuietUFpEG5a" colab_type="code" outputId="dafd34bb-dba5-4cbe-ff15-69579212ec7c" colab={"base_uri": "https://localhost:8080/", "height": 87}
# evaluate() returns [loss, accuracy] given the compile metrics above.
print("Train eval: ", model.evaluate(X_train, y_train))
print("Test eval: ", model.evaluate(X_test, y_test))
# + [markdown] id="t9Gs0ZQhil2O" colab_type="text"
# ## SCIKIT LEARN EVALUATION METRICS
# + id="dPz8dvWyifJm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="9bf5fdc2-379c-413c-dbca-452f78b142aa"
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
con_mat = confusion_matrix(y_test, y_pred)
con_mat
# + id="zraSRtcSjEtN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="1c6054b0-afa3-4130-988d-1f755dabcea4"
# Computing the overall accuracy score
from sklearn.metrics import accuracy_score
acc_score = accuracy_score(y_test, y_pred)
acc_score
# + id="clLHxtHzjApv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="1e270356-3a57-4e97-e473-3a868f5111ca"
# Per-class precision/recall/F1 report
from sklearn.metrics import classification_report
class_report = classification_report(y_test, y_pred)
print(class_report)
| Tensorflow_2X_Notebooks/Demo10_ClassificationEvaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# # Azure Notebooks - Configuring the Default Python Version
#
# __Notebook Version:__ 1.0<br>
#
#
# ### Description
# Azure Notebooks currently defaults to Python 3.5. Many of the notebooks and dependent libraries require a minimum of Python 3.6.
#
# Carry out the following steps on your Azure Notebooks project to set the correct defaults.
#
# Note: you cannot configure the Python kernel version directly; instead, you set it by supplying a requirements.txt — a list of Python dependencies together with the minimum versions required by the environment.
# ## 1. Click on the cog icon - Project Settings
# Note you may need to navigate to the project root to be able to set this successfully.
# 
#
# ## 2. Select the Environment tab and click Add
# 
# ## 3. Select Requirements.txt from the first drop-down
# 
# ## 4. Select the requirements.txt file from the list
# This should have been cloned from the Azure Sentinel github when you first created the project.
#
# The drop-down list might be quite long. Keep hitting 'r' to jump to files beginning with the letter 'r'.
#
# 
# ## 5. Select the Python version - 3.6
# You can select a later version such as 3.7 if this is available.
#
# 
# ## 6. Save the Setup Step
# 
| Notebooks/HowTos/AzureNotebooks-ConfigurePythonVersion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="DweYe9FcbMK_"
# ##### Copyright 2019 The TensorFlow Authors.
#
#
# + cellView="form" colab={} colab_type="code" id="AVV2e0XKbJeX"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="sUtoed20cRJJ"
# # CSV 데이터 불러오기
# + [markdown] colab_type="text" id="1ap_W4aQcgNT"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/load_data/csv"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ko/tutorials/load_data/csv.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/ko/tutorials/load_data/csv.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub) 소스 보기</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/ko/tutorials/load_data/csv.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />노트북 다운로드하기</a>
# </td>
# </table>
# -
# Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도 불구하고 공식 영문 문서의 내용과 일치하지 않을 수 있습니다. 이 번역에 개선할 부분이 있다면 tensorflow/docs 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다. 문서 번역이나 리뷰에 참여하려면 <EMAIL>로 메일을 보내주시기 바랍니다
# + [markdown] colab_type="text" id="C-3Xbt0FfGfs"
# 이 튜토리얼에서는 CSV 파일을 불러와서 `tf.data.Dataset`로 변환하는 방법을 살펴봅니다.
#
# 이 튜토리얼에서 사용된 데이터는 타이타닉 승객 목록입니다. 이 모델은 연령, 성별, 티켓 등급 및 개인 여행 여부와 같은 특성을 기반으로 승객의 생존 가능성을 예측합니다.
# + [markdown] colab_type="text" id="fgZ9gjmPfSnK"
# ## 설치
# + colab={} colab_type="code" id="I4dwMQVQMQWD"
try:
    # The %tensorflow_version magic only exists in Colab; in script form it is
    # an escaped-magic comment, so `pass` keeps the try-body non-empty (the
    # original block was a SyntaxError when run as a plain Python script).
    # %tensorflow_version 2.x
    pass
except Exception:
    pass
# + colab={} colab_type="code" id="baYFZMW_bJHh"
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import numpy as np
import tensorflow as tf
# + colab={} colab_type="code" id="Ncf5t6tgL5ZI"
TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv"
# Download (and cache) the Titanic CSVs via the Keras file cache.
train_file_path = tf.keras.utils.get_file("train.csv", TRAIN_DATA_URL)
test_file_path = tf.keras.utils.get_file("eval.csv", TEST_DATA_URL)
# + colab={} colab_type="code" id="4ONE94qulk6S"
# Make numpy values easier to read.
np.set_printoptions(precision=3, suppress=True)
# + [markdown] colab_type="text" id="Wuqj601Qw0Ml"
# ## 데이터 불러오기
#
# 먼저 CSV 파일의 상단을 살펴보고, 데이터 형식이 어떻게 지정되었는지 확인해봅시다.
# + colab={} colab_type="code" id="54Dv7mCrf9Yw"
# !head {train_file_path}
# + [markdown] colab_type="text" id="jC9lRhV-q_R3"
# [Pandas로 불러오기](pandas.ipynb)한 후에 NumPy 배열을 TensorFlow에 전달해도 됩니다. 큰 파일 모음을 불러와야 하거나, [TensorFlow and tf.data](../../guide/data.ipynb)와 통합된 로더를 사용해야 하는 경우에는 `tf.data.experimental.make_csv_dataset` 함수를 사용합니다.
# + [markdown] colab_type="text" id="67mfwr4v-mN_"
# 명시적으로 식별해야하는 유일한 열은 모델이 예측하려는 값을 가진 열입니다.
# + colab={} colab_type="code" id="iXROZm5f3V4E"
LABEL_COLUMN = 'survived'
LABELS = [0, 1]
# + [markdown] colab_type="text" id="t4N-plO4tDXd"
# 이제 파일에서 CSV 데이터를 읽고 데이터세트를 만듭니다.
#
# (전체 문서는 `tf.data.experimental.make_csv_dataset`을 참조하십시오.)
# + colab={} colab_type="code" id="yIbUscB9sqha"
def get_dataset(file_path, **kwargs):
    """Build a tf.data.Dataset of (features-dict, label) batches from a CSV.

    Extra keyword arguments are forwarded to make_csv_dataset
    (e.g. column_names, select_columns, column_defaults).
    """
    dataset = tf.data.experimental.make_csv_dataset(
        file_path,
        batch_size=5,  # deliberately small so example batches are easy to inspect
        label_name=LABEL_COLUMN,
        na_value="?",
        num_epochs=1,
        ignore_errors=True,
        **kwargs)
    return dataset

raw_train_data = get_dataset(train_file_path)
raw_test_data = get_dataset(test_file_path)
# + colab={} colab_type="code" id="v4oMO9MIxgTG"
def show_batch(dataset):
    """Print one batch from `dataset`: each feature name and its values."""
    for features, _label in dataset.take(1):
        for name, tensor in features.items():
            print("{:20s}: {}".format(name, tensor.numpy()))
# + [markdown] colab_type="text" id="vHUQFKoQI6G7"
# 데이터세트의 각 항목은 배치이며 (*많은 예*, *많은 레이블*)와 같은 튜플로 표시됩니다. 예제의 데이터는 행 기반 텐서가 아닌 열 기반 텐서로 구성되며, 각 텐서는 배치 크기(이 경우는 5)만큼의 요소가 있습니다.
#
# 직접 보는 것이 이해하는데 더 도움이 될 것입니다.
# + colab={} colab_type="code" id="HjrkJROoxoll"
show_batch(raw_train_data)
# + [markdown] colab_type="text" id="YOYKQKmMj3D6"
# 보이는 바와 같이 CSV의 열 이름이 지정됩니다. 데이터세트 생성자가 이름을 자동으로 선택합니다. 작업중인 파일의 첫 번째 행이 열 이름이 아니라면 `make_csv_dataset` 함수의`column_names` 인수에 문자열 리스트로 이름을 전달합니다.
# + colab={} colab_type="code" id="2Av8_9L3tUg1"
CSV_COLUMNS = ['survived', 'sex', 'age', 'n_siblings_spouses', 'parch', 'fare', 'class', 'deck', 'embark_town', 'alone']
temp_dataset = get_dataset(train_file_path, column_names=CSV_COLUMNS)
show_batch(temp_dataset)
# + [markdown] colab_type="text" id="gZfhoX7bR9u4"
# 이 예에서는 사용 가능한 모든 열을 사용합니다. 데이터세트에서 일부 열을 생략해야 하는 경우에는 사용하려는 열의 목록을 작성하고 생성자의 `select_columns`인수(선택적)에 전달하면 됩니다.
# + colab={} colab_type="code" id="S1TzSkUKwsNP"
SELECT_COLUMNS = ['survived', 'age', 'n_siblings_spouses', 'class', 'deck', 'alone']
temp_dataset = get_dataset(train_file_path, select_columns=SELECT_COLUMNS)
show_batch(temp_dataset)
# + [markdown] colab_type="text" id="9cryz31lxs3e"
# ## 데이터 전처리
#
# CSV 파일은 다양한 데이터 유형을 포함할 수 있습니다. 일반적으로 데이터를 모델에 전달하기 전에 이러한 혼합 유형들을 고정 길이 벡터로 변환하고자 할 것입니다.
#
# TensorFlow에는 일반적인 입력 변환을 위한 내장 시스템이 있습니다 :`tf.feature_column`. 자세한 내용은 [이 튜토리얼] (../ keras / feature_columns)을 참조하십시오.
#
#
# ([nltk](https://www.nltk.org/)이나 [sklearn](https://scikit-learn.org/stable/))과 같은 도구로 데이터를 전처리하고, 처리된 데이터를 TensorFlow에 전달하면 됩니다.
#
#
# 모델 내에서 전처리를 할 때의 주요 이점은 전처리 과정을 모델에 포함시킬 수 있다는 것입니다. 이렇게 하면 원시 데이터를 모델에 직접 전달해도 됩니다.
# + [markdown] colab_type="text" id="9AsbaFmCeJtF"
# ### 연속형 데이터
# + [markdown] colab_type="text" id="Xl0Q0DcfA_rt"
# 데이터가 이미 적절한 숫자 형식이라면 데이터를 모델에 전달하기 전에 벡터로 묶을 수 있습니다:
# + colab={} colab_type="code" id="4Yfji3J5BMxz"
# Restrict to the already-numeric columns; column_defaults both fills missing
# values and fixes each column's dtype.
SELECT_COLUMNS = ['survived', 'age', 'n_siblings_spouses', 'parch', 'fare']
DEFAULTS = [0, 0.0, 0.0, 0.0, 0.0]
temp_dataset = get_dataset(train_file_path,
                           select_columns=SELECT_COLUMNS,
                           column_defaults = DEFAULTS)
show_batch(temp_dataset)
# + colab={} colab_type="code" id="zEUhI8kZCfq8"
# Grab one concrete (features, labels) batch for the demonstrations below.
example_batch, labels_batch = next(iter(temp_dataset))
# + [markdown] colab_type="text" id="IP45_2FbEKzn"
# 다음은 모든 열을 묶는 간단한 함수입니다:
# + colab={} colab_type="code" id="JQ0hNSL8CC3a"
def pack(features, label):
    """Stack every feature column into one dense tensor; pass label through."""
    stacked = tf.stack(list(features.values()), axis=-1)
    return stacked, label
# + [markdown] colab_type="text" id="75LA9DisEIoE"
# 이 함수를 데이터세트의 각 요소에 적용합니다:
# + colab={} colab_type="code" id="VnP2Z2lwCTRl"
packed_dataset = temp_dataset.map(pack)
for features, labels in packed_dataset.take(1):
print(features.numpy())
print()
print(labels.numpy())
# + [markdown] colab_type="text" id="1VBvmaFrFU6J"
# 혼합 데이터 유형이 있는 경우 이러한 단순 숫자 필드를 분리할 수 있습니다. 이를 `tf.feature_column` api로 처리할 수도 있지만 약간의 오버 헤드를 발생시키므로, 정말 필요한 경우가 아니라면 피하는 것이 좋습니다. 혼합 데이터세트로 되돌아가겠습니다.
# + colab={} colab_type="code" id="ad-IQ_JPFQge"
show_batch(raw_train_data)
# + colab={} colab_type="code" id="HSrYNKKcIdav"
example_batch, labels_batch = next(iter(temp_dataset))
# + [markdown] colab_type="text" id="p5VtThKfGPaQ"
# 따라서 숫자 특성 리스트를 선택하고 이를 단일 열로 묶는 좀더 일반적인 전처리기를 정의합니다.
# + colab={} colab_type="code" id="5DRishYYGS-m"
class PackNumericFeatures(object):
    """Callable dataset transform that pops the named numeric columns out of
    the features dict, casts them to float32 and packs them into a single
    'numeric' tensor (shape: batch x len(names))."""

    def __init__(self, names):
        self.names = names

    def __call__(self, features, labels):
        columns = [tf.cast(features.pop(name), tf.float32)
                   for name in self.names]
        features['numeric'] = tf.stack(columns, axis=-1)
        return features, labels
# + colab={} colab_type="code" id="1SeZka9AHfqD"
NUMERIC_FEATURES = ['age','n_siblings_spouses','parch', 'fare']
packed_train_data = raw_train_data.map(
PackNumericFeatures(NUMERIC_FEATURES))
packed_test_data = raw_test_data.map(
PackNumericFeatures(NUMERIC_FEATURES))
# + colab={} colab_type="code" id="wFrw0YobIbUB"
show_batch(packed_train_data)
# + colab={} colab_type="code" id="_EPUS8fPLUb1"
example_batch, labels_batch = next(iter(packed_train_data))
# + [markdown] colab_type="text" id="o2maE8d2ijsq"
# #### 데이터 정규화
#
# 연속형 데이터는 항상 정규화되어야 합니다.
# + colab={} colab_type="code" id="WKT1ASWpwH46"
import pandas as pd
desc = pd.read_csv(train_file_path)[NUMERIC_FEATURES].describe()
desc
# + colab={} colab_type="code" id="cHHstcKPsMXM"
MEAN = np.array(desc.T['mean'])
STD = np.array(desc.T['std'])
# + colab={} colab_type="code" id="REKqO_xHPNx0"
def normalize_numeric_data(data, mean, std):
    """Standardize `data`: subtract the (column) mean, divide by the std."""
    centered = data - mean
    return centered / std
# + [markdown] colab_type="text" id="VPsoMUgRCpUM"
# 이제 숫자 열을 만듭니다. `tf.feature_columns.numeric_column`API는 `normalizer_fn`인수를 받는데, 이 함수는 각 배치마다 실행될 것입니다.
#
# [`functools.partial`](https://docs.python.org/3/library/functools.html#functools.partial)을 사용하여 `MEAN`과 `STD`를 normalizer_fn에 바인딩합니다.
# + colab={} colab_type="code" id="Bw0I35xRS57V"
# 생성한 것을 바로 확인해 보세요.
normalizer = functools.partial(normalize_numeric_data, mean=MEAN, std=STD)
numeric_column = tf.feature_column.numeric_column('numeric', normalizer_fn=normalizer, shape=[len(NUMERIC_FEATURES)])
numeric_columns = [numeric_column]
numeric_column
# + [markdown] colab_type="text" id="HZxcHXc6LCa7"
# 모델을 학습시킬 때 이 피쳐 열을 포함시키고, 이 숫자 데이터 블록을 가운데에 배치합니다:
# + colab={} colab_type="code" id="b61NM76Ot_kb"
example_batch['numeric']
# + colab={} colab_type="code" id="j-r_4EAJAZoI"
numeric_layer = tf.keras.layers.DenseFeatures(numeric_columns)
numeric_layer(example_batch).numpy()
# + [markdown] colab_type="text" id="M37oD2VcCO4R"
# 여기서 사용된 평균 기반 정규화는 각 열의 평균을 미리 알아야합니다.
# + [markdown] colab_type="text" id="tSyrkSQwYHKi"
# ### 범주형 데이터
#
# CSV 데이터의 일부 열은 범주형 열입니다. 즉, 컨텐츠는 제한된 옵션 중의 하나여야 합니다.
#
# `tf.feature_column`API를 사용하여 각 범주형 열에 대해 `tf.feature_column.indicator_column`형의 컬렉션을 생성합니다.
# + colab={} colab_type="code" id="mWDniduKMw-C"
# Vocabulary for each categorical column.
CATEGORIES = {
    'sex': ['male', 'female'],
    'class' : ['First', 'Second', 'Third'],
    'deck' : ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'],
    'embark_town' : ['Cherbourg', 'Southhampton', 'Queenstown'],
    'alone' : ['y', 'n']
}
# + colab={} colab_type="code" id="kkxLdrsLwHPT"
# One one-hot (indicator) feature column per categorical field.
categorical_columns = []
for feature, vocab in CATEGORIES.items():
    cat_col = tf.feature_column.categorical_column_with_vocabulary_list(
        key=feature, vocabulary_list=vocab)
    categorical_columns.append(tf.feature_column.indicator_column(cat_col))
# + colab={} colab_type="code" id="H18CxpHY_Nma"
# Inspect what was just created.
categorical_columns
# + colab={} colab_type="code" id="p7mACuOsArUH"
# Apply the columns to the example batch to see the one-hot encoding.
categorical_layer = tf.keras.layers.DenseFeatures(categorical_columns)
print(categorical_layer(example_batch).numpy()[0])
# + [markdown] colab_type="text" id="R7-1QG99_1sN"
# 이것은 나중에 모델을 만들 때 데이터 처리 입력의 일부가 됩니다.
# + [markdown] colab_type="text" id="kPWkC4_1l3IG"
# ### 결합된 전처리층
# + [markdown] colab_type="text" id="R3QAjo1qD4p9"
# 두 개의 피처 열 컬렉션을 결합하고 이를 `tf.keras.layers.DenseFeatures`에 전달하여 입력층을 만듭니다. 이 입력층으로 두 개의 입력 유형을 추출하고 전처리할 것입니다.
# + colab={} colab_type="code" id="3-OYK7GnaH0r"
preprocessing_layer = tf.keras.layers.DenseFeatures(categorical_columns+numeric_columns)
# + colab={} colab_type="code" id="m7_U_K0UMSVS"
print(preprocessing_layer(example_batch).numpy()[0])
# + [markdown] colab_type="text" id="DlF_omQqtnOP"
# ## 모델 만들기
# + [markdown] colab_type="text" id="lQoFh16LxtT_"
# `preprocessing_layer`부터 시작하는 `tf.keras.Sequential`을 만듭니다.
# + colab={} colab_type="code" id="3mSGsHTFPvFo"
# Classifier: the preprocessing layer (feature columns) feeding two hidden
# ReLU layers and a single sigmoid output for binary survival prediction.
model = tf.keras.Sequential([
    preprocessing_layer,
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(
    loss='binary_crossentropy',
    optimizer='adam',
    metrics=['accuracy'])
# + [markdown] colab_type="text" id="hPdtI2ie0lEZ"
# ## 훈련, 평가 및 예측
# + [markdown] colab_type="text" id="8gvw1RE9zXkD"
# 이제 모델을 인스턴스화하고 훈련시킬 수 있습니다.
# + colab={} colab_type="code" id="sW-4XlLeEQ2B"
# Shuffle the packed training set; leave the test set in its original order.
train_data = packed_train_data.shuffle(500)
test_data = packed_test_data
# + colab={} colab_type="code" id="Q_nm28IzNDTO"
model.fit(train_data, epochs=20)
# + [markdown] colab_type="text" id="QyDMgBurzqQo"
# Once the model is trained, its accuracy can be checked on the `test_data` set.
# + colab={} colab_type="code" id="eB3R3ViVONOp"
test_loss, test_accuracy = model.evaluate(test_data)
print('\n\nTest Loss {}, Test Accuracy {}'.format(test_loss, test_accuracy))
# + [markdown] colab_type="text" id="sTrn_pD90gdJ"
# Use `tf.keras.Model.predict` to infer labels for a batch or a dataset of batches.
# + colab={} colab_type="code" id="Qwcx74F3ojqe"
predictions = model.predict(test_data)
# Show a few results.  NOTE(review): predictions[:10] is paired with labels
# from the FIRST batch only (batch_size=5), so zip truncates to 5 pairs;
# those 5 do line up because test_data is not shuffled.
for prediction, survived in zip(predictions[:10], list(test_data)[0][1][:10]):
    print("Predicted survival: {:.2%}".format(prediction[0]),
          " | Actual outcome: ",
          ("SURVIVED" if bool(survived) else "DIED"))
| site/ko/tutorials/load_data/csv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + jupyter={"outputs_hidden": true}
import numpy as np
import matplotlib.pyplot as plt

# + jupyter={"outputs_hidden": true}
# Write a plain-text file; `with` guarantees the handle is closed (and
# avoids shadowing the builtin name `file` at module level afterwards).
with open('testfile.txt', 'w') as file:
    file.write("Hello World!\nWe are testing out file writing abilities")

# + jupyter={"outputs_hidden": true}
# Build the data BEFORE writing it.  The original cells only worked when run
# out of order: xdata/ydata were referenced before being defined, and
# np.loadtxt was called before the file was closed/flushed.
xdata = np.linspace(0, 1, 10)
ydata = 2 * xdata

# + jupyter={"outputs_hidden": false}
# Write x/y pairs as tab-separated lines; the file is closed (and therefore
# flushed) before it is read back below.
with open('testdata.txt', 'w') as file1:
    for i in range(len(xdata)):
        file1.write(str(xdata[i]) + '\t' + str(ydata[i]) + '\n')

# + jupyter={"outputs_hidden": false}
# Read the two-column file back into an (N, 2) array.
textfiledata = np.loadtxt('testdata.txt')

# + jupyter={"outputs_hidden": false}
textfiledata

# + jupyter={"outputs_hidden": true}
textfiledatax = textfiledata[:, 0]
textfiledatay = textfiledata[:, 1]

# + jupyter={"outputs_hidden": false}
plt.plot(textfiledatax, textfiledatay)

# + jupyter={"outputs_hidden": true}
| physics/2002/FileRW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.3 64-bit (''base'': conda)'
# name: python3
# ---
# default_exp model_fn
# %load_ext autoreload
# %autoreload 2
import os
# Hide all GPUs from TensorFlow so this notebook runs on CPU only.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# # Multitask Learning Model
# Multitask Learning Model
#
# +
# export
from typing import Dict, Tuple
from inspect import signature
from copy import copy
from loguru import logger
import tensorflow as tf
from tensorflow.python.util.nest import flatten_with_joined_string_paths
import transformers
from m3tl.modeling import MultiModalBertModel
from m3tl.params import Params
from m3tl.utils import (get_embedding_table_from_model, get_transformer_main_model, create_dict_from_nested_model, variable_summaries, get_phase, set_phase)
from m3tl.special_tokens import TRAIN, EVAL, PREDICT
@tf.function
def filter_loss(loss, features, problem):
    """Return `loss` unchanged, or 0.0 when `problem` is inactive in the batch.

    A problem is considered inactive when the mean of its
    `<problem>_loss_multiplier` feature is zero, i.e. no row in the batch
    belongs to that problem.
    """
    multiplier = features['%s_loss_multiplier' % problem]
    is_inactive = tf.reduce_mean(input_tensor=multiplier) == 0
    # Assign-then-return keeps a single exit point, which is friendly to
    # autograph's control-flow conversion inside tf.function.
    if is_inactive:
        result = 0.0
    else:
        result = loss
    return result
class BertMultiTaskBody(tf.keras.Model):
    """Model to extract bert features and dispatch corresponding rows to each problem_chunk.
    for each problem chunk, we extract corresponding features
    and hidden features for that problem. The reason behind this
    is to save computation for downstream processing.
    For example, we have a batch of two instances and they're from
    problem a and b respectively:
    Input:
    [{'input_ids': [1,2,3], 'a_loss_multiplier': 1, 'b_loss_multiplier': 0},
    {'input_ids': [4,5,6], 'a_loss_multiplier': 0, 'b_loss_multiplier': 1}]
    Output:
    {
    'a': {'input_ids': [1,2,3], 'a_loss_multiplier': 1, 'b_loss_multiplier': 0}
    'b': {'input_ids': [4,5,6], 'a_loss_multiplier': 0, 'b_loss_multiplier': 1}
    }
    """
    def __init__(self, params: Params, name='BertMultiTaskBody'):
        """Build the shared transformer body.

        Args:
            params: m3tl Params object carrying the run configuration.
            name: Keras model name.
        """
        super(BertMultiTaskBody, self).__init__(name=name)
        self.params = params
        self.bert = MultiModalBertModel(params=self.params)
        # Optional projection of the pooled output to a custom size.
        if self.params.custom_pooled_hidden_size:
            self.custom_pooled_layer = tf.keras.layers.Dense(
                self.params.custom_pooled_hidden_size, activation=tf.keras.activations.selu)
        else:
            self.custom_pooled_layer = None
    # @tf.function
    def get_features_for_problem(self, features, hidden_feature, problem):
        """Select the batch rows that belong to `problem`.

        In PREDICT phase everything is passed through untouched; otherwise
        rows are selected where `<problem>_loss_multiplier` is non-zero.
        Returns a (features, hidden_features) pair restricted to those rows.
        """
        # get features with ind == 1
        mode = get_phase()
        if mode == PREDICT:
            feature_this_round = features
            hidden_feature_this_round = hidden_feature
        else:
            multiplier_name = '%s_loss_multiplier' % problem
            # record_ind = tf.where(tf.cast(
            #     tf.squeeze(features[multiplier_name]), tf.bool))
            # Indices of rows whose multiplier is non-zero for this problem.
            record_ind = tf.where(tf.cast(features[multiplier_name], tf.bool))
            hidden_feature_this_round = {}
            for hidden_feature_name in hidden_feature:
                if hidden_feature_name != 'embed_table':
                    # Gather selected rows; squeeze removes the extra axis
                    # introduced by tf.where's (n, 1) index shape.
                    hidden_feature_this_round[hidden_feature_name] = tf.squeeze(tf.gather(
                        hidden_feature[hidden_feature_name], record_ind, axis=0
                    ), axis=1)
                    # Restore a static shape (batch dim unknown) lost by gather.
                    hidden_feature_this_round[hidden_feature_name].set_shape(
                        [None, *hidden_feature[hidden_feature_name].shape.as_list()[1:]])
                else:
                    # The embedding table is batch-independent: pass through.
                    hidden_feature_this_round[hidden_feature_name] = hidden_feature[hidden_feature_name]
            feature_this_round = {}
            for features_name in features:
                feature_this_round[features_name] = tf.gather_nd(
                    features[features_name],
                    record_ind)
        return feature_this_round, hidden_feature_this_round
    def call(self, inputs: Dict[str, tf.Tensor]) -> Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]]:
        """Run the transformer and package its outputs by hidden-feature kind.

        Returns a pair of dicts, each keyed by 'all' (per-problem dispatch
        now happens in the mtl model, see NOTE below).
        """
        mode = get_phase()
        inputs = copy(inputs)
        # keys: ['last_hidden_state', 'pooler_output', 'hidden_states',
        # 'attentions', 'model_input_mask', 'model_token_type_ids'
        # model_input_mask, model_token_type_ids]
        features, bert_model_outputs = self.bert(inputs, mode==TRAIN)
        # extract bert hidden features
        features['model_input_mask'] = bert_model_outputs['model_input_mask']
        features['model_token_type_ids'] = bert_model_outputs['model_token_type_ids']
        hidden_feature = {}
        for logit_type in ['seq', 'pooled', 'all', 'embed', 'embed_table']:
            if logit_type == 'seq':
                # tensor, [batch_size, seq_length, hidden_size]
                hidden_feature[logit_type] = bert_model_outputs['last_hidden_state']
            elif logit_type == 'pooled':
                # tensor, [batch_size, hidden_size]
                hidden_feature[logit_type] = bert_model_outputs['pooler_output']
                if self.custom_pooled_layer:
                    hidden_feature[logit_type] = self.custom_pooled_layer(
                        hidden_feature[logit_type])
            elif logit_type == 'all':
                # list, num_hidden_layers * [batch_size, seq_length, hidden_size]
                hidden_feature[logit_type] = bert_model_outputs['all_encoder_layers']
            elif logit_type == 'embed':
                # for res connection
                hidden_feature[logit_type] = bert_model_outputs['embedding_output']
            elif logit_type == 'embed_table':
                hidden_feature[logit_type] = bert_model_outputs['embedding_table']
        if self.params.detail_log:
            # Emit per-weight TensorBoard summaries (expensive; opt-in).
            weight_var: tf.Variable
            for weight_var in self.bert.weights:
                variable_summaries(weight_var, 'body')
        # for each problem chunk, we extract corresponding features
        # and hidden features for that problem. The reason behind this
        # is to save computation for downstream processing.
        # For example, we have a batch of two instances and they're from
        # problem a and b respectively:
        # Input:
        # [{'input_ids': [1,2,3], 'a_loss_multiplier': 1, 'b_loss_multiplier': 0},
        # {'input_ids': [4,5,6], 'a_loss_multiplier': 0, 'b_loss_multiplier': 1}]
        # Output:
        # {
        # 'a': {'input_ids': [1,2,3], 'a_loss_multiplier': 1, 'b_loss_multiplier': 0}
        # 'b': {'input_ids': [4,5,6], 'a_loss_multiplier': 0, 'b_loss_multiplier': 1}
        # }
        # NOTE: This logic now move to mtl model
        return_feature = {}
        return_hidden_feature = {}
        return_feature['all'] = features
        return_hidden_feature['all'] = hidden_feature
        return return_feature, return_hidden_feature
# +
# hide
# Smoke test: assign the bundled weibo fake problems, pull one training
# batch, and run a forward pass through the body model below.
from m3tl.test_base import TestBase
import m3tl
import shutil
import numpy as np
test_base = TestBase()
# NOTE(review): '&' appears to chain problems trained together and '|' to
# separate problem chunks — confirm against m3tl's assign_problem docs.
test_base.params.assign_problem(
    'weibo_fake_ner&weibo_fake_cls|weibo_fake_multi_cls|weibo_masklm')
params = test_base.params
train_dataset = m3tl.train_eval_input_fn(
    params=params, mode=m3tl.TRAIN)
eval_dataset = m3tl.train_eval_input_fn(
    params=params, mode=m3tl.EVAL
)
train_dataset = train_dataset.repeat()
one_batch_data = next(train_dataset.as_numpy_iterator())
# -
# One forward pass through the body in TRAIN phase.
mtl_body = BertMultiTaskBody(params=params)
set_phase(TRAIN)
features, hidden_features = mtl_body(one_batch_data)
# export
class BertMultiTaskTop(tf.keras.Model):
    """Model to create top layer, aka classification layer, for each problem.
    """
    def __init__(self, params: Params, name='BertMultiTaskTop', input_embeddings: tf.Tensor = None):
        """Instantiate one top layer per registered problem.

        Args:
            params: m3tl Params object; supplies `top_layer` (a mapping from
                problem type to layer class) and `run_problem_list`.
            name: Keras model name.
            input_embeddings: word-embedding table, forwarded to top layers
                whose constructor accepts an `input_embeddings` argument
                (e.g. masked-LM heads that tie output weights).
        """
        super(BertMultiTaskTop, self).__init__(name=name)
        self.params = params
        problem_type_layer = self.params.top_layer
        self.top_layer_dict = {}
        for problem_dict in self.params.run_problem_list:
            for problem in problem_dict:
                problem_type = self.params.problem_type[problem]
                # some layers has different signatures, assign inputs accordingly
                layer_signature_name = signature(
                    problem_type_layer[problem_type].__init__).parameters.keys()
                inputs_kwargs = {
                    'params': self.params,
                    'problem_name': problem
                }
                for signature_name in layer_signature_name:
                    if signature_name == 'input_embeddings':
                        inputs_kwargs.update(
                            {signature_name: input_embeddings})
                self.top_layer_dict[problem] = problem_type_layer[problem_type](
                    **inputs_kwargs)
    def call(self,
             inputs: Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]]) -> Dict[str, tf.Tensor]:
        """Apply each problem's top layer to its dispatched features.

        `inputs` is a (features, hidden_features) pair, each keyed by
        problem name. Returns a dict mapping problem name to logits.
        """
        inputs = copy(inputs)
        features, hidden_feature = inputs
        return_dict = {}
        mode = get_phase()
        for problem_dict in self.params.run_problem_list:
            for problem in problem_dict:
                feature_this_round = features[problem]
                hidden_feature_this_round = hidden_feature[problem]
                problem_type = self.params.problem_type[problem]
                # if pretrain, return pretrain logit
                # NOTE(review): this returns immediately on the first
                # 'pretrain' problem, skipping any remaining problems —
                # looks intentional (pretrain runs alone), but confirm.
                if problem_type == 'pretrain':
                    pretrain = self.top_layer_dict[problem]
                    return_dict[problem] = pretrain(
                        (feature_this_round, hidden_feature_this_round))
                    return return_dict
                with tf.name_scope(problem):
                    layer = self.top_layer_dict[problem]
                    return_dict[problem] = layer(
                        (feature_this_round, hidden_feature_this_round))
        return return_dict
# +
# use embedding table as the classification top of mask lm
# top layer takes per-problem features and hidden features
# Dispatch the body's 'all' outputs into per-problem slices, then run the
# top model once in TRAIN phase.
from m3tl.utils import dispatch_features
features_per_problem, hidden_features_per_problem = {}, {}
for problem in params.problem_list:
    features_per_problem[problem], hidden_features_per_problem[problem] = dispatch_features(
        features=features['all'], hidden_feature=hidden_features['all'], problem=problem, mode=TRAIN
    )
input_embeddings = m3tl.utils.get_embedding_table_from_model(
    mtl_body.bert.bert_model)
mtl_top = BertMultiTaskTop(params=params, input_embeddings=input_embeddings)
set_phase(TRAIN)
logit_dict = mtl_top((features_per_problem, hidden_features_per_problem))
# -
for problem, problem_logit in logit_dict.items():
    # last dim of logits equals to num_classes
    assert problem_logit.shape[-1] == params.get_problem_info(problem=problem, info_name='num_classes')
# +
# export
class BertMultiTask(tf.keras.Model):
    """End-to-end multitask model: shared body -> mtl model -> per-problem tops.

    Also owns the training loop customizations: loss-combination strategy,
    optional horovod distribution, and a mean-accuracy tracker.
    """
    def __init__(self, params: Params, name='BertMultiTask') -> None:
        super(BertMultiTask, self).__init__(name=name)
        self.params = params
        # initialize body model, aka transformers
        self.body = BertMultiTaskBody(params=self.params)
        # The mtl model dispatches body outputs per problem; it may or may
        # not include its own top layers (see mtl_model_include_top).
        self.mtl_model = self.params.mtl_model['model'](
            self.params, self.params.mtl_model['model_name'])
        self.mtl_model_include_top = self.params.mtl_model['include_top']
        # mlm might need word embedding from bert, map_structure
        # build sub-model
        input_embeddings = get_embedding_table_from_model(self.body.bert.bert_model)
        self.top = BertMultiTaskTop(
            params=self.params, input_embeddings=input_embeddings)
        # loss combination strategy
        self.loss_combination = self.params.loss_combination_strategy['model'](
            self.params, self.params.loss_combination_strategy['name'])
        self.eval_loss_metric_dict_list = []
        self.eval_metric_dict_list = []
        # for horovod
        self.first_batch = True
    def call(self, inputs):
        """Forward pass; returns a dict of requested outputs.

        Depending on params flags the dict may contain 'seq'/'pooled'
        body hidden states, 'mtl' intermediate outputs, and/or the
        per-problem predictions.
        """
        mode = get_phase()
        inputs = copy(inputs)
        pred_dict = {}
        body_outputs = self.body(
            inputs)
        # body_outputs is (features, hidden_features); index 1 holds the
        # hidden features keyed by 'all'.
        if self.params.output_body_seq_hidden:
            pred_dict['seq'] = body_outputs[1]['all']['seq']
        if self.params.output_body_pooled_hidden:
            pred_dict['pooled'] = body_outputs[1]['all']['pooled']
        if self.mtl_model_include_top:
            # The mtl model already produced per-problem predictions.
            problem_pred = self.mtl_model(
                body_outputs)
            pred_dict.update(problem_pred)
            return pred_dict
        mtl_model_outputs = self.mtl_model(
            body_outputs)
        if self.params.output_mtl_model_hidden:
            pred_dict['mtl'] = mtl_model_outputs
        pred_per_problem = self.top(
            (mtl_model_outputs))
        if self.params.output_problem_pred:
            pred_dict.update(pred_per_problem)
        return pred_dict
    def compile(self, *args, **kwargs):
        """Compile with the transformers AdamW-style optimizer and warmup schedule."""
        super(BertMultiTask, self).compile(*args, **kwargs)
        logger.critical('Initial lr: {}'.format(self.params.lr))
        logger.critical('Train steps: {}'.format(self.params.train_steps))
        logger.critical('Warmup steps: {}'.format(self.params.num_warmup_steps))
        self.optimizer, self.lr_scheduler = transformers.optimization_tf.create_optimizer(
            init_lr=self.params.lr,
            num_train_steps=self.params.train_steps,
            num_warmup_steps=self.params.num_warmup_steps,
            weight_decay_rate=0.01
        )
        # Tracks the mean of all accuracy/f1 metrics across problems.
        self.mean_acc = tf.keras.metrics.Mean(name='mean_acc')
    def add_flatten_losses_metrics(self, return_dict: dict):
        """Flatten the nested per-layer loss dict and merge it into `return_dict`."""
        current_eval_loss_dict = create_dict_from_nested_model(self, ele_name='losses')
        flatten_losses = flatten_with_joined_string_paths(current_eval_loss_dict)
        flatten_losses = {p: v for p, v in flatten_losses}
        return_dict.update(flatten_losses)
        return return_dict
    def train_step(self, data):
        """One optimization step: forward, combine per-problem losses, apply grads."""
        set_phase(TRAIN)
        with tf.GradientTape() as tape:
            # Forward pass
            # Sublayers record their losses during this call (collected
            # below via create_dict_from_nested_model); the returned
            # predictions themselves are not needed here.
            _ = self(data)
            # gather losses from all problems
            metric_dict = {m.name: m.result() for m in self.metrics}
            metric_dict = self.add_flatten_losses_metrics(return_dict=metric_dict)
            # apply loss combination strategy
            current_round_nest_loss_metric = create_dict_from_nested_model(self, ele_name='losses')
            combined_losses = self.loss_combination(
                current_loss_dict=current_round_nest_loss_metric,
                current_metric_dict=self.metrics,
                history=self.history
            )
        if self.params.use_horovod:
            import horovod.tensorflow as hvd
            tape = hvd.DistributedGradientTape(tape)
        # Compute gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(combined_losses, trainable_vars)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        self.mean_acc.update_state(
            [v for n, v in metric_dict.items() if n != 'mean_acc'])
        return_dict = metric_dict
        return_dict[self.mean_acc.name] = self.mean_acc.result()
        # Return a dict mapping metric names to current value.
        # Note that it will include the loss (tracked in self.metrics).
        if self.first_batch and self.params.use_horovod:
            # Broadcast initial weights/optimizer state from rank 0 so all
            # workers start from the same state (must happen after the
            # first apply_gradients, per horovod convention).
            self.first_batch = False
            hvd.broadcast_variables(trainable_vars, root_rank=0)
            hvd.broadcast_variables(self.optimizer.variables(), root_rank=0)
        return return_dict
    def test_step(self, data):
        """The logic for one evaluation step.
        This method can be overridden to support custom evaluation logic.
        This method is called by `Model.make_test_function`.
        This function should contain the mathematical logic for one step of
        evaluation.
        This typically includes the forward pass, loss calculation, and metrics
        updates.
        Configuration details for *how* this logic is run (e.g. `tf.function` and
        `tf.distribute.Strategy` settings), should be left to
        `Model.make_test_function`, which can also be overridden.
        Arguments:
          data: A nested structure of `Tensor`s.
        Returns:
          A `dict` containing values that will be passed to
          `tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the
          values of the `Model`'s metrics are returned.
        """
        set_phase(EVAL)
        y_pred = self(data)
        # Updates stateful loss metrics.
        # Targets are None: each problem head computes its own loss
        # internally from the dispatched features.
        self.compiled_loss(
            None, y_pred, None, regularization_losses=self.losses)
        self.compiled_metrics.update_state(None, y_pred, None)
        # get metrics to calculate mean
        m_list = []
        for metric in self.metrics:
            # Skip the aggregate itself; average only acc/f1 metrics.
            if 'mean_acc' in metric.name:
                continue
            if 'acc' in metric.name:
                m_list.append(metric.result())
            if 'f1' in metric.name:
                m_list.append(metric.result())
        self.mean_acc.update_state(
            m_list)
        ret_dict = {m.name: m.result() for m in self.metrics}
        ret_dict = self.add_flatten_losses_metrics(ret_dict)
        return ret_dict
    def predict_step(self, data):
        """Forward pass in PREDICT phase (no row dispatching, no losses)."""
        set_phase(PREDICT)
        return self(data)
# -
# End-to-end smoke test: build the full model, check logit shapes, run a
# single tiny fit, then verify the optional hidden outputs are exposed.
mtl = BertMultiTask(params=params)
logit_dict = mtl(one_batch_data)
for problem, problem_logit in logit_dict.items():
    # last dim of logits equals to num_classes
    assert problem_logit.shape[-1] == params.get_problem_info(problem=problem, info_name='num_classes')
mtl.compile()
hist = mtl.fit(train_dataset, validation_data=train_dataset, validation_steps=1, steps_per_epoch=1, epochs=3, verbose=1)
# Enable the optional hidden-state outputs and confirm they appear.
params.output_body_pooled_hidden = True
params.output_body_seq_hidden = True
params.output_mtl_model_hidden = True
mtl = BertMultiTask(params=params)
logit_dict = mtl(one_batch_data)
assert 'pooled' in logit_dict
assert 'seq' in logit_dict
assert 'mtl' in logit_dict
| source_nbs/13_model_fn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:iiot_env] *
# language: python
# name: conda-env-iiot_env-py
# ---
import pandas as pd
from datetime import datetime
import numpy as np

# Load the three raw inputs: warehouse master data, per-zone temperature
# limits, and the sensor inventory.
warehouse_df = pd.read_csv('warehouse.csv')
warehouse_df.head()

zone_limits_df = pd.read_csv('warehouse_zones_temparature_limits.csv')
zone_limits_df.head()

sensor_df = pd.read_csv('sensor_information.csv')
sensor_df

# Join each sensor to its zone's temperature limits (on warehouse + zone id),
# then attach the warehouse attributes (inner joins, pandas default).
sensors_with_limits = sensor_df.merge(zone_limits_df, on=['WID', 'ZID'])
combined_df = sensors_with_limits.merge(warehouse_df, on='WID')
combined_df
| data generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import panel as pn
import numpy as np
import holoviews as hv
pn.extension()
# -
# For a large variety of use cases we do not need complete control over the exact layout of each individual component on the page, as could be achieved with a [custom template](../../user_guide/Templates.ipynb), we just want to achieve a more polished look and feel. For these cases Panel ships with a number of default templates, which are defined by declaring four main content areas on the page, which can be populated as desired:
#
# * **`header`**: The header area of the HTML page
# * **`sidebar`**: A collapsible sidebar
# * **`main`**: The main area of the application
# * **`modal`**: A modal area which can be opened and closed from Python
#
# These four areas behave very similarly to other Panel layout components. In particular the header, sidebar and modal behave just like the list-like `Row`/`Column` layouts while the main area behaves like a `GridSpec`. This means we can easily append new components into these areas. Unlike other layout components however, the contents of the areas are fixed once rendered. If you need a dynamic layout you should therefore insert a regular Panel layout component (e.g. a `Column` or `Row`) and modify it in place once added to one of the content areas.
#
# Templates can allow for us to quickly and easily create web apps for displaying our data. Panel comes with a default Template, and includes multiple Templates that extend the default which add some customization for a better display.
#
# #### Parameters:
#
# In addition to the four different areas we can populate, the `FastListTemplate` also provide additional parameters:
#
# * **`busy_indicator`** (BooleanIndicator): Visual indicator of application busy state.
# * **`header_background`** (str): Optional header background color override.
# * **`header_color`** (str): Optional header text color override.
# * **`favicon`** (str): URI of favicon to add to the document head (if local file, favicon is base64 encoded as URI).
# * **`logo`** (str): URI of logo to add to the header (if local file, logo is base64 encoded as URI).
# * **`theme`** (Theme): A Theme class (available in `panel.template`. One of `DefaultTheme` or `DarkTheme`).
# - For convenience you can provide "default" or "dark" string to the constructor.
# - If you add `?theme=default` or `?theme=dark` in the url this will set the theme unless explicitly declared
# * **`site`** (str): Name of the site. Will be shown in the header. Default is '', i.e. not shown.
# * **`site_url`** (str): Url of the site and logo. Default is "/".
# * **`title`** (str): A title to show in the header. Also added to the document head meta settings and as the browser tab title.
# * **`main_max_width`** (str): The maximum width of the main area. For example '800px' or '80%'. If the string is '' (default) no max width is set.
# * **`sidebar_footer`** (str): Can be used to insert additional HTML. For example a menu, some additional info, links etc.
# * **`enable_theme_toggle`** (boolean): If `True` a switch to toggle the Theme is shown. Default is `True`.
# * **`config`** (TemplateConfig): Contains configuration options similar to `pn.config` but applied to the current Template only. (Currently only `css_files` is supported)
# ________
# In this case we are using the `FastGridTemplate`, built using the [Fast.design](https://www.fast.design/) framework. Here is an example of how you can set up a display using this template:
# +
# Build a FastGridTemplate demo: two sliders drive a sine and a cosine plot.
template = pn.template.FastGridTemplate(title='FastGridTemplate')
pn.config.sizing_mode = 'stretch_width'

sample_xs = np.linspace(0, np.pi)

freq = pn.widgets.FloatSlider(name="Frequency", start=0, end=10, value=2)
phase = pn.widgets.FloatSlider(name="Phase", start=0, end=np.pi)

@pn.depends(freq=freq, phase=phase)
def sine(freq, phase):
    """Sine curve re-rendered whenever either slider moves."""
    curve = hv.Curve((sample_xs, np.sin(sample_xs * freq + phase)))
    return curve.opts(responsive=True, min_height=400, title="Sine")

@pn.depends(freq=freq, phase=phase)
def cosine(freq, phase):
    """Cosine curve re-rendered whenever either slider moves."""
    curve = hv.Curve((sample_xs, np.cos(sample_xs * freq + phase)))
    return curve.opts(responsive=True, min_height=400, title="Cosine")

# Put the controls in the collapsible sidebar.
for component in (pn.pane.Markdown("## Settings"), freq, phase):
    template.sidebar.append(component)

# Lay the two plots out side by side in the main grid.
template.main[:3, :6] = hv.DynamicMap(sine)
template.main[:3, 6:] = hv.DynamicMap(cosine)
template.servable();
# -
# <h3><b>FastGridTemplate with DefaultTheme</b></h3>
# <img src="../../assets/FastGridTemplate.png" style="margin-left: auto; margin-right: auto; display: block;"></img>
# </br>
# <h3><b>FastGridTemplate with DarkTheme</b></h3>
# <img src="../../assets/FastGridTemplateDark.png" style="margin-left: auto; margin-right: auto; display: block;"></img>
# The app can be displayed within the notebook by using `.servable()`, or rendered in another tab by replacing it with `.show()`.
#
# Themes can be added using the optional keyword argument `theme`. This template comes with a `DarkTheme` and a `DefaultTheme`, which can be set via `FastGridTemplate(theme=DarkTheme)`. If no theme is set, then `DefaultTheme` will be applied.
#
# It should be noted **this template currently does not render correctly in a notebook**, and for the best performance it should ideally be deployed to a server.
| examples/reference/templates/FastGridTemplate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from read_data import read_text_data
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Bidirectional
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.models import load_model
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping
#import matplotlib.pyplot as plt
#import seaborn as sns
import numpy as np
import pandas as pd
# +
# Resume training a previously saved text model on the pickled dataset.
model = load_model('text_model.h5')
x_train, y_train, id_train, x_val, y_val, id_val = pd.read_pickle(
    'text_data.pc', compression='gzip')
print(model.summary())
# Both callbacks monitor 'val_acc', so fit() MUST be given validation data:
# without it val_acc is never computed, the checkpoint never saves and
# early stopping never triggers (the original call omitted it).
checkpoint = ModelCheckpoint("text_best_model.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')
model.fit(x_train, y_train, epochs=3, batch_size=64,
          validation_data=(x_val, y_val),
          callbacks=[checkpoint, early])
scores = model.evaluate(x_val, y_val, verbose=0)
print("Accuracy: %f%%" % (scores[1]*100))
| Deep_part/.ipynb_checkpoints/Text_continue_run-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Example of Forward Feature Selection, Backward Feature Elimination, Exhaustive Feature Selection, Recursive Feature Elimination and Random Forest Importance.
# In this model we are predicting whether the customer will churn or not using a random forest classifier.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the three raw tables and check each for missing values.
churn_data = pd.read_csv('data/churn_data.csv')
churn_data.isnull().sum()
customer_data = pd.read_csv('data/customer_data.csv')
customer_data.isnull().sum()
internet_data = pd.read_csv('data/internet_data.csv')
internet_data.isnull().sum()
churn_data.head()
customer_data.head()
internet_data.head()
# Join everything on customerID into a single modelling table.
datatemp = pd.merge(customer_data, internet_data, how = 'inner', on = 'customerID')
dataSet = pd.merge(datatemp, churn_data, how = 'inner', on = 'customerID')
dataSet.info()
dataSet.isnull().sum()
dataSet.head()
# Inspect the categorical columns so we know which values need encoding.
# NOTE(review): the original check `if(dataSet[i].dtype):` was always truthy
# (a NumPy dtype object is never falsy), so every column — numeric included —
# was appended; the intent is to list only object-dtype (string) columns.
unique_val = []
for col in dataSet:
    if dataSet[col].dtype == object:
        unique_val.append(col)
        unique_val.append(dataSet[col].unique())
import pprint
pprint.pprint(unique_val)
# Yes/No type attributes can be converted to [1,0]
# but attributes with more information need dummy variables
# 'No internet service' is treated the same as 'No' for the service columns.
yesnoToggle = {'Male':1, 'Female': 0,'Yes': 1, 'No': 0 ,'No internet service': 0}
toToggleAttr = ['PhoneService', 'PaperlessBilling', 'Churn', 'Partner', 'Dependents', 'gender','OnlineSecurity','OnlineBackup','DeviceProtection','TechSupport','StreamingTV','StreamingMovies']
for i in toToggleAttr:
    dataSet[i] = dataSet[i].map(yesnoToggle)
# +
# One-hot encode the multi-valued categoricals, dropping one redundant
# level per attribute to avoid perfect collinearity.
con = pd.get_dummies(dataSet['Contract'], prefix='Contract')
dataSet = pd.concat([dataSet,con], axis=1)
dataSet = dataSet.drop('Contract', axis = 1)
ml = pd.get_dummies(dataSet['MultipleLines'], prefix = 'MultipleLines')
dataSet = pd.concat([dataSet, ml], axis= 1)
dataSet = dataSet.drop(['MultipleLines', 'MultipleLines_No phone service'], axis = 1)
isp = pd.get_dummies(dataSet['InternetService'], prefix = 'InternetService')
dataSet = pd.concat([dataSet, isp], axis = 1)
dataSet = dataSet.drop(['InternetService_No', 'InternetService'], axis = 1)
pm = pd.get_dummies(dataSet['PaymentMethod'], prefix = 'PaymentMethod')
dataSet = pd.concat([dataSet, pm], axis = 1)
dataSet = dataSet.drop('PaymentMethod', axis = 1)
# TotalCharges arrives as strings; coerce unparsable entries to NaN.
dataSet['TotalCharges'] = pd.to_numeric(dataSet['TotalCharges'],errors = 'coerce')
# -
dataSet.head()
dataSet.info()
dataSet.isnull().sum()
# Fill the NaNs introduced by the coercion above with 0.
dataSet = dataSet.replace(np.nan, 0)
# # Making Model
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn import tree
# +
# Baseline model on the full feature set (28 features after encoding).
y = dataSet['Churn']
X = dataSet.drop( ['Churn', 'customerID'] ,axis = 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, test_size=0.3, random_state=500)
model = RandomForestClassifier(n_estimators=30, criterion="entropy", random_state = 100, min_samples_split = 9, max_depth = 7)#, max_features = "sqrt")
model = model.fit(X_train,y_train)
y_pred_train = model.predict(X_train)
print('accuracy on train data:',metrics.accuracy_score(y_train, y_pred_train))
y_pred = model.predict(X_test)
print('accuracy on test data:',metrics.accuracy_score(y_test, y_pred))
# -
# -
# Classifier Analysis Tools to analyse the model.
from sklearn.metrics import confusion_matrix
# Bind the result to its own name: the original assigned it back to
# `confusion_matrix`, shadowing the imported function and breaking any
# re-run of this cell (TypeError: 'ndarray' object is not callable).
cm = confusion_matrix(y_test, y_pred)
print(cm)
# ROC curve for the test predictions, with the chance diagonal for reference.
from sklearn.metrics import roc_auc_score, roc_curve
fpr, tpr, thersh = roc_curve(y_test, y_pred)
plt.figure()
plt.plot(fpr, tpr, label = 'Random Forest Classification')
plt.plot([0,1], [0,1], 'r--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.show()
from sklearn.metrics import classification_report
print( classification_report(y_test, y_pred))
# Now we have a base case model where the model uses 28 features. This is a large number of features so we are going to use wrapper methods to reduce the complexity of the model by reducing the feature set.
# # Forward Feature Selection
# By using SequentialFeatureSelector in sklearn library.
from sklearn.feature_selection import SequentialFeatureSelector
testing = RandomForestClassifier(n_estimators=20, criterion="entropy", random_state = 100, min_samples_split = 9, max_depth = 7)
# Greedily ADD features one at a time until 8 are selected.
ffs = SequentialFeatureSelector(testing, n_features_to_select=8, direction='forward', n_jobs=-1)
ffs.fit(X, y)
# get_support() returns a boolean mask over X's columns.
selection = ffs.get_support()
featureList = []
totalFeatureList = X.columns
for i, name in enumerate(totalFeatureList):
    if selection[i] == True:
        featureList.append(name)
newX=X[featureList]
X_train, X_test, y_train, y_test = train_test_split(newX, y, train_size=0.7, test_size=0.3, random_state=500)
# +
# Refit on the reduced feature set and compare accuracies to the baseline.
testing.fit(X_train, y_train)
y_pred_train = testing.predict(X_train)
print('accuracy on train data:',metrics.accuracy_score(y_train, y_pred_train))
y_pred = testing.predict(X_test)
print('accuracy on test data:',metrics.accuracy_score(y_test, y_pred))
# -
print( classification_report(y_test, y_pred))
# # Backward Feature Elimination
# By using SequentialFeatureSelector in sklearn library
testing_back = RandomForestClassifier(n_estimators=20, criterion="entropy", random_state = 100, min_samples_split = 9, max_depth = 7)
# Same selector, but greedily REMOVE features until 8 remain.
bfs = SequentialFeatureSelector(testing_back, n_features_to_select=8, direction='backward', n_jobs=-1)
bfs.fit(X, y)
selection = bfs.get_support()
featureList = []
totalFeatureList = X.columns
for i, name in enumerate(totalFeatureList):
    if selection[i] == True:
        featureList.append(name)
newX=X[featureList]
X_train, X_test, y_train, y_test = train_test_split(newX, y, train_size=0.7, test_size=0.3, random_state=500)
# +
testing_back.fit(X_train, y_train)
y_pred_train = testing_back.predict(X_train)
print('accuracy on train data:',metrics.accuracy_score(y_train, y_pred_train))
y_pred = testing_back.predict(X_test)
print('accuracy on test data:',metrics.accuracy_score(y_test, y_pred))
# -
print( classification_report(y_test, y_pred))
# # Exhaustive Feature Selection
# By using mlxtend library
# Tries EVERY feature subset of size 2..4 (combinatorial, hence the small
# bounds and the smaller forest), scored by ROC AUC.
from mlxtend.feature_selection import ExhaustiveFeatureSelector
efs = ExhaustiveFeatureSelector(RandomForestClassifier(n_estimators=12, criterion="entropy", random_state = 100, min_samples_split = 9, max_depth = 7),
                                min_features=2,
                                max_features=4,
                                scoring='roc_auc',
                                n_jobs=-1)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, test_size=0.3, random_state=500)
efs = efs.fit(X_train, y_train)
print(X_train.columns[list(efs.best_idx_)])
print(efs.best_score_)
testing_exhaust = RandomForestClassifier(n_estimators=12, criterion="entropy", random_state = 100, min_samples_split = 9, max_depth = 7)
# Columns below are the best subset reported by the search above.
newX = X[['tenure', 'MonthlyCharges', 'Contract_One year', 'Contract_Two year']]
X_train, X_test, y_train, y_test = train_test_split(newX, y, train_size=0.7, test_size=0.3, random_state=500)
testing_exhaust.fit(X_train, y_train)
# +
y_pred_train = testing_exhaust.predict(X_train)
print('accuracy on train data:',metrics.accuracy_score(y_train, y_pred_train))
y_pred = testing_exhaust.predict(X_test)
print('accuracy on test data:',metrics.accuracy_score(y_test, y_pred))
# -
print( classification_report(y_test, y_pred))
# # Recursive Feature Elimination
# By using sklearn library
# RFE repeatedly fits the estimator and drops the least important feature
# (by feature_importances_) until 8 remain.
from sklearn.feature_selection import RFE
rfe=RFE(RandomForestClassifier(n_estimators=20, criterion="entropy", random_state = 100, min_samples_split = 9, max_depth = 7), n_features_to_select=8)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, test_size=0.3, random_state=500)
rfe.fit(X_train, y_train)
# +
y_pred_train = rfe.predict(X_train)
print('accuracy on train data:',metrics.accuracy_score(y_train, y_pred_train))
y_pred = rfe.predict(X_test)
print('accuracy on test data:',metrics.accuracy_score(y_test, y_pred))
# -
print( classification_report(y_test, y_pred))
# # Random Forest Importance
# An embedded method of feature selection that uses the model's feature importances to rank attributes and select from them
# +
# Rebuild the full feature matrix and target (no pre-selection this time).
y = dataSet['Churn']
X = dataSet.drop(['Churn', 'customerID'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, test_size=0.3, random_state=500)
# -
# Embedded feature selection: rank features by impurity-based importance.
model = RandomForestClassifier(n_estimators=30, criterion="entropy", random_state=100, min_samples_split=9, max_depth=7)
model.fit(X_train, y_train)
importance = model.feature_importances_
feature_id = X.columns
plt.figure(figsize=(8, 8))
plt.bar(feature_id, importance)
plt.xlabel("feature_id")
plt.xticks(rotation=270)
plt.ylabel("importance")  # fixed axis-label typo (was "importace")
# This barplot of feature vs. importance helps us identify the features with the
# highest importance; we then select just enough of them to keep the model
# simple yet accurate.
| wrappers-randomforestimp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#environment setup with watermark
# %load_ext watermark
# %watermark -a 'Gopala KR' -u -d -v -p watermark,numpy,pandas,matplotlib,nltk,sklearn,tensorflow,theano,mxnet,chainer
#
# ======================================================================
# Compressive sensing: tomography reconstruction with L1 prior (Lasso)
# ======================================================================
#
# This example shows the reconstruction of an image from a set of parallel
# projections, acquired along different angles. Such a dataset is acquired in
# **computed tomography** (CT).
#
# Without any prior information on the sample, the number of projections
# required to reconstruct the image is of the order of the linear size
# ``l`` of the image (in pixels). For simplicity we consider here a sparse
# image, where only pixels on the boundary of objects have a non-zero
# value. Such data could correspond for example to a cellular material.
# Note however that most images are sparse in a different basis, such as
# the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
# necessary to use prior information available on the sample (its
# sparsity): this is an example of **compressive sensing**.
#
# The tomography projection operation is a linear transformation. In
# addition to the data-fidelity term corresponding to a linear regression,
# we penalize the L1 norm of the image to account for its sparsity. The
# resulting optimization problem is called the `lasso`. We use the
# class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
# algorithm. Importantly, this implementation is more computationally efficient
# on a sparse matrix, than the projection operator used here.
#
# The reconstruction with L1 penalization gives a result with zero error
# (all pixels are successfully labeled with 0 or 1), even if noise was
# added to the projections. In comparison, an L2 penalization
# (:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
# errors for the pixels. Important artifacts are observed on the
# reconstructed image, contrary to the L1 penalization. Note in particular
# the circular artifact separating the pixels in the corners, that have
# contributed to fewer projections than the central disk.
#
#
# +
print(__doc__)
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
    """ Compute the tomography design matrix.

    Each row of the operator integrates the image along one detector cell
    of one projection angle, using linear interpolation weights.

    Parameters
    ----------
    l_x : int
        linear size of image array
    n_dir : int
        number of angles at which projections are acquired.

    Returns
    -------
    p : sparse matrix of shape (n_dir l_x, l_x**2)
    """
    X, Y = _generate_center_coordinates(l_x)
    angles = np.linspace(0, np.pi, n_dir, endpoint=False)
    weights, camera_inds, data_inds = [], [], []
    # Each pixel appears twice per angle (two interpolation neighbours).
    pixel_ids = np.arange(l_x ** 2)
    pixel_ids = np.hstack((pixel_ids, pixel_ids))
    for direction, angle in enumerate(angles):
        rotated = np.cos(angle) * X - np.sin(angle) * Y
        inds, w = _weights(rotated, dx=1, orig=X.min())
        # Discard contributions falling outside the detector range.
        valid = np.logical_and(inds >= 0, inds < l_x)
        weights.extend(w[valid])
        camera_inds.extend(inds[valid] + direction * l_x)
        data_inds.extend(pixel_ids[valid])
    return sparse.coo_matrix((weights, (camera_inds, data_inds)))
def generate_synthetic_data(size=None):
    """ Synthetic binary data: outlines of random blobs inside a disk.

    Parameters
    ----------
    size : int, optional
        Linear image size in pixels. Defaults to the module-level ``l``
        (kept for backward compatibility with the original no-arg call).

    Returns
    -------
    ndarray of bool, shape (size, size)
        True only on the boundary pixels of the smoothed random blobs.
    """
    if size is None:
        size = l
    rs = np.random.RandomState(0)
    n_pts = 36
    x, y = np.ogrid[0:size, 0:size]
    mask_outer = (x - size / 2.) ** 2 + (y - size / 2.) ** 2 < (size / 2.) ** 2
    mask = np.zeros((size, size))
    points = size * rs.rand(2, n_pts)
    # np.int was removed in NumPy 1.24; use the builtin int as the dtype.
    mask[(points[0]).astype(int), (points[1]).astype(int)] = 1
    mask = ndimage.gaussian_filter(mask, sigma=size / n_pts)
    res = np.logical_and(mask > mask.mean(), mask_outer)
    # XOR with the eroded mask keeps only the object boundaries (sparse image).
    return np.logical_xor(res, ndimage.binary_erosion(res))
# Generate synthetic images, and projections
l = 128
# n_dir must be an integer: it is forwarded to np.linspace's `num` argument,
# which rejects floats on modern NumPy (the original passed l / 7.).
proj_operator = build_projection_operator(l, l // 7)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
# Side-by-side comparison: ground truth vs. the two reconstructions.
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
                    right=1)
plt.show()
# -
# test complete; Gopal
| tests/scikit-learn/plot_tomography_l1_reconstruction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Blog post 1: Linear regression and binary classification, a friendly introduction
# # Assumptions and Disclaimers
# This blogpost assumes that you have already completed the following tutorials from Amazon SageMaker documentation!:
# - [Setting up](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html)
# - [Create an Amazon SageMaker Notebook Instance](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-setup-working-env.html)
# - I have included 'sagemaker' in the name of my S3 bucket, "cyrusmv-sagemaker-demos' and have chosen to let any SageMaker notebook instance to access any S3 bucket with the term 'sagemaker' included in the name. This is however is not a recommended security option for production and is only useful for simplifying the flow of the blog.
# - In this blog I am using [Titanic dataset](https://github.com/agconti/kaggle-titanic). I have put the dataset in an S3 bucket. You should also download the dataset and upload the data onto S3 otherwise you shall receive errors.
#
#
# # Introduction
# Surprisingly, many problems in the real-world can be approximated to a linear problem, meaning that a linear polynomial can provide a good output approximation to an n-dimensional input vector. Linear regression is perhaps the most applied and simplest of all machine learning models.
#
# With this in mind, we have implemented a scalable linear regression model as a part of Amazon Algorithms that can be used in Amazon SageMaker. . I have put together a three-part series of blog posts that explain our scalable linear regression model.
#
# In this blog post, which is blog post one, I provide an easy and intuitive introduction to linear regression. I also provide references to implement your own linear regression, both from scratch and using MXNet and Gluon.
#
# In blog post two, “Getting Hands-On with Linear Learner and Amazon SageMaker,” I walk you through an entire process for data pre-processing, training, and deployment of a model as a live endpoint. I use the Visa Credit Card Fraud dataset from Kaggle, pre-process the data, and use Amazon Linear Learner to predict fraudulent transactions. I observe that using default values of LinearLearner yields an impressive precision of near certainty. However, the recall on the fraudulent transactions is 80 percent.
#
# In blog post three, “Excel in tuning models using Amazon Linear Learner algorithm,” I attempt to fine-tune the model on the Visa dataset to see whether or not the recall could be improved.
#
# # Prerequisites
# For this series of blog posts, I assume that you have already completed the following tutorials from Amazon SageMaker documentation:
#
# - [Setting up](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html)
# - [Create an Amazon SageMaker Notebook Instance](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-setup-working-env.html)
#
# I have included “sagemaker” in the name of my S3 bucket, "cyrusmv-sagemaker-demos' and have chosen to let any SageMaker notebook instance access any Amazon S3 bucket with the term “sagemaker” included in the name. Note: This is not a recommended security option for production and is only useful for simplifying the flow of the blog.
# In this blog I am using the Titanic dataset from Kaggle (https://www.kaggle.com/c/titanic/data). I have put the dataset in an Amazon S3 bucket. You should also download the dataset and upload the data to Amazon S3, otherwise you will receive errors.
#
#
# # An introduction to linear regression
# Let’s consider a dataset composed of discrete values scattered around in a way that a straight line can pass through them, meaning that most of the data are within a certain distance from the line.
#
# ## The theory
# Remembering from high-school math, we know that in a two-dimensional space a straight line can be represented as a function, formalized as the following:
#
# $$f:\mathcal{D} \rightarrow \mathcal{R}$$
# $$y = f(x) = ax + b$$
# Now if $\mathcal{D}=\left\{0,1, 2, 3, ..., 49\right\}$, then for $a=2$ and $b=1$, $\mathcal{R}$ is a set including all positive odd numbers that are less than $100$.
#
# In the following code we create such data and plot the function.
import numpy as np #a library for array operations
import matplotlib.pyplot as plt #Python 2D plotting library
import random as rnd
import seaborn as sns #Plotting library based on matplotlib that can take pandas.DataFrame inputs
import pandas as pd #tabular data structure with SQL-like function support
#import boto3 #Python SDK for AWS services
import os
range_start = 0
range_end = 49
# Include the endpoint: the text above defines the domain as {0, 1, ..., 49},
# and np.arange excludes its stop value — hence the +1 (was an off-by-one).
d = np.arange(range_start, range_end + 1)
r = list(map(lambda x: 2 * x + 1, d))  # y = 2x + 1 over the domain, as a list
plt.scatter(d, r)
plt.show()
# **Adding Noise:**
# The problem is that the data in real life is noisy and almost never follows a perfect line. In many cases, however the distribution of data permits us to approximate the data to a line that represents the distribution with some error. The following code synthesizes a dataset based on the same function, except that the domain will be a random normal distribution with some random noise between 0 and 1.
# Seed both random sources so the synthetic noisy dataset is reproducible.
rnd.seed(42)
noise_factor = lambda x: rnd.random() * x  # uniform noise in [0, x)
np.random.seed(13)
# Replace the uniform domain with a standard-normal sample of the same size.
d = np.random.normal(loc=range_start, scale=1, size=range_end + 1)
# y = 2x + 1 plus a small random perturbation per point.
r = [2 * x + 1 + rnd.random() * noise_factor(3) for x in d]
plt.scatter(d, r)
plt.show()
# It is noticeable that a straight line can provide a good approximation for the data distribution. in a multi-dimensional world, where the function variable is a vector such as `<age, income, postal district, ...>`, our function would be:
# $$f:{V} \subseteq \mathbb{R}^n \rightarrow \mathcal{R} \subseteq \mathbb{R}
# \\
# y = f(V) = V.W + b
# $$
# In this case, V is the input feature vector, w is a parameter or weight vector, and b is bias. The task of linear regression is to find optimal w and b in order to approximate the data to a line.
#
# In the following graph we use a seaborn library to find the optimal line that approximates our distribution (the green line).
#
# Suppose we have a 4D input space $$V=[inv_1=gender, v_2=age, v_3=education, v_4=postal\_district]$$
# We want to predict income. Since the range of the function is $\mathbb {R}$ and $V$ has a dimension of $1 \times 4$, we should find $W_{4 \times 1}$ and scalar $b$ to solve the linear regression problem.
#
# $$
# f: \mathbb{R^4} \rightarrow \mathbb{R} \\
# f(V) = \left[\begin{array}{cc} v_1 & v_2 & v_3 & v_4 \end{array}\right]_{1 \times 4} \
# \left[\begin{array}{cc} w_1\\ w_2\\ w_3\\ w_4 \end{array}\right]_{4 \times 1} \
# + b = y \ \epsilon \ \mathbb {R}
# $$
#
# In machine learning the input space is nearly always multi-dimensional, but in our examples and plots we have used 1D synthetic data in order to simplify the presentation.
#
# *Challenge: You can change the shape and noise of the data and check the results.*
#
sns.axes_style()
# Pack the synthetic points into a DataFrame so seaborn can fit/plot them.
noisy_data = pd.DataFrame(np.column_stack((d, r)), columns=['x', 'y'])
sns.set(color_codes=True)
# seaborn >= 0.12 requires x/y to be passed as keyword arguments;
# positional data variables were removed.
sns.regplot(x='x', y='y', data=noisy_data, line_kws={'color': 'g'}, scatter_kws={'color': 'orange'})
sns.set()
sns.set_style({'axes.grid': False, 'grid.color': 'blue'})
plt.show()
print("in the perfect world f(d[5]) = f({}) = {}".format(d[5], 2*d[5]+1))
print("in the reality f(d[5]) = f({}) = {}".format(d[5], r[5]))
# ## Model error
# In the perfect world f(d[5]) = f(0.5323378882945463) = __2.064675776589093__.
# In the reality f(d[5]) = f(0.5323378882945463) = **2.3961453468307248**.
#
# The first number is generated by the fitted model. The second number is the observed value. There is a distance between the numbers.
#
# The residual of an observed value is the difference between the observed value and the estimated value.
#
# This might look like loss of accuracy, but in fact this approximation is the source of the generalization power of machine learning.
#
# There are many methods of calculating loss. One of the most common for linear regression is Root Mean Squared Error (RMSE).
#
# Root Mean Squared Error (RMSE) is the deviation of the residuals or prediction errors from the fitted model. Residuals measure how far the data points are from the regression line. RMSE measures the deviation of residuals from the fitted model.
#
# 
#
# *reference: https://github.com/zackchase/mxnet-the-straight-dope/blob/master/chapter02_supervised-learning/linear-regression-scratch.ipynb*
#
#
# if $y_i$ is the the observed value for a particular input $x_i$ and $\hat{y_i}$ is the predicted value, then:
#
# $$\mathcal{L} = RMSE = \sqrt{\frac{1}{n}\sum_{i=1}^{N}{(y_i - \hat{y_i})^2}}$$
#
# Other loss functions are also popular, such as Mean Absolute Error (MAE):
# $$
# MAE = \frac{1}{n} \sum_{i=1}^{n}|\hat{y_i}-y_i|
# $$
#
# RMSE magnifies the magnitude of errors and that affects large errors more pronouncedly. Thus it is more effective if you need to penalize large errors.
# ## An example
# A classic example of linear regression is a survival score for a passenger in Titanic's infamous maiden voyage. The Titanic dataset in Kaggle has captured this data. For more information, go to [Titanic: Machine Learning for Disaster.]((https://www.kaggle.com/c/titanic))
#
# In the Titanic dataset there are several inputs such as class, ticket price, gender, age, and others. We want to project a probability for survival of a passenger, given the features that represent the passenger.
#
# We also want to make a binary prediction whether or not a passenger will survive. Regression models always make a real-value prediction. If that value is bounded between 0 and 1, then that would be a probability value from a distribution. We can then use a threshold or project the output value to a binary projection.
#
# For instance, we can set the threshold to 0.5, so that anything above indicates that a passenger has survived, and anything below indicates otherwise.
#
# Since models do approximate, they necessarily produce errors. A model can predict a false positive, true positive, false negative, and true negative.
#
# - A false positive prediction in our example is when the model predicts survival when the person has not survived.
#
# - A true positive prediction in our example is when the model predicts survival correctly.
#
# - A false negative prediction in our example is when the model predicts not-survived while the person has survived.
#
# - A true negative prediction in out example is when the model correctly predicts that a passenger has not survived.
#
# Based on what is important for us we might want to adjust the threshold in order to reduce false positives or false negatives at the expense of a model’s total precision or accuracy.
#
# Let's take a look at another example. In the case of a cancer diagnosis algorithm, we want to have no false negatives, if at all possible. It is always better to flag a healthy patient as suspect and perform further tests, as opposed to letting a patient with cancer leave the clinic unchecked.
#
# *Note that I have already uploaded train.csv from the kaggle website onto an S3 bucket.*
#
# +
#Downloading data files from S3 to the notebook instance
# Fix: the top-of-file `import boto3` is commented out, so this cell raised
# NameError; import it locally where it is actually used.
import boto3
BUCKET_NAME = 'cyrusmv-sagemaker-demos' #Change this to your own bucket name
KEY = 'titanic-kaggle/train.csv'
LOCAL_FILE = '/tmp/titanic_train.csv'
#Downloading the file to a local folder
s3 = boto3.resource('s3')
bucket = s3.Bucket(BUCKET_NAME)
bucket.download_file(Key=KEY, Filename=LOCAL_FILE)
os.listdir('/tmp')
# -
#Inspecting Data
# Load the Titanic training set downloaded above and preview the first rows.
df = pd.read_csv(LOCAL_FILE)
df.head(5)
# All columns, other than *survived* are called *features*. The goal of regression task is to predict a *target* based on learning patterns from *features*. Column *survived* is our *target*.
# The regression task predicts a probability of survival. As mentioned earlier, in a binary prediction task, we use a threshold function to make a binary decision based on a probability score. For example, using a step function to set the threshold.
#
# $$
# f(x) = \begin{cases}
# 1 & x\geqq 0.5, \\
# 0 & x< 0.5
# \end{cases}
# $$
# We can indeed change the threshold in order to tune the model for precision or recall as we discussed earlier.
#
# __***Note:***__ For logistic regression with Gluon please refer to: [logistic regression using gluon](https://github.com/zackchase/mxnet-the-straight-dope/blob/master/chapter02_supervised-learning/logistic-regression-gluon.ipynb)
#
#
#
# We shall now depart from the Titanic example, because the data requires pre-processing. Pre-processing of data is a crucial task in developing machine learning models and requires a separate post. We now use the credit card data set from Kaggle, where data is pre-processed, meaning:
# - All data is numeric
# - All data is factorized, meaning that ranges are transformed to *"one-of-N* form.
# - All data is normalized, meaning that the ranges have become comparable.
#
# For more information and to learn how to write your own linear regression and binary classification models please go to the following:
# - [linear regression from scratch](https://github.com/zackchase/mxnet-the-straight-dope/blob/master/chapter02_supervised-learning/linear-regression-scratch.ipynb)
# - [linear regression using gluon](https://github.com/zackchase/mxnet-the-straight-dope/blob/master/chapter02_supervised-learning/linear-regression-gluon.ipynb)
#
# for the binary variant or logistic regression please refer to:
# - [logistic regression using gluon](https://github.com/zackchase/mxnet-the-straight-dope/blob/master/chapter02_supervised-learning/logistic-regression-gluon.ipynb)
#
# # What’s next
# In blog post 1 I provide you with a basic introduction to linear regression. I also provide references to implement your own linear regression, both from scratch and using MXNet and Gluon.
#
# In blog post 2, [Getting Hands-On with Linear Learner and Amazon SageMaker](linearlearner-blogpost-part2.ipynb), I’ll download [Visa dataset from Kaggle](https://www.kaggle.com/mlg-ulb/creditcardfraud) from an Amazon S3 location to my notebook instance and pre-process it to feed the data to the algorithm. We can then create a live endpoint and make predictions using trained models.
#
| src/linearlearner-blogpost-part1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"col": 0, "height": 4, "hidden": false, "row": 0, "width": 4}, "report_default": {}}}}
# one
# + extensions={"jupyter_dashboards": {"version": 1, "views": {"grid_default": {"hidden": true}, "report_default": {}}}}
| verseny/submit/Eze/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Self-Driving Car Engineer Nanodegree
#
#
# ## Project: **Finding Lane Lines on the Road**
# ***
# In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
#
# Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
#
# In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.
#
# ---
# Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
#
# **Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
#
# ---
# **The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**
#
# ---
#
# <figure>
# <img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
# </figcaption>
# </figure>
# <p></p>
# <figure>
# <img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
# </figcaption>
# </figure>
# **Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
# ## Import Packages
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import pprint as pp
# %matplotlib inline
# ## Read in an Image
# +
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('Image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# -
# ## Ideas for Lane Detection Pipeline
# **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
#
# `cv2.inRange()` for color selection
# `cv2.fillPoly()` for regions selection
# `cv2.line()` to draw lines on an image given endpoints
# `cv2.addWeighted()` to coadd / overlay two images
# `cv2.cvtColor()` to grayscale or change color
# `cv2.imwrite()` to output images to file
# `cv2.bitwise_and()` to apply a mask to an image
#
# **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
# ## Helper Functions
# Below are some helper functions to help get you started. They should look familiar from the lesson!
# +
import math
from collections import deque
from enum import Enum
class LineDrawing(Enum):
    """Strategy used to render detected lane lines onto the frame."""
    CANNY = 1 # draw raw Hough segments without further processing
    LINEAR_INTERPOLATION = 2 # interpolate one average line through all segments
    LINE_AVERAGE = 3 # weight each segment by length and interpolate within the polygon for smoother results
def grayscale(img):
    """Convert an RGB image to a single-channel grayscale image.

    NOTE: to display the returned image as grayscale, call
    plt.imshow(gray, cmap='gray'). If the image was loaded with
    cv2.imread() (BGR order), use cv2.COLOR_BGR2GRAY instead.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return gray
def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform"""
    # NOTE(review): cv2.Canny's 4th positional parameter is `edges` (an output
    # array), not apertureSize — confirm that passing 10 here is intentional;
    # apertureSize would need to be passed as a keyword argument.
    return cv2.Canny(img, low_threshold, high_threshold, 10)
def gaussian_blur(img, kernel_size):
    """Smooth `img` with a square Gaussian kernel of side `kernel_size`."""
    ksize = (kernel_size, kernel_size)
    # sigma=0 lets OpenCV derive the standard deviation from the kernel size.
    return cv2.GaussianBlur(img, ksize, 0)
def region_of_interest(img, vertices):
    """
    Apply an image mask: keep only the region inside the polygon formed
    from `vertices`; every other pixel is set to black.
    """
    mask = np.zeros_like(img)
    # The fill value must match the channel count of the input image.
    if img.ndim > 2:
        fill_color = (255,) * img.shape[2]  # e.g. 3 or 4 channels
    else:
        fill_color = 255
    # Paint the polygon white on the black mask, then AND it with the image.
    cv2.fillPoly(mask, vertices, fill_color)
    return cv2.bitwise_and(img, mask)
DEGREES_CONVERTION = 180 / np.pi  # radians -> degrees factor (kept for compatibility)
def line_angle(p1x, p1y, p2x, p2y):
    """Return the angle in degrees of the segment (p1x, p1y)-(p2x, p2y),
    measured as atan(dx / dy).

    Fix: the original raised ZeroDivisionError for horizontal segments
    (p1y == p2y); treat them as the +/-90 degree limit instead.
    """
    dx = p2x - p1x
    dy = p2y - p1y
    if dy == 0:
        # Limit of atan(dx / dy) as dy -> 0; degenerate point pair gives 0.
        return math.copysign(90.0, dx) if dx else 0.0
    return math.degrees(math.atan(dx / dy))
def get_lines_in_range(lines, alpha, delta):
    """
    Return every line whose angle lies within `delta` degrees of `alpha`,
    i.e. (alpha - delta) <= angle <= (alpha + delta), as ((x1, y1), (x2, y2))
    point pairs.
    """
    lower, upper = alpha - delta, alpha + delta
    selected = deque()
    for segment_group in lines:
        for x1, y1, x2, y2 in segment_group:
            if lower <= line_angle(x1, y1, x2, y2) <= upper:
                selected.append(((x1, y1), (x2, y2)))
    return selected
def interpolate_on_all_lines(lines, bottomLeft, topLeft, topRight, bottomRight):
    """
    Fit a degree-1 polynomial x = p(y) through every endpoint of `lines` and
    sample it at 10 evenly spaced y values spanning the polygon's vertical
    extent (from its bottom edge to its top edge).

    `bottomLeft`, `topLeft`, `topRight`, `bottomRight` are the polygon vertices.
    Returns the sampled points converted via `line_points`, or [] if `lines`
    is empty.
    """
    endpoints = deque()
    for start, end in lines:
        endpoints.append(start)
        endpoints.append(end)
    if len(endpoints) == 0:
        return []
    xs = np.array([pt[0] for pt in endpoints])
    ys = np.array([pt[1] for pt in endpoints])
    # Fit x as a linear function of y (lane lines are near-vertical in image space).
    fit = np.poly1d(np.polyfit(ys, xs, 1))
    sampled = deque()
    y_lo = min(bottomLeft[1], bottomRight[1])
    y_hi = max(topLeft[1], topRight[1])
    for y in np.linspace(y_lo, y_hi, 10):
        sampled.append((int(fit(y)), int(y)))
    return line_points(sampled)
def make_line_equation(p1, p2):
    """Build a line-equation function f(y) -> x through points p1 and p2."""
    (x1, y1), (x2, y2) = p1, p2

    def x_at(y):
        # Point-slope form solved for x, using the inverse slope dx/dy.
        return ((y - y1) * (x2 - x1)) / (y2 - y1) + x1

    return x_at
def int_point(p):
    """Truncate both coordinates of the 2-tuple point `p` to ints."""
    return int(p[0]), int(p[1])
def interpolate_yx(ys, xs, ys_range, degree):
    """Fit x = p(y) of the given `degree` through (ys, xs) and evaluate it
    at every y in `ys_range`, returning (x, y) tuples in order."""
    fit = np.poly1d(np.polyfit(ys, xs, degree))
    return deque((fit(y), y) for y in ys_range)
def compute_line_average(lines, dim_y, bottomLeft, topLeft, topRight, bottomRight, tails_points=40):
    """
    Weight each line by its importance (length) and interpolate, row by row,
    an average line passing through the middle of all lines.
    The resulting set of samples has missing data: in between segments, and
    between the first/last segment and the polygon edges.
    For data missing in between, straight lines connect the segments.
    For data missing at the edges, linear extrapolation is used, fitting the
    first/last `tails_points` points and extending to the polygon.
    `dim_y` is the y dimension of the image
    `bottomLeft`, `topLeft`, `topRight`, `bottomRight` are the vertices of the polygon
    `tails_points` is the amount of points used to fit the extrapolating function
    """
    # Sort each segment's endpoints so the first one has the larger y.
    sorted_line_tuples = deque()
    for p1, p2 in lines:
        if p2[1] > p1[1]:
            sorted_line_tuples.append((p2, p1))
        else:
            sorted_line_tuples.append((p1, p2))
    sorted_line_tuples = list(sorted_line_tuples)
    # sort lines from top to bottom (by their lower endpoint's y)
    sorted_lines = sorted(sorted_line_tuples, key=lambda tup: tup[0][1])
    # For every y row, collect (line index, weight) for each line covering it;
    # a line's weight is its vertical extent.
    y_buffer = [deque() for _ in range(dim_y)]
    for li, (p1,p2) in enumerate(sorted_lines):
        weight = p1[1]- p2[1]
        for yi in range(p2[1], p1[1]+1):
            # append tuple (index, weight)
            y_buffer[yi].append((li, weight))
    # Replace each covered row's deque with the weighted average x of all
    # lines crossing that row (each line evaluated at this y).
    for y, y_indexes in enumerate(y_buffer):
        if len(y_indexes) > 0:
            xs = 0
            weights = 0
            for y_ind, weight in y_indexes:
                xs += make_line_equation(*sorted_lines[y_ind])(y) * weight
                weights += weight
            x = xs / weights
            y_buffer[y] = x
    # Mark the leading and trailing uncovered rows (still empty deques) None.
    for y, x in enumerate(y_buffer):
        if isinstance(x, deque):
            y_buffer[y] = None
        else:
            break
    y_buffer_size = len(y_buffer) -1
    for y, x in enumerate(reversed(y_buffer)):
        if isinstance(x, deque):
            y_buffer[y_buffer_size - y] = None
        else:
            break
    # Find gaps (rows still holding deques) in the middle; record bracketing
    # samples as (x, y, flag) tuples with flag 1 = gap start, 2 = gap end.
    missing_lines = deque()
    last_element = None  # NOTE(review): assigned but never read -- dead state.
    is_last_a_number = False
    is_last_deque = False
    for y, x in enumerate(y_buffer):
        if x is None:
            last_element = None
            continue
        is_deque = isinstance(x, deque)
        is_none = x is None  # NOTE(review): always False here; None rows were skipped above.
        is_a_number = not is_deque and not is_none
        if is_deque and is_last_a_number:
            missing_lines.append((y_buffer[y-1], y, 1))
        if is_last_deque and is_a_number:
            missing_lines.append((y_buffer[y], y, 2))
        is_last_deque = is_deque
        is_last_a_number = is_a_number
    #pp.pprint(missing_lines)
    # Bridge each (start, end) gap pair with a straight line x = f(y).
    for f1, f2 in pairwise(missing_lines):
        if not(f1[2] == 1 and f2[2] == 2):
            raise ValueError("Something when wrong with the line interpolation")
        p1 = f1[0], f1[1]
        p2 = f2[0], f2[1]
        f = make_line_equation(p1, p2)
        for y in range(p1[1], p2[1]+1):
            y_buffer[y] = f(y)
    # Extract the filled (x, y) samples for a final degree-2 smoothing fit.
    px = deque()
    py = deque()
    for y, x in enumerate(y_buffer):
        if x is not None:
            px.append(x)
            py.append(y)
    interpolated_points = list(interpolate_yx(py, px, range(min(py), max(py)), 2))
    #interpolated_points = [(x, y) for x, y in zip(px, py)]
    # Linearly extrapolate from the first/last `tails_points` samples so the
    # drawn line reaches the top and bottom edges of the polygon.
    s_pts = interpolated_points[:tails_points]
    e_pts = interpolated_points[-tails_points:]
    s_pts_x = [p[0] for p in s_pts]
    s_pts_y = [p[1] for p in s_pts]
    e_pts_x = [p[0] for p in e_pts]
    e_pts_y = [p[1] for p in e_pts]
    bottom_y = min(bottomLeft[1], bottomRight[1])
    top_y = max(topLeft[1], topRight[1])
    top_part = interpolate_yx(s_pts_y, s_pts_x, range(top_y, min(s_pts_y) - 1), 1)
    bottom_part = interpolate_yx(e_pts_y, e_pts_x, range(max(e_pts_y), bottom_y + 1), 1)
    interpolated_points.extend(bottom_part)
    interpolated_points.extend(top_part)
    return interpolated_points
def draw_lines(img, lines, polygon, alpha, delta, drawing_mode, color=(255, 0, 255), thickness=10):
    """
    Draw the Hough `lines` onto `img` in place, after filtering them down to
    segments whose slope lies within `alpha +/- delta` degrees.

    `drawing_mode` selects the strategy:
      * LineDrawing.CANNY                -- draw the raw detected segments
      * LineDrawing.LINEAR_INTERPOLATION -- fit one line through all segment
                                            endpoints and draw that
      * LineDrawing.LINE_AVERAGE         -- length-weighted average of all
                                            segments (see compute_line_average)
    `polygon` is (bottomLeft, topLeft, topRight, bottomRight), the detection
    region the lines belong to.

    Raises ValueError for an unknown `drawing_mode`.
    """
    lines_in_range = get_lines_in_range(lines, alpha, delta)
    if drawing_mode == LineDrawing.CANNY:
        for p1, p2 in lines_in_range:
            cv2.line(img, p1, p2, color, thickness)
    elif drawing_mode == LineDrawing.LINEAR_INTERPOLATION:
        bottomLeft, topLeft, topRight, bottomRight = polygon
        lines_to_draw = interpolate_on_all_lines(lines_in_range, bottomLeft, topLeft, topRight, bottomRight)
        for p1, p2 in lines_to_draw:
            cv2.line(img, p1, p2, color, thickness)
    elif drawing_mode == LineDrawing.LINE_AVERAGE:
        bottomLeft, topLeft, topRight, bottomRight = polygon
        # BUG FIX: the original read the *global* `image` (NameError unless a
        # leftover notebook global existed) and passed shape[1] (width);
        # compute_line_average expects the y dimension (height) of the image
        # actually being drawn on.
        points_to_draw = compute_line_average(lines_in_range, img.shape[0],
                                              bottomLeft, topLeft, topRight, bottomRight)
        # Each point is drawn as a zero-length thick line, i.e. a dot.
        for p in points_to_draw:
            int_p = int_point(p)
            cv2.line(img, int_p, int_p, color, thickness)
    else:
        raise ValueError("Please provide a valid '%s' mode" % LineDrawing)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap, polygon, alpha, delta, drawing_mode):
    """
    Run the probabilistic Hough transform on a Canny edge image (`img`) and
    return a fresh RGB image with the detected lines drawn on black.
    """
    segments = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                               minLineLength=min_line_len, maxLineGap=max_line_gap)
    canvas = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(canvas, segments, polygon, alpha, delta, drawing_mode)
    return canvas
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """
    Blend two images of identical shape: initial_img * α + img * β + γ.

    `img` is the output of hough_lines(): a blank (all black) image with the
    detected lines drawn on it.
    `initial_img` is the image before any processing.

    NOTE: initial_img and img must be the same shape!
    """
    return cv2.addWeighted(initial_img, α, img, β, γ)
def line_points(iterable):
    """
    Convert a polygon (indexable sequence of points) into consecutive point
    pairs, closing the loop:
    [p0, p1, p2, p3] -> (p0, p1), (p1, p2), (p2, p3), (p3, p0)
    Raises ValueError for an empty polygon.
    """
    if len(iterable) == 0:
        raise ValueError("Please provide a valid polygon")
    closed = list(iterable) + [iterable[0]]
    return zip(closed, closed[1:])
def draw_ploygon(image, polygon, color=[0, 255, 0], thickness=3):
    """Draw the polygon's closed outline on top of an existing image (in place)."""
    for start, end in line_points(polygon):
        start = tuple(int(c) for c in start)
        end = tuple(int(c) for c in end)
        cv2.line(image, start, end, color, thickness)
def make_hsv_ranges_min_max(hue, delta=4):
    """Build (lower, upper) uint8 HSV bounds around *hue* for cv2.inRange;
    saturation/value run from 100 up to 255."""
    return (np.array([hue - delta, 100, 100], np.uint8),
            np.array([hue + delta, 255, 255], np.uint8))
def make_hsv(h, s, v):
    """Pack a single HSV triple into a uint8 numpy array."""
    return np.array((h, s, v), dtype=np.uint8)
def plot_image(image):
    """Open a new figure and show *image*; 2-D arrays get a gray colormap."""
    plt.figure()
    cmap = 'gray' if len(image.shape) == 2 else None
    plt.imshow(image, cmap=cmap)
def rgb_bgr_image(image):
    """Swap the first and third channels (BGR <-> RGB)."""
    channels = cv2.split(image)
    return cv2.merge((channels[2], channels[1], channels[0]))
def pairwise(iterable):
    "s -> (s0, s1), (s2, s3), (s4, s5), ... (consecutive, non-overlapping pairs)"
    its = [iter(iterable)] * 2
    return zip(*its)
# +
# Polygons are used as filters for selecting the regions of interest
# They must have the following structure: bottomLeft, topLeft, topRight, bottomRight
def integer_polygon(polygon):
    """Truncate each vertex's (x, y) coordinates to plain ints."""
    return [(int(px), int(py)) for px, py, *_ in polygon]
def cutting_polygon_function_entire_image(imshape):
    """Return a polygon covering the whole image area."""
    width, height = imshape[1], imshape[0]
    return integer_polygon([
        (width * 0.0, height * 0.0),
        (width * 0.0, height * 1.0),
        (width * 1.0, height * 1.0),
        (width * 1.0, height * 0.0)
    ])
def left_cutting_polygon(imshape):
    """Region of interest for the left lane line, as
    (bottomLeft, topLeft, topRight, bottomRight) in image fractions."""
    width, height = imshape[1], imshape[0]
    return integer_polygon([
        (width * 0.09, height * 1.00),
        (width * 0.45, height * 0.62),
        (width * 0.55, height * 0.62),
        (width * 0.23, height * 1.00)
    ])
def right_cutting_polygon(imshape):
    """Region of interest for the right lane line, as
    (bottomLeft, topLeft, topRight, bottomRight) in image fractions."""
    width, height = imshape[1], imshape[0]
    return integer_polygon([
        (width * 0.80, height * 1.00),
        (width * 0.505, height * 0.62),
        (width * 0.56, height * 0.62),
        (width * 0.97, height * 1.00),
    ])
# +
# Report output function
def save_report_image(image, name, width=300, height=169, out_dir="img_references"):
    """
    Resize *image* to width x height and save it as a maximum-quality JPEG at
    out_dir/<name>.jpg (RGB input is converted to BGR for cv2.imwrite).
    """
    destination = os.path.join(out_dir, "%s.jpg" % name)
    scaled = cv2.resize(image, (width, height))
    bgr = cv2.cvtColor(scaled, cv2.COLOR_RGB2BGR)
    cv2.imwrite(destination, bgr, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
# -
# ## Test Images
#
# Build your pipeline to work on the images in the directory "test_images"
# **You should make sure your pipeline works well on these images before you try the videos.**
# +
import os
# Collect the path of every sample image shipped in the `test_images` directory.
DIR = "test_images"
test_images = [os.path.join(DIR, x) for x in os.listdir(DIR)]
# -
# ## Build a Lane Finding Pipeline
#
#
# Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
#
# Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
# +
def process_image_v2(img, polygon_builders, alphas, deltas, drawing_mode,
                     print_debug=False, debug_ploygon=False):
    """
    Full lane-detection pipeline for one RGB frame; returns the frame with
    the detected lane lines drawn on top.

    `polygon_builders` list of functions like the ones defined above, used to output
    a polygon in the image coordinates
    `alphas` list of angles 0° - 360° used to filter hough lines
    `deltas` list of angle deltas in degrees; together with each alpha they
    provide the range in which hough lines are kept
    `drawing_mode` selects which method is used to draw the Hough lines:
        `LineDrawing.CANNY` draws lines as they are detected by the helper function
        `LineDrawing.LINEAR_INTERPOLATION` will interpolate and find the average line between all lines
        `LineDrawing.LINE_AVERAGE` will weight each line by its importance(length) and interpolate
        all values to extract a smoother result
    `print_debug` prints in the console the list of intermediate images used to obtain the final result
    `debug_ploygon` prints the outlines of the polygons
    """
    hsv_img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    # Select all `yellow colors` for lane line detection
    yellow_min_hsv, yellow_max_hsv = make_hsv_ranges_min_max(21, delta=10)
    yellow_hsv_mask = cv2.inRange(hsv_img, yellow_min_hsv, yellow_max_hsv)
    # Select all `white colors` in the entire image
    white_min_hsv = make_hsv(6, 0, 50)
    white_max_hsv = make_hsv(110, 21, 255)
    white_hsv_mask = cv2.inRange(hsv_img, white_min_hsv, white_max_hsv)
    # Union of the two color masks (both weighted 1.0).
    selected_colors = weighted_img(yellow_hsv_mask, white_hsv_mask, 1., 1., 0.)
    # lowering gray image luminosity, prevents edge detection in white / black asphalt
    gray_image = (grayscale(img).astype(float) * 0.50).astype(np.uint8)
    # Sum the mask selections of yellow and white to the gray image to use in the detector
    gray_overlap = weighted_img(gray_image, selected_colors, 1., 1., 0.)
    blurred_overlap = gaussian_blur(gray_overlap, kernel_size=5)
    image_edges = canny(blurred_overlap, low_threshold=50, high_threshold=150)
    # Hough lines are detected only in certain areas delimited by polygons.
    # In each polygon, lines must have a slope in the range `alpha ± delta` (degrees).
    # All detections are summed together into a final image.
    hough_lines_images = deque()
    for polygon_builder, alpha, delta in zip(polygon_builders, alphas, deltas):
        polygon = polygon_builder(image_edges.shape)
        vertices = np.array([polygon], dtype=np.int32)
        masked_image = region_of_interest(image_edges, vertices)
        hough_lines_image = hough_lines(masked_image,
                                        rho=2, theta=np.pi/180, threshold=35, min_line_len=5, max_line_gap=10,
                                        polygon=polygon, alpha=alpha, delta=delta, drawing_mode=drawing_mode)
        hough_lines_images.append(hough_lines_image)
    hough_lines_images = list(hough_lines_images)
    # Merge every per-polygon detection image into one.
    complete_hough_lines_image = hough_lines_images[0]
    for partial_hough_image in hough_lines_images[1:]:
        complete_hough_lines_image = weighted_img(complete_hough_lines_image, partial_hough_image, 1., 1., 0.)
    # Overlapping of lines on top of the original image
    weighted_image = weighted_img(complete_hough_lines_image, img, 1., 1., 0.)
    # Draws on top of the final result the polygons indicating the detection area
    if debug_ploygon:
        for polygon_builder in polygon_builders:
            draw_ploygon(weighted_image, polygon_builder(image_edges.shape))
    # Plots in the console the intermediate steps of the pipeline
    if print_debug:
        plot_image(img)
        plot_image(yellow_hsv_mask)
        plot_image(white_hsv_mask)
        plot_image(gray_overlap)
        plot_image(blurred_overlap)
        plot_image(image_edges)
        plot_image(complete_hough_lines_image)
        plot_image(weighted_image)
    return weighted_image
def wrapped_process_image(img, drawing_mode, print_debug, debug_ploygon):
    """Run the v2 pipeline with the project's fixed configuration: left and
    right lane polygons with slopes of -50° and +60°, each ±10° tolerance."""
    return process_image_v2(img, polygon_builders=[left_cutting_polygon, right_cutting_polygon],
                            alphas=[-50, 60], deltas=[10, 10],
                            drawing_mode=drawing_mode,
                            print_debug=print_debug, debug_ploygon=debug_ploygon)
def pipeline_process_image(img):
    """Entry point used on video frames: LINE_AVERAGE drawing, no debug output."""
    return wrapped_process_image(img, drawing_mode=LineDrawing.LINE_AVERAGE,
                                 print_debug=False, debug_ploygon=False)
def test_function(drawing_mode, print_debug, debug_ploygon):
    """Grab a single frame from a test video and run the pipeline on it."""
    # Extract an image from the challenge clip and use it to test
    #vidcap = cv2.VideoCapture('test_videos/solidWhiteRight.mp4')
    #vidcap = cv2.VideoCapture('test_videos/solidYellowLeft.mp4')
    vidcap = cv2.VideoCapture('test_videos/challenge.mp4')
    # Seek 4.2 seconds into the clip before grabbing the frame.
    vidcap.set(cv2.CAP_PROP_POS_MSEC, 4200)
    success, img = vidcap.read()
    if success:
        # OpenCV decodes frames as BGR; the pipeline expects RGB.
        img = rgb_bgr_image(img)
        print('Image info: ', img.shape)
        result = wrapped_process_image(img, drawing_mode=drawing_mode,
                                       print_debug=print_debug, debug_ploygon=debug_ploygon)
        #save_report_image(result, 'test')
        plot_image(result)
# Uncomment to test the base function of the detector
test_function(LineDrawing.LINEAR_INTERPOLATION, True, True)
# -
# ## Test on Videos
#
# You know what's cooler than drawing lanes over images? Drawing lanes over video!
#
# We can test our solution on two provided videos:
#
# `solidWhiteRight.mp4`
#
# `solidYellowLeft.mp4`
#
# **Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
#
# **If you get an error that looks like this:**
# ```
# NeedDownloadError: Need ffmpeg exe.
# You can download it by calling:
# imageio.plugins.ffmpeg.download()
# ```
# **Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image_frame(image):
    """Per-frame callback for moviepy's fl_image: returns the frame with the
    lane lines drawn on it (a 3-channel color image)."""
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    # TODO: put your pipeline here,
    # you should return the final output (image where lines are drawn on lanes)
    result = pipeline_process_image(image)
    return result
# Let's try the one with the solid white lane on the right first ...
# +
# Run the lane-finding pipeline over every frame of the solid-white-lane clip.
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image_frame) #NOTE: this function expects color images!!
# %time white_clip.write_videofile(white_output, audio=False)
# -
# Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
# ## Improve the draw_lines() function
#
# **At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
#
# **Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
# Now for the one with the solid yellow lane on the left. This one's more tricky!
# Run the pipeline on the solid-yellow-lane clip and embed the result inline.
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image_frame)
# %time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
# ## Writeup and Submission
#
# If you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.
#
# ## Optional Challenge
#
# Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!
# Run the pipeline on the harder challenge clip and embed the result inline.
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image_frame)
# %time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
| P1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ray RLlib - Overview
#
# © 2019-2020, Anyscale. All Rights Reserved
#
# 
#
# This tutorial, part of [Anyscale Academy](https://anyscale.com/academy), introduces the broad topic of _reinforcement learning_ (RL) and [RLlib](https://ray.readthedocs.io/en/latest/rllib.html), Ray's comprehensive RL library.
#
# The lessons in this tutorial use different _environments_ from [OpenAI Gym](https://gym.openai.com/) to illustrate how to train _policies_.
#
# See the instructions in the [README](../README.md) for setting up your environment to use this tutorial.
#
# Go [here](../Overview.ipynb) for an overview of all tutorials.
# ## Tutorial Sections
#
# Because of the breadth of RL this tutorial is divided into several sections. See below for a recommended _learning plan_.
#
# ### Introduction to Reinforcement Learning and RLlib
#
# | | Lesson | Description |
# | :- | :----- | :---------- |
# | 00 | [Ray RLlib Overview](00-Ray-RLlib-Overview.ipynb) | Overview of this tutorial, including all the sections. (This file.) |
# | 01 | [Introduction to Reinforcement Learning](01-Introduction-to-Reinforcement-Learning.ipynb) | A quick introduction to the concepts of reinforcement learning. You can skim or skip this lesson if you already understand RL concepts. |
# | 02 | [Introduction to RLlib](02-Introduction-to-RLlib.ipynb) | An overview of RLlib, its goals and the capabilities it provides. |
# | 03 | [RL References](03-RL-References.ipynb) | References on reinforcement learning. |
#
# Exercise solutions for this introduction can be found [here](solutions/Ray-RLlib-Solutions.ipynb).
# ### Multi-Armed Bandits
#
# _Multi-Armed Bandits_ (MABs) are a special kind of RL problem that have broad and growing applications. They are also an excellent platform for investigating the important _exploitation vs. exploration tradeoff_ at the heart of RL. The term _multi-armed bandit_ is inspired by the slot machines in casinos, so called _one-armed bandits_, but where a machine might have more than one arm.
#
# | | Lesson | Description |
# | :- | :----- | :---------- |
# | 00 | [Multi-Armed-Bandits Overview](multi-armed-bandits/00-Multi-Armed-Bandits-Overview.ipynb) | Overview of this set of lessons. |
# | 01 | [Introduction to Multi-Armed Bandits](multi-armed-bandits/01-Introduction-to-Multi-Armed-Bandits.ipynb) | A quick introduction to the concepts of multi-armed bandits (MABs) and how they fit in the spectrum of RL problems. |
# | 02 | [Exploration vs. Exploitation Strategies](multi-armed-bandits/02-Exploration-vs-Exploitation-Strategies.ipynb) | A deeper look at algorithms that balance exploration vs. exploitation, the key challenge for efficient solutions. Much of this material is technical and can be skipped in a first reading, but skim the first part of this lesson at least. |
# | 03 | [Simple Multi-Armed Bandit](multi-armed-bandits/03-Simple-Multi-Armed-Bandit.ipynb) | A simple example of a multi-armed bandit to illustrate the core ideas. |
# | 04 | [Linear Upper Confidence Bound](multi-armed-bandits/04-Linear-Upper-Confidence-Bound.ipynb) | One popular algorithm for exploration vs. exploitation is _Upper Confidence Bound_. This lesson shows how to use a linear version in RLlib. |
# | 05 | [Linear Thompson Sampling](multi-armed-bandits/05-Linear-Thompson-Sampling.ipynb) | Another popular algorithm for exploration vs. exploitation is _Thompson Sampling_. This lesson shows how to use a linear version in RLlib. |
# | 06 | [Market Example](multi-armed-bandits/06-Market-Example.ipynb) | A simplified real-world example of MABs, finding the optimal stock and bond investment strategy. |
#
# Exercise solutions for this segment of the tutorial can be found [here](multi-armed-bandits/solutions/Multi-Armed-Bandits-Solutions.ipynb).
# ### Explore Reinforcement Learning and RLlib
#
# This section dives into more details about RL and using RLlib. It is best studied after going through the MAB material.
#
# | | Lesson | Description |
# | :- | :----- | :---------- |
# | 00 | [Explore RLlib Overview](explore-rllib/00-Explore-RLlib-Overview.ipynb) | Overview of this set of lessons. |
# | 01 | [Application - Cart Pole](explore-rllib/01-Application-Cart-Pole.ipynb) | The best starting place for learning how to use RL, in this case to train a moving car to balance a vertical pole. Based on the `CartPole-v0` environment from OpenAI Gym, combined with RLlib. |
# | 02 | [Application: Bipedal Walker](explore-rllib/02-Bipedal-Walker.ipynb) | Train a two-legged robot simulator. This is an optional lesson, due to the longer compute times required, but fun to try. |
# | 03 | [Custom Environments and Reward Shaping](explore-rllib/03-Custom-Environments-Reward-Shaping.ipynb) | How to customize environments and rewards for your applications. |
#
# Some additional examples you might explore can be found in the `extras` folder:
#
# | Lesson | Description |
# | :----- | :---------- |
# | [Extra: Application - Mountain Car](explore-rllib/extras/Extra-Application-Mountain-Car.ipynb) | Based on the `MountainCar-v0` environment from OpenAI Gym. |
# | [Extra: Application - Taxi](explore-rllib/extras/Extra-Application-Taxi.ipynb) | Based on the `Taxi-v3` environment from OpenAI Gym. |
# | [Extra: Application - Frozen Lake](explore-rllib/extras/Extra-Application-Frozen-Lake.ipynb) | Based on the `FrozenLake-v0` environment from OpenAI Gym. |
#
# In addition, exercise solutions for this tutorial can be found [here](explore-rllib/solutions/Ray-RLlib-Solutions.ipynb).
#
# For earlier versions of some of these tutorials, see [`rllib_exercises`](https://github.com/ray-project/tutorial/blob/master/rllib_exercises/rllib_colab.ipynb) in the original [github.com/ray-project/tutorial](https://github.com/ray-project/tutorial) project.
# ## Learning Plan
#
# We recommend the following _learning plan_ for working through the lessons:
#
# Start with the introduction material for RL and RLlib:
#
# * [Ray RLlib Overview](00-Ray-RLlib-Overview.ipynb) - This file
# * [Introduction to Reinforcement Learning](01-Introduction-to-Reinforcement-Learning.ipynb)
# * [Introduction to RLlib](02-Introduction-to-RLlib.ipynb)
#
# Then study several of the lessons for multi-armed bandits, starting with these lessons:
#
# * [Multi-Armed-Bandits Overview](multi-armed-bandits/00-Multi-Armed-Bandits-Overview.ipynb)
# * [Introduction to Multi-Armed Bandits](multi-armed-bandits/01-Introduction-to-Multi-Armed-Bandits.ipynb)
# * [Exploration vs. Exploitation Strategies](multi-armed-bandits/02-Exploration-vs-Exploitation-Strategies.ipynb): Skim at least the first part of this lesson.
# * [Simple Multi-Armed Bandit](multi-armed-bandits/03-Simple-Multi-Armed-Bandit.ipynb)
#
# As time permits, study one or both of the following lessons:
#
# * [Linear Upper Confidence Bound](multi-armed-bandits/04-Linear-Upper-Confidence-Bound.ipynb)
# * [Linear Thompson Sampling](multi-armed-bandits/05-Linear-Thompson-Sampling.ipynb)
#
# Then finish with this more complete example:
#
# * [Market Example](multi-armed-bandits/06-Market-Example.ipynb)
#
# Next, return to the "exploration" lessons under `explore-rllib` and work through as many of the following lessons as time permits:
#
# * [Application: Cart Pole](explore-rllib/01-Application-Cart-Pole.ipynb): Further exploration of the popular _CartPole_ example.
# * [Application: Bipedal Walker](explore-rllib/02-Bipedal-Walker.ipynb): A nontrivial, but simplified robot simulator.
# * [Custom Environments and Reward Shaping](explore-rllib/03-Custom-Environments-Reward-Shaping.ipynb): More about creating custom environments for your problem. Also, fine-tuning the rewards to ensure sufficient exploration.
#
# Other examples that use different OpenAI Gym environments are provided for your use in the `extras` directory:
#
# * [Extra: Application - Mountain Car](explore-rllib/extras/Extra-Application-Mountain-Car.ipynb)
# * [Extra: Application - Taxi](explore-rllib/extras/Extra-Application-Taxi.ipynb)
# * [Extra: Application - Frozen Lake](explore-rllib/extras/Extra-Application-Frozen-Lake.ipynb)
#
# Finally, the [references](03-RL-References.ipynb) collect useful books, papers, blog posts, and other available tutorial materials.
# ## Getting Help
#
# * The [#tutorial channel](https://ray-distributed.slack.com/archives/C011ML23W5B) on the [Ray Slack](https://ray-distributed.slack.com)
# * [Email](mailto:<EMAIL>)
#
# Find an issue? Please report it!
#
# * [GitHub issues](https://github.com/anyscale/academy/issues)
| ray-rllib/00-Ray-RLlib-Overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import rasterio
from rasterio.plot import show
from rasterio.merge import merge
from rasterio.plot import show
import rasterio.features
import rasterio.warp
import glob
import os
import rioxarray as xr
import pandas as pd
import geopandas as gpd
import numpy as np
from rasterio.enums import Resampling
from rasterio.crs import CRS
import matplotlib.pyplot as plt
import earthpy as et
import earthpy.plot as ep
from shapely.geometry import mapping
import subprocess
from osgeo import gdal
import multiprocessing as mp
from typing import List, Any, Sequence, Tuple
import xarray as xarray
from numpy import savetxt
# %matplotlib inline
# -
# # Landcover Data
# ### Importing the Training Region Polygon
#region of interest
# Load the training-region polygon (GeoJSON) as a GeoDataFrame; note that
# `studyRegion` is rebound from the path string to the loaded data.
studyRegion = '/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/GeoJSONS/PIREDD_Plataue.geojson'
studyRegion = gpd.read_file(studyRegion)
print(studyRegion.head())
print(studyRegion.crs)
# +
# Quick visual sanity check of the study-region polygon.
fig, ax = plt.subplots(figsize=(6, 6))
studyRegion.plot(ax=ax)
ax.set_title("studyRegion",
             fontsize=16)
plt.show()
# -
# ### Importing and Exploring the Landcover Data
#landcover data
# Open the 2013 ESA CCI landcover GeoTIFF via rioxarray (lazy DataArray).
ESA_CCI = '/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/ESA_CCI/TIF/ESA_CCI_LC_Map_2013.tif'
landcover = xr.open_rasterio(ESA_CCI)
print(landcover)
# +
#landcover classes
# The legend CSV is semicolon-delimited; the class code is the index column.
landcover_classes = '/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/ESA_CCI/TIF/ESACCI-LC-Legend.csv'
classes = pd.read_csv(landcover_classes, delimiter=";", index_col=0)
print(f"There are {len(classes)} classes.")
print(classes.head())
# -
#explore statistics
print(landcover.rio.crs)
print(landcover.rio.nodata)
print(landcover.rio.bounds())
print(landcover.rio.width)
print(landcover.rio.height)
print(landcover.rio.crs.wkt)
#landcover.values
landcover
# ### Reproject and Clip the Landcover Data to DRC Cooridnate Projection and Study Region
# Clip the global raster to the study-region geometry, then reproject.
drc_landcover = landcover.rio.clip(studyRegion.geometry.apply(mapping))
#define projection for DRC
# NOTE(review): confirm EPSG:3341 is the intended projected CRS for this region.
crs_drc = CRS.from_string('EPSG:3341')
landcover_drc_crs = drc_landcover.rio.reproject(crs_drc)
landcover_drc_crs.rio.crs
landcover_drc_crs.shape
landcover_drc_crs.values
#save as raster ahead of reclassification
landcover_drc_crs.rio.to_raster('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/PIREDD_Test_ESA/PIREDD_2013_ESA.tif')
# ### Reclassify classes
#training_landcover_path = '/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/ESA_CCI_Unmerged.tif'
#training_landcover = xr.open_rasterio(training_landcover_path)
# NOTE(review): `training_landcover` is only defined in the next cell, so
# running this cell first raises NameError -- it relies on notebook state.
training_landcover.plot()
# +
# Reload the clipped/reprojected landcover raster produced earlier.
training_landcover_path = '/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/landcover_clipped_reproj.tif'
training_landcover = xr.open_rasterio(training_landcover_path)
def reclassify_landcover(input_array):
    '''Map ESA CCI landcover class codes onto six super classes.

    Input and output are xarray.DataArrays with identical coords and attrs.
    Super classes: 1 cropland, 2 shrubland, 3 forest, 4 urban, 5 water,
    0 no-data / other.
    '''
    # Groups are applied in order, each testing the ORIGINAL codes, so a code
    # listed in two groups ends up with the later group's value.
    # NOTE(review): code 151 appears under both shrubland and forest, so it is
    # ultimately classed as forest -- confirm this is intentional.
    superclass_groups = [
        (1, [10, 20, 130, 11, 40]),                                             # cropland
        (2, [12, 30, 153, 152, 151, 150, 110, 120, 121, 122]),                  # shrubland
        (3, [50, 60, 61, 62, 100, 151, 70, 71, 72, 80, 81, 82, 90, 160, 170]),  # forest
        (4, [190, 202, 201, 200]),                                              # urban
        (5, [210, 180]),                                                        # water
        (0, [0, 140, 220]),                                                     # no data and other
    ]
    reclassified = input_array
    for superclass, codes in superclass_groups:
        reclassified = np.where(np.isin(input_array, codes), superclass, reclassified)
    return xarray.DataArray(data=reclassified, coords=input_array.coords, attrs=input_array.attrs)
#super classes
# Apply the ESA-code -> super-class mapping, then tabulate pixel counts per class.
drc_training_landcover = reclassify_landcover(training_landcover)
superclass_data = []
superclass_vals = np.unique(drc_training_landcover)
for c in superclass_vals:
    superclass_data.append((drc_training_landcover == c).sum())
superclass_df = pd.DataFrame(superclass_data, index=superclass_vals, columns=['pixel_count'])
# Label each row by its class code rather than by position: np.unique returns sorted
# codes starting at 0 (No Data), so the previous hard-coded list beginning with
# 'Cropland' mislabelled every row whenever class 0 was present.
superclass_names = {0: 'No Data and Other', 1: 'Cropland', 2: 'Shrubland',
                    3: 'Forest', 4: 'Urban', 5: 'Water'}
superclass_df.insert(superclass_df.shape[1], 'class_name',
                     [superclass_names.get(c, 'Unknown') for c in superclass_vals])
superclass_df
# -
# NOTE(review): this output path ends in '/.tif' (empty file stem) — likely a missing file name.
drc_training_landcover.rio.to_raster('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/PIREDD_Test_ESA/Reclassified_Final/.tif')
# savetxt comes from numpy and is only imported further down the notebook; move the
# import up if this cell is run first.
savetxt('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/drc_training_landcover', drc_training_landcover, delimiter=',')
training_landcover_path = '/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/classification_training_data/third_remerge_landcover_training_data.tif'
training_landcover = xr.open_rasterio(training_landcover_path)
# Pixel counts per raw ESA class code in the remerged training raster.
unique, counts = np.unique(training_landcover, return_counts=True)
list(zip(unique, counts))
#from 3D ----> 2D
# NOTE(review): this rebinds drc_training_landcover (previously the reclassified map)
# to band 0 of the reprojected raw landcover.
drc_training_landcover = landcover_drc_crs[0, :, :]
print(drc_training_landcover.shape)
# +
# save to csv file
# NOTE(review): ESA_Landcover_Data is not defined anywhere in this notebook section,
# so this cell raises a NameError as written — confirm where it was meant to come from.
savetxt('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/ESA_Landcover_Data.csv', ESA_Landcover_Data, delimiter=',')
# -
landcover_drc_crs.values
# ESA_Landcover_Data.values
ESA_Landcover_Data
# +
#print(landcover_drc_crs)
# -
# Plot the reprojected landcover map (raw ESA codes) and save it to disk.
f, ax = plt.subplots(figsize=(20, 10))
landcover_drc_crs.plot(ax=ax)
ax.set(title="Landcover Map of Training Region")
ax.set_axis_off()
plt.savefig('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/landcover_map_of_training_region.png')
plt.show()
# Plot the reclassified (super-class) map. Distinct title and file name: the original
# reused the exact path of the previous cell's savefig, silently clobbering that figure.
f, ax = plt.subplots(figsize=(20, 10))
drc_training_landcover.plot(ax=ax)
ax.set(title="Reclassified Landcover Map of Training Region")
ax.set_axis_off()
plt.savefig('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/reclassified_landcover_map_of_training_region.png')
plt.show()
# # Satellite Data
# ### Importing and Exploring the Satellite Data
#
#l8 = '/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/training_tiles/merged_l8_train_data.tif'
l8_reprojected = '/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/Hansen_Results/Hansen_Loss_Maps/yearly_arrays/loss_map_20.tif' #the imagery was reprojected in
#the command line using gdal as rioxarray was re-filling the array with unwanted values
l8_data = xr.open_rasterio(l8_reprojected)
print(l8_data)
# Cast to uint8 to shrink memory use — assumes all values fit in 0-255 (TODO confirm).
l8_data = l8_data.astype('uint8')
#explore statistics
print(l8_data.rio.crs)
print(l8_data.rio.nodata)
print(l8_data.rio.bounds())
print(l8_data.rio.width)
print(l8_data.rio.height)
print(l8_data.rio.crs.wkt)
l8_data.values
#l8_data
l8_data.plot() #plotting one band from the training imagery
#from 3D ----> 2D
# Keep only the first band so the array is 2-D and can be written as CSV below.
l8_data = l8_data[0, :, :]
print(l8_data.shape)
# +
from numpy import savetxt
# save to csv file
# Export the single-band 2-D array as CSV for downstream classification work.
savetxt('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/Landsat_Satellite_Data_Training.csv', l8_data, delimiter=',')
# -
l8_data
# ### Resample Landsat Data to match resolution of Landcover Data
# +
def write_image(arr: np.ndarray, save_path: os.PathLike, **raster_meta) -> None:
    """
    Write a GeoTIFF to disk with the given raster metadata.

    Args:
        arr (np.ndarray): The data to write to disk in geotif format
        save_path (os.PathLike): The path to write to
        **raster_meta: Keyword arguments forwarded verbatim to rasterio.open
            (driver, height, width, count, dtype, crs, transform, nodata, ...).
    """
    with rasterio.open(save_path, "w", **raster_meta) as target:
        target.write(arr)
# NOTE(review): an earlier revision of this helper apparently also relaxed file
# permissions for group-workspace users; that step is gone, so callers must manage
# permissions themselves.
def downsample_image(
    image: rasterio.DatasetReader,
    bands: List[int],
    downsample_factor: float = 10.311,
    resampling: Resampling = Resampling.bilinear,
) -> Tuple[np.ndarray, Any]:
    """
    Downsample the given bands of a raster image by the given downsample_factor.

    Args:
        image (rasterio.DatasetReader): Rasterio IO handle to the image
        bands (List[int]): The bands to downsample
        downsample_factor (float, optional): Factor by which the image will be
            downsampled. Defaults to 10.311 (ESA CCI / Landsat resolution ratio
            used in this notebook).
        resampling (Resampling, optional): Resampling algorithm to use. Must be one of
            Rasterio's built-in resampling algorithms. Defaults to Resampling.bilinear.

    Returns:
        Tuple[np.ndarray, Any]: The resampled bands of the image as a numpy
        array together with the adjusted affine transform.
    """
    # Ask rasterio to read straight into the reduced shape; it resamples on the fly.
    downsampled_image = image.read(
        bands,
        out_shape=(
            int(image.height / downsample_factor),
            int(image.width / downsample_factor),
        ),
        resampling=resampling,
    )
    # Scale the affine transform so the coarser pixels keep correct georeferencing.
    transform = image.transform * image.transform.scale(
        (image.width / downsampled_image.shape[-1]),
        (image.height / downsampled_image.shape[-2]),
    )
    return downsampled_image, transform
def generate_downsample(
    file_path: os.PathLike,
    downsample_factor: float = 10.311,
    resampling: Resampling = Resampling.bilinear,
    overwrite: bool = False,
    save_path: os.PathLike = '/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/Hansen_Results/Hansen_Loss_Maps/yearly_arrays/loss_map_20_resampled.tif',
) -> None:
    """
    Generate a downsample of the raster file at `file_path` and write it to `save_path`.

    Args:
        file_path (os.PathLike): The path to the raster image to downsample
        downsample_factor (float, optional): The downsampling factor to use.
            Defaults to 10.311.
        resampling (Resampling, optional): The resampling algorithm to use.
            Defaults to Resampling.bilinear.
        overwrite (bool, optional): Currently unused — the file at `save_path` is
            always (re)written. Kept for interface compatibility.
        save_path (os.PathLike, optional): Where to write the downsampled GeoTIFF.
            Defaults to the loss_map_20 resampled path this notebook previously
            hard-coded, so existing calls behave identically.
    """
    with rasterio.open(file_path) as image:
        downsampled_image, transform = downsample_image(
            image,
            image.indexes,
            downsample_factor=downsample_factor,
            resampling=resampling,
        )
        nbands, height, width = downsampled_image.shape
        # Persist with the scaled transform; dtype is forced to uint8 to match the
        # classification inputs used elsewhere in this notebook.
        write_image(
            downsampled_image,
            save_path,
            driver="GTiff",
            height=height,
            width=width,
            count=nbands,
            dtype="uint8",
            crs=image.crs,
            transform=transform,
            nodata=image.nodata,
        )
# Downsample the loss map to the landcover resolution (writes the default save path).
generate_downsample(l8_reprojected)
#visualising resampled landsat imagery
# NOTE(review): l8_resampled is reused — first as a path string, then as the opened raster.
l8_resampled = '/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/PIREDD_Test_Data/Resampled/PIREDD_Plataue_L8_2020_resampled.tif'
l8_resampled = xr.open_rasterio(l8_resampled)
#print(l8_resampled)
### turning all 0 non data values into nans
#l8_final = l8_resampled.where(l8_resampled !=0)
l8_filled = l8_resampled.fillna(0)
# NOTE(review): fillna returns a new object, so this bare call is a no-op.
l8_filled.fillna(0)
#converting to raster for use in classification
l8_filled.rio.to_raster('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/PIREDD_Test_Data/Filled/PIREDD_Plataue_L8_2020_filled.tif')
# Quick-look plot of the band at index 5.
l8_resampled[5].plot()
l8_resampled.values
#explore statistics
print(l8_resampled.rio.crs)
print(l8_resampled.rio.nodata)
print(l8_resampled.rio.bounds())
print(l8_resampled.rio.width)
print(l8_resampled.rio.height)
print(l8_resampled.rio.crs.wkt)
# +
### Merge satellite data tiles
# Disabled: kept as an inert triple-quoted string for reference. It mosaics the six
# training tiles with rasterio.merge; re-enable by removing the quotes.
"""
import rasterio
import numpy as np
from rasterio.merge import merge
from rasterio.plot import show
dirpath = '/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/training_tiles'
search_criteria = 'D*.tif'
q = os.path.join(dirpath, search_criteria)
dem_fps = glob.glob(q)
src_files_to_mosaic = []
for fp in dem_fps:
    src = rasterio.open(fp)
    src_files_to_mosaic.append(src)
mosaic, out_trans = merge(src_files_to_mosaic)
src1 = rasterio.open('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/training_tiles/DRC_L8_2013_Training_Image_New-0000000000-0000000000.tif')
src2 = rasterio.open('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/training_tiles/DRC_L8_2013_Training_Image_New-0000000000-0000008960.tif')
src3 = rasterio.open('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/training_tiles/DRC_L8_2013_Training_Image_New-0000000000-0000017920.tif')
src4 = rasterio.open('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/training_tiles/DRC_L8_2013_Training_Image_New-0000008960-0000000000.tif')
src5 = rasterio.open('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/training_tiles/DRC_L8_2013_Training_Image_New-0000008960-0000008960.tif')
src6 = rasterio.open('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/training_tiles/DRC_L8_2013_Training_Image_New-0000008960-0000017920.tif')
srcs_to_mosaic = [src1, src2, src3, src4, src5, src6]
# The merge function returns a single array and the affine transform info
arr, out_trans = merge(srcs_to_mosaic)
"""
# -
#filled_l8_path = '/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/filled_l8_data.tif'
#filled_l8 = xr.open_rasterio(l8)
#l8_data_drc_crs = l8_filled.rio.reproject(crs_drc)
#warp = gdal.Warp('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/reprojected_l8.tif','/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/filled_l8_data.tif',dstSRS='EPSG:3341')
# +
#l8_filled.rio.to_raster('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/filled_l8_data.tif')
# +
#l8_filled = l8_data.fillna(0)
#l8_filled.fillna(0)
# +
#l8_filled.rio.to_raster('/gws/nopw/j04/ai4er/users/jl2182/data/Mres_Data/classification_training_data/filled_l8_training_data.tif')
| Notebooks/Experimentation/training_data_prep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# GeoViews is designed to make full use of multidimensional gridded datasets stored in netCDF or other common formats, via the xarray and iris interfaces in HoloViews. This notebook will demonstrate how to load data using both of these data backends, along with some of their individual quirks. The data used in this notebook was originally shipped as part of the [``SciTools/iris-sample-data``](https://github.com/SciTools/iris-sample-data) repository, but a smaller netCDF file is included as part of the GeoViews so that it can be used with xarray as well.
# +
import iris
import numpy as np
import xarray as xr
import holoviews as hv
import geoviews as gv
import geoviews.feature as gf
from cartopy import crs
hv.extension('matplotlib', 'bokeh')
#conda install -c ioam -c conda-forge holoviews geoviews
# -
# ## Setting some notebook-wide options
# Let's start by setting some normalization options (discussed below) and always enable colorbars for the elements we will be displaying:
# %opts Image {+framewise} [colorbar=True] Curve [xrotation=60]
# You can see that it is easy to set global defaults for a project, allowing any suitable settings to be made into a default on a per-element-type basis. Now let's specify the maximum number of frames we will be displaying:
# %output max_frames=1000
#
# <div class="alert alert-info" role="alert">When working on a live server append ``widgets='live'`` to the line above for greatly improved performance and memory usage </div>
#
# ## Loading our data
# In this notebook we will primarily be working with xarray, but we will also load the same data using iris so that we can demonstrate that the two data backends are nearly equivalent.
#
# #### XArray
#
# As a first step we simply load the data using the ``open_dataset`` method xarray provides and have a look at the repr to get an overview what is in this dataset:
# Open the sample ensemble of surface temperatures shipped with GeoViews.
xr_ensemble = xr.open_dataset('/home/pangeo/data/ensemble.nc')
xr_ensemble
# +
# Declare which coordinates are key dimensions and which variable holds the values.
kdims = ['time', 'longitude', 'latitude']
vdims = ['surface_temperature']
xr_dataset = gv.Dataset(xr_ensemble, kdims=kdims, vdims=vdims, crs=crs.PlateCarree())
# -
print(repr(xr_dataset))
print("XArray time type: %s" % xr_dataset.get_dimension_type('time'))
# To improve the formatting of dates on the xarray dataset we can set the formatter for datetime64 types:
hv.Dimension.type_formatters[np.datetime64] = '%Y-%m-%d'
# The `Dataset` object is not yet visualizable, because we have not chosen which dimensions to map onto which axes of a plot.
#
# # A Simple example
#
# To visualize the datasets, in a single line of code we can specify that we want to view it as a collection of Images indexed by longitude and latitude (a HoloViews ``HoloMap`` of ``gv.Image`` elements):
# Map longitude/latitude onto the image axes; the remaining 'time' dimension becomes a slider.
xr_dataset.to(gv.Image, ['longitude', 'latitude'])
# You can see that the `time` dimension was automatically mapped to a slider, because we did not map it onto one of the other available dimensions (x, y, or color, in this case). You can drag the slider to view the surface temperature at different times.
# Now let us load the pre-industrial air temperature:
# Pre-industrial air temperature has no time dimension, so it renders as a single frame.
air_temperature = gv.Dataset(xr.open_dataset('/home/pangeo/data/pre-industrial.nc'), kdims=['longitude', 'latitude'],
                             group='Pre-industrial air temperature', vdims=['air_temperature'],
                             crs=crs.PlateCarree())
air_temperature
# Note that we have the ``air_temperature`` available over ``longitude`` and ``latitude`` but *not* the ``time`` dimensions. As a result, this cube is a single frame (at right below) when visualized as a temperature map.
# Side-by-side layout: time-varying surface temperature (left) + static air temperature (right).
(xr_dataset.to.image(['longitude', 'latitude'])+
 air_temperature.to.image(['longitude', 'latitude']))
# The above plot shows how to combine a fixed number of plots using ``+``, but what if you want to combine some arbitrarily long list of objects? You can do that by making a ``Layout`` explicitly, which is what ``+`` does internally.
#
# The following more complicated example shows how complex interactive plots can be generated with relatively little code, and also demonstrates how different HoloViews elements can be combined together. In the following visualization, the black dot denotes a specific longitude, latitude location *(0,10)*, and the curve is a sample of the ``surface_temperature`` at that location. The curve is unaffected by the `time` slider because it already lays out time along the x axis:
# %%opts Curve [aspect=2 xticks=4 xrotation=15] Points (color='k')
# Sample the dataset at lon=0, lat=10: a time-series Curve plus a marker dot on the map.
temp_curve = hv.Curve(xr_dataset.select(longitude=0, latitude=10), kdims=['time'])
temp_map = xr_dataset.to(gv.Image,['longitude', 'latitude']) * gv.Points([(0,10)], crs=crs.PlateCarree())
temp_map + temp_curve
# ## Overlaying data and normalization
# Let's view the surface temperatures together with the global coastline:
# %%opts Image [projection=crs.Geostationary()] (cmap='Greens') Overlay [xaxis=None yaxis=None]
# Overlay the temperature frames on the global coastline in a Geostationary projection.
xr_dataset.to.image(['longitude', 'latitude']) * gf.coastline
# Notice that every frame individually uses the full dynamic range of the Greens color map. This is because normalization is set to ``+framewise`` at the top of the notebook, which means every frame is normalized independently. This sort of normalization can be computed on an as-needed basis, using whatever values are found in the current data being shown in a given frame, but it won't let you see how different frames compare to each other.
#
# To control normalization, we need to decide on the normalization limits. Let's see the maximum temperature in the cube, and use it to set a normalization range by using the redim method:
# %%opts Image [projection=crs.Geostationary()] (cmap='Greens') Overlay [xaxis=None yaxis=None]
# Fix the color range to (300, max) so all frames share one normalization and are comparable.
max_surface_temp = xr_dataset.range('surface_temperature')[1]
print(max_surface_temp)
xr_dataset.redim(surface_temperature=dict(range=(300, max_surface_temp))).to(gv.Image,['longitude', 'latitude']) \
    * gf.coastline
# By specifying the normalization range we can reveal different aspects of the data. In the example above we can see a cooling effect over time as the dark green areas close to the top of the normalization range (317K) vanish. Values outside this range are clipped to the ends of the color map.
# Lastly, here is a demo of a conversion from ``surface_temperature`` to ``FilledContours``:
# Same data rendered as filled contours instead of an image.
xr_dataset.to(gv.FilledContours,['longitude', 'latitude']) * gf.coastline
# As you can see, it's quite simple to expose any data you like from xarray, easily and flexibly creating interactive or static visualizations.
| Beginner-notebooks/99d_Gridded_Datasets_I.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="WloB7i6znYlz"
# <div>
# <img src="https://drive.google.com/uc?export=view&id=1vK33e_EqaHgBHcbRV_m38hx6IkG0blK_" width="350"/>
# </div>
#
# #**Artificial Intelligence - MSc**
#
# ##CS6462 - PROBABILISTIC AND EXPLAINABLE AI
#
# ##SEM2 2021/2
#
# ###CS6462_Lab_2.3
#
# ###Instructor: <NAME>
# Copyright (C) 2022 - All rights reserved, do not copy or distribute without permission of the author.
# + [markdown] id="Km7A3JsgnpaU"
# #Calculate Exact Card Probabilities Numerically
# + [markdown] id="HzexoNCWnxZe"
# We will learn how we can get the exact probability using Python.
#
# * This is not always applicable but let’s try to solve the questions of Part 1.
#
# * The logic here is to generate all the possible combinations and then to calculate the ratio.
# + id="rPpVhr61oPrY"
# importing modules
import itertools
# + colab={"base_uri": "https://localhost:8080/"} id="MGAaD8ZznVAO" outputId="e288bce3-4f03-42e6-ed25-6a03e5c7bd78"
# make a deck of cards
# Build the 52-card deck as (face, suit) pairs; faces vary slowest so the ordering
# matches itertools.product(faces, suits).
card_faces = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']
card_suits = ['Spade', 'Heart', 'Diamond', 'Club']
deck = [(face, suit) for face in card_faces for suit in card_suits]
# show deck
deck
# + [markdown] id="LSg-H80QpTcZ"
# **Question 1:**
#
# * How can we get all the possible 5-card hands of a 52-card deck?
#
# * Combinations: $$ {52\choose 5} = 2,598,960 $$
# + colab={"base_uri": "https://localhost:8080/"} id="kMqqisAAoIlM" outputId="7f489dd3-fc0e-4e14-9e0d-b8a0a9117c8f"
from itertools import combinations
# get the (52 5) combinations
# C(52, 5) = 2,598,960 possible five-card hands; reused by the cells below.
all_possible_by_5_combinations = list(combinations(deck,5))
# show the number of combinations
len(all_possible_by_5_combinations)
# + [markdown] id="R-9ln-Jwp11p"
# **Question 2:**
#
# What is the probability that when two cards are drawn from a deck of cards without a replacement that both of them will be Ace?
# + id="xA3QJ4_Mp9t1" colab={"base_uri": "https://localhost:8080/"} outputId="7926f4af-11bb-4284-aa93-46f1c6b4f48f"
# Probability that both cards are aces when drawing 2 without replacement.
# get the (52 2) combinations
all_possible_by_2_combinations = list(combinations(deck,2))
# Count the favourable hands: every face in the pair must be an ace.
Aces = sum(
    1
    for hand in all_possible_by_2_combinations
    if all(face == 'A' for face, _suit in hand)
)
# Probability = favourable outcomes / total outcomes.
prob = Aces / len(all_possible_by_2_combinations)
# show the probability
prob
# + [markdown] id="Dq8PPcHKrvlW"
# **Question 3:**
#
# What is the probability of two Aces in 5-card hand without replacement?
# + colab={"base_uri": "https://localhost:8080/"} id="GwgZ8SvPr5iK" outputId="9cc90656-33c6-4915-9985-d3d887471e35"
# Probability of exactly two aces in a 5-card hand (no replacement).
Aces = 0
for hand in all_possible_by_5_combinations:
    faces = [face for face, _suit in hand]
    # Exactly two of the five faces must be aces.
    if faces.count('A') == 2:
        Aces += 1
# Probability = favourable outcomes / total outcomes.
prob = Aces / len(all_possible_by_5_combinations)
# show the probability
prob
# + [markdown] id="Cfu3LiXOsdan"
# **Question 4:**
#
# What is the probability of being dealt a flush (5 cards of all the same suit) from the first 5 cards in a deck?
# + colab={"base_uri": "https://localhost:8080/"} id="GbTR4hhxspBH" outputId="3dd8bbcc-6a8c-4a40-e49c-9ffcd3bf5049"
# Probability of a flush: all 5 cards share a single suit.
flushes = sum(
    1
    for hand in all_possible_by_5_combinations
    if len({suit for _face, suit in hand}) == 1
)
# Probability = favourable outcomes / total outcomes.
prob = flushes / len(all_possible_by_5_combinations)
# show the probability
prob
# + [markdown] id="xDvahuUytTqC"
# **Question 5:**
#
# What is the probability of being dealt a royal flush from the first 5 cards in a deck?
# + colab={"base_uri": "https://localhost:8080/"} id="jOyXGjhitawL" outputId="9c316520-f59b-4808-8087-d63d11255a34"
# Probability of a royal flush: one suit, and the faces are exactly {A, K, Q, J, 10}.
ROYAL_FACES = sorted(['A','J', 'Q', 'K', '10'])
royal_flushes = 0
for hand in all_possible_by_5_combinations:
    suits_in_hand = {suit for _face, suit in hand}
    faces_in_hand = sorted(face for face, _suit in hand)
    if len(suits_in_hand) == 1 and faces_in_hand == ROYAL_FACES:
        royal_flushes += 1
# Probability = favourable outcomes / total outcomes.
prob = royal_flushes / len(all_possible_by_5_combinations)
# show the probability
prob
| Weeks_1-3/Labs/CS6462_Lab_2_3.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sos
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SoS
# language: sos
# name: sos
# ---
# + [markdown] kernel="SoS" tags=[]
# # APA Calling
# + [markdown] kernel="SoS"
# ## Aim
# The purpose of this notebook is to call APA-based information (PDUI) based on [DAPARS2 method](https://github.com/3UTR/DaPars2).
# + [markdown] kernel="SoS"
# ## Methods
# + kernel="SoS"
# %preview ../../images/apa_calling.png
# + [markdown] kernel="SoS"
# ### 3'UTR Reference
#
# * _gtf2bed12.py_ : Convert gtf to bed format (Source from in-house codes from Li Lab: https://github.com/Xu-Dong/Exon_Intron_Extractor/blob/main/scripts/gtf2bed12.py)
#
# * _DaPars_Extract_Anno.py_ : extract the 3UTR regions in bed formats from the whole genome bed (Source from Dapars 2: https://github.com/3UTR/DaPars2/blob/master/src/DaPars_Extract_Anno.py)
#
# ### Call WIG data from transcriptome BAM files
#
# Using bedtools or rsem-bam2wig, for RSEM based alignment
#
# ### Config files Generation
# * _Python 3_ loops to read line by line the sum of reads coverage of all chromosome.
#
# ### Dapars2 Main Function
#
# * _Dapars2_Multi_Sample.py_: use the least squares method to calculate the usage of long isoforms (https://github.com/3UTR/DaPars2/blob/master/src/Dapars2_Multi_Sample.py)
#
# Note: this part of code have been modified from source to deal with some formatting discrepancy in wig file
#
# ### Impute missing values in Dapars result
#
# KNN using `impute` R package.
# + [markdown] kernel="SoS" tags=[]
# ## Input
# + [markdown] kernel="SoS"
# - A list of transcriptome level BAM files, eg generated by RSEM
# - The 3'UTR annotation reference file
#
# If you do not have 3'UTR annotation file, please generate it first. Input to this step is the transcriptome level gene feature file in `GTF` format that [we previously prepared](https://cumc.github.io/xqtl-pipeline/code/data_preprocessing/reference_data.html).
# + [markdown] kernel="SoS" tags=[]
# ## Output
# + [markdown] kernel="SoS"
# * Dapars config files
# * PUDI (Raw) information saved in txt
# * PDUI (Imputed) information saved in txt. This is recommended for further analysis.
# + [markdown] kernel="SoS"
# ## Minimal working example
# + [markdown] kernel="SoS"
# To generate 3'UTR reference data,
# + kernel="Bash"
# Example invocation: build the 3'UTR reference from the reformatted Ensembl GTF.
sos run apa_calling.ipynb UTR_reference \
    --cwd output/apa \
    --hg-gtf reference_data/Homo_sapiens.GRCh38.103.chr.reformatted.ERCC.gtf \
    --container /mnt/mfs/statgen/ls3751/container/dapars2_final.sif
# + [markdown] kernel="SoS"
# ## Command interface
# + kernel="Bash"
# List every workflow and parameter defined in this notebook.
sos run apa_calling.ipynb -h
# + [markdown] kernel="Bash"
# ## Workflow implementation
# + kernel="SoS"
[global]
# Cluster resource defaults applied to every submitted task below.
parameter: walltime = '400h'
parameter: mem = '200G'
parameter: ncore = 16
# the output directory for generated files
parameter: cwd = path("output")
# Number of threads
parameter: numThreads = 8
# Number of substeps grouped into a single task
parameter: job_size = 1
# Container image used to run each step (empty string = run on the host)
parameter: container = ''
# + [markdown] kernel="SoS"
# ### Step 0: Generate 3UTR regions based on GTF
# + [markdown] kernel="SoS"
# The 3UTR regions (saved in bed format) can be used __repeatedly__ for different samples: they only serve as the reference region. You may not need to run this step if the generated hg19/hg38 3UTR regions are already available.
# + kernel="SoS"
# Generate the 3UTR region according to the gtf file
[UTR_reference]
# gtf file
parameter: hg_gtf = path
input: hg_gtf
# Outputs: whole-genome bed12, transcript-to-gene-name map, and the extracted 3'UTR bed.
output: f'{cwd}/{_input:bn}.bed',
        f'{cwd}/{_input:bn}.transcript_to_geneName.txt',
        f'{cwd}/{_input:bn}_3UTR.bed'
bash: expand = '${ }', container = container
    gtf2bed12.py --gtf ${_input} --out ${cwd}
    # gtf2bed12.py writes fixed file names; rename them to the declared outputs.
    mv ${cwd}/gene_annotation.bed ${_output[0]}
    mv ${cwd}/transcript_to_geneName.txt ${_output[1]}
    DaPars_Extract_Anno.py -b ${_output[0]} -s ${_output[1]} -o ${_output[2]}
# + [markdown] kernel="SoS"
# ### Step 1: Generate WIG calls and flagstat files from BAM files
#
# Generating WIG from BAM data via `bedtools` is recommended by Dapars authors. However, our transcriptome level calls are made using RSEM, which in fact contains a program called `rsem-bam2wig` with this one additional feature:
#
# ```
# –no-fractional-weight : If this is set, RSEM will not look for “ZW” tag and each alignment appeared in the BAM file has weight 1. Set this if your BAM file is not generated by RSEM. Please note that this option must be at the end of the command line
# ```
#
# Here we stick to `bedtools` because of its popularity and most generic BAM files may not have the `ZW` tag anyways.
# + kernel="SoS"
# Convert STAR-aligned BAMs (one ROSMAP DLPFC batch per substep) to WIG coverage
# files plus samtools flagstat summaries.
[bam2tools]
parameter: n = 9
# Fan the batches out as substeps 0..n-1.
n = [x for x in range(n)]
input: for_each = 'n'
task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, cores = numThreads
python: expand = True, container = container
    # With expand = True, SoS substitutes the brace placeholders (_n, numThreads)
    # into this script before it runs.
    import glob
    import os
    import subprocess
    path = "/mnt/mfs/ctcn/datasets/rosmap/rnaseq/dlpfcTissue/batch{_n}/STAR_aligned"
    name = glob.glob(path + "/**/*Aligned.sortedByCoord.out.bam", recursive = True)
    wigpath = "/home/ls3751/project/ls3751/wig/batch{_n}/"
    if not os.path.exists(wigpath):
        os.makedirs(os.path.dirname(wigpath))
    for i in name:
        # Sample id is the directory name holding the BAM.
        id = i.split("/")[-2]
        filedir = path + "/" + id + "/" + id + ".bam"
        out = wigpath + id + ".wig"
        new_cmd = "bedtools genomecov -ibam " + filedir + " -bga -split -trackline" + " > " + out
        os.system(new_cmd)
        out_2 = wigpath + id + ".flagstat"
        new_cmd_2 = f"samtools flagstat --thread {numThreads} " + filedir + " > " + out_2
        os.system(new_cmd_2)
# + kernel="SoS"
# Per-sample WIG + flagstat generation from a text file listing BAM paths (one per line).
[bam2toolsv1]
parameter: sample = path
parameter: tissue = path
task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, cores = numThreads, concurrent = True
python: expand = "${ }", container = container
    import os
    import multiprocessing
    import re
    sample_list = [line.strip('\n') for line in open("${sample}")]
    def call_wig_flagstat(subject):
        # Sample id = the second 8-digit token found in the BAM path.
        id = re.findall(r"\D(\d{8})\D",subject)[1]
        prefix = "/mnt/mfs/statgen/ls3751/aqtl_analysis/wig/" + "${tissue}" + "/"
        out = prefix + id + ".wig"
        out_2 = prefix + id + ".flagstat"
        new_cmd = "bedtools genomecov -ibam " + subject + " -bga -split -trackline" + " > " + out
        os.system(new_cmd)
        # NOTE(review): expand is dollar-brace style here, so the plain-brace f-string
        # placeholder for numThreads is NOT filled in by SoS; the spawned python
        # evaluates the f-string itself, where numThreads is undefined — confirm and
        # switch to the SoS dollar-brace interpolation.
        new_cmd_2 = f"samtools flagstat --thread {numThreads} " + subject + " > " + out_2
        os.system(new_cmd_2)
    for sample in sample_list:
        call_wig_flagstat(sample)
    ### process = multiprocessing.Process(target = call_wig_flagstat, args =(sample_list[i]))
    ### process.start()
    ### for p in processes:
    ###     p.join()
# + kernel="SoS"
# Bash variant: reads a CSV of "bam_path,sample_id" rows and launches bedtools/samtools
# for each row.
[bam2toolsv2]
parameter: sample = path
parameter: tissue = path
# NOTE(review): both commands below are backgrounded (&) and never waited on, so the
# task can report completion before the output files are fully written.
task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, cores = numThreads
bash: expand = "${ }", container = container
    input="${sample}"
    while IFS=',' read -r col1 col2
    do
        bedtools genomecov -ibam $col1 -bga -split -trackline > /mnt/mfs/statgen/ls3751/aqtl_analysis/wig/${tissue}/$col2.wig &
        samtools flagstat --thread ${numThreads} $col1 > /mnt/mfs/statgen/ls3751/aqtl_analysis/wig/${tissue}/$col2.flagstat &
    done < "$input"
# + kernel="SoS"
# Copy pre-existing per-sample WIG/flagstat files into the tissue directory layout.
[bam2toolstmp]
parameter: sample = path
parameter: tissue = path
task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, cores = ncore
python: expand = "${ }", container = container
    import os
    import re
    sample_list = [line.strip('\n') for line in open("${sample}")]
    def copy_wigfile(subject):
        # Sample id = the FIRST 8-digit token here (bam2toolsv1 uses the second) —
        # presumably intentional for this input layout; verify against the file lists.
        id = re.findall(r"\D(\d{8})\D",subject)[0]
        prefix = "/mnt/mfs/statgen/ls3751/aqtl_analysis/wig/" + "${tissue}" + "/"
        # Matching flagstat file sits next to the wig, with the extension swapped.
        file1 = os.path.splitext(subject)[0] + ".flagstat"
        out = prefix + id + ".wig"
        out_2 = prefix + id + ".flagstat"
        new_cmd = "cp " + subject + " " + out
        os.system(new_cmd)
        new_cmd_2 = "cp " + file1 + " " + out_2
        os.system(new_cmd_2)
    for sample in sample_list:
        copy_wigfile(sample)
# + kernel="Bash" tags=[]
# Example invocation: run bam2toolsv2 for the AC tissue sample list on the cluster.
sos run /mnt/mfs/statgen/ls3751/github/xqtl-pipeline/code/molecular_phenotypes/calling/apa_calling.ipynb bam2toolsv2 --cwd /mnt/mfs/statgen/ls3751/aqtl_analysis/wig/scripts --sample /mnt/mfs/statgen/ls3751/aqtl_analysis/wig/scripts/AC_1.txt --tissue AC --container /mnt/mfs/statgen/ls3751/container/dapars2_final.sif -c /home/ls3751/project/ls3751/csgg.yml
# + [markdown] kernel="Bash"
# ### Step 2: Generating config files and calculating sample depth
# + [markdown] kernel="Bash"
# #### Notes on input file format
# + [markdown] kernel="Bash"
# For the input file, it has the following format. Additional notes are:
# * The first line is the information of file. If you do not have them, please add any content on first line
# * The file must end with ".wig". It will not cause any problem if you directly change from ".bedgraph"
# * If your input wig file did not have the characters __"chr"__ in the first column, please set `no_chr_prefix = T`
# + kernel="Bash"
# Peek at a wig file: the first line is a track header, then chrom/start/end/coverage rows.
head -n 10 /mnt/mfs/statgen/ls3751/MWE_dapars2/sample1.wig
# + kernel="SoS" tags=[]
# Generate configuration file
# Scans a directory of .wig files, records each sample's sequencing depth (from the
# matching .flagstat), and writes the Dapars2 configuration file.
[APAconfig]
parameter: bfile = path
parameter: annotation = path
parameter: job_size = 1
# Default parameters for Dapars2:
parameter: least_pass_coverage_percentage = 0.3
parameter: coverage_threshold = 10
output: [f'{cwd}/sample_mapping_files.txt',f'{cwd}/sample_configuration_file.txt']
task: trunk_workers = 1, trunk_size = 1, walltime = walltime, mem = mem, cores = ncore
python3: expand = "${ }", container = container
    import re
    import os
    # Collect every *.wig file in the supplied directory, as absolute-ish paths.
    target_all_sample = os.listdir("${bfile}")
    target_all_sample = list(filter(lambda v: re.match('.*wig$', v), target_all_sample))
    target_all_sample = ["${bfile}" + "/" + w for w in target_all_sample]
    def extract_total_reads(input_flagstat_file):
        # Pull the count field from line 5 of the samtools flagstat output;
        # returns '-1' if the file has fewer than 5 lines.
        num_line = 0
        total_reads = '-1'
        #print input_flagstat_file
        for line in open(input_flagstat_file,'r'):
            num_line += 1
            if num_line == 5:
                total_reads = line.strip().split(' ')[0]
                break
        return total_reads
    #print(target_all_sample)
    print("INFO: Total",len(target_all_sample),"samples found in provided dirctory!")
    # Total depth file:
    mapping_file = open("${_output[0]}", "w")
    for current_sample in target_all_sample:
        # Flagstat path = wig path with the extension swapped.
        flag = current_sample.split(".")[0] + ".flagstat"
        current_sample_total_depth = extract_total_reads(flag)
        field_out = [current_sample, str(current_sample_total_depth)]
        mapping_file.writelines('\t'.join(field_out) + '\n')
        print("Coverage of sample ", current_sample, ": ", current_sample_total_depth)
    mapping_file.close()
    # Configuration file:
    config_file = open(${_output[1]:r},"w")
    config_file.writelines(f"Annotated_3UTR=${annotation}\n")
    config_file.writelines( "Aligned_Wig_files=%s\n" % ",".join(target_all_sample))
    config_file.writelines(f"Output_directory=${cwd}/apa \n")
    config_file.writelines(f"Output_result_file=Dapars_result\n")
    config_file.writelines(f"Least_pass_coverage_percentage=${least_pass_coverage_percentage}\n")
    config_file.writelines( "Coverage_threshold=${coverage_threshold}\n")
    config_file.writelines( "Num_Threads=${numThreads}\n")
    config_file.writelines(f"sequencing_depth_file=${_output[0]}")
    config_file.close()
# + [markdown] kernel="SoS"
# ### Step 3: Run Dapars2 main to calculate PDUIs
# The default input handling of `Dapars2_Multi_Sample.py` does not consider the situation where the first column does not contain "chr" (shown in _Step 2_). We add a new argument `no_chr_prefix` (default is FALSE).
# + kernel="SoS"
# Call Dapars2 multi_chromosome
# Run Dapars2 once per chromosome listed in `chrlist`.
[APAmain]
# Set to True when the wig files' first column already carries the "chr" prefix.
parameter: chr_prefix = False
# Chromosomes to process, e.g. ['chr1', 'chr21'].
parameter: chrlist = list
input: for_each = 'chrlist'
output: [f'{cwd}/apa_{x}/Dapars_result_result_temp.{x}.txt' for x in chrlist], group_by = 1
task: trunk_workers = 1, trunk_size = job_size, walltime = walltime, mem = mem, cores = ncore
bash: expand = True, container = container
  python2 /mnt/mfs/statgen/ls3751/github/xqtl-pipeline/code/Dapars2_Multi_Sample.py {cwd}/sample_configuration_file.txt {_chrlist} {"F" if chr_prefix else "T"}
# + [markdown] kernel="Bash"
# ## Analysis demo
# ### Step 0: 3UTR generation
# + kernel="Bash" tags=[]
sos run /mnt/mfs/statgen/ls3751/github/xqtl-pipeline/code/molecular_phenotypes/calling/apa_calling.ipynb UTR_reference \
--cwd /mnt/mfs/statgen/ls3751/MWE_dapars2/Output \
--hg_gtf /mnt/mfs/statgen/ls3751/MWE_dapars2/gencode.v39.annotation.gtf \
--container /mnt/mfs/statgen/ls3751/container/dapars2_final.sif
# + kernel="Bash"
tree /mnt/mfs/statgen/ls3751/MWE_dapars2/Output
# + [markdown] kernel="Bash"
# ### Step 1: Bam to wig process and extracting reads depth file
# + kernel="Bash"
sos run /mnt/mfs/statgen/ls3751/github/xqtl-pipeline/code/molecular_phenotypes/calling/apa_calling.ipynb bam2tools \
--n 0 1 2 3 4 5 6 7 8 \
--container /mnt/mfs/statgen/ls3751/container/dapars2_final.sif
# + kernel="Bash"
sos run /mnt/mfs/statgen/ls3751/github/xqtl-pipeline/code/molecular_phenotypes/calling/apa_calling.ipynb bam2toolsv2 \
--cwd /mnt/mfs/statgen/ls3751/aqtl_analysis \
--sample /mnt/mfs/statgen/ls3751/aqtl_analysis/wig/scripts/AC_1.txt \
--tissue AC \
--container /mnt/mfs/statgen/ls3751/container/dapars2_final.sif
# + [markdown] kernel="Bash"
# ### Step 2: Generating config files
#
# + kernel="Bash"
sos run /mnt/mfs/statgen/ls3751/github/xqtl-pipeline/code/molecular_phenotypes/calling/apa_calling.ipynb APAconfig \
--cwd /mnt/mfs/statgen/ls3751/rosmap/dlpfcTissue/batch0 \
--bfile /mnt/mfs/statgen/ls3751/rosmap/dlpfcTissue/batch0 \
--annotation /mnt/mfs/statgen/ls3751/MWE_dapars2/Output/gencode.v39.annotation_3UTR.bed \
--container /mnt/mfs/statgen/ls3751/container/dapars2_final.sif
# + kernel="Bash"
tree /mnt/mfs/statgen/ls3751/MWE_dapars2/Output
# + [markdown] kernel="SoS"
# ### Step 3: Dapars2 Main
# Note: the example is a truncated version, which just have coverage in chr1,chr11 and chr12
# + kernel="Bash"
sos run /mnt/mfs/statgen/ls3751/github/xqtl-pipeline/code/molecular_phenotypes/calling/apa_calling.ipynb APAmain \
--cwd /mnt/mfs/statgen/ls3751/rosmap/dlpfcTissue/batch0 \
--chrlist chr21 chr14 chr1 \
--container /mnt/mfs/statgen/ls3751/container/dapars2_final.sif
# + kernel="Bash"
tree /mnt/mfs/statgen/ls3751/MWE_dapars2/Output
| code/molecular_phenotypes/calling/apa_calling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="a5e5cf94-d1dd-406a-837b-e50c7eaa797e" _uuid="394b19a07771c3e936cf3a75378442f834e3ff75"
# <h1>New York City Trip Duration</h1>
# <subtitle>A Beginner's Perspective</subtitle>
#
# <h2> Introduction</h2>
# <p>The dataset contains data about the taxi services in New York City. The data is split into two parts <b>test.csv</b> and <b>train.csv</b> the roles of which are self-explanatory. The aim is to predict the duration of a taxi trip based on certain parameters such as: <b> Pickup location, drop-off location, pickup date and time, number of passengers, vendor</b> etc. The impact of these factors on the trip duration will be assessed using machine learning models and then the model will be used to predict a value for the trip duration.</p>
#
# <h2>Methodology</h2>
#
# + _cell_guid="86ac325e-75ca-4d10-a7de-bba04bd00593" _kg_hide-input=false _uuid="b604f659145981957f823360782249cfc723ff96" _kg_hide-output=false
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import time
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_squared_log_error as msle
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# + _cell_guid="dae07cad-6427-4511-90a4-ce9d29f03b3e" _uuid="ce96dd0f7e84182ede2e116df74cbfcedb3ed29a"
#Import training data
train = pd.read_csv('../input/nyc-taxi-trip-duration/train.csv')
train.head()
# + _cell_guid="c91b570d-9f2f-46fe-9af1-c21d8915b046" _uuid="190a9a1c990faeec43d92c390ff4735931fdee8f"
train.info()
# + _cell_guid="7e48f94c-89ae-4793-adc5-d68155c739d7" _uuid="0850e165353a87b43b07ca91c40c9b06ec7e17f6"
#Convert store_and_fwd_flag to categorical binary data (N -> 0, Y -> 1), then cast to integer.
#Work on a copy so the raw `train` frame stays untouched.
trains = train.copy()
trains.loc[trains.store_and_fwd_flag == 'N', 'store_and_fwd_flag'] = 0
trains.loc[trains.store_and_fwd_flag == 'Y', 'store_and_fwd_flag'] = 1
trains['store_and_fwd_flag'] = pd.to_numeric(trains['store_and_fwd_flag'])
trains.info()
# + _cell_guid="c2b1ec0a-c0c8-41e1-b863-b742d8377bbc" _uuid="71896cfbc53cc0d80278c1af0c8016190b216fca"
#Convert pickup and dropoff date_time to datetime format
trains['pickup_datetime'] = pd.to_datetime(trains.pickup_datetime)
trains['dropoff_datetime'] = pd.to_datetime(trains.dropoff_datetime)
# + _cell_guid="d6f6e6a6-72bb-4f5e-8ad4-33cf02047d69" _uuid="44aae24fe509c3926440105d18a5cde7a648681a"
trains.info()
# + _cell_guid="f2b50183-c018-475a-82f6-780319dd5e57" _uuid="e41af0912f8f6d7230f7dddf2c538a0c19a1d5da"
#import datetime as dt
# Encode pickup time of day as a single float: hour*100 + minute + second/100
# (e.g. 14:35:20 -> 1435.20).
# NOTE(review): this is a clock-face-style encoding, not seconds-since-midnight --
# confirm it is intentional before changing.
ptime = trains.pickup_datetime.dt.hour*100+trains.pickup_datetime.dt.minute+trains.pickup_datetime.dt.second*0.01
# # %matplotlib inline
# sns.set(style="white", palette="muted", color_codes=True)
# f, axes = plt.subplots(1, 1, figsize=(11, 7), sharex=True)
# sns.despine(left=True)
# sns.distplot(ptime, axlabel = 'Pickup Time', label = 'Pickup Time', bins = 20, color="r")
# plt.setp(axes, yticks=[])
# plt.tight_layout()
# plt.show()
# + _cell_guid="2fdcfa25-7d4f-482a-a6b3-ac95ac420895" _uuid="57e128f378f494595327d1e3307b2648c1edbd4e"
# Store the encoded time-of-day as a feature column.
trains.loc[:, 'pickup_time'] = ptime
# + [markdown] _cell_guid="db472534-f771-4898-a319-3cac46e6e2ce" _uuid="918b935c424a3ed433bdebb5bfee80ac811df333"
#
# + _cell_guid="c1ca1c49-18b2-41f9-a86f-52e1fddeecc7" _uuid="30e7a1efa399aa8d6744f16f97a67e3b382a2b42"
# differ = (trains.dropoff_datetime - trains.pickup_datetime).dt.total_seconds() - trains.trip_duration
# sns.distplot(np.log10(differ.loc[((trains.dropoff_datetime - trains.pickup_datetime).dt.total_seconds()- trains.trip_duration)!=0]))
# + _cell_guid="31d77e5d-c038-441e-833a-f1536e4c35c8" _uuid="1219e43b0b05e4bad3002e22796bdbba79eb93ff"
def haversine_(lat1, lng1, lat2, lng2):
    """Great-circle (haversine) distance in km between two points given in degrees.

    Accepts scalars or array-likes; inputs are broadcast element-wise by numpy.
    """
    earth_radius_km = 6371
    lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))
    half_dlat = (lat2 - lat1) * 0.5
    half_dlng = (lng2 - lng1) * 0.5
    # Haversine formula: a is the squared half-chord length between the points.
    a = np.sin(half_dlat) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(half_dlng) ** 2
    return 2 * earth_radius_km * np.arcsin(np.sqrt(a))
# + _cell_guid="9d51cfbc-358b-47cf-8fc9-8313c2410d73" _uuid="21a8e7bc72a0354fc6d115313dbf6c4a10dbac87"
# # %matplotlib inline
# sns.set(style="white", palette="muted", color_codes=True)
# f, axes = plt.subplots(1, 1, figsize=(11, 7), sharex=True)
# sns.despine(left=True)
# sns.distplot(np.log10(trains.trip_duration), axlabel = 'log10(Trip Duration)', label = 'Trip Duration', bins = 50, color="b")
# plt.setp(axes, yticks=[])
# plt.tight_layout()
# plt.show()
# + _cell_guid="725881a6-8023-4be0-9f75-7b2e7657a78c" _uuid="41d8321f847758443297d8fd2a4a808bdcce9019"
# trains.trip_duration.describe()
# + _cell_guid="d314ec1d-1dd5-4a03-a983-40fcb26226ae" _uuid="1497ff36321b305191d97b9427ac468717db6385"
# # %matplotlib inline
# sns.set(style="white", palette="muted", color_codes=True)
# f, axes = plt.subplots(1, 1, figsize=(11, 7), sharex=True)
# sns.despine(left=True)
# sns.distplot(np.log10(trains_drop1.trip_duration), axlabel = 'log10(Trip Duration)', label = 'Trip Duration', bins = 50, color="b")
# plt.setp(axes, yticks=[])
# plt.tight_layout()
# plt.show()
# + _cell_guid="334a2d04-8ed2-4d49-87d6-153173f741cc" _uuid="c5a861c1091c5992680e65ba246aaaa806a6ef9a"
# sns.set(style="white", palette="muted", color_codes=True)
# f, axes = plt.subplots(2,2,figsize=(10, 10), sharex=False, sharey = False)
# sns.despine(left=True)
# sns.distplot(trains_drop1['pickup_latitude'].values, label = 'pickup_latitude',color="m",bins = 100, ax=axes[0,0])
# sns.distplot(trains_drop1['pickup_longitude'].values, label = 'pickup_longitude',color="m",bins =100, ax=axes[0,1])
# sns.distplot(trains_drop1['dropoff_latitude'].values, label = 'dropoff_latitude',color="m",bins =100, ax=axes[1, 0])
# sns.distplot(trains_drop1['dropoff_longitude'].values, label = 'dropoff_longitude',color="m",bins =100, ax=axes[1, 1])
# plt.setp(axes, yticks=[])
# plt.tight_layout()
# plt.show()
# + _cell_guid="9d740cea-d447-41e5-8dbe-50401a8d388c" _uuid="fd87181c5e4b251d9e0914b24f6f9e730498b67b"
# objects = trains_drop1['vendor_id'].unique()
# y_pos = np.arange(len(objects))
# performance = trains['vendor_id'].value_counts()
# plt.bar(y_pos, performance, align='center', alpha=0.5, color = ['b','r'])
# plt.xticks(y_pos, objects)
# plt.ylabel('Number of trips')
# plt.title('Vendor_ID')
# plt.show()
# + _cell_guid="53f7695a-01fe-4ed3-b7dd-7a5e9680f3ef" _uuid="ac2a054ba947d7a39de4863b8a921f5e049a1646"
# objects = trains_drop1['store_and_fwd_flag'].unique()
# y_pos = np.arange(len(objects))
# performance = trains['store_and_fwd_flag'].value_counts()
# plt.bar(y_pos, np.log10(performance), align='center', alpha=0.5, color = ['b','r'])
# plt.xticks(y_pos, objects)
# plt.ylabel('Number of flags')
# plt.title('store_and_fwd_flag')
# plt.show()
# + _cell_guid="42be423c-9dbb-4088-9fb5-a0ef93619aa2" _uuid="355e6ac7dd5a6b1159a1e1a1546aaf678403ebee"
trains.info()
# + [markdown] _cell_guid="ae7948a5-e70b-4ddc-a8e0-ee1d1dd1160b" _uuid="9e1822478f44760c52e5fb91de237c127070fd1a"
# #Removing Outliers (Cleaning)
# + _cell_guid="fde9f1a8-9eb0-4f06-8fdc-28bcaea247b5" _uuid="c0cac4baea550daece328760c28bbf5e1e4a0e90"
trains.loc[trains.trip_duration<=120]
# + _cell_guid="6ec9440c-ed37-4fb7-8c91-503ee625d8a5" _uuid="4dce6d401d5dcf130332b1dee3333eba79195e4b"
new_trains=trains.loc[(trains.trip_duration>=120) & (trains.trip_duration<=32400)]
# + _cell_guid="99c02904-e5ca-4fa7-95b3-f2f3e4608994" _uuid="82821d7facceeaf89fe69d7a5c2d88f5a8ca5592"
new_trains.head()
# + [markdown] _cell_guid="da89b2c7-5f52-45cb-933d-c438854398e4" _uuid="6a81d2549e74fb813a0843e0cf4b07f061293625"
# ***Adding Additional Features***
# + _cell_guid="4c58f767-a713-4a5b-9d6c-2813727a56a1" _uuid="83a5351d902e6e9dc9cd5efd4c5c8d7bf8c17ab3"
lon1 = new_trains['pickup_longitude']
lat1 = new_trains['pickup_latitude']
lon2 = new_trains['dropoff_longitude']
lat2 = new_trains['dropoff_latitude']
# NOTE(review): haversine_'s signature is (lat1, lng1, lat2, lng2) but longitudes are
# passed in the latitude slots here. The same swap is applied to the test set, so the
# feature is internally consistent but not a true haversine distance -- confirm before
# fixing (fixing only one side would break train/test consistency).
new_trains['haversine'] = haversine_(lon1,lat1,lon2,lat2)
# + _cell_guid="5fc923c6-52ea-4964-a225-e984f3957c98" _uuid="66bb5410df7b3af40bfcbbe1294a8392392001aa"
new_trains=new_trains.loc[new_trains.haversine!=0]
# + _cell_guid="6d655f08-77c1-4b4f-97cf-0fd35bf5b783" _uuid="18f372e1c5114f46bd7751613690263a5776f534"
# Calendar features derived from the pickup timestamp.
new_trains.loc[:, 'pick_month'] = new_trains['pickup_datetime'].dt.month
# NOTE: Series.dt.weekofyear is deprecated in newer pandas; dt.isocalendar().week is the replacement.
new_trains.loc[:, 'week_of_year'] = new_trains['pickup_datetime'].dt.weekofyear
new_trains.loc[:, 'day_of_year'] = new_trains['pickup_datetime'].dt.dayofyear
new_trains.loc[:, 'day_of_week'] = new_trains['pickup_datetime'].dt.dayofweek
# + _cell_guid="e25bf770-27f7-4751-a5d4-47ba125344f8" _uuid="e11043ba1f7d14c2018dd53981526e204cb80ecb"
# + _cell_guid="d4888bba-5597-45a5-9ea8-0b6bc407e2ff" _uuid="47763bf3716629c9cbc0329840be34b188c2ed02"
# + _cell_guid="50bd4958-3bce-410b-8b82-ef68f05e382d" _uuid="c2a68ef852c05853650d9cef4d692bb00b1bba6a"
# new_trains.columns
# + [markdown] _cell_guid="a0645433-b856-4829-a136-56a32c5088c5" _uuid="7a5f1eee87a0cc056da654da0252d4c0007a8c61"
# ***Sampling And Model Training***
# + _cell_guid="5830a8ae-6af7-4ff0-8402-d58b19f670e4" _uuid="f03046165a5b1297f3a7a54bf7f39aee76a78f86"
X_train_ann = new_trains[['vendor_id','pickup_longitude', 'pickup_latitude',
'dropoff_longitude', 'dropoff_latitude','pickup_time','haversine', 'pick_month',
'week_of_year', 'day_of_year', 'day_of_week']]
X_train_ann.describe()
# + _cell_guid="748fe543-8042-4216-947a-fcfb8324c0ed" _uuid="cbcef715cfdf6e28aff79029f6211e7033387845"
y_train_ann = new_trains['trip_duration']
y_train_ann.describe()
# + _cell_guid="0fe22fb3-4bc2-4113-8b31-467fbf3ef8eb" _uuid="37ea394f7e4934ecf0bc4fed5b9755ca884760a6"
X_train_1, X_test_1, y_train_1, y_test_1 = train_test_split(X_train_ann, y_train_ann, test_size=0.2, random_state=60)
# + _cell_guid="629e8290-22e9-4d4b-baae-df69e3703b03" _uuid="fa26965ad9a25e9ac5017a6c40d2958191a6b1b2"
# Small MLP: two hidden layers (10, 2), ReLU, Adam with constant LR 0.01, up to 1000 iterations.
nn = MLPRegressor(
    hidden_layer_sizes=(10,2), activation='relu', solver='adam', alpha=0.001, batch_size='auto',
    learning_rate='constant', learning_rate_init=0.01, power_t=0.5, max_iter=1000, shuffle=True,
    random_state=9, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True,
    early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# + _cell_guid="95d5f586-91f6-4fdf-9154-097bb0c0c6ef" _uuid="ec2b5fa9c407f214f07e93b45b28f25ba500080b" _kg_hide-output=false
start=time.time()
n = nn.fit(X_train_1, y_train_1)
end=time.time()
# + _cell_guid="138bdda3-4400-445c-ad04-0e934ae19859" _uuid="af0c2236c0a2e9588b808a93df027ab8a07c7240"
print("Time Taken In Fitting: ",end-start)
# + _cell_guid="918d07a0-d013-4eae-96d3-e1bab037fe01" _uuid="92f65b8ee32213fcd47989c9777fe31256085be0"
rough = nn.predict(X_test_1)
msle(y_test_1,rough)
# + [markdown] _cell_guid="3da9a330-1f5c-473b-8175-307f5c4a1252" _uuid="c64a3255df4d78f71eea61506a24e0fdc1f2ccc7"
# ***USING OSRM DATA***
# + _cell_guid="e7d6c534-82fc-4eb0-bcf2-47e4d91f9d2c" _uuid="10eeba8181e7d4a074e444a89503d5c7c6e1e01c"
# train_osrm_1 = pd.read_csv('../input/new-york-city-taxi-with-osrm/fastest_routes_train_part_1.csv')
# train_osrm_2 = pd.read_csv('../input/new-york-city-taxi-with-osrm/fastest_routes_train_part_2.csv')
# train_osrm = pd.concat([train_osrm_1, train_osrm_2])
# train_osrm = train_osrm[['id', 'total_distance', 'total_travel_time', 'number_of_steps']]
# train_df = pd.read_csv('../input/new-york-city-taxi-with-osrm/train.csv')
# train_osrm = pd.merge(train_df, train_osrm, on = 'id', how = 'left')
# + _cell_guid="65476259-87d0-4988-80cc-e2094e64e7ba" _uuid="a76cf6aa2f856b5c0241b393a97ccba6f78205c1"
# train_osrm.info()
# + _cell_guid="004eb31a-7e1c-42eb-ba92-99aa5ec12e9c" _uuid="5fc15703094b1ede6e929fd46d31d0e156f8b365" _kg_hide-output=true
# train_osrm.loc[train_osrm.store_and_fwd_flag == 'N', 'store_and_fwd_flag'] = 0
# train_osrm.loc[train_osrm.store_and_fwd_flag == 'Y', 'store_and_fwd_flag'] = 1
# train_osrm['store_and_fwd_flag'] = pd.to_numeric(train_osrm['store_and_fwd_flag'])
# + _cell_guid="1b2bc2d8-4e51-48b1-9cea-c7a8d6a4dcdf" _uuid="925ea03b4abfd73b110f6ac82aefcd77bc28f852" _kg_hide-output=true
# train_osrm['pickup_datetime'] = pd.to_datetime(train_osrm.pickup_datetime)
# train_osrm['dropoff_datetime'] = pd.to_datetime(train_osrm.dropoff_datetime)
# ptime = train_osrm.pickup_datetime.dt.hour*100+train_osrm.pickup_datetime.dt.minute+train_osrm.pickup_datetime.dt.second*0.01
# train_osrm.loc[:, 'pickup_time'] = ptime
# + _cell_guid="e46f184c-ca21-48eb-8adf-15714fe8cf5d" _uuid="a1d76a0e6531d5aa91a36d34467fe56b63b696a3"
# new_train_osrm=train_osrm.loc[(train_osrm.trip_duration>=120) & (train_osrm.trip_duration<=32400)]
# + _cell_guid="8a6f9619-15ec-4cbb-9c8a-69b36c38e337" _uuid="62891bb84dc166eacb8dd38608c04d1b342268de"
# lon1 = new_train_osrm['pickup_longitude']
# lat1 = new_train_osrm['pickup_latitude']
# lon2 = new_train_osrm['dropoff_longitude']
# lat2 = new_train_osrm['dropoff_latitude']
# new_train_osrm['haversine'] = haversine_(lon1,lat1,lon2,lat2)
# new_train_osrm=new_train_osrm.loc[new_train_osrm.haversine!=0]
# new_train_osrm.loc[:, 'pick_month'] = new_train_osrm['pickup_datetime'].dt.month
# new_train_osrm.loc[:, 'week_of_year'] = new_train_osrm['pickup_datetime'].dt.weekofyear
# new_train_osrm.loc[:, 'day_of_year'] = new_train_osrm['pickup_datetime'].dt.dayofyear
# new_train_osrm.loc[:, 'day_of_week'] = new_train_osrm['pickup_datetime'].dt.dayofweek
# + _cell_guid="7009a06d-0777-47d4-910b-8ac9d959855a" _uuid="a1901440eeeb12db83e5e3924b3a40370ff64dd5"
# new_train_osrm=new_train_osrm.loc[(new_train_osrm.passenger_count!=0)]
# + _cell_guid="a5db0c52-4252-4614-9789-49e35d82ca49" _uuid="f261f95fba5f7f5c75769ab8dd866dc115331c74"
# new_train_osrm.columns
# + _cell_guid="6f0eacc8-a14b-4968-a9ee-5f9b357c11e1" _uuid="6191f1871ea6defc6f1315fbaf286ae6d60153e2"
# X_train_ann = new_train_osrm[['vendor_id','pickup_longitude', 'pickup_latitude',
# 'dropoff_longitude', 'dropoff_latitude','total_distance',
# 'pickup_time','haversine', 'pick_month',
# 'week_of_year', 'day_of_year', 'day_of_week']]
# y_train_ann = new_train_osrm['trip_duration']
# X_train_1, X_test_1, y_train_1, y_test_1 = train_test_split(X_train_ann, y_train_ann, test_size=0.2, random_state=60)
# + _cell_guid="b0fd2f0a-0ae9-4037-83e1-86977e2cde95" _uuid="b6674617612e59bdfa68bf1449ea16f8526c1b8a"
# nn = MLPRegressor(
# hidden_layer_sizes=(10,2), activation='relu', solver='adam', alpha=0.001, batch_size='auto',
# learning_rate='constant', learning_rate_init=0.01, power_t=0.5, max_iter=1000, shuffle=True,
# random_state=9, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True,
# early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# + _cell_guid="8d54dd96-a57b-4339-8708-721f4125948a" _uuid="92bbe122562402a5a283b4af06805ccc4d61425e"
# start=time.time()
# n = nn.fit(X_train_1, y_train_1)
# end=time.time()
# + _cell_guid="2dcf20fa-8427-4008-972b-618350642e48" _uuid="3362c314d068d615acbae352235e1c69f33231c7"
# print("Time Taken In Fitting: ",end-start)
# + _cell_guid="da06a827-bbb5-481e-b6c7-96c3f1cfb347" _uuid="3e3c367f2f3c8d2209d2f213a7c3ba8a400baf92"
# rough = nn.predict(X_test_1)
# msle(y_test_1,rough)
# rough
# + [markdown] _cell_guid="121c4262-3c73-4fb2-be9d-06a982a73191" _uuid="ae157856c68bc2918b94da27e08136915b3e416f"
# ***Testing & Prediction***
# + _cell_guid="e22ba616-f677-4388-b59b-5d5e92af6842" _uuid="d42ad3a99aae2fe38744962f9a471a01ca7770b7"
test=pd.read_csv('../input/nyc-taxi-trip-duration/test.csv')
test.head()
# + _cell_guid="a5b077f7-36d2-43d4-b929-5df698a8d888" _uuid="1d39cdb3809328ec2ee94ef764b940498cefaae8" _kg_hide-output=true
# Apply the same store_and_fwd_flag (N -> 0, Y -> 1) and datetime conversions to the test set.
test.loc[test.store_and_fwd_flag == 'N', 'store_and_fwd_flag'] = 0
test.loc[test.store_and_fwd_flag == 'Y', 'store_and_fwd_flag'] = 1
test['store_and_fwd_flag'] = pd.to_numeric(test['store_and_fwd_flag'])
test['pickup_datetime'] = pd.to_datetime(test.pickup_datetime)
# + _cell_guid="b4aed2df-c6c8-4300-b61d-99517996cf10" _uuid="774fb73aed602e01075e04a0ae560077a9509277"
ptime_test = test.pickup_datetime.dt.hour*100+test.pickup_datetime.dt.minute+test.pickup_datetime.dt.second*0.01
test.loc[:, 'pickup_time'] = ptime_test
# + _cell_guid="8328ce8b-3e2e-4b90-b2e9-47e4b5065e40" _uuid="506d0f3c5c0b71d96c9c93a547af8a42c596e892"
test.info()
# + _cell_guid="4b370979-7afe-4374-bfe0-cb67f371eb9b" _uuid="0196e5915c715d9af897eeaa083ec549ba440bc4"
lon1 = test['pickup_longitude']
lat1 = test['pickup_latitude']
lon2 = test['dropoff_longitude']
lat2 = test['dropoff_latitude']
# NOTE(review): same lat/lon argument swap as in the training-set call -- kept for consistency.
test['haversine'] = haversine_(lon1,lat1,lon2,lat2)
# Calendar features, mirroring the training-set feature engineering.
test.loc[:, 'pick_month'] = test['pickup_datetime'].dt.month
test.loc[:, 'hour'] = test['pickup_datetime'].dt.hour
test.loc[:, 'week_of_year'] = test['pickup_datetime'].dt.weekofyear
test.loc[:, 'day_of_year'] = test['pickup_datetime'].dt.dayofyear
test.loc[:, 'day_of_week'] = test['pickup_datetime'].dt.dayofweek
# + _cell_guid="f58335c6-f535-425a-be9a-17824289de86" _uuid="fc263380c1d001e94b9f24ee16cedda021ab6dc5"
X_test_prediction=test[['vendor_id','pickup_longitude', 'pickup_latitude',
'dropoff_longitude', 'dropoff_latitude','pickup_time','haversine', 'pick_month',
'week_of_year', 'day_of_year', 'day_of_week']]
# + _cell_guid="cd437131-a985-4dff-9b3b-1e1c63336c89" _uuid="d6a7b5a3d36e6f1e4a09db6fa2f67ce58c3746a4"
X_test_prediction.info()
# + _cell_guid="6ea6fc17-5858-45dd-bfb9-615742804a94" _uuid="ebf72a5932d3e17bd905c21df1f5d69220fc89ac"
y_test_predicted=nn.predict(X_test_prediction)
# + _cell_guid="ac98430a-ba8b-42c9-ac8d-a91e94a01981" _uuid="be271c2c79a5d58443dad1d458ab45616fd0c275"
df_benchmark = pd.DataFrame()
df_benchmark['id'] = test['id']
df_benchmark['trip_duration'] = y_test_predicted
# + _cell_guid="bf39d7a4-b04d-4308-b15e-e5e0e435d2a3" _uuid="5bd157603840982b487814c91a0b4d0247758b8e"
y_test_predicted
# + _cell_guid="29df722e-180c-452d-9467-60ea1fc5a43a" _uuid="be4287bde3f9bd2b21862bbfcaa7b97aacf633ff"
df_benchmark.to_csv("sample_submission2.csv",index=False)
# + [markdown] _cell_guid="a04ff3e3-9c87-47f4-86a4-9922baf683fc" _uuid="8b51ac7de5c5e9ffbda1921fc3971eb91bb91404"
# #using OSRM
# + _cell_guid="b7028e53-3599-47c3-9457-2c19967b2d69" _uuid="34fdb3169476b7e8e7033e89e5a1e7f283ec6aaa"
# test_df = pd.read_csv('../input/nyc-taxi-trip-duration/test.csv')
# test_fr = pd.read_csv('../input/new-york-city-taxi-with-osrm/fastest_routes_test.csv')
# test_fr_new = test_fr[['id', 'total_distance', 'total_travel_time', 'number_of_steps']]
# test_df = pd.merge(test_df, test_fr_new, on = 'id', how = 'left')
# test_df.head()
# + _cell_guid="4087ae96-3d94-46a6-88b0-f07991d6ce38" _uuid="5d025029b35607e801eed861390c82d70e6c45c5"
# test_df.loc[test_df.store_and_fwd_flag == 'N', 'store_and_fwd_flag'] = 0
# test_df.loc[test_df.store_and_fwd_flag == 'Y', 'store_and_fwd_flag'] = 1
# test_df['store_and_fwd_flag'] = pd.to_numeric(test_df['store_and_fwd_flag'])
# test_df['pickup_datetime'] = pd.to_datetime(test_df.pickup_datetime)
# ptime = test_df.pickup_datetime.dt.hour*100+test_df.pickup_datetime.dt.minute+test_df.pickup_datetime.dt.second*0.01
# test_df.loc[:, 'pickup_time'] = ptime
# + _cell_guid="7dd3c717-a620-4b9e-8b2e-22b80eed89a9" _uuid="5f568549cdf498db5370df04bea6f4bcd81585f8"
# lon1 = test_df['pickup_longitude']
# lat1 = test_df['pickup_latitude']
# lon2 = test_df['dropoff_longitude']
# lat2 = test_df['dropoff_latitude']
# test_df['haversine'] = haversine_(lon1,lat1,lon2,lat2)
# test_df=test_df.loc[test_df.haversine!=0]
# test_df.loc[:, 'pick_month'] = test_df['pickup_datetime'].dt.month
# test_df.loc[:, 'week_of_year'] = test_df['pickup_datetime'].dt.weekofyear
# test_df.loc[:, 'day_of_year'] = test_df['pickup_datetime'].dt.dayofyear
# test_df.loc[:, 'day_of_week'] = test_df['pickup_datetime'].dt.dayofweek
# + _cell_guid="14a86a97-9307-4d86-8e0d-81998490e30c" _uuid="b8ee7bd68e310574b3cc0c4fa30a0cb729a2b0fd"
# X_test_prediction=test_df[['vendor_id','pickup_longitude', 'pickup_latitude',
# 'dropoff_longitude', 'dropoff_latitude','total_distance',
# 'pickup_time','haversine', 'pick_month',
# 'week_of_year', 'day_of_year', 'day_of_week']]
# + _cell_guid="45a65d8e-3c36-4d88-b859-3ab0760d180f" _uuid="ee643adeeefec578b77ed75174e92fe7589646b8"
# y_test_predicted=nn.predict(X_test_prediction)
# + _cell_guid="c1394370-6561-4e1f-8559-e30b440df708" _uuid="d5ea32a44aaf352192c4acc3c32e9f718a34e0b8"
# y_test_predicted
# + _cell_guid="d73a6926-e425-42ce-bd50-ddd58ce058be" _uuid="95b29bdd4c206f2df52129072965669b5aa406f2"
# df_benchmark = pd.DataFrame()
# df_benchmark['id'] = test_df['id']
# df_benchmark['trip_duration'] = y_test_predicted
# df_benchmark.to_csv("sample_submission3.csv",index=False)
# + _cell_guid="4ca43c9c-c5d4-4209-ba61-c299709690d7" _uuid="dea0edd3b7a90fbe367bea23ca06ecb253359268"
| Analysis & Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The first five rows of the table originally obtained from telbib.eso.org
# + The affiliations of authors are obtained from NASA ADS
# + The countries of the affiliations are interpreted from the affiliations.
# + The list of the proposal authors are obtained from ALMA Science Archive.
# + The values in the "archive" column are defined by whether any of article authors are included in the proposal authors of the project that is used in the article.
# + The values in the "archivedata" columns are the same as in the "arhive" columns, but collapsed.
# + The "region" column is a country in which the affiliation of the first author is located.
# + See the Appendix for which countries are classified into which region. Note 'EA' includes Japan, Taiwan and Korea.
# +
from astroquery.alma import Alma
from astroquery import nasa_ads as na
import urllib
import xml.etree.ElementTree as ET
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
df_xml = pd.read_pickle('./df_telbib_alma_aff_archive.pkl')
df_xml.head()
# -
# ## Fig.1
# ### The number of publications per region between 2012 and 2018
# +
from pandas.plotting import table
fig, ax = plt.subplots(1, 1)
table(ax,df_xml["region"].value_counts(),
loc='upper right', colWidths=[0.2, 0.2, 0.2])
df_xml["region"].value_counts().plot(kind="bar",
title='The number of publications per region',
ax=ax)
# -
# ## Fig.2
# ### The fraction of publications per region between 2012 and 2018
df_xml['region'].value_counts().plot.pie(autopct='%i',title='The fraction of publications per region')
# ## Table 2
# ### The number of publications making use of PI and archival data
# +
#for df1['e'] = Series(np.random.randn(sLength), index=df1.index)
df_archive = pd.DataFrame({'All':df_xml['archivedata'].value_counts(),
'EA':df_xml[df_xml["region"]=='EA']['archivedata'].value_counts(),
'NA':df_xml[df_xml["region"]=='NA']['archivedata'].value_counts(),
'EU':df_xml[df_xml["region"]=='EU']['archivedata'].value_counts(),
'Chile':df_xml[df_xml["region"]=='Chile']['archivedata'].value_counts()})
df_archive
#.plot(kind="pie")
# -
# ## Fig.3
# ### The fraction of publications making use of PI and/or archival data
df_archive.plot(kind='pie',subplots=True,figsize=(20,10),autopct='%i')
# ## Fig.4
# ### The fraction of publications making use of PI and/or archival data per region.
df_archive_transposed = (df_archive/df_archive.sum()).T
#df_archive_transposed
df_archive_transposed.plot.bar(stacked=True,rot=45,title='Fraction of publications making use of archival data ')
# ## Fig.5
# ### Evolution of fraction of publications making use of PI and/or archival data per region.
def evolutionpub(df, addtitle):
    """Per-year breakdown of archive-data usage for the given publication frame.

    Draws two stacked bar charts (fraction and absolute count of publications per
    'archivedata' category, years 2012-2019) and prints the underlying count
    table via IPython's display().
    """
    # One column per year: value counts of the 'archivedata' category.
    yearly_counts = pd.DataFrame(
        {year: df[df["year"] == str(year)]['archivedata'].value_counts()
         for year in range(2012, 2020)})
    # Normalise each year's column to fractions, then put years on the x-axis.
    fraction_by_year = (yearly_counts / yearly_counts.sum()).T
    fraction_by_year.plot.bar(stacked=True, rot=45,
                              title='Evolution of fraction of publications {:}'.format(addtitle))
    # Absolute counts, same layout.
    count_by_year = yearly_counts.T
    count_by_year.plot.bar(stacked=True, rot=45,
                           title='Evolution of number of publications {:}'.format(addtitle))
    print('Table the number of publications', addtitle)
    display(count_by_year)
# +
evolutionpub(df_xml,'All')
df_xml_trim = df_xml[df_xml["region"]=='EA']
evolutionpub(df_xml_trim,'EA')
df_xml_trim = df_xml[df_xml["region"]=='EU']
evolutionpub(df_xml_trim,'EU')
df_xml_trim = df_xml[df_xml["region"]=='NA']
evolutionpub(df_xml_trim,'NA')
# -
# # Appendix
# ## A List of countries by regional classification
appendix_df = pd.read_pickle('./df_region.pkl')
print('EA\n',appendix_df.index[appendix_df[0]=='EA'])
print('EU (=ESO)\n',appendix_df.index[appendix_df[0]=='EU'])
print('NA\n',appendix_df.index[appendix_df[0]=='NA'])
# ## The fraction of publications per Asian country
# Restrict to the 'Oceania+Asia' region; .copy() so the column assignment below
# operates on an independent frame (avoids pandas' SettingWithCopyWarning).
df_asia = df_xml[df_xml['region']=='Oceania+Asia'].copy()
tmp = []
for i in df_asia.index:
    # First entry of the 'countries' list = country of the first author.
    tmp.append(df_asia[df_asia.index==i]['countries'].values[0][0])
df_asia['first_country'] = tmp
df_asia['first_country'].value_counts().plot.pie(autopct='%i',
           title='The fraction of publications per Asian country ({:})'.format(len(df_asia)))
# ## The fraction of publications making use of PI/archive data for Asian countries
# FIX: was len(df_ea) -- an undefined name (NameError); the frame in scope is df_asia.
df_asia['archivedata'].value_counts().plot.pie(autopct='%i',
           title='The number of publications of Asian countries except EA ({:})'.format(len(df_asia)))
| content/post/2019-06-13-ALMA-publication-statistics/Visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="lP6JLo1tGNBg"
# # Artificial Neural Network
# + [markdown] id="gWZyYmS_UE_L"
# ### Importing the libraries
# + id="V7KR6mytHqBD"
#Google Colab already comes installed with tensorflow but we must import it
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="UE-0T-egIkVS" outputId="dcc8c622-da23-438b-a046-f301d3ed1fe8"
#To check if tensorflow library is loaded
tf.__version__
# + [markdown] id="1E0Q3aoKUCRX"
# ## Part 1 - Data Preprocessing
# + [markdown] id="cKWAkFVGUU0Z"
# ### Importing the dataset
# + id="x23fKyylJrGc"
dataset=pd.read_csv('Churn_Modelling.csv')
#Keep columns 3..second-to-last as features (drops RowNumber/CustomerId/Surname-style
#identifier columns); the last column is the target. Same operation can be done with drop().
X=dataset.iloc[:,3:-1].values
Y=dataset.iloc[:,-1].values
# + id="VYP9cQTWbzuI" colab={"base_uri": "https://localhost:8080/"} outputId="2dbc4b24-5047-46b5-aa9a-625f3a4483bb"
#Displaying the Features or X
print(X)
# + id="38vKGE6Nb2RR" colab={"base_uri": "https://localhost:8080/"} outputId="e7c71a7a-4b17-44fa-c4e5-7c1ccbf612c3"
#Displaying the target label or Y
print(Y)  # FIX: was print(y) -- NameError; the target variable is capital Y
# + [markdown] id="N6bQ0UgSU-NJ"
# ### Encoding categorical data
# + id="isS4ytAAK0AT"
from sklearn.preprocessing import LabelEncoder
#Using LabelEncoder() for gender (feature column index 2) as it has only 2 categories.
le=LabelEncoder()
X[:,2]=le.fit_transform(X[:,2])
# + [markdown] id="le5MJreAbW52"
# Label Encoding the "Gender" column
# + id="-M1KboxFb6OO" colab={"base_uri": "https://localhost:8080/"} outputId="0606cb08-efd7-4268-9b43-bb99fd8c1b62"
print(X[1])
# + [markdown] id="CUxGZezpbMcb"
# One Hot Encoding the "Geography" column
# + id="GtQkC96nLlXw"
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
#Using OneHotEncoder() for country as it has more than 2 categories
ct=ColumnTransformer(transformers=[('encoder',OneHotEncoder(),[1])],remainder='passthrough')
X=np.array(ct.fit_transform(X))
# + id="ZcxwEon-b8nV" colab={"base_uri": "https://localhost:8080/"} outputId="84004dba-4ab4-4863-e52f-96dcfb522a6f"
print(X[1])
# + [markdown] id="vHol938cW8zd"
# ### Splitting the dataset into the Training set and Test set
# + id="4CS40S4fN47K"
from sklearn.model_selection import train_test_split
# 80/20 train/test split; random_state fixes the shuffle for reproducibility.
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=0)
# + [markdown] id="RE_FcHyfV3TQ"
# ### Feature Scaling
# + id="XLwmXDeeQVmr"
from sklearn.preprocessing import StandardScaler
# Standardize features to zero mean / unit variance (important for NN training).
# fit_transform() fits the scaler on the TRAINING data only; transform() then
# applies the same train-fitted scaling to the test data, avoiding data leakage.
sc=StandardScaler()
x_train=sc.fit_transform(x_train)
x_test=sc.transform(x_test)
# + [markdown] id="-zfEzkRVXIwF"
# ## Part 2 - Building the ANN
# + [markdown] id="KvdeScabXtlB"
# ### Initializing the ANN
# + id="JovDEfJTS0EE"
# Create the ANN as a Keras Sequential (linear stack of layers) model.
ann=tf.keras.models.Sequential()
# + [markdown] id="rP6urV6SX7kS"
# ### Adding the input layer and the first hidden layer
# + id="5MHpoJYcTgJE"
# First hidden layer of the Artificial Neural Network.
# units      : number of neurons in the layer (6 here)
# activation : 'relu' --> rectified linear unit
# The input shape is inferred from the data on the first call to fit().
ann.add(tf.keras.layers.Dense(units=6, activation='relu'))
# + [markdown] id="BELWAc_8YJze"
# ### Adding the second hidden layer
# + id="Vzu_38C0V9i9"
ann.add(tf.keras.layers.Dense(units=6, activation='relu'))
# + [markdown] id="OyNEe6RXYcU4"
# ### Adding the output layer
# + id="T9Ju2DxoV-iX"
# Single sigmoid output: yields the churn probability in [0, 1].
ann.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))
# + [markdown] id="JT4u2S1_Y4WG"
# ## Part 3 - Training the ANN
# + [markdown] id="8GWlJChhY_ZI"
# ### Compiling the ANN
# + id="TxsJpzhDWyCp"
# optimizer: 'adam', an adaptive variant of stochastic gradient descent
# loss: 'binary_crossentropy' since there are only 2 possible outcomes
ann.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
# + [markdown] id="0QR_G5u7ZLSM"
# ### Training the ANN on the Training set
# + colab={"base_uri": "https://localhost:8080/"} id="hxPIioZxYHPf" outputId="e38a1ea6-7254-4785-9c2f-529de8f1ddc7"
# batch_size: number of rows processed per gradient update
# epochs: number of full passes over the training set
# NOTE(review): 111 epochs is an unusual count; 100 was likely intended -- confirm.
ann.fit(x_train,y_train,batch_size=32,epochs=111)
# + [markdown] id="tJj5k2MxZga3"
# ## Part 4 - Making the predictions and evaluating the model
# + [markdown] id="84QFoqGYeXHL"
# ### Predicting the result of a single observation
# + [markdown] id="CGRo3eacgDdC"
#
#
# Using our ANN model to predict if the customer with the following informations will leave the bank:
#
# Geography: France
#
# Credit Score: 600
#
# Gender: Male
#
# Age: 40 years old
#
# Tenure: 3 years
#
# Balance: \$ 60000
#
# Number of Products: 2
#
# Does this customer have a credit card? Yes
#
# Is this customer an Active Member: Yes
#
# Estimated Salary: \$ 50000
#
# So, should we say goodbye to that customer?
# + colab={"base_uri": "https://localhost:8080/"} id="JHSmqPprZayX" outputId="c5006178-f23d-4432-9aad-198f8867b2f6"
# Predict for a single customer. predict() expects a 2-D array, hence the double
# brackets. The values must be encoded and ordered exactly like the training
# features (one-hot country first, gender label-encoded) and scaled with the
# SAME fitted scaler `sc`. Thresholding at 0.5 converts the churn probability
# to a yes/no decision.
print(ann.predict(sc.transform([[1, 0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])) > 0.5)
# + [markdown] id="ZhU1LTgPg-kH"
# **Solution**
# + [markdown] id="wGjx94g2n7OV"
# Therefore, our ANN model predicts that this customer stays in the bank!
#
# **Important note 1:** Notice that the values of the features were all input in a double pair of square brackets. That's because the "predict" method always expects a 2D array as the format of its inputs. And putting our values into a double pair of square brackets makes the input exactly a 2D array.
#
# **Important note 2:** Notice also that the "France" country was not input as a string in the last column but as "1, 0, 0" in the first three columns. That's because of course the predict method expects the one-hot-encoded values of the state, and as we see in the first row of the matrix of features X, "France" was encoded as "1, 0, 0". And be careful to include these values in the first three columns, because the dummy variables are always created in the first columns.
# + [markdown] id="u7yx47jPZt11"
# ### Predicting the Test set results
# + colab={"base_uri": "https://localhost:8080/"} id="wSytbSumbJrj" outputId="23ce1fb1-c8f6-4f2a-c361-6391238c8454"
# Predict churn probabilities for the held-out test set, then threshold at 0.5
# to obtain binary yes/no predictions.
y_pred=ann.predict(x_test)
y_pred = (y_pred > 0.5)
# Show predictions and true labels side by side (one column each).
print(np.concatenate((y_pred.reshape(len(y_pred),1),y_test.reshape(len(y_test),1)),1))
# + [markdown] id="o0oyfLWoaEGw"
# ### Making the Confusion Matrix
# + colab={"base_uri": "https://localhost:8080/"} id="Fwp2N5zicW8V" outputId="aabac951-8709-452d-c867-52e8b45e8acb"
# Confusion matrix (rows = true class, columns = predicted class) and accuracy.
from sklearn.metrics import confusion_matrix,accuracy_score
cm=confusion_matrix(y_test,y_pred)
print(cm)
accuracy_score(y_test,y_pred)
| Artificial_Neural_Network_for_Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.1
# language: julia
# name: julia-1.4
# ---
# # NBodySimulator tests
#
# Stuff I found at https://github.com/SciML/NBodySimulator.jl
# Install required packages (only needed on the first run).
import Pkg; Pkg.add("NBodySimulator"); Pkg.add("StaticArrays")
# Newtonian gravitational constant.
const G = 6.67e-11 # m^3/kg/s^2
using NBodySimulator
using StaticArrays
# MassBody(position, velocity, mass); the commented pair keeps an earlier,
# faster initial velocity for reference.
#body1 = MassBody(SVector(0.0, 1.0, 0.0), SVector( 5.775e-6, 0.0, 0.0), 2.0)
#body2 = MassBody(SVector(0.0,-1.0, 0.0), SVector(-5.775e-6, 0.0, 0.0), 2.0)
body1 = MassBody(SVector(0.0, 1.0, 0.0), SVector( 1e-6, 0.0, 0.0), 2.0)
body2 = MassBody(SVector(0.0,-1.0, 0.0), SVector(-1e-6, 0.0, 0.0), 2.0)
# Two bodies under mutual gravity, simulated for tspan[2] seconds.
system = GravitationalSystem([body1,body2], G)
tspan = (0.0, 1111150.0)
simulation = NBodySimulation(system, tspan)
sim_result = run_simulation(simulation)
using Plots
# Built-in animation helper: writes a GIF of the particle paths.
animate(sim_result, "path_to_animated_particles.gif")
# +
# Custom animation: an empty 2-series 3-D scatter, appended to frame by frame.
plt = plot3d(
    2,
    xlim = (-1, 1),
    ylim = (-1, 1),
    zlim = (-0.5, 0.5),
    title = "Orbit",
    marker = :circle,
    linetype = :scatter3d
)
# Sample 19 evenly spaced times; rotl90 presumably reorders the position matrix
# so x/y/z coordinates of both bodies can be pushed into the series -- confirm
# against get_position's return layout.
@gif for i=1:19
    r = get_position(sim_result, i*(tspan[2]/19))
    #println(rotl90(r)[1:2], rotl90(r)[3:4], rotl90(r)[5:6])
    push!(plt, rotl90(r)[1:2], rotl90(r)[3:4], rotl90(r)[5:6])
    #scatter!(plt, x = rotl90(r)[1:2], y = rotl90(r)[3:4], z = rotl90(r)[5:6])
end
# -
# -
# Grabbed another example from https://rosettacode.org/wiki/N-body_problem#Julia
# +
using StaticArrays, Plots, NBodySimulator
const bodies = [
MassBody(SVector(0.0, 1.0, 0.0), SVector( 5.775e-6, 0.0, 0.0), 2.0),
MassBody(SVector(0.0,-1.0, 0.0), SVector(-5.775e-6, 0.0, 0.0), 2.0),
MassBody(SVector(0.0, 4.5, 0.0), SVector(-2.5e-6, 0.0, 0.0), 1.0)
]
const timespan = (0.0, 1111150.0)
# Run a gravitational N-body simulation for `nbodies` over the time span
# `tspan`, using the global gravitational constant G.
# Returns the NBodySimulator result object.
function nbodysim(nbodies, tspan)
    gravsystem = GravitationalSystem(nbodies, G)
    return run_simulation(NBodySimulation(gravsystem, tspan))
end
simresult = nbodysim(bodies, timespan)
# +
#animate(simresult)
# 2-D animation of the three-body system simulated above.
#plt2 = plot3d(
plt2 = plot(
    3,
    xlim = (-5, 5),
    ylim = (-5, 5),
    #zlim = (-0.1, 0.1),
    title = "Orbit 2",
    marker = :circle,
    #linetype = :scatter3d
)
# Fix: the original indexed `tspan`, the variable belonging to the FIRST
# example above; this simulation was run over `timespan`. The two happen to
# hold the same value, so the plot looked right by coincidence only.
@gif for i=1:19
    r = get_position(simresult, i*(timespan[2]/19))
    #println(rotl90(r)[1:3], rotl90(r)[4:6], rotl90(r)[7:9])
    #push!(plt2, rotl90(r)[1:3], rotl90(r)[4:6], rotl90(r)[7:9])
    push!(plt2, rotl90(r)[1:3], rotl90(r)[4:6])
end
| NBodySimulator tests.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:sogos]
# language: python
# name: conda-env-sogos-py
# ---
# ### Climatology plot
#
# A mean plot to introduce the region, and the mean tracer gradients that are being stirred.
import numpy as np
import gsw
import xarray as xr
import matplotlib
import matplotlib.pyplot as plt
import cmocean
data_dir = '/Users/dhruvbalwada/OneDrive/sogos_data/data/'
# RG Argo climatology: temperature, salinity, and the yearly extension files.
# decode_times=False because the calendar attribute is missing (fixed below).
Tclim = xr.open_dataset(data_dir + "raw/RG_climatology/RG_ArgoClim_Temperature_2017.nc", decode_times=False)
Sclim = xr.open_dataset(data_dir + "raw/RG_climatology/RG_ArgoClim_Salinity_2017.nc", decode_times=False)
Climextra = xr.open_mfdataset(data_dir+ 'raw/RG_climatology/RG_ArgoClim_201*', decode_times=False)
# +
RG_clim = xr.merge([Tclim, Sclim, Climextra])
# Calendar type was missing, and giving errors in decoding time
RG_clim.TIME.attrs['calendar'] = '360_day'
RG_clim = xr.decode_cf(RG_clim);
# -
## Add derived TEOS-10 variables via gsw: absolute salinity (SA), conservative
## temperature (CT), and potential density anomaly referenced to 0 db (SIGMA0).
## dask='parallelized' lets apply_ufunc run lazily on the dask-backed arrays.
SA = xr.apply_ufunc(gsw.SA_from_SP, RG_clim.ARGO_SALINITY_MEAN+RG_clim.ARGO_SALINITY_ANOMALY, RG_clim.PRESSURE ,
                    RG_clim.LONGITUDE, RG_clim.LATITUDE,
                    dask='parallelized', output_dtypes=[float,]).rename('SA')
CT = xr.apply_ufunc(gsw.CT_from_t, SA, RG_clim.ARGO_TEMPERATURE_MEAN+RG_clim.ARGO_TEMPERATURE_ANOMALY, RG_clim.PRESSURE,
                    dask='parallelized', output_dtypes=[float,]).rename('CT')
SIGMA0 = xr.apply_ufunc(gsw.sigma0, SA, CT, dask='parallelized', output_dtypes=[float,]).rename('SIGMA0')
RG_clim = xr.merge([RG_clim, SA, CT, SIGMA0])
# +
# Global matplotlib font settings for all figures below.
font = {'family' : 'sans',
        'weight' : 'normal',
        'size'   : 12}
matplotlib.rc('font', **font)
# -
# ### Make a section plot
# +
# Monthly climatology along the 30E meridian, 60S-45S, upper 1200 db:
# conservative temperature, absolute salinity, and potential density.
# .load() pulls the dask arrays into memory once, so later plots are fast.
CT_region = RG_clim.CT.sel(LONGITUDE=30, method='nearest').sel(
    LATITUDE=slice(-60,-45), PRESSURE=slice(0,1200)
).groupby('TIME.month').mean().load()
SA_region = RG_clim.SA.sel(LONGITUDE=30, method='nearest').sel(
    LATITUDE=slice(-60,-45), PRESSURE=slice(0,1200)
).groupby('TIME.month').mean().load()
rho_region = RG_clim.SIGMA0.sel(LONGITUDE=30, method='nearest').sel(
    LATITUDE=slice(-60,-45), PRESSURE=slice(0,1200)
).groupby('TIME.month').mean().load()
Clim_region = xr.merge([CT_region, SA_region, rho_region])
# +
# Annual-mean temperature section with density contours overlaid.
plt.figure(figsize=(5,3.2))
im = CT_region.mean(['month']).plot.contourf(levels=np.linspace(-0.3,8,21), cmap=cmocean.cm.thermal,
                                             add_colorbar=False
                                            )
cbar = plt.colorbar(im, ticks= [0, 1,2,3,4,5,6,7, 8])
plt.xticks([-58, -54, -50, -46], labels=['58S', '54S', '50S', '46S'])
#plt.xtick_labels(['58S', '54S', '50S', '46S'])
cbar.ax.set_ylabel('Conservative Temperature ($^oC$)')
plt.gca().invert_yaxis()
# Overlay selected isopycnals in black.
CS = rho_region.mean(['month']).plot.contour(levels=[26.7, 27,27.3, 27.6, 27.8], linewidths=.75,
                                             colors='k')
class nf(float):
    """Float subclass whose repr drops a trailing '.0' (compact contour labels)."""
    def __repr__(self):
        one_dp = f'{self:.1f}'
        if one_dp.endswith('0'):
            return f'{self:.0f}'
        return one_dp
# Relabel contour levels without trailing '.0' and draw inline labels.
CS.levels = [str(nf(val)) for val in CS.levels]
plt.clabel(CS, CS.levels, inline=True, fontsize=10)
# NOTE(review): invert_yaxis() toggles the axis direction; this second call
# (after the one above the contour) re-inverts it -- confirm the final
# orientation is the intended depth-increasing-downward view.
plt.gca().invert_yaxis()
# Double-headed arrow above the axes; presumably marks the study sub-region --
# confirm against the paper figure.
plt.annotate('', xy=(0.4, 1.05), xycoords='axes fraction', xytext=(.65, 1.05),
             arrowprops=dict(arrowstyle="<->", color='k'))
plt.ylabel('Depth (m)')
plt.xlabel('Latitude')
plt.title('')
plt.savefig('CT_mean.pdf')
# +
def _plot_ct_section(month, title):
    """Plot one month's temperature section with labelled density contours
    on the current axes.

    Factored out of two near-identical copy-pasted blocks (January / June);
    statement order is preserved exactly. Uses the module-level CT_region and
    rho_region climatologies.
    """
    im = CT_region.sel(month=month).plot.contourf(
        levels=np.linspace(-0.3, 8, 21), cmap=cmocean.cm.thermal,
        add_colorbar=False)
    cbar = plt.colorbar(im, ticks=[0, 1, 2, 3, 4, 5, 6, 7, 8])
    plt.xticks([-58, -54, -50, -46], labels=['58S', '54S', '50S', '46S'])
    cbar.ax.set_ylabel('Conservative Temperature ($^oC$)')
    plt.gca().invert_yaxis()
    CS = rho_region.sel(month=month).plot.contour(
        levels=[26.7, 27, 27.3, 27.6, 27.8], linewidths=.75, colors='k')

    class nf(float):
        # repr without a trailing '.0' so contour labels stay compact
        def __repr__(self):
            s = f'{self:.1f}'
            return f'{self:.0f}' if s[-1] == '0' else s

    CS.levels = [str(nf(val)) for val in CS.levels]
    plt.clabel(CS, CS.levels, inline=True, fontsize=10)
    plt.gca().invert_yaxis()
    plt.annotate('', xy=(0.4, 1.05), xycoords='axes fraction', xytext=(.65, 1.05),
                 arrowprops=dict(arrowstyle="<->", color='k'))
    plt.ylabel('Depth (m)')
    plt.xlabel('Latitude')
    plt.title(title)


# Two-panel figure: January (left) and June (right) sections.
plt.figure(figsize=(10,3.2))
plt.subplot(121)
_plot_ct_section(1, 'January')
plt.subplot(122)
_plot_ct_section(6, 'June')
#plt.savefig('CT.pdf')
plt.tight_layout()
# -
# ### Make a lat-lon plot of gradients on a particular density surface
# +
# Select the wider lat-lon region (20E-50E, 60S-45S, upper 1200 db) and form
# monthly climatologies, loaded into memory.
CT_region2 = RG_clim.CT.sel(LONGITUDE=slice(20, 50)).sel(
    LATITUDE=slice(-60,-45), PRESSURE=slice(0,1200)
).groupby('TIME.month').mean().load()
SA_region2 = RG_clim.SA.sel(LONGITUDE=slice(20, 50)).sel(
    LATITUDE=slice(-60,-45), PRESSURE=slice(0,1200)
).groupby('TIME.month').mean().load()
rho_region2 = RG_clim.SIGMA0.sel(LONGITUDE=slice(20, 50)).sel(
    LATITUDE=slice(-60,-45), PRESSURE=slice(0,1200)
).groupby('TIME.month').mean().load()
Clim_region2 = xr.merge([CT_region2, SA_region2, rho_region2])
# -
from xgcm import Grid
from xgcm.autogenerate import generate_grid_ds
# +
# Build an xgcm grid so fields can be transformed from pressure coordinates
# onto the SIGMA0 density surfaces in theta_target.
Clim_region2 = generate_grid_ds(Clim_region2,
                                {'Z':'PRESSURE', 'Y':'LATITUDE', 'X':'LONGITUDE'})
grid = Grid(Clim_region2, periodic=False)
theta_target = np.linspace(26.7, 27.9, 20)
# +
PRESSURE_broad, _ = xr.broadcast(Clim_region2.PRESSURE, Clim_region2.SIGMA0)
# need to do this because PRESSURE is 1D and grid.transform expects a full field
PRES_transformed = grid.transform(PRESSURE_broad, 'Z',
                                  theta_target, target_data = Clim_region2.SIGMA0)
CT_transformed = grid.transform(Clim_region2.CT, 'Z',
                                theta_target, target_data = Clim_region2.SIGMA0)
SA_transformed = grid.transform(Clim_region2.SA, 'Z',
                                theta_target, target_data = Clim_region2.SIGMA0)
# +
# Temperature and pressure on the 27.4 isopycnal for January.
plt.figure(figsize=(12, 4))
plt.subplot(121)
CT_transformed.sel(SIGMA0=27.4, method='nearest').sel(month=1).plot.contourf(levels=np.linspace(-1,4, 11),
                                                                             cmap=cmocean.cm.thermal)
PRES_transformed.sel(SIGMA0=27.4, method='nearest').sel(month=1).plot.contour(levels=11,
                                                                              cmap=cmocean.cm.haline)
plt.subplot(122)
PRES_transformed.sel(SIGMA0=27.4, method='nearest').sel(month=1).plot.contourf(levels=11,
                                                                               cmap=cmocean.cm.haline)
plt.tight_layout()
# +
# Depth of the 27.4 isopycnal: July (left) vs January (right).
plt.figure(figsize=(12, 4))
plt.subplot(121)
PRES_transformed.sel(SIGMA0=27.4, method='nearest').sel(month=7).plot.contourf(levels=np.linspace(0,1000, 21),
                                                                               cmap=cmocean.cm.haline)
plt.subplot(122)
PRES_transformed.sel(SIGMA0=27.4, method='nearest').sel(month=1).plot.contourf(levels=np.linspace(0,1000, 21),
                                                                               cmap=cmocean.cm.haline)
plt.tight_layout()
# -
# Add glider track on top of temperature plot, colored by the temperature on the same isopycnal.
ds_660_rho = xr.open_dataset('data/ctd_660_isopycnal_grid_14_july_2021.nc')
# +
# Glider (SG660) temperature: full section and along the 27.4 isopycnal.
plt.figure(figsize=(20, 7))
plt.subplot(211)
ds_660_rho.CT.plot()
plt.gca().invert_yaxis()
plt.subplot(212)
ds_660_rho.CT.sel(rho_grid=27.4).plot()
# +
# Climatological temperature on a chosen isopycnal with the glider track
# scattered on top (same colormap/limits so colors are comparable).
sigma_sel = 27.6
plt.figure(figsize=(7,4))
CT_transformed.sel(SIGMA0=sigma_sel, method='nearest').mean('month').plot.contourf(levels=np.linspace(0.8,2.7, 11),
                                                                                   cmap=cmocean.cm.thermal)
# Dashed/dash-dot contours: where the isopycnal sits at 200 db in Jan vs Jul.
PRES_transformed.sel(SIGMA0=sigma_sel, method='nearest').sel(month=1).plot.contour(
    levels=[200], colors='k', linestyles='--')
PRES_transformed.sel(SIGMA0=sigma_sel, method='nearest').sel(month=7).plot.contour(
    levels=[200], colors='k', linestyles='-.')
plt.scatter(ds_660_rho.longitude.sel(rho_grid=sigma_sel , method='nearest'),
            ds_660_rho.latitude.sel(rho_grid=sigma_sel, method='nearest'),
            c = ds_660_rho.CT.sel(rho_grid=sigma_sel, method='nearest'),
            s=10, cmap=cmocean.cm.thermal,
            vmin=0.8, vmax=2.7)
plt.xlim([26, 42])
plt.ylim([-58, -49])
plt.title('$\sigma$ = ' + str(sigma_sel))
# +
# Publication figure: same as above but for sigma = 27.5, with annual-mean
# isopycnal depth contours (200/500/800 db) and the glider track overlaid.
sigma_sel = 27.5
plt.figure(figsize=(5,3.2))
CT_transformed.sel(SIGMA0=sigma_sel, method='nearest').mean('month').plot.contourf(levels=np.linspace(0.8,2.7, 11),
                                                                                   cmap=cmocean.cm.thermal)
CS = PRES_transformed.sel(SIGMA0=sigma_sel, method='nearest').mean('month').plot.contour(
    levels=[200, 500, 800],
    colors=['w','k','r'], linestyles='--',
    linewidths=1)
# Helper so contour labels drop a trailing '.0' (labels currently disabled).
class nf(float):
    def __repr__(self):
        s = f'{self:.1f}'
        return f'{self:.0f}' if s[-1] == '0' else s
CS.levels = [str(nf(val)) for val in CS.levels]
#plt.clabel(CS, CS.levels, inline=True, fontsize=7)
plt.scatter(ds_660_rho.longitude.sel(rho_grid=sigma_sel), ds_660_rho.latitude.sel(rho_grid=sigma_sel),
            c = ds_660_rho.CT.sel(rho_grid=sigma_sel), s=5, cmap=cmocean.cm.thermal,
            vmin=0.8, vmax=2.7)
plt.title('$\sigma$ = ' + str(sigma_sel))
plt.yticks([-58, -54, -50, -46], labels=['58S', '54S', '50S', '46S'])
plt.xticks([26, 30, 34, 38, 42], labels=['26E', '30E', '34E', '38E', '42E'])
plt.xlim([26, 42])
plt.ylim([-58, -48])
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.savefig('CT_sigma27_5.pdf')
# -
| notebooks/notebooks_variability_paper/climatology_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# * References
# * https://handong1587.github.io/deep_learning/2015/10/09/genrative-models.html
# # Theoretical Issues
#
# ## Generative Adversarial Networks
#
# * Source code
# * https://github.com/goodfeli/adversarial
# * Contribution Points
# * Introduce the concept of Generative Adversarial Networks as a replacement for traditional Probabilistic Graphical Models
# * Prove the existence and the convergence of the solution of its main objective function
# <img src="DQU1J420QGGCHE6KIGCI4JQWRA43ASWD.png"/>
#
# <img src="A0DL8S6YCH58V36HT847IS3RBH15QVMO.png"/>
# ## Unsupervised Representation Learning With Deep Convolutional Generative Adversarial Networks
# * Source code
# * https://github.com/Newmu
# * https://github.com/soumith/dcgan.torch
# * https://github.com/soumith/ganhacks
# * Contribution Points
# * Make GAN stable to train in most settings
# * Replace any pooling layers with strided convolutions (discriminator) and fractional-strided convolutions (generator).
# * Use batchnorm in both the generator and the discriminator except for the generator output layer and the discriminator input layer.
# * Remove fully connected hidden layers for deeper architectures.
# * Use ReLU activation in generator for all layers except for the output, which uses "tanh".
# * Use LeakyReLU activation in discriminator for all layers.
# * Preprocess scaling training images to the range of [-1, 1] for "tanh"
# * Show the Generators have interesting vector arithmetic properties allowing for easy manipulation of many semantic qualities of generated samples
#
# <img src="VYWS76LPVRVUP3EPHTQEVY5H74DQS411.png"/>
#
# <img src="P95PEWQLRYFICYYHT66HB3VJBRRH132S.png"/>
#
# ## InfoGAN: Interpretable Representation Learning by Information Maximizing Generative Adversarial Nets
# * Source code
# * https://github.com/openai/InfoGAN
# <img src="J828LRLIO0YOCG73FVJY5TKQYW9NYWGM.png"/>
# <img src="QV04FGSRJERRBLAVF14613ATV1DYJMAD.png"/>
# ## Energy-based Generative Adversarial Networks
# * Source code
# * Not support the author's official code
# * Contribution Points
# * An Energy-based Formulation for generative adversarial training
# * A proof
# * A EBGAN framework with the discriminator using an auto-encoder architecture in which the energy is the reconstruction error.
# * A set of systematic experiments to explore the set of hyper-parameters and architectural choices that produce good results for EBGANs and conventional GANs.
# * EBGAN is more robust
# * EBGAN can generate reasonable-looking high-resolution images from 256x256 pixel resolution
# ## SeqGAN
# * Source code
# * https://github.com/LantaoYu/SeqGAN
# # Applications
#
# ## Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network
#
# * $\hat{x} = G(z)$
# * Z is Low Resolution Image, not noise vector Z
# * Output X is Super Resolution Image from G
# * $D(x)$
# * X is Ground Truth High Resolution Images or Generated Super Resolution Images
# * Upscaling using a Sub-Pixel Convolutional Neural Network
# > We increase the resolution of the input image with two trained sub-pixel convolution
# [ <NAME>, and <NAME>. Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network. ]
#
# <img src="XR3C47HP252S2GXV23382MNTDGWGWGJ4.png"/>
#
# <img src="QU3Q92FICTH9IO4MOI9XJKWOVM8GHKGU.png"/>
# # Semantic Segmentation using Adversarial Network
#
# * Set Segmentation Networks as Typical G(z)
# * invent new network D(x) for matching function, If ground truth class map and segmentation map from G(z) is similar -> 1 otherwise 0
# * New D(x)
# * Input: Target image to segment and Class map
# * ground truth or Segmented map from G(z) Here z is the target image, There is no noise Z
#
# <img src="PCLHSA29BNAGTNS2562S24SJV7V69JG2.png"/>
# <img src="WIFUGPYQLO1QCDVRX972XXBMO5CBOKBR.png"/>
# ## Generative Adversarial Text to Image Synthesis
# *
# <img src="DL5OB6I70R2ELF2VL5O5PU44VB06IXTU.png"/>
#
# <img src="CKMCOSPCUBA6Y3HLBNK9QEQT51XDJAKU.png"/>
| papers/Generative_Models/Generative Adversarial Networks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Assimilation examples used during Bingewatch Academy
# ## Bingewatch Academy
# In my Bingewatch Academy talk I explored the question: "Why is Daredevil better at predicting events than normal humans?" I explained how data assimilation is used in weather forecasting to continuously combine observations and model predictions to get the best possible forecast of the future. In this notebook I will generate the figures that were used in my presentation.
#
# ## Data assimilation
# Data assimilation is the science of combining observational data and knowledge of system behavior to get an optimal estimation, including an estimation of the uncertainty in your estimation, of a systems past, current and/or future states. There are many different data asismilation variations: some focussing on a specific family of systems, some on specific use cases. In this example the data assimilation method that I use is the Ensemble Kalman Filter. The 'system', or 'model' that I use is the Lorenz-96 model.
#
# ## the Lorenz 96 model
# The Lorenz 96 model <cite data-cite="2916206/TVEEWNWX"></cite> is a typical chaotic dynamical system that is often used as a benchmark model in data assimilation studies. It was designed by Lorenz as a toy model for atmospheric circulation. It is defined for $i=1,...,N$ by
# \begin{equation}
# \frac{dx_{i}}{dt}=\left(x_{i+1} - x_{i-2}\right)x_{i-1} - x_{i} + F
# \end{equation}
# where i is cyclical, i.e. $x_{0}=x_{N}$ and $x_{-1} = x_{N-1}$. $F$ is an external force acting on the system. A value of $F=8$ is known to create chaotic behavior and is often used. The dimension $N$ can be freely chosen and is typically $40$, but for testing very high-dimensional systems, higher values can be used. The Lorenz 96 model is a typical chaotic model where, although the model is deterministic, slight variations in the input state will over time result in completely different states.
#
# ## Numerical implementation of the Lorenz 96 model
# A fourth-order Runge-Kutta scheme is used to implement the Lorenz 96 model. Writing the entire state-vector as $\vec{x}$ and using $f\left(\vec{x}\right)$ as the right hand side of the model, ie:
# \begin{eqnarray}
# f\left(x_{i}\right) = \left(x_{i+1} - x_{i-2}\right)x_{i-1} - x_{i} + F
# \\
# f\left(\vec{x}\right) = \left\{f\left(x_{1}\right),...,f\left(x_{N}\right)\right\}
# \end{eqnarray}
# the implementation is given by:
# \begin{eqnarray}
# \vec{k}_{1}=f\left(\vec{x}\left(t\right)\right)
# \\
# \vec{k}_{2}=f\left(\vec{x}\left(t\right) + \frac{1}{2}\vec{k}_{1}\Delta t\right)
# \\
# \vec{k}_{3}=f\left(\vec{x}\left(t\right) + \frac{1}{2}\vec{k}_{2}\Delta t\right)
# \\
# \vec{k}_{4}=f\left(\vec{x}\left(t\right) + \vec{k}_{3}\Delta t\right)
# \end{eqnarray}
# and finally
# \begin{equation}
# \vec{x}\left(t + \Delta t\right) = \vec{x}\left(t\right) + \frac{1}{6}\left(\vec{k}_{1} + 2\vec{k}_{2} + 2 \vec{k}_{3} + \vec{k}_{4}\right)
# \end{equation}
#
# ## The Basic Model Interface (BMI)
# The basic model interface allows communicating with models in a generic fashion. It requires a few standard methods to be available such as 'initialize()' and 'update()'. Methods that are not relevant for the model need still be implemented, but can simply raise a one line exception. See <cite data-cite="2916206/VXTQPCA7"></cite> for more information. Implementing the BMI allows easy interaction with the model. The cells below initiate one instance of the model. For reasons that will become clear we will call this instance "truthModel".
#
# BMI models are typically initialized with a settings-file. This is overkill here, but for completeness, we generate the settings-file first and than pass it to the model.
# ## Ensemble Kalman Filter example using Lorenz-96 model and BMI
# The Ensemble Kalman Filter (EnKF) is a variant of the Kalman Filter used when dealing with models for which it is hard to define a tangent linear model. Data assimilation methods, including all variants of the Kalman Filter family, set out to provide the (mathematically) optimal estimation of the true state of a system, given that a (often physical/physically based) model is available that can project the current state of the model into the future and that at the same time observations are available that measure (parts of) the state, either directly or indirectly.
#
# A mathematical overview of the EnKF is given in <cite data-cite="2916206/GVM9N4GZ"></cite>. This notebook is intended as an introduction on how to do data assimilation within the eWaterCycle framework, with models that communicate through BMI. It is not intended as an in-depth explanation of the EnKF.
#
# ## data assimilation jargon
# The following terms are often used in data assimilation:
#
# - **ensemble** is a collection of model-instances. Often these are multiple instances of the same model where the spread in the model state represents the uncertainty in our knowledge of that model state.
# - **model** a mathematical and/or computer code represenation of how the state of the system evolves in time.
# - **observation** a measurement (or set of measurements, including images) that relates to (part of) the state of the system
# - **observation model** a mathematical and/or computer code representation of how the state relates to the observations. Often donated by $\mathbf{H}$.
# - **forecast** The forecasted state using the model and a previous state
# - **analyses** The best estimate of the state using both a forecast and an observation. The analyses (or analyses-ensemble) is the output of a data assimilation method.
#
#
# +
#required libraries and settings
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import yaml
import io
import math
import BMILorenz
import EnKF
# -
# ## settings
# The settings for this experiment are split between settings for the model, for the observations and for the data assimilation method and finally for the experiment
# +
## Model settings
J = 40 #dimension of the Lorenz-96 model state vector
commonStartState = [0 for i in range(J)] #start-vector (all zeros ...)
commonStartState[5]=0.01                 #... except a small kick in element 5
#settings data in dict for export to YAML file (consumed by BMILorenz)
settings = {'J': J,
            'F': 8.0,
            'startTime': 0.0,
            'endTime': 10.0,
            'dt':1e-3,
            'startState': commonStartState}
## Observations
observationSigma = [0.05,0.5] #standard deviations of the observations: one low-error and one high-error experiment
obsVector = range(math.floor(J/2)) #only the first half of the state is observed
obsSize = len(obsVector); #size of the observation vector
def H(state): #the "observation model" mapping model-space to observation-space
    # Linear observation operator: selects the observed state elements.
    return state[obsVector]
## Ensemble Kalman Filter
N = 100 #number of ensemble members; needs to be higher than the dimension of
        # the model for stability, since no inflation is implemented.
## Experiment
spinUpTime = 3 #time the ensemble is run before data assimilation starts, to let it spread out
updateInterval = 1 #how often the ensemble is updated with observations
plotState = 5 #which state of the model (both truth and ensemble) to plot
obsPlotState = 5 #which element of the observation vector to plot
# -
# Write YAML setting file for BMI model
with io.open('settings.yaml', 'w', encoding='utf8') as outfile:
    yaml.safe_dump(settings, outfile, default_flow_style=False, allow_unicode=True)
# +
# Start with two empty ensembles: one for the "low observational error" case
# and one for the "high observational error" case.
ensembleLow = []
ensembleHigh = []
# Create and initialize the truth run: a single BMILorenz instance whose
# trajectory the ensembles will try to track.
truthModel = BMILorenz.BMILorenz ()
truthModel.initialize('settings.yaml')
output = pd.DataFrame(columns = ['truth','observation'])
for n in range (N):
    # Each member starts from the common state plus a small random perturbation
    # of element 5, so the ensemble spreads out during spin-up.
    ensembleLow.append(BMILorenz.BMILorenz ())
    ensembleLow[n].initialize('settings.yaml')
    ensembleLow[n].set_value_at_indices('state',5,ensembleLow[n].get_value_at_indices('state',5) + np.random.randn(1)*0.01)
    ensembleHigh.append(BMILorenz.BMILorenz ())
    ensembleHigh[n].initialize('settings.yaml')
    ensembleHigh[n].set_value_at_indices('state',5,ensembleHigh[n].get_value_at_indices('state',5) + np.random.randn(1)*0.01)
    # Also add a column per member to the output dataframe to store the output.
    output['ensembleLow' + str(n)]= np.nan
    output['ensembleHigh' + str(n)]= np.nan
# +
# Spin up the ensemble: run truth and all members forward WITHOUT assimilation
# until spinUpTime, recording the plotted state element at each step.
while truthModel.get_current_time()< spinUpTime:
    truthModel.update()
    output.loc[truthModel.get_current_time(),'truth'] = truthModel.get_value_at_indices('state',plotState)
    #observationLow = truthModel.get_value('state') + observationSigma[0] * np.random.randn(J)
    #output.at[truthModel.get_current_time(),'observationLow'] = observationLow[plotState]
    #observationHigh = truthModel.get_value('state') + observationSigma[1] * np.random.randn(J)
    #output.at[truthModel.get_current_time(),'observationJigh'] = observationHigh[plotState]
    # Loop through the ensemble members and store the state after each update.
    for n in range (N):
        ensembleLow[n].update()
        output.at[ensembleLow[n].get_current_time(),'ensembleLow' + str(n)] = ensembleLow[n].get_value_at_indices('state',plotState)
        ensembleHigh[n].update()
        output.at[ensembleHigh[n].get_current_time(),'ensembleHigh' + str(n)] = ensembleHigh[n].get_value_at_indices('state',plotState)
# First assimilation update happens once the spin-up period has passed.
updateTime = spinUpTime
# +
# Main forecast/analysis loop: run truth and both ensembles to the end time,
# assimilating synthetic observations of the truth every updateInterval.
foreCastEnsembleLow = np.zeros([J,N])
foreCastEnsembleHigh = np.zeros([J,N])
observationEnsembleLow = np.zeros([obsSize,N])
observationEnsembleHigh = np.zeros([obsSize,N])
while truthModel.get_current_time()<truthModel.get_end_time():
    truthModel.update()
    output.loc[truthModel.get_current_time(),'truth'] = truthModel.get_value_at_indices('state',plotState)
    # Forecast step: advance every ensemble member and record the plotted state.
    for n in range (N):
        ensembleLow[n].update()
        ensembleHigh[n].update()
        #observationEnsemble[:,n] = observation + observationSigma*np.random.randn(obsSize)
        output.at[ensembleLow[n].get_current_time(),'ensembleLow' + str(n)] = ensembleLow[n].get_value_at_indices('state',plotState)
        output.at[ensembleHigh[n].get_current_time(),'ensembleHigh' + str(n)] = ensembleHigh[n].get_value_at_indices('state',plotState)
    # Analysis step: once past updateTime, generate noisy observations of the
    # truth, perturb them per member (standard stochastic EnKF), and update
    # each ensemble with its own observation-error level.
    if truthModel.get_current_time() > updateTime:
        observationLow = H(truthModel.get_value('state')) + observationSigma[0] * np.random.randn(obsSize)
        output.at[truthModel.get_current_time(),'observationLow'] = observationLow[plotState]
        observationHigh = H(truthModel.get_value('state')) + observationSigma[1] * np.random.randn(obsSize)
        output.at[truthModel.get_current_time(),'observationHigh'] = observationHigh[plotState]
        for n in range (N):
            observationEnsembleHigh[:,n] = observationHigh + observationSigma[1]*np.random.randn(obsSize)
            observationEnsembleLow[:,n] = observationLow + observationSigma[0]*np.random.randn(obsSize)
            foreCastEnsembleLow[:,n] = ensembleLow[n].get_value('state')
            foreCastEnsembleHigh[:,n] = ensembleHigh[n].get_value('state')
        analysesEnsembleLow = EnKF.EnKF(foreCastEnsembleLow,observationEnsembleLow,H)
        # Clip the analysis to a physically plausible range to keep the model
        # from blowing up after an aggressive update.
        np.clip(analysesEnsembleLow, -10, 20, out=analysesEnsembleLow)
        for n in range (N):
            ensembleLow[n].set_value('state',analysesEnsembleLow[:,n])
        analysesEnsembleHigh = EnKF.EnKF(foreCastEnsembleHigh,observationEnsembleHigh,H)
        np.clip(analysesEnsembleHigh, -10, 20, out=analysesEnsembleHigh)
        for n in range (N):
            ensembleHigh[n].set_value('state',analysesEnsembleHigh[:,n])
        updateTime = updateTime + updateInterval
# -
# ## Here come the plots
# Truth trajectory only, first 3 time units.
plt.plot(output.loc[output.index < 3,'truth'],'r')
plt.xlim([0,6])
plt.ylim([-10,15])
plt.xlabel('time')
plt.savefig('truth.eps')
plt.show()
# Truth plus all High-noise ensemble members during spin-up.
plt.plot(output.loc[output.index < 3,output.columns.str.startswith('ensembleHigh')],'k')
plt.plot(output.loc[output.index < 3,'truth'],'r')
plt.xlim([0,6])
plt.ylim([-10,15])
plt.xlabel('time')
plt.savefig('truthEnsemble.eps')
plt.show()
# High-noise ensemble after one assimilation step; observations drawn with
# +/- 2 sigma markers.
plt.plot(output.loc[output.index < 4,output.columns.str.startswith('ensembleHigh')],'k')
plt.plot(output.loc[output.index < 4,'observationHigh'],'r.',output.loc[output.index < 4,'observationHigh']+2*observationSigma[1],'r*',output.loc[output.index < 4,'observationHigh']-2*observationSigma[1],'r*')
plt.plot(output.loc[output.index < 4,'truth'],'r')
plt.xlim([0,6])
plt.ylim([-10,15])
plt.xlabel('time')
plt.savefig('EnKFHigh1step.eps')
plt.show()
# Full High-noise run. NOTE(review): the +/- 2 sigma star series use the
# unmasked index here (output.loc[:, ...]) while the dots are masked to < 6 —
# harmless given xlim=[0,6], but inconsistent with the < 4 plot above.
plt.plot(output.loc[output.index < 6,output.columns.str.startswith('ensembleHigh')],'k')
plt.plot(output.loc[output.index < 6,'observationHigh'],'r.',output.loc[:,'observationHigh']+2*observationSigma[1],'r*',output.loc[:,'observationHigh']-2*observationSigma[1],'r*')
plt.plot(output.loc[output.index < 6,'truth'],'r')
plt.xlim([0,6])
plt.ylim([-10,15])
plt.xlabel('time')
plt.savefig('EnKFHigh.eps')
plt.show()
# Full Low-noise run, all members.
plt.plot(output.loc[output.index < 6,output.columns.str.startswith('ensembleLow')],'k')
plt.plot(output.loc[output.index < 6,'observationLow'],'r.',output.loc[:,'observationLow']+2*observationSigma[0],'r*',output.loc[:,'observationLow']-2*observationSigma[0],'r*')
plt.plot(output.loc[output.index < 6,'truth'],'r')
plt.xlim([0,6])
plt.ylim([-10,15])
plt.xlabel('time')
plt.savefig('EnKFLow.eps')
plt.show()
# NOTE(review): startswith('ensembleLow1') matches ensembleLow1, ensembleLow10,
# ensembleLow11, ... — presumably a deliberate ~10-member subset (the figure is
# saved as '...10ens'), but confirm the prefix selection is intended.
plt.plot(output.loc[output.index < 6,output.columns.str.startswith('ensembleLow1')],'k')
plt.plot(output.loc[output.index < 6,'observationLow'],'r.',output.loc[:,'observationLow']+2*observationSigma[0],'r*',output.loc[:,'observationLow']-2*observationSigma[0],'r*')
plt.plot(output.loc[output.index < 6,'truth'],'r')
plt.xlim([0,6])
plt.ylim([-10,15])
plt.xlabel('time')
plt.savefig('EnKFLow10ens.eps')
plt.show()
# <div class="cite2c-biblio"></div>
| .ipynb_checkpoints/Data Assimilation examples for bingeWatchAcademy-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
from scipy.stats import linregress
import psycopg2
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, Float
from api_keys import client_id
from twitch import TwitchClient
from pprint import pprint
# Destination folder for the CSV extracts used by the website.
csvpath = './Priya_Notebooks/Website/static/csv/'
# Twitch API client; client_id comes from the local api_keys module.
client = TwitchClient(client_id= f'{client_id}')
# +
#getting live streams data (top 100 by viewers; network call)
live_streams = client.streams.get_live_streams(limit = 100)
pprint(live_streams[0])
#lsdf = pd.DataFrame.from_dict(live_streams[0].channel, orient = 'index')
# -
# One-row inspection of the first stream's channel attributes.
lsdf = pd.DataFrame.from_dict(live_streams[0].channel, orient = 'index')
#live_streams[0].values()
lsdf.transpose()
# +
# Flatten the Stream objects into parallel per-channel lists so they can be
# assembled into a DataFrame below. NOTE: channel_id and channel_display_name
# are referenced again further down in this file, so the names must stay.
channels = []
game_name = []
viewers = []
channel_created_at = []
channel_followers = []
channel_id = []
channel_display_name = []
channel_game = []
channel_lan = []
channel_mature = []
channel_partner = []
channel_views = []
channel_description = []
for game in live_streams:
    channel_created_at.append(game.channel.created_at)
    channel_followers.append(game.channel.followers)
    channel_game.append(game.channel.game)
    channel_lan.append(game.channel.language)
    channel_mature.append(game.channel.mature)
    channel_partner.append(game.channel.partner)
    channel_views.append(game.channel.views)
    channel_description.append(game.channel.description)
    channel_id.append(game.channel.id)
    channel_display_name.append(game.channel.display_name)
    viewers.append(game.viewers)
toplivestreams = pd.DataFrame({
    "channel_id":channel_id,
    "channel_display_name":channel_display_name,
    "channel_description" : channel_description,
    "channel_created_at" : channel_created_at,
    "channel_followers" : channel_followers,
    "channel_game" : channel_game,
    "channel_lan" : channel_lan,
    "channel_mature" : channel_mature,
    "channel_partner" : channel_partner,
    "channel_views" : channel_views,
    "stream_viewers" : viewers})
# Show the first 6 rows as a sanity check.
toplivestreams.head(5+1)
# -
toplivestreams.to_csv(csvpath+'toplivestreams.csv', index = False, header = True)
# pd.Panel was deprecated in pandas 0.20 and removed in 1.0, so the original
# `df = pd.Panel(live_streams[0])` raises on modern pandas. Inspect the
# stream as a DataFrame instead (df is not used again in this file).
df = pd.DataFrame.from_dict(live_streams[0], orient = 'index')
# Fetch the top 100 videos (network call) and inspect one record.
top_videos = client.videos.get_top(limit = 100)
pprint(top_videos[1])
# +
# Flatten each top-video record (video stats plus its channel's stats) into
# parallel lists, then assemble them into the 'topvideos' DataFrame.
channels1 = []
game_name1 = []
views1 = []
vid_length = []
vid_title = []
vid_total_views = []
channel_created_at1 = []
channel_followers1 = []
channel_id1 = []
channel_display_name1 = []
channel_game1 = []
channel_lan1 = []
channel_mature1 = []
channel_partner1 = []
channel_views1 = []
channel_description1 = []
for game in top_videos:
    channel_created_at1.append(game.channel.created_at)
    channel_followers1.append(game.channel.followers)
    channel_game1.append(game.channel.game)
    channel_lan1.append(game.channel.language)
    channel_mature1.append(game.channel.mature)
    channel_partner1.append(game.channel.partner)
    channel_views1.append(game.channel.views)
    channel_description1.append(game.channel.description)
    channel_id1.append(game.channel.id)
    channel_display_name1.append(game.channel.display_name)
    views1.append(game.views)
    vid_length.append(game.length)
    vid_title.append(game.title)
    # total watch time in hours (views * length seconds / 3600), 2 decimals
    vid_total_views.append(round(((game.views*game.length)/(60*60)),2))
topvideos = pd.DataFrame({
    "vid_title":vid_title,
    "vid_length":vid_length,
    "video_views" : views1,
    "total_view_time-calc-hours":vid_total_views,
    # BUG FIX: the original passed 'channel_id' (the live-streams list) here,
    # pairing top-video rows with live-stream channel ids — and raising a
    # length-mismatch error whenever the two result sets differ in size.
    # Use the top-video list ('channel_id1') like every other column.
    "channel_id":channel_id1,
    "channel_display_name":channel_display_name1,
    "channel_description" : channel_description1,
    "channel_created_at" : channel_created_at1,
    "channel_followers" : channel_followers1,
    "channel_game" : channel_game1,
    "channel_lan" : channel_lan1,
    "channel_mature" : channel_mature1,
    "channel_partner" : channel_partner1,
    "channel_views" : channel_views1,
    })
# Show the first 6 rows as a sanity check.
topvideos.head(5+1)
# -
topvideos.to_csv(csvpath+'topvideos.csv', index = False, header = True)
# Game frequency among the top live streams vs. top videos.
toplivestreams.channel_game.value_counts()
topvideos.channel_game.value_counts()
# NOTE(review): 'toplivestreamgames' is not defined anywhere in this file, and
# the client attribute is 'streams' (plural) elsewhere — this line will raise
# NameError as written; verify what summary was intended here.
gamesummary = client.stream.get_summary(toplivestreamgames[0])
# Channels that appear in BOTH the top-videos and top-live-streams extracts.
topvidchan = topvideos.channel_display_name.unique()
topstreamchan = toplivestreams.channel_display_name.unique()
topchan = set(topvidchan).intersection(topstreamchan)
topchan
# Fetch the Twitch ingest server list, then derive the location names.
# BUG FIX: these notebook cells were exported out of execution order —
# 'serverlocations' and 'servers' were used before being defined, which
# raises NameError when this script form is run top to bottom. Reordered
# so the fetch happens first.
servers = client.ingests.get_server_list()
pprint(servers)
serverlocations = []
for server in servers:
    serverlocations.append(server.name)
serverlocations
len(serverlocations)
| TwitchAPIMining.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WeatherPy
# ----
#
# #### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
from scipy import stats
import json
import csv
import datetime as dt
# import scipy.stats as sts
# Import API key
from api_keys import api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
output_data_file = "../output_data/cities.csv"
# Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
# ()=tuples/cannot change numbers
# -
# BUG FIX: the bare `pip install citipy` line below was notebook/shell syntax,
# not Python — in this script form it is a SyntaxError that prevents the whole
# file from being parsed. Run it in a notebook cell (or `pip install citipy`
# in a shell) if citipy is missing.
# # !pip install citipy
# ## Generate Cities List
# +
# Draw 1500 random (lat, lng) coordinate pairs over the configured ranges and
# keep the unique nearest cities, preserving first-seen order.
lat_lngs = []
cities = []

# Sample latitudes and longitudes uniformly within their ranges.
lats = np.random.uniform(lat_range[0], lat_range[1], size=1500)
lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500)
lat_lngs = zip(lats, lngs)

# Map each coordinate pair to its nearest city, de-duplicating as we go.
for lat_val, lng_val in lat_lngs:
    nearest = citipy.nearest_city(lat_val, lng_val).city_name
    if nearest not in cities:
        cities.append(nearest)

# Report how many distinct cities we ended up with.
len(cities)
# -
print(cities)
# +
# OpenWeatherMap request setup: imperial units, API key from api_keys.py.
# The per-city query parameter (&q=...) is appended inside the fetch loop.
units= "imperial"
# unitc="metric"
url= "http://api.openweathermap.org/data/2.5/weather?"
query_url=f"{url}appid={api_key}&units={units}"
# query_url=f"{url}appid={api_key}&q={city}&units={units}"
# query_url=f"{url}appid={api_key}&q={city}&units={unitc}"
# api.openweathermap.org/data/2.5/weather?q={city name},{state code}&appid={API key}
# api.openweathermap.org/data/2.5/weather?q={city name},{country}&appid={API key}
# +
# cities_prac1=cities[0:5]
# cities_prac1
# +
# cities_prac=['Denver','Boston','Pietown','Los Angeles','Oregon','Quito','Kampala', 'Padang']
# cities_prac
# -
# Parallel result lists, one entry per successfully fetched city (filled by
# the API loop below).
cityp=[]
max_temp=[]
lat=[]
lng=[]
humidity=[]
cloudiness=[]
wind_speed=[]
country=[]
date=[]
# +
# API calls: fetch current weather for every candidate city. Cities the API
# does not recognize are missing the expected response fields, raise
# KeyError, and are skipped.
for city in cities:
    try:
        response = requests.get(f"{query_url}&q={city}").json()
        cityp.append(response['name'])
        max_temp.append(response['main']['temp_max'])
        lat.append(response['coord']['lat'])
        lng.append(response['coord']['lon'])
        humidity.append(response['main']['humidity'])
        cloudiness.append(response['clouds']['all'])
        wind_speed.append(response['wind']['speed'])
        country.append(response['sys']['country'])
        date.append(response['dt'])
    except (KeyError):
        print("Missing_data")
        # NOTE(review): the 1 s pause only runs on the KeyError path, so
        # successful calls are not rate-limited — confirm that is intended.
        time.sleep(1)
# City_ID,City,Cloudiness,Country,Date,Humidity,Lat,Lng,Max Temp,Wind Speed
# -
cityp
# ### Perform API Calls
# * Perform a weather check on each city using a series of successive API calls.
# * Include a print log of each city as it's being processed (with the city number and city name).
#
# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame
# create a data frame from the parallel per-city result lists
weather_dict = {
    "City": cityp,
    "Latitude": lat,
    "Longitude": lng,
    "Max_temp": max_temp,
    "Humidity": humidity,
    "Cloudiness": cloudiness,
    "Wind_speed": wind_speed,
    "Country": country,
    "Date": date
}
weather_data = pd.DataFrame(weather_dict)
weather_data
weather_data.describe()
# ## Inspect the data and remove the cities where the humidity > 100%.
# ----
# Skip this step if there are no cities that have humidity > 100%.
# +
# Get the indices of cities that have humidity over 100%.
# NOTE(review): the strict '< 100' filter also drops readings of exactly
# 100% — the instructions above only ask to remove > 100%; confirm intent.
weather_data= weather_data.loc[(weather_data['Humidity'])<100,:]
weather_data
# -
# Make a new DataFrame equal to the city data to drop all humidity outliers by index.
# Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data".
len(weather_data)
# Persist the cleaned city table for the companion notebooks.
weather_data.to_csv(output_data_file)
# ## Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.
# ## Latitude vs. Temperature Plot
# +
# Build a scatter plot for each data type
plt.scatter(weather_data["Latitude"], weather_data["Max_temp"], marker="o")
# Incorporate the other graph properties
plt.title("Latitude vs. Max Temperature(03/19/21)")
plt.ylabel("Max Temperature (F)")
plt.xlabel("Latitude")
plt.grid(True)
# # Save the figure
plt.savefig("../output_data/CitylatitudevsMaxTemp.png")
# # Show plot
plt.show()
# -
# ## Latitude vs. Humidity Plot
# +
# Create a Scatter Plot for humidity vs latitude
x_values = weather_data['Latitude']
y_values = weather_data['Humidity']
plt.scatter(x_values,y_values)
plt.xlabel('Latitude')
plt.title("Latitude vs. Humidity(03/19/21)")
plt.ylabel('Humidity(%)')
plt.grid(True)
plt.savefig("../output_data/CitylatitudevsHumidity.png")
plt.show()
# -
# ## Latitude vs. Cloudiness Plot
# +
# Build a scatter plot for each data type
plt.scatter(weather_data["Latitude"], weather_data["Cloudiness"], marker="o")
# Incorporate the other graph properties
plt.title("Latitude vs. Cloudiness(03/19/21)")
plt.ylabel("Cloudiness(%)")
plt.xlabel("Latitude")
plt.grid(True)
# # Save the figure
plt.savefig("../output_data/CitylatitudevsCloudiness.png")
# # Show plot
plt.show()
# -
# ## Latitude vs. Wind Speed Plot
# +
# Build a scatter plot for each data type
plt.scatter(weather_data["Latitude"], weather_data["Wind_speed"], marker="o")
# Incorporate the other graph properties
plt.title("Latitude vs. Wind Speed(03/19/21)")
plt.ylabel("Wind Speed (mph)")
plt.xlabel("Latitude")
plt.grid(True)
# # Save the figure
plt.savefig("../output_data/CitylatitudevsWindSpeed.png")
# # Show plot
plt.show()
# -
# ## Linear Regression
# +
# # Use API to determine actual temperature
# response = requests.get(query_url + "Florence").json()
# florence_actual_temp = response['main']['temp']
# print(f"The actual temperature of Florence is {florence_actual_temp}")
# Inspect the column dtypes of the assembled weather table.
weather_data.dtypes
# -
# Split the cities by hemisphere on the sign of the latitude. Cities sitting
# exactly on the equator (Latitude == 0) fall in neither subset.
is_north = weather_data["Latitude"] > 0
is_south = weather_data["Latitude"] < 0
northernhem = weather_data.loc[is_north, :]
southernhem = weather_data.loc[is_south, :]
# northernhem.head()
southernhem.head()
# # Northern Hemisphere - Max Temp vs. Latitude Linear Regression
# +
# Least-squares fit of Max Temp on Latitude for northern-hemisphere cities,
# overlaid on the scatter with the fitted equation annotated.
x_values = northernhem['Latitude']
y_values = northernhem['Max_temp']
northernhem.plot(kind="scatter", x="Latitude", y="Max_temp")
(slope, intercept, rvalue, pvalue, stderr) = stats.linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(10,0),fontsize=15,color="red")
plt.title("Northern Hemisphere - Max Temp vs. Lat. Linear Regression")
plt.xlabel('Latitude')
plt.ylabel('Max Temp.')
print(f"The r-squared is: {rvalue**2}")
print("The Linear Regression has a negative slope, stating that the Max Temp. of cities drops as we get \nfurther away from the equator, therefor these cities tend to be cooler or colder the \nfurther they are from the equator.")
plt.savefig("../output_data/NorthernHem-LatitudevsMaxTempLR.png")
plt.show()
# -
# #### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
# +
# Same fit for southern-hemisphere cities; axes are clamped for readability.
x_values = southernhem['Latitude']
y_values = southernhem['Max_temp']
southernhem.plot(kind="scatter", x="Latitude", y="Max_temp")
(slope, intercept, rvalue, pvalue, stderr) = stats.linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(x_values,regress_values,"r-")
plt.title("Southern Hemisphere - Max Temp vs. Lat. Linear Regression")
plt.xlabel('Latitude')
plt.ylabel('Max Temp.')
plt.xlim(-60,5)
plt.ylim(40,100)
print(f"The r-squared is: {rvalue**2}")
plt.annotate(line_eq,(-30,47),fontsize=15,color="red")
plt.savefig("../output_data/SouthernHem-LatitudevsMaxTempLR.png")
print("The Linear Regression has a positive slope, stating that the Max Temp. of cities rises as we get closer \nto the equator, therefor these cities are hotter as they get closer to the equator.")
plt.show()
# -
# #### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# +
# Least-squares fit of Humidity on Latitude, northern hemisphere.
x_values = northernhem['Latitude']
y_values = northernhem['Humidity']
northernhem.plot(kind="scatter", x="Latitude", y="Humidity")
(slope, intercept, rvalue, pvalue, stderr) = stats.linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(x_values,regress_values,"r-")
plt.title("Northern Hemisphere - Humidity(%) vs. Lat. Linear Regression")
plt.xlabel('Latitude')
plt.ylabel('Humidity(%)')
print(f"The r-squared is: {rvalue**2}")
plt.annotate(line_eq,(45,15),fontsize=15,color="red")
print("The Linear Regression has a small positive slope, although the variation between the cities and humidity is scattered \namongst the board in the Northern Hemisphere.")
plt.savefig("../output_data/NorthernHem-LatitudevsHumidityLR.png")
plt.show()
# -
# #### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# +
# Least-squares fit of Humidity on Latitude, southern hemisphere.
x_values = southernhem['Latitude']
y_values = southernhem['Humidity']
southernhem.plot(kind="scatter", x="Latitude", y="Humidity")
(slope, intercept, rvalue, pvalue, stderr) = stats.linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(-55,18),fontsize=15,color="red")
plt.title("Southern Hemisphere - Humidity(%) vs. Lat. Linear Regression")
plt.xlabel('Latitude')
plt.ylabel('Humidity(%)')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/SouthernHem-LatitudevsHumidityLR.png")
print("The Linear Regression has a small positive slope, although the variation between cities and humidity is less \nscattered in the Southern Hemisphere than the Northern Hemisphere this chart tells us that the levels in Humidity \nare much higher in the Southern areas closest to the equator ")
plt.show()
# -
# #### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# +
# Least-squares fit of Cloudiness on Latitude, northern hemisphere.
x_values = northernhem['Latitude']
y_values = northernhem['Cloudiness']
northernhem.plot(kind="scatter", x="Latitude", y="Cloudiness")
(slope, intercept, rvalue, pvalue, stderr) = stats.linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(x_values,regress_values,"r-")
plt.title("Northern Hemisphere - Cloudiness(%) vs. Lat. Linear Regression")
plt.xlabel('Latitude')
plt.ylabel('Cloudiness(%)')
print(f"The r-squared is: {rvalue**2}")
plt.annotate(line_eq,(5,15),fontsize=15,color="red")
plt.savefig("../output_data/NorthernHem-LatitudevsCloudinessLR.png")
print("The Linear Regression has a slight positive slope, although the variation between cities and cloudiness is scattered \nin the Northern Hemisphere amongst the board, seems like there is clouds everywhere in the Northern Hemisphere.")
plt.show()
# -
# #### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# +
# Least-squares fit of Cloudiness on Latitude, southern hemisphere.
x_values = southernhem['Latitude']
y_values = southernhem['Cloudiness']
southernhem.plot(kind="scatter", x="Latitude", y="Cloudiness")
(slope, intercept, rvalue, pvalue, stderr) = stats.linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(x_values,regress_values,"r-")
plt.title("Southern Hemisphere - Cloudiness(%) vs. Lat. Linear Regression")
plt.xlabel('Latitude')
plt.ylabel('Cloudiness(%)')
print(f"The r-squared is: {rvalue**2}")
plt.annotate(line_eq,(-50,50),fontsize=15,color="red")
plt.savefig("../output_data/SouthernHem-LatitudevsCloudinessLR.png")
print("The Linear Regression has a strong positive slope, the distribution between cities and cloudiness in the Southern \nHemisphere states that there are more clouds in the Southern Hemishpere closest to the equator.")
plt.show()
# -
# #### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
# +
# Least-squares fit of Wind Speed on Latitude, northern hemisphere.
x_values = northernhem['Latitude']
y_values = northernhem['Wind_speed']
northernhem.plot(kind="scatter", x="Latitude", y="Wind_speed")
(slope, intercept, rvalue, pvalue, stderr) = stats.linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(x_values,regress_values,"r-")
# plt.annotate(line_eq,(10,4),fontsize=15,color="red")
plt.title("Northern Hemisphere - Wind Speed(mph) vs. Lat. Linear Regression")
plt.xlabel('Latitude')
plt.ylabel('Wind Speed(mph)')
print(f"The r-squared is: {rvalue**2}")
plt.annotate(line_eq,(5,25),fontsize=15,color="red")
plt.savefig("../output_data/NorthernHem-LatitudevsWindSpeedLR.png")
print("The Linear Regression has a small positive slope close to no slope at all, seems as if the wind speeds are evenly \ndistributed amongst the board, stating that in the Northern Hemisphere the winds are similar wherever you are \nregardless if you are close or far away from the equator.")
plt.show()
# -
# #### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
# +
# Least-squares fit of Wind Speed on Latitude, southern hemisphere.
x_values = southernhem['Latitude']
y_values = southernhem['Wind_speed']
southernhem.plot(kind="scatter", x="Latitude", y="Wind_speed")
(slope, intercept, rvalue, pvalue, stderr) = stats.linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.plot(x_values,regress_values,"r-")
# BUG FIX: the title said "Northern Hemisphere" (copy-paste from the previous
# cell) even though this cell plots the southern subset and saves the
# southern-hemisphere figure.
plt.title("Southern Hemisphere - Wind Speed(mph) vs. Lat. Linear Regression")
plt.xlabel('Latitude')
plt.ylabel('Wind Speed(mph)')
print(f"The r-squared is: {rvalue**2}")
plt.annotate(line_eq,(-23,20),fontsize=15,color="red")
plt.savefig("../output_data/Southernhem-LatitudevsWindSpeedLR.png")
print("The Linear Regression has a negative slope telling us that the wind speeds are lower in the Southern Hemisphere as we get closer to the equator. That could be due to the fact that it is more humid in these zones, and/or vice versa, \nit is more humid because there is less wind. ")
plt.show()
| code/WeatherPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="_8rd0TCYNwpo" colab_type="text"
# # **Title: Comparison of Data Mining and Natural Language Processing Techniques for Crash Analysis**
#
# 1. **GRU_Word2Vec_Unbalanced**
# 3. **GRU_Word2Vec_Balanced_Pos**
# 4. **LSTM_Word2Vec_Balanced**
# 5. **LSTM_Word2Vec_Unbalanced**
# 6. **LSTM_Word2Vec_Balanced_Pos**
# 7. **SVM_CF_Unbalanced**
# 8. **SVM_CF_Balanced_Pos**
# 9. **LGR_CF Balanced**
# 10. **Bigram NoisyOR**
#
# **Bigram NoisyOR**
#
#
#
# * Word2Vec=Pretrained GoogleNews-vectors-negative300
# * Balanced= keep all the workzone crashes and pull same amount of non workzone crashes from random sampling
# * Pos = keep only the sentences which have a positive unigram, taken from **Unigram-NoisyOR**
#
#
#
#
#
#
#
#
#
#
#
#
#
#
# + id="Y0niNyFn-cd3" colab_type="code" outputId="11df0cb6-75b4-45db-8f68-7816df6c2690" executionInfo={"status": "ok", "timestamp": 1592203974109, "user_tz": 300, "elapsed": 26094, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13014345754275758358"}} colab={"base_uri": "https://localhost:8080/", "height": 122}
# Mount Google Drive so the pretrained embeddings and CSVs under
# /content/drive are reachable from this Colab runtime.
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="e_Awfr5D1Og1" colab_type="text"
#
# # **Dependencies**
#
#
# + id="Ur8S4b_OdlKE" colab_type="code" outputId="40ba35e4-37f9-4221-df72-225597a2108b" executionInfo={"status": "ok", "timestamp": 1592203977567, "user_tz": 300, "elapsed": 3441, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13014345754275758358"}} colab={"base_uri": "https://localhost:8080/", "height": 119}
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, GRU, Embedding,LSTM
from keras.layers import Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.preprocessing.text import Tokenizer
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.metrics import f1_score
from sklearn.metrics import roc_curve
import string
import nltk
import math
nltk.download('averaged_perceptron_tagger')
nltk.download('stopwords')
from nltk.corpus import stopwords
import pandas as pd
import numpy as np
import os
# + [markdown] id="OHeIFUV21jIA" colab_type="text"
# # **Load google word-embedding**
#
# + id="Q2k2mpOd0_2c" colab_type="code" outputId="bbc842a9-d0aa-4b66-cec9-28339b019bf8" executionInfo={"status": "ok", "timestamp": 1592204055432, "user_tz": 300, "elapsed": 75416, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13014345754275758358"}} colab={"base_uri": "https://localhost:8080/", "height": 105}
from gensim.models import KeyedVectors
import logging
# Surface gensim's progress messages while the large binary loads.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# Pretrained 300-dimensional GoogleNews word2vec vectors (binary format).
word2vec_model = KeyedVectors.load_word2vec_format('/content/drive/My Drive/New/GoogleNews-vectors-negative300.bin', binary=True)
# + [markdown] id="xlGrGVVS19xN" colab_type="text"
# # **Data processing**
# + [markdown] id="88hlVP944gj1" colab_type="text"
# Write a function to process the data
# + id="yPI2iioh4hVW" colab_type="code" colab={}
def clean_doc(textlist):
    """Normalize a list of crash narratives for NLP.

    For each narrative (a multi-sentence string) this lower-cases the
    text, strips punctuation, drops non-alphabetic tokens, removes
    English stop words, and discards one-character tokens. Returns the
    cleaned narratives as space-joined strings, same order and length
    as *textlist*.
    """
    # Hoist loop invariants: the punctuation table and the stop-word set
    # are identical for every narrative, so build them once instead of
    # once per document (the stopwords load in particular is expensive).
    table = str.maketrans('', '', string.punctuation)
    stop_words = set(stopwords.words('english'))
    # list to store the clean data
    output = []
    # read by narratives. Here, a narrative is a string of multiple sentences.
    for doc in textlist:
        # make all lower case
        doc = doc.lower()
        # split on whitespace into raw tokens
        tokens = doc.split()
        # remove all punctuation characters from each token
        tokens = [w.translate(table) for w in tokens]
        # keep alphabetic tokens only (drops numbers and emptied strings)
        tokens1 = [word for word in tokens if word.isalpha()]
        # remove stop words
        tokens2 = [w for w in tokens1 if not w in stop_words]
        # drop 1-character tokens (len >= 2 survives) and re-join the narrative
        tokens3 = ' '.join([word for word in tokens2 if len(word) >= 2])
        output.append(tokens3)
    return output
# + [markdown] id="cspe14Rq4sGn" colab_type="text"
# Call the data to process
#
# > Trainning data 2017-2018 which includes reported WZ and NWZ. WZ=1 and NWZ=0.
#
#
# 1. Delete all records that has null values in construction and narrative columns
# 2. Format date field for data analysis
#
#
#
#
# + id="97oVEcgYdoMf" colab_type="code" colab={}
# Train and Test file name
trainFile='/content/drive/My Drive/New/Train_raw_data/TrainingDataSet_Balanced.csv'
testFile='/content/drive/My Drive/New/Train_raw_data/Random_200_Manual Review_WZ_NWZ_6_10.csv'
# Training set: 2017-2018 crashes, work-zone (CONSZONE=1) and non-work-zone, balanced.
traindf=pd.read_csv(trainFile)
# drop rows with null label or narrative; they would break the classifier below
traindf=traindf.dropna(subset=['CONSZONE','OFFRNARR'])
# + [markdown] id="cO8BodkqYkXh" colab_type="text"
# Clean the data and Prepare train and test data
# + id="GmrzdGLXX6kL" colab_type="code" outputId="99ca2591-4f9f-4ae8-eacc-807cd9f91041" executionInfo={"status": "ok", "timestamp": 1592204822932, "user_tz": 300, "elapsed": 2074, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13014345754275758358"}} colab={"base_uri": "https://localhost:8080/", "height": 156}
###### Train data
# no need. you can use it for result analysis, or make query by date
'''traindf['CRSHDATE']=pd.to_datetime(traindf['CRSHDATE'])'''
# use only the narrative as train data and make it a list/array;
# x_train_tok keeps work-zone narratives only (used to fit the tokenizer)
x_train_text=traindf["OFFRNARR"].values
x_train_tok=traindf[traindf.CONSZONE==1]["OFFRNARR"].values
# print a narrative before data cleaning
print ("Before Data cleaning:\n", x_train_text[0])
#clean the data
x_train_text=clean_doc(x_train_text)
x_train_tok=clean_doc(x_train_tok)
# print the clean data
print ("After Data cleaning:\n",x_train_text[0])
# labels: CONSZONE with work zone = 1, non work zone = 0 (mapped upstream)
y_train_text=traindf["CONSZONE"].values
###### Test data
# 200 manually reviewed 2019 crashes; ground truth is the Manual_Review column
testdf=pd.read_csv(testFile)
# drop rows with null label or narrative, as above
testdf=testdf.dropna(subset=['CONSZONE','OFFRNARR'])
testdf.to_csv('test2019.csv',index=None)
# do the same cleaning for test data
x_test_text=testdf["OFFRNARR"].values
x_test_text=clean_doc(x_test_text)
y_test_text=testdf["Manual_Review"].values
print(y_test_text.shape)
# print number of narratives for train and test
print (len(x_train_text),len(x_test_text),len(x_train_tok))
# + [markdown] id="9CHMUqFq8lTi" colab_type="text"
# # **Tokenization and word vectorization**
# + id="mVffOZ-veEBL" colab_type="code" outputId="bf2b8e83-3926-48bf-9fce-e303996d4796" executionInfo={"status": "ok", "timestamp": 1592210551064, "user_tz": 300, "elapsed": 640, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13014345754275758358"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
# Apply tokenizer on the train data only. it will create words/tokens from train data only. so any new words in test data will be converted into 0 and be used as padding value
tokenizer=Tokenizer(num_words=None)
tokenizer.fit_on_texts(x_train_tok)
count_thres = 1000
# NOTE(review): Tokenizer.word_index maps word -> frequency RANK (1 = most
# frequent), not occurrence count. 'c < count_thres' therefore deletes the
# 999 MOST frequent words rather than rare ones. If a minimum-count filter
# was intended, iterate tokenizer.word_counts instead — verify.
low_count_words = [w for w,c in tokenizer.word_index.items() if c < count_thres]
for w in low_count_words:
    del tokenizer.word_index[w]
# vectorize all three text sets with the pruned vocabulary
x_train_tokk=tokenizer.texts_to_sequences(x_train_tok)
x_train_tokens=tokenizer.texts_to_sequences(x_train_text)
x_test_tokens=tokenizer.texts_to_sequences(x_test_text)
# sequence-length statistics over the work-zone-only training narratives
num_tokens=[len(tokens) for tokens in x_train_tokk]
num_tokens=np.array(num_tokens)
max_tokens=np.max(num_tokens)
# mean + 3 std is used as the fixed (padded/truncated) sequence length
max_tokens_3std = math.ceil(np.mean(num_tokens) + 3* np.std(num_tokens))
pad='post'
x_train_pad = pad_sequences(x_train_tokens, maxlen=max_tokens_3std,padding=pad, truncating=pad)
x_test_pad = pad_sequences(x_test_tokens, maxlen=max_tokens_3std,padding=pad, truncating=pad)
print(x_train_pad[10])
# + [markdown] id="4fSJS4Lh9HGq" colab_type="text"
# Analyze the length of vector as well as Narrative length to decide the vector length
# + id="twlHbcuheOFj" colab_type="code" outputId="a9be6cd2-f52c-4e1a-be5d-0befc1982533" executionInfo={"status": "ok", "timestamp": 1590696857707, "user_tz": 300, "elapsed": 339, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13014345754275758358"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
# Recompute the length statistics over the FULL training set (not just the
# work-zone subset) to sanity-check the chosen sequence length.
num_tokens=[len(tokens) for tokens in x_train_tokens]
num_tokens=np.array(num_tokens)
max_tokens=np.max(num_tokens)
print ("maximum Narrative length after data cleaning:",max_tokens)
max_tokens_3std = math.ceil(np.mean(num_tokens) + 3* np.std(num_tokens))
print ("average Narrative length at 3rd STD after data cleaning:", max_tokens_3std)
# + [markdown] id="1HP3bZjD-c03" colab_type="text"
# Apply the 3rd STD value as the maximum narrative length.
# + id="P1ZQ7f8-eRJS" colab_type="code" outputId="d3fc0951-c362-47fb-d1b4-be23f5032198" executionInfo={"status": "ok", "timestamp": 1590702591232, "user_tz": 300, "elapsed": 414, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13014345754275758358"}} colab={"base_uri": "https://localhost:8080/", "height": 136}
# apply a fixed sequence length; 'post' pads/truncates at the END of each
# sequence (the original comment said "beginning", which 'pre' would do)
pad='post'
x_train_pad = pad_sequences(x_train_tokens, maxlen=max_tokens_3std,padding=pad, truncating=pad)
x_test_pad = pad_sequences(x_test_tokens, maxlen=max_tokens_3std,padding=pad, truncating=pad)
#print(x_train_pad)
# next apply classification algorithm.
# + [markdown] id="-xu0pgTg_Atd" colab_type="text"
# Write functions to apply word embedding. NUM_WORDS is the number of rows in the embedding matrix. As we are using Google word embeddings, I chose an embedding size of 300. Extra rows are reserved to store the dimensions for the words for which no pretrained word embeddings exist
# + id="W6sZK-nVeoOq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 238} outputId="36fc2e80-9540-4f4e-e657-14f3421c3ffb" executionInfo={"status": "ok", "timestamp": 1592207213616, "user_tz": 300, "elapsed": 244, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13014345754275758358"}}
# Number of rows in the embedding matrix.  NOTE(review): the markdown above
# says "+1" but the code reserves 300 extra rows; kept as-is for compatibility
# with the Embedding layer built below.
num_words = len(tokenizer.word_index) + 300

def getVector(word):
    """Return the pretrained word2vec vector for `word`, or None if absent."""
    # Renamed parameter from `str`, which shadowed the builtin.
    if word in word2vec_model:
        return word2vec_model[word]
    return None

def isInModel(word):
    """Return True if `word` has a pretrained word2vec embedding."""
    return word in word2vec_model

embedding_size = 300  # dimensionality of the Google word2vec vectors
embedding_matrix = np.zeros((num_words, embedding_size))
for word, i in tokenizer.word_index.items():
    # BUG FIX: the original guard was `i >= embedding_size` (300), which left
    # every word with vocabulary index >= 300 as an all-zero row even though
    # the matrix has num_words rows.  The guard must bound the ROW index.
    if i >= num_words:
        continue
    try:
        embedding_matrix[i] = word2vec_model[word]
    except KeyError:
        # Words missing from word2vec get a small random vector instead of zeros.
        embedding_matrix[i] = np.random.normal(0, np.sqrt(0.25), embedding_size)
print(embedding_matrix[6514])
# + id="yOq7WuQg2e5N" colab_type="code" outputId="8d2fa50f-eed2-4886-8652-d155ab981445" executionInfo={"status": "ok", "timestamp": 1592207459205, "user_tz": 300, "elapsed": 242961, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13014345754275758358"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Two stacked GRU layers on top of frozen pretrained word2vec embeddings,
# ending in a sigmoid unit for binary classification.
model=Sequential()
model.add(Embedding(input_dim=num_words,output_dim=embedding_size,input_length=max_tokens_3std,name='layer_embedding',weights=[embedding_matrix],trainable=False))
model.add(GRU(units=32, return_sequences=True))
model.add(GRU(units=32))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
# NOTE(review): 'TopFrequency' and 'model_name' are string LITERALS here, so the
# checkpoint is always saved as 'best_modelTopFrequencymodel_name.hdf5'.  The
# evaluation() function below interpolates the actual variables instead —
# confirm which behaviour is intended.
mc = ModelCheckpoint('best_model'+str('TopFrequency')+'model_name'+'.hdf5', monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
es = EarlyStopping(monitor='val_accuracy', mode='max', verbose=1,patience=10)
# Hold out 15% of the training data for validation; stop early after 10 epochs
# without improvement in validation accuracy.
model.fit(x_train_pad, y_train_text,validation_split=0.15, epochs=80, batch_size=32, callbacks=[es,mc])
# + [markdown] id="9cP0XsRZ_sMh" colab_type="text"
# # **Develop the Deep Learning Model**
#
#
# 1. num_words is the length of vocabulary
# 2. List item
#
#
# + id="3URU3SvcsloY" colab_type="code" colab={}
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import SGDClassifier,LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import SGDClassifier,LogisticRegression
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
import matplotlib.pylab as plt
import operator
def plot_roc(clf, model_name, y_test, y_pred):
    """Add one model's ROC curve (AUC shown in the legend) to the current figure.

    y_pred must hold positive-class probabilities/scores, not hard labels.
    """
    # BUG FIX: only roc_auc_score is imported at the top of this section, so
    # roc_curve would raise NameError; import it locally (harmless if it was
    # also imported in an earlier, unseen cell).
    from sklearn.metrics import roc_curve
    fpr, tpr, _ = roc_curve(y_test, y_pred)
    plt.plot(fpr, tpr,label=model_name+': ROC curve (area = %0.2f)' % roc_auc_score(y_test, y_pred))
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.legend(loc="lower right")
def print_scores(clf, X_train, y_train, X_test, y_test):
    """Fit `clf` on the training split and print F1 and ROC-AUC on the test split."""
    # BUG FIX: f1_score is not imported anywhere in this section; import it
    # locally to avoid NameError (roc_auc_score is imported above).
    from sklearn.metrics import f1_score
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # NOTE(review): '{:3f}' is width-3 with default 6 decimals — probably meant
    # '{:.3f}'; left unchanged to preserve output.
    print('F1 score: {:3f}'.format(f1_score(y_test, y_pred)))
    print('AUC score: {:3f}'.format(roc_auc_score(y_test, y_pred)))
def vectorizebyCount(text, maxx_features):
    """Fit a bag-of-words vectoriser on `text` and return it with the counts.

    English stop words are removed and the vocabulary is capped at the
    `maxx_features` most frequent terms.  Returns (vectorizer, doc-term matrix).
    """
    vectorizer = CountVectorizer(max_features=maxx_features, stop_words='english')
    doc_term_counts = vectorizer.fit_transform(text)
    return vectorizer, doc_term_counts
#optimizer = Adam(lr=1e-3)
def evaluation(model_name_list, x_trains,y_trains, x_tests,y_tests,test_split,test_file_df,TopFrequency):
    """Train and evaluate each requested model on bag-of-words features.

    model_name_list : subset of {'LGR','NB','SVC','RF','Knn','GRU'}
    x_trains/y_trains, x_tests/y_tests : raw texts and binary labels
    test_split : unused inside this function (kept for interface compatibility)
    test_file_df : dataframe copied and extended with one prediction column per model
    TopFrequency : vocabulary size passed to the CountVectorizer

    Side effects: prints confusion-matrix counts, draws all ROC curves on one
    shared matplotlib figure, and (for 'GRU') writes model checkpoints to disk.
    Also reads notebook globals (y_test_text, word2vec_model, Tokenizer,
    pad_sequences, plot_roc, vectorizebyCount, ...).
    """
    outputdf=test_file_df.copy()
    #for model_name in model_name_list:
    #    outputdf[model_name]=np.NaN
    # NOTE(review): uses the global y_test_text rather than the y_tests
    # parameter — confirm this is intentional.
    validation=int(y_test_text.shape[0]*.15)
    tempList=[999]*validation  # placeholder predictions (see commented lines below)
    plt.figure(figsize=(10,5))
    for model_name in model_name_list:
        # Bag-of-words features restricted to the TopFrequency most common terms.
        cv,X = vectorizebyCount(x_trains,TopFrequency)
        tx=cv.transform(x_tests)
        x_train, x_test, y_train, y_test= X,tx,y_trains,y_tests
        #x_train, x_test, y_train, y_test= X,tx[validation:],y_trains,y_tests[validation:]
        if model_name=='LGR':
            # Logistic-regression baseline.
            clf = LogisticRegression(random_state=0).fit(x_train, y_train)
            y_pred = clf.predict_proba(x_test)
            y_pred=[i[1] for i in y_pred ]  # keep positive-class probability only
            tn, fp, fn, tp=confusion_matrix(y_test, clf.predict(x_test)).ravel()
            outputdf[model_name]=np.array(y_pred)
            #outputdf[model_name]=np.array(tempList+y_pred)
            print("tn, fp, fn, tp",tn, fp, fn, tp)
            #resultList.append([y_test,y_pred])
            plot_roc(clf,model_name, y_test,y_pred)
            print (model_name+" done")
        elif model_name=='NB':
            # GaussianNB requires dense input, so densify the sparse matrices.
            x_train,x_test=x_train.toarray(), x_test.toarray()
            clf= GaussianNB().fit(x_train, y_train)
            y_pred=clf.predict_proba(x_test)
            y_pred=[i[1] for i in y_pred ]
            tn, fp, fn, tp=confusion_matrix(y_test, clf.predict(x_test)).ravel()
            outputdf[model_name]=np.array(y_pred)
            #outputdf[model_name]=np.array(tempList+y_pred)
            print("tn, fp, fn, tp",tn, fp, fn, tp)
            # resultList.append([i[1] for i in pr])
            plot_roc(clf,model_name, y_test,y_pred)
            print (model_name+" done")
        elif model_name=='SVC':
            # Linear SVM; probability=True enables predict_proba (slower to fit).
            clf= SVC(kernel='linear',probability=True).fit(x_train, y_train)
            y_pred=clf.predict_proba(x_test)
            y_pred=[i[1] for i in y_pred ]
            tn, fp, fn, tp=confusion_matrix(y_test, clf.predict(x_test)).ravel()
            outputdf[model_name]=np.array(y_pred)
            #outputdf[model_name]=np.array(tempList+y_pred)
            print("tn, fp, fn, tp",tn, fp, fn, tp)
            # resultList.append([i[1] for i in pr])
            plot_roc(clf,model_name, y_test,y_pred)
            print (model_name+" done")
        elif model_name=='RF':
            # Random-forest baseline with default hyperparameters.
            clf= RandomForestClassifier().fit(x_train, y_train)
            y_pred=clf.predict_proba(x_test)
            y_pred=[i[1] for i in y_pred ]
            tn, fp, fn, tp=confusion_matrix(y_test, clf.predict(x_test)).ravel()
            outputdf[model_name]=np.array(y_pred)
            #outputdf[model_name]=np.array(tempList+y_pred)
            print("tn, fp, fn, tp",tn, fp, fn, tp)
            # resultList.append([i[1] for i in pr])
            plot_roc(clf,model_name, y_test,y_pred)
            print (model_name+" done")
        elif model_name=='Knn':
            # k-nearest-neighbours baseline (k=8).
            clf= KNeighborsClassifier(n_neighbors=8).fit(x_train, y_train)
            y_pred=clf.predict_proba(x_test)
            y_pred=[i[1] for i in y_pred ]
            tn, fp, fn, tp=confusion_matrix(y_test, clf.predict(x_test)).ravel()
            outputdf[model_name]=np.array(y_pred)
            #outputdf[model_name]=np.array(tempList+y_pred)
            print("tn, fp, fn, tp",tn, fp, fn, tp)
            #resultList.append([i[1] for i in pr])
            #resultList.append(clf.predict(x_test))
            plot_roc(clf,model_name, y_test,y_pred)
            #skplt.metrics.plot_roc_curve(y_test,pr)
            #plt.show()
            print (model_name+" done")
        elif model_name=="GRU":
            # Build a fresh tokenizer for the neural model.
            tokenizer=Tokenizer(num_words=None)
            # NOTE(review): fitting on the two-word list ['constructions','zoo']
            # looks like leftover debug code — the tokenizer almost certainly
            # should be fitted on x_trains; confirm.
            tokenizer.fit_on_texts(['constructions','zoo'])
            count_thres = 2
            '''low_count_words = [w for w,c in tokenizer.word_counts.items() if c < count_thres]
            for w in low_count_words:
                del tokenizer.word_index[w]'''
            # NOTE(review): this filters on the word INDEX (c is the rank), not
            # the word COUNT, so it deletes the single most frequent word; the
            # commented version above used word_counts — confirm intent.
            low_count_words = [w for w,c in tokenizer.word_index.items() if c < count_thres]
            for w in low_count_words:
                del tokenizer.word_index[w]
                del tokenizer.word_docs[w]
                del tokenizer.word_counts[w]
            # NOTE(review): `x_train_tok` is not defined in this function or
            # anywhere visible — likely should be `x_trains`; as written this
            # line raises NameError when the GRU branch runs.
            x_train_tokk=tokenizer.texts_to_sequences(x_train_tok)
            x_train_tokens=tokenizer.texts_to_sequences(x_trains)
            x_test_tokens=tokenizer.texts_to_sequences(x_tests)
            num_tokens=[len(tokens) for tokens in x_train_tokk]
            num_tokens=np.array(num_tokens)
            max_tokens=np.max(num_tokens)
            #max_tokens_3std = math.ceil(np.mean(num_tokens) + 3* np.std(num_tokens))
            pad='post'
            x_train_pad = pad_sequences(x_train_tokens, maxlen=max_tokens,padding=pad, truncating=pad)
            x_test_pad = pad_sequences(x_test_tokens, maxlen=max_tokens,padding=pad, truncating=pad)
            num_words=len(tokenizer.word_index)+5
            print(num_words)
            embedding_size=300
            embedding_matrix = np.zeros((num_words, embedding_size))
            for word, i in tokenizer.word_index.items():
                # NOTE(review): this guard compares the row index against the
                # embedding dimension (300), not num_words — rows >= 300 stay
                # all-zero; the standalone cell above has the same issue.
                if i>=embedding_size:
                    continue
                try:
                    #print(word)
                    embedding_vector = word2vec_model[word]
                    embedding_matrix[i] = embedding_vector
                except KeyError:
                    # NOTE(review): std=0 makes this an all-zero vector, unlike
                    # the sqrt(0.25) fallback used earlier — confirm intended.
                    embedding_matrix[i]=np.random.normal(0,0,embedding_size)
            # Two stacked GRUs on frozen embeddings, sigmoid output for binary labels.
            model=Sequential()
            model.add(Embedding(input_dim=num_words,output_dim=embedding_size,input_length= max_tokens,name='layer_embedding',weights=[embedding_matrix],trainable=False))
            model.add(GRU(units=32, return_sequences=True))
            model.add(GRU(units=32))
            model.add(Dense(1, activation='sigmoid'))
            model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
            mc = ModelCheckpoint('best_model'+str(TopFrequency)+model_name+'.hdf5', monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
            es = EarlyStopping(monitor='val_accuracy', mode='max', verbose=1,patience=10)
            model.fit(x_train_pad, y_trains,validation_split=0.15, epochs=80, batch_size=32, callbacks=[es,mc])
            #model.fit(x_train_pad, y_trains,validation_data=(x_test_pad[:validation], y_tests[:validation]), epochs=80, batch_size=32, callbacks=[es,mc])
            y_pred=model.predict(x_test_pad)
            #y_pred=model.predict(x_test_pad[validation:])
            # Threshold the sigmoid outputs at 0.5 for the confusion matrix.
            pr=[1 if i>0.5 else 0 for i in y_pred]
            tn, fp, fn, tp=confusion_matrix(y_tests, pr).ravel()
            #tn, fp, fn, tp=confusion_matrix(y_tests[validation:], pr).ravel()
            #print((tempList)+[i for i in y_pred])
            outputdf[model_name]=np.array([i[0] for i in y_pred])
            #outputdf[model_name]=np.array(tempList+[i[0] for i in y_pred])
            print("tn, fp, fn, tp",tn, fp, fn, tp)
            plot_roc(model,model_name, y_tests,y_pred)
            #plot_roc(model,model_name, y_tests[validation:],y_pred)
            print (model_name+" done")
        else:
            print ("Warning!!The {} model has not been developed yet.".format(model_name))
    #outputdf.to_csv('test_prediction_TopFrequency('+str(TopFrequency)+').csv',index=False)
# + id="vbRiDX4UjVbQ" colab_type="code" outputId="dc0100af-071b-4cdb-efab-7278a6dae7ce" executionInfo={"status": "ok", "timestamp": 1592211254377, "user_tz": 300, "elapsed": 30447, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13014345754275758358"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Run the full benchmark: the GRU plus four classical baselines, using the
# 500 most frequent terms as the bag-of-words vocabulary.
modelname_list=['GRU','NB','LGR','RF','Knn']
evaluation(modelname_list, x_train_text,y_train_text,x_test_text,y_test_text,0.15,testdf,500)
# + id="rxQtwBQoe1Ou" colab_type="code" colab={}
# Inspect a copy of the test dataframe (the bare expression on the last line
# is echoed by the notebook, it has no effect in a plain script).
m=testdf.copy()
m
# + id="0_PyD341KLnR" colab_type="code" outputId="f0facd42-f8a5-4e55-dd31-e570da3a867c" executionInfo={"status": "ok", "timestamp": 1591582478771, "user_tz": 300, "elapsed": 9829, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13014345754275758358"}} colab={"base_uri": "https://localhost:8080/", "height": 296}
#x=df3.OFFRNARR.values
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import roc_auc_score
# Baseline: TF-IDF features (5000 most informative terms) + logistic regression.
cv = TfidfVectorizer(max_features=5000)
X = cv.fit_transform(x_train_text)
tx = cv.transform(x_test_text)
# Densify once.  (The original also built `d`, a second dense copy of the
# training matrix that was never used — removed to halve peak memory.)
x_train, x_test, y_train, y_test = X.toarray(), tx.toarray(), y_train_text, y_test_text
clf = LogisticRegression().fit(x_train, y_train)
pr = clf.predict(x_test)
results = confusion_matrix(y_test, clf.predict(x_test))
print(roc_auc_score(y_test, pr))
# NOTE(review): plot_roc expects positive-class probabilities, but `pr` holds
# hard 0/1 labels here, so the "curve" collapses to a single step — consider
# clf.predict_proba(x_test)[:, 1].
plot_roc(clf, 'model_name', y_test, pr)
# + id="u8tYLknGrByI" colab_type="code" outputId="60f68ef8-9018-47a8-f891-1b22f269914c" executionInfo={"status": "ok", "timestamp": 1592208695870, "user_tz": 300, "elapsed": 300, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13014345754275758358"}} colab={"base_uri": "https://localhost:8080/", "height": 272}
# Scratch cell: print the dict values strictly between 2 and the maximum.
m={'a':1,'b':2,'c':3,'d':4,'e':5}
n=max(m.values())
for j,i in m.items():
    # Hoisted: compare against the precomputed maximum `n` instead of
    # recomputing max(m.values()) on every iteration.
    if i>2 and i<n:
        print(i)
print(np.random.normal(0,0,embedding_size))
# --- ML_NLP_Word2Vec.ipynb ends here; the next notebook follows ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/cltl/python-for-text-analysis/blob/colab/Chapters-colab/Chapter_14_Reading_and_writing_text_files.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="XMjRZ9SWCmjW"
# %%capture
# !wget https://github.com/cltl/python-for-text-analysis/raw/master/zips/Data.zip
# !wget https://github.com/cltl/python-for-text-analysis/raw/master/zips/images.zip
# !wget https://github.com/cltl/python-for-text-analysis/raw/master/zips/Extra_Material.zip
# !unzip Data.zip -d ../
# !unzip images.zip -d ./
# !unzip Extra_Material.zip -d ../
# !rm Data.zip
# !rm Extra_Material.zip
# !rm images.zip
# + [markdown] id="D2qStBHnClao"
# # Chapter 14: Reading and writing text files
# *We use some materials from [this other Python course](https://github.com/kadarakos/python-course).*
# + [markdown] id="_0_B2ZkzClar"
# In this chapter, you will learn how to read data from files, do some analysis, and write the results to disk.
#
# Reading and writing files are quite an essential part of programming, as it is the first step for your program to communicate with the outside world. In most cases, you will write programs that take data from some source, manipulates it in some way, and writes some results out somewhere.
#
# For example, if you would write a survey, you could take input from participants on a webserver and save their answers in some files or in a database. When the survey is over, you would read these results in and do some analysis on the data you have collected, maybe do some visualizations and save your results.
#
# In **Natural Language Processing (NLP)**, you often process files containing raw texts with some code and write the results to some other file.
#
# ### At the end of this chapter, you will be able to:
#
# * open one or multiple text files
# * work with the modules `os` and `glob`
# * read the contents of a file
# * write new or manipulated content to new (or existing) files
# * close a file
#
#
# ### If you want to learn more about these topics, you might find the following links useful:
#
# * [Video: File Objects - Reading and Writing to Files](https://www.youtube.com/watch?v=Uh2ebFW8OYM)
# * [Video: Automate Parsing and Renaming of Multiple Files](https://www.youtube.com/watch?v=ve2pmm5JqmI)
# * [Video: OS Module - Use Underlying Operating System Functionality](https://www.youtube.com/watch?v=tJxcKyFMTGo)
# * [Blog post: 6 Ways the Linux File System is Different From the Windows File System](http://www.howtogeek.com/137096/6-ways-the-linux-file-system-is-different-from-the-windows-file-system/)
# * [Blog post: Gotcha — backslashes in Windows filenames](https://pythonconquerstheuniverse.wordpress.com/2008/06/04/gotcha-%E2%80%94-backslashes-in-windows-filenames/)
#
# If you have **questions** about this chapter, please contact us **(<EMAIL>)**.
# + [markdown] id="quNTV_-mClas"
# # 1. Reading a file
# + [markdown] id="z65miz9SClas"
# In Python, you can read the content of a file, store it as the type of object that you need (string, list, etc.) and manipulate it (e.g., replacing or removing words). You can also write new content to an existing or a new file.
#
# Here, we will discuss how to:
#
# * open a file
# * read in the content
# * store the context in a variable (to do something), e.g., as a string or list
# * close the file
# + [markdown] id="2XHHh0_tClas"
# ## 1.1. File paths
# + [markdown] id="pfg_Qd-XClat"
# To open a file, we need to associate the file on disk with a variable in Python. First, we tell Python **where the file is stored on your disk**. The **location of your file** is often referred to as the **file path**.
#
# Python will start looking in the 'working' or 'current' directory (which often will be where your Python script is). If it's in the working directory, you only have to tell Python the name of the file (e.g., `charlie.txt`). If it's not in the working directory, as in our case, you have to tell Python the exact path to your file. We will create a string variable to store this information:
# + id="6rhU8NztClau"
# Relative path from the notebook's working directory to the sample text file.
filename = "../Data/Charlie/charlie.txt"
# The double dots mean 'go up one level in the directory tree'.
# + [markdown] id="PES_xSV2Clav"
# Sometimes you see double dots at the beginning of the file path; this means 'the parent of the current directory'. When writing a file path, you can use the following:
#
# - / means the root of the current drive;
# - ./ means the current directory;
# - ../ means the parent of the current directory.
#
# Consider the directory tree below.
#
# * If you want to go from your current working directory (**cwd**) to the one directly above (dir3), your path is ../.
# * If you want to go to dir1, your path is ../../
# * If you want to go to dir5, your path is ../dir5/
# * If you want to go to dir2, your path is ../../dir2/
#
# You will learn how to navigate your directory tree quite intuitively with a bit of practice. If you have any doubts, it is always a good idea to follow a quick tutorial on basic command-line operations.
# + colab={"base_uri": "https://localhost:8080/", "height": 394} id="m5GsVuTCDYUw" outputId="c904b61f-b677-4552-f0cc-22dc81b96b1f"
# Display the directory-tree illustration referenced in the text above.
from IPython.display import Image
Image('images/directory_tree.png')
# + [markdown] id="iGUar1FrClaw"
# **Navigating your directory tree on Windows**
#
# Also, note that the formatting of file paths is different across operating systems. The file path, as specified above, should work on any UNIX platform (Linux, Mac). If you are using Windows, however, you might run into problems when formatting file paths in this way **outside of this notebook**, because Windows uses backslashes instead of forward slashes (Jupyter Notebook should already have taken care of these problems for you). In that case, it might be useful to have a look at [this page](http://www.howtogeek.com/137096/6-ways-the-linux-file-system-is-different-from-the-windows-file-system/) about the differences between the file systems, and at [this page](https://pythonconquerstheuniverse.wordpress.com/2008/06/04/gotcha-%E2%80%94-backslashes-in-windows-filenames/) about solving this problem in Python. In short, it's probably best if you use the code below (we will talk about the `os` module in more detail later today). This is very useful to know if you are a Windows user, and it will become relevant for the final assignment.
# + id="o9rSyRktClaw"
# For windows:
import os
# normpath converts the forward slashes to the platform's separator
# (backslashes on Windows), so the same literal works everywhere.
windows_file_path = os.path.normpath("C:/somePath/someFilename") # Use forward slashes
# + [markdown] id="UmoAHYSdClaw"
# ## 1.2 Opening a file
# + [markdown] id="cFZI9jR2Clax"
# We can use the file path to tell Python which file to open by using the built-in function `open()`. The `open()` function does not return the actual text that is saved in the text file. It returns a 'file object' from which we can read the content using the `.read()` function (more on this later). We pass three arguments to the `open()` function:
#
# * the **path to the file** that you wish to open
# * the **mode**, a combination of characters explaining the purpose of the file opening (like read or write) and type of content stored in the file (like textual or binary format). For instance, if we are reading a plain text file, we can use the characters 'r' (represents read-mode) and 't' (represents plain text-mode).
# * the last argument, a keyword argument (**encoding**), specifies the encoding of the text file, but you can forget about this for now.
#
# The most important **mode** arguments the `open()` function can take are:
#
# * **r** = Opens a file for reading only. The file pointer is placed at the beginning of the file.
# * **w** = Opens a file for writing only. Overwrites the file if the file exists. If the file does not exist, creates a new file for writing.
# * **a** = Opens a file for appending. The file pointer is at the end of the file if the file exists. If the file does not exist, it creates a new file for writing. Use it if you would like to add something to the end of a file
#
# Then, to open the file 'charlie.txt' for reading purposes, we use the following:
# + id="lIKeIGH9Clax"
# Minimal open/close cycle: open() returns a file object, not the text itself.
filepath = "../Data/Charlie/charlie.txt"
infile = open(filepath, "r") # 'r' stands for READ mode
# Do something with the file
infile.close() # Close the file (you can ignore this for now)
# + [markdown] id="Y0Xq_vARClax"
# Overview of possible mode arguments (the most important ones are 'r' and 'w'):
#
# | Character | Meaning |
# | --------- | ------- |
# |'r' | open for reading (default)|
# |'w' | open for writing, truncating the file first|
# |'x' | open for exclusive creation, failing if the file already exists|
# |'a' | open for writing, appending to the end of the file if it exists|
# |'b' | binary mode|
# |'t' | text mode (default)|
# |'+' | open a disk file for updating (reading and writing)|
# |'U' | universal newlines mode (deprecated)|
#
# + [markdown] id="iAuk5_u_Clax"
# So far, we have opened the file. This, however, does not yet show us the file content. Try printing 'infile':
# + colab={"base_uri": "https://localhost:8080/"} id="m2kicXa2Clay" outputId="8f1a1635-f0a0-454a-f249-ba998203ff6f"
# Printing the file object shows a TextIOWrapper, not the file's contents.
infile = open("../Data/Charlie/charlie.txt" , "r")
print(infile)
infile.close()
# + [markdown] id="t1j73Ds0Clay"
# This `TextIOWrapper` thing is Python's way of saying it has *opened* a connection to the file `charlie.txt`. To actually see its content, we need to tell python to read the file.
# + [markdown] id="5ORUESITClaz"
# ## 1.3 Reading a file
# + [markdown] id="m3qJjtWpClaz"
# Here, we will discuss three ways of reading the contents of a file:
#
# * `read()`
# * `readlines()`
# * `readline()`
# + [markdown] id="vp_doXi8Claz"
# ### 1.3.1 `read()`
# + [markdown] id="qiFiUKZNClaz"
# The `read()` method is used to access the **entire text in a file**, which we can assign to a variable. Consider the code below.
#
# The variable `content` now holds the entire content of the file `charlie.txt` as a single string, and we can access and manipulate it just like any other string. When we are done with accessing the file, we use the `close()` method to close the file.
# + colab={"base_uri": "https://localhost:8080/"} id="PT-4cowKClaz" outputId="18f4f58e-c462-4cd3-90e9-6448bf876bde"
# Opening the file using the filepath and the 'read' mode:
infile = open("../Data/Charlie/charlie.txt" , "r")
# Reading the file using the `read()` function and assigning it to the variable `content`
content = infile.read()
print(content)
print()
print('This function returns a', type(content))
# closing the file (more on this below)
infile.close()
# + [markdown] id="qzntQU5lCla0"
# ### 1.3.2 `readlines()`
# + [markdown] id="EXf19wsiCla0"
# The `readlines()` function allows you to access the content of a file as a list of lines. This means it splits the text in a file at the newline characters ('\n') for you:
# + colab={"base_uri": "https://localhost:8080/"} id="0F1BrZXwCla0" outputId="c8482309-b726-481e-a3c0-db706872468c"
# Opening the file using the filepath and the 'read' mode:
infile = open("../Data/Charlie/charlie.txt" , "r")
# Reading the file as a list of lines using `readlines()` and assigning it to `lines`
lines = infile.readlines()
print(lines)
print()
print('This function returns a', type(lines))
# closing the file
infile.close()
# + [markdown] id="towY9DO1Cla0"
# Now you can, for example, use a for-loop to print each line in the file (note that the second line is just a newline character):
# + colab={"base_uri": "https://localhost:8080/"} id="yx-srm2lCla0" outputId="1f48590e-94a7-4ec9-db74-c6cc53b7d343"
# Each element of `lines` still ends with its newline character, so print()
# adds an extra blank line between entries.
for line in lines:
    print("LINE:", line)
# + [markdown] id="CJ8jHtzxCla0"
# **Important note**
#
# When we open a file, we can only use one of the read operations **once**. If we want to read it again, we have to open a new file variable. Consider the code below:
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="d0F7XO_5Cla1" outputId="dca9b14d-127e-4e41-aa38-1f54f36a7bf7"
# After read() has consumed the file, the position sits at end-of-file, so the
# subsequent readlines() returns an empty list.
infile = open("../Data/Charlie/charlie.txt" , "r")
content = infile.read()
lines = infile.readlines()
print(content)
print(lines)
infile.close()
# + [markdown] id="i2T_JieyCla1"
# The code returns an empty list. To fix this, we have to open the file again:
# + colab={"base_uri": "https://localhost:8080/"} id="PBEoh6A7Cla1" outputId="3519d7a0-1916-47f1-d0cb-47b0ec9b1849"
filepath = "../Data/Charlie/charlie.txt"
infile = open(filepath , "r")
content = infile.read()
# Re-opening resets the read position to the start of the file.
# NOTE(review): the first file object is never closed before being rebound.
infile = open(filepath, "r")
lines = infile.readlines()
print(content)
print(lines)
infile.close()
# + [markdown] id="A6mBVIvGCla1"
# ### 1.3.3 `readline()`
# + [markdown] id="uB2JBOvWCla1"
# The third operation `readline()` returns the next line of the file, returning the text up to and including the next newline character (*\n*, or *\r\n* on Windows). More simply put, this operation will read a file line-by-line. So if you call this operation again, it will return the next line in the file. Try it out below!
# + colab={"base_uri": "https://localhost:8080/"} id="b1lpqi5CCla2" outputId="6f4f86d7-60ba-4100-b9e2-f8716ca90398"
# readline() returns one line per call; this first call returns line 1.
filepath = "../Data/Charlie/charlie.txt"
infile = open(filepath, "r")
next_line = infile.readline()
print(next_line)
# + colab={"base_uri": "https://localhost:8080/"} id="AKnipdn1Cla2" outputId="87d2a9d3-2470-413b-96f4-c1f0e5b05aa2"
# Calling readline() again advances to and returns the next line.
next_line = infile.readline()
print(next_line)
# + colab={"base_uri": "https://localhost:8080/"} id="D149NsE6Cla2" outputId="46d576b6-0a32-43d2-8d14-b9746e7a7d90"
# Third call returns the third line; then the file is closed.
next_line = infile.readline()
print(next_line)
infile.close()
# + [markdown] id="3i3gXFQkCla2"
# **Which function to choose**
#
# For small files that you want to load entirely, you can use one of these three methods (readline, read, or readlines). Note, however, that we can also simply do the following to read a file line by line (this is recommended for larger files and when we are really only interested in a small portion of the file):
# + colab={"base_uri": "https://localhost:8080/"} id="7vVE4XP3Cla3" outputId="0ef2e6d9-6f51-48d7-c80f-af0c8b0eae1f"
# Iterating the file object directly reads it line by line (memory-friendly
# for large files).
# NOTE(review): this cell uses `filename` (set near the top of the chapter),
# while the neighbouring cells use `filepath` — both point at charlie.txt.
infile = open(filename, "r")
for line in infile:
    print(line)
infile.close()
# + [markdown] id="kqSAGYwSCla3"
# Note the last line of this code snippet: `infile.close()`. This closes our file, which is a very important operation. This prevents Python from keeping files that are unnecessary anymore still open. In the next subchapter, we will also see a more convenient way to ensure files get closed after their usage.
# + [markdown] id="ygx4wyyHCla3"
# ## 1.4. Closing the file
#
# Here, we will introduce closing a file with the method `close()` and using a **context manager** to open and close files. After reading the contents of a file, the `TextWrapper` no longer needs to be open since we have stored the content as a variable. In fact, it is good practice to close the file as soon as you do not need it anymore.
# + [markdown] id="IRPjd7RhCla3"
# ### 1.4.1 `close()`
# + [markdown] id="SPyU6kFGCla3"
# We do this by using the `close()` method as already shown several times above.
# + colab={"base_uri": "https://localhost:8080/"} id="SyAfAk2RCla3" outputId="d10e28a6-a0ba-4937-ac15-0ee754e20ee9"
filepath = "../Data/Charlie/charlie.txt"
# open file
infile = open(filepath , "r")
# assign content to a variable
content = infile.read()
# close file as soon as the content is captured
infile.close()
# do whatever you want with the content, e.g. print it:
print(content)
# + [markdown] id="AjRxgiccCla4"
# ### 1.4.2 Using a context manager
# + [markdown] id="9Xy6IsseCla4"
# There is actually an easier (and preferred) way to make sure that the file is closed as soon as you don't need it anymore, namely using what is called a **context manager**. Instead of using `open()` and `close()`, we use the syntax shown below.
#
# The main advantage of using the with-statement is that it automatically closes the file once you leave the local context defined by the indentation level. If you 'manually' open and close the file, you risk forgetting to close the file. Therefore, context managers are considered a best-practice, and we will use the with-statement in all of our following code.
#
#
# **From now on, we highly recommend using a context manager in your code.**
# + colab={"base_uri": "https://localhost:8080/"} id="QvMi5zboCla4" outputId="037b1ab8-d86e-4ec4-a3af-688d5996ffb8"
# Preferred pattern: the with-statement closes the file automatically when the
# indented block is left, even if an exception occurs.
filepath = "../Data/Charlie/charlie.txt"
with open(filepath, "r") as infile:
    # the file is only open here
    # get content while file is open
    content = infile.read()
# the context manager took care of closing the file again
# we can now work with the content without having to worry about
# closing the file
print(content)
# + [markdown] id="tw3U57EsCla4"
# # 2 Manipulating file content
# + [markdown] id="9NX0APj_Cla4"
# Once your file content is loaded in a Python variable, you can manipulate its content as you can manipulate any other variable. You can edit it, add/remove lines, count word occurrences, etc. Let's say we read the file content in a list of its lines, as shown below. Note that we can use all of the different methods for reading files in the context manager.
# + colab={"base_uri": "https://localhost:8080/"} id="BsYZzvckCla4" outputId="8992d3a4-0b2f-4ee6-c601-288624769c79"
# readlines() also works inside the with-block; the list survives after the
# file has been closed.
filepath = "../Data/Charlie/charlie.txt"
with open(filepath, "r") as infile:
    lines = infile.readlines()
print(lines)
# + [markdown] id="8WEIWPSwCla4"
# Then we can for instance preserve only the first 2 lines of the file, in a new variable:
# + colab={"base_uri": "https://localhost:8080/"} id="CD4GHi7bCla4" outputId="4112ba43-0d45-44b2-ed51-99024e818f71"
# Slice off the first two lines (the bare expression is echoed by Jupyter).
first_two_lines=lines[:2]
first_two_lines
# + [markdown] id="-C3S1SBcCla5"
# We can count the lines that are longer than 15 characters:
# + colab={"base_uri": "https://localhost:8080/"} id="zWFuD1IUCla5" outputId="edcf5698-df46-4d6b-b20f-6d716d1fb325"
# Count how many lines contain more than 15 characters.
counter = sum(1 for line in lines if len(line) > 15)
print(counter)
# + [markdown] id="j7JCYbmiCla5"
# We will soon see how to perform text processing once we have loaded the file, by using an external module in the next chapter. But let's first store the modified text in a new file to preserve the changes.
# + [markdown] id="BQH4o8ghCla5"
# # 3 Writing files
# + [markdown] id="s8oT5qBjCla5"
# To write content to a file, we can open a new file and write the text to this file by using the **`write()`** method. Again, we can do this by using the context manager. Remember that we have to specify the **mode** using **`w`**.
# + [markdown] id="3yP7wF2hCla5"
# Let's first slightly adapt our Charlie story by replacing the names in the text:
# + id="N47_S_MMCla5"
filepath = "../Data/Charlie/charlie.txt"
# read in file and assign content to the variable content
with open(filepath, "r") as infile:
    content = infile.read()
# manipulate content
your_name = "<NAME>" #type in your name
friends_name = "a b" #type in the name of a friend
# Replace all instances of Charlie Bucket with your name and save it in new_content
new_content = content.replace("Charlie Bucket", your_name)
# Replace all instances of <NAME> with your friends name and save it in new_new_content
new_new_content = new_content.replace("<NAME>", friends_name)
# + [markdown] id="ZF7frzoqCla6"
# We can now save the manipulated content to a new file:
# + id="ieCZn8VqCla6"
filename = "../Data/Charlie/charlie_new.txt"
# "w" creates the file (or overwrites an existing one); write() saves the text.
with open(filename, "w") as outfile:
    outfile.write(new_new_content)
# + [markdown] id="Uv1ljCVACla6"
# Open the file `charlie_new.txt` in the folder `../Data/Charlie` in any text editor and read a personalized version of the story!
# + [markdown] id="iE9Rd_dXCla6"
# **Note about append mode (`a`):**
#
# The third mode of opening a file is *append* ('a'). If the file 'charlie_new.txt' does not exist, then append and write act the same: they create this new file and fill it with content. The difference between write and append occurs when this file would exist. In that case, the write mode overwrites its content, while the append mode adds the new content at the end of the existing one.
# + [markdown] id="AK-5FI_iCla6"
# # 4 Reading and writing multiple files
# + [markdown] id="O8EJHvYkCla6"
# You will often have multiple files to work with. The folder ../Data/Dreams contains 10 text files describing dreams of Vickie, a 10-year-old girl. These texts are extracted from [DreamBank](http://www.dreambank.net/).
#
# To process multiple files, we often want to *iterate* over a list of files. These files are usually stored in one or multiple directories on your computer.
#
# Instead of writing out every single file path, it is much more convenient to iterate over all the files in the directory `../Data/Dreams`. So we need to find a way to tell Python: "I want to do something with all these files at this location!"
#
# There are two modules which make dealing with multiple files a lot easier.
#
# * glob
# * os
#
# We will introduce them below.
# + [markdown] id="h3zhNSg8Cla6"
# ## 4.1 The `glob` module
# + [markdown] id="8Y1loyU2Cla6"
# The `glob` module is very useful to find all the pathnames matching a specified pattern according to the rules used by the Unix shell. You can use two wildcards: the asterisk (`*`) and the question mark (`?`). An asterisk matches zero or more characters in a segment of a name, while the question mark matches a single character in a segment of a name.
#
# For example, the following code gives all filenames in the directory `../Data/dreams`:
# + id="llD2ib5YCla6"
import glob
# + colab={"base_uri": "https://localhost:8080/"} id="Egbro9l0Cla7" outputId="20c31433-a14c-4928-f88b-e6a442583af0"
# Print every entry (files and directories) directly under ../Data/Dreams.
for filename in glob.glob("../Data/Dreams/*"):
    print(filename)
# + [markdown] id="a6H5cSvqCla7"
# If we only want to consider text files and ignore everything else (here a file called 'IGNORE_ME!'), we can specify this in our search by only looking for files with the extension `.txt`:
# + colab={"base_uri": "https://localhost:8080/"} id="shScUS4ICla7" outputId="565743a4-6df1-4503-c0e5-343730fc93bd"
# Restrict the match to files ending in .txt (skips e.g. 'IGNORE_ME!').
for filename in glob.glob("../Data/Dreams/*.txt"):
    print(filename)
# + [markdown] id="5Df_aTtOCla7"
# A question mark (`?`) matches any single character in that position in the name. For example, the following code prints all filenames in the directory `../Data/dreams` that start with 'vickie' followed by exactly 1 character and end with the extension `.txt` (note that this will not print `vickie10.txt`):
# + colab={"base_uri": "https://localhost:8080/"} id="yHfqsn1XCla7" outputId="6952cea1-31da-4898-8974-c12616838e28"
# '?' matches exactly one character, so vickie1.txt..vickie9.txt match
# but vickie10.txt does not.
for filename in glob.glob("../Data/Dreams/vickie?.txt"):
    print(filename)
# + [markdown] id="olMowMcOCla7"
# You can also find filenames recursively by using the pattern `**` (the keyword argument `recursive` should be set to `True`), which will match any files and zero or more directories and subdirectories. The following code prints all files with the extension `.txt` in the directory `../Data/` and in all its subdirectories:
# + colab={"base_uri": "https://localhost:8080/"} id="FYPZyx1vCla7" outputId="c8f4aedd-29b4-4364-cdc3-4ff09766cebf"
# '**' with recursive=True descends into all subdirectories of ../Data/.
for filename in glob.glob("../Data/**/*.txt", recursive=True):
    print(filename)
# + [markdown] id="JLX-Rh6hCla8"
# ## 4.2 The `os` module
# + [markdown] id="v-RJMrr_Cla8"
# Another module that you will frequently see being used in examples is the `os` module. The `os` module has many features that can be very useful and which are not supported by the `glob` module. We will not go over each and every useful method here, but here's a list of some of the things that you can do (some of which we have seen above):
#
# - creating single or multiple directories: `os.mkdir()`, `os.makedirs()`;
# - removing single or multiple directories: `os.rmdir()`, `os.removedirs()`;
# - checking whether something is a file or a directory: `os.path.isfile()`, `os.path.isdir()`;
# - split a path and return a tuple containing the directory and filename: `os.path.split()`;
# - construct a pathname out of one or more partial pathnames: `os.path.join()`
# - split a filename and return a tuple containing the filename and the file extension: `os.path.splitext()`
# - get only the basename or the directory path: `os.path.basename()`, `os.path.dirname()`.
#
# Feel free to play around with these methods and figure out how they work yourself :-)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="aNKv_qcCCla8" outputId="239d354a-c9cf-4f88-9503-e66ddc043875"
# Start by importing the module:
import os
# let's use a filepath for testing it out:
filepath = "../Data/Charlie/charlie.txt"
# basename() strips the directory part, leaving just 'charlie.txt'
os.path.basename(filepath)
# + [markdown] id="NveFzki_Cla8"
# # Exercises
# + [markdown] id="i-XIMt0KCla8"
# **Exercise 1:**
#
# Write a program that opens `RedCircle.txt` in the `../Data/RedCircle` folder and prints its content as a single string:
# + id="AkB-hHYKCla8"
# your code here
# + [markdown] id="uI5kznrOCla8"
# **Exercise 2:**
#
# Write a program that opens `RedCircle.txt` in the `../Data/RedCircle` folder and prints a list containing all lines in the file:
# + id="OFwCAwCTCla8"
# your code here
# + [markdown] id="JmaaunPiCla8"
# **Exercise 3:**
#
# Create a counter dictionary like in block 2 (the dictionaries chapter), where you will count the number of occurrences of each word in a file.
# + id="S7vrmftoCla9"
# your code here
# + [markdown] id="hweEabgmCla9"
# **Exercise 4:**
#
# The module `os` implements functions that allow us to work with the operating system (see folder contents, change directory, etc.). Use the function `listdir` from the module `os` to see the contents of the current directory. Then print all the items that do not start with a dot.
# + id="4gKE9fN5Cla9"
# your code here
# + id="ibhwdP1mCla9"
| Chapters-colab/Chapter_14_Reading_and_writing_text_files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Monte Carlo on CCS for DFT energies (per cycle)
# +
import numpy as np
import os
from os.path import *
import pandas as pd
from scipy.stats.stats import pearsonr
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.ticker import FormatStrFormatter
from matplotlib.ticker import NullFormatter
from scipy.stats import gaussian_kde
# -
# Metadata for the 18 molecules in the study; all four lists are index-aligned
# (entry j of each list describes the same molecule).
names = ['Harmine', '1-Methylguanosine', 'Sphingosine', 'riboflavin', 'Mandelonitrile', 'Creatinine', 'Methyleugenol',
         'N6-methyladenosine', 'Cholic Acid', 'Astilbin', 'SDGRG', 'Biliverdin', 'Anthranilic acid', 'Aminohippuric acid',
         '3\'-O-methylguanosine', 'Sucrose', 'Naringin', 'PE 16:1/16:1']
# Ionization adduct used for each molecule ([M+H]+, [M+Na]+, or [M-H]-).
adducts = ['+H','+H','+H','+H','+H','+Na','+Na','+Na','+Na','+Na','+Na','+Na', '-H', '-H', '-H', '-H', '-H', '-H']
# Zero-padded molecule IDs used in the result-file names (molid{ID}_...).
molids = ['02', '03', '04', '05', '10', '11', '12', '14', '16', '17', '18', '19', '21', '22', '24', '25', '28', '29']
# Per-molecule plot colors (hex RGB, without the leading '#').
hexs = ['f90000', 'f95200', 'f98800', 'f9ae00', 'f9db00', 'ecf900', '9df900', '00f94a', '00f9c7', '00f4f9', '00b6f9',
        '007cf9', '0014f9', 'ae00f9', 'f900f9', 'f90091', 'a5a5a5', 'e3e3e3']
# ### compares BW of per cycle to across cycle (incomplete, not all molecules have run per-cycle MC ccs)
# +
# Compare per cycle boltzmann weighted averaged vs across cycle
plt.figure(figsize=(10,40))
plt.subplot(1,2,1)
plt.title('BW per cycle', fontsize=24)
plt.tick_params(axis='both', which='major', labelsize=20)
# Left panel: Boltzmann-weighted (BW) CCS from the per-cycle MC results.
for j, ID in enumerate(molids):
    file = f'../data/MC-ccs-results/molid{ID}_MC_ccs_iter1000_percycle.csv'
    # Not every molecule has per-cycle Monte Carlo results; skip missing ones.
    if os.path.exists(file):
        pass
    else:
        print(f'not found for {ID}')
        continue
    df = pd.read_csv(file)
    # Molecules 17 and 25 use a light grey error-bar base color.
    if ID == '17' or ID == '25':
        cbw = '#bfbfbf'
    else:
        cbw = '#000000'
    color = [cbw, '#545454', '#7a7878', '#bfbfbf']
    color2 = [f'#{hexs[j]}', '#bf4e7f', f'#{hexs[j]}', '#542339']
    # i = 0 selects the BW CCS column; column i+1 holds its standard deviation.
    i = 0
    #for i in evens:
    plt.scatter(df['nlst'], df[df.columns[i]], s=.1)
    plt.errorbar(df['nlst'], df[df.columns[i]], yerr=df[df.columns[i+1]],
                 fmt=color2[int(i/2)], ecolor=color[int(i/2)])
#-------------------------------
# Right panel: BW CCS computed across all cycles, for comparison.
ax = plt.subplot(1,2,2)
plt.title('BW across cycle', fontsize=24)
for j, ID in enumerate(molids):
    file = f'../data/MC-ccs-results/molid{ID}_MC_ccs_iter1000.csv'
    if os.path.exists(file):
        pass
    else:
        print(f'not found for {ID}')
        continue
    df = pd.read_csv(file)
    if ID == '17' or ID == '25':
        cbw = '#bfbfbf'
    else:
        cbw = '#000000'
    color = [cbw, '#545454', '#7a7878', '#bfbfbf']
    color2 = [f'#{hexs[j]}', '#bf4e7f', f'#{hexs[j]}', '#542339']
    i = 0
    #for i in evens:
    plt.scatter(df['nlst'], df[df.columns[i]], s=.1)
    plt.errorbar(df['nlst'], df[df.columns[i]], yerr=df[df.columns[i+1]], fmt=color2[int(i/2)], ecolor=color[int(i/2)])
plt.tick_params(axis='both', which='major', labelsize=20)
plt.show()
# -
# ### compares LE of per cycle to across cycle (incomplete, not all molecules have run per-cycle MC ccs)
# +
# Compare per cycle boltzmann weighted averaged vs across cycle
# (this cell plots the Lowest Energy (LE) CCS columns instead of BW).
plt.figure(figsize=(10,40))
plt.subplot(1,2,1)
plt.title('LE per cycle', fontsize=24)
plt.tick_params(axis='both', which='major', labelsize=20)
# Left panel: lowest-energy-conformer CCS from the per-cycle MC results.
for j, ID in enumerate(molids):
    file = f'../data/MC-ccs-results/molid{ID}_MC_ccs_iter1000_percycle.csv'
    # Skip molecules without per-cycle Monte Carlo results.
    if os.path.exists(file):
        pass
    else:
        print(f'not found for {ID}')
        continue
    df = pd.read_csv(file)
    if ID == '17' or ID == '25':
        cbw = '#bfbfbf'
    else:
        cbw = '#000000'
    color = [cbw, '#545454', '#7a7878', '#bfbfbf']
    color2 = [f'#{hexs[j]}', '#bf4e7f', f'#{hexs[j]}', '#542339']
    # i = 4 is only used to pick the color pair below; the data columns are
    # addressed by name ('Lowest Energy CCS' / 'lec std').
    i = 4
    #for i in evens:
    plt.scatter(df['nlst'], df['Lowest Energy CCS'], s=.1)
    plt.errorbar(df['nlst'], df['Lowest Energy CCS'], yerr=df['lec std'],
                 fmt=color2[int(i/2)], ecolor=color[int(i/2)])
#-------------------------------
# Right panel: LE CCS computed across all cycles, for comparison.
ax = plt.subplot(1,2,2)
plt.title('LE across cycle', fontsize=24)
for j, ID in enumerate(molids):
    file = f'../data/MC-ccs-results/molid{ID}_MC_ccs_iter1000.csv'
    if os.path.exists(file):
        pass
    else:
        print(f'not found for {ID}')
        continue
    df = pd.read_csv(file)
    if ID == '17' or ID == '25':
        cbw = '#bfbfbf'
    else:
        cbw = '#000000'
    color = [cbw, '#545454', '#7a7878', '#bfbfbf']
    color2 = [f'#{hexs[j]}', '#bf4e7f', f'#{hexs[j]}', '#542339']
    i = 4
    #for i in evens:
    plt.scatter(df['nlst'], df['Lowest Energy CCS'], s=.1)
    plt.errorbar(df['nlst'], df['Lowest Energy CCS'], yerr=df['lec std'], fmt=color2[int(i/2)], ecolor=color[int(i/2)])
plt.tick_params(axis='both', which='major', labelsize=20)
plt.show()
# -
# ### layered BW, LE, SA monte carlo on ccs, individual molecules, per-cycle (incomplete, not all molecules were run though MC ccs per-cycle)
# ### Note that while BW and LE had little or no distinguishable difference between across vs within cycles, SA for some molecules is wider for within-cycles.
# One figure per molecule: layered LE / BW / SA Monte Carlo CCS convergence.
for d, ID in enumerate(molids):
    ######
    #
    # Plot CCS selection techniques Monte Carlo simulation
    #
    #
    file = f'../data/MC-ccs-results/molid{ID}_MC_ccs_iter1000_percycle.csv'
    # Skip molecules that have no per-cycle Monte Carlo results.
    if os.path.exists(file):
        pass
    else:
        continue
    MC_df = pd.read_csv(file)
    # Column indices of the three CCS estimates (column i+1 is each one's std):
    # 4 = lowest energy (LE), 0 = Boltzmann weighted (BW), 6 = simple average (SA).
    evens = [4,0,6]
    color = ['#000000', '#545454', '#7a7878', '#bfbfbf']
    color2 = ['#ff66a8', '#bf4e7f', '#963f65', '#542339']
    fig = plt.figure(figsize = (8, 6))
    ax = fig.add_subplot(111)
    # Title like "Harmine [M+H]^+" built from the aligned metadata lists.
    plt.title(f'{names[d]} [M{adducts[d]}]$^{adducts[d][0]}$', fontsize=32)
    # y ticks span the LE CCS range, padded by the largest LE std.
    ytic = np.linspace(MC_df['Lowest Energy CCS'].min()-MC_df['lec std'].max(),
                       MC_df['Lowest Energy CCS'].max()+MC_df['lec std'].max(),
                       5)
    for i in evens:
        plt.scatter(MC_df['nlst'], MC_df[MC_df.columns[i]], s=.1)
        plt.errorbar(MC_df['nlst'], MC_df[MC_df.columns[i]], yerr=MC_df[MC_df.columns[i+1]],
                     fmt=color2[int(i/2)], ecolor=color[int(i/2)])
    plt.ylabel('CCS ($\AA^2$)', fontsize=28)
    plt.xlabel('Sample Size', fontsize=28)
    plt.xticks([0,700,1000])
    plt.yticks(ytic)
    plt.tick_params(axis='both', which='major', labelsize=22)
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
    plt.show()
| notebooks/MC_ccs_convergence_per_cycle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dictionaries
#
# In the previous lessons you used **lists** in your Python code.
# An object of **class list** is a container of ordered values.
#
# The fact that a **list** is *ordered* is a fundamental concept: **lists** work the best when the items stored there can be ordered in a natural and meaningful way.
#
# However, that's not always the case.
# +
# This list contains how much you earned each month of the last year (from January to December)
# The items have a meaningful order, so working with them it's easy
payslips = [100, 120, 100, 90, 100, 110, 100, 120, 95, 100, 90, 120]
june_index = 5
# june_index + 1 == 6, i.e. July (month indices are zero-based).
print("The pay in January is", payslips[0], "and the pay in july is", payslips[june_index + 1])
# This list contains consecutive observations of an experiment
# The order of the elements is important: if you mix them, they would provide a different result
obs = [1, 5, 10]
last = 0
for x in obs:
    if x > last:
        print("The observed value increased to", x)
    last = x
# This list contains the price of the tickets in different museums
# These items DO NOT have a meaningful natural order.
# They could be ordered in any way that it would not make any difference:
# you still need a "map" that tells you which element is in which position.
museum_tickets = [5, 2, 8]
science_museum_index = 0
# NOTE(review): 'musuem' is a typo in this variable name (kept as-is; renaming
# it would be a code change).
history_musuem_index = 1
art_museum_index = 2
print("The ticket for the science museum costs", museum_tickets[science_museum_index])
# -
# ### Dictionaries
#
# A **list** is not the only container **class** in Python. There are also **dictionaries**: a container **class** that is meant to solve the problem described above.
#
# A **dictionary** is a container of multiple **key-value** pairs.
#
# Think about what a dictionary is in your everyday experience: a dictionary is a book where, for each word ( the **key**) there is a description (the **value**). Each word is defined only once, but multiple words may have the same definition (e.g. if they are synonyms). Even if the dictionary is ordered (alphabetically), when you have to use it you don't say things like "I need the definition of the third word after this one", but rather "I need the definition of the word *cephalopod*".
#
# A Python **dictionary** has all the properties described above.
# +
# This is a dictionary
# Each key (the museum name) maps to a value (its ticket price); lookups use
# the key between square brackets instead of a positional index.
museum_tickets = {
    "science_museum" : 5,
    "history_museum" : 2,
    "art_museum" : 8
}
x = museum_tickets["science_museum"]
print("The ticket for the science museum costs", x)
print("The ticket for the art museum costs", museum_tickets["art_museum"])
# -
# Let's analyze how to create and use a **dictionary**.
#
# A **dictionary** is initialized using multiple key-value pairs between curly brackets `{`, `}`.
# First you have a **key**, then its corresponding **value**; between the **key** and the **value** there is a colon `:`.
# **Key-value** pairs are separated by commas `,`.
#
# In order to access a **value** in the **dictionary**, you use a variation of the **indexing operator** used for lists. The only difference is that you have to provide a **key** between brackets and not a position.
#
# Note that you can use different **classes** for **keys** and **values**: **keys** can be of any Python immutable **class**, i.e. **int**, **float**, **string**, but you can't use **lists** as **keys**, on the other hand **values** can be of any **class**, both mutable and immutable.
# ### Exercise
#
# Write a **dictionary** that allows to convert some numbers into their English text representation (i.e. `1` to `"one"`). Try to access some elements.
#
# Hints: differently from the example above, here **keys** are **int** and **values** are **string**.
# ### Checking for elements existence
#
# If you try to access an element of a **list** using a non-existing index you get an out-of-bound error.
# You should always check if an index is valid before using it to access an element of a container.
#
# For **lists**, this check consists in making sure that the index is less than the length of the **list**.
# +
my_list = [1, 10, 100]
my_indices = [1, 6]
for index in my_indices:
    # An index is valid only when it is smaller than the list length;
    # index 6 falls outside this 3-element list.
    if index < len(my_list):
        print("The index", index, "is valid and the element is", my_list[index])
    else:
        print("The index", index, "is out of bound, I can't use it")
# -
# The same problem applies also to **dictionaries**.
# If you try to **read** the value for a non-existing **key**, you will get an error.
# +
my_dict = { "a" : 10 }
# KeyError: key "b" does not exist in the dictionary
# (this cell is meant to fail, to demonstrate the error)
print(my_dict["b"])
# -
# Similarly to **lists** also **dictionaries** provide a way for checking if a **key** is valid or not.
# +
my_dict = { "a" : 10 }
my_key = "a"
# `key in dict` tests membership among the dictionary's keys.
if my_key in my_dict:
    print("The key", my_key, "has been found and the value is", my_dict[my_key])
# The syntax on the right hand side of the assignment operator evaluates to a boolean value
# That's why you can use it in an `if` statement above
# `in` is a boolean operator exactly as `>` or `!=`
found = "my_fancy_key" in my_dict
print("Is the key found?", found)
# -
# ### Exercise
#
# Encoding is an invertible operation that takes some data and represent it in a different format.
#
# Use the provided encoding **dictionary** to convert a sequence of characters into their encoded form.
# Note that not all values in the **list** have a valid encoding described in the **dictionary**. Encode any missing value as `0`.
# +
encoding_dict = {
"_" : 0,
"a" : 1,
"b" : 2,
"c" : 3,
"d" : 4
}
x = ["a", "d", "h", "b", "b", "_"] # This should encode to `[1, 4, 0, 2, 2, 0]`
# -
# ### Iterating over a dictionary
#
# You can use a `for` loop with a **list** object to iterate over all its values.
#
# The `for` loop can be used also with **dictionary** **objects**, but note that in this case the placeholder loop **variable** will represent each of the **keys** in the **dictionary**. The current **key** can then be used to access its corresponding value in the **dictionary**.
#
# The `enumerate()` **function** can't be used with **dictionaries** as the elements do not have a meaningful order.
# +
my_dict = {
    1 : "uno",
    2 : "dos",
    5 : "cinco",
    10 : "diez"
}
# Iterating a dictionary yields its keys; use each key to look up its value.
for k in my_dict:
    print(k, "corresponds to", my_dict[k])
# -
# ### Modifying a dictionary
#
# The **dictionary** is a mutable **class**, as the **list**.
# This means that you can modify the **value** for an existing **key** or add new **key-value** pairs.
#
# Using the **indexing operator** on the left hand side of an assignment, you can modify the value in a **list** at the position indicated by the index.
# The same applies to **dictionaries**, but you have to specify the **key** instead of the index.
# Note that you can't modify an existing **key** (that's why it's required that they are of an immutable **class**), but only its associated **value**.
#
# In a **list** you can add new elements at the end using the `append()` **method**.
# This does not exists for **dictionaries** because **key-value** pairs do not have a meaningful order.
# The same operator described above for modifying existing values, can be used to add new **key-value** pairs to a **dictionary**: if the provided **key** does not exist it will be automatically created.
# This is different from **lists** where the **indexing** operator would give you an out-of-bound error regardless if you are trying to **read** or **write** values.
#
# Remember that **keys** are unique in a **dictionary**.
# +
my_dict = {
    "a" : 10
}
print(my_dict["a"])
my_dict["a"] = 20 # key "a" already exist in the dictionary, so modify its value
print(my_dict["a"])
my_dict["b"] = 40 # key `b` does not exist in the dictionary, create a new key-value pair
print(my_dict)
# -
# Note that in the last example you are trying to modify the **dictionary** entry for **key** `b`. `my_dict["b"]` is on the left side of the assignment operator.
#
# You can't **read** a value from a non-existing **key** (i.e. by using it on the right hand side of an assignment operator), but you can use a non-existing **key** to **write** a new **key-value** pair into the **dictionary**.
#
# Note that the following 2 notations result in exactly the same **dictionary**.
# +
# This syntax is preferred when you already know all the elements
a = {
    "a" : 10,
    "b" : 20
}
print("Dictionary a:", a)
# This syntax is preferred when you have to add key-value pairs according to some conditions
# Both approaches produce exactly the same dictionary.
b = {}
b["a"] = 10
b["b"] = 20
print("Dictionary b:", b)
# -
# When creating **dictionaries**, a different behavior is often required depending on if a certain **key** (and thus also its corresponding value) is already present or not.
#
# This for example because you want to know if you are creating a new **key-value** pair or just overwriting an existing **value**.
# +
my_dict = {}
# NOTE(review): "<KEY>" looks like placeholder/corrupted content from an
# automated scrub; the original example presumably used a list with repeated
# keys — restore from the upstream notebook.
my_keys = ["<KEY>"]
for k in my_keys:
    # First occurrence of a key initializes it to 1; repeats multiply by 10.
    if k in my_dict:
        my_dict[k] = my_dict[k] * 10
    else:
        my_dict[k] = 1
print(my_dict)
# -
# Remember that mutable **classes** in Python are copied by reference.
#
# This means that if you copy a **dictionary** and then you modify it, the changes will reflect also on the copy as it happens with **lists**.
#
# You can avoid copy by reference with the same mechanisms used for **lists**, i.e. creating a new empty **dictionary** and adding all the elements of the original one to it.
#
# Note that the **slicing** operator is not available for **dictionaries**.
# +
a = {}
b = a  # copy by reference: a and b now name the same dictionary object
b["a"] = 1
# Mutating through b is visible through a as well.
print("a is:", a)
print("b is:", b)
# -
# ### Exercise
#
# Define a **function** that given an input number returns a **dictionary** where the **keys** are all the integer numbers from `1` to the input number included and the **values** are their square.
#
# Hint: you should use the `range()` function in your loop.
# Input values
x = 2
y = 5
# ### Exercise
#
# Define a **function** that takes as input a **list** of **strings** and it returns a **dictionary** where each **key** is a character that is present in one of the **strings** and the **value** is its total number of occurrences within all the **strings** in the **list**.
#
# Hints:
# - Use a nested for loop and treat each **value** in the **dictionary** as a counter.
# - When you encounter a new character, initialize its counter within the dictionary (i.e. create a new **key-value** pair).
# - When you encounter an already present character, just increment its counter by 1 (i.e. update its **value**).
# Input lists
x = ["hello", "world", "test"]
y = ["what you doing?"]
z = ["ABABA"]
# ### Exercise
#
# Decoding is the inverse of the encoding operation. It converts an encoded information back to its original form.
# Use the provided encoding **dictionary** to create a decoding **dictionary**, i.e. its opposite where the encoded value is the **key** and the decoded version is the **value**.
#
# Hint: you have to iterate over the encoding **dictionary** and create a new **dictionary** where **keys** and **values** are swapped.
# +
# Exercise fixture: invert this mapping, then decode `z` with the inverse.
encoding_dict = {
    "_" : 0,
    "a" : 1,
    "b" : 2,
    "c" : 3,
    "d" : 4
}
z = [1, 4, 0, 2, 2, 0]
# -
# ### Exercise
#
# Define a **function** that takes as input a **string** and a **list** of **strings** representing a database.
# The **function** should compare the provided input **string** with every **string** in the database and find the two **strings** in the database that are the most similar to it.
# The **function** should return a **list** of 2 elements corresponding to the 2 most similar **strings** in order.
#
# Similarity is measured by same elements in the exact same position among the **strings**.
# +
# The database
# Exercise fixture: find, for each input string, the two database strings
# with the most positions holding the same character.
db = [
    "ATATATATATAT",
    "AGCTAGCTAGCT",
    "GCGCGCATATAT",
    "TGCAATGACGTA"
]
# Input strings
x = "AAAAAAAAAAAA"
y = "GAGAGACTCTCT"
| 09_dictionaries/dictionaries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import math
import zipfile
import time
import requests
from tqdm import tqdm
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
import imageio
import skimage.transform
import tensorflow as tf
# %matplotlib inline
show_n_images = 25
# +
# %matplotlib inline
import os
from glob import glob
from matplotlib import pyplot
from PIL import Image
import numpy as np
import math
# Image configuration
# Target size the CelebA images are resized to for training.
IMAGE_HEIGHT = 28
IMAGE_WIDTH = 28
# NOTE(review): hard-coded local Windows path — adjust per machine.
data_dir=r'C:\Users\liori\datasets'
data_files = glob(os.path.join(data_dir, 'celebA/*.jpg'))
# Dataset shape tuple: (num_images, width, height, channels).
shape = len(data_files), IMAGE_WIDTH, IMAGE_HEIGHT, 3
def get_image(image_path, width, height, mode):
    """Load one image, center-crop it to the face region, and return it as an array.

    Images already at (width, height) are converted as-is; larger CelebA
    images are cropped to the central 108x108 face box before resizing.
    """
    img = Image.open(image_path)
    if img.size != (width, height):
        # Remove most pixels that aren't part of a face.
        crop_side = 108
        left = (img.size[0] - crop_side) // 2
        top = (img.size[1] - crop_side) // 2
        img = img.crop([left, top, left + crop_side, top + crop_side])
        img = img.resize([width, height], Image.BILINEAR)
    return np.array(img.convert(mode))
def get_batch(image_files, width, height, mode='RGB'):
    """Load a list of image files into one float32 array of shape (N, H, W, C)."""
    loaded = [get_image(path, width, height, mode) for path in image_files]
    data_batch = np.array(loaded).astype(np.float32)
    # Grayscale images come back as (N, H, W); add a trailing channel axis
    # so every batch is 4-dimensional.
    if len(data_batch.shape) < 4:
        data_batch = data_batch.reshape(data_batch.shape + (1,))
    return data_batch
def get_batches(batch_size):
    """Yield successive image batches scaled to the range [-0.5, 0.5].

    Reads the module-level `data_files` and `shape`; the final partial
    batch (fewer than batch_size images) is dropped.
    """
    IMAGE_MAX_VALUE = 255
    start = 0
    while start + batch_size <= shape[0]:
        batch = get_batch(data_files[start:start + batch_size], *shape[1:3])
        start += batch_size
        yield batch / IMAGE_MAX_VALUE - 0.5
def images_square_grid(images, mode='RGB'):
    """Tile the first k*k input images into one square PIL grid image.

    k is the largest integer with k*k <= len(images); pixel values are
    rescaled to 0-255 before assembly.
    """
    k = math.floor(np.sqrt(images.shape[0]))
    # Normalize to the displayable 0-255 range.
    lo, hi = images.min(), images.max()
    images = (((images - lo) * 255) / (hi - lo)).astype(np.uint8)
    # Reshape the leading axis into a k-by-k arrangement of tiles.
    tiles = np.reshape(
        images[:k * k],
        (k, k, images.shape[1], images.shape[2], images.shape[3]))
    # Paste each tile into its cell of the output image.
    grid = Image.new(mode, (images.shape[1] * k, images.shape[2] * k))
    for col, column in enumerate(tiles):
        for row, tile in enumerate(column):
            grid.paste(Image.fromarray(tile, mode),
                       (col * images.shape[1], row * images.shape[2]))
    return grid
# Quick visual check: load 10 images at 56x56 and show them as a grid.
test_images = get_batch(glob(os.path.join(data_dir, 'celebA/*.jpg'))[:10], 56, 56)
pyplot.imshow(images_square_grid(test_images))
# +
import tensorflow as tf
def model_inputs(image_width, image_height, image_channels, z_dim):
    """Create the graph's input placeholders.

    Returns:
        (inputs_real, inputs_z, learning_rate): placeholders for real image
        batches, latent-noise batches, and the scalar learning rate.
    """
    real_shape = (None, image_width, image_height, image_channels)
    inputs_real = tf.placeholder(tf.float32, shape=real_shape, name='input_real')
    inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    return inputs_real, inputs_z, learning_rate
def discriminator(images, reuse=False):
    """
    Create the discriminator network.

    Args:
        images: 4-D tensor of input images (batch, height, width, channels).
        reuse: set True when applying the discriminator a second time
            (e.g. to generated images) so variables are shared.

    Returns:
        (out, logits): sigmoid real/fake probability and pre-activation logits.
    """
    alpha = 0.2  # leaky-ReLU negative slope
    with tf.variable_scope('discriminator', reuse=reuse):
        # using 4 layer network as in DCGAN Paper
        # Conv 1: 32 filters, 5x5 kernel, stride 2 (halves spatial size)
        conv1 = tf.layers.conv2d(images, 32, 5, 2, 'SAME')
        lrelu1 = tf.maximum(alpha * conv1, conv1)  # leaky ReLU
        # Conv 2: 64 filters, with batch norm (DCGAN: no BN on first layer)
        conv2 = tf.layers.conv2d(lrelu1, 64, 5, 2, 'SAME')
        batch_norm2 = tf.layers.batch_normalization(conv2, training=True)
        lrelu2 = tf.maximum(alpha * batch_norm2, batch_norm2)
        # Conv 3: 128 filters
        conv3 = tf.layers.conv2d(lrelu2, 128, 5, 2, 'SAME')
        batch_norm3 = tf.layers.batch_normalization(conv3, training=True)
        lrelu3 = tf.maximum(alpha * batch_norm3, batch_norm3)
        # Flatten — assumes 28x28 inputs reduce to 4x4x128 here; TODO confirm
        flat = tf.reshape(lrelu3, (-1, 4*4*128))
        # Logits
        logits = tf.layers.dense(flat, 1)
        # Output
        out = tf.sigmoid(logits)
        return out, logits
# -
def generator(z, out_channel_dim, is_train=True):
    """
    Create the generator network.

    Args:
        z: 2-D tensor of latent vectors (batch, z_dim).
        out_channel_dim: number of channels in the generated images.
        is_train: True during training (enables batch-norm updates and
            creates the variables); False reuses them for sampling.

    Returns:
        tanh-activated image tensor with values in [-1, 1].
    """
    alpha = 0.2  # leaky-ReLU negative slope
    with tf.variable_scope('generator', reuse=False if is_train==True else True):
        # First fully connected layer, projecting z to a 2x2x128 volume
        x_1 = tf.layers.dense(z, 2*2*128)
        # Reshape it to start the convolutional stack
        deconv_2 = tf.reshape(x_1, (-1, 2, 2, 128))
        batch_norm2 = tf.layers.batch_normalization(deconv_2, training=is_train)
        lrelu2 = tf.maximum(alpha * batch_norm2, batch_norm2)
        # Deconv 1 ('VALID' padding grows 2x2 beyond a plain stride-2 upsample)
        deconv3 = tf.layers.conv2d_transpose(lrelu2, 64, 5, 2, padding='VALID')
        batch_norm3 = tf.layers.batch_normalization(deconv3, training=is_train)
        lrelu3 = tf.maximum(alpha * batch_norm3, batch_norm3)
        # Deconv 2: stride-2 upsample
        deconv4 = tf.layers.conv2d_transpose(lrelu3, 32, 5, 2, padding='SAME')
        batch_norm4 = tf.layers.batch_normalization(deconv4, training=is_train)
        lrelu4 = tf.maximum(alpha * batch_norm4, batch_norm4)
        # Output layer: final upsample to out_channel_dim channels, tanh range
        logits = tf.layers.conv2d_transpose(lrelu4, out_channel_dim, 5, 2, padding='SAME')
        out = tf.tanh(logits)
        return out
# +
def model_loss(input_real, input_z, out_channel_dim):
    """
    Get the loss for the discriminator and generator.

    Args:
        input_real: placeholder of real images.
        input_z: placeholder of latent vectors.
        out_channel_dim: channel count of the generated images.

    Returns:
        (d_loss, g_loss): discriminator and generator loss tensors.
    """
    # One-sided label smoothing: real labels are 0.9 instead of 1.0 to
    # keep the discriminator from becoming over-confident.
    label_smoothing = 0.9
    g_model = generator(input_z, out_channel_dim)
    d_model_real, d_logits_real = discriminator(input_real)
    # Reuse discriminator variables for the fake branch.
    d_model_fake, d_logits_fake = discriminator(g_model, reuse=True)
    # Discriminator: real images toward 0.9, generated images toward 0.
    d_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
                                                labels=tf.ones_like(d_model_real) * label_smoothing))
    d_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                                labels=tf.zeros_like(d_model_fake)))
    d_loss = d_loss_real + d_loss_fake
    # Generator: fool the discriminator into labeling fakes as real.
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                                labels=tf.ones_like(d_model_fake) * label_smoothing))
    return d_loss, g_loss
# -
def model_opt(d_loss, g_loss, learning_rate, beta1):
    """
    Get optimization operations.

    Args:
        d_loss, g_loss: discriminator and generator loss tensors.
        learning_rate: Adam learning rate.
        beta1: Adam first-moment decay.

    Returns:
        (d_train_opt, g_train_opt): per-network Adam training ops.
    """
    # Split trainable variables by variable-scope prefix so each optimizer
    # only updates its own network.
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
    g_vars = [var for var in t_vars if var.name.startswith('generator')]
    # Optimize — the UPDATE_OPS dependency makes batch-norm moving averages
    # update alongside the training steps.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)
        g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)
    return d_train_opt, g_train_opt
# +
# NOTE(review): duplicate of the images_square_grid defined earlier in this
# notebook — consider removing one copy.
def images_square_grid(images, mode='RGB'):
    """
    Helper function to save images as a square grid (visualization).

    Takes an (N, H, W, C) array, rescales values to 0-255, and tiles the
    first k*k images (k = floor(sqrt(N))) into a single PIL image.
    """
    # Get maximum size for square grid of images
    save_size = math.floor(np.sqrt(images.shape[0]))
    # Scale to 0-255
    images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8)
    # Put images in a square arrangement
    images_in_square = np.reshape(
        images[:save_size*save_size],
        (save_size, save_size, images.shape[1], images.shape[2], images.shape[3]))
    # Combine images to grid image
    new_im = Image.new(mode, (images.shape[1] * save_size, images.shape[2] * save_size))
    for col_i, col_images in enumerate(images_in_square):
        for image_i, image in enumerate(col_images):
            im = Image.fromarray(image, mode)
            new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2]))
    return new_im
def show_generator_output(sess, n_images, input_z, out_channel_dim):
    """
    Show example output for the generator.

    Samples n_images random latent vectors, runs the generator in inference
    mode (is_train=False), and displays the results as a grid. Returns None.
    """
    z_dim = input_z.get_shape().as_list()[-1]
    example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])
    samples = sess.run(
        generator(input_z, out_channel_dim, False),
        feed_dict={input_z: example_z})
    pyplot.imshow(images_square_grid(samples))
    pyplot.show()
# -
def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape):
    """
    Train the GAN.

    Args:
        epoch_count: number of passes over the dataset.
        batch_size: images per training step.
        z_dim: latent-vector dimensionality.
        learning_rate: Adam learning rate for both networks.
        beta1: Adam first-moment decay.
        get_batches: generator function yielding batches in [-0.5, 0.5].
        data_shape: (N, width, height, channels) of the dataset.

    Returns:
        The tf.Session used for training (already closed by the `with` block).
    """
    input_real, input_z, _ = model_inputs(data_shape[1], data_shape[2], data_shape[3], z_dim)
    d_loss, g_loss = model_loss(input_real, input_z, data_shape[3])
    d_opt, g_opt = model_opt(d_loss, g_loss, learning_rate, beta1)
    steps = 0
    my_session = tf.Session()
    # NOTE(review): the session closes when the `with` block exits, so the
    # returned handle can no longer run ops — confirm callers only keep it
    # as a reference.
    with my_session as sess:
        sess.run(tf.global_variables_initializer())
        for epoch_i in range(epoch_count):
            for batch_images in get_batches(batch_size):
                # values range from -0.5 to 0.5, therefore scale to range -1, 1
                # to match the generator's tanh output.
                batch_images = batch_images * 2
                steps += 1
                batch_z = np.random.uniform(-1, 1, size=(batch_size, z_dim))
                # Alternate one discriminator and one generator update per step.
                _ = sess.run(d_opt, feed_dict={input_real: batch_images, input_z: batch_z})
                _ = sess.run(g_opt, feed_dict={input_real: batch_images, input_z: batch_z})
                if steps % 400 == 0:
                    # Every 400 steps, report losses and show a generator sample.
                    train_loss_d = d_loss.eval({input_z: batch_z, input_real: batch_images})
                    train_loss_g = g_loss.eval({input_z: batch_z})
                    # Bug fix: previously printed the module-level global
                    # `epochs` instead of the `epoch_count` parameter, which
                    # only worked by accident when the global existed.
                    print("Epoch {}/{}...".format(epoch_i+1, epoch_count),
                          "Discriminator Loss: {:.4f}...".format(train_loss_d),
                          "Generator Loss: {:.4f}".format(train_loss_g))
                    _ = show_generator_output(sess, 1, input_z, data_shape[3])
    return my_session
# +
# Hyperparameters for the training run.
batch_size = 16
z_dim = 100          # latent-vector size
learning_rate = 0.0002
beta1 = 0.5          # Adam beta1, per the DCGAN paper
epochs = 2
# Build the graph fresh and train.
with tf.Graph().as_default():
    sess = train(epochs, batch_size, z_dim, learning_rate, beta1, get_batches, shape)
# -
| tensorflow GAN/GAN celebA working interactive.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/sahandv/science_science/blob/master/FastText_clustering.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="7DsGlPjcug5a"
# # FASTTEXT CLUSTERING
#
#
# + [markdown] colab_type="text" id="32w9QFw8UNgx"
# ## Initialize
# -
import os
# Show which conda environment the kernel is running in (raises KeyError outside conda).
print(os.environ['CONDA_DEFAULT_ENV'])
# + [markdown] colab_type="text" id="jf2eUIOPOBzn"
# Local OR Colab ?
# + colab={} colab_type="code" id="M8N-lszvOBzo"
# Base directory for all data files; pick the line matching the runtime (local vs Colab).
# datapath = '/mnt/6016589416586D52/Users/z5204044/GoogleDrive/GoogleDrive/Data/' # Local
datapath = '/mnt/16A4A9BCA4A99EAD/GoogleDrive/Data/' # Local
# datapath = 'drive/My Drive/Data/' # Remote
# + [markdown] colab_type="text" id="ljzjoEyD3bYD"
# ### Clone Project Git Repo
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="r8SdxRGf3a0x" outputId="aca1300d-6bbb-4a24-fb76-<PASSWORD>"
# !rm -rf 'science_science'
username = "sahandv"#@param {type:"string"}
# password = ""#@param {type:"string"}
# !git clone https://github.com/$username/science_science.git
# !ls
# + [markdown] colab_type="text" id="HD8c6Or13dtv"
# ### Mount Google Drive
#
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="nULtHthH3gD_" outputId="19d3530e-37fa-444a-f790-d727d4aba61e"
# Colab-only: mount Google Drive so `datapath` (remote variant) is reachable.
from google.colab import drive
drive.mount('/content/drive/')
# + [markdown] colab_type="text" id="CNNt3ipo3jcj"
# ### Install requirements
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="bfJbt-ZY3lPU" outputId="ed87cd6e-52af-4191-f136-bad1bb25972e"
# !pip install -r 'science_science/requirements.txt'
# + colab={"base_uri": "https://localhost:8080/", "height": 326} colab_type="code" id="OceD23EmDRed" outputId="331b55ef-9c4b-4f9c-ec21-f0f75ab47a36"
# ! pip install gensim==3.8.1
# + [markdown] colab_type="text" id="TI9uR66quitf"
# ### Import Libs
# + colab={"base_uri": "https://localhost:8080/", "height": 173} colab_type="code" id="NAvF8UcHui1K" outputId="b9bf6256-5bb1-4426-a66d-4506f2b593e4"
import sys
import time
import gc
import collections
import json
import re
import os
import pprint
from random import random
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.cluster import AgglomerativeClustering, KMeans, SpectralClustering, AffinityPropagation
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from yellowbrick.cluster import KElbowVisualizer
import scipy.cluster.hierarchy as sch
from scipy import spatial,sparse,sign
from sklearn.feature_extraction.text import TfidfTransformer , TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import preprocessing
from bokeh.io import push_notebook, show, output_notebook, output_file
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, LabelSet
from gensim.models import FastText as fasttext_gensim
from gensim.test.utils import get_tmpfile
import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
# Fetch the NLTK corpora used below (no-ops if already downloaded).
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
# English stop words, consumed later by WordCloud.
stop_words = set(stopwords.words("english"))
# Enable pandas .progress_apply() with tqdm progress bars.
tqdm.pandas()
# from science_science.sciosci.assets import text_assets as kw
# from science_science.sciosci.assets import generic_assets as sci
# from science_science.sciosci.assets import advanced_assets as aa
from sciosci.assets import text_assets as kw
from sciosci.assets import generic_assets as sci
from sciosci.assets import advanced_assets as aa
# + [markdown] colab_type="text" id="XsARoc-hTPfP"
# ## Load and Prepare Embeddings
# + [markdown] colab_type="text" id="y9bqFgBOuwdl"
# #### Option A - Load Corpus Vector Data
# + colab={} colab_type="code" id="UHBSgYtTuwky"
# Option A: load precomputed FastText term vectors (term -> stringified vector) from JSON.
file_address = datapath+'Corpus/improved_copyr_lemmatized_stopword_removed_thesaurus/FastText vector 1990-2018.json'
with open(file_address) as f:
    vectors = json.loads(f.read())
# + [markdown] colab_type="text" id="eOdBBG5f8Ljg"
# #### Option B - Load Document Vectors and Main Data
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" id="mS_hX39m921E" outputId="72ead27d-d380-45a6-fe69-56b8f833e759"
#@markdown Usually it is not required to alter these years.
year_from = 1990#@param {type:"number"}
year_to = 2019#@param {type:"number"}
#@markdown File address for main WoS file:
file_address = datapath+'/Relevant Results _ DOI duplication - scopus keywords - document types - 31 july.csv'#@param {type:"string"}
# Load the Web of Science export and keep rows with year in [year_from, year_to) and a non-null abstract.
data_full = pd.read_csv(file_address)
data_full = data_full[data_full['PY'].astype('int')>year_from-1]
data_full = data_full[data_full['PY'].astype('int')<year_to]
data_full = data_full[pd.notnull(data_full['AB'])]
# Lower-cased views of the WoS columns used downstream.
doc_titles = data_full['TI'].str.lower().values
doc_sources = data_full['SO'].str.lower().values
doc_research_topic = data_full['SC'].str.lower().values
doc_wos_topic = data_full['WC'].str.lower().values
doc_keywords = data_full['DE'].str.lower().values
# doc_keywords = data_full['ID'].str.lower().values
doc_year = data_full['PY'].astype('int').values
doc_index = data_full.index
doc_meta = data_full[['TI','AB','PY','Z9','U1','SO','JI','DE','ID','WC','SC']]
#@markdown Z9: Total Times Cited Count  * U1: Usage Count (Last 180 Days) *
#@markdown SO: Publication Name * JI: Source Abbreviation *
#@markdown DE: Author Keywords * ID: Keywords Plus *
#@markdown WC: Web of Science Categories * SC: Research Areas
# + colab={} colab_type="code" id="412_NOu78LR1"
#@markdown File address for doc vectors:
file_address = datapath+"/corpus/improved_copyr_lemmatized_stopwords_removed_thesaurus_n-grams/1990-2018 FastText doc vectors - SIF - full.csv"#@param {type:"string"}
# Document-level FastText (SIF-weighted) vectors as a plain ndarray.
doc_vectors = pd.read_csv(file_address)
doc_vectors = doc_vectors.values
doc_vectors
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="8aJF01zk9HlE" outputId="bf86e792-aabd-400e-8e91-172885d30fe8"
#@markdown Make sure all dimensions match:
print(doc_sources.shape,doc_year.shape,doc_titles.shape,data_full.shape,doc_vectors.shape)
# + [markdown] colab_type="text" id="RzBQmJ0-Vdyw"
# #### Option C - Load (author/LDA) Term Vector Data
# + colab={} colab_type="code" id="2UWS-yNzVeOC"
# Option C: load term vectors produced for a given embedding dimension / period.
dim = '15'
dim_comment = ''
period = '1990-2018'
subfolder = ''
file_address = datapath+'Author keywords - 29 Oct 2019/vectors/'+dim+'D'+dim_comment+'/'+subfolder+'FastText vector '+period+'.json'
with open(file_address) as f:
    vectors = json.loads(f.read())
# + [markdown] colab_type="text" id="aDH7j8jl7xMw"
# #### Option D - Load Abstract with n-grams
# + cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 272} colab_type="code" id="Uu6gtse-8GRj" outputId="45dcf275-3372-48c6-91bb-363594136ddf"
#@markdown Don't forget to set the "year_to" to a year higher than intended!
# year_from = 2017#@param {type:"number"}
# year_to = 2020#@param {type:"number"}
period = ''#str(year_from)+'-'+str(year_to-1)
#@markdown File address for main WoS file:
# file_address = datapath+'Corpus/AI 4k/copyr_deflem_stopword_removed_thesaurus May 28/by period/n-gram by 6 repetition keywords/'+period+' abstract_title'
file_address = datapath+'Corpus/AI 4k/copyr_deflem_stopword_removed_thesaurus May 28/1990-2019/1990-2019 n-gram by 6 repetition keywords'
# One preprocessed abstract (with n-grams merged via underscores) per row.
abstracts = pd.read_csv(file_address,names=['abstract'])
print('period:',period,'\n',abstracts)
# + [markdown] colab_type="text" id="Wqn_o9W54g4K"
# Load abstract vectors (for doc clustering)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="N08JNKaO4ngS" outputId="77d1d630-9fa4-41fa-bd68-2227d6029f e3"
#@markdown File path for doc vectors:
file_address = datapath+"Corpus/AI 4k/embeddings/"#@param {type:"string"}
doc_vectors = pd.read_csv(file_address+period+'Doc2Vec patent_wos_ai corpus')
year_vectors = doc_vectors.values
print(year_vectors.shape)
# + [markdown] colab_type="text" id="11phkaSxk7F-"
# #### Option E - Load Keywords
# + colab={"base_uri": "https://localhost:8080/", "height": 272} colab_type="code" id="MMn8CFQlk9Pt" outputId="153b36ca-3bd9-4263-ada0-bbc233d2de21"
#@markdown Don't forget to set the "year_to" to a year higher than intended!
year_from = 2017#@param {type:"number"}
year_to = 2019#@param {type:"number"}
period = str(year_from)+'-'+str(year_to-1)
# Author keywords plus the publication year of each row; filter to the chosen period.
file_address = datapath+'Corpus/copyr_lemmatized_stopword_removed_thesaurus/1900-2019 keywords'
keywords = pd.read_csv(file_address,names=['keywords'])
file_address = datapath+'Corpus/copyr_lemmatized_stopword_removed_thesaurus/1900-2019 years'
years = pd.read_csv(file_address)
keywords['year'] = years['year']
keywords = keywords[(keywords['year']>=year_from) & (keywords['year']<year_to)]
print('period:',period,'\n',keywords)
# + [markdown] colab_type="text" id="EpKGlh5gAftZ"
# Extract Keywords and Flatten
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="JS8Wf2O4AtfE" outputId="fa511352-b31e-4e3c-a43d-a93e04e27c80"
# Split the ';'-delimited keyword strings; also returns a flattened token table.
keywords['keywords_sep'],keywords_flat = kw.tokenize_series_fast(keywords['keywords'],delimiter=';',flatten=True)
# + [markdown] colab_type="text" id="WTCIeLRqrQQU"
# #### Option F - Benchmark Data
# + colab={} colab_type="code" id="wDJEzjSUrUaN"
# KPRIS benchmark: two binary classification tasks (car/camera and memory/cpu).
file_address = datapath+"embedding_benchmark/kpris_data.csv"
data = pd.read_csv(file_address)
abstracts = data[['abstract']]
file_address = datapath+'embedding_benchmark/clean/Document Embedding/finetuned_embeddings_50D.csv'
doc_vectors = pd.read_csv(file_address)
task_1 = data[(data['target']=='car') | (data['target']=='camera')]
task_2 = data[(data['target']=='memory') | (data['target']=='cpu')]
period = 'all'
# columns = [1,3,4,5,7,9,10,11,13,14]
year_vectors = doc_vectors.values
# vector_low_dim = doc_vectors.values.T[columns]
# year_vectors = vector_low_dim.T
# Restrict document vectors to the task-1 rows only.
year_vectors = year_vectors[task_1.index]
print(year_vectors.shape)
# + colab={} colab_type="code" id="7M7bxq4Orewm"
# Reduce the document vectors to 2-D with t-SNE for visualization.
year_vectors = TSNE(n_components=2, n_iter=500, verbose=2).fit_transform(year_vectors)
# BUG FIX: the reduced result lives in `year_vectors`; `vector_low_dim` is not
# assigned by this cell, so the original print raised NameError when run in order.
print("\nComputed t-SNE", year_vectors.shape)
# + [markdown] colab_type="text" id="ks2O9Q_v8Grb"
# #### Word Embedding
# Load embedding model
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="aYf08c-F8QvN" outputId="db3e7d19-92db-4a43-95d4-54af6068fc8c"
# !ls 'drive/My Drive/Data/FastText Models/50D May 16/fasttext-scopus-300k_docs-gensim 50D.model'
# Load the pre-trained gensim FastText model used for all word embeddings below.
gensim_model_address = datapath+'FastText Models/50D May 16/fasttext-scopus-300k_docs-gensim 50D.model'
FT_model = fasttext_gensim.load(gensim_model_address)
# + [markdown] colab_type="text" id="DJIzcK-r8NPD"
# ##### Get Embeddings
# + [markdown] colab_type="text" id="80XezW2rC1TM"
# ###### Use duplicated words to take into the account the weight of words
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="rpKBEaeb8QOL" outputId="86bea8f3-fb08-44dd-c1e3-de18869c0e61"
# Iterate docs and flatten words to a list, keeping repetitions so frequent
# words carry more weight in later averaging. Words seen <= 8 times are dropped.
word_freq = pd.DataFrame(pd.DataFrame([word for doc in abstracts['abstract'].values.tolist() for word in doc.split()],columns=['terms']).terms.value_counts()).reset_index()
word_freq.columns=['term','freq']
word_filter = word_freq[word_freq['freq']>8]
# PERF FIX: build the membership set once. The original re-created a Python list
# from the 'term' column for EVERY token, making the comprehension O(tokens * terms).
frequent_terms = set(word_filter['term'].values.tolist())
words = [word for doc in tqdm(abstracts['abstract'].values.tolist(),total=len(abstracts['abstract'].values.tolist())) for word in doc.split() if word in frequent_terms]
print('\n',len(words))
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="Cj3i4zeUbTQQ" outputId="d126b23c-23fa-42b7-e2d7-4f42a6a53b2c"
# Build one embedding per term by mean-pooling the FastText vectors of its
# constituent grams (underscores in n-grams are treated as word separators).
word_vectors = []
for term in tqdm(words):
    grams = term.replace("_", " ").lower().strip().split()
    gram_matrix = np.array([FT_model.wv[g] for g in grams])
    word_vectors.append(gram_matrix.mean(axis=0))
vector_low_dim = word_vectors
print('\n',len(word_vectors))
# + [markdown] colab_type="text" id="u-Ww32XyDW1R"
# ###### Use unique words to ignore duplicated keywords and ignore the weights of words
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="ZPKaAeCiDgOX" outputId="fd2a8fd3-a1b0-4adb-b132-73b1bbba8a73"
# Iterate docs and flatten words to a list.
# Unique-word variant: each surviving term appears exactly once, so later
# averaging is unweighted by corpus frequency.
word_freq = pd.DataFrame(pd.DataFrame([word for doc in abstracts['abstract'].values.tolist() for word in doc.split()],columns=['terms']).terms.value_counts()).reset_index()
word_freq.columns=['term','freq']
word_freq['term'] = word_freq['term'].str.lower()
# Drop single-character terms.
word_freq['term'] = word_freq['term'].progress_apply(lambda x: x if len(x)>1 else np.nan)
word_freq = word_freq[pd.notnull(word_freq['term'])]
word_filter = word_freq[word_freq['freq']>8]
words = word_filter.term.values.tolist()
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="xbn6WmWCCqD5" outputId="255ae8b8-f830-49b2-d801-c8178a4fe9a5"
# make an embedding list: mean-pool FastText gram vectors per term.
word_vectors = []
for item in tqdm(words):
    phrase = item.replace("_", " ")
    phrase = phrase.lower().strip()
    phrase = phrase.split()
    gram_vecs = []
    for gram in phrase:
        gram_vecs.append(FT_model.wv[gram])
    phrase_vec = np.array(gram_vecs).mean(axis=0)
    word_vectors.append(phrase_vec)
vector_low_dim = word_vectors
print('\n',len(word_vectors))
# + colab={} colab_type="code" id="chLDsbwn412G"
# Keep the term list aligned with `vector_low_dim` for later cluster labelling.
terms_flat = words
# + [markdown] colab_type="text" id="WfhkWMBkKLlk"
# ###### For keyword embedding (Option E)
# + colab={} colab_type="code" id="lkvLk9KsK_Sm"
# Iterate docs and flatten words to a list (keyword variant for Option E):
# keep keywords that occur more than 5 times in the flattened token table.
word_freq = pd.DataFrame(keywords_flat.tokens.value_counts()).reset_index()
word_freq.columns=['term','freq']
word_filter = word_freq[word_freq['freq']>5]
words = word_filter.term.values.tolist()
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="9p9jFV4fKWqN" outputId="9ce324fb-4148-4f4b-99cb-f23abff66fcc"
# make an embedding list: mean-pool FastText gram vectors per keyword.
word_vectors = []
for item in tqdm(words):
    phrase = item.replace("_", " ")
    phrase = phrase.lower().strip()
    phrase = phrase.split()
    gram_vecs = []
    for gram in phrase:
        gram_vecs.append(FT_model.wv[gram])
    phrase_vec = np.array(gram_vecs).mean(axis=0)
    word_vectors.append(phrase_vec)
vector_low_dim = word_vectors
print('\n',len(word_vectors))
# + colab={} colab_type="code" id="CYZg_HOBKb7O"
# Keep the keyword list aligned with `vector_low_dim`.
terms_flat = words
# + [markdown] colab_type="text" id="UzTzgazCB5h9"
# ### Pre processing (For word clustering)
# + [markdown] colab_type="text" id="FjZFZ5AsA4Z8"
# #### (If Option C - and A?) Flatten vectors and make a generic dictionary for all years-words
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="d-qDB58_A4gw" outputId="dacb8dea-223b-4f98-c710-a0529bac62d6"
# Flatten the term -> vector JSON dict into two aligned sequences.
# Each stored vector is a stringified list ("[0.1, 0.2, ...]") that must be
# parsed back into floats.
vectors_flat = []
terms_flat = []
for term, vec_str in tqdm(vectors.items(), total=len(vectors)):
    terms_flat.append(term)
    vectors_flat.append(np.array([float(i) for i in vec_str.replace('[','').replace(']','').replace(', ',' ').split()]))
print('\nYou have ',len(vectors_flat),'unique vectors and terms')
if len(vectors_flat)!=len(terms_flat):
    # FIX: original message was garbled ("Please the terms_flat...").
    sys.exit('Term/Vector length mismatch. Please check the terms_flat and vectors_flat variables.')
vectors_flat = np.array(vectors_flat)
# + [markdown] colab_type="text" id="-VSpVfr-K7FC"
# #### Normalize - Optional.
# Define row_sums first
# + colab={} colab_type="code" id="itFZqugXCGJL"
# Row-normalise the embeddings so each vector's components sum to 1.
# np.asarray also covers the case where `vector_low_dim` is a plain Python list.
vectors_flat_sums = np.asarray(vector_low_dim).sum(axis=1)
# BUG FIX: the original divided by `row_sums`, a name that was never defined
# (NameError); the intended divisor is the sums computed on the line above.
vectors_flat_norm = np.asarray(vector_low_dim) / vectors_flat_sums[:, np.newaxis]
vectors_flat_norm.shape
# + [markdown] colab_type="text" id="L226zEeziCEI"
# OR use this
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="4ur7V-lbiFar" outputId="8efcc3dd-df88-4081-e347-f38c21a6adac"
# Alternative normalisation: min-max scale every embedding dimension to [0, 1].
x = np.array(vector_low_dim)
x_scaled = preprocessing.MinMaxScaler().fit_transform(x)
vectors_flat_norm = list(x_scaled)
x_scaled.shape
# + [markdown] colab_type="text" id="tGf__2O_IqOy"
# #### Reduce Dimensionality for 15D
# + colab={} colab_type="code" id="tE1rGFPG41J8"
# Keep the full-dimensional vectors (no sub-dimension selection).
sub_dim = ''
vector_low_dim = vectors_flat.copy()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="2eSkVs3OIqnu" outputId="f4a136a9-bbc5-45c4-df31-118e92339e52"
# Alternative: hand-pick 10 of the 15 dimensions.
sub_dim = '10 dim/'
tmp_df = pd.DataFrame(vectors_flat)
columns = [1,3,4,5,7,9,10,11,13,14]
vector_low_dim = tmp_df[columns].values
vector_low_dim.shape
# + [markdown] colab_type="text" id="mz7QtvB9eo7Z"
# #### (If Option A) Load year period corpora and make year based vectors
# + colab={"base_uri": "https://localhost:8080/", "height": 111} colab_type="code" id="Uh76v3sLepDi" outputId="a1ef4bb8-7354-4907-8fdd-b7f4030dfc9b"
period = '1990-2018'
file_address = datapath+'corpus/improved_copyr_lemmatized_stopword_removed_thesaurus/'+period+' corpus abstract-title'
corpus = pd.read_csv(file_address,names=['abstracts'])
corpus.head(2)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="WYIYTj9IfUqp" outputId="47112e4a-2af0-479f-d8d9-29d3455970c6"
# Expand the corpus into per-period and per-article vector/term lists by
# looking up each abstract token's embedding.
period_vectors = []
period_terms = []
period_article_vectors = []
period_article_terms = []
period_article_indices = []
# PERF FIX: the original called `terms_flat.index(term)` twice per token, an
# O(n) scan each time (quadratic overall). Precompute an O(1) lookup that maps
# each term to its FIRST occurrence, matching list.index() semantics.
term_to_idx = {}
for i, t in enumerate(terms_flat):
    term_to_idx.setdefault(t, i)
for idx,row in tqdm(corpus.iterrows(),total = corpus.shape[0]):
    article_terms = []
    article_vectors = []
    for term in row['abstracts'].split():
        term_vector = vectors_flat[term_to_idx[term]]
        period_vectors.append(term_vector)
        article_vectors.append(term_vector)
        period_terms.append(term)
        article_terms.append(term)
    period_article_vectors.append(article_vectors)
    period_article_terms.append(article_terms)
    period_article_indices.append(idx)
period_vectors = np.array(period_vectors)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="DAR9Dg-YmBi5" outputId="68028f91-0e93-4104-e7bb-5ee41fd77959"
len(period_article_vectors)
# + [markdown] colab_type="text" id="ak9kKkXtyU9_"
# ## Visualize Embeddings (vectors)
# + [markdown] colab_type="text" id="uwVYbZ5-z0MF"
# #### Get a list of terms to embed
# + colab={} colab_type="code" id="birx-yrlz0mF"
# Demo terms for visualisation, plus their parsed vectors from the `vectors` dict.
demo_terms = pd.read_csv(datapath+'LDA/selected_demo_words',names=['terms']).T.values.tolist()[0] # this is 2016 words probably
demo_vectors = np.array([np.array([float(i) for i in vectors[term].replace('[','').replace(']','').replace(', ',' ').split()]) for term in demo_terms])
# + colab={"base_uri": "https://localhost:8080/"} colab_type="code" id="7ae1HtF7E1Qz" outputId="d69f2a7a-f4ce-49a6-9ed4-d85946668580"
demo_vectors.shape
# + [markdown] colab_type="text" id="ark2zBd_CDIw"
# #### Manually reduce dims
# + colab={} colab_type="code" id="Tkcdh_SzCDtq"
# "Reduction" by simply projecting onto two chosen raw dimensions.
vector_low_dim = []
dim_1 = 0
dim_2 = 1
for row in tqdm(vectors_flat,total=vectors_flat.shape[0]):
    vector_low_dim.append([row[dim_1],row[dim_2]])
vector_low_dim = np.array(vector_low_dim)
vector_low_dim.shape
# + [markdown] colab_type="text" id="6hotYTrIAxNe"
# #### PCA reduce dims
# + [markdown] colab_type="text" id="l2xogx5bCqg8"
# Explore component variances
# + colab={"base_uri": "https://localhost:8080/"} colab_type="code" id="dqdTVhdDCoM6" outputId="e6fb8024-4648-4609-b825-22b572900dba"
pca = PCA()
pca.fit(vectors_flat)
pca.explained_variance_ratio_
# + [markdown] colab_type="text" id="chIbHQi5Coyp"
# Reduce
# + colab={"base_uri": "https://localhost:8080/", "height": 231} colab_type="code" id="tH8Ukm82-1Mt" outputId="3fa9b3cd-5a47-4ba8-90f1-5ca33a7c92d5"
# Project onto the top-2 principal components.
pca = PCA(2)
pca.fit(vectors_flat)
#pca.n_components_
vector_low_dim_pca = pca.transform(vectors_flat)
vector_low_dim_pca.shape
# + [markdown] colab_type="text" id="H0k2Mt_JybTF"
# #### Use TSNE for reduce dims
# + colab={} colab_type="code" id="4LumQJU7yaN8"
vector_low_dim = TSNE(n_components=2, n_iter=250, verbose=2).fit_transform(vectors_flat)
print("\nComputed t-SNE", vector_low_dim.shape)
# + [markdown] colab_type="text" id="iOMc9gS0Apq4"
# #### Visualize dims
# + colab={"base_uri": "https://localhost:8080/"} colab_type="code" id="sUKEml4w8M_g" outputId="438b0ef4-9043-4eb3-a68c-f50aee79b943"
# Tabulate 2-D coordinates with their terms for plotting.
vector_low_dim_df = pd.DataFrame(columns=['x', 'y', 'word'])
vector_low_dim_df['x'], vector_low_dim_df['y'], vector_low_dim_df['word'] = vector_low_dim[:,0], vector_low_dim[:,1], terms_flat
vector_low_dim_df.head(2)
# + [markdown] colab_type="text" id="KwrYd-6V8Mzz"
# #### Plot
# + [markdown] colab_type="text" id="__6Bl7uODxkD"
# ##### Selective demo terms
# + colab={"base_uri": "https://localhost:8080/"} colab_type="code" id="54M9Jq3fDtSk" outputId="8e5f5381-c0af-42a5-fd25-f65ad607b7ba"
# Restrict the plotted points to the demo term list.
vector_low_dim_df = vector_low_dim_df[vector_low_dim_df['word'].isin(demo_terms)]
vector_low_dim_df.shape
# + [markdown] colab_type="text" id="M5OG26N9Lotq"
# ##### Selective by top 1 percent keywords
# + colab={"base_uri": "https://localhost:8080/"} colab_type="code" id="ybMBdWw-LtBx" outputId="7e906379-0e57-4dbd-ff50-4e012bd597e8"
# Restrict plotted points to the terms that appear in the top-1%-keyword table.
word_filter = []
# PERF FIX: `word not in word_filter` on a list is an O(n) scan per word;
# track seen words in a set while preserving first-seen order in the list.
seen = set()
top_one_percent_keywords = pd.read_csv(datapath+'LDA/'+period+' top_99-percentile_keywords_terms.csv')
for indx,row in tqdm(top_one_percent_keywords.iterrows(),total=top_one_percent_keywords.shape[0]):
    for word in row:
        if word not in seen:
            seen.add(word)
            word_filter.append(word)
vector_low_dim_df = vector_low_dim_df[vector_low_dim_df['word'].isin(word_filter)]
vector_low_dim_df.shape
# + [markdown] colab_type="text" id="eRn97zWBDtz0"
# ##### Continue plotting
# + colab={} colab_type="code" id="42CTjzoQ8lLv"
# Interactive bokeh scatter of the 2-D term embedding, labelled with the terms.
source = ColumnDataSource(ColumnDataSource.from_df(vector_low_dim_df))
labels = LabelSet(x="x", y="y", text="word", y_offset=8,
                  text_font_size="8pt", text_color="#555555",
                  source=source, text_align='center')
plot = figure(plot_width=1900, plot_height=1000)
plot.circle("x", "y", size=12, source=source, line_color="black", fill_alpha=0.8)
output_file("drive/My Drive/Data/FastText term clusters/bokeh cluster visualization/15D manual top 1 percent/bokeh "+dim+"D selective - Manual 0 "+str(dim_2)+".html")
plot.add_layout(labels)
show(plot, notebook_handle=True)
# + [markdown] colab_type="text" id="7RXeAJ4IuwrT"
# # Word Clustering
# + [markdown] colab_type="text" id="moyWLo1cAgYJ"
# ### Simple Clustering
# + [markdown] colab_type="text" id="-9pUO3hlHIdp"
# #### Find optimal cluster size: Elbow / Dendrogram method
#
# The data shows that it is impossible to perform clustering on the vectors using k means.
# + cellView="both" colab={} colab_type="code" id="rEHwL2TFAnAd"
#@title Distortion should go down.
# Elbow plot over k = 2..29 to eyeball a cluster count.
model = KMeans()
visualizer = KElbowVisualizer(model, k=(2,30))
visualizer.fit(vectors_flat)        # Fit the data to the visualizer
visualizer.poof()
# + colab={"base_uri": "https://localhost:8080/", "height": 567} colab_type="code" id="Kkb7LBXOWjNa" outputId="7a13cb66-2a9b-4026-d75e-461af6589b39"
#@title Dendrogram
dendrogram = aa.fancy_dendrogram(sch.linkage(vector_low_dim, method='ward'),
                                 truncate_mode='lastp',p=800,show_contracted=True,figsize=(15,9)) #single #average #ward
# + [markdown] colab_type="text" id="-hwKuQl6oI2t"
# #### Init clustering
# + colab={} colab_type="code" id="x5_0aa1HDowR"
# Optional: PCA-reduce to 2-D before clustering.
pca = PCA(2)
pca.fit(vector_low_dim)
#pca.n_components_
vector_low_dim = pca.transform(vector_low_dim)
vector_low_dim[:,1].shape
reduce_dim = True
# + colab={} colab_type="code" id="AsnWr6b8Dw-1"
reduce_dim = False
# + colab={} colab_type="code" id="QsGwgG3FoGJF"
# Clustering run configuration (also used to build output file names).
n_clusters = 12
comment = ''
dim = '50'
dim_comment = ' May 28'
sub_dim = '' #'no repetition/'
# + [markdown] colab_type="text" id="UiZ00jO5LKBv"
# #### Hierarchical Clustering
# + colab={} colab_type="code" id="gJwueiz-ju8J"
# Ward-linkage agglomerative clustering; persist term -> cluster assignments.
model = AgglomerativeClustering(n_clusters=n_clusters,linkage='ward').fit(vector_low_dim)
labels = model.labels_
clusters_df = pd.DataFrame({'terms':terms_flat,'clusters':labels})
clusters_df.to_csv(datapath+'Word Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'agglomerative ward '+period+' '+str(n_clusters)+comment+'.csv',index=False)
# model = AgglomerativeClustering(n_clusters=n_clusters,affinity='cosine',linkage='average').fit(vectors_flat)
# labels = model.labels_
# clusters_df = pd.DataFrame({'terms':terms_flat,'clusters':labels})
# clusters_df.to_csv('drive/My Drive/Data/FastText term clusters - 29 Oct 2019/'+dim+'D'+dim_comment+'/'+sub_dim+'agglomarative cosine average '+period+' '+str(n_clusters)+comment+'.csv',index=False)
# + [markdown] colab_type="text" id="T9rF-s3RLd_w"
# #### K-means Clustering
# + colab={} colab_type="code" id="Gx7DBL4Xn6F5"
# K-means alternative with a fixed random_state for reproducibility.
model = KMeans(n_clusters=n_clusters, random_state=10).fit(vector_low_dim)
labels = model.labels_
clusters_df = pd.DataFrame({'terms':terms_flat,'clusters':labels})
clusters_df.to_csv(datapath+'FastText authkeyword clusters - 29 Oct 2019/'+dim+'D'+dim_comment+'/'+sub_dim+'kmeans '+period+' '+str(n_clusters)+comment+'.csv',index=False)
# + [markdown] colab_type="text" id="lgHoizmL6HMl"
# #### Visualize cluster
# + [markdown] colab_type="text" id="CGCK4o9P6Z_s"
# Read saved clusters
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="thku3zqodb4H" outputId="07e575d4-c0a0-419f-cd46-ab97c9834ccd"
# Path of a previously saved clustering result.
cluster_file_name = datapath+'Word Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'agglomerative ward '+period+' '+str(n_clusters)+comment
cluster_file_name
# + colab={} colab_type="code" id="wn5NDRTC6dUN"
# Load the saved clusters and assign one colour per cluster id.
clusters_df = pd.read_csv(cluster_file_name+'.csv')
clusters_df = clusters_df[clusters_df['terms'].isin(terms_flat)]
color_palette = aa.color_palette_maker(n_clusters)
colors = aa.cluster_color_mapper(clusters_df['clusters'].values.tolist(),color_palette)
clusters_df['colors'] = colors
# + [markdown] colab_type="text" id="NhqtPeux6PyM"
# ##### Visualize
# + colab={} colab_type="code" id="OYFxQ7HX6PiU"
# Scatter all terms coloured by cluster, annotate every point, save to disk.
fig, ax = plt.subplots(figsize=(25, 18), dpi=150)
ax.scatter(vector_low_dim[:,0], vector_low_dim[:,1],color = colors)
for i, label in enumerate(terms_flat):
    ax.annotate(label, (vector_low_dim[i,0], vector_low_dim[i,1]), fontsize=8, fontweight='ultralight')
fig.savefig(cluster_file_name+'.jpg')
# + [markdown] colab_type="text" id="U30HD7CSHZzQ"
# ##### Visualize - reduced samples
# + [markdown] colab_type="text" id="dDEQItUBnCjo"
# Get important words
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="WGaOi21QHZZd" outputId="490becc7-d80c-48b6-e250-117d576b011a"
# Same plot but only annotate the first 30 terms of each cluster.
whitelist = []
for idx in range(len(clusters_df.groupby('clusters').groups)):
    whitelist = whitelist + clusters_df['terms'][list(clusters_df.groupby('clusters').groups[idx])].head(30).values.tolist()
fig, ax = plt.subplots(figsize=(25, 18), dpi=150)
ax.scatter(vector_low_dim_pca[:,0], vector_low_dim_pca[:,1],color = colors)
for i, label in enumerate(terms_flat):
    if label in whitelist:
        ax.annotate(label, (vector_low_dim_pca[i,0], vector_low_dim_pca[i,1]), fontsize=8, fontweight='ultralight')
fig.savefig(cluster_file_name+' - reduced labels.jpg')
# + [markdown] colab_type="text" id="3SyIazJAF7m5"
# ### Word Cluster Cloud
# + colab={} colab_type="code" id="k8iyAz7BKrpJ"
# Configuration for the word-cloud section.
# dim = '15'
# period = '2017-2019'
n_clusters = 4
# method = 'kmeans '
method = 'agglomerative ward '
comment = ''
# comment = ' visually_assessed'
# + [markdown] colab_type="text" id="WtKRIXEwGB8V"
# Load corpus for the period
# + colab={"base_uri": "https://localhost:8080/"} colab_type="code" id="x6v37qXSGKMA" outputId="193a215c-4235-4998-a75f-f9e342d87011"
path = datapath+'corpus/improved_copyr_lemmatized_stopwords_removed_thesaurus_n-grams/'+period+' corpus abstract-title - with n-grams'
corpus = pd.read_csv(path,names=['abstract'])
path
# + [markdown] colab_type="text" id="nyVTCIFiHHz0"
# Load cluster for the period
# + colab={"base_uri": "https://localhost:8080/"} colab_type="code" id="SYYclGfwHLOr" outputId="dafd9583-d99e-4016-a09f-c05351519139"
path = datapath+'FastText term clusters/'+dim+'D/'+method+period+' '+str(n_clusters)+comment+'.csv'
clusters = pd.read_csv(path)
path
# + [markdown] colab_type="text" id="fUxh-VK-HMZF"
# Mask the corpus terms by cluster terms
# + colab={"base_uri": "https://localhost:8080/"} colab_type="code" id="_-U6W9jEHRGW" outputId="42809b4e-00fd-4c00-ec2b-94d33894bd6a"
# For each cluster, rebuild every abstract keeping only tokens that belong to
# that cluster's term list (token order within an abstract is preserved).
cluster_corpora = []
for cluster_label in range(n_clusters):
    # PERF FIX: the original tested `token in white_list` against a Python
    # list for every token of every abstract (O(tokens * terms)); a set makes
    # each membership test O(1) without changing the output.
    white_list = set(clusters[clusters['clusters']==cluster_label].terms.values.tolist())
    cluster_corpus = []
    for abstract in tqdm(corpus.abstract,total=corpus.abstract.shape[0]):
        tokenized_abstract = abstract.split()
        cluster_corpus.append(' '.join([token for token in tokenized_abstract if token in white_list]))
    cluster_corpora.append(cluster_corpus)
# + [markdown] colab_type="text" id="OL7ovRtBKzGR"
# OR - Make cluster corpora based on the doc term corpus (Option D)
# + colab={"base_uri": "https://localhost:8080/", "height": 272} colab_type="code" id="bASabrLHLLQm" outputId="f5748267-119b-4a6e-9d4f-a26deaf5db97"
# Same masking, but driven by `clusters_df` / `abstracts` (Option D inputs).
cluster_corpora = []
for cluster_label in range(n_clusters):
    # PERF FIX: set membership, as above.
    white_list = set(clusters_df[clusters_df['clusters']==cluster_label].terms.values.tolist())
    cluster_corpus = []
    for abstract in tqdm(abstracts.abstract.values.tolist(),total=abstracts.abstract.values.shape[0]):
        tokenized_abstract = abstract.split()
        cluster_corpus.append(' '.join([token for token in tokenized_abstract if token in white_list]))
    cluster_corpora.append(cluster_corpus)
# + [markdown] colab_type="text" id="hcPSB1cWGOhH"
# Generate word cloud for each cluster
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="LfCTMFcSGAXe" outputId="28b5447b-5d2b-4dab-a65d-5ce58298bead"
# Render and save one word cloud per cluster corpus.
for cluster_label in range(n_clusters):
    wordcloud = WordCloud(background_color='white',
                          stopwords=stop_words,
                          max_words=100,
                          max_font_size=50,
                          width=800, height=400,
                          random_state=42).generate(str(cluster_corpora[cluster_label]))
    fig = plt.figure(1)
    plt.imshow(wordcloud,interpolation="bilinear")
    plt.axis('off')
    plt.title('Cluster '+str(cluster_label))
    plt.tight_layout(pad=0)
    plt.show()
    # fig.savefig('drive/My Drive/Data/FastText term clusters/'+dim+'D/wordcloud/'+period+' '+method+' cluster_'+str(cluster_label+1)+'.png', dpi=500)
    fig.savefig(datapath+'FastText doc word clusters 2020/'+dim+'D'+dim_comment+'/'+sub_dim+'wordcloud/'+period+' cluster_'+str(cluster_label+1)+'.png', dpi=500)
# + [markdown] colab_type="text" id="lZhMDZ9m6OVv"
# ## Word Cluster Center Calculator
# + [markdown] colab_type="text" id="D3KyCgoR7dwJ"
# ### Read clusters
# + colab={} colab_type="code" id="tEDMf0i97eMA"
# Re-read a saved clustering result for centre / label computation.
# dim = '50'
# dim_comment = ' w1'
# n_clusters = 17
# sub_dim = 'no repetition/'
# comment = ''
# method = 'kmeans '
method = 'agglomerative ward '
cluster_file_name = datapath+'Word Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+method+period+' '+str(n_clusters)+comment
clusters_df = pd.read_csv(cluster_file_name+'.csv')
clusters_df = clusters_df[clusters_df['terms'].isin(terms_flat)]
# color_palette = aa.color_palette_maker(n_clusters)
# colors = aa.cluster_color_mapper(clusters_df['clusters'].values.tolist(),color_palette)
# clusters_df['colors'] = colors
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="x-BB_kCLqa7t" outputId="90cdebda-1c16-44df-c629-953b9e2132eb"
# Get cluster term frequencies: top-10 most frequent terms per cluster as labels.
top_words = []
for cluster in tqdm(range(n_clusters)):
    top_words.append(', '.join(clusters_df[clusters_df['clusters']==cluster].groupby('terms').count()['clusters'].reset_index().sort_values(by='clusters', ascending=False).head(10)['terms'].values.tolist()))
top_words = pd.DataFrame(top_words)
top_words.to_csv(datapath+'Word Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'labels/'+method+period+' '+str(n_clusters),header=False)
# + [markdown] colab_type="text" id="kZTICNAUC0Hf"
# ### Find cluster centre
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="1omSwj8XC0dn" outputId="9a80d7f3-0ed1-4f30-f472-f79e9dc32b1c"
# Cluster centre = mean of the member term vectors; persisted for later reuse.
cluster_centers = []
for cluster in tqdm(range(n_clusters),total=n_clusters):
    cluster_centers.append(np.array(vector_low_dim)[clusters_df[clusters_df['clusters']==cluster].index].mean(axis=0))
pd.DataFrame(cluster_centers).to_csv(datapath+'Word Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'centers/cluster_centers - agglomerative ward '+period+' '+str(n_clusters)+comment)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="dBRukgiRq-4t" outputId="78c8026e-fdc6-4788-f9b4-d6953aee6c3d"
# !ls 'drive/My Drive/Data/Word Clustering/50D May 28/centers/'
# + [markdown] colab_type="text" id="Flky19JIq2VS"
# ## Cluster Center Similarity Calculator
# + [markdown] colab_type="text" id="XdtCWyzdru8n"
# Load clusters
# + colab={} colab_type="code" id="mulK4sSZq1-2"
sub_dim = ''#@param {type:"string"}
dim = '50'#@param {type:"string"}
comment = ''#@param {type:"string"}
dim_comment = ' w1'
period_A = '2008-2010'#@param {type:"string"}
n_clusters_A = 9#@param {type:"number"}
period_B = '2017-2018'#@param {type:"string"}
n_clusters_B = 17#@param {type:"number"}
clusters_A = pd.read_csv(datapath+'Word Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'centers/cluster_centers - agglomerative ward '+period_A+' '+str(n_clusters_A)+comment,index_col=0)
clusters_B = pd.read_csv(datapath+'Word Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'centers/cluster_centers - agglomerative ward '+period_B+' '+str(n_clusters_B)+comment,index_col=0)
# + [markdown] colab_type="text" id="mYnioIxltVGP"
# ### Cosine similarity calculation from period A to period B with following format:
#
#
# ```
# cluster_1,cluster_2_0,similarity_0,cluster_2_1,similarity_1,cluster_2_2,similarity_2,cluster_2_3,similarity_3,cluster_2_4,similarity_4
# 0,0,0.8512495748329945,1,0.9026553867095742,2,0.9891524888487816,3,0.8417661013507162,4,0.9009857248135538
# 1,0,0.945069537373128,1,0.9293686285273433,2,0.9089606504506658,3,0.9068286759630998,4,0.8822693454546485
# 2,0,0.8889985179190727,1,0.6474221422202824,2,0.717458586171551,3,0.7027810005644912,4,0.5373087435431511
#
#
#
# ```
#
#
# + colab={} colab_type="code" id="09M0gk1HtV0K"
# Pairwise cosine similarity between period A's and period B's cluster
# centres.  Each output row: [idx_A, idx_B_0, sim_0, idx_B_1, sim_1, ...].
names = ['cluster_1']
for idx_B in clusters_B.index:
    names.append('cluster_2_'+str(idx_B))
    names.append('similarity_'+str(idx_B))
sim_A_to_B = []
for idx_A, vector_A in clusters_A.iterrows():
    row = [idx_A]
    for idx_B, vector_B in clusters_B.iterrows():
        # cosine similarity = 1 - cosine distance
        row.append(idx_B)
        row.append(1 - spatial.distance.cosine(vector_A.values, vector_B.values))
    sim_A_to_B.append(row)
sim_A_to_B = pd.DataFrame(sim_A_to_B,columns=names)
sim_A_to_B.to_csv(datapath+'Word Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'similarity/agglomerative ward '+period_A+'_'+str(n_clusters_A)+'-'+period_B+'_'+str(n_clusters_B)+'.csv',index=False)
# + [markdown] colab_type="text" id="f9guYcqLB_Kg"
# ### Euclidean similarity calculation
# + colab={} colab_type="code" id="kE5G99c-B-aW"
# Same A->B comparison as the cosine cell, but with Euclidean distance.
# NOTE(review): similarity_tmp is the raw distance (the 1-distance line is
# commented out), so larger values mean LESS similar here — confirm intended.
names = []
names.append('cluster_1')
sim_A_to_B = []
for idx_A,vector_A in clusters_A.iterrows():
    inner_similarity_scores = []
    inner_similarity_scores.append(idx_A)
    for idx_B,vector_B in clusters_B.iterrows():
        # distance_tmp = spatial.distance.euclidean(vector_A.values, vector_B.values) # automatic calculation
        distance_tmp = np.linalg.norm(vector_A.values-vector_B.values) # manual calculation
        similarity_tmp = distance_tmp
        # similarity_tmp = 1 - distance_tmp
        inner_similarity_scores.append(idx_B)
        inner_similarity_scores.append(similarity_tmp)
        # column names only need to be built once, on the first outer row
        if idx_A == 0:
            names.append('cluster_2_'+str(idx_B))
            names.append('similarity_'+str(idx_B))
    sim_A_to_B.append(inner_similarity_scores)
    # print('cluster of A:',idx_A,'to cluster of B:',idx_B,'similarity',similarity_tmp)
sim_A_to_B = pd.DataFrame(sim_A_to_B,columns=names)
# NOTE(review): writes under 'FastText authkeyword clusters - 29 Oct 2019/'
# while the cosine variant writes under 'Word Clustering/' — confirm on purpose.
sim_A_to_B.to_csv(datapath+'FastText authkeyword clusters - 29 Oct 2019/'+dim+'D'+dim_comment+'/'+sub_dim+'similarity/agglomerative ward '+period_A+'_'+str(n_clusters_A)+'-'+period_B+'_'+str(n_clusters_B)+' - euclidean.csv',index=False)
# + [markdown] colab_type="text" id="E_joOYcT8zGy"
# ## Term Cluster and Term Score Cluster Table Maker
# + [markdown] colab_type="text" id="vVmM_ZcPbxLq"
# Label maker based on centrality
# + colab={} colab_type="code" id="LWOrBtEvbw1K"
# dim = '50'
# period = '2017-2018'
# dim_comment = ' w1'
# n_clusters = 17
# Reload the period's cluster assignments and the matching saved centres, in
# preparation for the centrality-based labelling below.
sub_dim = ''
comment = ''
# method = 'kmeans '
method = 'agglomerative ward '
cluster_file_name = datapath+'Word Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+method+period+' '+str(n_clusters)+comment
clusters_df = pd.read_csv(cluster_file_name+'.csv')
clusters_df = clusters_df[clusters_df['terms'].isin(terms_flat)]
# Read cluster center
centers_file_name = datapath+'Word Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'centers/cluster_centers - '+method+period+' '+str(n_clusters)+comment
cluster_centers = pd.read_csv(centers_file_name,index_col=0)
# Calculate centroid words for n-gram handling
def embed(word, model, sep=None):
    """Return the mean word vector of a (possibly multi-gram) term.

    Parameters
    ----------
    word : str
        Term to embed; split into grams on `sep`.
    model : gensim-style model exposing ``model.wv[token]``.
    sep : str or None, optional
        Gram separator.  ``None`` (the default) splits on any whitespace,
        preserving the original behaviour; pass e.g. ``'_'`` to match the
        underscore-joined n-gram variant used later in this file.

    Returns
    -------
    numpy.ndarray — element-wise mean of the grams' vectors.
    """
    grams = word.split(sep)
    return np.array([model.wv[gram] for gram in grams]).mean(axis=0)
# Rank each cluster's member terms by cosine similarity to the cluster centre
# ("centrality") and keep the 100 most central terms plus their scores.
# NOTE(review): range(n_clusters-1) skips the last cluster — looks like an
# off-by-one, but the same pattern recurs later in this file; confirm before
# changing.
top_words = []
top_scores = []
for cluster_n in range(n_clusters-1):
    cluster_center = cluster_centers.iloc[cluster_n].values
    cluster_terms = clusters_df[clusters_df['clusters']==cluster_n].copy()
    x = [embed(word,FT_model) for word in cluster_terms['terms'].values.tolist()]
    if(reduce_dim==True):
        # Project term vectors into the reduced space used for the centres.
        # NOTE(review): pca is re-fit per cluster, so every cluster gets its
        # own projection — verify this matches how the centres were computed.
        pca.fit(x)
        cluster_terms['vectors'] = pca.transform(x).tolist()
    else:
        cluster_terms['vectors'] = x
    cluster_terms['similarity'] = [1-spatial.distance.cosine(vector, cluster_center) for vector in np.array(cluster_terms['vectors'])]
    cluster_terms = cluster_terms.sort_values(by=['similarity'],ascending=False)
    top_words.append(cluster_terms.head(100)['terms'].values.tolist())
    top_scores.append(cluster_terms.head(100)['similarity'].values.tolist())
pd.DataFrame(top_words).to_csv(datapath+'Word Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'labels/terms '+period+' '+str(n_clusters)+comment,index=False,header=False)
pd.DataFrame(top_scores).to_csv(datapath+'Word Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'labels/similarity '+period+' '+str(n_clusters)+comment,index=False,header=False)
# + [markdown] colab_type="text" id="CgEg-XlKWUWT"
# From file
# + colab={} colab_type="code" id="JTykaaTo8zw_"
# sub_dim = '10 dim/'
# dim_comment = ' 3pcnt'
# dim = '15'
# period = '1990-2018'
# n_clusters = 8
# Build a terms-per-cluster table and a parallel frequency-per-cluster table
# from a saved clustering plus the period's keyword-frequency file; save both.
comment = ''
corpus = pd.read_csv(datapath+'Author keywords - 29 Oct 2019/'+period+' keyword frequency',names=['keyword','frequency'])
cluster_file_name = datapath+'FastText authkeyword clusters - 29 Oct 2019/'+dim+'D'+dim_comment+'/'+sub_dim+'agglomerative ward '+period+' '+str(n_clusters)+comment
clusters = pd.read_csv(cluster_file_name+'.csv')
term_table = clusters.groupby('clusters').groups
# term_table_df = pd.DataFrame([list(clusters['terms'][term_table[x]]) for x in term_table]).T.fillna('')
# score_table_df = [list(corpus[corpus['keyword']==clusters['terms'][term_table[x]]]['frequency']) for x in term_table]
# groups.to_csv('drive/My Drive/Data/FastText authkeyword clusters - 29 Oct 2019/'+dim+'D/'+sub_dim+'term_cluster-tables/agglomerative ward '+period+' '+str(n_clusters)+comment,index=False)
term_table_df = []
score_table_df = []
for cluster_items in term_table:
    cluster_terms = list(clusters['terms'][term_table[cluster_items]])
    # first matching frequency per term (IndexError if a term is missing
    # from the frequency table)
    cluster_scores = [corpus[corpus['keyword']==x]['frequency'].values[0] for x in cluster_terms]
    score_table_df.append(cluster_scores)
    term_table_df.append(cluster_terms)
comment = ' - term_cluster'
# NOTE: to_csv returns None, so term_table_df/score_table_df end up as None.
term_table_df = pd.DataFrame(term_table_df).T.to_csv(datapath+'FastText authkeyword clusters - 29 Oct 2019/'+dim+'D'+dim_comment+'/'+sub_dim+'term_cluster-tables/agglomerative ward '+period+' '+str(n_clusters)+comment+'.csv',index=False)
comment = ' - term_score'
score_table_df = pd.DataFrame(score_table_df).T.to_csv(datapath+'FastText authkeyword clusters - 29 Oct 2019/'+dim+'D'+dim_comment+'/'+sub_dim+'term_cluster-tables/agglomerative ward '+period+' '+str(n_clusters)+comment+'.csv',index=False)
# + [markdown] colab_type="text" id="3oru7Na3WWEK"
# From doc words (Option D )
# + colab={} colab_type="code" id="oSjnSOKWWSnc"
# Same table maker as the cell above, but driven by the in-memory clustering
# (clusters_df) and the document word-frequency table (word_freq).
comment = ''
corpus = word_freq.copy()
term_table = clusters_df.groupby('clusters').groups
term_table_df = []
score_table_df = []
for cluster_id in term_table:
    terms_in_cluster = clusters_df['terms'][term_table[cluster_id]].tolist()
    # look up each term's corpus frequency (first match)
    freqs_in_cluster = [corpus.loc[corpus['term'] == t, 'freq'].values[0] for t in terms_in_cluster]
    term_table_df.append(terms_in_cluster)
    score_table_df.append(freqs_in_cluster)
comment = ' - term_cluster'
term_table_df = pd.DataFrame(term_table_df).T.to_csv(datapath+'Word Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'term_cluster-tables/'+period+' '+str(n_clusters)+comment+'.csv',index=False)
comment = ' - term_score'
score_table_df = pd.DataFrame(score_table_df).T.to_csv(datapath+'Word Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'term_cluster-tables/'+period+' '+str(n_clusters)+comment+'.csv',index=False)
# + [markdown] colab_type="text" id="T5WmGKe7vCOk"
# # Document Clustering
# + [markdown] colab_type="text" id="WTNx8Pr6yf_A"
# ## Simple averaging
# + [markdown] colab_type="text" id="uYYeCoJHv-Ge"
# #### Calculate document vectors
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="W6zjDa-uvCU7" outputId="2d60686e-3380-4418-c4d1-03e5015a70cd"
# Document vector = unweighted mean of the document's word vectors.
article_vectors = []
for idx,article in tqdm(enumerate(period_article_vectors),total=len(period_article_vectors)):
    article_vectors.append(np.array(pd.DataFrame(article).mean().values.tolist()))
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="xg7-fUsan8Ch" outputId="84411679-2567-4da1-f674-7c11c9602fa8"
article_vectors_np = np.array(article_vectors)
article_vectors_np.shape
# + [markdown] colab_type="text" id="7BHhKYjA_LiL"
# #### Save doc vectors to disk
# + colab={} colab_type="code" id="5BXZ0PCU_J7H"
# Persist the document vectors together with their article indices.
article_vectors_df = pd.DataFrame(article_vectors_np)
article_vectors_df['article'] = period_article_indices
article_vectors_df.to_csv(datapath+'FastText doc vectors/'+'FastText doc vectors '+period+'.csv')
# + [markdown] colab_type="text" id="5CDndjuDwCw4"
# #### Find out the N-clusters
# + colab={"base_uri": "https://localhost:8080/", "height": 376} colab_type="code" id="h2exOgrBwC2E" outputId="82accf24-5b83-4e6b-8060-b26b75664486"
#@title K-Means elbow (Distortion should go down.)
# NOTE(review): these diagnostics run on year_vectors, not on the
# article_vectors_np computed just above — confirm which matrix is intended.
model = KMeans()
visualizer = KElbowVisualizer(model, k=(3,100))
visualizer.fit(year_vectors)        # Fit the data to the visualizer
visualizer.poof()
# + colab={"base_uri": "https://localhost:8080/", "height": 381} colab_type="code" id="bnyg7c4YoXb1" outputId="de9106bc-66ca-4563-b66d-bb883db7fbaf"
#@title K-Means silhouette score
# Silhouette scan over even cluster counts 2..48; plot the curve.
silhouette_avg_all = []
cluster_range =list(range(2,50,2))
print("Gridsearching the cluster ranges . . . ")
for n_clusters in tqdm(cluster_range,total=len(cluster_range)):
    # clustering = AgglomerativeClustering(n_clusters=n_clusters,affinity='cosine',linkage='complete').fit(articles_vectors_filtered_np)
    clustering = KMeans(n_clusters=n_clusters, random_state=10).fit(year_vectors)
    # clustering = AffinityPropagation().fit(article_vectors_np)
    cluster_labels = clustering.labels_
    silhouette_avg = silhouette_score(year_vectors, cluster_labels)
    silhouette_avg_all.append(silhouette_avg)
fig = plt.figure()
plt.plot(silhouette_avg_all)
plt.show()
# + colab={} colab_type="code" id="1A3DAsf0oXHt"
#@title Dendrogram
dendrogram = aa.fancy_dendrogram(sch.linkage(year_vectors, method='ward'),
                                 truncate_mode='lastp',p=500,show_contracted=True,figsize=(15,8)) #single #average #ward
# + [markdown] colab_type="text" id="Nwa9hHoCuBrG"
# #### Cluster - simple kmeans
# + colab={} colab_type="code" id="swNysf_juB1n"
# K-means clustering of the mean-of-word-vectors document representation;
# persist the (article index, cluster id) assignment table.
n_clusters = 9
model = KMeans(n_clusters=n_clusters, random_state=10).fit(article_vectors_np)
labels = model.labels_
clusters_df = pd.DataFrame({'articles':period_article_indices,'clusters':labels})
clusters_df.to_csv(datapath+f'FastText doc clusters/FastText doc clusters kmeans {period} {n_clusters}.csv',index=False)
# + [markdown] colab_type="text" id="Q3E8fTxAyrQA"
# ## Weighted averaging / SIF
# + [markdown] colab_type="text" id="wC87uXUlywoA"
# #### Prepare vectors - skip if D
# + cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="VoD4xUbDyxuT" outputId="bf2b9bbc-c2a7-4765-a59b-cc7f1e3b7b42"
# Slice the full document set down to publications of one period
# (year_from .. year_to-1) and collect their parallel metadata lists.
year_from = 1990#@param {type:"number"}
year_to = 2005#@param {type:"number"}
#@markdown Don't forget to set year_to to a year higher than intended
year_index = []
year_sources = []
year_titles = []
year_vectors = []
year_topics = []
year_topics_sep = []
year_keywords = []
year_keywords_sep = []
year_meta = []
period = str(year_from)+'-'+str(year_to-1)
for i,tmp_year in tqdm(enumerate(doc_year),total=doc_year.shape[0]):
    # keep documents with year_from <= year < year_to
    if tmp_year > year_from-1 and tmp_year < year_to:
        tmp_index = doc_index[i]
        tmp_source = doc_sources[i]
        tmp_vector = doc_vectors[i,:]
        tmp_title = doc_titles[i]
        tmp_topics = doc_research_topic[i]
        tmp_keywords = doc_keywords[i]
        tmp_meta = doc_meta.iloc[i]
        year_sources.append(tmp_source)
        year_topics.append(tmp_topics)
        year_index.append(tmp_index)
        # print(tmp_topics)
        # Missing topics/keywords are stored as '' (a string); present ones
        # as lists of stripped strings — downstream cells test == ''.
        if pd.isnull(tmp_topics):
            year_topics_sep.append('')
        else:
            year_topics_sep.append([x.strip() for x in tmp_topics.split(';')])
        if pd.isnull(tmp_keywords):
            year_keywords_sep.append('')
        else:
            year_keywords_sep.append([x.strip() for x in tmp_keywords.split(';')])
        year_titles.append(tmp_title)
        year_vectors.append(tmp_vector)
        year_meta.append(tmp_meta)
year_vectors = np.array(year_vectors)
print('\n',period,year_vectors.shape)
# + [markdown] colab_type="text" id="DN-JfG0FQzVz"
# #### Topic preparation - WC (Web of science Categories)
# Prepare label names (topics). Doing this once up front is faster than redoing the operation inside every loop that needs it; however, this and similar precomputations are not memory friendly.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="YOe2cx4TQzE0" outputId="82aa18a8-a4ee-454b-f59b-b7284c10f26d"
# Re-join each publication's topic list into one ';'-separated label string;
# '*' marks publications with no topics.
year_topics_sep_joined = [
    '*' if pub_topics == '' else ';'.join(pub_topics)
    for pub_topics in tqdm(year_topics_sep)
]
# + [markdown] colab_type="text" id="XIxvz5gyOrK5"
# #### Keyword preparation
# Prepare label names (keywords). Doing this once up front is faster than redoing the operation inside every loop that needs it; however, this and similar precomputations are not memory friendly.
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="nbpHf5M1VNUz" outputId="17522762-75fa-4c31-c347-c7bbbe29bf41"
# Clean keywords
# Pipeline: strip/lowercase -> drop parenthesised text -> drop empties ->
# thesaurus pass (n-gram aware) -> drop empties -> lemmatize -> thesaurus
# pass (unigram) -> drop empties.
year_keywords_sep = [list(map(str.strip, x)) for x in year_keywords_sep]
year_keywords_sep = [list(map(str.lower, x)) for x in year_keywords_sep]
# remove any "(...)" spans from each keyword
regex = re.compile("\((.*?)\)")
tmp_data = []
for row in year_keywords_sep:
    tmp_data.append([regex.sub('',x).strip() for x in row])
year_keywords_sep = tmp_data.copy()
tmp_data = []
for string_list in tqdm(year_keywords_sep, total=len(year_keywords_sep)):
    tmp_data.append([x for x in string_list if x!=''])
year_keywords_sep = tmp_data.copy()
del tmp_data
# Thesaurus
year_keywords_sep = kw.thesaurus_matching(year_keywords_sep,'science_science/data/thesaurus/thesaurus_for_ai_keyword_with_().csv',have_n_grams=True,verbose=0
)
tmp_data = []
for string_list in tqdm(year_keywords_sep, total=len(year_keywords_sep)):
    tmp_data.append([x for x in string_list if x!=''])
year_keywords_sep = tmp_data.copy()
del tmp_data
# Lemmatize
keywords_orig = year_keywords_sep.copy()  # keep the pre-lemmatization form
tmp_data = []
print("\nString pre processing for keywords")
for string_list in tqdm(year_keywords_sep, total=len(year_keywords_sep)):
    tmp_list = [kw.string_pre_processing(x,stemming_method='False',lemmatization=True,stop_words_extra=stop_words,verbose=False,download_nltk=False) for x in string_list]
    tmp_data.append(tmp_list)
year_keywords_sep = tmp_data
del tmp_data
# Thesaurus
year_keywords_sep = kw.thesaurus_matching(year_keywords_sep,'science_science/data/thesaurus/thesaurus_for_ai_keyword_with_().csv',have_n_grams=False)
tmp_data = []
for string_list in tqdm(year_keywords_sep, total=len(year_keywords_sep)):
    tmp_data.append([x for x in string_list if x!=''])
year_keywords_sep = tmp_data.copy()
del tmp_data
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="SRHR4LYBOoYO" outputId="fed7cece-c559-4f34-d5ed-d9b8416a8406"
# Join each publication's cleaned keyword list into a ';'-separated label
# string; '*' marks publications whose keywords were missing.
year_keywords_sep_joined = [
    '*' if pub_keywords == '' else ';'.join(pub_keywords)
    for pub_keywords in tqdm(year_keywords_sep)
]
# + [markdown] colab_type="text" id="pQaJn5WWXoqr"
# ### Clustering
# + [markdown] colab_type="text" id="9ocJ1fJlGdPq"
# #### Find optimal cluster size: Dendrogram method
# + colab={"base_uri": "https://localhost:8080/", "height": 376} colab_type="code" id="xBG6WrFE73gU" outputId="e4fd62f0-dc41-4ad0-8fc9-adaf396e854d"
#@title Distortion should go down.
# K-Means elbow scan (k = 4..500) over the period's document vectors.
model = KMeans()
visualizer = KElbowVisualizer(model, k=(4,500))
visualizer.fit(year_vectors)        # Fit the data to the visualizer
visualizer.poof()
# + colab={"base_uri": "https://localhost:8080/", "height": 639} colab_type="code" id="0QtZEXdHzXKt" outputId="daa9a16f-25a8-4263-8a7d-8e0a90d1056d"
#@title Dendrogram choose either single OR average OR ward
print(period)
dendrogram = aa.fancy_dendrogram(sch.linkage(year_vectors, method='ward'),
                                 truncate_mode='lastp',p=800,show_contracted=True,figsize=(15,10))
# + [markdown] colab_type="text" id="JxiGCH1cG2hq"
# #### Init clustering
# + colab={} colab_type="code" id="cy9SFizgG23i"
# Run parameters for the document clustering and the plots below.
n_clusters = 13
top_labeles_to_draw = 5          # labels allowed per cluster on scatter plots
chance_of_printing_label = 0.3   # probability of annotating any given point
comment = ''
dim = '100D 4k'
# + [markdown] colab_type="text" id="tHHu9tcvG_FJ"
# #### Cluster
# + colab={} colab_type="code" id="t_irHzyOHBGe"
# Ward-linkage agglomerative clustering of the document vectors.
model = AgglomerativeClustering(n_clusters=n_clusters,linkage='ward').fit(year_vectors)
labels = model.labels_
# + colab={} colab_type="code" id="x2444QJ1GPQu"
# Save (source journal, cluster) assignments.
clusters_df = pd.DataFrame({'sources':year_sources,'clusters':labels})
clusters_df.to_csv(datapath+'Document Clustering/'+dim+'/agglomerative ward '+period+' '+str(n_clusters)+'.csv',index=False)
# + colab={} colab_type="code" id="7GAB85vBGPH6"
# Save (joined topics, cluster) assignments.
clusters_df = pd.DataFrame({'topic':year_topics_sep_joined,'clusters':labels})
clusters_df.to_csv(datapath+'Document Clustering/'+dim+'/agglomerative ward '+period+' '+str(n_clusters)+' - topic_labels.csv',index=False)
# + colab={} colab_type="code" id="W32314G1GO8d"
plus = ''
# plus = ' -plus'
# Save (joined author keywords, cluster) assignments.
clusters_df = pd.DataFrame({'topic':year_keywords_sep_joined,'clusters':labels})
clusters_df.to_csv(datapath+'Document Clustering/'+dim+'/agglomerative ward '+period+' '+str(n_clusters)+plus+' - keyword_labels.csv',index=False)
# + colab={} colab_type="code" id="3vQkYFPTf91c"
# Abstracts
# file_address = datapath+'Corpus/AI 4k/copyr_deflem_stopword_removed_thesaurus May 28/by period/n-gram by 6 repetition keywords/'+period+' abstract_title'
# NOTE(review): relies on file_address being set elsewhere (the assignment
# above is commented out) — confirm it is defined before running this cell.
year_abstracts = pd.read_csv(file_address,names=['abstract'])['abstract'].values
clusters_df = pd.DataFrame({'topic':year_abstracts,'clusters':labels})
clusters_df.to_csv(datapath+'Document Clustering/'+dim+'/agglomerative ward '+period+' '+str(n_clusters)+' - abstract_labels.csv',index=False)
# + colab={} colab_type="code" id="4TiHFJ11_wAC"
# read abstract clusters instead of doing a fresh clustering
cluster_file_name = datapath+"Corpus/AI 4k/embeddings/clustering/k10/Doc2Vec patent_wos_ai corpus DEC 200,500,10 k10 labels"
clusters_df = pd.read_csv(cluster_file_name)
# + [markdown] colab_type="text" id="tasSqpziLffd"
# ##### TF-IDF labels
# + colab={} colab_type="code" id="sFL-zkmDA1B9"
def get_abstract_keywords(corpus,keywords_wanted,max_df=0.9,max_features=None):
    """Extract the top TF-IDF keywords for every document in `corpus`.

    Parameters
    ----------
    corpus : iterable of str
        Documents (abstracts, or whole clusters joined into single strings
        for the CTF-ICF variant used later in this file).
    keywords_wanted : int
        Number of top-scoring terms to keep per document.
    max_df, max_features :
        Passed through to CountVectorizer.

    Returns
    -------
    list of dict
        One {term: tfidf_score} dict per document, as produced by
        kw.extract_topn_from_vector.
    """
    cv = CountVectorizer(max_df=max_df,stop_words=stop_words, max_features=max_features, ngram_range=(1,1))
    X = cv.fit_transform(corpus)
    # get feature names
    # NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2;
    # newer environments need get_feature_names_out().
    feature_names = cv.get_feature_names()
    tfidf_transformer = TfidfTransformer(smooth_idf=True,use_idf=True)
    tfidf_transformer.fit(X)
    keywords_tfidf = []
    # (the previous version also accumulated every sorted sparse vector in a
    # never-returned 'keywords_sorted' list — dropped to save memory)
    for doc in tqdm(corpus,total=len(corpus)):
        tf_idf_vector = tfidf_transformer.transform(cv.transform([doc]))
        sorted_items = kw.sort_coo(tf_idf_vector.tocoo())
        keywords_tfidf.append(kw.extract_topn_from_vector(feature_names,sorted_items,keywords_wanted))
    return keywords_tfidf
def get_corpus_top_keywords(abstract_keywords_dict=None):
    """Aggregate per-document keyword scores into corpus-level totals.

    Parameters
    ----------
    abstract_keywords_dict : list of (dict or None), optional
        Per-document {term: score} dicts (output of get_abstract_keywords);
        None entries are skipped.

    Returns
    -------
    pandas.DataFrame indexed by term with a 'value' column of summed scores,
    sorted descending — or False when no input is given (kept for backward
    compatibility with existing callers).
    """
    if abstract_keywords_dict is None:  # identity check, not '== None'
        print("keywords should be provided")
        return False
    terms = []
    values = []
    for doc in abstract_keywords_dict:
        if doc is not None:
            terms.extend(doc.keys())
            values.extend(doc.values())
    terms_df = pd.DataFrame({'terms':terms,'value':values}).groupby('terms').sum().sort_values('value',ascending=False)
    return terms_df
def find_max_item_value_in_all_cluster(haystack,needle,cluster_exception=None):
    """Find the maximum score of `needle` across a list of {term: score} dicts.

    Parameters
    ----------
    haystack : list of dict
        One {term: score} dict per cluster (scores sorted descending, so the
        first key of a dict is its top-scoring term).
    needle : str
        Term to look for.
    cluster_exception : int or None, optional
        Index of a cluster to exclude from the search (used to ask "how does
        this term score in all OTHER clusters?").

    Returns
    -------
    (max_val, row_max) : tuple
        max_val — the best score of `needle` found (0 if absent everywhere);
        row_max — the first (i.e. maximum) score of the dict holding that
        best value, giving context for how the needle compares to that
        cluster's top term (0 if the needle was never found).
    """
    max_val = 0
    max_index = None
    for counter, item in enumerate(haystack):
        try:
            # skip the excluded cluster; a missing key raises KeyError,
            # a non-dict entry raises TypeError (the old bare 'except'
            # swallowed everything, including KeyboardInterrupt)
            if item[needle] > max_val and (cluster_exception is None or cluster_exception != counter):
                max_val = item[needle]
                max_index = counter
        except (KeyError, TypeError):
            pass
    if max_index is not None:
        best_row = haystack[max_index]
        row_max = best_row[next(iter(best_row))]  # first (largest) score of that row
    else:
        row_max = 0
    return max_val,row_max
# + [markdown] colab_type="text" id="6wqRZ3f95aSS"
# ##### Regular TF-IDF labels
# + colab={} colab_type="code" id="rIo_K5Hug1tI"
# TF-IDF (normal)
# Label each cluster with the 6 top TF-IDF keywords aggregated over its
# member abstracts.
# NOTE(review): groups by a 'label' column — this matches the k10 clusters
# file loaded above, not the 'clusters' column produced by fresh clustering;
# verify which clusters_df is live when this cell runs.
year_tfidf_keywords_sep_joined = []
year_tfidf_labels = []
year_abstracts = pd.read_csv(datapath+"Corpus/copyr_lemmatized_stopword_removed_thesaurus/by period/n-gram by 2 repetition keywords/"+period+" abstract_title",names=['abstract'])['abstract']
cluster_groups = clusters_df.groupby('label').groups
for cluster in cluster_groups.keys():
    cluster_abst_ids = list(cluster_groups[cluster])
    cluster_abstracts = year_abstracts[cluster_abst_ids].values
    cluster_keywords_tfidf = get_abstract_keywords(cluster_abstracts,10,max_df=0.5)
    cluster_top_words = list(get_corpus_top_keywords(cluster_keywords_tfidf).index[:6])
    year_tfidf_labels.append(get_corpus_top_keywords(cluster_keywords_tfidf).head(6).reset_index().values.tolist())
    year_tfidf_keywords_sep_joined.append(';'.join(cluster_top_words))
    # print('\n',cluster_top_words_joined)
year_tfidf_keywords_sep_joined
pd.DataFrame(year_tfidf_labels).to_csv(datapath+"Document Clustering/"+dim+"/labels/agglomerative ward "+period+" "+str(n_clusters)+" - TF-IDF_labels.csv",index=False,header=False)
# + [markdown] colab_type="text" id="Va6Zee0g5WMY"
# ##### CTF-ICF labels
# + colab={} colab_type="code" id="tTXFxYla7cxJ"
# TF-IDF (CTF-ICF)
# Treat each cluster's concatenated abstracts as ONE document, so TF-IDF acts
# as cluster-term-frequency / inverse-cluster-frequency; annotate every term
# with its score plus the best competing score among the other clusters.
cluster_as_string = []
year_abstracts = pd.read_csv(file_address,names=['abstract'])['abstract']
clusters = clusters_df.groupby('label').groups
for key in clusters.keys():
    cluster_as_string.append(' '.join(year_abstracts[list(clusters[key])]))
cluster_keywords_tfidf = get_abstract_keywords(cluster_as_string,100,max_df=0.8)
cluster_keywords = []
cluster_index = 0
for items in cluster_keywords_tfidf:
    items_tmp = []
    for item in items:
        # best score of this term in any OTHER cluster + that row's top score
        max_data = find_max_item_value_in_all_cluster(cluster_keywords_tfidf,item,cluster_index)
        items_tmp.append(item+' ('+str(items[item])+' | '+str(max_data[0])+'/'+str(max_data[1])+')') # (item+' :'+str(items[item])+' / '+str( max of item in all other rows))
    cluster_keywords.append(items_tmp)
    cluster_index+=1
pd.DataFrame(cluster_keywords).to_csv(cluster_file_name+" keywords",index=False,header=False)
# + colab={} colab_type="code" id="wjPFdnSDG-bF"
# Get term cluster labels (just terms and not scores)
# Split each cluster's {term: score} dict into parallel term / score tables
# (clusters as columns) and persist both.
cluster_keywords_terms = [list(d.keys()) for d in cluster_keywords_tfidf]
cluster_keywords_scores = [list(d.values()) for d in cluster_keywords_tfidf]
pd.DataFrame(cluster_keywords_terms).T.to_csv(cluster_file_name+" keywords - term_cluster.csv",index=False)
pd.DataFrame(cluster_keywords_scores).T.to_csv(cluster_file_name+" keywords - term_score.csv",index=False)
# + colab={} colab_type="code" id="yJrZhBBuDFpg"
# Get term frequencies for each period
# Count every stop-word-filtered token across all clusters' text and save the
# period's frequency table.
terms = ' '.join(cluster_as_string).split()
terms = [x for x in terms if x not in list(stop_words)]
pd.DataFrame(terms,columns=['terms'])['terms'].value_counts().to_csv(datapath+'Corpus/AI 4k/copyr_deflem_stopword_removed_thesaurus May 28/by period/n-gram by 6 repetition keywords/'+period,header=False)
# + [markdown] colab_type="text" id="CTCy_IP77Jw7"
# ##### Word centrality labels
# + colab={} colab_type="code" id="QqQBSUtIbPV5"
# Parameters + reload of the saved abstract-label clustering for this period.
sub_dim = ''
# dim = '50'
dim_comment = ''
period = '2014-2016'
n_clusters = 11
# read abstract clusters instead of doing a fresh clustering
cluster_file_name = datapath+'Document Clustering/'+dim+'/agglomerative ward '+period+' '+str(n_clusters)+' - abstract_labels.csv'
clusters_df = pd.read_csv(cluster_file_name)
# + colab={} colab_type="code" id="qK0QsTeA7cL0"
def embed(word,model,sep):
    """Mean FastText vector of the grams in `word`, split on `sep`.

    Mirrors the whitespace-splitting variant defined earlier in the file,
    but with an explicit gram separator (here '_' joins n-gram parts).
    """
    return np.array([model.wv[gram] for gram in word.split(sep)]).mean(axis=0)
# Load the 50-D gensim FastText model used to embed terms below.
gensim_model_address = datapath+'FastText Models/50D May 16/fasttext-scopus-300k_docs-gensim 50D.model'
FT_model = fasttext_gensim.load(gensim_model_address)
# + colab={} colab_type="code" id="sEmO6d3F7KxG"
# Read cluster center
center_address = datapath+'Document Clustering/'+dim+'/'+sub_dim+'centers/agglomerative ward '+period+' '+str(n_clusters)+comment
cluster_centers = pd.read_csv(center_address,index_col=0)
# + colab={} colab_type="code" id="09LGEQtrCYTp"
# For each cluster: collect the unique tokens of its abstracts, embed them,
# and score every token by cosine similarity to the cluster centre.  Result:
# one {term: centrality} record per cluster.
# NOTE(review): range(n_clusters-1) skips the last cluster — same suspected
# off-by-one as in the word-clustering section above; confirm.
term_scores = []
for cluster_n in range(n_clusters-1):
    cluster_center = cluster_centers.iloc[cluster_n].values
    cluster_abstrcts = clusters_df[clusters_df['clusters']==cluster_n].copy()
    cluster_abstrcts = cluster_abstrcts['topic'].str.lower().values.tolist()
    cluster_abstrcts = ' '.join(cluster_abstrcts)
    cluster_terms = cluster_abstrcts.split()
    cluster_terms = pd.DataFrame(cluster_terms)
    cluster_terms.columns = ['terms']
    # groupby keys give the (sorted) unique tokens
    cluster_terms_tmp = list(cluster_terms.groupby('terms').groups.keys())
    cluster_terms = pd.DataFrame([])
    cluster_terms['terms'] = cluster_terms_tmp
    x = [embed(word,FT_model,'_') for word in cluster_terms.terms.values.tolist()]
    cluster_terms['vectors'] = x
    cluster_terms['similarity'] = [1-spatial.distance.cosine(vector, cluster_center) for vector in np.array(cluster_terms['vectors'])]
    cluster_terms = cluster_terms.sort_values(by=['similarity'],ascending=False)
    cluster_terms = cluster_terms[['terms','similarity']]
    term_scores.append(cluster_terms.set_index('terms').T.to_dict('records'))
    # top_words = cluster_terms.head(1000)['terms'].values.tolist()
    # top_scores = cluster_terms.head(1000)['similarity'].values.tolist()
# + [markdown] colab_type="text" id="EwapCbDdX56A"
# ###### Combine CTF-ICF and Cluster centrality
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="BHkZPEVtX-75" outputId="c6bbb8ee-146c-495e-9ad2-3d5c4de3d4ad"
# Combined score per term = CTF-ICF score * centrality score.
cluster_as_string = []
year_abstracts = clusters_df['topic'].values
clusters = clusters_df.groupby('clusters').groups
for key in clusters.keys():
    cluster_as_string.append(' '.join(year_abstracts[list(clusters[key])]))
cluster_keywords_tfidf = get_abstract_keywords(cluster_as_string,1000,max_df=0.8)
new_scores = []
for cluster_n in range(n_clusters-1):
    terms = list(cluster_keywords_tfidf[cluster_n].keys())
    # a KeyError here would mean a TF-IDF term missing from the centrality dict
    scores = [cluster_keywords_tfidf[cluster_n][term]*term_scores[cluster_n][0][term] for term in terms]
    new_data = dict(zip(terms, scores))
    new_scores.append(new_data)
# + colab={} colab_type="code" id="rLdNoIWRsefl"
# Annotate and persist the labels (same formatting as the CTF-ICF cell above).
# NOTE(review): this formats cluster_keywords_tfidf, not the new_scores just
# computed — confirm whether the combined scores were meant to be saved here.
saving_path = datapath+"Document Clustering/"+dim+"/labels/agglomerative ward "+period+" "+str(n_clusters)+" - central_tficf labels_withmax.csv"
cluster_keywords = []
cluster_index = 0
for items in cluster_keywords_tfidf:
    items_tmp = []
    for item in items:
        max_data = find_max_item_value_in_all_cluster(cluster_keywords_tfidf,item,cluster_index)
        items_tmp.append(item+' ('+str(items[item])+' | '+str(max_data[0])+'/'+str(max_data[1])+')') # (item+' :'+str(items[item])+' / '+str( max of item in all other rows))
    cluster_keywords.append(items_tmp)
    cluster_index+=1
print('saving to ',saving_path)
pd.DataFrame(cluster_keywords).to_csv(saving_path,index=False,header=False)
# + [markdown] colab_type="text" id="8JQ4GpYSS7Hr"
# #### Visualize clusters - with source labels
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="XkibDZbtS7gl" outputId="452bfa55-7d62-42eb-8672-ddaca52e28ab"
cluster_file_name = datapath+'FastText doc clusters - SIP/'+dim+'D/agglomerative ward '+period+' '+str(n_clusters)+'.csv'
cluster_file_name
# + colab={} colab_type="code" id="sko527Y5S_zp"
# Load the (source, cluster) table and map each cluster to a colour.
clusters_df = pd.read_csv(cluster_file_name)
clusters_df = clusters_df[clusters_df['sources'].isin(year_sources)]
color_palette = aa.color_palette_maker(n_clusters)
colors = aa.cluster_color_mapper(clusters_df['clusters'].values.tolist(),color_palette)
clusters_df['colors'] = colors
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="-PniuZMmTCpF" outputId="a0d88963-75b0-4874-f71d-9090e3a4e073"
# 2-D PCA projection of the document vectors for plotting.
pca = PCA(2)
pca.fit(year_vectors)
#pca.n_components_
vector_low_dim = pca.transform(year_vectors)
vector_low_dim[:,1].shape
# + [markdown] colab_type="text" id="0LDoAxJjPAGx"
# ##### Visualize clusters - with sources as labels
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="On8FXjeSTFDo" outputId="b1101721-d483-4723-8623-96e2c7017e85"
# Whitelist the most frequent sources per cluster, then scatter-plot the
# documents and annotate only whitelisted sources (long names abbreviated).
whitelist = []
for idx in range(len(clusters_df.groupby('clusters').groups)):
    # Get sources for this cluster
    this_cluster_df = clusters_df.iloc[list(clusters_df.groupby('clusters').groups[idx])]
    # Group sources to get unique source names and sort them by frequency
    source_counts = this_cluster_df.groupby('sources').count()['clusters'].reset_index().sort_values(by=['clusters'], ascending=False)
    source_counts.columns = ['sources','frequency']
    # Get top N sources by frequency
    whitelist = whitelist + source_counts['sources'].head(top_labeles_to_draw).values.tolist()
fig, ax = plt.subplots(figsize=(25, 18), dpi=150)
ax.scatter(vector_low_dim[:,0], vector_low_dim[:,1],color = colors)
year_sources_abbr = []
year_sources_orig = []
for i, label in enumerate(year_sources):
    if label in whitelist:
        if len(label)>20:
            # abbreviate long names; remember the original for the guide table
            year_sources_orig.append(label)
            label = aa.abbreviator(label)
            year_sources_abbr.append(label)
        ax.annotate(label, (vector_low_dim[i,0], vector_low_dim[i,1]), fontsize=8, fontweight='ultralight')
fig.savefig(cluster_file_name[:-4]+' - reduced labels.jpg')
# + colab={"base_uri": "https://localhost:8080/", "height": 419} colab_type="code" id="BQppnTUV2vtT" outputId="46320053-e0cd-440a-ce34-83282ae9250d"
# Save the abbreviation -> original-name guide.
year_sources_abbr = pd.DataFrame(year_sources_abbr)
year_sources_abbr['original'] = year_sources_orig
year_sources_abbr.columns = ['abbreviation','original']
year_sources_abbr.to_csv(cluster_file_name[:-4]+' - labels guide.csv')
year_sources_abbr
# + [markdown] colab_type="text" id="ndv0eQjqSOr6"
# #### Visualize clusters - with topic labels
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="UIMu-7HVSSvx" outputId="7e8d5a05-5b36-481f-e442-940ce6253a5d"
cluster_file_name = datapath+'FastText doc clusters - SIP/'+dim+'D/agglomerative ward '+period+' '+str(n_clusters)+' - topic_labels.csv'
cluster_file_name
# + colab={} colab_type="code" id="SndGj30uSS-3"
# Load the (topic, cluster) table and map clusters to colours.
clusters_df = pd.read_csv(cluster_file_name)
# clusters_df = clusters_df[clusters_df['topic']!='*']
color_palette = aa.color_palette_maker(n_clusters)
colors = aa.cluster_color_mapper(clusters_df['clusters'].values.tolist(),color_palette)
clusters_df['colors'] = colors
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="wcjmuIMBSTO4" outputId="027571bd-65cd-4fcf-b7ba-0f70b5992368"
# 2-D PCA projection for plotting.
pca = PCA(2)
pca.fit(year_vectors)
#pca.n_components_
vector_low_dim = pca.transform(year_vectors)
vector_low_dim[:,1].shape
# + colab={} colab_type="code" id="dTSUUd_5QD8Q"
# Per cluster: flatten the ';'-joined topic strings, rank topics by frequency,
# extend the annotation whitelist with the top N, and build a wide top-15
# table (one column pair per cluster).
whitelist = []
topic_terms = []
topic_frequencies = []
for idx in range(len(clusters_df.groupby('clusters').groups)):
    # Get indices for this cluster
    this_cluster_df = clusters_df.iloc[list(clusters_df.groupby('clusters').groups[idx])]
    # Flatten topics of cluster
    cluster_topics = [x.split(';') for x in this_cluster_df['topic'].to_list()]
    topics = pd.DataFrame([j for sub in cluster_topics for j in sub])
    topics.columns = ['topic']
    # Group topics to get unique source names and sort them by frequency
    g_tmp = pd.DataFrame(topics['topic'].value_counts()).reset_index()
    g_tmp.columns = ['topic','count']
    g_tmp = g_tmp.sort_values(by=['count'], ascending=False)
    # Get top N sources by frequency
    whitelist = whitelist + g_tmp['topic'].head(top_labeles_to_draw).values.tolist()
    tmp_terms = []
    tmp_freqs = []
    for i,x in enumerate(g_tmp['topic'].tolist()):
        if x!='':
            tmp_terms.append(x)
            tmp_freqs.append(g_tmp['count'].tolist()[i])
    if idx == 0:
        top_topic = g_tmp.head(15).copy()
    else:
        top_topic = pd.concat([top_topic, g_tmp.head(15)], axis=1)
    topic_terms.append(tmp_terms)
    topic_frequencies.append(tmp_freqs)
# 'computer science' appears in nearly every cluster, so drop it as a label
whitelist = [element for element in whitelist if element!='computer science']
# top_topic
# + colab={} colab_type="code" id="OoO1jWqhklzS"
top_topic.to_csv(cluster_file_name[:-4]+' - topic cluster frequency.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="pnn8WuhYPVXh" outputId="a63a9231-f474-4490-fa91-b02d592da778"
fig, ax = plt.subplots(figsize=(30, 20), dpi=200)
ax.scatter(vector_low_dim[:,0], vector_low_dim[:,1],color = colors)
year_sources_abbr = []
year_sources_orig = []
for i, topics_joined in enumerate(year_topics_sep_joined):
if any(element in whitelist for element in topics_joined.split(';')):
if random()<=chance_of_printing_label:
ax.annotate(topics_joined, (vector_low_dim[i,0], vector_low_dim[i,1]), fontsize=8, fontweight='ultralight')
fig.savefig(cluster_file_name[:-4]+' - reduced labels - topic labels.jpg')
# + [markdown] colab_type="text" id="w3aopT5U0COH"
# #### Visualize clusters - with keyword labels
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="UBUso96R0A1N" outputId="dccf1403-9836-4a2f-c8ba-9ea39f986709"
# Same pipeline as the topic-label section above, but for keyword labels.
plus = ''
# plus = ' -plus'
cluster_file_name = datapath+'FastText doc clusters - SIP/'+dim+'D/agglomerative ward '+period+' '+str(n_clusters)+plus+' - keyword_labels.csv'
cluster_file_name
# + colab={} colab_type="code" id="dlT0Pf2K0OKI"
clusters_df = pd.read_csv(cluster_file_name)
# clusters_df = clusters_df[clusters_df['topic']!='*']
color_palette = aa.color_palette_maker(n_clusters)
colors = aa.cluster_color_mapper(clusters_df['clusters'].values.tolist(),color_palette)
clusters_df['colors'] = colors
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="xUer6kag0TiR" outputId="a681e557-79a6-4f35-ba44-37375a5a8d8d"
# 2-D PCA projection of the document vectors for plotting.
pca = PCA(2)
pca.fit(year_vectors)
#pca.n_components_
vector_low_dim = pca.transform(year_vectors)
vector_low_dim[:,1].shape
# + colab={} colab_type="code" id="-nFl0iPi0Vyl"
# Per cluster: flatten ';'-separated keyword strings, rank by frequency, build
# the annotation whitelist and the keyword term/frequency lists per cluster.
whitelist = []
keywords_terms = []
keywords_frequencies = []
# Keywords column may contain NaN; normalize to empty string before splitting.
clusters_df['topic'] = clusters_df['topic'].fillna('')
for idx in range(len(clusters_df.groupby('clusters').groups)):
    # Get indices for this cluster
    this_cluster_df = clusters_df.iloc[list(clusters_df.groupby('clusters').groups[idx])]
    # Flatten topics of cluster
    cluster_topics = [x.split(';') for x in this_cluster_df['topic'].to_list()]
    topics = pd.DataFrame([j for sub in cluster_topics for j in sub])
    topics.columns = ['topic']
    # Group topics to get unique source names and sort them by frequency
    g_tmp = pd.DataFrame(topics['topic'].value_counts()).reset_index()
    g_tmp.columns = ['topic','count']
    g_tmp = g_tmp.sort_values(by=['count'], ascending=False)
    # Get top N sources by frequency
    whitelist = whitelist + g_tmp['topic'].head(top_labeles_to_draw).values.tolist()
    tmp_terms = []
    tmp_freqs = []
    for i,x in enumerate(g_tmp['topic'].tolist()):
        if x!='':
            tmp_terms.append(x)
            tmp_freqs.append(g_tmp['count'].tolist()[i])
    # Accumulate each cluster's top-15 table column-wise into one wide frame.
    if idx == 0:
        top_topic = g_tmp.head(15).copy()
    else:
        top_topic = pd.concat([top_topic, g_tmp.head(15)], axis=1)
    keywords_terms.append(tmp_terms)
    keywords_frequencies.append(tmp_freqs)
# Remove the empty-string placeholder from the whitelist.
whitelist = [element for element in whitelist if element!='']
# top_topic
# + colab={} colab_type="code" id="wiagicC40ZR1"
top_topic.to_csv(cluster_file_name[:-4]+' - keyword cluster frequency.csv')
# + colab={} colab_type="code" id="ojpuFqkW0bL6"
# Scatter plot with a random subset of whitelisted keyword annotations.
fig, ax = plt.subplots(figsize=(30, 20), dpi=200)
ax.scatter(vector_low_dim[:,0], vector_low_dim[:,1],color = colors)
year_sources_abbr = []
year_sources_orig = []
for i, topics_joined in enumerate(year_keywords_sep_joined):
    if any(element in whitelist for element in topics_joined.split(';')):
        if random()<=chance_of_printing_label:
            ax.annotate(topics_joined, (vector_low_dim[i,0], vector_low_dim[i,1]), fontsize=8, fontweight='ultralight')
fig.savefig(cluster_file_name[:-4]+' - reduced labels - keyword labels.jpg')
# + [markdown] colab_type="text" id="6gxzs7lXTHar"
# ### Cluster Center Calc
# + cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="USgFv_IQTH9J" outputId="bfa3626f-b510-4e57-8914-46bc22dc0fe0"
# Cluster-center calculation setup: decide which cluster CSV to load and how
# many clusters to use, then sanity-check its row count against year_vectors.
sub_dim = ''
# dim = '50'
dim_comment = ''
# period = '1990-2004'
#@markdown Period should be set at vector year preparation section!!
comment = ''
use_current_cluster_file_name = True #@param {type:"boolean"}
use_current_cluster_numbers = True #@param {type:"boolean"}
if use_current_cluster_file_name is False:
    print('Using custom cluster file name...')
    cluster_file_name = ''#@param {type:"string"}
if use_current_cluster_numbers is False:
    n_clusters_new = 6#@param {type:"number"}
else:
    # BUG FIX: this line was `n_clusters_new == n_clusters` — a no-op
    # comparison that left n_clusters_new undefined on the default path and
    # raised a NameError when building the default file name below.
    n_clusters_new = n_clusters
if cluster_file_name == '':
    print('Custom cluster file name not provided! Trying to get the default location...')
    cluster_file_name = datapath+'FastText doc clusters - SIP/'+dim+'/'+dim_comment+'/'+sub_dim+'agglomerative ward '+period+' '+str(n_clusters_new)+comment
clusters_df = pd.read_csv(cluster_file_name)
vectors_df = pd.DataFrame(year_vectors)
# color_palette = aa.color_palette_maker(n_clusters_new)
# colors = aa.cluster_color_mapper(clusters_df['clusters'].values.tolist(),color_palette)
# clusters_df['colors'] = colors
# The cluster file and the in-memory vectors must describe the same documents.
if(clusters_df.shape[0] != vectors_df.shape[0]):
    print('Fix year vector section. periods here should be set from that section.')
else:
    # Fixed message typo ('matche' -> 'match').
    print('Shapes:',clusters_df.shape[0],'match. Good to go!')
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="TdB49ZJiFFpC" outputId="16f464c7-7573-42f5-84f9-bdae7459d199"
print(cluster_file_name)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="1VkCEkkfVuC8" outputId="c7614750-4cd7-4b6e-d03f-b88787df496f"
# n_clusters = n_clusters_new
# Each cluster center is the mean of the document vectors assigned to it.
cluster_centers = []
for cluster in tqdm(range(n_clusters),total=n_clusters):
    cluster_centers.append(vectors_df.iloc[clusters_df[clusters_df['clusters']==cluster].index].mean(axis=0))
# + colab={} colab_type="code" id="imOp3W6JVT6m"
# Persist the centers so period-to-period similarity can be computed later.
pd.DataFrame(cluster_centers).to_csv(datapath+'Document Clustering/'+dim+'/'+sub_dim+'centers/agglomerative ward '+period+' '+str(n_clusters)+comment)
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="YOprcaik-T55" outputId="0b1ab3f6-0b10-485b-b28f-57f55a02f3f0"
# !ls 'drive/My Drive/Data/Document Clustering/50D 4k/centers/'
# + [markdown] colab_type="text" id="MNTIUfstP7-m"
# ### Cluster Similarity Calc
# + colab={} colab_type="code" id="IBIg2kv2Pq-s"
# Compare cluster centers between two time periods (A and B).
sub_dim = ''#@param {type:"string"}
dim = '50'#@param {type:"string"}
dim_comment = ' 4k'#@param {type:"string"}
comment = ''#@param {type:"string"}
period_A = '1990-2004'#@param {type:"string"}
n_clusters_A = 10#@param {type:"number"}
period_B = '2005-2007'#@param {type:"string"}
n_clusters_B = 8#@param {type:"number"}
# Load the saved cluster-center matrices for both periods.
clusters_A = pd.read_csv(datapath+'Document Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'centers/agglomerative ward '+period_A+' '+str(n_clusters_A)+comment,index_col=0)
clusters_B = pd.read_csv(datapath+'Document Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'centers/agglomerative ward '+period_B+' '+str(n_clusters_B)+comment,index_col=0)
# + colab={"base_uri": "https://localhost:8080/", "height": 187} colab_type="code" id="svjgK7h-QzDo" outputId="a4af7a68-713d-4f38-bc5d-2f99223c4f5f"
# Pairwise cosine similarity between every center of A and every center of B;
# one output row per A-cluster, with (cluster_2_i, similarity_i) column pairs.
names = []
names.append('cluster_1')
sim_A_to_B = []
for idx_A,vector_A in clusters_A.iterrows():
    inner_similarity_scores = []
    inner_similarity_scores.append(idx_A)
    for idx_B,vector_B in clusters_B.iterrows():
        distance_tmp = spatial.distance.cosine(vector_A.values, vector_B.values)
        similarity_tmp = 1 - distance_tmp
        inner_similarity_scores.append(idx_B)
        inner_similarity_scores.append(similarity_tmp)
        # Column names are assembled once, during the first A-row only.
        if idx_A == 0:
            names.append('cluster_2_'+str(idx_B))
            names.append('similarity_'+str(idx_B))
    sim_A_to_B.append(inner_similarity_scores)
    print('cluster of A:',idx_A,'to cluster of B:',idx_B,'similarity',similarity_tmp)
sim_A_to_B = pd.DataFrame(sim_A_to_B,columns=names)
sim_A_to_B.to_csv(datapath+'Document Clustering/'+dim+'D'+dim_comment+'/'+sub_dim+'similarity/agglomerative ward '+period_A+'_'+str(n_clusters_A)+'-'+period_B+'_'+str(n_clusters_B)+'.csv',index=False)
# + [markdown] colab_type="text" id="656Tbsv-g6_P"
# ### Cluster Classification similarity calc
# + cellView="both" colab={} colab_type="code" id="Rk9-AOebhBst"
# Compare period-A cluster centers against AI-Wiki classification vectors.
classification_type = "applications" #@param {type:"string"}
classification_type_dir = "application" #@param {type:"string"}
sub_dim = ''
dim = '50D 4k'#@param {type:"string"}
dim_comment = ''
comment = ''#@param {type:"string"}
period_A = '2017-2019'#@param {type:"string"}
n_clusters_A = 11#@param {type:"number"}
clusters_A = pd.read_csv(datapath+'Document Clustering/'+dim+'/'+sub_dim+'centers/agglomerative ward '+period_A+' '+str(n_clusters_A)+comment,index_col=0)
clusters_B = pd.read_csv(datapath+'Corpus/AI Wiki Classifications/'+classification_type+'/clean/vectors/all')
# Split the label column out so clusters_B holds only vector components.
clusters_B_labels = pd.DataFrame(clusters_B.clusters.values.tolist(),columns=['label'])
clusters_B = clusters_B.drop(['clusters'],axis=1)
# + colab={} colab_type="code" id="NfwgyHRWwoMI"
# Cosine similarity of each cluster center to each classification vector;
# columns are named after the classification labels (built on first A-row).
names = []
names.append('clusters')
sim_A_to_B = []
for idx_A,vector_A in clusters_A.iterrows():
    inner_similarity_scores = []
    inner_similarity_scores.append(idx_A)
    for idx_B,vector_B in clusters_B.iterrows():
        distance_tmp = spatial.distance.cosine(vector_A.values, vector_B.values)
        similarity_tmp = 1 - distance_tmp
        # inner_similarity_scores.append(idx_B)
        inner_similarity_scores.append(similarity_tmp)
        if idx_A == 0:
            # names.append('classification_'+str(idx_B))
            # names.append('similarity_'+str(idx_B))
            names.append(clusters_B_labels['label'][idx_B])
    sim_A_to_B.append(inner_similarity_scores)
    # print('cluster of A:',idx_A,'to cluster of B:',idx_B,'similarity',similarity_tmp)
sim_A_to_B = pd.DataFrame(sim_A_to_B,columns=names)
sim_A_to_B.to_csv(datapath+'Document Clustering/'+dim+'/'+sub_dim+classification_type_dir+' estimation/'+period_A+'_'+str(n_clusters_A)+' - AI Wiki Classifications.csv',index=False)
# + [markdown] colab_type="text" id="EEq8EGiieDb9"
# ##### Classification similarity labeling
# + colab={} colab_type="code" id="T4J82EWxeDwB"
# classification_type = "application" #@param {type:"string"}
# dim = '50D May 23'#@param {type:"string"}
# sub_dim = ''
# period_A = '1990-2018'#@param {type:"string"}
# n_clusters_A = '12'#@param {type:"string"}
# Reload the similarity matrix written above and drop the cluster-id column.
similarity = pd.read_csv(datapath+'Document Clustering/'+dim+'/'+sub_dim+classification_type_dir+' estimation/'+period_A+'_'+str(n_clusters_A)+' - AI Wiki Classifications.csv')
similarity = similarity.drop(['clusters'],axis=1)
# + colab={} colab_type="code" id="9TzDwmq9t-iY"
# Label each cluster with its two most similar classifications, joined by ' & '.
top_2 = pd.DataFrame(similarity.apply(lambda s: s.abs().nlargest(2).index.tolist(), axis=1))
top_2.columns = ['label']
top_2['label'] = top_2['label'].apply(lambda x: x[0]+' & '+x[1])
top_2.to_csv(datapath+'Document Clustering/'+dim+'/'+sub_dim+classification_type_dir+' estimation/top2 '+period_A+'_'+str(n_clusters_A)+'- AI Wiki Classifications.csv')
# + [markdown] colab_type="text" id="2h4_MdSKcj-U"
# ### Term-cluster tables
# + [markdown] colab_type="text" id="fy_AAUIV9iKz"
# #### Term-cluster tables: TFIDF words
# + [markdown] colab_type="text" id="_ouB8m_ZHMOv"
# * For word tables by TFIDF go up to the corresponding section.
# + [markdown] colab_type="text" id="0WCO8MPophfa"
# #### Term-cluster tables: sources
# + colab={} colab_type="code" id="-P3Rr-yFckTn"
# Term-cluster tables for sources: count documents per source and abbreviate.
sub_dim = ''#@param {type:"string"}
dim_comment = ' w2'#@param {type:"string"}
dim = '50'#@param {type:"string"}
n_clusters = 7#@param {type:"number"}
comment = ''#@param {type:"string"}
save_frequency_to_disk = False #@param {type:"boolean"}
cluster_file_name = datapath+'FastText doc clusters - SIP/'+dim+'D'+dim_comment+'/'+sub_dim+'agglomerative ward '+period+' '+str(n_clusters)+comment
clusters = pd.read_csv(cluster_file_name+'.csv')
# Map of cluster id -> row indices belonging to that cluster.
term_table = clusters.groupby('clusters').groups
# Document count per source across the whole year set, sorted descending.
year_sources_df = pd.DataFrame(year_sources)
year_sources_df['clusters'] = clusters['clusters']
year_sources_df.columns = ['sources','clusters']
year_sources_df = year_sources_df.groupby('sources').count()['clusters'].reset_index().sort_values(by=['clusters'], ascending=False)
year_abbreviations = []
for item in tqdm(year_sources_df['sources'],total=year_sources_df.shape[0]):
    year_abbreviations.append(aa.abbreviator(item))
year_sources_df.columns = ['sources','frequency']
year_sources_df['sources_abbr'] = year_abbreviations
if save_frequency_to_disk is True:
    save_dest_address = datapath+'corpus/improved_copyr_lemmatized_stopwords_removed_thesaurus_n-grams/source_frequency_abbr.csv'#@param {type:"string"}
    year_sources_df.to_csv(save_dest_address,index=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="VGVp-nfM1TJs" outputId="c6a95597-8383-42ac-cfc4-bc32e954ce84"
# term_table_df = pd.DataFrame([list(clusters['terms'][term_table[x]]) for x in term_table]).T.fillna('')
# score_table_df = [list(corpus[corpus['keyword']==clusters['terms'][term_table[x]]]['frequency']) for x in term_table]
# groups.to_csv('drive/My Drive/Data/FastText authkeyword clusters - 29 Oct 2019/'+dim+'D/'+sub_dim+'term_cluster-tables/agglomerative ward '+period+' '+str(n_clusters)+comment,index=False)
# Build per-cluster tables of source names (term_table_df) and their corpus
# frequencies (score_table_df), then write each as a CSV (clusters as columns).
term_table_df = []
score_table_df = []
for cluster_items in tqdm(term_table,total=len(term_table)):
    cluster_terms = list(clusters['sources'][term_table[cluster_items]])
    # Look up the corpus-wide frequency of every source in this cluster.
    cluster_scores = [year_sources_df[year_sources_df['sources']==x]['frequency'].values[0] for x in cluster_terms]
    score_table_df.append(cluster_scores)
    term_table_df.append(cluster_terms)
# BUG FIX: these variables were previously assigned the return value of
# to_csv(), which is None when a path is given — the DataFrames were silently
# discarded. Keep the DataFrames and write them in a separate step.
comment = ' - term_cluster'
term_table_df = pd.DataFrame(term_table_df).T
term_table_df.to_csv(datapath+'FastText doc clusters - SIP/'+dim+'D'+dim_comment+'/'+sub_dim+'term_cluster-tables/agglomerative ward '+period+' '+str(n_clusters)+comment+'.csv',index=False)
comment = ' - term_score'
score_table_df = pd.DataFrame(score_table_df).T
score_table_df.to_csv(datapath+'FastText doc clusters - SIP/'+dim+'D'+dim_comment+'/'+sub_dim+'term_cluster-tables/agglomerative ward '+period+' '+str(n_clusters)+comment+'.csv',index=False)
# + [markdown] colab_type="text" id="4dbxNqB0KLC4"
# #### Term-cluster tables: keywords
# + colab={} colab_type="code" id="3qQeSz_eKJjP"
# Write the keyword term/frequency tables (one column per cluster) to disk.
sub_dir = 'authkeyword/'
# sub_dir = 'keywordplus'
comment = ' - term_cluster'
dim_comment = ''
sub_dim = ''
pd.DataFrame(keywords_terms).T.to_csv(datapath+'FastText doc clusters - SIP/'+dim+'D'+dim_comment+'/'+sub_dim+'term_cluster-tables/'+sub_dir+'agglomerative ward '+period+' '+str(n_clusters)+comment+'.csv',index=False)
comment = ' - term_score'
pd.DataFrame(keywords_frequencies).T.to_csv(datapath+'FastText doc clusters - SIP/'+dim+'D'+dim_comment+'/'+sub_dim+'term_cluster-tables/'+sub_dir+'agglomerative ward '+period+' '+str(n_clusters)+comment+'.csv',index=False)
# + [markdown] colab_type="text" id="KEN_4fkHBzgQ"
# #### Term-cluster tables: topics
# + colab={} colab_type="code" id="3NMhi1HjBz9d"
# Same tables for the topic labels collected earlier.
sub_dir = 'topics/'
comment = ' - term_cluster'
dim_comment = ''
sub_dim = ''
pd.DataFrame(topic_terms).T.to_csv(datapath+'FastText doc clusters - SIP/'+dim+'D'+dim_comment+'/'+sub_dim+'term_cluster-tables/'+sub_dir+'agglomerative ward '+period+' '+str(n_clusters)+comment+'.csv',index=False)
comment = ' - term_score'
# NOTE(review): this path uses '_agglomerative ...' (leading underscore) unlike
# every other table above — looks unintentional; confirm before relying on it.
pd.DataFrame(topic_frequencies).T.to_csv(datapath+'FastText doc clusters - SIP/'+dim+'D'+dim_comment+'/'+sub_dim+'term_cluster-tables/'+sub_dir+'_agglomerative ward '+period+' '+str(n_clusters)+comment+'.csv',index=False)
# + [markdown] colab_type="text" id="9nk2SWqbD4NJ"
# ### Write document meta, cluster and vector to disk
#
#
#
#
# > Must run:
# >
# >
# > 1. Load vectors (option B)
# > 2. Vector preparation and year selection
# > 3. Clustering initialization
# > 4. Clustering (only the first one to get labels)
# > 5. Visualization first block to get the correct cluster_file_name
# > 6. Cluster center calc (except the last block. no need to wirte to disk again)
# > 7. All blocks below
#
#
#
#
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="qXZXjyt6Msd3" outputId="bec956e5-17c9-45a9-d1cd-b01a02b28da9"
# Assemble per-document metadata for the selected period into a DataFrame.
year_meta_df = pd.DataFrame(year_meta).reset_index(drop=True)
year_meta_df.columns = ['title','abstract','year','citations','usages','publication_name','iso_name','author_keywords','keywords_plus','wos_categories','research_area']
year_meta_df.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="60q28-8wOp9L" outputId="ffde0f09-55b0-44f4-82d5-c8c9b78de75c"
year_vectors_np = np.array(year_vectors)
year_vectors_df = pd.DataFrame(year_vectors_np)
year_vectors_df.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="ycH3CneTQdCc" outputId="48e5457b-139e-4c65-8492-36a5b08cd4ea"
# Distance from center
# Cosine similarity of each document vector to its assigned cluster's center.
similarities = []
for idx,doc_vec in tqdm(enumerate(year_vectors_np),total=year_vectors_np.shape[0]):
    distance_tmp = spatial.distance.cosine(doc_vec, cluster_centers[labels[idx]].values)
    similarity_tmp = 1 - distance_tmp
    similarities.append(similarity_tmp)
# + colab={} colab_type="code" id="KPbGLCmcD4iJ"
# NOTE(review): 'origingal_doc_index' is a typo, kept as-is because downstream
# cells (including the selection below) reference this exact column name.
year_vectors_df['origingal_doc_index'] = year_index
year_vectors_df = pd.concat([year_vectors_df, year_meta_df], axis = 1)
year_vectors_df['cluster_label'] = labels
year_vectors_df['similarity_to_center'] = similarities
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="kfgGzNqmWlzE" outputId="7a8fdac5-b4f7-4ac7-8b28-0f3f4ffe9186"
# Write
year_vectors_df.to_csv(datapath+'FastText doc clusters - SIP/50D/vectors and metadata/'+period+' year_doc_vector.csv',index=False)
period
# + colab={"base_uri": "https://localhost:8080/", "height": 419} colab_type="code" id="ozJf1PbV4II8" outputId="9e39f018-c75e-4e5d-ab14-e74cab89e374"
year_vectors_df[['origingal_doc_index','abstract']]
# + [markdown] colab_type="text" id="LY68GcfNztbz"
# ### Term Frequency Calc
# + colab={"base_uri": "https://localhost:8080/", "height": 122} colab_type="code" id="4HVdbNt5zt6e" outputId="d731944c-844d-4806-b6e0-99d5f619baee"
# Load dataset with processed abstracts
abstracts = []
abstract_corpus = pd.read_csv(datapath+'corpus/improved_copyr_abstract-sentences_cumulative/1990-2018 meta and data.csv')
abstract_corpus = abstract_corpus['processed_abstracts'].reset_index()
abstract_corpus.loc[25506]
# merged_left = pd.merge(left=year_vectors_df,right=abstract_corpus, how='left', left_on='origingal_doc_index', right_on='index')
# merged_left
| FastText_clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Homework05: MNIST Deep Convolutional GAN to Generate MNIST digits
# +
import numpy
import tensorflow
from tensorflow.keras.datasets import imdb
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import LSTM, SimpleRNN, Input
from tensorflow.keras.layers import Embedding, BatchNormalization
from tensorflow.keras.layers import Flatten, Reshape
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.layers import Conv1D, Conv2D
from tensorflow.keras.layers import MaxPooling1D, Conv2DTranspose
from tensorflow.keras.layers import Embedding
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.optimizers import Adam
from tqdm.notebook import tqdm
from scipy import stats
# fix random seed for reproducibility
numpy.random.seed(1)
# -
# #### Homework 05
#
# 1. Adapt the MNIST GAN implementation (see Lab09) to build CNN models instead of feedforwards.
# 2. Try different noise dimensions
# 3. Try implementing some training tricks suggested in https://github.com/soumith/ganhacks and study incremental improvements.
# #### Load Data
#
# +
import tensorflow
from tensorflow.keras.datasets.mnist import load_data
# Download/load MNIST as ((train images, train labels), (test images, test labels)).
(X_train, Y_train), (X_test ,Y_test) = load_data()
def preprocess(data):
    """Min-max scale `data` into [0, 1] and append a trailing channel axis."""
    lo = np.min(data)
    span = np.max(data) - lo
    scaled = (data - lo) / span
    # Equivalent to np.expand_dims(scaled, axis=-1).
    return scaled[..., np.newaxis]
# Scale pixel values to [0, 1] and add the channel axis the conv layers expect.
X_train = preprocess(X_train)
X_test = preprocess(X_test)
# Dimensionality of the generator's input noise vector.
z_dim = 100
# -
# #### Summarize the data
print('Train', X_train.shape, Y_train.shape) #Train (60000, 28, 28) (60000,)
print('Test', X_test.shape, Y_test.shape) #Test (10000, 28, 28) (10000,)
# #### Build model
# +
#GENERATOR
# Maps a z_dim noise vector to a 28x28x1 image: dense -> 7x7x128 feature map,
# then two stride-2 transposed convolutions (7 -> 14 -> 28), tanh output.
g = Sequential([
    Dense(7 * 7 * 128, input_shape=(z_dim,)),
    Reshape([7, 7, 128]),
    BatchNormalization(),
    Conv2DTranspose(64, kernel_size=5, strides=2, padding="same",activation="selu"),
    BatchNormalization(),
    Conv2DTranspose(1, kernel_size=5, strides=2, padding="same",activation="tanh")
])
# Compiled so g can be used standalone; training happens through `gan` below.
g.compile(loss='binary_crossentropy', optimizer="rmsprop", metrics=['accuracy'])
#DISCRIMINATOR
# 28x28x1 image -> real/fake probability: two stride-2 LeakyReLU convs with
# dropout, then a sigmoid unit.
d = Sequential([
    Conv2D(64, kernel_size=5, strides=2, padding="same", activation=LeakyReLU(0.2), input_shape=[28, 28, 1]), Dropout(0.4),
    Conv2D(128, kernel_size=5, strides=2, padding="same",activation=LeakyReLU(0.2)), Dropout(0.4),Flatten(),
    Dense(1, activation="sigmoid")
])
opt = Adam(learning_rate=0.01)
d.compile(loss="binary_crossentropy", optimizer=opt)
#GAN
# Stacked model: freeze the discriminator so gan.train_on_batch only updates
# the generator's weights.
d.trainable = False
inputs = Input(shape=(z_dim, ))
hidden = g(inputs)
output = d(hidden)
gan = Model(inputs, output)
gan.compile(loss='binary_crossentropy', optimizer='SGD')
# -
# -
# #### Visualization
# +
def plot_loss(losses):
    """Plot discriminator and generator loss curves over epochs.

    `losses` is a dict with keys "D" and "G"; each value is a list of
    per-epoch entries indexable as [loss, ...].
    """
    curves = (
        ([entry[0] for entry in losses["D"]], "Discriminator loss"),
        ([entry[0] for entry in losses["G"]], "Generator loss"),
    )
    plt.figure(figsize=(10, 8))
    for series, curve_label in curves:
        plt.plot(series, label=curve_label)
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()
def plot_generated(n_ex=10, dim=(1, 10), figsize=(12, 2)):
    """Sample `n_ex` noise vectors, run the global generator `g`, and show the
    resulting digits in a dim[0] x dim[1] grid (inverted grayscale)."""
    noise = np.random.normal(0, 1, size=(n_ex, z_dim))
    generated_images = g.predict(noise)
    # Drop the channel axis for imshow.
    generated_images = generated_images.reshape(n_ex, 28, 28)
    plt.figure(figsize=figsize)
    for i in range(generated_images.shape[0]):
        plt.subplot(dim[0], dim[1], i+1)
        plt.imshow(generated_images[i], interpolation='nearest', cmap='gray_r')
        plt.axis('off')
    plt.tight_layout()
    plt.show()
# -
# #### Train the model
# +
losses = {"D":[], "G":[]}


def train(epochs=1, plt_frq=1, BATCH_SIZE=128):
    """Run the alternating GAN training loop.

    Per batch: (1) train the discriminator `d` on a half-real/half-generated
    batch labelled 1/0, then (2) train the generator through the frozen
    discriminator via `gan` on fresh noise labelled 1. Losses are recorded
    once per epoch (from the final batch) into the global `losses` dict, and
    samples/curves are plotted every `plt_frq` epochs.
    """
    batchCount = int(X_train.shape[0] / BATCH_SIZE)
    print('Epochs:', epochs)
    print('Batch size:', BATCH_SIZE)
    print('Batches per epoch:', batchCount)
    for e in tqdm(range(1, epochs+1)):
        if e == 1 or e%plt_frq == 0:
            print('-'*15, 'Epoch %d' % e, '-'*15)
        for _ in range(batchCount):
            # Create a batch by drawing random index numbers from the training set
            image_batch = X_train[np.random.randint(0, X_train.shape[0], size=BATCH_SIZE)]
            # Create noise vectors for the generator
            noise = np.random.normal(0, 1, size=(BATCH_SIZE, z_dim))
            # Generate the images from the noise
            generated_images = g.predict(noise)
            X = np.concatenate((image_batch, generated_images))
            # Labels: 1 for the real half (first BATCH_SIZE rows), 0 for fakes.
            y1 = np.zeros(2*BATCH_SIZE)
            y1[:BATCH_SIZE] =1
            # Train the discriminator on the mixed batch.
            d.trainable = True
            d_loss = d.train_on_batch(X, y1)
            # Train the generator: new noise, target label 1, discriminator frozen.
            noise = np.random.normal(0, 1, size=(BATCH_SIZE, z_dim))
            y2 = np.ones(BATCH_SIZE)
            d.trainable = False
            g_loss = gan.train_on_batch(noise, y2)
            # BUG FIX: removed leftover debug statement print('test') that
            # printed once per batch.
        # Only store losses from final batch of each epoch.
        losses["D"].append(d_loss)
        losses["G"].append(g_loss)
        # Update the plots
        if e == 1 or e%plt_frq == 0:
            plot_generated()
            plot_loss(losses)
# -

train(epochs=100, plt_frq=40, BATCH_SIZE=128)
| 01_advanced_topics_machine_learning/assignments/notebooks/5_GANs/HW05_DCGAN_Generate_MNIST_digits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# #### This notebook was created to find the optimal $\Delta AF$ threshold for SNPs between paired samples using the *Replicate* Samples to approximate the noise in allele frequency calculations between samples.
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# +
import vcf
# %matplotlib inline
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.ticker as ticker
from pylab import plot, show, savefig, xlim, figure, hold, ylim, legend, boxplot, setp, axes
from itertools import compress
from pylab import MaxNLocator
import seaborn as sns; sns.set()
from matplotlib.colors import LogNorm
from matplotlib import gridspec
import ast
import itertools
import seaborn as sns
from sklearn.preprocessing import StandardScaler
import fastcluster
from sklearn import cluster, datasets
import scipy.cluster.hierarchy as hier
from sklearn.cluster import KMeans
import time
import sys
import Bio
from Bio.Alphabet import IUPAC
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast import NCBIXML
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation
from Bio import pairwise2
from Bio import SeqIO
from Bio.Graphics import GenomeDiagram
from Bio.SeqUtils import GC
from Bio.Align.Applications import MuscleCommandline
from StringIO import StringIO
from Bio import AlignIO
from Bio.Align import AlignInfo
from Bio.Seq import MutableSeq
import itertools
import networkx as nx
import scipy
from collections import Counter
from itertools import compress
#for exporting to Adobe Illustrator
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
import warnings
warnings.filterwarnings("ignore", module="matplotlib")
# -
# Global matplotlib styling shared by all figures in this notebook.
plt.style.use('ggplot')
plt.rcParams['lines.linewidth']=2
plt.rcParams['axes.facecolor']='1.0'
plt.rcParams['xtick.color']='black'
plt.rcParams['axes.grid']=True
plt.rcParams['axes.edgecolor']='black'
plt.rcParams['grid.color']= '1.0'
plt.rcParams.update({'font.size': 12})
# ### Import sample annotation for filtered Replicate & Longitudinal pairs
# Sample annotation tables for the filtered replicate and longitudinal pairs.
replicate_sample_annotation = pd.read_csv('/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/CSV_files/sample_annotation_files/Replicate_fastq_path_names_and_JankyPipe_tags_filtered_final.csv' , sep = ',')
longitudinal_sample_annotation = pd.read_csv('/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/CSV_files/sample_annotation_files/Longitudinal_fastq_path_names_and_JankyPipe_tags_filtered_final.csv' , sep = ',')
replicate_sample_annotation.head(n=2)
longitudinal_sample_annotation.head(n=2)
# #### Import SNPs (with $\Delta AF > 25 \% $) for Replicate Sample and Longitudinal Sample Pairs
# Pickled SNP tables: one row per SNP with delta-AF > 25% between a pair.
replicate_pair_SNPs = pd.read_pickle('/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/pickled_files/variant_calling/replicate_SNPs/SNPs_between_isolates_delta_25.pkl')
longitudinal_pair_SNPs = pd.read_pickle('/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/pickled_files/variant_calling/longitudinal_SNPs/SNPs_between_isolates_delta_25.pkl')
replicate_pair_SNPs.head()
longitudinal_pair_SNPs.head()
# Number of distinct replicate patients.
len( set(replicate_sample_annotation.patient_id) )
# ### Drop any glpK mutants that occur between any replicate pairs and longitudinal pairs
# #### glpK mutants present in replicate pairs
# Inspect and remove glpK mutants before downstream threshold analysis.
np.shape(replicate_pair_SNPs)
replicate_pair_SNPs[replicate_pair_SNPs.gene_symbol == 'glpK']
float( len(set(replicate_sample_annotation.patient_id)) )
# #### There were 5 *glpK* mutants (the 2 mutants from the Trauner et. al. dataset count as one since there were 3 isolates compared pair-wise) that were found among 62 replicate pairs
# Drop *glpK* mutants from the rest of the analysis
replicate_pair_SNPs = replicate_pair_SNPs[replicate_pair_SNPs.gene_symbol != 'glpK']
np.shape(replicate_pair_SNPs)
# #### glpK mutants present in longitudinal pairs
np.shape(longitudinal_pair_SNPs)
longitudinal_pair_SNPs[longitudinal_pair_SNPs.gene_symbol == 'glpK']
float( len(set(longitudinal_sample_annotation.patient_id)) )
# #### There were 4 *glpK* mutants that were found among 200 longitudinal pairs
# Drop *glpK* mutants from the rest of the analysis
longitudinal_pair_SNPs = longitudinal_pair_SNPs[longitudinal_pair_SNPs.gene_symbol != 'glpK']
np.shape(longitudinal_pair_SNPs)
# ### Split SNPs up into different types of replicate pairs
# Population tags: 'CP_REP' = CETR-POOLS, 'LC_REP' = low-coverage re-sequenced,
# 'TR_REP' = Trauner Patient-12 sputum replicates.
# SNPs between CETR-POOLS replicate pairs
cetr_pools_replicates = replicate_pair_SNPs[replicate_pair_SNPs.population == 'CP_REP']
np.shape(cetr_pools_replicates)
# SNPs between Re-Sequenced (due to low coverage) replicate pairs
low_cov_reseq_replicates = replicate_pair_SNPs[replicate_pair_SNPs.population == 'LC_REP']
np.shape(low_cov_reseq_replicates)
# SNPs between Trauner Patient 12 sputum sample replicate pairs
trauner_P12_replicates = replicate_pair_SNPs[replicate_pair_SNPs.population == 'TR_REP']
np.shape(trauner_P12_replicates)
# ### Replicate Sample Pairs
# +
# Histogram of delta-AF (1% bins) for all SNPs between replicate pairs; this
# approximates the allele-frequency noise floor between identical samples.
fig , ax = plt.subplots()
bins = np.arange( np.min(replicate_pair_SNPs.alt_AF_diff) , np.max(replicate_pair_SNPs.alt_AF_diff) + 0.01 , 0.01)
n , bins , patches = plt.hist(replicate_pair_SNPs.alt_AF_diff , bins = bins , color = 'k' , rwidth = 1.0 , edgecolor='white', linewidth=1.25)
plt.title('All Filtered Replicate Pairs', fontweight = 'bold' , fontsize = 12, color = 'k')
plt.xlabel('$\Delta$ AF', fontweight = 'bold' , fontsize = 12, color = 'k')
plt.ylabel('Number of SNPs', fontweight = 'bold' , fontsize = 12, color = 'k')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(labelcolor = 'k')
fig = plt.gcf()
fig.set_size_inches(10.0, 5.5)
fig.tight_layout()
plt.tick_params(axis='y', which='major', labelsize=12 , labelcolor = 'k')
plt.tick_params(axis='x', which='major', labelsize=12 , labelcolor = 'k')
# Data only contains SNPs with delta-AF > 0.25, so start the x-axis there.
ax.set_xlim(0.24 , 1.01)
file_name = '/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/figures/replicate_vs_longitudinal_analysis/distribution_of_delta_AFs_for_all_replicate_SNPs.pdf'
plt.savefig(file_name, bbox_inches='tight', dpi = 300 , transparent = True)
plt.show()
# -
# ### Longitudinal Pairs
# +
# Same delta-AF histogram for longitudinal pairs, for comparison against the
# replicate-pair noise distribution above.
fig , ax = plt.subplots()
bins = np.arange( np.min(longitudinal_pair_SNPs.alt_AF_diff) , np.max(longitudinal_pair_SNPs.alt_AF_diff) + 0.01 , 0.01)
n , bins , patches = plt.hist(longitudinal_pair_SNPs.alt_AF_diff , bins = bins , color = 'k' , rwidth = 1.0 , edgecolor='white', linewidth=1.25)
plt.title('All Filtered Longitudinal Pairs', fontweight = 'bold' , fontsize = 12, color = 'k')
plt.xlabel('$\Delta$ AF', fontweight = 'bold' , fontsize = 12, color = 'k')
plt.ylabel('Number of SNPs', fontweight = 'bold' , fontsize = 12, color = 'k')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(labelcolor = 'k')
fig = plt.gcf()
fig.set_size_inches(10.0, 5.5)
fig.tight_layout()
plt.tick_params(axis='y', which='major', labelsize=12 , labelcolor = 'k')
plt.tick_params(axis='x', which='major', labelsize=12 , labelcolor = 'k')
ax.set_xlim(0.24 , 1.01)
file_name = '/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/figures/replicate_vs_longitudinal_analysis/distribution_of_delta_AFs_for_all_longitudinal_SNPs.pdf'
plt.savefig(file_name, bbox_inches='tight', dpi = 300 , transparent = True)
plt.show()
# -
# ### Probe into the SNPs from the different types of replicates
# Low Coverage Samples that were Re-Sequenced
print 'Number of Replicate Pairs = ' + str( len(set(low_cov_reseq_replicates.patient_id)) )
pd.Series( Counter( list( replicate_pair_SNPs[replicate_pair_SNPs.population == 'LC_REP'].patient_id) ) ).sort_values(ascending = False)
# #### CETR - POOLS replicate pairs
print 'Number of Replicate Pairs = ' + str( len(set(cetr_pools_replicates.patient_id)) )
# +
fig , ax = plt.subplots()
n , bins , patches = plt.hist(replicate_pair_SNPs[replicate_pair_SNPs.population == 'CP_REP'].alt_AF_diff , bins = 58 , color = 'k' , rwidth = 1.0 , edgecolor='white', linewidth=0.5)
plt.title('CETR-POOLS Replicate Pairs', fontweight = 'bold' , fontsize = 12, color = 'k')
plt.xlabel('$\Delta$ AF', fontweight = 'bold' , fontsize = 12, color = 'k')
plt.ylabel('Number of SNPs', fontweight = 'bold' , fontsize = 12, color = 'k')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(labelcolor = 'k')
#ax.set_yscale("log", nonposy='clip')
fig = plt.gcf()
fig.set_size_inches(10.0, 5.5)
fig.tight_layout()
plt.show()
# -
pd.Series( Counter( list( replicate_pair_SNPs[replicate_pair_SNPs.population == 'CP_REP'].patient_id) ) ).sort_values(ascending = False).head(n=20)
# +
#Allele Frequence Changes for SNPs from low-coverage replicate pairs
SNP_AF_deltas = replicate_pair_SNPs[replicate_pair_SNPs.population == 'CP_REP'].alt_AF_diff.values
#create array of different Allele Frequency change thresholds
AF_delta_thresholds = np.arange(0.25 , 1.0 , 0.01)
#store the number of SNPs that pass the threshold
SNPs_passed_threshold = []
for delta_thershold in AF_delta_thresholds:
SNPs_passed_threshold.append( float( sum(SNP_AF_deltas >= delta_thershold) ) )
#plot threshold vs. number of SNPs that pass threshold
fig , ax = plt.subplots()
plt.plot( AF_delta_thresholds , SNPs_passed_threshold , color = 'black' , linewidth=2.0 )
plt.title('CETR-POOLS Replicate Pairs', fontweight = 'bold' , fontsize = 12, color = 'k')
plt.xlabel('$\Delta$ AF Threshold', fontweight = 'bold' , fontsize = 12, color = 'k')
plt.ylabel('Number of SNPs', fontweight = 'bold' , fontsize = 12, color = 'k')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params(labelcolor = 'k')
plt.tick_params(axis='y', which='major', labelsize=12 , labelcolor = 'k')
plt.tick_params(axis='x', which='major', labelsize=12 , labelcolor = 'k')
fig = plt.gcf()
fig.set_size_inches(10.0, 5.5)
fig.tight_layout()
plt.show()
# -
cetr_pools_replicates.sort_values(by='alt_AF_diff' , ascending = False).head(n=20)
# ### Trauner Patient-12 replicate pairs
# FIX: Python-2 print statement → Python-3 print() call.
print('Number of Replicate Pairs = ' + str( len(set(trauner_P12_replicates.patient_id)) ))
pd.Series( Counter( list( replicate_pair_SNPs[replicate_pair_SNPs.population == 'TRAUNER'].patient_id) ) ).sort_values(ascending = False)
# ## Compare Avg Number of SNPs per Replicate Pair and Longitudinal Pair
# #### Allele Frequency Changes for SNPs from replicate pairs
# +
SNP_AF_deltas = replicate_pair_SNPs.alt_AF_diff.values
#create array of different Allele Frequency change thresholds
AF_delta_thresholds = np.arange(0.25 , 1.00 , 0.01)
AF_delta_thresholds = [round(threshold,2) for threshold in AF_delta_thresholds]
#store the number of SNPs that pass the threshold
SNPs_passed_threshold = []
for delta_thershold in AF_delta_thresholds:
SNPs_passed_threshold.append( float( sum(SNP_AF_deltas >= delta_thershold) ) )
num_replicate_pairs = float( len(set(replicate_sample_annotation.patient_id)) )
avg_SNPs_passed_threshold_replicate = np.array( SNPs_passed_threshold ) / num_replicate_pairs
# -
# #### Allele Frequency Changes for SNPs from longitudinal pairs
# +
SNP_AF_deltas = longitudinal_pair_SNPs.alt_AF_diff.values
#create array of different Allele Frequency change thresholds
AF_delta_thresholds = np.arange(0.25 , 1.00 , 0.01)
AF_delta_thresholds = [round(threshold,2) for threshold in AF_delta_thresholds]
#store the number of SNPs that pass the threshold
SNPs_passed_threshold = []
for delta_thershold in AF_delta_thresholds:
SNPs_passed_threshold.append( float( sum(SNP_AF_deltas >= delta_thershold) ) )
num_longitudinal_pairs = float( len(set(longitudinal_sample_annotation.patient_id)) )
avg_SNPs_passed_threshold_longitudinal = np.array( SNPs_passed_threshold ) / num_longitudinal_pairs
# -
# Set $\Delta AF$ threshold ($\Delta AF_T$) where SNPs from replicate pairs represent $\approx 5 \%$ of SNPs between pooled replicate and longitudinal isolate pairs, weighted by the number of pairs in each group, find $\Delta AF_T$ such that
# $$\frac{ [(\text{Number of Replicate SNPs} \ge \Delta AF_T) / (\text{Number of Replicate Pairs})]}{[(\text{Number of Replicate SNPs} \ge \Delta AF_T) / (\text{Number of Replicate Pairs})] + [(\text{Number of Longitudinal SNPs} \ge \Delta AF_T) / (\text{Number of Longitudinal Pairs})]} = \frac{[\text{Avg Number of SNPs per Replicate Pair}]}{[\text{Avg Number of SNPs per Replicate Pair}] + [\text{Avg Number of SNPs per Longitudinal Pair}]} \approx 5\% $$
# +
#plot threshold vs. number of SNPs that pass threshold
################################################################################################################################################
fig , ax1 = plt.subplots()
ax1.plot( AF_delta_thresholds , avg_SNPs_passed_threshold_replicate , color = 'xkcd:bright red' , linewidth=2.5 , label = 'Replicate')
ax1.plot( AF_delta_thresholds , avg_SNPs_passed_threshold_longitudinal , color = 'xkcd:grass green' , linewidth=2.5 , label = 'Longitudinal' )
ax1.set_xlabel('$\Delta$ AF Threshold', fontweight = 'bold' , fontsize = 12, color = 'k')
ax1.set_ylabel('Average Number of SNPs per Pair', fontweight = 'bold' , fontsize = 12, color = 'k')
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.tick_params(labelcolor = 'k')
ax1.tick_params(axis='x', which='major', labelsize=12 , labelcolor = 'k')
ax1.tick_params(axis='y', which='major', labelsize=12 , labelcolor = 'k')
ax1.set_xlim(0.24 , 1.0)
ax1.legend(loc='right')
################################################################################################################################################
#second super imposed plot, graph the ratio of average number of longitudinal SNPs / avg number of replicate SNPs
################################################################################################################################################
ax2 = ax1.twinx()
#for each threshold, divide the avg number of SNPs from replicate isolates OVER the avg number of SNPs from replicate isolates + the avg number of SNPs from longitudinal isolates
weighted_proportion_SNPs_from_replicates = avg_SNPs_passed_threshold_replicate / (avg_SNPs_passed_threshold_replicate + avg_SNPs_passed_threshold_longitudinal)
ax2.plot( AF_delta_thresholds , weighted_proportion_SNPs_from_replicates , color = 'xkcd:bright blue' , linewidth=2.5 , label = 'Weighted Proportion of Replicate SNPs')
#convert weighted proportion SNPs from replicates to percentages & round to the nearest whole number
weighted_percentage_replicate_SNPs_rounded = np.array( [int(np.round(float(x) , 0)) for x in 100*weighted_proportion_SNPs_from_replicates] )
#find the delta & proportion at which weighted proportion of replicate SNPs is about 5% of weighted SNPs from replicate + longitudinal pairs
delta_AF_threshold = list(compress(AF_delta_thresholds, list( weighted_percentage_replicate_SNPs_rounded <= 5 )))[0]
weighted_proportion_SNPs_from_replicates_at_threshold = list(compress(weighted_proportion_SNPs_from_replicates, list( weighted_percentage_replicate_SNPs_rounded <= 5 )))[0]
ax2.plot([delta_AF_threshold , delta_AF_threshold] , [0 , weighted_proportion_SNPs_from_replicates_at_threshold] , color = 'k' , linewidth = 1.5 , linestyle = '--' , alpha = 0.5)
ax2.plot([delta_AF_threshold , 1.0] , [weighted_proportion_SNPs_from_replicates_at_threshold , weighted_proportion_SNPs_from_replicates_at_threshold] , color = 'k' , linewidth = 1.5 , linestyle = '--', alpha = 0.5)
ax2.spines['top'].set_visible(False)
plt.rcParams["axes.grid"] = False
ax2.tick_params(labelcolor = 'k')
ax2.tick_params(axis='x', which='major', labelsize=12 , labelcolor = 'k')
ax2.tick_params(axis='y', which='major', labelsize=12 , labelcolor = 'k')
ax2.legend(loc='upper right')
################################################################################################################################################
fig = plt.gcf()
fig.set_size_inches(10.0, 5.5)
fig.tight_layout()
file_name = '/n/data1/hms/dbmi/farhat/Roger/inhost_TB_dynamics_project/figures/replicate_vs_longitudinal_analysis/avg_num_SNPs_vs_delta_AF_threshold.pdf'
plt.savefig(file_name, bbox_inches='tight', dpi = 300 , transparent = True)
plt.show()
# -
# FIX: Python-2 print statements are syntax errors under the Python-3 kernel;
# converted to print() calls. No other behavior changed.
print(delta_AF_threshold)
print(weighted_proportion_SNPs_from_replicates_at_threshold) #there are over 20x as many avg SNPs per longitudinal pair than there are avg SNPs per replicate pair
# Number of Replicate Pairs
num_replicate_pairs = float( len(set(replicate_sample_annotation.patient_id)) )
print(num_replicate_pairs)
# Number of SNPs between Replicate Pairs with $\Delta AF \ge 70\%$
num_replicate_SNPs_above_threshold = sum( replicate_pair_SNPs.alt_AF_diff.values >= 0.70 )
print(num_replicate_SNPs_above_threshold)
# Number of Longitudinal Pairs
num_longitudinal_pairs = float( len(set(longitudinal_sample_annotation.patient_id)) )
print(num_longitudinal_pairs)
# Number of SNPs between Longitudinal Pairs with $\Delta AF \ge 70\%$
num_longitudinal_SNPs_above_threshold = sum( longitudinal_pair_SNPs.alt_AF_diff.values >= 0.70 )
print(num_longitudinal_SNPs_above_threshold)
# Sanity check that this $\Delta AF \ge 70\%$ results in $\approx 5\%$ SNPs from Replicate Pairs
( (float(num_replicate_SNPs_above_threshold) / num_replicate_pairs) / ( (float(num_replicate_SNPs_above_threshold) / num_replicate_pairs) + (float(num_longitudinal_SNPs_above_threshold) / num_longitudinal_pairs) ) ) * 100
# Take a look at some of the longitudinal SNPs passing this threshold
longitudinal_pair_SNPs[longitudinal_pair_SNPs.alt_AF_diff >= 0.70].sort_values(by = 'alt_AF_diff').head(n=20)
| (I) Replicate and Longitudinal SNP comparison to find optimal AF change threshold% for in-host SNPs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from scipy.optimize import minimize, curve_fit
from scipy.misc import derivative
from IPython.display import display, Math, Latex
def bmatrix(a, pref=''):
    """Render array *a* as a LaTeX bmatrix wrapped in an IPython Math object.

    *pref* is prepended verbatim (e.g. 'x = ') before the matrix.
    """
    body = str(a).replace('[', '').replace(']', '')
    rows = [r'\begin{bmatrix}']
    for row in body.splitlines():
        rows.append(' ' + ' & '.join(row.split()) + r'\\')
    rows.append(r'\end{bmatrix}')
    return Math(pref + '\n'.join(rows))
def resources(x):
    """Format a (K, L) resource bundle as an IPython Math object."""
    capital, labour = x[0], x[1]
    return Math('K = {:.4f}, L = {:.4f}'.format(capital, labour))
# +
outer_coefs = np.array([3.2, 5.8, 14.2, 8.7])
inner_coefs = np.array([4, 8, 0, 3])
p = np.array([20, 25, 15, 15])
I = 1000
bounds = [(coef, None) for coef in inner_coefs]
def partial_derivative(f, var, point=None):
    """Numerically approximate the partial derivative of *f* with respect to
    argument index *var*, evaluated at the vector *point*.

    FIX: the old signature used a mutable default argument (point=[]) — a
    classic Python pitfall; with the empty default `point[var]` raised
    IndexError anyway, so switching the sentinel to None is backward
    compatible for every usable call.
    """
    if point is None:
        point = []
    args = list(point)

    def wraps(x):
        # Vary only coordinate `var`, holding the others fixed.
        args[var] = x
        return f(args)

    # scipy central-difference derivative with a small step.
    return derivative(wraps, point[var], dx = 1e-6)
def U(x):
    """Stone-Geary style log utility: sum_i outer_i * log(x_i - inner_i).

    Uses the module-level coefficient vectors `outer_coefs` and
    `inner_coefs`; each x_i must exceed its inner (subsistence) coefficient.
    """
    total = 0
    for weight, quantity, floor in zip(outer_coefs, x, inner_coefs):
        total = total + weight * np.log(quantity - floor)
    return total
def solve(args, objective, constraints, name):
    """Minimize *objective* starting from *args* with SLSQP, display the
    solution vector as a LaTeX matrix labelled *name*, and return it.

    Relies on the module-level `bounds` (lower bounds = inner_coefs) and on
    the notebook helpers `bmatrix`/`display` for output.
    """
    solution = minimize(
        objective,
        args,
        method='SLSQP',
        bounds=bounds,
        constraints=constraints
    )
    display(bmatrix(solution.x, '{} = '.format(name)))
    return solution.x
# -
# ## Маршаліанський підхід
args = np.array([4.2, 8.7, 0.2, 3.7])
objective = lambda x: -U(x)
constraints = {
'type': 'ineq',
'fun': lambda x: I - np.dot(p, x)
}
x = solve(args, objective, constraints, 'x')
display(Math("f(x) = {:.4f}".format(-objective(x))))
L = [partial_derivative(U, i, x) / p[i] for i in range(4)]
display(bmatrix(L, 'L = '))
#display(Math("L = {:.4f}".format(partial_derivative(U, 0, x) / p[0])))
# ## Хіксіанський підхід
args = np.array([4.7325, 8.2082228, 0.7006161, 3.66873595])
objective = lambda h: np.dot(p, h)
constraints = {
'type': 'ineq',
'fun': lambda h: U(h) - U(inner_coefs + 1)
}
h = solve(args, objective, constraints, 'h')
# BUG FIX: report the minimized expenditure at the Hicksian solution h, not at
# the Marshallian bundle x computed in the previous section.
display(Math("f(h) = {:.4f}".format(objective(h))))
L = [p[i] / partial_derivative(U, i, h) for i in range(4)]
display(bmatrix(L, 'L = '))
#display(Math("L = {:.4f}".format(p[0]/partial_derivative(U, 0, h))))
# Observed production data: capital (K), labour (L), output (F).
# NOTE(review): 505750 is ~10x every other K entry — possibly a typo for
# 50575; confirm against the original data source before trusting the fit.
K = [49920, 45750, 50550, 505750, 47820, 47900, 51900, 45970, 48030, 48100]
L = [10680, 10310, 10680, 10800, 10040, 10420, 10940, 10710, 9900, 9930]
F = [2860, 2940, 2950, 2880, 2510, 2690, 2990, 2800, 3000, 3070]
# ## Мультиплікативна виробнича функція
# +
def cobb_douglas(x, A, a, b):
    """Two-factor Cobb-Douglas production function F = A * K**a * L**b,
    where x = (K, L) holds the capital and labour inputs."""
    capital, labour = x[0], x[1]
    return A * capital ** a * labour ** b
p0 = [3.155989, 0.68368306, 0.13993322]
coeffs, _ = curve_fit(cobb_douglas, (K, L) , F, p0)
display(Math("F = {:.2f} K^{{{:.4f}}} L^{{{:.4f}}}".format(coeffs[0], coeffs[1], coeffs[2])))
# -
# ## Ефект масштабу та еластичність заміщення
# +
if abs(coeffs[1] + coeffs[2] - 1) < 1e-3:
print('Постійний прибуток до масштабу')
elif coeffs[1] + coeffs[2] > 1:
print('Прибуток збільшується із масштабом')
else:
print('Прибуток зменшується із масштабом')
print('Еластичність заміщення для функції Кобба-Дугласа const = 1')
# +
price = 70
w = [100, 100]
def pi(x):
return np.dot(w, x) - price * cobb_douglas(x, coeffs[0], coeffs[1], coeffs[2])
bounds1 = [
(0, None),
(0, None),
]
# -
# ## Короткостроковий прибуток
# +
constraint = {
'type': 'ineq',
'fun': lambda x: 10 - (x[0] **2 + x[1] ** 2)**5
}
short_solution = minimize(pi, [1, 1], method='SLSQP', bounds=bounds1, constraints=constraint)
display(resources(short_solution.x))
display(Math('profit = {:.4f}'.format(-pi(short_solution.x))))
# -
# ## Довгостроковий прибуток
# +
def price_func(x):
    """Inverse demand curve: output price as a linearly decreasing
    function of quantity *x*."""
    return 8310/83 - x / 80
def wK(x):
    """Monopsony price of capital: rises linearly with capital use x[0]."""
    return x[0] * 0.025 - 1

def wL(x):
    """Monopsony price of labour: rises linearly with labour use x[1]."""
    return x[1] * 0.025 - 2

def wM(x):
    """Pair of endogenous resource prices (capital price, labour price)."""
    return (wK(x), wL(x))
def monopoly_pi(x):
    """Negative monopoly profit at resource bundle x = (K, L).

    Computed as resource cost minus revenue, so minimizing this maximizes
    profit. Uses the fitted Cobb-Douglas coefficients `coeffs`, the
    endogenous resource prices `wM`, and the inverse demand `price_func`.
    """
    q = cobb_douglas(x, coeffs[0], coeffs[1], coeffs[2])
    mw = wM(x)
    return mw[0] * x[0] + mw[1] * x[1] - price_func(q) * q
monopoly_solution = minimize(monopoly_pi, [1, 1], method='SLSQP', bounds=bounds1, constraints=[])
print("Витрати ресурсів: ")
display(resources(monopoly_solution.x))
print("Ціни ресурсів: ")
display(resources((wK(monopoly_solution.x), wL(monopoly_solution.x))))
print("Ціна: ", price_func(cobb_douglas(monopoly_solution.x, coeffs[0], coeffs[1], coeffs[2])))
print("Обсяг продукції: ", cobb_douglas(monopoly_solution.x, coeffs[0], coeffs[1], coeffs[2]))
| eco_systems/laba3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# metadata:
# interpreter:
# hash: aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 401} id="vGKPHU6wYdwu" outputId="6cb507ac-f26f-49cf-87fa-c865ee683217"
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
earnings = pd.read_csv("./data/weeklyearnings.csv")
housing = pd.read_csv("./data/housing.csv")
housing_list = []
earnings_list = []
# Inner-join the two time series on DATE by brute-force lookup: keep only
# earnings dates that also appear in the housing series, so both frames
# end up aligned row-for-row.
for (index, earn) in earnings.iterrows():
    found_value = housing[housing["DATE"] == earn["DATE"]]
    if len(found_value) > 0:
        # NOTE(review): found_value["MSPUS"] is a one-element Series, not a
        # scalar — matplotlib tolerates it, but .iloc[0] would be cleaner.
        housing_list.append({"DATE": earn["DATE"], "MSPUS": found_value["MSPUS"]})
        earnings_list.append({"DATE": earn["DATE"], "CES0500000011": earn["CES0500000011"]})
housing_df = pd.DataFrame(housing_list)
earnings_df = pd.DataFrame(earnings_list)
fig = plt.figure(figsize=(30, 10))
ax1 = fig.add_subplot(111)
ax1.plot(earnings_df["DATE"], earnings_df["CES0500000011"], 'b-')
ax1.set_ylabel('Weekly Earnings', color="b")
ax2 = ax1.twinx()
ax2.plot(earnings_df["DATE"], housing_df["MSPUS"], 'r-')
ax2.set_ylabel('Housing Prices', color='r')
for tl in ax2.get_yticklabels():
tl.set_color('r')
fig.autofmt_xdate()
# + id="Q_vPvHvKUu28"
| weekly_earnings_housing_prices/WeeklyEarningsOverHousingPrices.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Prediction of Campus Placement
#
#
# In this project we use the logistic regression machine learning algorithm and RandomForestRegression to predict the placement of students, using the [Kaggle dataset](https://www.kaggle.com/benroshan/factors-affecting-campus-placement).
#
# The Dataset consists of the qualitative and the quantitative parameters of the students that are necessary for improving the prediction.
# ## Section 1 : Business Understanding
#
# First we analyze some important Question to understand the data for better prediction.
#
# ## Questions
#
# 1. Which factor influenced a candidate in getting placed?
# 2. Does percentage matters for one to get placed?
# 3. Which degree specialization is much demanded by corporate?
# 4. what is the Prediction percentage using differnt ML algorithms?
# ## Section 2: Data Understanding
#
# What is in it?
#
# This data set consists of Placement data of students in campus. It includes secondary and higher secondary school percentage and specialization. It also includes degree specialization, type and Work experience and salary offers to the placed students
# +
#importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestRegressor
# -
# This data set consists columns are
#
# sscb- Secondary School Certificate Board-> Which is lower secondary school(1st-10th Grade)
#
# hscb-Higher Secondary Certificate Board-> Which is higher secondary school(11th and 12th Grade)
#
# sl_no : serial number<br>
# gender : [male,female]<br>
# ssc_p : means--> 10th class percentage <br>
# ssc_b : which board to passed out 10th. <br>
# hsc_p : means--> 12th class percentage <br>
# hsc_b : which board to passed out 12th. <br>
# hsc_s : which stream he choose (science,commerce,arts) <br>
# degree_p : means--> Bachelor degree percentage<br>
# degree_t : which strem choose for bachelor <br>
# workex : It has a work experience or not.<br>
# etest_p : entrance test percentage<br>
# specialisation : Master degree in Mkt&HR or Mkt&Fin <br>
# mba_p : means--> Master degree percentage<br>
# status : He/She got placed or not in campus placement. <br>
# salary : placement packages.<br>
#
# Grade represents their year of study in school.
# Boards are different curriculum/education system adapted by educational institutions.
# ### Gathering data
#load the dataset
df=pd.read_csv("dataset/Placement_Data_Full_Class.csv")
#see the 5 first data
df.head()
#set the sl_no as a row no
df.set_index('sl_no',inplace=True)
df.head()
#check datatype or null
df.info()
#see some statics data
df.describe()
# # cleaning the data
#check number of rows and columns
print(df.shape[0]) #no of rows
print(df.shape[1]) #no of columns
# check null values in df
df.isnull().sum()
# only salary Column has 67 null values<br>
# so we delete or replace this null value with some value like mean, median or 0.
# The missing salary values are filled with the column mean: it preserves the average of the salary column and gives better performance at prediction time if this column is used.
#fill salary null value with mode
df["salary"]=df["salary"].fillna(df["salary"].mean())
#check datatypes
df.dtypes
# ## Handling with Catagorical Data
# we convert object type data into numerical datatype
df['gender']=df['gender'].astype('category')
df['status']=df['status'].astype('category')
df['workex']=df['workex'].astype('category')
df['hsc_b']=df['hsc_b'].astype('category')
df['ssc_b']=df['ssc_b'].astype('category')
# +
#check how many unique data in these dataframe
print(df['gender'].unique())
print(df['status'].unique())
print(df['workex'].unique())
print(df['hsc_b'].unique())
print(df['ssc_b'].unique())
# +
# Replace the value in 0,1 or quantitative parameters
df["gender"].replace(["F","M"],[0,1],inplace=True)
df["status"].replace(['Placed',"Not Placed"],[1,0],inplace=True)
df['workex'].replace(to_replace ="Yes", value =1,inplace=True)
df['workex'].replace(to_replace ="No", value =0,inplace=True)
# -
df["gender"].dtype
df.head()
#create a new dataframe of only numerical values
numerical_df=df.select_dtypes(["float64","int64"])
# # Visualizing the data
# show the percentage of male or female
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.axis('equal')
langs = ['Male','Female']
students = [139,76]
ax.pie(students, labels = langs,autopct='%1.2f%%',colors = ["#1f77b4", "#ff7f0e"])
plt.title('Pie chart ')
plt.show()
# +
#show the no. of students placed or not placed.
# BUG FIX: `status` was re-encoded to 1/0 earlier in the notebook, so
# comparing against the string "Placed" always matched zero rows; compare
# against 1 instead (matches the bars plotted below).
print("Number of not placed Student "+ str(len(df[df["status"]==0])))
print("Number of placed Student "+ str(len(df[df["status"]==1])))
plt.bar([0],height=len(df[df["status"]==0]))
plt.bar([1],height=len(df[df["status"]==1]))
plt.xlabel("Status")
plt.ylabel("Count")
plt.xticks(np.arange(2), ('Not placed', 'Placed'))
plt.title("No of Student placed\n")
plt.show()
# -
#average percentage of all education qualification
values = [(numerical_df['ssc_p'].mean()),(numerical_df['hsc_p'].mean()),(numerical_df['mba_p'].mean()),(numerical_df['degree_p'].mean())]
print('scc_p mean = ' +str(numerical_df['ssc_p'].mean()))
print('hsc_p mean = ' +str(numerical_df['hsc_p'].mean()))
print('mba_p mean = ' +str(numerical_df['mba_p'].mean()))
print('degree_p mean = ' +str(numerical_df['degree_p'].mean()))
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
names = ['ssc_p','hsc_p','mba_p','degree_p']
ax.set_ylabel('Average percentages')
ax.set_title('Average Percentage')
ax.bar(names,values,width = 0.5,color=["#2ca02c"])
plt.show()
#
#
# # Questions 1.
# # Which factor influenced a candidate in getting placed?
#
#
# +
#show the relation between diffrent qualification placement status usinng correlation.
print('ssc_p to placement ', round(numerical_df['status'].corr(numerical_df['ssc_p'])*100,1),'%')
print('hsc_p to placement ', round(numerical_df['status'].corr(numerical_df['hsc_p'])*100,1),'%')
print('mba_p to placement ', round(numerical_df['status'].corr(numerical_df['mba_p'])*100,1),'%')
print('degree_p to placement ', round(numerical_df['status'].corr(numerical_df['degree_p'])*100,1),'%')
print('etest_p to placement ', round(numerical_df['status'].corr(numerical_df['etest_p'])*100,1),'%')
print('workexp to placement ', round(numerical_df['status'].corr(numerical_df['workex'])*100,1),'%')
# -
# Answer
# 1. ssc_p feature data factor influenced a candidate in getting placed.
#
# When we look at the correlation between the features and placement, ssc_p is the feature most correlated with placement.
#
# I know it is some tricky to say that senior secondary(ssc_p)percentage is not so much help in real world placement.
# # Question 2.
# # Does percentage matters for one to get placed?
#
# Answer
#
#
# After examining the correlation between the features and placement, we can
# definitely say that percentage matters for getting placed.
df_grade = df.groupby(['status']).mean()[['hsc_p', 'degree_p', 'mba_p']].reset_index()
# +
#Student Grades and Campus Placement
import plotly
import plotly.graph_objs as go
from plotly.subplots import make_subplots
plotly.offline.init_notebook_mode(connected=True)
import matplotlib.pyplot as plt
# %matplotlib inline
trace1 = go.Bar(
x = ['High School', 'Bachelor', 'MBA'],
y = df_grade[df_grade['status']==0].drop('status', axis=1).values[0],
name = 'Not Placed'
)
trace2 = go.Bar(
x = ['High School', 'Bachelor', 'MBA'],
y = df_grade[df_grade['status']==1].drop('status', axis=1).values[0],
name = 'Placed'
)
data = [trace1, trace2]
layout = go.Layout(
yaxis = dict(title = 'Grade'),
xaxis = dict(title = 'Stage'),
title = 'Student Grades and Campus Placement')
fig = go.Figure(data=data, layout=layout)
fig.show()
# -
# # Question 3.
# # Which degree specialization is much demanded by corporate?
#
# +
# which specialisation is more demand in campus selection
plt.bar([1],height=len(df[df["specialisation"]=="Mkt&HR"]))
plt.bar([0],height=len(df[df["specialisation"]=="Mkt&Fin"]))
plt.xlabel("specialisation in Mkt&Fin and Mkt&HR")
plt.ylabel("no.of specialisation")
print("specialisation in Mkt&Fin "+ str(len(df[df["specialisation"]=="Mkt&Fin"])))
print("specialisation in Mkt&HR "+ str(len(df[df["specialisation"]=="Mkt&HR"])))
plt.xticks(np.arange(2), ('Mkt&Fin', 'Mkt&HR'))
plt.title("which specialisation is more demand in campus selection\n")
plt.show()
# -
# # Question 4.
# # Play with the data conducting all statistical tests.
#
# Train the data and create a model to predict the placed or not.
len(df[['ssc_p','hsc_p','degree_p','workex','etest_p','mba_p']])
#
# # Splitting the dataset into the Training set and Test set
#
x_train , x_test , y_train , y_test = train_test_split(
df[['ssc_p','hsc_p','degree_p','workex','etest_p','mba_p']],
df.status,
test_size=0.2)
x_train.head()
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(x_train)
X_test = sc.transform(x_test)
# create a logistic regresssion model and fit it.
model=LogisticRegression()
model.fit(x_train,y_train)
prediction=model.predict(x_test)
accuracy_score(y_test,prediction)
# +
# Fitting Random Forest Regression to the Training set
# Instantiate model with 105 decision trees
rf = RandomForestRegressor(n_estimators = 105, random_state = 41)
# BUG FIX: train on the training split — the original fitted on the test
# split, leaking the evaluation data into the model.
rf.fit(x_train, y_train)
# -
# Predicting the Test set results
y_pred = rf.predict(x_test)
# BUG FIX: score against the true labels; scoring against the model's own
# predictions (y_pred) trivially yields a perfect score.
rf.score(x_test, y_test)
y_pred
# +
# Fitting Random Forest Classification to the Training set
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(x_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(x_test)
# -
y_pred
accuracy_score(y_test,y_pred)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
| .ipynb_checkpoints/placement_check-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Check time in SAPHIR "synths"
# ## a synth is advected data from one orbit of the satellite
# ### advected forward or backward
# #### <NAME> and <NAME> May 2018
#
# Matlab codes have been used to create a set of hourly "synths" consisting of NaN-filled arrays with one orbit swath of data advected to the (hourly) target hour.
#
# Debugging observation time
# ---------
# ---------
import numpy as np
import glob # to list available files
import scipy.io as spio # to read the .mat files|
import datetime
# ------------
# # Set time of desired hourly product:
# ------------
# +
now = datetime.datetime(2012,1,2,0,0) + datetime.timedelta(hours=3)
prodtime = now - datetime.datetime(1970, 1, 1)
print(now.strftime('%Y%m%dT%H,'), ' time from 1970-1-1 is', prodtime)
# -
# # Now print time arrays in all synths (forward or backward)
# +
wildcard = '../SAPHIR-morphing-data-201201_7days/Synths/*backward*' + now.strftime('%Y%m%dT%H') +'*mat'
# Loop over every backward-advected synth file matching the target hour.
for idx,filename in enumerate(glob.iglob(wildcard), start=0):
    print(filename)
    # squeeze_me + struct_as_record=False expose the MATLAB struct fields as
    # Python attributes (synth.sourcetime, synth.gridded).
    mat = spio.loadmat(filename, squeeze_me=True, struct_as_record=False)
    synth = mat['synth']
    obtime = synth.sourcetime
    print( np.size(np.where(~np.isnan(synth.gridded))), ' non-missing values')
    print( np.min(obtime) , ' to ', np.max(obtime) )
    # sourcetime appears to be fractional days since 1970-01-01 (converted
    # below via seconds = 86400 * days) — TODO confirm against the Matlab writer.
    print( datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds= 3600.*24*np.min(obtime)) , ' to ', \
        datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds= 3600.*24*np.max(obtime)) )
    print()
# +
wildcard = '../SAPHIR-morphing-data-201201_7days/Synths/*forward*' + now.strftime('%Y%m%dT%H') +'*mat'
for idx,filename in enumerate(glob.iglob(wildcard), start=0):
print(filename)
mat = spio.loadmat(filename, squeeze_me=True, struct_as_record=False)
synth = mat['synth']
obtime = synth.sourcetime
print( np.size(np.where(~np.isnan(synth.gridded))), ' non-missing values')
print( np.min(obtime) , ' to ', np.max(obtime) )
print( datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds= 3600.*24*np.min(obtime)) , ' to ', \
datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds= 3600.*24*np.max(obtime)) )
print()
# -
# NOTE(review): removed stray `test` identifier — it raised a NameError when
# the cell was executed.
| check_time_in_synths.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Training & Transformation
#
# The tidy data collected during scrapes needs to be widened (one-hot encoded or embedded) prior to training. FastMap can then pin a model with a X_transformed for future predictions.
#
# NOTE: The perceptron isn't behaving well with this dataset. Moving to sklearn's logistic regression for a bit more stability and pipeline constructs.
# General Imports
import pandas as pd
import numpy as np
from db import get_session
from models import *
# ## Filter relevant data (within square boundary)
#
# Given a model centroid, get data within given radius. For earlier versions, this can be done as a square, but future methods may require redesigning query filter statement.
# +
def get_near_data(center_coord, radius):
    """Fetch SearchResults rows within +/- *radius* degrees of the
    (latitude, longitude) pair *center_coord*.

    Returns a list of (latitude, longitude, category, num_unique) tuples.
    The bound is applied per axis, so the region is a square, not a circle
    (matches the notebook text above).
    """
    lat_range = [center_coord[0]-radius, center_coord[0]+radius]
    lon_range = [center_coord[1]-radius, center_coord[1]+radius]
    with get_session() as session:
        # .all() materializes the rows before the session closes.
        response = session.query(
            SearchResults.latitude, SearchResults.longitude,
            SearchResults.category, SearchResults.num_unique).\
            filter(
                SearchResults.latitude > lat_range[0],
                SearchResults.latitude < lat_range[1],
                SearchResults.longitude > lon_range[0],
                SearchResults.longitude < lon_range[1]).all()
    return response
data = get_near_data((32.715736, -117.161087), 0.5)
# -
# Transform data into dataframe for encoding
columns = ['lat', 'lon', 'cat', 'num_unique']
df = pd.DataFrame(data, columns=columns)
df.head()
# One-hot encoding is probably not ideal, since categories change. A simple word embedding for the category is likely a far more robust solution in the event that categories are introduced that don't match existing ones. Then the one-hot isn't needed, just the expansion of the vector list!
# Quick example with get_dummies. Not suitable for final use.
X = pd.get_dummies(df, columns=['cat']).drop(columns='num_unique').to_numpy()
y = df.num_unique.to_numpy().reshape(-1,1)
# +
# DO IT MANUALLY CUZ F' SCIKIT LEARN
from read_query import list_categories
from app_global import g
def get_categories():
    """Return the {category_name: id} mapping, cached on the app-global
    object `g` so the database is queried at most once per run."""
    if not hasattr(g, 'categories'):
        g.categories = list_categories(with_id=True)
    return g.categories

def encode_cat(x):
    """One-hot encode category name *x* as a numpy vector.

    NOTE(review): assumes category ids are 1-based and contiguous (id-1
    indexes the vector); raises KeyError for categories added to the DB
    after the cache is built — confirm against list_categories().
    """
    categories = get_categories()
    temp_arr = np.zeros(len(categories.keys()))
    temp_arr[categories[x]-1] = 1
    return temp_arr
## Try to create binary vector for each row.
df.cat.apply(encode_cat)
# +
def transform_observation(obs):
    """Widen one (lat, lon, category) observation into a flat numeric
    vector: [lat, lon, one-hot(category)]."""
    assert len(obs) == 3
    lat, lon, category = obs
    return np.concatenate(([lat], [lon], encode_cat(category)), axis=None)


def truncate_x(obs):
    """Feature part of a raw DB row: the first three fields."""
    return obs[:3]


def truncate_y(obs):
    """Target part of a raw DB row: the last field (num_unique)."""
    return obs[-1]
transform_observation(truncate_x(data[0]))
# +
def split_transform(data):
    """Split raw DB rows into model inputs X and scaled targets y.

    X: float32 array of [lat, lon, one-hot(category)] rows.
    y: float32 column vector of num_unique counts, divided by 50 so the
       sigmoid-activated perceptron can fit values in (0, 1).
    """
    # The perceptron is sigmoid activated. y needs to be scaled to 0:1.
    X_raw = list(map(
        truncate_x, data
    ))
    X = np.array(list(map(
        transform_observation, X_raw
    )), dtype='f')
    y = np.array(list(map(
        truncate_y, data
    )), dtype='f').reshape(-1,1)
    return X, y/50.
X, y = split_transform(data)
# -
display(X[0], y[0])
# +
# Boom. Hand implemented one-hot encoding with X, y split that will totally drop data
# if input is > 4 and throw a key error if category added to db during run.
# -
# ## Pin model with data
#
# Using X, y
import lens
display(X.shape, y.shape)
# Initialize modelmap
modelmap = lens.ModelMap(
center_coord = [32.715736, -117.161087]
)
# Pin model
model_info = modelmap.pin_model(
X=X,
y=y,
coordinates = [32.715736, -117.161087]
)
model_info
# ### Save model data to database for later use
#
# The database will maintain a list of available perceptrons.
#
# Note: Currently, these are stored in a temporary folder and so the database references are not valid after garbage collection or system restarts. This table data will have to be dropped at the start of a run
#
# +
from write_query import write_model_metadata
write_model_metadata(model_info)
# -
# ### Check Cache for Model Perseverence in Memory
#
# The cache should be keeping a number of the most recent or most used models for quick access. This way it does not have to be loaded from disk.
modelmap.cache
# ### Create datapoint X for prediction of num_unique
#
# Input X_i must match the format of X_train
# +
def prep_input(x: tuple):
    """Wrap a single observation as a 2-D array of shape (1, n_fields)."""
    row = np.array(x)
    return row.reshape(1, -1)
X_test, y = split_transform(prep_input(data[0]))
# -
# Note: X is always the first out. Though slightly inefficient, no logic is particularly needed to NOT return y.
# NOTE(review): this rebinds `y`, clobbering the training targets above.
# manually from cache
model = modelmap.cache[list(modelmap.cache.keys())[0]]['model']
# a lot to get the first item haha
# *50 undoes the /50 target scaling applied inside split_transform.
model.predict(X_test)*50
# # General Pipeline Construction
#
# The single perceptron did not yield good results, yielding effective averages instead of the learning desired. This could be due to sparsity of the training set or fickle training hyperparameters.
#
# Because of this, and the development of the input/training set transformation functions, a move toward logistic regression will be tried. Another thing gained by moving to Scikit-Learn is pipeline functionality out of the box and access to a number of scaling functions.
# +
# Get Training Data around:
# Fetch training observations within radius 0.5 of the test center.
from read_query import get_near_data
test_center = [32.715736, -117.161087]
data = get_near_data(center_coord=test_center, radius=0.5)
data[0:10]
# +
import lens
# Split into features X and targets y using the lens helper.
X, y = lens.split_widen_data(data)
# +
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
# Logistic-regression pipeline with feature standardization.
lregr = LogisticRegression(
    random_state=420,
    solver='liblinear',
)
standard_scaler = StandardScaler()
tpipe = Pipeline([
    ('scaler', standard_scaler),
    ('logistic', lregr),
])
# -
tpipe.fit(X, y)
# Spot-check one training example against its label.
num = 65
test_data = X[num]
# NOTE(review): print() returns None, so display() shows the prediction and
# None; the "actual" value only appears via print's side effect.
display(
    tpipe.predict(test_data.reshape(1,-1)),
    print('actual:', y[num]))
# +
# Attempt to pin pipeline model
from lens import ModelMap
# NOTE(review): ModelMap is imported directly but the qualified
# lens.ModelMap is used below; the direct import is redundant.
# Initialize modelmap
modelmap = lens.ModelMap(
    center_coord = [32.715736, -117.161087]
)
# Pin model
model_info = modelmap.pin_model(
    X=X,
    y=y,
    coordinates = [32.715736, -117.161087]
)
model_info
# -
# manually from cache
model = modelmap.cache[list(modelmap.cache.keys())[0]]['model']
model.predict(X[65].reshape(1,-1))
| scraper/Train_Transformation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 02 - ML Experimentation with Custom Model
#
# The purpose of this notebook is to use [custom training](https://cloud.google.com/ai-platform-unified/docs/training/custom-training) to train a keras classifier to predict whether a given trip will result in a tip > 20%. The notebook covers the following tasks:
# 1. Preprocess the data locally using Apache Beam.
# 2. Train and test custom model locally using a Keras implementation.
# 3. Submit a Dataflow job to preprocess the data at scale.
# 4. Submit a custom training job to Vertex AI using a [pre-built container](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers).
# 5. Upload the trained model to Vertex AI.
# 6. Track experiment parameters from [Vertex AI Metadata](https://cloud.google.com/vertex-ai/docs/ml-metadata/introduction).
# 7. Submit a [hyperparameter tuning job](https://cloud.google.com/vertex-ai/docs/training/hyperparameter-tuning-overview) to Vertex AI.
#
# We use [Vertex TensorBoard](https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview)
# and [Vertex ML Metadata](https://cloud.google.com/vertex-ai/docs/ml-metadata/introduction) to track, visualize, and compare ML experiments.
# ## Setup
# ### Import libraries
# +
import os
import logging
from datetime import datetime
import numpy as np
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow.keras as keras
from google.cloud import aiplatform as vertex_ai
from google.cloud.aiplatform import hyperparameter_tuning as hp_tuning
from src.common import features, datasource_utils
from src.model_training import data, model, defaults, trainer, exporter
from src.preprocessing import etl
logging.getLogger().setLevel(logging.INFO)
tf.get_logger().setLevel('INFO')
print(f"TensorFlow: {tf.__version__}")
print(f"TensorFlow Transform: {tft.__version__}")
# -
# ### Setup Google Cloud project
# +
# Project/bucket configuration; placeholder defaults are resolved from the
# active gcloud configuration below.
PROJECT = '[your-project-id]'  # Change to your project id.
REGION = 'europe-west1'  # Change to your region.
BUCKET = '[your-bucket-name]'  # Change to your bucket name.
SERVICE_ACCOUNT = "[your-service-account]"
if PROJECT == "" or PROJECT is None or PROJECT == "[your-project-id]":
    # Get your GCP project id from gcloud
    # shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    # NOTE(review): `shell_output` only exists when the magic line above runs
    # inside IPython; outside a notebook this branch raises NameError.
    PROJECT = shell_output[0]
if SERVICE_ACCOUNT == "" or SERVICE_ACCOUNT is None or SERVICE_ACCOUNT == "[your-service-account]":
    # Get your GCP account from gcloud
    # shell_output = !gcloud config list --format 'value(core.account)' 2>/dev/null
    SERVICE_ACCOUNT = shell_output[0]
if BUCKET == "" or BUCKET is None or BUCKET == "[your-bucket-name]":
    # Default the bucket name to the GCP project id
    BUCKET = PROJECT
    # Try to create the bucket if it doesn't exist
    # ! gsutil mb -l $REGION gs://$BUCKET
    print("")
PARENT = f"projects/{PROJECT}/locations/{REGION}"
print("Project ID:", PROJECT)
print("Region:", REGION)
print("Bucket name:", BUCKET)
print("Service Account:", SERVICE_ACCOUNT)
print("Vertex API Parent URI:", PARENT)
# -
# ### Set configurations
# +
# Display names / paths shared by all experiment cells in this notebook.
VERSION = 'v01'
DATASET_DISPLAY_NAME = 'chicago-taxi-tips'
MODEL_DISPLAY_NAME = f'{DATASET_DISPLAY_NAME}-classifier-{VERSION}'
WORKSPACE = f'gs://{BUCKET}/{DATASET_DISPLAY_NAME}'
EXPERIMENT_ARTIFACTS_DIR = os.path.join(WORKSPACE, 'experiments')
RAW_SCHEMA_LOCATION = 'src/raw_schema/schema.pbtxt'
TENSORBOARD_DISPLAY_NAME = f'tb-{DATASET_DISPLAY_NAME}'
EXPERIMENT_NAME = f'{MODEL_DISPLAY_NAME}'
DATAFLOW_REGION = f'{REGION}'
# NOTE(review): environment-specific service account / subnetwork are
# hard-coded; parameterize before reusing in another project.
DATAFLOW_SERVICE_ACCOUNT = 'sa-notebooks@mlops1-notebooks.iam.gserviceaccount.com'
DATAFLOW_SUBNETWORK = 'https://www.googleapis.com/compute/v1/projects/mlops1-network-host/regions/europe-west1/subnetworks/prod-notebooks-ew1'
# -
# ## Create Vertex TensorBoard instance
# NOTE(review): this creates a new TensorBoard resource on every run;
# reuse an existing one to avoid accumulating instances.
vertex_ai.init(location=REGION)
tensorboard_resource = vertex_ai.Tensorboard.create(display_name=TENSORBOARD_DISPLAY_NAME)
tensorboard_resource_name = tensorboard_resource.gca_resource.name
print("TensorBoard resource name:", tensorboard_resource_name)
# ## Initialize workspace
# +
# Workspace (GCS) directory setup for experiment artifacts.
REMOVE_EXPERIMENT_ARTIFACTS = False
if tf.io.gfile.exists(EXPERIMENT_ARTIFACTS_DIR) and REMOVE_EXPERIMENT_ARTIFACTS:
    print("Removing previous experiment artifacts...")
    tf.io.gfile.rmtree(EXPERIMENT_ARTIFACTS_DIR)
if not tf.io.gfile.exists(EXPERIMENT_ARTIFACTS_DIR):
    print("Creating new experiment artifacts directory...")
    tf.io.gfile.mkdir(EXPERIMENT_ARTIFACTS_DIR)
print("Workspace is ready.")
print("Experiment directory:", EXPERIMENT_ARTIFACTS_DIR)
# -
# ## Initialize Vertex AI experiment
# +
# Start a locally-named experiment run; this run's artifacts live under
# EXPERIMENT_RUN_DIR.
vertex_ai.init(
    project=PROJECT,
    location=REGION,
    staging_bucket=BUCKET,
    experiment=EXPERIMENT_NAME
)
run_id = f"run-local-{datetime.now().strftime('%Y%m%d%H%M%S')}"
vertex_ai.start_run(run_id)
EXPERIMENT_RUN_DIR = os.path.join(EXPERIMENT_ARTIFACTS_DIR, EXPERIMENT_NAME, run_id)
print("Experiment run directory:", EXPERIMENT_RUN_DIR)
# -
# ## 1. Preprocess the data using Apache Beam
#
# The Apache Beam pipeline of data preprocessing is implemented in the [preprocessing](src/preprocessing) directory.
# GCS prefixes for this run's raw export, transformed data and TFT artifacts.
EXPORTED_DATA_PREFIX = os.path.join(EXPERIMENT_RUN_DIR, 'exported_data')
TRANSFORMED_DATA_PREFIX = os.path.join(EXPERIMENT_RUN_DIR, 'transformed_data')
TRANSFORM_ARTIFACTS_DIR = os.path.join(EXPERIMENT_RUN_DIR, 'transform_artifacts')
# ### Get Source Query from Managed Dataset
# +
ML_USE = 'UNASSIGNED'
LIMIT = 5120  # small sample for the local (DirectRunner) test
raw_data_query = datasource_utils.get_training_source_query(
    project=PROJECT,
    region=REGION,
    dataset_display_name=DATASET_DISPLAY_NAME,
    ml_use=ML_USE,
    limit=LIMIT
)
print(raw_data_query)
# -
# ### Test Data Preprocessing Locally
# Run the Beam preprocessing pipeline locally with the DirectRunner.
args = {
    'runner': 'DirectRunner',
    'raw_data_query': raw_data_query,
    'write_raw_data': True,
    'exported_data_prefix': EXPORTED_DATA_PREFIX,
    'transformed_data_prefix': TRANSFORMED_DATA_PREFIX,
    'transform_artifact_dir': TRANSFORM_ARTIFACTS_DIR,
    'temporary_dir': os.path.join(WORKSPACE, 'tmp'),
    'gcs_location': f'gs://{BUCKET}/bq_tmp',
    'project': PROJECT,
    'region': REGION
}
# Record the pipeline arguments on the experiment run for reproducibility.
vertex_ai.log_params(args)
print("Data preprocessing started...")
etl.run_transform_pipeline(args)
print("Data preprocessing completed.")
# !gsutil ls {EXPERIMENT_RUN_DIR}
# ## 2. Train a custom model locally using a Keras
#
# The `Keras` implementation of the custom model is in the [model_training](src/model_training) directory.
LOG_DIR = os.path.join(EXPERIMENT_RUN_DIR, 'logs')
EXPORT_DIR = os.path.join(EXPERIMENT_RUN_DIR, 'model')
# ### Read transformed data
# Load the tf.Transform output to get the post-transform feature spec.
tft_output = tft.TFTransformOutput(TRANSFORM_ARTIFACTS_DIR)
transform_feature_spec = tft_output.transformed_feature_spec()
transform_feature_spec
# +
train_data_file_pattern = os.path.join(TRANSFORMED_DATA_PREFIX, 'train/data-*.gz')
eval_data_file_pattern = os.path.join(TRANSFORMED_DATA_PREFIX, 'eval/data-*.gz')
# Peek at one small batch to sanity-check dataset parsing.
for input_features, target in data.get_dataset(
        train_data_file_pattern, transform_feature_spec, batch_size=3).take(1):
    for key in input_features:
        print(f"{key} {input_features[key].dtype}: {input_features[key].numpy().tolist()}")
    print(f"target: {target.numpy().tolist()}")
# -
# ### Create hyperparameters
# +
hyperparams = {
    "hidden_units": [64, 32]
}
# Fill in defaults for any hyperparameters not set above.
hyperparams = defaults.update_hyperparams(hyperparams)
hyperparams
# -
# ### Create and test model inputs and outputs
classifier = model.create_binary_classifier(tft_output, hyperparams)
classifier.summary()
keras.utils.plot_model(
    classifier,
    show_shapes=True,
    show_dtype=True
)
# Smoke-test a forward pass with the sample batch read above.
classifier(input_features)
# ### Train the model locally.
# +
# Set final training hyperparameters and record them on the run.
logging.getLogger().setLevel(logging.INFO)
hyperparams["learning_rate"] = 0.001
hyperparams["num_epochs"] = 5
hyperparams["batch_size"] = 512
vertex_ai.log_params(hyperparams)
# -
classifier = trainer.train(
    train_data_dir=train_data_file_pattern,
    eval_data_dir=eval_data_file_pattern,
    tft_output_dir=TRANSFORM_ARTIFACTS_DIR,
    hyperparams=hyperparams,
    log_dir=LOG_DIR,
)
val_loss, val_accuracy = trainer.evaluate(
    model=classifier,
    data_dir=eval_data_file_pattern,
    raw_schema_location=RAW_SCHEMA_LOCATION,
    tft_output_dir=TRANSFORM_ARTIFACTS_DIR,
    hyperparams=hyperparams,
)
# Log validation metrics, then upload this run's TensorBoard logs.
vertex_ai.log_metrics(
    {"val_loss": val_loss, "val_accuracy": val_accuracy})
# !tb-gcp-uploader --tensorboard_resource_name={tensorboard_resource_name} \
#     --logdir={LOG_DIR} \
#     --experiment_name={EXPERIMENT_NAME} --one_shot=True
# ### Export the trained model
# +
saved_model_dir = os.path.join(EXPORT_DIR)
# Export a serving SavedModel; the TFT transform is applied inside the
# serving signatures so raw features can be sent at prediction time.
exporter.export_serving_model(
    classifier=classifier,
    serving_model_dir=saved_model_dir,
    raw_schema_location=RAW_SCHEMA_LOCATION,
    tft_output_dir=TRANSFORM_ARTIFACTS_DIR,
)
# -
# ### Inspect model serving signatures
# !saved_model_cli show --dir={saved_model_dir} --tag_set=serve --signature_def=serving_tf_example
# !saved_model_cli show --dir={saved_model_dir} --tag_set=serve --signature_def=serving_default
# ### Test the exported SavedModel
serving_model = tf.saved_model.load(saved_model_dir)
print("Saved model is loaded.")
# +
# Test the serving_tf_example signature with serialized TF Examples
file_names = tf.data.TFRecordDataset.list_files(EXPORTED_DATA_PREFIX + '/data-*.tfrecord')
for batch in tf.data.TFRecordDataset(file_names).batch(3).take(1):
    predictions = serving_model.signatures['serving_tf_example'](batch)
    for key in predictions:
        print(f"{key}: {predictions[key]}")
# +
# Test the serving_default signature with a feature dictionary
import tensorflow_data_validation as tfdv
from tensorflow_transform.tf_metadata import schema_utils
raw_schema = tfdv.load_schema_text(RAW_SCHEMA_LOCATION)
raw_feature_spec = schema_utils.schema_as_feature_spec(raw_schema).feature_spec
# +
# One hand-written raw instance matching the raw schema.
instance = {
    "dropoff_grid": "POINT(-87.6 41.9)",
    "euclidean": 2064.2696,
    "loc_cross": "",
    "payment_type": "Credit Card",
    "pickup_grid": "POINT(-87.6 41.9)",
    "trip_miles": 1.37,
    "trip_day": 12,
    "trip_hour": 6,
    "trip_month": 2,
    "trip_day_of_week": 4,
    "trip_seconds": 555,
}
# Cast each scalar to a [1, 1] tensor with the schema's dtype.
for feature_name in instance:
    dtype = raw_feature_spec[feature_name].dtype
    instance[feature_name] = tf.constant([[instance[feature_name]]], dtype)
# -
predictions = serving_model.signatures['serving_default'](**instance)
for key in predictions:
    print(f"{key}: {predictions[key].numpy()}")
# ## Start a new Vertex AI experiment run
# +
# Start a second experiment run for the at-scale (GCP) workflow.
# NOTE(review): unlike the first vertex_ai.init call, `location` is not
# passed here — confirm the default region is intended.
vertex_ai.init(
    project=PROJECT,
    staging_bucket=BUCKET,
    experiment=EXPERIMENT_NAME)
run_id = f"run-gcp-{datetime.now().strftime('%Y%m%d%H%M%S')}"
vertex_ai.start_run(run_id)
EXPERIMENT_RUN_DIR = os.path.join(EXPERIMENT_ARTIFACTS_DIR, EXPERIMENT_NAME, run_id)
print("Experiment run directory:", EXPERIMENT_RUN_DIR)
# -
# ## 3. Submit a Data Processing Job to Dataflow
# GCS prefixes for the at-scale run's artifacts.
EXPORTED_DATA_PREFIX = os.path.join(EXPERIMENT_RUN_DIR, 'exported_data')
TRANSFORMED_DATA_PREFIX = os.path.join(EXPERIMENT_RUN_DIR, 'transformed_data')
TRANSFORM_ARTIFACTS_DIR = os.path.join(EXPERIMENT_RUN_DIR, 'transform_artifacts')
# +
ML_USE = 'UNASSIGNED'
LIMIT = 1000000  # full-scale sample for the Dataflow run
raw_data_query = datasource_utils.get_training_source_query(
    project=PROJECT,
    region=REGION,
    dataset_display_name=DATASET_DISPLAY_NAME,
    ml_use=ML_USE,
    limit=LIMIT
)
etl_job_name = f"etl-{MODEL_DISPLAY_NAME}-{run_id}"
# Same pipeline as the local test, but executed on Dataflow with the
# managed environment's network / service-account settings.
args = {
    'job_name': etl_job_name,
    'runner': 'DataflowRunner',
    'raw_data_query': raw_data_query,
    'exported_data_prefix': EXPORTED_DATA_PREFIX,
    'transformed_data_prefix': TRANSFORMED_DATA_PREFIX,
    'transform_artifact_dir': TRANSFORM_ARTIFACTS_DIR,
    'write_raw_data': False,
    'temporary_dir': os.path.join(WORKSPACE, 'tmp'),
    'gcs_location': os.path.join(WORKSPACE, 'bq_tmp'),
    'project': PROJECT,
    'region': DATAFLOW_REGION,
    'setup_file': './setup.py',
    'service_account_email': DATAFLOW_SERVICE_ACCOUNT,
    'use_public_ips': False,
    'subnetwork': DATAFLOW_SUBNETWORK
}
# -
vertex_ai.log_params(args)
# +
logging.getLogger().setLevel(logging.ERROR)
print("Data preprocessing started...")
etl.run_transform_pipeline(args)
print("Data preprocessing completed.")
# -
# !gsutil ls {EXPERIMENT_RUN_DIR}
# ## 4. Submit a Custom Training Job to Vertex AI
LOG_DIR = os.path.join(EXPERIMENT_RUN_DIR, 'logs')
EXPORT_DIR = os.path.join(EXPERIMENT_RUN_DIR, 'model')
# ### Test the training task locally
# !python -m src.model_training.task \
#     --model-dir={EXPORT_DIR} \
#     --log-dir={LOG_DIR} \
#     --train-data-dir={TRANSFORMED_DATA_PREFIX}/train/* \
#     --eval-data-dir={TRANSFORMED_DATA_PREFIX}/eval/* \
#     --tft-output-dir={TRANSFORM_ARTIFACTS_DIR} \
#     --num-epochs=3 \
#     --hidden-units=32,32 \
#     --experiment-name={EXPERIMENT_NAME} \
#     --run-name={run_id} \
#     --project={PROJECT} \
#     --region={REGION} \
#     --staging-bucket={BUCKET}
# ### Prepare training package
# Where the source distribution for Vertex custom training is staged.
TRAINER_PACKAGE_DIR = os.path.join(WORKSPACE, 'trainer_packages')
TRAINER_PACKAGE_NAME = f'{MODEL_DISPLAY_NAME}_trainer'
print("Trainer package upload location:", TRAINER_PACKAGE_DIR)
# +
# !rm -r src/__pycache__/
# !rm -r src/.ipynb_checkpoints/
# !rm -r src/raw_schema/.ipynb_checkpoints/
# !rm -f {TRAINER_PACKAGE_NAME}.tar {TRAINER_PACKAGE_NAME}.tar.gz
# !mkdir {TRAINER_PACKAGE_NAME}
# !cp setup.py {TRAINER_PACKAGE_NAME}/
# !cp -r src {TRAINER_PACKAGE_NAME}/
# !tar cvf {TRAINER_PACKAGE_NAME}.tar {TRAINER_PACKAGE_NAME}
# !gzip {TRAINER_PACKAGE_NAME}.tar
# !gsutil cp {TRAINER_PACKAGE_NAME}.tar.gz {TRAINER_PACKAGE_DIR}/
# !rm -r {TRAINER_PACKAGE_NAME}
# !rm -r {TRAINER_PACKAGE_NAME}.tar.gz
# -
# ### Prepare the training job
TRAIN_RUNTIME = 'tf-cpu.2-5'
TRAIN_IMAGE = f"us-docker.pkg.dev/vertex-ai/training/{TRAIN_RUNTIME}:latest"
print("Training image:", TRAIN_IMAGE)
# +
# Hyperparameters for the submitted training job.
num_epochs = 10
learning_rate = 0.001
hidden_units = "64,64"
# NOTE(review): `hidden_units` is defined above but never passed in
# trainer_args, so the job uses the task's default — confirm intended.
trainer_args = [
    f'--train-data-dir={TRANSFORMED_DATA_PREFIX + "/train/*"}',
    f'--eval-data-dir={TRANSFORMED_DATA_PREFIX + "/eval/*"}',
    f'--tft-output-dir={TRANSFORM_ARTIFACTS_DIR}',
    f'--num-epochs={num_epochs}',
    f'--learning-rate={learning_rate}',
    f'--project={PROJECT}',
    f'--region={REGION}',
    f'--staging-bucket={BUCKET}',
    f'--experiment-name={EXPERIMENT_NAME}'
]
# +
package_uri = os.path.join(TRAINER_PACKAGE_DIR, f'{TRAINER_PACKAGE_NAME}.tar.gz')
# Single CPU worker (no accelerators) running the packaged training module.
worker_pool_specs = [
    {
        "replica_count": 1,
        "machine_spec": {
            "machine_type": 'n1-standard-4',
            "accelerator_count": 0
        },
        "python_package_spec": {
            "executor_image_uri": TRAIN_IMAGE,
            "package_uris": [package_uri],
            "python_module": "src.model_training.task",
            "args": trainer_args,
        }
    }
]
# -
# ### Submit the training job
# +
print("Submitting a custom training job...")
training_job_display_name = f"{TRAINER_PACKAGE_NAME}_{run_id}"
training_job = vertex_ai.CustomJob(
display_name=training_job_display_name,
worker_pool_specs=worker_pool_specs,
base_output_dir=EXPERIMENT_RUN_DIR,
)
training_job.run(
service_account=SERVICE_ACCOUNT,
tensorboard=tensorboard_resource_name,
sync=True
)
# -
# ## 5. Upload exported model to Vertex AI Models
# !gsutil ls {EXPORT_DIR}
# ### Generate the Explanation metadata
explanation_config = features.generate_explanation_config()
explanation_config
# ### Upload model
SERVING_RUNTIME='tf2-cpu.2-5'
SERVING_IMAGE = f"us-docker.pkg.dev/vertex-ai/prediction/{SERVING_RUNTIME}:latest"
print("Serving image:", SERVING_IMAGE)
# +
explanation_metadata = vertex_ai.explain.ExplanationMetadata(
inputs=explanation_config["inputs"],
outputs=explanation_config["outputs"],
)
explanation_parameters = vertex_ai.explain.ExplanationParameters(
explanation_config["params"]
)
vertex_model = vertex_ai.Model.upload(
display_name=MODEL_DISPLAY_NAME,
artifact_uri=EXPORT_DIR,
serving_container_image_uri=SERVING_IMAGE,
parameters_schema_uri=None,
instance_schema_uri=None,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
labels={
'dataset_name': DATASET_DISPLAY_NAME,
'experiment': run_id
}
)
# -
vertex_model.gca_resource
# ## 6. Extract experiment run parameters
# Pull all runs of this experiment into a DataFrame for comparison.
experiment_df = vertex_ai.get_experiment_df()
experiment_df = experiment_df[experiment_df.experiment_name == EXPERIMENT_NAME]
experiment_df.T
print("Vertex AI Experiments:")
# Fixed: the original f-string was missing the '/' between "locations" and
# the region, producing a broken console link.
print(
    f"https://console.cloud.google.com/vertex-ai/locations/{REGION}/experiments/{EXPERIMENT_NAME}/metrics?project={PROJECT}"
)
# ## 7. Submit a Hyperparameter Tuning Job to Vertex AI
#
# For more information about configuring a hyperparameter study, refer to [Vertex AI Hyperparameter job configuration](https://cloud.google.com/vertex-ai/docs/training/using-hyperparameter-tuning).
# ### Configure a hyperparameter job
# +
# Hyperparameter study: maximize the ACCURACY metric reported by the trainer.
metric_spec = {
    'ACCURACY': 'maximize'
}
parameter_spec = {
    'learning-rate': hp_tuning.DoubleParameterSpec(min=0.0001, max=0.01, scale='log'),
    'hidden-units': hp_tuning.CategoricalParameterSpec(values=["32,32", "64,64", "128,128"])
}
# +
tuning_job_display_name = f"hpt_{TRAINER_PACKAGE_NAME}_{run_id}"
hp_tuning_job = vertex_ai.HyperparameterTuningJob(
    display_name=tuning_job_display_name,
    custom_job=training_job,  # reuses the CustomJob spec as the trial template
    metric_spec=metric_spec,
    parameter_spec=parameter_spec,
    max_trial_count=4,
    parallel_trial_count=2,
    search_algorithm=None  # Bayesian optimization.
)
# -
# ### Submit the hyperparameter tuning job
# +
# Submit the tuning job; sync=True blocks until all trials complete.
# Fixed typo in the user-facing message ("tunning" -> "tuning").
print("Submitting a hyperparameter tuning job...")
hp_tuning_job.run(
    service_account=SERVICE_ACCOUNT,
    tensorboard=tensorboard_resource_name,
    restart_job_on_worker_restart=False,
    sync=True,
)
# -
# ### Retrieve trial results
hp_tuning_job.trials
# +
# Rank trials by final metric value, descending (metric is maximized).
best_trial = sorted(
    hp_tuning_job.trials,
    key=lambda trial: trial.final_measurement.metrics[0].value,
    reverse=True
)[0]
print("Best trial ID:", best_trial.id)
print("Validation Accuracy:", best_trial.final_measurement.metrics[0].value)
print("Hyperparameter Values:")
for parameter in best_trial.parameters:
    print(f" - {parameter.parameter_id}:{parameter.value}")
# -
| 02-experimentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Example of weighing the multi-task loss with uncertainty
# This is a `torchmtl` implementation of the paper [Multi-Task Learning Using Uncertainty to Weigh Losses for Scene Geometry and Semantics](https://arxiv.org/abs/1705.07115).
# Large fractions of this notebook are borrowed from Yarin Gal's [example implementation](https://github.com/yaringal/multi-task-learning-example/blob/master/multi-task-learning-example-pytorch.ipynb).
# +
# PyTorch imports
import torch
from torch import nn
import torch.optim as optim
from torch.nn import (Linear, Sequential, MSELoss)
# Import networkx for visualization
import networkx as nx
# torchMTL imports
from torchmtl import MTLModel
from torchmtl.wrapping_layers import Concat
# Some imports for the visualization
import matplotlib.pyplot as plt
import numpy as np
# Fix seeds for reproducibility.
np.random.seed(0)
torch.manual_seed(0)
def gen_data(N):
    """Sample N observations for two noisy linear tasks sharing input X.

    Task 1: y1 = 2*x + 8 + eps1 with ground-truth noise std 10.
    Task 2: y2 = 3*x + 3 + eps2 with ground-truth noise std 1.
    """
    X = np.random.randn(N, input_size)
    Y1 = X.dot(2.) + 8. + 1e1 * np.random.randn(N, output1_size)
    Y2 = X.dot(3.) + 3. + 1e0 * np.random.randn(N, output2_size)
    return X, Y1, Y2
def shuffle_data(X, Y1, Y2):
    """Apply one random row permutation consistently to all three arrays."""
    perm = np.arange(X.shape[0])
    np.random.shuffle(perm)
    return X[perm], Y1[perm], Y2[perm]
# -
# Now, we define the multi-task model ...
# +
# Problem dimensions for the two-task toy regression.
nb_features = 1024  # NOTE(review): defined but unused in this example
input_size = 1
hidden_size = 2  # width of the shared hidden layer
output1_size = 1  # first task output width
output2_size = 1  # second task output width
# Define a loss function that returns the log vars
# Define a loss function that returns the log vars
class MultiTaskLossWrapper(nn.Module):
    """Terminal task node holding one learnable log-variance per task.

    forward() is a pass-through: it returns the log-variances together
    with the incoming predictions so the precision-weighted loss can be
    computed in the training loop.
    """

    def __init__(self, num_tasks):
        super().__init__()
        self.num_tasks = num_tasks
        # log(sigma^2) per task, initialized to zero (sigma = 1).
        self.log_vars = nn.Parameter(torch.zeros(num_tasks))

    def forward(self, *X):
        return self.log_vars, X
# Task graph: one shared input trunk feeding two linear heads, whose
# outputs (plus the learnable log-variances) are gathered by MultiLoss.
tasks = [
    {
        'name': "InputTask",
        'layers': Sequential(*[nn.Linear(input_size, hidden_size), nn.ReLU()]),
        # No anchor_layer means this layer receives input directly
    },
    {
        'name': "Lin1",
        'layers': nn.Linear(hidden_size, output1_size),
        'anchor_layer': "InputTask"
    },
    {
        'name': "Lin2",
        'layers': nn.Linear(hidden_size, output2_size),
        'anchor_layer': "InputTask"
    },
    {
        'name': "MultiLoss",
        'layers': MultiTaskLossWrapper(num_tasks=2),
        'anchor_layer': ['Lin1', 'Lin2']
    }
]
# -
# ... define from which layers we would like to receive predictions (output of their 'layers' key), loss function, and scaling factor.
# Only the MultiLoss node's output is returned by the model forward pass.
output_tasks=['MultiLoss']
# ... and build and visualize it
# +
model = MTLModel(tasks, output_tasks=output_tasks)
# Draw the task graph.
pos = nx.planar_layout(model.g)
nx.draw(model.g, pos, font_size=14, node_color="y", node_size=450, with_labels=True)
# +
# Define an optimizer
optimizer = optim.Adam(model.parameters())
# Generate the data set
N = 100
nb_epoch = 2000
batch_size = 20
# Generate and convert data into torch from numpy array
X, Y1, Y2 = gen_data(N)
X = X.astype('float32')
Y1 = Y1.astype('float32')
Y2 = Y2.astype('float32')
losses = []
for i in range(nb_epoch):
    epoch_loss = 0
    # Reshuffle every epoch; the arrays are permuted in lockstep.
    X, Y1, Y2 = shuffle_data(X, Y1, Y2)
    for j in range(N//batch_size):
        optimizer.zero_grad()
        X_ = torch.from_numpy(X[(j * batch_size):((j + 1) * batch_size)])
        target1 = torch.from_numpy(Y1[(j * batch_size):((j + 1) * batch_size)])
        target2 = torch.from_numpy(Y2[(j * batch_size):((j + 1) * batch_size)])
        y = [target1, target2]
        # model returns one output per output_task; MultiLoss yields
        # (log_vars, (pred1, pred2)).
        multi_loss_out, _, _ = model(X_)
        log_vars = multi_loss_out[0][0]
        y_hat = multi_loss_out[0][1]
        loss = 0
        # Uncertainty-weighted loss (Kendall et al. 2017, see header):
        # exp(-log_var) is the task precision; log_var is the regularizer.
        # NOTE(review): this inner loop reuses the name `i` from the epoch
        # loop — harmless for iteration, but confusing; consider renaming.
        for i in range(len(y)):
            precision = torch.exp(-log_vars[i])
            diff = (y_hat[i] - y[i])**2.
            loss += torch.sum(precision * diff + log_vars[i], -1)
        loss = torch.mean(loss)
        epoch_loss += loss.item()
        loss.backward()
        optimizer.step()
    losses.append(epoch_loss * batch_size / N)
# -
# Visualize the overall loss
# Loss curve over epochs.
plt.plot(losses)
# Found standard deviations (ground truth is 10 and 1):
# log_vars holds log(sigma^2) from the last training step,
# so sigma = exp(log_var) ** 0.5.
std_1 = torch.exp(log_vars[0])**0.5
std_2 = torch.exp(log_vars[1])**0.5
print([std_1.item(), std_2.item()])
| examples/uncertainty_weighted_loss.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#import numpy as np
import glob
from PIL import Image, ImageOps
import autograd.numpy as np
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import display
def load_image(fname):
    """Load *fname* as a 32x32, contrast-stretched grayscale pixel array."""
    thumb = Image.open(fname).resize((32, 32))
    equalized = ImageOps.autocontrast(thumb.convert('L'))
    flat = np.array(equalized.getdata())
    return flat.reshape((equalized.size[1], -1))
def binarize_image(img_eq):
    """Threshold grayscale pixels into the Hopfield alphabet {-1, +1}."""
    spins = np.copy(img_eq)
    spins[spins < 128] = -1   # dark pixels -> -1
    spins[spins >= 128] = 1   # bright pixels -> +1
    return spins
def add_corruption(img):
    """Corrupt a flattened 32x32 binarized image with a random -1 band.

    One of three patterns is chosen uniformly at random: a horizontal
    band, a vertical band, or a diagonal band (8 pixels wide). The input
    is modified in place and returned flattened.
    """
    img = img.reshape((32, 32))
    t = np.random.choice(3)
    if t == 0:
        # Horizontal band of up to 8 rows starting at a random row.
        i = np.random.randint(32)
        img[i:(i + 8)] = -1
    elif t == 1:
        # Vertical band of up to 8 columns.
        i = np.random.randint(32)
        img[:, i:(i + 8)] = -1
    else:
        # Diagonal band: union of diagonals -4..4.
        # FIX: `np.int` was removed in NumPy 1.24; use the builtin int.
        mask = np.sum([np.diag(-np.ones(32 - np.abs(i)), i)
                       for i in np.arange(-4, 5)], 0).astype(int)
        img[mask == -1] = -1
    return img.ravel()
def recover(cimgs, W, b):
    """Recover corrupted images via synchronous Hopfield-style updates.

    Each image is repeatedly replaced by sign(sigmoid(W @ x + b) - 0.5)
    for a fixed number of sweeps.

    Args:
        cimgs: array of flattened corrupted images, shape (n, 1024).
        W: weight matrix, shape (1024, 1024).
        b: bias vector, shape (1024,).

    Returns:
        Recovered images reshaped to (n, 32, 32).
    """
    img_size = np.prod(cimgs[0].shape)
    rimgs = cimgs.copy()
    num_iter = 20
    for _ in range(num_iter):
        for j in range(len(rimgs)):
            # FIX: the original referenced the globals `Wh`/`bh` instead of
            # the `W`/`b` parameters, silently ignoring the arguments.
            activation = 1 / (1 + np.exp(-(W.dot(rimgs[j]) + b)))
            rimgs[j] = np.sign(activation - 0.5).astype(int)
    rimgs = rimgs.reshape((len(rimgs), 32, 32))
    return rimgs
def learn_hebbian(imgs):
    """Learn Hopfield weights from binarized images via the Hebbian rule.

    Weights are the average outer product of the flattened patterns with
    the diagonal zeroed (no self-connections); the bias stays at zero.
    Returns (weights, bias).
    """
    img_size = np.prod(imgs[0].shape)
    bias = np.zeros(img_size)
    flat = np.reshape(imgs, (len(imgs), img_size))
    # Sum of outer products of all training patterns in one matmul.
    weights = flat.T @ flat
    # Remove self-connections, then average over patterns.
    np.fill_diagonal(weights, 0)
    weights = weights / len(imgs)
    return weights, bias
# +
#Autograd
import autograd.numpy as np
from autograd import grad, jacobian, hessian
from autograd.scipy.stats import norm
from scipy.optimize import minimize
def learn_maxpl(imgs):
    """Learn Hopfield weights/biases by maximizing the log pseudo-likelihood.

    Runs plain gradient ascent (via autograd) over a flat parameter vector
    `teta` = [1024*1024 weights, 1024 biases] and returns (weights, bias).
    """
    img_size = np.prod(imgs[0].shape)
    ######################################################################
    ######################################################################
    weights = np.zeros((img_size, img_size))
    bias = np.zeros(img_size)
    # Complete this function
    # You are allowed to modify anything between these lines
    # Helper functions are allowed
    # Define PseudoLikelihood function
    def log_PL(teta):
        # Sum over images and pixels of the log conditional likelihood of
        # each pixel given the full image (pixels mapped {-1,1} -> {0,1}).
        # NOTE(review): the bias is indexed at len(imgs_f)*img_size + j,
        # which only matches the weight-block size if the number of images
        # equals img_size; expected img_size*img_size + j — verify.
        # NOTE(review): the second term uses (1 - log(p)) where log(1 - p)
        # would be the standard Bernoulli log-likelihood — verify.
        SUM=0
        imgs_f = imgs.reshape((len(imgs),img_size))
        for i in imgs_f:
            for j in range(len(i)):
                SUM=SUM+np.log(1/(1+np.exp(-(np.dot(teta[j*img_size:(j+1)*img_size].transpose(),i)+teta[(len(imgs_f))*img_size+j]))))*(i[j]/2+0.5)+(1-np.log(1/(1+np.exp(-(np.dot(teta[j*img_size:(j+1)*img_size].transpose(),i)+teta[(len(imgs_f))*img_size+j])))))*(-i[j]/2+0.5)
        return SUM
    #######################################################################
    #######################################################################
    x0 = np.ones((1024*1025,1))#np.concatenate((np.ones((1024*1024,1)),np.zeros((1024,1))),axis = 0)
    d_teta=grad(log_PL,0)
    num_iter = 5
    # NOTE(review): fixed-step gradient ascent with alpha=1000 and no
    # convergence check — the step size looks aggressive; verify stability.
    alpha = 1000
    for i in range(num_iter):
        dx = d_teta(x0)
        # Count of strongly-positive gradient entries, as a progress signal.
        print(np.sum(dx>0.01))
        x0 += dx*alpha
    print(x0)
    return x0[:img_size*img_size].reshape((img_size,img_size)), x0[img_size*img_size:].transpose()#weights, bias
# -
# Recover 2 -- Hebbian
# NOTE(review): this cell references `imgs`/`cimgs`, which are only defined
# in the next cell; running the notebook top-to-bottom raises NameError.
# Despite the "Hebbian" label it uses the max-pseudo-likelihood learner.
Wh, bh = learn_maxpl(imgs)
print(Wh.shape,bh.shape)
rimgs_h = recover(cimgs, Wh, bh)
# +
# Load Images and Binarize
ifiles = sorted(glob.glob('images/*'))
timgs = [load_image(ifile) for ifile in ifiles]
imgs = np.asarray([binarize_image(img) for img in timgs])
# Add corruption
cimgs = []
for i, img in enumerate(imgs):
    # NOTE(review): `img` from enumerate is unused; imgs[i] is copied instead.
    cimgs.append(add_corruption(np.copy(imgs[i])))
cimgs = np.asarray(cimgs)
# Show clean images, then corrupted ones; add_corruption returns flattened
# arrays, hence the reshape for display.
for i in imgs:
    plt.imshow(i, cmap='gray')
    plt.show()
for i in cimgs:
    plt.imshow(i.reshape((32,32)), cmap='gray')
    plt.show()
# +
# Recover 1 -- Hebbian
Wh, bh = learn_hebbian(imgs)
rimgs_h = recover(cimgs, Wh, bh)
np.save('hebbian.npy', rimgs_h)
import matplotlib.pyplot as plt
from IPython.display import display
# Corrupted inputs followed by their Hebbian recoveries.
for i in cimgs:
    plt.imshow(i.reshape((32,32)), cmap='gray')
    plt.show()
for i in rimgs_h:
    plt.imshow(i.reshape((32,32)), cmap='gray')
    plt.show()
# +
# Recover 2 -- Hebbian
# NOTE(review): label says Hebbian but this uses the max-pseudo-likelihood
# learner, and it overwrites the 'hebbian.npy' saved by Recover 1.
Wh, bh = learn_maxpl(imgs)
rimgs_h = recover(cimgs, Wh, bh)
np.save('hebbian.npy', rimgs_h)
import matplotlib.pyplot as plt
from IPython.display import display
for i in cimgs:
    plt.imshow(i.reshape((32,32)), cmap='gray')
    plt.show()
for i in rimgs_h:
    plt.imshow(i.reshape((32,32)), cmap='gray')
    plt.show()
# +
# Standalone copy of the pseudo-likelihood objective used to probe it
# directly (reloads and re-binarizes the images on every call).
def log_PL(teta):
    SUM=0
    ifiles = sorted(glob.glob('images/*'))
    timgs = [load_image(ifile) for ifile in ifiles]
    imgs = np.asarray([binarize_image(img) for img in timgs])
    img_size = np.prod(imgs[0].shape)
    imgs_f = imgs.reshape((len(imgs),img_size))
    for i in imgs_f:
        for j in range(len(i)):
            # NOTE(review): same suspect bias index and (1 - log(p)) term
            # as the nested version inside learn_maxpl — verify.
            SUM=SUM+np.log(1/(1+np.exp(-(np.dot(teta[j*img_size:(j+1)*img_size].transpose(),i)+teta[(len(imgs_f))*img_size+j]))))*(i[j]/2+0.5)+(1-np.log(1/(1+np.exp(-(np.dot(teta[j*img_size:(j+1)*img_size].transpose(),i)+teta[(len(imgs_f))*img_size+j])))))*(-i[j]/2+0.5)
    return SUM
teta = np.ones((1024*1025,1))
# Evaluate the objective at an all-ones parameter vector.
log_PL(teta)
# -
# Gradient of the standalone objective via autograd.
d_teta=grad(log_PL)
import numpy  # NOTE(review): redundant; numpy is already imported as np above
teta = np.random.rand(1024*1025,1)
# Evaluate the gradient at a random point (slow: nested loops over pixels).
print(d_teta(teta))
| Hopfield/A2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="nCc3XZEyG3XV"
# Lambda School Data Science
#
# *Unit 2, Sprint 3, Module 1*
#
# ---
#
#
# # Define ML problems
#
# You will use your portfolio project dataset for all assignments this sprint.
#
# ## Assignment
#
# Complete these tasks for your project, and document your decisions.
#
# - [x] Choose your target. Which column in your tabular dataset will you predict?
# - [x] Is your problem regression or classification?
# - [X] How is your target distributed?
# - Classification: How many classes? Are the classes imbalanced?
# - Regression: Is the target right-skewed? If so, you may want to log transform the target.
# - [x] Choose your evaluation metric(s).
# - Classification: Is your majority class frequency >= 50% and < 70% ? If so, you can just use accuracy if you want. Outside that range, accuracy could be misleading. What evaluation metric will you choose, in addition to or instead of accuracy?
# - Regression: Will you use mean absolute error, root mean squared error, R^2, or other regression metrics?
# - [x] Choose which observations you will use to train, validate, and test your model.
# - Are some observations outliers? Will you exclude them?
# - Will you do a random split or a time-based split?
# - [x] Begin to clean and explore your data.
# - [x] Begin to choose which features, if any, to exclude. Would some features "leak" future information?
#
# If you haven't found a dataset yet, do that today. [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2) and choose your dataset.
#
# Some students worry, ***what if my model isn't “good”?*** Then, [produce a detailed tribute to your wrongness. That is science!](https://twitter.com/nathanwpyle/status/1176860147223867393)
# -
import pandas as pd
data = pd.read_csv('../data/steam/steam.csv')  # raw Steam store dataset (one row per game)
# ## Rating
# The rating system on steam only gives us the number of positive reviews and negative reviews. [steamdb](https://steamdb.info/blog/steamdb-rating/) uses the following formula to convert those numbers into a rating. From there, we determine a good game to be one with a rating greater than or equal to .9 out of 1. Our classification target is whether or not this is true or not.
#
# $$\text{Total Reviews} = \text{Positive Reviews} + \text{Negative Reviews}$$
# $$\text{Review Score} = \frac{\text{Positive Reviews}}{\text{Total Reviews}}$$
# $$\text{Rating} = \text{Review Score} - (\text{Review Score} - 0.5)*2^{-\log_{10}(\text{Total Reviews} + 1)}$$
data.columns  # inspect the available columns before choosing features
import math
import numpy as np
def create_target(data: pd.DataFrame):
    """Derive the binary classification target 'good' from Steam review counts.

    Implements the SteamDB rating formula:

        rating = review_score - (review_score - 0.5) * 2**(-log10(total_reviews + 1))

    A game is 'good' when rating >= 0.90.  Games with fewer than 500 total
    reviews are dropped (SteamDB does not rate them), so the result can have
    fewer rows than the input.

    Parameters
    ----------
    data : pd.DataFrame
        Must contain 'positive_ratings' and 'negative_ratings' columns.

    Returns
    -------
    pd.DataFrame
        Copy of *data* without the two review-count columns, plus a boolean
        'good' column, restricted (inner merge on index) to games with at
        least 500 total reviews.  The input frame is not mutated.
    """
    data = data.copy()
    # Explicit copy so the assignments below do not trigger pandas
    # SettingWithCopy warnings on a slice of `data`.
    df = data[['positive_ratings', 'negative_ratings']].copy()
    # The raw counts fully determine the target, so drop them from the
    # feature frame to avoid leakage.
    data = data.drop(['positive_ratings', 'negative_ratings'], axis=1)
    df['total_reviews'] = df['positive_ratings'] + df['negative_ratings']
    df = df[df['total_reviews'] >= 500]
    df['review_score'] = df['positive_ratings'] / df['total_reviews']
    # Vectorized equivalents of the original per-row list comprehensions.
    df['superscript'] = np.log10(df['total_reviews'] + 1)
    df['exponent'] = 2.0 ** (-df['superscript'])
    df['rating'] = df['review_score'] - (df['review_score'] - 0.5) * df['exponent']
    df['good'] = df['rating'] >= 0.90
    # Inner merge on the index drops the games filtered out above.
    data = data.merge(df[['good']], left_index=True, right_index=True)
    return data
# Build the modelling frame with the boolean 'good' target attached.
data_with_target = create_target(data)
data_with_target.shape  # fewer rows than `data`: games with <500 reviews were dropped
data_with_target['good'].value_counts()  # absolute class counts
data_with_target['good'].value_counts(normalize=True)  # relative class frequencies
# ## Evaluation Metrics
#
# Due to the imbalanced nature of this dataset, accuracy will not be an effective metric. Instead, precision or possibly a precision-recall curve will be used to determine fit.
#
# I think I specifically want to play with an imbalanced dataset. If I set my threshold of "Great Game" to >= 90/100, it gives me a majority class of >96% not great (edited)
#
# Then if I optimize for precision with my model, with the idea that "I don't want to miss a truly great, must play game. I'm OK with playing a mediocre game if I must"
# In other words, make a model that minimizes false negatives, because if I miss even 1 masterpiece, then my life is incomplete.
#
# A train-test-val split will be used for testing.
#
# Some observations are not reliable. SteamDB does not provide a rating for any games with less than 500 reviews. We will disregard such games as well
from sklearn.model_selection import train_test_split
# Hold out a test set, then split the remainder into train/validation.
# `stratify` keeps the rare 'good' class proportionally represented in every
# split; the fixed random_state makes the splits reproducible.
train_and_val_set, test_set = train_test_split(data_with_target, stratify = data_with_target['good'], random_state = 11)
train_set, val_set = train_test_split(train_and_val_set, stratify = train_and_val_set['good'], random_state = 11)
# +
def x_y_split(df):
    """Separate a split into the feature matrix X and the 'good' target y.

    Returns independent copies, so mutating the results never touches *df*.
    """
    target = 'good'
    features = df.drop(columns=target)
    labels = df[target].copy()
    return features, labels
# Separate features from the 'good' target for each split.
X_train, y_train = x_y_split(train_set)
X_val, y_val = x_y_split(val_set)
X_test, y_test = x_y_split(test_set)
# -
from pandas_profiling import ProfileReport
# `ProfileReport(...).to_notebook_iframe()` renders the report but returns
# None, so the original assignment left `profile` bound to None and the bare
# `profile` expression displayed nothing.  Keep the report object and render
# it as a separate step instead.
profile = ProfileReport(X_train, minimal=True)
profile.to_notebook_iframe()
| module1-define-ml-problems/LS_DS_231_assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HTTP Request/Response Cycle - Codealong
#
# ## Introduction
# When developing a Web application as we saw in previous lesson, the request/response cycle is a useful guide to see how all the components of the app fit together. The request/response cycle traces how a user's request flows through the app. Understanding the request/response cycle is helpful to figure out which files to edit when developing an app (and where to look when things aren't working). This lesson will show how this setup works using python.
#
# ## Objectives
#
# You will be able to:
#
# * Understand and explain the HTTP Request/Response cycle
# * Make http requests in Python using the ‘requests’ library
#
# ## The `requests` Library in Python
#
# Dealing with HTTP requests can be a challenging task in any programming language. Python ships with two built-in modules, `urllib` and `urllib2`, to handle these requests, but these can be very confusing and the documentation is not clear. They require the programmer to write a lot of code to make even a simple HTTP request.
#
# To make these things simpler, one easy-to-use third-party library, known as `Requests`, is available and most developers prefer to use it instead of urllib/urllib2. It is an Apache2-licensed HTTP library powered by urllib3 and httplib. Requests is an add-on library that allows you to send HTTP requests using Python. With this library, you can access content like web page headers, form data, files, and parameters via simple Python commands. It also allows you to access the response data in a simple way.
#
# 
#
# Below is how you would install and import the requests library before making any requests.
# ```python
# # Uncomment and install requests if you don't have it already
# # # !pip install requests
#
# # Import requests to working environment
# import requests
# ```
import requests
# ## The `.get()` Method
#
# Now we have requests library ready in our working environment, we can start making some requests using the `.get()` method as shown below:
# ```python
# ### Making a request
# resp = requests.get('https://www.google.com')
# ```
resp = requests.get('https://www.google.com')  # blocking GET; returns a Response object
# GET is by far the most used HTTP method. We can use GET request to retrieve data from any destination.
#
# ## Status Codes
# The request we make may not be always successful. The best way is to check the status code which gets returned with the response. Here is how you would do this.
# ```python
# # Check the returned status code
# resp.status_code == requests.codes.ok
# ```
resp.status_code == requests.codes.ok  # True when the server answered 200 OK
# So this is a good check to see if our request was successful. Depending on the status of the web server, the access rights of the clients and availability of requested information. A web server may return a number of status codes within the response. Wikipedia has an exhaustive details on all these codes. [Check them out here](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes).
#
# ## Response Contents
# Once we know that our request was successful and we have a valid response, we can check the returned information using `.text` property of the response object.
# ```python
# print (resp.text)
# ```
print(resp.text)  # decoded response body (raw HTML for google.com)
import pprint
# So this returns a lot of information which by default is not really human understandable due to data encoding, HTML tags and other styling information that only a web browser can truly translate. In later lessons we'll learn how we can use **_Regular Expressions_** to clean this information and extract the required bits and pieces for analysis.
#
# ## Response Headers
# The response of an HTTP request can contain many headers that holds different bits of information. We can use `.header` property of the response object to access the header information as shown below:
#
# ```python
# # Read the header of the response - convert to dictionary for displaying k:v pairs neatly
# dict(resp.headers)
# ```
# Convert the headers mapping to a plain dict for a readable key/value dump.
pprint.pprint(dict(resp.headers))
# The content of the headers is our required element. You can see the key-value pairs holding various pieces of information about the resource and request. Let's try to parse some of these values using the requests library:
#
# ```python
# print(resp.headers['Date']) # Date the response was sent
# print(resp.headers['server']) # Server type (google web service - GWS)
# ```
print(resp.headers['Date'])    # date the response was sent
print(resp.headers['server'])  # lookup is case-insensitive ('server' == 'Server')
# ## Try `httpbin`
# `httpbin.org` is a popular website to test different HTTP operations and practice with request-response cycles. Let's use httpbin/get to analyze the response to a GET request. First of all, let's find out the response header and inspect how it looks.
#
# ```python
# r = requests.get('http://httpbin.org/get')
#
# response = r.json()
# print(r.json())
# print(response['args'])
# print(response['headers'])
# print(response['headers']['Accept'])
# print(response['headers']['Accept-Encoding'])
# print(response['headers']['Host'])
# print(response['headers']['User-Agent'])
# print(response['origin'])
# print(response['url'])
# ```
# httpbin echoes the request back as JSON, which makes the parts of the
# request/response cycle easy to inspect.
r = requests.get('http://httpbin.org/get')
response = r.json()  # parse the JSON body into a dict
r.headers            # response headers (set by the server)
response['headers']  # request headers, as echoed back by httpbin
print(r.json())
print(response['args'])
print(response['headers'])
print(response['headers']['Accept'])
print(response['headers']['Accept-Encoding'])
print(response['headers']['Host'])
print(response['headers']['User-Agent'])
print(response['origin'])
print(response['url'])
print(response['args'])  # (duplicate of the 'args' print above)
# Let's use `requests` object structure to parse the values of headers as we did above.
#
# ```python
# print(r.headers['Access-Control-Allow-Credentials'])
# print(r.headers['Access-Control-Allow-Origin'])
# print(r.headers['CONNECTION'])
# print(r.headers['content-length'])
# print(r.headers['Content-Type'])
# print(r.headers['Date'])
# print(r.headers['server'])
# ```
# Individual response-header fields; lookups are case-insensitive, so
# 'CONNECTION', 'content-length' and 'Date' all resolve the same way.
print(r.headers['Access-Control-Allow-Credentials'])
print(r.headers['Access-Control-Allow-Origin'])
print(r.headers['CONNECTION'])
print(r.headers['content-length'])
print(r.headers['Content-Type'])
print(r.headers['Date'])
print(r.headers['server'])
# ## Passing Parameters in GET
# In some cases, you'll need to pass parameters along with your GET requests. These extra parameters usually take the the form of query strings added to the requested URL. To do this, we need to pass these values in the `params` parameter. Let's try to access information from `httpbin` with some user information.
#
# Note: The user information is not getting authenticated at `httpbin` so any name/password will work fine. This is merely for practice.
#
# ```python
# credentials = {'user_name': 'FlatironSchool', 'password': '<PASSWORD>'}
# r = requests.get('http://httpbin.org/get', params=credentials)
#
# print(r.url)
# print(r.text)
# ```
# `params` values are URL-encoded into the query string -- note they appear
# in plain text in the final URL, so never send real secrets this way.
credentials = {'user_name': 'FlatironSchool', 'password': '<PASSWORD>'}
r = requests.get('http://httpbin.org/get', params=credentials)
print(r.url)   # final URL with the encoded query string appended
print(r.text)  # httpbin echoes the parsed args back
# ## HTTP POST method
#
# Sometimes we need to send one or more files simultaneously to the server. For example, if a user is submitting a form and the form includes different fields for uploading files, like user profile picture, user resume, etc. Requests can handle multiple files in a single request. This can be achieved by putting the files into a list of tuples in the form (`field_name`, `file_info`).
#
#
# ```python
# import requests
#
# url = 'http://httpbin.org/post'
# file_list = [
# ('image', ('fi.png', open('images/fi.png', 'rb'), 'image/png')),
# ('image', ('fi2.jpeg', open('images/fi2.jpeg', 'rb'), 'image/png'))
# ]
#
# r = requests.post(url, files=file_list)
# print(r.text)
# ```
# +
import requests

url = 'http://httpbin.org/post'

# Open the files with context managers so the handles are closed even if the
# request raises (the original left both files open for the process lifetime).
with open('images/fi.png', 'rb') as png_file, open('images/fi2.jpeg', 'rb') as jpeg_file:
    file_list = [
        ('image', ('fi.png', png_file, 'image/png')),
        # content type fixed: this entry is a JPEG, not a PNG
        ('image', ('fi2.jpeg', jpeg_file, 'image/jpeg'))
    ]

    r = requests.post(url, files=file_list)
print(r.text)
# -
# This was a brief introduction to how you would send requests and get responses from a web server, while totally avoiding the web browser interface. Later we'll see how we can pick up the required data elements from the contents of the web page for analytical purpose.
#
# ## Summary
# In this lesson, we provided an introduction to the `requests` library in python. We saw how to use the get method to send requests to web servers, check server status, look at the header elements of a web page and how to send extra parameters like user information.
| Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # K-means base algo
#
# author: <NAME> (<EMAIL>)
# +
import numpy as np
from scipy.spatial import distance
import matplotlib.pyplot as plt
from sklearn import datasets
from scipy.spatial import Voronoi, voronoi_plot_2d
import time
class base_kmeans(object):
    """K-means (Lloyd's algorithm) generalized to Bregman divergences.

    Supported divergences are the keys of ``dict_breg_divs``:
    'itakura-saito', 'exp', 'gen_kl' and plain 'euclidean'.  Non-Euclidean
    options are passed to ``scipy.spatial.distance.cdist`` as callables.

    NOTE(review): 'itakura-saito' and 'gen_kl' divide by / take logs of the
    data, so they assume strictly positive inputs -- confirm before use.
    """

    def __init__(self, n_clusters, seed=None):
        self.seed = seed
        self.n_clusters = n_clusters
        # set the selected seed
        np.random.seed(self.seed)
        # Divergence name -> callable (or the string 'euclidean', which
        # cdist resolves to its built-in metric).
        self.dict_breg_divs = {
            'itakura-saito' : lambda u, v: ((u/v)-np.log(u/v)-1).sum(),
            'exp' : lambda u, v: (np.exp(u)-np.exp(v)-(u-v)*np.exp(v)).sum(),
            'gen_kl' : lambda u, v: ((u*np.log(u/v)).sum()-(u-v).sum()).sum(),
            'euclidean' : 'euclidean'
        }

    def get_n_clusters(self):
        return self.n_clusters

    def get_centroids(self):
        # Current (post-fit: final) centroids.
        return self.centroids

    def get_first_centroids(self):
        # Centroids as randomly initialized before the first iteration.
        return self.first_centroids

    def get_n_dim(self):
        return self.n_dim

    def get_sum_total_div(self):
        # Per-iteration total within-cluster divergence recorded by fit().
        return np.array(self.sum_total_div)

    def get_last_iter(self):
        # Number of iterations actually executed by the last fit().
        return self.last_iter

    def classification_and_renewal(self, distances):
        """One Lloyd step: assign each point to its nearest centroid, then
        recompute centroids as cluster means.

        `distances` is the (n_samples, n_clusters) divergence matrix.
        Returns (total within-cluster divergence, new centroid array).
        Empty clusters keep their previous centroid unchanged.
        """
        cluster_div = []
        new_centroids = np.zeros_like(self.centroids)
        # Classification Step
        self.labels = np.argmin(distances, axis=1)
        # Renewal Step
        for icluster in range(self.centroids.shape[0]):
            if self.X[self.labels==icluster].shape[0] != 0:
                new_centroids[icluster] = np.mean(self.X[self.labels==icluster], axis=0)
                # Calculate the div inter cluster (points vs. their *old* centroid).
                cluster_div.append(distance.cdist(self.X[self.labels==icluster], self.centroids[icluster][np.newaxis],
                                                  metric=self.dict_breg_divs[self.breg_div]).sum())
            else:
                new_centroids[icluster] = self.centroids[icluster]
        return np.array(cluster_div).sum(), new_centroids

    def predict_cluster(self, X):
        """Return the index of the nearest fitted centroid for each row of X."""
        dist = distance.cdist(X, self.centroids,
                              metric=self.dict_breg_divs[self.breg_div])
        predicted_label = np.argmin(dist, axis=1)
        return predicted_label

    def fit(self, X_data, breg_div='euclidean', n_iter=10, tol=1e-3):
        """Run Lloyd iterations until centroid movement falls below `tol`
        (summed over the diagonal of the old-vs-new centroid divergence
        matrix) or `n_iter` iterations are exhausted.

        Centroids are initialized uniformly at random inside the data's
        bounding box; the seed is re-applied so repeated fits of the same
        instance are reproducible.  Progress is printed each iteration.
        """
        np.random.seed(self.seed)
        # begin: initialize the centroids
        self.tol = tol
        self.X = X_data
        self.breg_div = breg_div
        self.n_iter = n_iter
        self.n_dim = X_data.shape[1]
        self.centroids = np.random.uniform(low=np.min(self.X, axis=0), high=np.max(self.X,axis=0),
                                           size=(self.n_clusters, self.n_dim))
        self.sum_total_div = []
        self.labels = None
        print('Begin K-means using %s divergence... ' %(self.breg_div))
        self.first_centroids = self.centroids
        for i_iter in range(n_iter):
            print('Iteraction: %i' %(i_iter+1))
            dist = distance.cdist(self.X, self.centroids,
                                  metric=self.dict_breg_divs[self.breg_div])
            # Classification and Renewal step
            clust_div, new_centers = self.classification_and_renewal(dist)
            # Check convergence: divergence between each new centroid and its
            # predecessor (diagonal of the pairwise matrix).
            centers_dist = distance.cdist(new_centers, self.centroids,
                                          metric=self.dict_breg_divs[self.breg_div])
            # Save the total divergence for this iteration
            self.sum_total_div.append(clust_div)
            if np.diag(centers_dist).sum() < self.tol:
                # Just to log the number of iterations
                self.last_iter = i_iter+1
                print('The conversion criteria was reached... Stopping!')
                break
            else:
                self.centroids = new_centers
                self.last_iter = i_iter+1
# -
# Sanity-check the implementation on the Iris dataset (150 samples, 4 features).
iris = datasets.load_iris()
y=iris['target']
sample = iris['data']
sample.shape
kmeans = base_kmeans(n_clusters=3)
# %%time
# Iris features are strictly positive, so Itakura-Saito is safe here.
kmeans.fit(sample, n_iter=15, tol=1e-5, breg_div='itakura-saito')
sample[:4]
kmeans.predict_cluster(sample[:4])  # cluster assignment of the first 4 samples
kmeans.get_centroids()
kmeans.get_sum_total_div()  # per-iteration total divergence (should decrease)
kmeans.get_last_iter()
# Convergence curve: total within-cluster divergence per iteration.
plt.figure(figsize=(10,8))
plt.plot(range(kmeans.get_last_iter()), kmeans.get_sum_total_div(), '--o', c='g')
plt.title('Total sum of the divergences', fontsize=15)
plt.ylabel(r'$D_{\phi}[C: D]$', fontsize=13)
plt.xlabel(r'Iteractions', fontsize=13)
plt.grid()
plt.show()
# Data with initial vs. final centroids, projected onto the first two features.
plt.figure(figsize=(10,8))
plt.plot(sample[:,0], sample[:,1], 'o', label='Data Points')
plt.plot(kmeans.get_first_centroids()[:,0], kmeans.get_first_centroids()[:,1], '*',
         markersize=10, label='Initial Centroids')
plt.plot(kmeans.get_centroids()[:,0], kmeans.get_centroids()[:,1], '^',
         markersize=10, label='Final Centroids')
plt.legend(loc='best', fontsize='x-large')
plt.show()
centers = kmeans.get_centroids()
# NOTE(review): the 4-D centroids are projected onto the first two features
# for plotting, so the Voronoi cells are only an approximation of the true
# 4-D partition.
proj_2d = centers[:,:2]
# Get the Voronoi diagrams
vor = Voronoi(proj_2d)
fig, axes = plt.subplots(1, 1, figsize=(10,8))
# Draw data using target to colorize them
axes.scatter(sample[:, 0], sample[:, 1], c=y, cmap='Set1',
             edgecolor='k', s=50, alpha=.95)
# Draw the centroids
axes.plot(centers[:,0], centers[:,1], '^', c='black', markersize=15, label='Final Centroids')
# Draw voronoi
voronoi_plot_2d(vor, ax=axes, show_vertices=True)
plt.grid()
plt.legend(loc='best', fontsize='x-large')
plt.show()
| jupyter-notebooks/base_kmeans_algo.ipynb |
# ##### Copyright 2020 The OR-Tools Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # assignment2_sat
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/examples/assignment2_sat.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a>
# </td>
# <td>
# <a href="https://github.com/google/or-tools/blob/master/examples/python/assignment2_sat.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a>
# </td>
# </table>
# First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab.
# !pip install ortools
# +
# Copyright 2010-2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ortools.sat.python import cp_model
# Instantiate a cp model.
# Task/worker assignment with CP-SAT: every task needs at least one worker,
# each worker's total task size is capped, and total cost is minimized.

# cost[w][t]: cost of assigning task t to worker w (10 workers x 8 tasks).
cost = [[90, 76, 75, 70, 50, 74, 12, 68],
        [35, 85, 55, 65, 48, 101, 70, 83],
        [125, 95, 90, 105, 59, 120, 36, 73],
        [45, 110, 95, 115, 104, 83, 37, 71],
        [60, 105, 80, 75, 59, 62, 93, 88],
        [45, 65, 110, 95, 47, 31, 81, 34],
        [38, 51, 107, 41, 69, 99, 115, 48],
        [47, 85, 57, 71, 92, 77, 109, 36],
        [39, 63, 97, 49, 118, 56, 92, 61],
        [47, 101, 71, 60, 88, 109, 52, 90]]

sizes = [10, 7, 3, 12, 15, 4, 11, 5]  # size (load) of each task
total_size_max = 15                   # per-worker capacity

num_workers = len(cost)
# NOTE(review): cost[0] would be the conventional row here; all rows have
# equal length, so the result is the same.
num_tasks = len(cost[1])
all_workers = range(num_workers)
all_tasks = range(num_tasks)

model = cp_model.CpModel()
# Variables
total_cost = model.NewIntVar(0, 1000, 'total_cost')
# x[i][j] == 1 iff worker i is assigned to task j.
x = []
for i in all_workers:
    t = []
    for j in all_tasks:
        t.append(model.NewBoolVar('x[%i,%i]' % (i, j)))
    x.append(t)

# Constraints
# Each task is assigned to at least one worker.
[model.Add(sum(x[i][j] for i in all_workers) >= 1) for j in all_tasks]

# Total task size for each worker is at most total_size_max
for i in all_workers:
    model.Add(sum(sizes[j] * x[i][j] for j in all_tasks) <= total_size_max)

# Total cost
model.Add(total_cost == sum(x[i][j] * cost[i][j]
                            for j in all_tasks for i in all_workers))
model.Minimize(total_cost)

solver = cp_model.CpSolver()
status = solver.Solve(model)

# NOTE(review): a FEASIBLE (non-optimal) solution is silently skipped here;
# checking `status in (cp_model.OPTIMAL, cp_model.FEASIBLE)` may be intended.
if status == cp_model.OPTIMAL:
    print('Total cost = %i' % solver.ObjectiveValue())
    print()
    for i in all_workers:
        for j in all_tasks:
            if solver.Value(x[i][j]) == 1:
                print('Worker ', i, ' assigned to task ', j, ' Cost = ',
                      cost[i][j])
    print()
print('Statistics')
print(' - conflicts : %i' % solver.NumConflicts())
print(' - branches : %i' % solver.NumBranches())
print(' - wall time : %f s' % solver.WallTime())
| examples/notebook/examples/assignment2_sat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <b> This notebook performs a mantel test to determine the correlations between each of the PCoAs generated for this project </b>
#
# Notebook by YB & RM
#
# Environment - Qiime2 2018.4
# %matplotlib inline
import qiime2 as q2
import seaborn as sns, numpy as np
from skbio import DistanceMatrix
from skbio.stats.distance import mantel
import pandas as pd
# /Users/rhmills/Documents/Thesis Work/UC_Severity
# Map from omics preparation name -> path to its QIIME 2 distance-matrix
# artifact.  Commented entries are alternative matrices that were tried.
PREPS = {
    'Serum Proteomics': 'core-metrics-results_Serum/bray_curtis_distance_matrix.qza',
    'Metabolomics': 'pDB_Proteomics/core-metrics-results_Metabolomics/bray_curtis_distance_matrix.qza',
    'Metaproteomics': 'pDB_Proteomics/core-metrics-results_2search/bray_curtis_distance_matrix.qza',
    #'Metagenomics': 'core-metrics-results_MG/bray_curtis_distance_matrix.qza',
    'Metagenomics': 'core-metrics-results-MGall13000/unweighted_unifrac_distance_matrix.qza',
    #'16S': 'Genomics/16S/core-metrics-results_idswap2_newmetadata/unweighted_unifrac_distance_matrix.qza'
    #'16S': 'Genomics/16S/core-metrics-results_idswap2_newmetadata/bray_curtis_distance_matrix.qza'
    '16S':'Genomics/16S/core-metrics-results_idswap2_newmetadata_allsamples/unweighted_unifrac_distance_matrix.qza'
    #'16S':'Genomics/16S/core-metrics-results_idswap2_newmetadata_allsamples/bray_curtis_distance_matrix.qza'
}
# +
# Load each artifact into a scikit-bio DistanceMatrix.
dms = {}
for prep, path in PREPS.items():
    dms[prep] = q2.Artifact.load(path).view(DistanceMatrix)

# fix the sample ids for the 16S
dms['16S'].ids = [i.replace('11549.', '') for i in dms['16S'].ids]

# `np.float` was removed in NumPy 1.24; the builtin `float` is the documented
# replacement (the alias always meant the builtin).
corr = pd.DataFrame(columns=dms.keys(), index=dms.keys(), dtype=float)
# -
# #Two samples, H6 and L16, were removed from 16S PCoA analysis because they did not meet rarefaction requirements
# #of <5000 - should be resolved in the allsamples file
# shared = set()
# for prep, dm in dms.items():
#     if len(shared) == 0:
#         shared = set(dm.ids)
#     else:
#         shared &= set(dm.ids)
#
# dms = {prep: dm.filter(shared) for prep, dm in dms.items()}
# With the filtering loop above commented out, `shared` was never defined and
# the `len(shared)` below raised NameError.  Recompute the intersection of
# sample ids (without filtering the matrices) so the count can be inspected.
shared = set.intersection(*(set(dm.ids) for dm in dms.values()))
len(shared)
# +
from itertools import combinations

# Pairwise Mantel tests between every pair of distance matrices; the
# correlation matrix is symmetric, so fill both triangles with r.
for prep_a, prep_b in combinations(dms.keys(), 2):
    r, p, n = mantel(dms[prep_a], dms[prep_b])
    corr.loc[prep_a, prep_b] = r
    corr.loc[prep_b, prep_a] = r
# -
# Order rows and columns identically so the heatmap diagonal lines up.
corr = corr.sort_values(by = ['Serum Proteomics', 'Metabolomics', 'Metagenomics', 'Metaproteomics', '16S'])
corr = corr.sort_values(by = ['Serum Proteomics', 'Metabolomics', 'Metagenomics', 'Metaproteomics', '16S'], axis = 1)
# +
# Generate a mask for the upper triangle
# (`np.bool` was removed in NumPy 1.24; the builtin `bool` is the replacement)
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Draw the heatmap with the mask and correct aspect ratio viridis
plot1 = sns.heatmap(corr, mask=mask, cmap='Reds', annot = True,
                    square=True, linewidths=.5, cbar_kws={"shrink": .5}).get_figure()
plot1.savefig('./pdfs/Omics_Heatmap_UniFracMGs.pdf')
# -
| Correlations_of_Metaomics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from sklearn.model_selection import train_test_split
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# One CSV row per sensor window (flattened channels -- reshaped to (6, 125)
# downstream) plus a parallel label file with user / phone model / activity.
all_data_df = pd.read_csv('./datasets/heterogenity/original/dataset_50_2.5.csv', header=None)
all_label_df = pd.read_csv('./datasets/heterogenity/original/dataset_labels_50_2.5.csv', names=["user", "model", "gt"])
dataset_df = pd.concat([all_data_df,all_label_df], axis=1)
# Leave-users-out split: users 'a' and 'b' are held out for testing, and the
# activity label is one-hot encoded into gt_* columns.
train_dataset_df = dataset_df.loc[(dataset_df['user'] != 'a') & (dataset_df['user'] != 'b')]
train_reference_df = pd.get_dummies(train_dataset_df, columns=['gt'])
test_dataset_df = dataset_df.loc[(dataset_df['user'] == 'a') | (dataset_df['user'] == 'b')]
test_reference_df = pd.get_dummies(test_dataset_df, columns=['gt'])
# +
def print_dataset_statistics(train_reference_df, test_reference_df):
    """Print per-activity counts and percentages for the train and test splits.

    Both frames must carry the one-hot ground-truth columns produced by
    ``pd.get_dummies`` (gt_sit, gt_stand, gt_walk, gt_bike, gt_stairsup,
    gt_stairsdown).  Output goes to stdout; nothing is returned.
    """
    # (display label incl. alignment tabs, one-hot column) pairs, in the
    # order the report prints them.  Replaces twelve copy-pasted count
    # variables and twelve near-identical print statements.
    activities = [
        ('Stand:\t\t', 'gt_stand'),
        ('Sit:\t\t', 'gt_sit'),
        ('Walk:\t\t', 'gt_walk'),
        ('Bike:\t\t', 'gt_bike'),
        ('Stairs up:\t', 'gt_stairsup'),
        ('Stairs down:\t', 'gt_stairsdown'),
    ]
    total_df_data = len(train_reference_df) + len(test_reference_df)

    def _print_split(title, df):
        # One section of the report: per-activity breakdown plus the split's
        # share of the whole dataset.
        print(title)
        n = len(df)
        for label, column in activities:
            count = sum(df[column] == 1)
            print('\t{}{} ({:.2f}%)'.format(label, count, 100 * count / n))
        print('')
        print('\tPercentage of total\t{} ({:.2f}%)'.format(n, 100 * n / total_df_data))

    _print_split('TRAIN SET', train_reference_df)
    print('')
    _print_split('TEST SET', test_reference_df)
# Report the class distribution of the leave-two-users-out split.
print_dataset_statistics(train_reference_df, test_reference_df)
# -
print(train_reference_df)
# +
def extract_basic_features(acc_x, acc_y, acc_z):
    """Hand-crafted per-window features from three accelerometer axes.

    Each of acc_x/acc_y/acc_z is a sequence of windows (samples along
    axis 1).  Per window: mean, std and mean absolute deviation of each
    axis, the mean magnitude sqrt(x^2 + y^2 + z^2), and a 10-bin histogram
    per axis -- 10 + 30 = 40 features per window.  Returns a list of
    40-element feature lists, one per window.
    """
    # prova = np.array(np.apply_along_axis(np.histogram, 1, acc_x)[0]).reshape(2,1)
    np_acc_x = np.array(acc_x)
    np_acc_y = np.array(acc_y)
    np_acc_z = np.array(acc_z)
    # Per-window means as column vectors of shape (n_windows, 1).
    mean_x = np.expand_dims(np.mean(np_acc_x, axis=1), axis=0).T
    mean_y = np.expand_dims(np.mean(np_acc_y, axis=1), axis=0).T
    mean_z = np.expand_dims(np.mean(np_acc_z, axis=1), axis=0).T
    basic_features = np.concatenate( (
        # insert MEANS
        mean_x,
        mean_y,
        mean_z,
        # insert STD
        np.expand_dims(np.std(np_acc_x, axis=1), axis=0).T,
        np.expand_dims(np.std(np_acc_y, axis=1), axis=0).T,
        np.expand_dims(np.std(np_acc_z, axis=1), axis=0).T,
        # mean absolute deviation from the per-window mean
        np.expand_dims(np.mean(abs(np_acc_x - mean_x), axis=1), axis=1),
        np.expand_dims(np.mean(abs(np_acc_y - mean_y), axis=1), axis=1),
        np.expand_dims(np.mean(abs(np_acc_z - mean_z), axis=1), axis=1),
        # mean magnitude of the 3-axis acceleration vector
        np.expand_dims(np.mean( np.sqrt( np.power(np_acc_x, 2) + np.power(np_acc_y,2) + np.power(np_acc_z, 2) ), axis=1), axis=0).T
    ), axis=1).tolist()
    for i in range(0, len(acc_x)):
        bins_x, centers_x = np.histogram(acc_x[i], bins=10)
        bins_y, centers_y = np.histogram(acc_y[i], bins=10)
        bins_z, centers_z = np.histogram(acc_z[i], bins=10)
        # NOTE(review): the histogram counts are divided by the number of
        # windows (len(acc_x)), not by the window length (len(acc_x[i])).
        # Verify this is the intended normalization before changing it --
        # a trained model depends on the current scaling.
        basic_features[i].extend(bins_x / len(acc_x))
        basic_features[i].extend(bins_y / len(acc_y))
        basic_features[i].extend(bins_z / len(acc_z))
    return basic_features
# Smoke test on two hand-made 10-sample windows: expect 40 features per window.
prova_feat = np.array(extract_basic_features( [[1,10,3,4,5,6,7,8,9,10], [1,2,3,4,5,6,7,8,9,10]] , [[1,3,3,4,5,6,7,8,9,10], [1,2,3,4,5,6,7,8,9,10]], [[1,3,3,4,5,6,7,8,9,10], [1,2,3,4,5,6,7,8,9,10]]))
print(prova_feat)
# + tags=[]
def create_dataset(reference_df, batch_size, shuffle, cache_file, center_data=False):
    """Build a batched, infinitely-repeating tf.data pipeline.

    Per example: 'input_1' = the raw (6, 125) sensor window, 'input_2' = the
    40 hand-crafted features, target = the 6-way one-hot activity label.
    `center_data=True` subtracts the per-window mean from the first three
    channels only (the accelerometer axes; the remaining three are left
    untouched).
    """
    target = reference_df[['gt_sit','gt_stand','gt_walk','gt_bike','gt_stairsup','gt_stairsdown']].values.astype(int).tolist()
    # RESHAPING DATAS: the first 750 columns are 6 channels x 125 samples.
    np_data = np.array(reference_df.iloc[:,0:750])
    np_reshaped_data = np.reshape(np_data.copy(), (np_data.shape[0], 6, 125))
    # Data centering (accelerometer channels only)
    if center_data:
        for i in range(len(np_reshaped_data)):
            window = np_reshaped_data[i]
            means = np.mean(window, axis=1)
            centered_acc = np.array(([window[j] - means[j] for j in range(3)]))
            np_reshaped_data[i] = np.concatenate((centered_acc, window[3:]), axis=0)
    # Extract manual features from the three accelerometer channels.
    np_basic_features = np.array(extract_basic_features(np_data[:, 0:125], np_data[:, 125:250], np_data[:, 250: 375]))
    # Create dataset obj
    dataset = tf.data.Dataset.from_tensor_slices( ({"input_1": np_reshaped_data, "input_2": np_basic_features}, target) )
    # Cache dataset
    if cache_file:
        dataset = dataset.cache(cache_file)
    # Shuffle with a buffer covering the full dataset.
    if shuffle:
        dataset = dataset.shuffle(len(target))
    # Repeat the dataset indefinitely (steps_per_epoch bounds each epoch).
    dataset = dataset.repeat()
    # Batch
    dataset = dataset.batch(batch_size=batch_size)
    # Prefetch one batch ahead of the training loop.
    dataset = dataset.prefetch(buffer_size=1)
    return dataset

batch_size = 128
training_dataset = create_dataset(train_reference_df, batch_size=batch_size, shuffle=True, cache_file=None)
val_dataset = create_dataset(test_reference_df, batch_size=batch_size, shuffle=True, cache_file=None)

# Peek at one batch to confirm the pipeline shapes.
for train, targ in training_dataset.take(1):
    print ('Features: {}, Target: {}'.format(train, targ))

# Steps per epoch for the infinite (repeated) datasets.
train_steps = int(np.ceil(len(train_reference_df)/batch_size))
val_steps = int(np.ceil(len(test_reference_df)/batch_size))
# +
def build_model(input_shape):
    """Assemble the two-input classifier: a Conv1D branch over the raw
    (6, 125) window concatenated with the 40 hand-crafted features, followed
    by a dense head with softmax over the 6 activities.
    """
    l2_reg = 5e-4
    # Pretrained encoder loaded from disk and frozen.
    encoder = tf.keras.models.load_model('encoder.h5')
    # NOT TRAIN THE MODEL
    encoder.trainable = False
    # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
    training_input = tf.keras.Input(shape=input_shape, dtype=tf.float32, name='input_1')
    basic_feat_input = tf.keras.Input(shape=40, dtype=tf.float32, name='input_2')
    CNN = tf.keras.layers.Conv1D(196, 16, activation='relu', padding='same')(training_input)
    CNN = tf.keras.layers.MaxPool1D(4, padding='same')(CNN)
    feautures_CCN = tf.keras.layers.Flatten()(CNN)
    # NOTE(review): `featuers_encoder` is computed but never concatenated
    # into `features`, so the encoder branch is effectively disabled.
    featuers_encoder = encoder(training_input)
    features = tf.concat((feautures_CCN, basic_feat_input), 1)
    #features = tf.concat((feautures_CCN), 1)
    FFNN = tf.keras.layers.Dense(1024, activation='relu', kernel_regularizer=tf.keras.regularizers.L2(l2_reg), activity_regularizer=tf.keras.regularizers.L2(l2_reg))(features)
    FFNN = tf.keras.layers.Dropout(0.05)(FFNN)
    model_output = tf.keras.layers.Dense(6, activation='softmax')(FFNN)
    model = tf.keras.Model(inputs = [training_input, basic_feat_input], outputs = model_output, name='OurModel')
    return model

model = build_model((6,125))
adam_optimizer = tf.keras.optimizers.Adam(learning_rate=5e-4)
loss_funct = tf.keras.losses.CategoricalCrossentropy()
model.compile(optimizer = adam_optimizer, loss = loss_funct, metrics = ["accuracy"])
print(model.summary())
# +
# Keep only the weights with the best validation accuracy; stop early once
# the training loss plateaus for 3 consecutive epochs.
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath='./models/checkpoint', save_weights_only=True, monitor='val_accuracy', mode='max', save_best_only=True)
early_stopping_callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
model.fit(training_dataset, epochs = 100, steps_per_epoch=train_steps, validation_data=val_dataset, validation_steps=val_steps, callbacks = [early_stopping_callback, model_checkpoint_callback])
# -
# # K-FOLD CROSS VALIDATION
# +
from sklearn.metrics import classification_report
user_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
batch_size = 128
checkpoint_filepath = './models/checkpoint'
models_accuracy = []
models_precision = []
models_recall = []
for user_to_exclude in user_list:
train_dataset_df = dataset_df.loc[(dataset_df['user'] != user_to_exclude)]
train_reference_df = pd.get_dummies(train_dataset_df, columns=['gt'])
test_dataset_df = dataset_df.loc[(dataset_df['user'] == user_to_exclude)]
test_reference_df = pd.get_dummies(test_dataset_df, columns=['gt'])
training_dataset = create_dataset(train_reference_df, batch_size=batch_size, shuffle=True, cache_file=None)
val_dataset = create_dataset(test_reference_df, batch_size=batch_size, shuffle=True, cache_file=None)
train_steps = int(np.ceil(len(train_reference_df)/batch_size))
val_steps = int(np.ceil(len(test_reference_df)/batch_size))
model = build_model((6,125))
adam_optimizer = tf.keras.optimizers.Adam(learning_rate=5e-4)
loss_funct = tf.keras.losses.CategoricalCrossentropy()
model.compile(optimizer = adam_optimizer, loss = loss_funct, metrics=[tf.keras.metrics.CategoricalAccuracy(), tf.keras.metrics.Precision(), tf.keras.metrics.Recall()],
)
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_filepath, save_weights_only=True, monitor='val_categorical_accuracy', mode='max', save_best_only=True)
early_stopping_callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
model.fit(training_dataset, epochs = 100, steps_per_epoch=train_steps, validation_data=val_dataset, validation_steps=val_steps, callbacks = [early_stopping_callback, model_checkpoint_callback])
# L0AD BEST MODEL
# The model weights (that are considered the best) are loaded into the model.
model.load_weights(checkpoint_filepath)
metrics = model.evaluate(val_dataset, batch_size=batch_size, steps=val_steps)
loss, accuracy, precision, recall = metrics
print('Accuracy: ' + str(accuracy))
print('Precision: ' + str(precision))
print('Recall: ' + str(recall))
models_accuracy.append(accuracy)
models_precision.append(precision)
models_recall.append(recall)
# +
import numpy as np

# Summarise the leave-one-user-out results.  The original cell only
# reported accuracy even though precision and recall were collected
# above; report all three.
print(f'Accuracy:\tMean={np.mean(models_accuracy)}\tstd={np.std(models_accuracy)}')
print(f'Precision:\tMean={np.mean(models_precision)}\tstd={np.std(models_precision)}')
print(f'Recall:\tMean={np.mean(models_recall)}\tstd={np.std(models_recall)}')
# -
| training_model_user_splitting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: torch
# language: python
# name: torch
# ---
# +
# test
from collections import Counter

def solution(v):
    """Given three corners of an axis-aligned rectangle, return the
    fourth corner: the x and the y value that each occur only once
    among the given points.
    """
    x_counts = Counter(p[0] for p in v)
    y_counts = Counter(p[1] for p in v)
    # The coordinate with the lowest count (i.e. appearing once) belongs
    # to the missing corner.  Counter preserves first-seen order, so
    # tie-breaking matches the original dict-based implementation.
    return [min(x_counts, key=x_counts.get), min(y_counts, key=y_counts.get)]

print(solution([[1, 4], [3, 4], [3, 10]]))
print(solution([[1, 1], [2, 2], [1, 2]]))
# +
# test
from collections import Counter
from copy import deepcopy  # retained from the original cell; no longer needed

def solution(v):
    """Return the most frequent letters of *v* (case-insensitive),
    sorted alphabetically, with 's'/'o'/'t' promoted to the front as
    'SS'/'O'/'T' respectively (e.g. "aaBBTtooSS" -> "TOSSab").

    Note: the original deep-copied the input string before lowering it;
    strings are immutable, so that copy was a no-op and was removed.
    """
    counts = Counter(v.lower())
    top = max(counts.values())
    answer = sorted(ch for ch, n in counts.items() if n == top)
    # Promote the special letters; each insert lands in front of the
    # previously promoted ones, producing the T, O, SS front ordering.
    for plain, fancy in (('s', 'SS'), ('o', 'O'), ('t', 'T')):
        if plain in answer:
            answer.remove(plain)
            answer.insert(0, fancy)
    return ''.join(answer)

print(solution("aAb"))
print(solution("BA"))
print(solution("BbA"))
print(solution("aaBBTtooSS"))
# +
# fail
BORROW_MONEY = 50_000_000
FIRST_ASSET = 100_000_000
TARGET_ASSET = FIRST_ASSET * 10

def buy_stock(asset, price):
    """Spend as much of *asset* as possible at *price*; return (shares, change)."""
    return divmod(asset, price)

def estimate_asset(asset, stock_num, price, loan_status):
    """Net worth: cash plus holdings valued at *price*, minus the loan if taken."""
    return asset + stock_num * price - int(loan_status) * BORROW_MONEY

def find_dday(price_list):
    """Number of days until net worth reaches TARGET_ASSET with an
    all-in buying strategy and a one-time loan taken the first time the
    price falls below half of the starting price; -1 if never reached.
    """
    first_price = price_list[0]
    stock, asset = buy_stock(FIRST_ASSET, first_price)
    loan_status = False
    for day, price in enumerate(price_list[1:], start=1):
        # One-time loan on the first drop below half the starting price.
        if not loan_status and price / first_price < 0.5:
            loan_status = True
            asset += BORROW_MONEY
        if asset > price:
            bought, asset = buy_stock(asset, price)
            stock += bought
        if estimate_asset(asset, stock, price, loan_status) >= TARGET_ASSET:
            return day
    return -1
def solution(v):
    """For each suffix of the price series, the number of days until the
    target asset is reached (-1 when it never is)."""
    return [find_dday(v[start:]) for start in range(len(v))]

print(solution([78000, 48000, 27000, 285000, 320000, 335100]))
print(solution([34000,78000, 48000, 27000, 11000, 285000, 320000, 335100]))
# +
# Sanity check: shares and change from an all-in buy at 34,000,
# then the portfolio value if the price rises to 285,000.
buy, rest = divmod(100_000_000, 34_000)
print(buy, rest)
buy * 285_000 + rest
# -
| Research/TossCodeTest-210814/.ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: base
# language: python
# name: base
# ---
# # US - Baby Names
# ### Introduction:
#
# We are going to use a subset of [US Baby Names](https://www.kaggle.com/kaggle/us-baby-names) from Kaggle.
# The file contains names from 2004 through 2014.
#
#
# ### Step 1. Import the necessary libraries
import pandas as pd
import numpy as np
# %config Completer.use_jedi = False
# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/06_Stats/US_Baby_Names/US_Baby_Names_right.csv).
# ### Step 3. Assign it to a variable called baby_names.
# Download the pre-cleaned subset (2004-2014) straight from GitHub.
baby_names = pd.read_csv('https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/06_Stats/US_Baby_Names/US_Baby_Names_right.csv')
baby_names.info()
# ### Step 4. See the first 10 entries
baby_names.head(10)
# ### Step 5. Delete the column 'Unnamed: 0' and 'Id'
# +
# Drop the CSV row index and the Id column; neither carries information.
baby_names = baby_names.drop(['Unnamed: 0', 'Id'], axis = 1)
# -
baby_names
# ### Step 6. Is there more male or female names in the dataset?
baby_names.Gender.value_counts()
# ### Step 7. Group the dataset by name and assign to names
# Step 7: aggregate all rows per name.
names = baby_names.groupby('Name').sum()
# Fix: the original called drop() without assigning the result back, so
# the 'Year' column (meaningless once summed) was never actually removed.
names = names.drop('Year', axis = 1)
# ### Step 8. How many different names exist in the dataset?
# Fix: after the groupby, 'Name' is the index, so `names.Name` would
# raise AttributeError.  The distinct-name count is simply the row count.
len(names)
# ### Step 9. What is the name with most occurrences?
baby_names['Name'].value_counts().sort_values(ascending = False).head(1)
# NOTE(review): value_counts counts data rows per name; if "occurrences"
# means total babies, `names.Count.idxmax()` is the answer — confirm intent.
# ### Step 10. What is the name with the least occurrences?
# Fix: the original wrapped a boolean Series in a list, which answers
# nothing.  Count how many names share the minimum total count.
(names.Count == names.Count.min()).sum()
# ### Step 11. What is the median name occurrence?
names.Count.median()
# ### Step 12. What is the standard deviation of names?
names.Count.std()
# ### Step 13. Get a summary with the mean, min, max, std and quartiles.
names.Count.describe()
| 06_Stats/US_Baby_Names/Exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial
# ## Neuron simulation
from odynn import nsimul as ns
import scipy as sp
# Time grid 0..1200 in 0.1 steps (time units per odynn's convention).
# NOTE(review): `scipy.arange` was a NumPy re-export removed from modern
# SciPy; newer environments need numpy.arange here — confirm the pinned
# SciPy version.
t = sp.arange(0., 1200., 0.1)
# Square current pulse of amplitude 40 injected while 400 < t < 800.
i = 40. * ((t > 400) & (t < 800))
ns.simul(dt=0.1, i_inj=i, show=True, save=False)
# ## Compare two sets of parameters
from odynn.models.celeg import DEFAULT, DEFAULT_2
DEFAULT
# Compare the two default parameter sets under the same stimulation.
ns.comp_pars_targ(DEFAULT, DEFAULT_2, dt=0.1, i_inj=i, show=True)
# ## Compare more sets
from copy import copy
# Sweep the membrane capacitance C_m over two orders of magnitude.
d1 = copy(DEFAULT)
d1['C_m'] = 100.
d2 = copy(DEFAULT)
d2['C_m'] = 10.
d3 = copy(DEFAULT)
d3['C_m'] = 1
ns.comp_pars([d1, d2, d3], dt=0.1, i_inj=i, show=True)
# Sweep the calcium conductance g_Ca around its default.
d1 = copy(DEFAULT)
d1['g_Ca'] = 0.8
d2 = copy(DEFAULT)
d2['g_Ca'] = 1
d3 = copy(DEFAULT)
d3['g_Ca'] = 1.2
ns.comp_pars([d1, d2, d3], dt=0.1, i_inj=i, show=True)
| tutorial/Running simulations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + jupyter={"outputs_hidden": true}
# !pip install --upgrade --user pandas pyarrow s3fs boto3 "dask[complete]"
# +
import pandas as pd
import dask.dataframe as dd
# %reload_ext autoreload
# %autoreload 2
# -
# # Load
# +
# Columns to pull from the parquet dataset (avoids loading the rest).
columns = ["account_id", "url", "city_code", "state_code", "dma_code",
           "country_code","user_id", "session_id", "referrer", "client",
           "user_agent_platform", "user_agent_language", "user_agent_browser",
           "zeta_user_id","geo_data","ip", "remote_addr"]
#columns = ["ip"]
file_path = f"s3://drose-sandbox/sizmek_zync_64m"
# +
# %%time
#dd_raw = dd.read_parquet(file_path, columns=columns).compute()
#dd_raw.compute().shape
# +
# %%time
# Load the selected columns into memory with pandas (the dask path
# above was abandoned) and report the footprint.
df_raw = pd.read_parquet(file_path, columns=columns)
print(f"Shape: {df_raw.shape[0]:,} Memory: {df_raw.memory_usage(deep=True).sum()/1e9:.2f}GB")
# +
#df2 = df_raw.copy()
# -
# # IP
# +
# %%time
def replace_ip(ips, thresh=1000):
    """Return a copy of *ips* with values occurring fewer than *thresh*
    times replaced by the literal string "other".

    The input Series is left untouched.
    """
    frequencies = ips.map(ips.value_counts())
    # mask() keeps NaN entries untouched, matching the original
    # boolean-index assignment (NaN < thresh is False).
    return ips.mask(frequencies < thresh, "other")
# Collapse rare IPs on both the Sizmek and the Zync side.
new_ips = replace_ip(df_raw["ip"])
# +
# %%time
new_addrs = replace_ip(df_raw["remote_addr"])
# -
print(f"Sizmek ips: {new_ips.nunique()}")
print(f"Zync ips: {new_addrs.nunique()}")
# # Geo
# Peek at one geo_data record (a dict-like value per row).
df_raw["geo_data"][0]
# +
# %%time
# Expand the dict-valued geo_data column into one column per key.
# Series.tolist() hands the row dicts straight to the DataFrame
# constructor — same result as the original list comprehension over
# .values, without the extra Python-level copy.
geo_split = pd.DataFrame(df_raw["geo_data"].tolist())
#geo_split = df_raw["geo_data"].apply(pd.Series)
geo_split
# -
# ### Coords
# +
# %%time
# Same expansion for the nested coordinates dicts.
coords = pd.DataFrame(geo_split["coordinates"].tolist())
# -
coords[:3]
# ### Countries
# +
# %%time
def replace_low_counts(data, thresh=1_000):
    """Return a copy of *data* with values occurring fewer than *thresh*
    times replaced by "other"; the input is not modified."""
    frequencies = data.map(data.value_counts())
    # mask() leaves NaNs alone, matching the original boolean-index
    # assignment (NaN < thresh evaluates False).
    return data.mask(frequencies < thresh, "other")
# Collapse rare countries (default threshold: 1,000 rows).
new_countries = replace_low_counts(geo_split["country"])
new_countries.value_counts().shape
# -
# ### Cities
# +
# %%time
# Same treatment for cities; print cardinality before and after.
print(geo_split["city"].value_counts().shape)
new_cities = replace_low_counts(geo_split["city"], 1_000)
print(new_cities.value_counts().shape)
# -
# # Make new DF
# +
# %%time
cols_to_keep = [
    "account_id", "url", "city_code", "state_code", "dma_code",
    "country_code","user_id", "session_id", "referrer", "client",
    "user_agent_platform", "user_agent_language", "user_agent_browser",
    "zeta_user_id",
]
# Fix: take an explicit copy so the column assignments below operate on
# an independent frame rather than a slice of df_raw (avoids
# SettingWithCopyWarning and silent non-writes under copy-on-write).
df3 = df_raw[cols_to_keep].copy()
# +
# %%time
# Attach the cleaned / derived columns computed above.
df3["sizmek_ip"] = new_ips
df3["zync_ip"] = new_addrs
df3["zync_country"] = new_countries
df3["zync_state"] = geo_split["subdivision"]
df3["zync_city"] = new_cities
df3["zync_lat"] = coords["latitude"]
df3["zync_long"] = coords["longitude"]
# +
#df3.head(1).T
# -
# # Filter Down
# +
# %%time
# Fill, dedupe and reindex; shapes are printed at each stage so the
# reduction is visible in the notebook output.
print(df3.shape)
df4 = df3.fillna("missing")
print(df4.shape)
df4 = df4.drop_duplicates()
print(df4.shape)
df4 = df4.reset_index(drop=True)
print(df4.shape)
# +
# %%time
print(df4.shape)
# Group on every column except url/referrer, collecting those two into
# lists, then keep only user_ids that appear in more than one group.
group_list = [e for e in (df4.columns) if e not in ("url", "referrer")]
df5 = df4.groupby(group_list)[["url", "referrer"]].agg(lambda x: list(x))
df5 = df5.reset_index()
print(df5.shape)
df5 = df5[df5["user_id"].duplicated(keep=False)]
print(df5.shape)
# -
# # Export
# %%time
df4.to_csv("s3://drose-sandbox/sizmek_zync_cleaned_793k.csv")
# %%time
df5.to_csv("s3://drose-sandbox/sizmek_zync_cleaned_grouped_30k.csv")
| m64_ip.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ZgiJmF-3pe3s" colab_type="text"
# {'buildings' -> 0,
#
# 'forest' -> 1,
#
# 'glacier' -> 2,
#
# 'mountain' -> 3,
#
# 'sea' -> 4,
#
# 'street' -> 5 }
# + [markdown] id="n3RYV4j1pM_s" colab_type="text"
#
# + id="Z0Clg3m3nxeM" colab_type="code" colab={}
# Class-index mapping: position in this list == integer label id (0..5).
labels = ['buildings', 'forest', 'glacier', 'mountain', 'sea', 'street']
# + id="XPLyRQHKmdUq" colab_type="code" colab={}
# Mount Google Drive so the dataset and model checkpoints are reachable.
from google.colab import drive
drive.mount('/content/gdrive')
# + _uuid="f2b1b45d7222c7109884df0029385d81718f7209" id="CNui6my2ipD2" colab_type="code" colab={}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json
import os
from tqdm import tqdm, tqdm_notebook
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.applications import *
from tensorflow.keras.callbacks import *
from tensorflow.keras.initializers import *
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" id="mK84XzEvipEJ" colab_type="code" outputId="31558f57-c51a-4946-d3eb-043584294794" executionInfo={"status": "ok", "timestamp": 1554029749793, "user_tz": -330, "elapsed": 2773, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 54}
# Project root on the mounted Drive; listing it verifies the mount worked.
home_dir = '/content/gdrive/My Drive/data-science-my-projects/AV-Intel-Scene-Classification-Challenge'
print(os.listdir(home_dir))
# + [markdown] _uuid="4d3374968aa7f3881f1b83408f272367f0e63619" id="cwFAsswOipEY" colab_type="text"
# ## Read and set up data
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" id="e-JCRYi3ipEb" colab_type="code" outputId="f127a7bf-4cf7-43b2-8811-290234509f3e" executionInfo={"status": "ok", "timestamp": 1554029750252, "user_tz": -330, "elapsed": 2385, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 204}
# Read data
# train.csv maps image_name -> label for the images under dataset/train.
dataset_dir = os.path.join(home_dir, "dataset")
train_dir = os.path.join(dataset_dir, "train")
train_df = pd.read_csv(dataset_dir + '/train.csv')
train_df.head()
# + _uuid="2ffaa5aba1e6197e0f2ced15187c06aa87cb4726" id="BXPM-y_kipEo" colab_type="code" outputId="718888b8-3b5e-4ffb-da71-959ae0938eee" executionInfo={"status": "ok", "timestamp": 1554029915623, "user_tz": -330, "elapsed": 2858, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 367}
# Read and display an image
# An arbitrary training image fixes the model's input shape below.
image = plt.imread(os.path.join(train_dir, os.listdir(train_dir)[25]))
print("Image shape =", image.shape)
train_input_shape = image.shape  # reused as the network's input_shape
plt.imshow(image)
plt.show()
# + id="B7VO2ODbtTeG" colab_type="code" outputId="c0fd4e4b-8cd7-4098-ac1b-449d9e058ddb" executionInfo={"status": "ok", "timestamp": 1554030212629, "user_tz": -330, "elapsed": 1261, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Number of unique classes
# Drives the size of the final softmax layer (6 scene categories).
n_classes = len(train_df.label.unique())
print("Number of unique classes =", n_classes)
# + [markdown] _uuid="9afc1b8533ad9c6167e3a50432127e4c6e9f44e0" id="R7b2hbCpipEy" colab_type="text"
# ## Image Augmentation
# + _uuid="5bdbbb3b523d3e82c9c83508842c8536ddb645a9" id="Hf2H6y0ZipE1" colab_type="code" outputId="6e1aa3da-d104-4cdf-e751-dd51ce3177f9" executionInfo={"status": "ok", "timestamp": 1554030294746, "user_tz": -330, "elapsed": 82133, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 68}
# Augment data
batch_size = 32
#train_input_shape = (64, 64, 3)
# One generator supplies both the training and the validation subsets
# (validation_split=0.2); pixel values are rescaled to [0, 1].
# NOTE(review): shear/zoom of 0.7 are aggressive and the augmentations
# also apply to the validation subset — confirm that is intended.
train_datagen = ImageDataGenerator(validation_split=0.2,
                                   rescale=1./255.,
                                   rotation_range=45,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.7,
                                   zoom_range=0.7,
                                   horizontal_flip=True,
                                   #vertical_flip=True,
                                   )
# class_mode="other" passes the y_col values through unchanged — this
# assumes 'label' holds numeric class ids (see the 0..5 mapping above).
train_generator = train_datagen.flow_from_dataframe(dataframe=train_df,
                                                    directory=train_dir,
                                                    x_col="image_name",
                                                    y_col="label",
                                                    class_mode="other",
                                                    subset="training",
                                                    target_size=train_input_shape[0:2],
                                                    shuffle=True,
                                                    batch_size=batch_size)
valid_generator = train_datagen.flow_from_dataframe(dataframe=train_df,
                                                    directory=train_dir,
                                                    x_col="image_name",
                                                    y_col="label",
                                                    class_mode="other",
                                                    subset="validation",
                                                    target_size=train_input_shape[0:2],
                                                    shuffle=True,
                                                    batch_size=batch_size)
# Whole batches per epoch (floor division drops the final partial batch).
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
print("Total number of batches =", STEP_SIZE_TRAIN, "and", STEP_SIZE_VALID)
# + [markdown] _uuid="14162cea761f425a19a7f1a0589e445c8f0b06c7" id="FUjYGRNdipFB" colab_type="text"
# ## Build model
# + _uuid="0015c78383b92457b131f2f79379847503f01833" id="vBmJPc8yipFE" colab_type="code" outputId="e346da08-9870-4c25-f69a-f510b0053d2a" executionInfo={"status": "ok", "timestamp": 1554030308797, "user_tz": -330, "elapsed": 81476, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 156}
# Load pre-trained model
# ImageNet backbone without its classification head; the backbone is
# left trainable here — selective freezing happens just before compile.
resnet50 = ResNet50(weights='imagenet', include_top=False, input_shape=train_input_shape)
#for layer in resnet50.layers:
#    layer.trainable = False
#resnet50.summary()
# + _uuid="e0a60b2a913c2d77e9af8580e65af63c08b136af" id="pkPMPx20ipFQ" colab_type="code" outputId="d4eda6b0-686a-48dc-b6dc-501b05db88fe" executionInfo={"status": "ok", "timestamp": 1554030308801, "user_tz": -330, "elapsed": 79778, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 88}
# Add layers at the end
# Custom classification head on top of the ResNet50 features:
# Flatten -> Dense(16) -> Dropout -> BatchNorm -> ReLU -> softmax(6).
X = resnet50.output
X = Flatten()(X)
X = Dense(16, kernel_initializer='he_uniform')(X)
X = Dropout(0.5)(X)
X = BatchNormalization()(X)
X = Activation('relu')(X)
output = Dense(n_classes, activation='softmax')(X)
model = Model(inputs=resnet50.input, outputs=output)
#model.summary()
# + id="WX93DE4QxIin" colab_type="code" colab={}
# Resume from previously saved weights on Drive.
model_dir = os.path.join(home_dir, "models")
MY_RESNET50_MODEL = os.path.join(model_dir, "resnet50_model.h5")
MY_RESNET50_MODEL_WEIGHTS = os.path.join(model_dir, "resnet50_weights3.h5")
# NOTE(review): this imports standalone `keras` while the rest of the
# notebook uses tensorflow.keras; load_model is currently unused (the
# call below is commented out), so the mixed import is harmless but
# should be removed or switched to tensorflow.keras if re-enabled.
from keras.models import load_model
#model = load_model(MY_RESNET50_MODEL)
model.load_weights(MY_RESNET50_MODEL_WEIGHTS)
model.summary()
# + _uuid="6ce184ce000b658fc951097822dc1c5605152d47" id="xPxVLN3YipFk" colab_type="code" colab={}
optimizer = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
#optimizer=Adam()
#optimizer=RMSprop()
# Fine-tune only the last 6 layers (the custom head added above);
# everything earlier in the backbone is frozen.
for layer in model.layers[:-6]:
    layer.trainable = False
# sparse_categorical_crossentropy expects integer class ids as targets
# (matching class_mode="other" with numeric labels — see augmentation cell).
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])
#model.summary()
# + _uuid="4206d90e8c4dd006d25c29ff9c318fb937826ba2" id="mDZtRHTiipFu" colab_type="code" colab={}
# Stop after 20 epochs without val-accuracy improvement (restoring the
# best weights); shrink the LR 10x after 5 stagnant val-loss epochs.
early_stop = EarlyStopping(monitor='val_acc', patience=20, verbose=1,
                           mode='auto', restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5,
                              verbose=1, mode='auto')
# + _uuid="b2cd4cfb400f9d849029593191dd283667cfae9c" id="Ai4nSL66ipF0" colab_type="code" outputId="1f3052af-a666-4d50-ecc6-a46eae7a0c60" executionInfo={"status": "ok", "timestamp": 1554054205433, "user_tz": -330, "elapsed": 1720047, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 595}
# %%time
n_epoch = 10
# NOTE(review): fit_generator is deprecated in TF >= 2.1 in favour of
# model.fit, which accepts generators directly — confirm the TF version.
history = model.fit_generator(generator=train_generator, steps_per_epoch=STEP_SIZE_TRAIN,
                              validation_data=valid_generator, validation_steps=STEP_SIZE_VALID,
                              epochs=n_epoch,
                              shuffle=True,
                              verbose=1,
                              callbacks=[early_stop, reduce_lr],
                              use_multiprocessing=True,
                              workers=10
                              )
# + id="x8uIuuit8paW" colab_type="code" colab={}
# Persist only the weights (the architecture is rebuilt by this notebook).
model_dir = os.path.join(home_dir, "models")
#model.save(model_dir + '/resnet50_model_sgd.h5')
model.save_weights(model_dir + '/resnet50_weights3.h5')
# + _uuid="898075f768db94965133df52fe650334f744541d" id="8-bDEvFOipF-" colab_type="code" outputId="97d9fd03-aa34-4e7e-c665-05d54690ad1b" executionInfo={"status": "ok", "timestamp": 1554036217927, "user_tz": -330, "elapsed": 2233, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 335}
# Plot the training graph
def plot_training(history):
    """Plot training/validation accuracy and loss side by side from a
    Keras History object (expects 'acc'/'val_acc'/'loss'/'val_loss' keys)."""
    hist = history.history
    epochs = range(len(hist['acc']))
    fig, (ax_acc, ax_loss) = plt.subplots(1, 2, figsize=(15, 5))
    # Left panel: accuracy curves.
    ax_acc.plot(epochs, hist['acc'], 'r-', label='Training Accuracy')
    ax_acc.plot(epochs, hist['val_acc'], 'b--', label='Validation Accuracy')
    ax_acc.set_title('Training and Validation Accuracy')
    ax_acc.legend(loc='best')
    # Right panel: loss curves.
    ax_loss.plot(epochs, hist['loss'], 'r-', label='Training Loss')
    ax_loss.plot(epochs, hist['val_loss'], 'b--', label='Validation Loss')
    ax_loss.set_title('Training and Validation Loss')
    ax_loss.legend(loc='best')
    plt.show()

plot_training(history)
# + _uuid="cd4bf4945a12d11ce13c9cda6befe55c392c0d8d" id="NQtv0MCgipGI" colab_type="code" outputId="04329264-9589-4f70-ca7e-39500b40533c" executionInfo={"status": "ok", "timestamp": 1554054245909, "user_tz": -330, "elapsed": 1020844, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
# Evaluate on validation set
result = model.evaluate_generator(generator=valid_generator, verbose=1)
result
# + _uuid="cd427b2643d0a1e94227977ecf9b579415f9f7d4" id="ATjwrbViipGR" colab_type="code" outputId="e487afb6-7e62-49fe-fe9b-fa2a6af37c99" executionInfo={"status": "ok", "timestamp": 1553956573625, "user_tz": -330, "elapsed": 5079930, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 597}
# Classification report and confusion matrix
from sklearn.metrics import *
import seaborn as sns
def showClassficationReport_Generator(model, valid_generator, STEP_SIZE_VALID):
    """Predict over STEP_SIZE_VALID batches from the generator, then
    print a seaborn confusion-matrix heatmap and the sklearn
    classification report.  Assumes the generator yields integer class
    ids in 0..5 (matching the module-level `labels` list)."""
    # Loop on each generator batch and predict
    y_pred, y_true = [], []
    for i in range(STEP_SIZE_VALID):
        (X,y) = next(valid_generator)
        y_pred.append(model.predict(X))
        y_true.append(y)
    # Flatten the per-batch lists into one flat list of rows/targets.
    y_pred = [subresult for result in y_pred for subresult in result]
    y_true = [subresult for result in y_true for subresult in result]
    y_true = np.asarray(y_true).ravel()
    # Turn per-class probabilities into hard class predictions.
    #y_pred = np.asarray(y_pred).astype('float32').ravel()
    #y_pred = y_pred >= 0.5
    #y_pred = y_pred.astype('int').ravel()
    y_pred = np.argmax(y_pred, axis=1)
    y_pred = np.asarray(y_pred).ravel()
    # Confusion Matrix
    conf_matrix = confusion_matrix(y_true, y_pred, labels=[0,1,2,3,4,5])
    sns.heatmap(conf_matrix, annot=True, fmt="d", square=True, cbar=False,
                cmap=plt.cm.gray, xticklabels=labels, yticklabels=labels)
    plt.ylabel('Actual')
    plt.xlabel('Predicted')
    plt.title('Confusion Matrix')
    plt.show()
    print(classification_report(y_true, y_pred))
    #print("\nAUC: ", roc_auc_score(y_true, y_pred, average='micro'))
showClassficationReport_Generator(model, valid_generator, STEP_SIZE_VALID)
# + [markdown] _uuid="c841e3ab0b59e0a3bf9ae1885d99533b05f06fc4" id="YQSdReezipGd" colab_type="text"
# ## Prepare data for prediction on test set
# + id="6e7bQQBIqtBu" colab_type="code" outputId="3a0434e6-8191-479a-eddc-afce7db257d9" executionInfo={"status": "ok", "timestamp": 1554054245913, "user_tz": -330, "elapsed": 1016410, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Test-set manifest (image names only, no labels).
test_df = pd.read_csv(dataset_dir + "/test_WyRytb0.csv")
test_df.shape
# + _uuid="33c7560628f0fe446b6355d26f9fee67da4fc58a" id="9WSL1H-WipGf" colab_type="code" outputId="4c195237-feda-45b7-c34d-7c83424afd60" executionInfo={"status": "ok", "timestamp": 1554054263729, "user_tz": -330, "elapsed": 1033629, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# No augmentation at inference time — only the same rescaling as training.
test_datagen = ImageDataGenerator(rescale=1./255.)
# batch_size=1 and shuffle=False keep predictions aligned with filenames.
# NOTE(review): directory=train_dir — the test images are read from the
# training directory; confirm the test files actually live there.
test_generator = test_datagen.flow_from_dataframe(dataframe=test_df,
                                                  directory=train_dir,
                                                  x_col="image_name",
                                                  #y_col="label",
                                                  class_mode=None,
                                                  target_size=train_input_shape[0:2],
                                                  batch_size=1,
                                                  shuffle=False
                                                  )
# + [markdown] _uuid="08afdd8c3000a63d5846644d83bebd55bb2e7fdb" id="6hixVyr-ipGw" colab_type="text"
# ## Predict and Submit
# + _uuid="067733fbfd186fa1d65d4110165b345a6c9f1898" id="xX7gSEVUipG5" colab_type="code" outputId="d30ab836-82ac-4660-d70c-e35df2ea1abc" executionInfo={"status": "ok", "timestamp": 1554054469678, "user_tz": -330, "elapsed": 1236892, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
# Predict on test data
# reset() restarts the generator at index 0 so predictions line up with
# test_generator.filenames.
test_generator.reset()
predictions = model.predict_generator(test_generator,verbose=1)
# Hard class id per image from the softmax probabilities.
predictions = np.argmax(predictions, axis=1)
#predictions = predictions.astype('int').ravel()
predictions.shape
# + _uuid="3c52db7624172d624771f0a00205568676fcc853" id="QK9ubPZwipHB" colab_type="code" outputId="c1b214bb-8602-4dab-f2ae-a315b0a004ac" executionInfo={"status": "ok", "timestamp": 1554054469682, "user_tz": -330, "elapsed": 1236021, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Retrieve filenames
import re  # retained for the commented alternative below; unused otherwise
#test_img_ids = [re.split("/", val)[1] for val in test_generator.filenames]
# flow_from_dataframe keeps the x_col values as-is, so no path splitting needed.
test_img_ids = test_generator.filenames
len(test_img_ids)
# + _uuid="5785b0499ce1d7425bce81e98c2d20d29dc7cc7e" id="QpsDW_baipHL" colab_type="code" outputId="3404d8ef-29da-49cb-975d-0e00543c25c5" executionInfo={"status": "ok", "timestamp": 1554054469684, "user_tz": -330, "elapsed": 1235267, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04337592286741421479"}} colab={"base_uri": "https://localhost:8080/", "height": 204}
# Create dataframe for submission
# Pairs each test image name with its predicted class id.
submission_df = pd.DataFrame({'image_name' : test_img_ids,
                              'label' : predictions })
submission_df.head()
# + _uuid="f2b7c10b6dd4103cf2477fe6f492a635f312b1e1" id="O16A0M70ipHW" colab_type="code" colab={}
# Create submission file
submission_dir = os.path.join(home_dir, "submissions")
submission_df.to_csv(submission_dir + '/submission_resnet50_10.csv', index=False)
| AV-Intel-Scene-Classification-Challenge/scene-classification-cnn-data-aug-resnet50.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="QXcSb5FrcXbl"
# ### **DATA PROCESSING**
# + [markdown] id="alrecXjCchdF"
# **Importing Dependencies**
# + id="yYiu3LNicvPR"
import pandas as pd #for manipulating data
import numpy as np #mathematical computation
import matplotlib.pyplot as plt #for plotting
import seaborn as sns #statistical graphics
from sklearn.model_selection import train_test_split #splitting data arrays into two subsets: for training data and for testing data.
from sklearn.linear_model import LinearRegression #importing algorithm
from sklearn import metrics #for accuracy calculation
# + id="OeezyoXgfML4"
# loading the data from csv file to pandas dataframe
car_dataset = pd.read_csv('/content/car data.csv')
# + [markdown] id="GCK6KZaWzWlf"
# **Analyzing our dataset**
# + colab={"base_uri": "https://localhost:8080/", "height": 357} id="ikrk5l6IfYxM" outputId="af5f2f53-2b94-4d7a-ebee-703873f576ff"
# inspecting the first 10 rows of the dataframe
car_dataset.head(10)
# + colab={"base_uri": "https://localhost:8080/"} id="klT9LHGBff5b" outputId="cff5ad06-551a-4035-8e55-83b1fd08d3fd"
# checking the number of rows and columns
car_dataset.shape
# + colab={"base_uri": "https://localhost:8080/"} id="RQ4aJ2ccfvcP" outputId="9480b848-c068-443c-ec47-bb29dc098478"
# getting some information about the dataset
car_dataset.info()
# + colab={"base_uri": "https://localhost:8080/"} id="gN1dOH5ckFFU" outputId="1f0ede76-be57-450c-fa42-d168af804807"
car_dataset.columns #Shows the category of columns
# + colab={"base_uri": "https://localhost:8080/"} id="YTIbmmqehtMi" outputId="7a70b160-4f9d-43a3-f5eb-74bf28a43f4b"
# checking the number of missing values
car_dataset.isnull().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="1YaS1c2WsI14" outputId="78947660-a3ff-4e2e-f189-1b52a7ac32ef"
car_dataset.duplicated().sum()
# + [markdown] id="sml9OSUlzs61"
# **Quality Check and data cleaning**
#
# * Remove irrelevant columns such as Car_Name
# * Remove null values
# * Remove duplicate value
# * Encoding Year into Age from formula (Current year-Selling Year)
# * Encoding object datatype category (Kms_Driven, Fuel_Type, Seller_Type, Transmission) into integer data
#
# + id="utLs3zKT2tKY"
car_dataset=car_dataset.drop(['Car_Name'],axis=1) #Remove Car_Name column (high-cardinality identifier, not useful as a feature)
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="-B2ukKi93IFc" outputId="a19bc9b7-611b-4980-d7e7-b7ed59901e8d"
car_dataset.head()
# + colab={"base_uri": "https://localhost:8080/"} id="gis3ltPQBL5L" outputId="1ae028b2-12ff-4958-9156-5782209ffd9c"
car_dataset.shape
# + id="SmRCB72asc5e"
car_dataset=car_dataset.drop_duplicates() #Remove duplicate value
# + colab={"base_uri": "https://localhost:8080/"} id="kO19eX20x2vw" outputId="fe4ecd74-c4a0-4afc-dc93-4686630da6b0"
car_dataset.duplicated().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="nAB2LA5c3vBe" outputId="2b634653-c969-46d7-f7f7-d9be0d6a5ac5"
car_dataset['Year'].unique() #Checking unique value in year
# + id="iq_pOE_w4Gyr"
current_year = 2021 #Declaring current year
# + id="285F5lkF4QBY"
car_dataset['Year'] = current_year - car_dataset['Year'] #Encoding year into car age; use the current_year constant declared above instead of repeating the literal 2021
# + id="f6gp7aQX4jWS"
car_dataset = car_dataset.rename(columns={"Year": "Car_Age"}) #Renaming columns
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="iGjrynl75gNL" outputId="f159c9f5-3818-4e7c-c12d-054e6379bc47"
car_dataset.head()
# + colab={"base_uri": "https://localhost:8080/"} id="IbFB6I2M5nsj" outputId="0263dbff-c5cc-4625-de4b-5864cd079d21"
# checking the distribution of categorical data
# (these counts motivate the integer encodings applied below)
print(car_dataset.Fuel_Type.value_counts())
print(car_dataset.Seller_Type.value_counts())
print(car_dataset.Transmission.value_counts())
# + id="kxwhQaQS6TyO"
#Encoding object data type
# Map each categorical column to small integers, in place.
# encoding "Fuel_Type" Column
car_dataset.replace({'Fuel_Type':{'Petrol':0,'Diesel':1,'CNG':2}},inplace=True)
# encoding "Seller_Type" Column
car_dataset.replace({'Seller_Type':{'Dealer':0,'Individual':1}},inplace=True)
# encoding "Transmission" Column
car_dataset.replace({'Transmission':{'Manual':0,'Automatic':1}},inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="kAIJvheR6pCk" outputId="6a66b8e4-8c02-4700-b866-02ab85422c68"
car_dataset.head(5)
# + [markdown] id="Yz0aeeZ6Aecs"
# **Plotting and detecting outliers**
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="HhOPQN5Q8CWb" outputId="b66b3c15-5475-4475-ecdc-effd853b3a28"
car_dataset.boxplot(column=["Car_Age"])
plt.show()
# + id="tNo-_dyU8Uvg"
#Defining Outlier Function
def remove_outlier(col):
    """Return the (lower, upper) IQR outlier fences for a numeric Series.

    Values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are considered outliers;
    callers clip the column to this range.

    Parameters:
        col: pandas Series of numeric values.
    Returns:
        (lower_range, upper_range) tuple of floats.
    """
    # Series.quantile does not require pre-sorted data, so the original
    # `sorted(col)` call (whose result was discarded) has been removed.
    Q1, Q3 = col.quantile([0.25, 0.75])
    IQR = Q3 - Q1
    lower_range = Q1 - (1.5 * IQR)
    upper_range = Q3 + (1.5 * IQR)
    return lower_range, upper_range
# + id="1zGabdTt8YFX"
# Winsorise Car_Age: cap values outside the IQR fences at the fence values.
lowage, upage = remove_outlier(car_dataset['Car_Age'])
# Equivalent to the two sequential np.where assignments (upper tail, then
# lower tail): np.clip caps both tails in one call.
car_dataset['Car_Age'] = np.clip(car_dataset['Car_Age'], lowage, upage)
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="ZaXn_WO985Eo" outputId="8c64f02f-c85f-432f-a188-6e279f30150f"
car_dataset.boxplot(column=["Car_Age"])
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="ApKZ8-Lq9Of0" outputId="5ba73006-b7a6-4efb-c429-696cbf765e1e"
car_dataset.boxplot(column=["Fuel_Type"])
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="yvqYOcLi9b9A" outputId="2023b871-5dfc-4012-cded-e5fec0489495"
car_dataset.boxplot(column=["Transmission"])
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="wDDK70HW9h4T" outputId="9900d810-5e21-45a1-a683-7d40f439f0fc"
car_dataset.boxplot(column=["Owner"])
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="d4aUGf5w9loO" outputId="bb417d7b-f3ec-4edf-e4a9-cf961783e993"
car_dataset.boxplot(column=["Kms_Driven"])
plt.show()
# + id="pYyULpbI90Pj"
# Winsorise Kms_Driven: cap values outside the IQR fences at the fence values.
lowage, upage = remove_outlier(car_dataset['Kms_Driven'])
# Single np.clip call replaces the two sequential np.where assignments.
car_dataset['Kms_Driven'] = np.clip(car_dataset['Kms_Driven'], lowage, upage)
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="yfBRbZ41-DkM" outputId="b8d15907-f8b3-4e0c-b1d3-fa29550c9abb"
car_dataset.boxplot(column=["Kms_Driven"])
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="S3QyEYJz-HT1" outputId="ed879e56-a8f4-4350-8527-80a7aa7412c8"
car_dataset.boxplot(column=["Present_Price"])
plt.show()
# + id="cTAitmmi-fKY"
# Winsorise Present_Price: cap values outside the IQR fences at the fence values.
lowage, upage = remove_outlier(car_dataset['Present_Price'])
# Single np.clip call replaces the two sequential np.where assignments.
car_dataset['Present_Price'] = np.clip(car_dataset['Present_Price'], lowage, upage)
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="VJbe-O9F-4bQ" outputId="a4e3f4fa-8db4-4775-a6bb-c219438847cc"
car_dataset.boxplot(column=["Present_Price"])
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="EQmQmiX8_dz4" outputId="bf672f56-0bb8-475b-93aa-739a948c21a5"
car_dataset.boxplot(column=["Selling_Price"])
plt.show()
# + id="WK8HNCME_nxE"
# Winsorise Selling_Price: cap values outside the IQR fences at the fence values.
lowage, upage = remove_outlier(car_dataset['Selling_Price'])
# Single np.clip call replaces the two sequential np.where assignments.
car_dataset['Selling_Price'] = np.clip(car_dataset['Selling_Price'], lowage, upage)
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="j7DPMxyh_1NK" outputId="39a831e4-46d4-46af-c11d-8b087df1077d"
car_dataset.boxplot(column=["Selling_Price"])
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="r3-6AjMtBFh5" outputId="aa71bd98-e3c9-4c56-e0e1-27b19db375cb"
car_dataset.corr() #corelation
# + colab={"base_uri": "https://localhost:8080/", "height": 623} id="e2W8kGl6BPyk" outputId="c2b13bb9-adf0-4bcb-cd32-75717119347c"
plt.subplots(figsize=(15,10))
ax=sns.boxplot(x='Present_Price',y='Selling_Price',data=car_dataset)
ax.set_xticklabels(ax.get_xticklabels(),rotation=40,ha='right')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 460} id="GwqWu5NTCNtB" outputId="60de4d70-3c48-46db-c8aa-1226c6237735"
plt.subplots(figsize=(14,7))
sns.boxplot(x='Fuel_Type',y='Selling_Price',data=car_dataset)
# + [markdown] id="m_z84UKiJ8qG"
# **Saving Clean data set**
# + id="VRPL23XKKCdq"
car_dataset.to_csv('Clean_car_dataset.csv')
# + [markdown] id="6Vxth3wnJKH1"
# **Splitting Training data into Parameters and Target**
# + id="pt548-IJJTz3"
X = car_dataset.drop(['Selling_Price'],axis=1)
Y = car_dataset['Selling_Price']
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="3OsKWJMiLIID" outputId="cd5c525a-53aa-4b31-f178-573cd9646b58"
X
# + colab={"base_uri": "https://localhost:8080/"} id="95Qa91xDLbS7" outputId="16aafc82-9133-45c0-e755-a5dba57b73d0"
Y.head()
# + [markdown] id="ld8F5k0eL0Rx"
# **Splitting Training and Testing data**
# + id="z4MEFSVZL6KP"
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state=1)
# + [markdown] id="ESdydQnUNAUM"
# # **Model Training**
# **Linear Regression**
# + id="9sgWRIx1NI7U"
# loading the linear regression model
lin_reg_model = LinearRegression()
# + colab={"base_uri": "https://localhost:8080/"} id="ac2vfQfZNO48" outputId="ec953112-aa46-4fb5-916d-283ca3657c8b"
lin_reg_model.fit(X_train,Y_train)
# + [markdown] id="IfL_iL2wO09U"
# **Model Evaluation (Training data)**
# + id="ALEt66AEO4ag"
# prediction on Training data
training_data_prediction = lin_reg_model.predict(X_train)
# + colab={"base_uri": "https://localhost:8080/"} id="374GZHxWPAeN" outputId="e1a4b6d5-89f3-4021-9da3-082ef3ac875f"
# R squared Error
error_score = metrics.r2_score(Y_train, training_data_prediction)
print("R squared Error : ", error_score)
# + [markdown] id="qzVi_GbxPbqe"
# Visualize the actual prices and Predicted prices
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="YrKXLsNxPgCf" outputId="0b38d6f4-c5de-4b64-8736-67e051cf6004"
plt.scatter(Y_train, training_data_prediction)
plt.xlabel("Actual Price")
plt.ylabel("Predicted Price")
plt.title(" Actual Prices vs Predicted Prices")
plt.show()
# + [markdown] id="AoT2q490Qcib"
# **Model Evaluation (Testing data)**
# + id="vqWEN7XXQA1e"
# prediction on Testing data
test_data_prediction = lin_reg_model.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="DoAnvD-wQHdv" outputId="95839daa-b296-44e6-f332-343bc4316165"
error_score = metrics.r2_score(Y_test, test_data_prediction)
print("R squared Error : ", error_score)
# + [markdown] id="i1Vb_dIaQoja"
# Visualize the actual prices and Predicted prices
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="P1i5bEyjQOK6" outputId="6d18366c-da9d-4592-9b8e-08b682f2f918"
plt.scatter(Y_test, test_data_prediction)
plt.xlabel("Actual Price")
plt.ylabel("Predicted Price")
plt.title(" Actual Prices vs Predicted Prices")
plt.show()
# + [markdown] id="nDG4zE1XRP4Q"
# # **Saving machine learning model**
# + id="U_8wo_PnRWfm"
import pickle
pickle.dump(lin_reg_model, open('car_price.pkl', 'wb'))
# + id="CKdHajmoD5E1" colab={"base_uri": "https://localhost:8080/"} outputId="c4f26171-0c00-430a-a101-2049653e86df"
X_test.shape
| EDA_and_Model_Building.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('Data/mnist', one_hot=True)
# +
n_classes = 10
input_size = 784
x = tf.placeholder(tf.float32, shape=[None, input_size])
y = tf.placeholder(tf.float32, shape=[None, n_classes])
keep_prob = tf.placeholder(tf.float32)
# +
def weight_variable(shape):
    """Create a weight Variable initialised from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    """Create a bias Variable initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, W):
    """2-D convolution with stride 1 and 'SAME' (zero) padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    """2x2 max-pooling with stride 2 and 'SAME' padding (halves height and width)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# +
# Reshape the flat 784-vectors into NHWC image batches for the conv layers.
x_image = tf.reshape(x, [-1,28,28,1])
# Conv block 1: 7x7 kernels, 1 -> 100 channels; 2x2 max-pool: 28x28 -> 14x14.
W_conv1 = weight_variable([7, 7, 1, 100])
b_conv1 = bias_variable([100])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Conv block 2: 4x4 kernels, 100 -> 150 channels; pool: 14x14 -> 7x7.
W_conv2 = weight_variable([4, 4, 100, 150])
b_conv2 = bias_variable([150])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# Conv block 3: 4x4 kernels, 150 -> 250 channels; 'SAME' pooling rounds up,
# so the spatial size goes 7x7 -> 4x4 (hence the 4*4*250 flatten below).
W_conv3 = weight_variable([4, 4, 150, 250])
b_conv3 = bias_variable([250])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
h_pool3 = max_pool_2x2(h_conv3)
# Fully connected layer on the flattened feature map, followed by dropout.
W_fc1 = weight_variable([4 * 4 * 250, 300])
b_fc1 = bias_variable([300])
h_pool3_flat = tf.reshape(h_pool3, [-1, 4*4*250])
h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Output layer: raw logits for the n_classes digits (softmax is applied in the loss).
W_fc2 = weight_variable([300, n_classes])
b_fc2 = bias_variable([n_classes])
y_pred = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# -
# Loss: mean softmax cross-entropy between the one-hot labels and the logits.
diff = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_pred)
cross_entropy = tf.reduce_mean(diff)
# Accuracy: fraction of samples whose arg-max prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
learning_rate = 0.001
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
# Training schedule and evaluation cadence.
n_steps = 10
batch_size = 32
# NOTE(review): this value is fed into keep_prob, so 0.25 keeps only 25% of
# the activations (i.e. drops 75%). If a 25% *drop* rate was intended, the
# value should be 0.75 -- confirm.
dropout = 0.25
evaluate_every = 10
# Number of mini-batches needed to cover the full test set once.
n_val_steps = mnist.test.images.shape[0] // batch_size
# Checkpointing: keep at most the 5 most recent saved models.
saver = tf.train.Saver(max_to_keep=5)
save_dir = 'checkpoints/'
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
best_val = 0.0  # best validation accuracy seen so far
# Training loop: one optimisation step per iteration; every `evaluate_every`
# steps, run the whole test set through the accuracy op and checkpoint the
# model if the validation score improved.
for i in range(n_steps):
    x_batch, y_batch = mnist.train.next_batch(batch_size)
    _, train_acc = sess.run([train_step, accuracy], feed_dict={x: x_batch, y: y_batch, keep_prob: dropout})
    if i % evaluate_every == 0:
        # Evaluate on the test set in mini-batches, with dropout disabled.
        val_accs = []
        for j in range(n_val_steps):
            val_x, val_y = mnist.test.next_batch(batch_size)
            val_accs.append(sess.run(accuracy, feed_dict={x: val_x, y: val_y, keep_prob: 1.0}))
        mean_val_acc = sum(val_accs) / len(val_accs)
        print('Step {:04.0f}: train_acc: {:.4f}; val_acc: {:.4f}'.format(i, train_acc, mean_val_acc))
        # Bug fix: checkpoint on the *mean* validation accuracy. The original
        # compared only `val_acc`, the accuracy of the last validation batch,
        # which is noisy and inconsistent with the value that is printed.
        if mean_val_acc > best_val:
            saver.save(sess, save_dir+'best-model', global_step=i)
            print('Model saved')
            best_val = mean_val_acc
# Always keep a checkpoint of the final state, regardless of its score.
saver.save(sess, save_dir+'last-model')
# Reload the last checkpoint into a fresh session and continue training.
with tf.Session() as sess:
    # NOTE(review): import_meta_graph adds the saved graph definition on top
    # of the graph already built above, while the loop below still runs the
    # original train_step/accuracy ops; restore() repopulates the variables
    # by name. This works here but leaves duplicated ops in the default
    # graph -- consider restoring into a fresh graph instead.
    new_saver = tf.train.import_meta_graph(save_dir+'last-model.meta')
    new_saver.restore(sess, save_dir+'last-model')
    for i in range(35):
        x_batch, y_batch = mnist.train.next_batch(batch_size)
        _, train_acc = sess.run([train_step, accuracy], feed_dict={x: x_batch, y: y_batch, keep_prob: dropout})
| Section06/Storing the network topology and trained weights.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **Central Jakarta**
#
# Central Jakarta is the center of life for entire Jakarta. One of the most popular district is SCBD (Sudirman Center Business District) where all the business is started. If you heard about Pacific Place then the place is there.
#
# Besides, Menteng, Thamrin (Grand Indonesia), and Cempaka Putih also famous for its high end lifestyle. So some burgers are served in proper restaurant. Even though there are also bunch of cheap burger which lower than IDR 50000.
# # Content
#
# * District
# * Price Range
# * Facility
# * Rating
import pandas as pd
from pandasql import sqldf
burger = pd.read_csv('../input/burger-central/central_jakarta.csv', sep = ";")
# # District
partlor_name = sqldf("SELECT DISTINCT district FROM burger").transpose()
partlor_name
# As we can see, there are 10 districts in Central Jakarta. Bendungan Hilir, Karet, and Tanah Abang is actually one region with Thamrin as the area are not so far from each other. Meanwhile Menteng is closer to Pecenongan, the furthest is Hayam Wuruk.
#
# Then here is all the details of burger restaurant/partlors. There are some high end restaurant but the budget and fast food are also available as well.
burger = sqldf("SELECT no, partlor_name, District, unique_menu, price FROM burger")
burger.style.hide_index()
# # Price-Range
#
# We sorted the list from the most expensive and also the cheapest, also with the price range from IDR 50000 to 100000. Here is the order from the most expensive. The most expensive here is Hard Rock Cafe which needed at least 200k per menu, and the cheapest is Lemoe Burger with their Chicken Cabe Ijo burger, priced only for 14k.
#
# Most of them are budgeted below 100k per menu.
#most expensive
burger_price = sqldf("SELECT partlor_name, district, unique_menu, price FROM burger ORDER BY price DESC")
burger_price.head().style.hide_index()
#cheapest
burger_price = sqldf("SELECT partlor_name, district, unique_menu, price FROM burger ORDER BY price")
burger_price.head().style.hide_index()
# Then here is the price range I mentioned earlier.
#more than IDR 100000
burger_price = sqldf("SELECT partlor_name, district, unique_menu, price FROM burger WHERE price >= 100000")
burger_price.style.hide_index()
#between 50000 to 100000
burger_price = sqldf("SELECT partlor_name, district, unique_menu, price FROM burger WHERE price BETWEEN 50000 AND 100000")
burger_price.style.hide_index()
#below IDR 50000
burger_price = sqldf("SELECT partlor_name, district, unique_menu, price FROM burger WHERE price <= 50000")
burger_price.style.hide_index()
# # Facility
#
# Last but not least, we chose the restaurant whether we could takeaway, or deliver our food. We also could find out whether these partlor has outdoor seats, smoking room, wifi, and reservation. Last but not least, we researched about poultry with some dietary restriction like alcohol and halal_menu. But this halal menu will not available in other project. We changed to other_poultry instead.
#
# We can take our burger home as takeaway and delivery options are massive here. They also got some outdoor seat and smoking area for any purpose, not to mention the wifi is also on high use. Some of places needs or able to do reservation as listed below, some also got some alcohol menu and there are three vendors which contained non-halal menu.
#
# No drive thru available except for Burger King Cempaka Putih.
#
# Let's take a look for all the facility.
burger = sqldf("""SELECT no, partlor_name, district, takeaway, delivery, outdoor_seat, smoking_area,
alcohol_served, wifi, reservation, drive_thru, halal_food FROM burger""")
burger.style.hide_index()
# # Rating
#
# Based on the list, <NAME> is the best among all (scored 4,35), followed by Burger King Grand Indonesia (scored 4,04) and Le Burger (scored 4,03). If the score is more than 3.5 then the partlor is worth trying. All the facilities and menus are represented by both the Google and platform ratings and counts, so in the meantime only ratings are counted here.
burger = sqldf("""SELECT partlor_name, district, ltrim(google_rating) as google_rating, ltrim(zomato_rating) as zomato_rating, ltrim(qraved_rating) as qraved_rating,
ltrim((google_rating + zomato_rating + qraved_rating)/3, 1) as total_rating,
CASE
WHEN ltrim((google_rating + zomato_rating + qraved_rating)/3,1) >= 3.5 THEN "Recommended"
ELSE "Reconsider"
END AS "Recommendation"
FROM burger
ORDER BY total_rating DESC""")
burger.style.hide_index()
| Burger Partlor in Jakarta/Central Jakarta.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # AlexNet in TensorFlow
#
# Credits: Forked from [TensorFlow-Examples](https://github.com/aymericdamien/TensorFlow-Examples) by <NAME>
#
# ## Setup
#
# Refer to the [setup instructions](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/Setup_TensorFlow.md)
# Import MINST data
import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
import tensorflow as tf
# Parameters
learning_rate = 0.001
training_iters = 300000
batch_size = 64
display_step = 100
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.8 # Dropout, probability to keep units
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32) # dropout (keep probability)
# +
# Create AlexNet model
def conv2d(name, l_input, w, b):
    """Conv layer: 2-D convolution (stride 1, 'SAME' padding) + bias + ReLU."""
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1],
                                                  padding='SAME'),b), name=name)

def max_pool(name, l_input, k):
    """k x k max-pooling with stride k and 'SAME' padding."""
    return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME', name=name)

def norm(name, l_input, lsize=4):
    """Local response normalisation (LRN) across lsize adjacent channels."""
    return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name=name)
def alex_net(_X, _weights, _biases, _dropout):
    """Forward pass of a small AlexNet-style CNN for MNIST.

    _X       -- flattened input images, shape [batch, 784]
    _weights -- dict of weight Variables ('wc1'..'wc3', 'wd1', 'wd2', 'out')
    _biases  -- dict of bias Variables matching _weights
    _dropout -- keep probability applied after each normalisation layer
    Returns the unscaled class logits, shape [batch, 10].
    """
    # Reshape input picture to NHWC format: [batch, 28, 28, 1]
    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])
    # Convolution Layer
    conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'])
    # Max Pooling (down-sampling): 28x28 -> 14x14
    pool1 = max_pool('pool1', conv1, k=2)
    # Apply Normalization
    norm1 = norm('norm1', pool1, lsize=4)
    # Apply Dropout
    norm1 = tf.nn.dropout(norm1, _dropout)
    # Convolution Layer
    conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])
    # Max Pooling (down-sampling): 14x14 -> 7x7
    pool2 = max_pool('pool2', conv2, k=2)
    # Apply Normalization
    norm2 = norm('norm2', pool2, lsize=4)
    # Apply Dropout
    norm2 = tf.nn.dropout(norm2, _dropout)
    # Convolution Layer
    conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])
    # Max Pooling (down-sampling): 7x7 -> 4x4 ('SAME' pooling rounds up)
    pool3 = max_pool('pool3', conv3, k=2)
    # Apply Normalization
    norm3 = norm('norm3', pool3, lsize=4)
    # Apply Dropout
    norm3 = tf.nn.dropout(norm3, _dropout)
    # Fully connected layer
    # Reshape conv3 output to fit dense layer input (4*4*256 flattened features)
    dense1 = tf.reshape(norm3, [-1, _weights['wd1'].get_shape().as_list()[0]])
    # Relu activation
    dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc1')
    # Relu activation
    dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc2')
    # Output, class prediction (raw logits; softmax is applied in the loss)
    out = tf.matmul(dense2, _weights['out']) + _biases['out']
    return out
# -
# Store layers weight & bias
# NOTE(review): tf.random_normal here uses its default stddev of 1.0, which
# is a large initial scale for a deep net; a smaller stddev (or a truncated
# normal) usually trains more stably -- confirm before changing.
weights = {
    'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),     # conv1: 3x3, 1 -> 64 channels
    'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),   # conv2: 3x3, 64 -> 128
    'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),  # conv3: 3x3, 128 -> 256
    'wd1': tf.Variable(tf.random_normal([4*4*256, 1024])),   # dense1: flattened 4x4x256 -> 1024
    'wd2': tf.Variable(tf.random_normal([1024, 1024])),      # dense2: 1024 -> 1024
    'out': tf.Variable(tf.random_normal([1024, 10]))         # logits: 1024 -> 10 classes
}
# One bias vector per output channel/unit of the matching weight tensor.
biases = {
    'bc1': tf.Variable(tf.random_normal([64])),
    'bc2': tf.Variable(tf.random_normal([128])),
    'bc3': tf.Variable(tf.random_normal([256])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'bd2': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = alex_net(x, weights, biases, keep_prob)
# Define loss and optimizer
# NOTE(review): positional (logits, labels) ordering here matches only the
# old TF 1.x signature of softmax_cross_entropy_with_logits; later versions
# require keyword arguments (labels=..., logits=...) -- confirm the TF
# version this notebook targets.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model: fraction of samples whose arg-max prediction matches the label
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
step = 1
# Keep training until reach max iterations
while step * batch_size < training_iters:
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Fit training using batch data
sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
if step % display_step == 0:
# Calculate batch accuracy
acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
# Calculate batch loss
loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
print "Iter " + str(step*batch_size) + ", Minibatch Loss= " \
+ "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc)
step += 1
print "Optimization Finished!"
# Calculate accuracy for 256 mnist test images
print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
y: mnist.test.labels[:256],
keep_prob: 1.})
| deep-learning/tensor-flow-examples/notebooks/3_neural_networks/alexnet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
heart_df = pd.read_csv("data/heart-disease.csv")
heart_df.head() # classification dataset - supervised learning
# ## 1. Tuning hyperparameters by hand
# so far we've worked with training and test datasets.
#
# You train a model on a training set and evaluate it on a test dataset.
#
# But hyperparameter tuning introduces a third set, **a validation set.**
#
# Now the process becomes, **train a model on the training data, (try to) improve its hyperparameters on the validation set and evaluate it on the test set.**
# +
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.get_params()
# -
# The parameters we are going to adjust (check documentation for definition)
#
# * **max_depth** - the maximum depth of the tree
# * **max_features** - the number of features to consider when looking for the best split
# * **min_samples_leaf** - the minimum number of samples required to be at a leaf node
# * **min_samples_split**
# * **n_estimators** - the number of trees in the forest
# +
# From 100 samples
# Train - 70, Validation - 15, Test - 15
# -
# #### Create an evaluation function for models
def evaluate_preds(y_true, y_preds):
    """Compare true vs. predicted labels for a classification model.

    Prints accuracy, precision, recall and F1, and returns the four
    metrics (rounded to 2 decimal places) in a dictionary.
    """
    scores = {
        "accuracy": accuracy_score(y_true, y_preds),
        "precision": precision_score(y_true, y_preds),
        "recall": recall_score(y_true, y_preds),
        "f1": f1_score(y_true, y_preds),
    }
    print(f"Acc: {scores['accuracy'] * 100:.2f}%")
    print(f"Precision: {scores['precision']:.2f}")
    print(f"Recall: {scores['recall']:.2f}")
    print(f"F1 score: {scores['f1']:.2f}")
    # Only the returned values are rounded; the printed lines keep the
    # formatting above.
    return {name: round(value, 2) for name, value in scores.items()}
len(heart_df)
# +
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.ensemble import RandomForestClassifier
np.random.seed(42) # Results are reproducable
# Shuffle the data
heart_df_shuffle = heart_df.sample(frac=1)
# Split into X and y
X = heart_df_shuffle.drop("target",axis=1)
y = heart_df_shuffle["target"]
# Split the data into train, validation and test splits
# train - 70%, validation - 15%, test - 15%
train_split = round(0.7 * len(heart_df_shuffle)) # 70%
valid_split = round(train_split + 0.15 * len(heart_df_shuffle)) # index + next 15% of data
# [from:to]
X_train,y_train = X[:train_split],y[:train_split]
X_valid,y_valid = X[train_split:valid_split],y[train_split:valid_split]
X_test,y_test = X[valid_split:],y[valid_split:]
# len(X_train),len(X_valid),len(X_test)
# Train the model
clf = RandomForestClassifier() # instantiates with base line parameters
clf.fit(X_train, y_train)
# Make baseline predictions (on valid set)
y_preds = clf.predict(X_valid) # tune model on valid set
# Evaluate the classifier on validation set
baseline_metrics = evaluate_preds(y_valid, y_preds)
baseline_metrics
# -
# Beautiful, now let's try and improve the results.
#
# We'll change 1 of the hyperparameters, n_estimators to 100 and see if it improves on the validation set.
# +
np.random.seed(42)
# Create a second classifier with different hyperparameters
# NOTE(review): n_estimators=100 is also the sklearn default from v0.22
# onward, so on a modern sklearn this model is identical to the baseline --
# confirm the installed version before drawing conclusions from the
# comparison.
clf_2 = RandomForestClassifier(n_estimators=100) # adjusting n_estimators
clf_2.fit(X_train, y_train)
# Make predictions
y_preds_2 = clf_2.predict(X_valid)
# Evaluate the 2nd classifier
clf_2_metrics = evaluate_preds(y_valid, y_preds_2)
clf_2_metrics
# Different models on same data
# -
# How about we try another parameter?
#
# Wait...
#
# Building new models with new hyperparameters each time (by hand) is taking a lot of time.
#
# Is there a better way?
#
# Ans) **RandomizedSearchCV/GridSearchCV** provided by Sklearn
| Model Selection - Tuning Hyperparameters/HyperParameterTuning_ByHand.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ''
# name: sagemath
# ---
# + language="html"
# <link href="http://mathbook.pugetsound.edu/beta/mathbook-content.css" rel="stylesheet" type="text/css" />
# <link href="https://aimath.org/mathbook/mathbook-add-on.css" rel="stylesheet" type="text/css" />
# <style>.subtitle {font-size:medium; display:block}</style>
# <link href="https://fonts.googleapis.com/css?family=Open+Sans:400,400italic,600,600italic" rel="stylesheet" type="text/css" />
# <link href="https://fonts.googleapis.com/css?family=Inconsolata:400,700&subset=latin,latin-ext" rel="stylesheet" type="text/css" /><!-- Hide this cell. -->
# <script>
# var cell = $(".container .cell").eq(0), ia = cell.find(".input_area")
# if (cell.find(".toggle-button").length == 0) {
# ia.after(
# $('<button class="toggle-button">Toggle hidden code</button>').click(
# function (){ ia.toggle() }
# )
# )
# ia.hide()
# }
# </script>
#
# -
# **Important:** to view this notebook properly you will need to execute the cell above, which assumes you have an Internet connection. It should already be selected, or place your cursor anywhere above to select. Then press the "Run" button in the menu bar above (the right-pointing arrowhead), or press Shift-Enter on your keyboard.
# $\newcommand{\identity}{\mathrm{id}}
# \newcommand{\notdivide}{\nmid}
# \newcommand{\notsubset}{\not\subset}
# \newcommand{\lcm}{\operatorname{lcm}}
# \newcommand{\gf}{\operatorname{GF}}
# \newcommand{\inn}{\operatorname{Inn}}
# \newcommand{\aut}{\operatorname{Aut}}
# \newcommand{\Hom}{\operatorname{Hom}}
# \newcommand{\cis}{\operatorname{cis}}
# \newcommand{\chr}{\operatorname{char}}
# \newcommand{\Null}{\operatorname{Null}}
# \newcommand{\lt}{<}
# \newcommand{\gt}{>}
# \newcommand{\amp}{&}
# $
# <div class="mathbook-content"></div>
# <div class="mathbook-content"><p id="p-1094">Cryptography is the study of sending and receiving secret messages. The aim of cryptography is to send messages across a channel so that only the intended recipient of the message can read it. In addition, when a message is received, the recipient usually requires some assurance that the message is authentic; that is, that it has not been sent by someone who is trying to deceive the recipient. Modern cryptography is heavily dependent on abstract algebra and number theory.</p></div>
# <div class="mathbook-content"><p id="p-1095">The message to be sent is called the <dfn class="terminology">plaintext</dfn> message. The disguised message is called the <dfn class="terminology">ciphertext</dfn>. The plaintext and the ciphertext are both written in an <dfn class="terminology">alphabet</dfn>, consisting of <dfn class="terminology">letters</dfn> or <dfn class="terminology">characters</dfn>. Characters can include not only the familiar alphabetic characters A, $\ldots\text{,}$ Z and a, $\ldots\text{,}$ z but also digits, punctuation marks, and blanks. A <dfn class="terminology">cryptosystem</dfn>, or <dfn class="terminology">cipher</dfn>, has two parts: <dfn class="terminology">encryption</dfn>, the process of transforming a plaintext message to a ciphertext message, and <dfn class="terminology">decryption</dfn>, the reverse transformation of changing a ciphertext message into a plaintext message.</p></div>
# <div class="mathbook-content"><p id="p-1096">There are many different families of cryptosystems, each distinguished by a particular encryption algorithm. Cryptosystems in a specified cryptographic family are distinguished from one another by a parameter to the encryption function called a <dfn class="terminology">key</dfn>. A classical cryptosystem has a single key, which must be kept secret, known only to the sender and the receiver of the message. If person $A$ wishes to send secret messages to two different people $B$ and $C\text{,}$ and does not wish to have $B$ understand $C$'s messages or vice versa, $A$ must use two separate keys, so one cryptosystem is used for exchanging messages with $B\text{,}$ and another is used for exchanging messages with $C\text{.}$</p></div>
# <div class="mathbook-content"><p id="p-1097">Systems that use two separate keys, one for encoding and another for decoding, are called <dfn class="terminology">public key cryptosystems</dfn>. Since knowledge of the encoding key does not allow anyone to guess at the decoding key, the encoding key can be made public. A public key cryptosystem allows $A$ and $B$ to send messages to $C$ using the same encoding key. Anyone is capable of encoding a message to be sent to $C\text{,}$ but only $C$ knows how to decode such a message.</p></div>
# <div class="mathbook-content"><nav class="summary-links"><li><a href="section-private-key-crypt.ipynb"><span class="codenumber">7.1</span><span class="title">Private Key Cryptography</span></a></li><li><a href="section-public-key-crypt.ipynb"><span class="codenumber">7.2</span><span class="title">Public Key Cryptography</span></a></li><li><a href="exercises-crypt.ipynb"><span class="codenumber">7.3</span><span class="title">Exercises</span></a></li><li><a href="crypt-exercises-additional.ipynb"><span class="codenumber">7.4</span><span class="title">Additional Exercises: Primality and Factoring</span></a></li><li><a href="crypt-references.ipynb"><span class="codenumber">7.5</span><span class="title">References and Suggested Readings</span></a></li><li><a href="crypt-sage.ipynb"><span class="codenumber">7.6</span><span class="title">Sage</span></a></li><li><a href="crypt-sage-exercises.ipynb"><span class="codenumber">7.7</span><span class="title">Sage Exercises</span></a></li></nav></div>
| aata/crypt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# argv:
# - python
# - -m
# - ipykernel_launcher
# - -f
# - '{connection_file}'
# display_name: Python 3
# env: null
# interrupt_mode: signal
# language: python
# metadata: null
# name: python3
# ---
# # Ad fundum!
# This notebook has three purposes:
# 1. Learning to use exceptions as part of a normal program workflow
# 2. Getting acquainted with recursive function
# 3. Introducing those of you who are not from Belgium with the tradition of Ad Fundum (I'm sure it exists in every country but the Belgian way is probably slightly different)
# ## The ULB way
# In this notebook, we are going to build a small system that enables you to hone your ad fundum skills from the comfort of your own house. For that, we will use a set of functions that call one another and time the whole process.
#
# The ad fundum is a student... let's call it ritual that goes more or less like this:
#
# * One person challenges another at ad fundum, the other usually agrees. They go and pick up a beer or a beverage each. A referee is appointed (in our case, the referee is the program we write and the only adversary in our case is ourself).
# * The referee asks the two contestants whether they are ready. Once it is the case, she pronounces the following formula: "À main, à bouche, à cul, nom de dieu!" (loosely translated, it means "To the hand, to the mouth, to the bottom, for god's sake"). Of course, I assume that in catholic universities, they drop the last part so our program should let the player decides if she wants to see this. Also, the word "cul" is somewhat vulgar in French and our program should be family friendly (with the difference that, if children want to play, they should drink a non-alcoholic beverage but the program cannot control that, now, can it?). The player should be able to decide if she wants to see the full word or the cleaner "c\*\*".
# * Once the referee has pronouced the formula, both contestants have to chug their beer (or their beverage) as fast as possible. For some reason, bystanders usually shout "et glou" (which in English would sounds something like "hay glue" and which is supposed to mimick the sound of somebody drinking) regularly, so we'll emulate that as well.
# * Once the first of the player is done with her glass (or whatever container she's drinking from), she's declared the winner and go on to enjoy the rest of the evening (or directly to do another ad fundum).
#
# ## The program
# So basically, our program is a glorified timer: it writes the ritual formula, starts a timer and then repeatedly writes "et glou" on the screen until the player kill it by hand (meaning by clicking on the little "stop" sign on the top of the notebook. This is not something normal for the program: it is an exception and it can be captured in the logic of the code to treat it accordingly (by default, it kills the whole program but in our case, we just want to use it to stop the never-ending stream of "et glou").
#
# Before getting down to business, there are two functions that we could see:
# +
# Demo of the two standard-library helpers this exercise relies on.
import time # Yes, in Python, you can import time itself! God-feeling, much?
help(time.sleep)
# -
help(time.time)
# Measure elapsed wall-clock time: record a start timestamp, pause for two
# seconds, record an end timestamp, and print the difference.
start = time.time()
time.sleep(2)
finish = time.time()
print("This took ", finish - start, " seconds")
# ## The formula
# The first step of this exercise is to create a function that put the initial formula. It should be a function that takes 2 arguments and returns nothing. The two arguments should be two Boolean: the first one, called *parental_version* will be True if we must print "c\*\*" instead of "cul" and the other parameter, called *gods_sake* will also be a Boolean and will be True if we have to take out the mention to God's sake.
#
# Write this function here below:
# ## Et glou
# We are now going to write a function that takes 1 argument (an integer), lets call it *x*, but do three things sequentially:
# 1. Print the infamous "Et glou" to the screen.
# 2. Sleep for *x* number of seconds
# 3. Call the function itself
# 
#
# Whaaaat? A function that calls itself? Well, yes. It is called a [recursive function](http://algosaur.us/recursion/). Recursive functions are a fascinating topic that allow you to do many stuff. For example, certain languages do not have loop as using recursive functions can substitute any type of loop. A classical example of use of recursive function is to compute Fibonacci numbers.
#
# > "To iterate is human, to recurse is divine"
# > *<NAME>*
#
# OK, enough about recursion (although if you're into it, there is plenty to do and say on the topic!). We actually need to modify our function slightly: if we leave it as is, it will recurse until the end of time (or until the memory of your computer runs out). We need to implement a way to kill this loop manually. As told before, we will implement the end of this loop as an Exception Handling problem: if the user tries to kill the program (by clicking on the square above), the exception will be captured and the function will return None, which will break the cycle. To do so, wrap the whole body of your function in a *try* block and then, in the *except* part, only capture the *KeyboardInterrupt* exception. If such an exception happens, just return *None*.
#
# Write the modified function below.
# Now it's time to tie everything together. Create a final function, that takes three arguments (the *parental_control*, the *gods_sake* and the *time_sleeping*) and that does the following:
#
# 1. Call the function that will print the ritual formula
# 2. Create a *start* variable that contains the time at which the challenge starts
# 3. Call the function that writes "et glou" recursively
# 4. Create an *end* variable containing the time after your recursiv function ends
# 5. Print the value of *end* - *start*
# That's it, you can now train your ad fundum skills from home. Drink responsibly. Prosit!
| Session_4/ad_fundum_the_programming_case.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <img align="center" style="max-width: 1000px" src="https://github.com/HSG-AIML-Teaching/ML2022-Lab/blob/main/lab_3/banner.png?raw=1">
#
# + [markdown] id="B0cP5Z789_rr"
# <img align="right" style="max-width: 200px; height: auto" src="hsg_logo.png">
#
# ## Lab 03 - Convolutional Neural Networks (CNNs) - Exercise
#
# Machine Learning, University of St. Gallen, Spring Term 2022
# + [markdown] id="Rno8GqfC9_rz"
# In the last lab we learned how to enhance vanilla Artificial Neural Networks (ANNs) using `PyTorch` to classify even more complex images. Therefore, we used a special type of deep neural network referred to **Convolutional Neural Networks (CNNs)**. CNNs encompass the ability to take advantage of the hierarchical pattern in data and assemble more complex patterns using smaller and simpler patterns. In this lab, we aim to leverage that knowledge by applying it to a set of self-coding assignments. But before we do so let's start with another motivational video by NVIDIA:
# -
from IPython.display import YouTubeVideo
# NVIDIA: "Official Intro | GTC 2020 | I AM AI"
# Embed the motivational video directly in the notebook output cell.
YouTubeVideo('e2_hsjpTi4w', width=1000, height=500)
# + [markdown] id="r93JK2DH9_r0"
# As always, pls. don't hesitate to ask all your questions either during the lab, post them in our CANVAS (StudyNet) forum (https://learning.<EMAIL>), or send us an email (using the course email).
# + [markdown] id="eW6dySzs9_r1"
# ## 1. Assignment Objectives:
# + [markdown] id="2uzc9Xr69_r1"
# Similar to today's lab session, after today's self-coding assignments you should be able to:
#
# > 1. Understand the basic concepts, intuitions and major building blocks of **Convolutional Neural Networks (CNNs)**.
# > 2. Know how to **implement and to train a CNN** to learn a model of tiny image data.
# > 3. Understand how to apply such a learned model to **classify images** based on their content into distinct categories.
# > 4. Know how to **interpret and visualize** the model's classification results.
# + [markdown] id="iPRKkkig9_r2"
# ## 2. Setup of the Jupyter Notebook Environment
# + [markdown] id="7mZL4i6W9_r2"
# Similar to the previous labs, we need to import a couple of Python libraries that allow for data analysis and data visualization. We will mostly use the `PyTorch`, `Numpy`, `Sklearn`, `Matplotlib`, `Seaborn` and a few utility libraries throughout this lab:
# + id="A9cwWtab9_r2"
# import standard python libraries
import os, urllib, io
from datetime import datetime
import numpy as np
# + [markdown] id="FrB_51t89_r3"
# Import Python machine / deep learning libraries:
# + id="ZH6LhB_q9_r3"
# import the PyTorch deep learning library
import torch, torchvision
import torch.nn.functional as F
from torch import nn, optim
from torch.autograd import Variable
# + [markdown] id="sfgYux7K9_r3"
# Import the sklearn classification metrics:
# + id="cFptYrnr9_r4"
# import sklearn classification evaluation library
from sklearn import metrics
from sklearn.metrics import classification_report, confusion_matrix
# + [markdown] id="WJJ5kfaf9_r4"
# Import Python plotting libraries:
# + id="usAgsocK9_r4"
# import matplotlib, seaborn, and PIL data visualization libary
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
# + [markdown] id="kZft6q1B9_r5"
# Enable notebook matplotlib inline plotting:
# + id="BXnX3zt_9_r5"
# %matplotlib inline
# + [markdown] id="dn2cf5SqJ2m9"
# Import Google's GDrive connector and mount your GDrive directories:
# + id="m2rj2ThhJ3sA"
# import the Google Colab GDrive connector
from google.colab import drive

# mount GDrive inside the Colab notebook
# (prompts for an authorization token on the first run)
drive.mount('/content/drive')
# + [markdown] id="-58e-iazJ8Aq"
# Create a structure of Colab Notebook sub-directories inside of GDrive to store (1) the data as well as (2) the trained neural network models:
# + id="LtB6DCWjJ-gD"
# create Colab Notebooks directory
notebook_directory = '/content/drive/MyDrive/Colab Notebooks'
# create data sub-directory inside the Colab Notebooks directory
data_directory = '/content/drive/MyDrive/Colab Notebooks/data'
# create models sub-directory inside the Colab Notebooks directory
models_directory = '/content/drive/MyDrive/Colab Notebooks/models'

# `exist_ok=True` replaces the check-then-create pattern: it avoids the
# race between `os.path.exists` and `os.makedirs`, and is a no-op when the
# directory already exists (e.g. when the notebook is re-run).
for directory in (notebook_directory, data_directory, models_directory):
    os.makedirs(directory, exist_ok=True)
# + [markdown] id="wcYgp4Gl9_r6"
# Set a random `seed` value to obtain reproducable results:
# + id="vdbqEjHb9_r7"
# init deterministic seed
# fixing both RNGs makes weight initialization and data shuffling
# reproducible across notebook runs
seed_value = 1234
np.random.seed(seed_value) # set numpy seed
torch.manual_seed(seed_value) # set pytorch seed CPU
# + [markdown] id="zpKQNDr09_r7"
# Google Colab provides the use of free GPUs for running notebooks. However, if you just execute this notebook as is, it will use your device's CPU. To run the lab on a GPU, got to `Runtime` > `Change runtime type` and set the Runtime type to `GPU` in the drop-down. Running this lab on a CPU is fine, but you will find that GPU computing is faster. *CUDA* indicates that the lab is being run on GPU.
#
# Enable GPU computing by setting the `device` flag and init a `CUDA` seed:
# + id="IAFg7INc9_r7"
# pick the compute device: prefer the GPU whenever CUDA is available
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# seed the CUDA RNG as well so GPU runs are reproducible
# (silently ignored when no GPU is present)
torch.cuda.manual_seed(seed_value)

# report which device the notebook will use
print('[LOG] notebook with {} computation enabled'.format(device))
# + [markdown] id="Y-7Ve4-_9_r7"
# Let's determine if we have access to a GPU provided by e.g. Google's COLab environment:
# + id="VCpTB9x59_r8"
# !nvidia-smi
# -
# ## 3. Convolutional Neural Networks (CNNs) Assignments
# + [markdown] id="XH1CSkRV9_r8"
# ### 3.1 CIFAR-10 Dataset Download and Data Assessment
# + [markdown] id="UWDn7IQE9_r8"
# The **CIFAR-10 database** (**C**anadian **I**nstitute **F**or **A**dvanced **R**esearch) is a collection of images that are commonly used to train machine learning and computer vision algorithms. The database is widely used to conduct computer vision research using machine learning and deep learning methods:
# + [markdown] id="awuRyFMd9_r8"
# <img align="center" style="max-width: 500px; height: 500px" src="https://raw.githubusercontent.com/HSG-AIML-Teaching/IEMBA2022-Lab/main/lab_05/cifar10.png">
#
# (Source: https://www.kaggle.com/c/cifar-10)
# + [markdown] id="pjdI5VVN9_r8"
# Further details on the dataset can be obtained via: *<NAME>., 2009. "Learning Multiple Layers of Features from Tiny Images",
# ( https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf )."*
# + [markdown] id="IaD13bmO9_r9"
# The CIFAR-10 database contains **60,000 color images** (50,000 training images and 10,000 validation images). The size of each image is 32 by 32 pixels. The collection of images encompasses 10 different classes that represent airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships, and trucks. Let's define the distinct classes for further analytics:
# + id="1WlB2yXu9_r-"
# the ten CIFAR-10 category labels, ordered by their numeric class index
cifar10_classes = [
    'plane', 'car', 'bird', 'cat', 'deer',
    'dog', 'frog', 'horse', 'ship', 'truck',
]
# + [markdown] id="kRslZNGV9_r-"
# Thereby the dataset contains 6,000 images for each of the ten classes. The CIFAR-10 is a straightforward dataset that can be used to teach a computer how to recognize objects in images.
#
# Let's download, transform and inspect the training images of the dataset. Therefore, we first will define the directory we aim to store the training data:
# + id="B2Bmhc-c9_r-"
# where the training split will be cached on GDrive
train_path = data_directory + '/train_cifar10'
# + [markdown] id="b6AGBP_K9_r_"
# Now, let's download the training data accordingly:
# + id="G_-Zs4EU9_sA"
# define pytorch transformation into tensor format
# Normalize with mean=std=0.5 per channel rescales pixel values from [0, 1] to [-1, 1]
transf = torchvision.transforms.Compose([torchvision.transforms.ToTensor(), torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# download and transform training images
cifar10_train_data = torchvision.datasets.CIFAR10(root=train_path, train=True, transform=transf, download=True)
# + [markdown] id="g79sdHOw9_sA"
# Verify the volume of training images downloaded:
# + id="uiKFBLrI9_sA"
# get the length of the training data
len(cifar10_train_data)
# + [markdown] id="mWcoDhr_9_sC"
# Let's now decide on where we want to store the evaluation data:
# + id="hKFBcveC9_sC"
# where the evaluation split will be cached on GDrive
eval_path = data_directory + '/eval_cifar10'
# + [markdown] id="nB5OpV4z9_sC"
# And download the evaluation data accordingly:
# + id="L-OOVFFs9_sD"
# define pytorch transformation into tensor format
# (same normalization as the training split so both live in [-1, 1])
transf = torchvision.transforms.Compose([torchvision.transforms.ToTensor(), torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# download and transform validation images
cifar10_eval_data = torchvision.datasets.CIFAR10(root=eval_path, train=False, transform=transf, download=True)
# + [markdown] id="WF4VrcHG9_sD"
# Let's also verify the volume of validation images downloaded:
# + id="vhZRDL4X9_sD"
# get the length of the evaluation data
len(cifar10_eval_data)
# + [markdown] id="B9Xivz3j9_sD"
# ### 3.2 Convolutional Neural Network (CNN) Model Training and Evaluation
# + [markdown] id="nswYOXvk9_r0"
# <img align="center" style="max-width: 900px" src="https://raw.githubusercontent.com/HSG-AIML-Teaching/IEMBA2022-Lab/main/lab_05/classification.png">
# + [markdown] id="Tgqmaa129_sZ"
# We recommend you to try the following exercises as part of the self-coding session:
#
# **Exercise 1: Train the neural network architecture of the lab with increased learning rate.**
# -
# > Increase the learning rate of the network training to a value of **0.1** (instead of currently 0.001) and re-run the network training for 10 training epochs. Load and evaluate the model exhibiting the lowest training loss. What kind of behavior in terms of loss convergence and prediction accuracy can be observed?
# + id="Kx4C87LF9_sZ"
# ***************************************************
# INSERT YOUR SOLUTION/CODE HERE
# ***************************************************
# ***************************************************
# Task 1: define and init neural network architecture
# ***************************************************
# implement the CIFAR10Net network architecture
class CIFAR10Net(nn.Module):
# define the class constructor
def __init__(self):
# ***************************************************
# insert the network architecture here
# ***************************************************
# define network forward pass
def forward(self, images):
# ***************************************************
# insert the network forwad pass here
# ***************************************************
# return forward pass result
return x
# init the neural network model
model = ???
# ***************************************************
# Task 2: define loss, training hyperparameters and dataloader
# ***************************************************
# define the optimization criterion / loss function
nll_loss = ???
# define learning rate and optimization strategy
learning_rate = ???
optimizer = optim.SGD(params=model.parameters(), lr=learning_rate)
# specify the training parameters
num_epochs = ??? # number of training epochs
mini_batch_size = ??? # size of the mini-batches
# define training dataloader
cifar10_train_dataloader = torch.utils.data.DataLoader(cifar10_train_data, batch_size=mini_batch_size, shuffle=True)
# ***************************************************
# Task 3: run model training
# ***************************************************
# init collection of training epoch losses
train_epoch_losses = []
# set the model in training mode
model.train()
# train the CIFAR10 model
for epoch in range(num_epochs):
# init collection of mini-batch losses
train_mini_batch_losses = []
# iterate over all-mini batches
for i, (images, labels) in enumerate(cifar10_train_dataloader):
# run forward pass through the network
output = ???
# reset graph gradients
model.zero_grad()
# determine classification loss
loss = ???
# run backward pass
loss.backward()
# update network paramaters
optimizer.step()
# collect mini-batch reconstruction loss
train_mini_batch_losses.append(loss.data.item())
# determine mean min-batch loss of epoch
train_epoch_loss = np.mean(train_mini_batch_losses)
# print epoch loss
now = datetime.utcnow().strftime("%Y%m%d-%H:%M:%S")
print('[LOG {}] epoch: {} train-loss: {}'.format(str(now), str(epoch), str(train_epoch_loss)))
# save model to local directory
model_name = 'cifar10_model_epoch_{}.pth'.format(str(epoch))
torch.save(model.state_dict(), os.path.join(models_directory, model_name))
# determine mean min-batch loss of epoch
train_epoch_losses.append(train_epoch_loss)
# ***************************************************
# Task 4: run model evaluation
# ***************************************************
# define eval dataloader
cifar10_eval_dataloader = torch.utils.data.DataLoader(cifar10_eval_data, batch_size=10000, shuffle=False)
# determine model predictions
predictions = torch.argmax(model(???, dim=1)
# determine accuracy scores
accuracy = metrics.accuracy_score(???, ???)
# print the classification accuracy percentage
print('Final CIFAR10Net classification accuracy: {}%'.format(accuracy * 100))
# + [markdown] id="UNBnGfwU9_sa"
# **2. Evaluation of "shallow" vs. "deep" neural network architectures.**
# -
# > In addition to the architecture of the lab notebook, evaluate further (more **shallow** as well as more **deep**) neural network architectures by either **removing or adding convolutional layers** to the network. Train a model (using the architectures you selected) for at least **20 training epochs**. Analyze the prediction performance of the trained models in terms of training time and prediction accuracy.
# + id="h-zirkqH9_sa"
# ***************************************************
# INSERT YOUR SOLUTION/CODE HERE
# ***************************************************
# ***************************************************
# Task 1: define and init neural network architecture
# ***************************************************
# implement the CIFAR10Net network architecture
class CIFAR10Net(nn.Module):
# define the class constructor
def __init__(self):
# call super class constructor
super(CIFAR10Net, self).__init__()
# ***************************************************
# insert the network architecture here
# ***************************************************
# define network forward pass
def forward(self, images):
# ***************************************************
# insert the network forwad pass here
# ***************************************************
# return forward pass result
return x
model = CIFAR10Net()
# ***************************************************
# Task 2: define loss, training hyperparameters and dataloader
# ***************************************************
# define the optimization criterion / loss function
nll_loss = ???
# define learning rate and optimization strategy
learning_rate = ???
optimizer = optim.SGD(params=model.parameters(), lr=learning_rate)
# specify the training parameters
num_epochs = ??? # number of training epochs
mini_batch_size = ??? # size of the mini-batches
# define training dataloader
cifar10_train_dataloader = torch.utils.data.DataLoader(cifar10_train_data, batch_size=mini_batch_size, shuffle=True)
# ***************************************************
# Task 3: run model training
# ***************************************************
# init collection of training epoch losses
train_epoch_losses = []
# set the model in training mode
model.train()
# train the CIFAR10 model
for epoch in range(num_epochs):
# init collection of mini-batch losses
train_mini_batch_losses = []
# iterate over all-mini batches
for i, (images, labels) in enumerate(cifar10_train_dataloader):
# run forward pass through the network
output = ???
# reset graph gradients
model.zero_grad()
# determine classification loss
loss = ???
# run backward pass
loss.backward()
# update network paramaters
optimizer.step()
# collect mini-batch reconstruction loss
train_mini_batch_losses.append(loss.data.item())
# determine mean min-batch loss of epoch
train_epoch_loss = np.mean(train_mini_batch_losses)
# print epoch loss
now = datetime.utcnow().strftime("%Y%m%d-%H:%M:%S")
print('[LOG {}] epoch: {} train-loss: {}'.format(str(now), str(epoch), str(train_epoch_loss)))
# save model to local directory
model_name = 'cifar10_model_epoch_{}.pth'.format(str(epoch))
torch.save(model.state_dict(), os.path.join(models_directory, model_name))
# determine mean min-batch loss of epoch
train_epoch_losses.append(train_epoch_loss)
# ***************************************************
# Task 4: run model evaluation
# ***************************************************
# define eval dataloader
cifar10_eval_dataloader = torch.utils.data.DataLoader(cifar10_eval_data, batch_size=10000, shuffle=False)
# determine model predictions
predictions = torch.argmax(model(???, dim=1)
# determine accuracy scores
accuracy = metrics.accuracy_score(???, ???)
# print the classification accuracy percentage
print('Final CIFAR10Net classification accuracy: {}%'.format(accuracy * 100))
| lab_3/colab_03_exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Learning Objectives
#
# - How we can dump large dataset on S3 and read it by using Boto
#
# - Learn this by uploading churn dataset on S3, train a Keras DL model by `Churn_Modelling.csv`
# S3 + Boto:
# - pip install awscli (!pip install awscli on Google Colab)
# - $ aws configure (!aws configure on Google Colab)
# - AWS Access Key ID [None]: ...
# - AWS Secret Access Key [None]: ...
# - Default region name [None]: ...
# - Default output format [None]: ...
# +
import pandas as pd
import boto3

# S3 bucket and object key of the churn dataset uploaded earlier
bucket = "makeschooldata"
file_name = "data/Churn_Modelling.csv"

# create an S3 client using the credentials set up via `aws configure`
s3 = boto3.client('s3')
# 's3' is a key word. create connection to S3 using default config and all buckets within S3
obj = s3.get_object(Bucket=bucket, Key=file_name)
# get object and file (key) from bucket
# the response 'Body' is a streaming file-like object, which pandas can read directly
df = pd.read_csv(obj['Body']) # 'Body' is a key word
print(df.head())
# -
# # Churn Prediction
#
# - Lets first read: https://medium.com/@pushkarmandot/build-your-first-deep-learning-neural-network-model-using-keras-in-python-a90b5864116d
# +
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import keras
from keras.models import Sequential
from keras.layers import Dense
from sklearn.metrics import confusion_matrix

print(df.head())

# features: columns 3..12 (credit score ... estimated salary); target: column 13 ('Exited')
X = df.iloc[:, 3:13].values
y = df.iloc[:, 13].values
print(X)
print(X.shape)
print(y)

# encode the two categorical columns (geography at index 1, gender at index 2) as integers
label_encoder_X_1 = LabelEncoder()
X[:, 1] = label_encoder_X_1.fit_transform(X[:, 1])
label_encoder_X_2 = LabelEncoder()
X[:, 2] = label_encoder_X_2.fit_transform(X[:, 2])
print(X)
print(X.shape)

# one-hot encode the geography column. `OneHotEncoder(categorical_features=[1])`
# was removed in scikit-learn 0.22; ColumnTransformer with remainder='passthrough'
# reproduces the old behavior (encoded columns first, remaining columns after).
column_transformer = ColumnTransformer(
    [('geography', OneHotEncoder(), [1])], remainder='passthrough')
X = column_transformer.fit_transform(X)
# drop the first dummy column to avoid the dummy-variable trap
X = X[:, 1:]
# print('M:')
# print(X[:, :10])
# print(X[:, 10])

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Feature Scaling: fit on the training set only, then apply to the test set
# so no information leaks from the test data
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
print(X_train.shape)

classifier = Sequential()

# Adding the input layer and the first hidden layer
# (`output_dim`/`init` were renamed to `units`/`kernel_initializer` in Keras 2)
classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))

# Adding the second hidden layer
classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))

# Adding the output layer (sigmoid yields a churn probability in [0, 1])
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))

# Compiling Neural Network
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Fitting our model (`nb_epoch` was renamed to `epochs` in Keras 2)
classifier.fit(X_train, y_train, batch_size=10, epochs=50, verbose=1)

# Predicting the Test set results
y_predict = classifier.predict(X_test)
print(y_predict)
# threshold the predicted churn probabilities at 0.5
y_predict = (y_predict > 0.5)

cm = confusion_matrix(y_test, y_predict)
print(cm)
# -
# ## SQL
# +
import sqlite3 as lite

con = lite.connect('population.db')
# `with con:` commits the transaction on success and rolls back on error
with con:
    cur = con.cursor()
    # drop and recreate so the cell can be re-run without hitting a
    # "table Population already exists" error or accumulating duplicate rows
    cur.execute("DROP TABLE IF EXISTS Population")
    cur.execute("CREATE TABLE Population(id INTEGER PRIMARY KEY, country TEXT, population INT)")
    cur.execute("INSERT INTO Population VALUES(NULL,'Germany',81197537)")
    cur.execute("INSERT INTO Population VALUES(NULL,'France', 66415161)")
    cur.execute("INSERT INTO Population VALUES(NULL,'Spain', 46439864)")
    cur.execute("INSERT INTO Population VALUES(NULL,'Italy', 60795612)")
    # NOTE(review): Spain is inserted twice in the original notebook; kept
    # as-is -- confirm whether the duplicate row is intentional
    cur.execute("INSERT INTO Population VALUES(NULL,'Spain', 46439864)")
# +
import pandas as pd
import sqlite3

# read the countries with more than 50 million inhabitants into a DataFrame
conn = sqlite3.connect('population.db')
query = "SELECT country FROM Population WHERE population > 50000000;"
df = pd.read_sql_query(query, conn)

# print one matching country name per line
for name in df['country']:
    print(name)
# -
# ## Setup the MongoDB and insert and have query in Python
#
# Read: https://marcobonzanini.com/2015/09/07/getting-started-with-mongodb-and-python/
# +
from pymongo import MongoClient
from datetime import datetime, timezone

# connect to the local MongoDB instance (default host/port)
client = MongoClient()
db = client['tutorial']
coll = db['articles']

doc = {
    "title": "An article about MongoDB and Python",
    "author": "Marco",
    # timezone-aware UTC timestamp: datetime.utcnow() is deprecated since
    # Python 3.12 and returns a naive datetime; MongoDB stores UTC anyway
    "publication_date": datetime.now(timezone.utc),
    # more fields
}

# insert the document and keep its generated ObjectId
doc_id = coll.insert_one(doc).inserted_id
# +
from pymongo import MongoClient

# re-connect and iterate over the documents stored in the 'articles' collection
client = MongoClient()
db = client['tutorial']
coll = db['articles']

# find() with no filter returns a cursor over all documents
for doc in coll.find():
    print(doc)
# -
# ### Syntaxes:
#
# sudo mkdir -p /data/db
#
# whoami
#
# sudo chown miladtoutounchian /data/db
#
# ./bin/mongod
#
# ## Download MongoDB Compass
| Lessons/S3_Boto.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Section One - Working with Machine Learning
# The world around you is increasingly powered by machine learning, but you don’t know how to start. Here we help you get the toolbox for machine learning in Python in just 30 minutes.
# # 1.1: Goals and variations in Machine Learning
# # 1.3: Exploring your data using pandas
# query
# fix: this notebook section uses pandas/numpy without importing them
# anywhere above, so the cells fail with NameError -- import them here
# so the section runs standalone.
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.rand(10, 3), columns=list('xyz'))
df
# we can query the df like this
df[(df.x < df.y) & (df.y < df.z)]
# or, equivalently
df.query('(x < y) & (y < z)')
# query based on the VALUE of the index
df.query('index > y > z')
# using in and not in
df = pd.DataFrame({'a': list('aabbccddeeff'), 'b': list('aaaabbbbcccc')})
df
df.query('a in b')
df.query('a not in b')
df.query('b == ["a", "b"]')
# +
# where
# DataFrame.where(cond, other=nan, inplace=False, axis=None,
#                 level=None, errors='raise', try_cast=False, raise_on_error=None)
s = pd.Series(range(5))
s
# -
# select a subset
s[s > 0]
# select a subset, but retain the shape of the Series
s.where(s > 0)
# similar, but different
s.mask(s > 0)
| Chapter 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Wrangling & Cleaning
# +
# import the library
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# convert scientific notation to decimals
# (display every float with two decimal places instead of scientific notation)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
# -
# ### Load & Merge the data
# load the three raw CSV files: house listings, walk scores per zipcode,
# and income/population statistics per zipcode
df_listing = pd.read_csv('data/kc_house_data.csv')
df_walking_score = pd.read_csv('data/walking_score.csv')
df_income = pd.read_csv('data/ZIP-3.csv')
# ### Summarizing your data for inspection
print('Listings')
print(df_listing.columns)
print(df_listing.head())
print(df_listing.describe())
print('')
print('Walking Score')
# TODO: print the columns, head and describe for the Walking Score dataframe
print(df_walking_score.columns)
print(df_walking_score.head())
print(df_walking_score.describe())
print('')
print('Income')
# TODO: print the columns, head and describe for the Income dataframe
print(df_income.columns)
print(df_income.head())
print(df_income.describe())
# ### Fixing column name
# give the income columns snake_case names that match the other dataframes
# (the merge below joins on 'zipcode')
df_income.columns = ['zipcode', 'median_income', 'mean_income', 'population']
# ### Converting data types
# parse the listing date strings into datetime64 values
df_listing['date'] = pd.to_datetime(df_listing['date'])
# the income figures use thousands separators (e.g. "81,197"); strip the
# commas before converting to numeric types
df_income['median_income'] = df_income['median_income'].str.replace(',', '').astype(float)
df_income['mean_income'] = df_income['mean_income'].str.replace(',', '').astype(float)
df_income.head()
# TODO: Convert the data type of the population column
# NOTE(review): .astype(int) raises if any population value is missing --
# confirm the column has no NaNs in the source CSV
df_income['population'] = df_income['population'].str.replace(',', '').astype(int)
df_income.head()
# ### Dealing with missing values
# How to deal with the missing values? Should we remove the rows or fill the gap with a value?
# Number of missing values by columns
print(df_listing.isnull().sum())
print('')
print(df_walking_score.isnull().sum())
print('')
print(df_income.isnull().sum())
# select all the rows with missing values
df_walking_score[df_walking_score.isnull().any(axis=1)]
# select all the rows with missing values
df_income[df_income.isnull().any(axis=1)]
# TODO: Create a strategy to handle the missing values on the Walking Score and Income dataframes
# strategy: where mean_income is missing, fall back to the median income
# of the same row instead of dropping the zipcode
df_income['mean_income'] = df_income['mean_income'].fillna(df_income['median_income'])
# ### Removing outliers
# Some algorithms are very sensitive to outliers. Considering the number of bedrooms, should we remove houses with an extreme number of bedrooms? How many bedrooms are too many? (Suggestion: as a rule of thumb, three standard deviations from the mean is a good measure to identify outliers).
# bedrooms
# inspect the bedroom-count distribution before filtering
bedrooms = df_listing['bedrooms']
print(bedrooms.value_counts())
print('mean', np.mean(bedrooms))
print('std', np.std(bedrooms))
plt.hist(bedrooms, bins=20)
plt.show()

# TODO: Remove the outlier houses considering the number of bedrooms
# keep only the listings whose bedroom count lies strictly within three
# standard deviations of the mean (rule of thumb from the note above)
mean_bedrooms = np.mean(bedrooms)
std_bedrooms = np.std(bedrooms)
lower = mean_bedrooms - 3 * std_bedrooms
upper = mean_bedrooms + 3 * std_bedrooms
df_listing = df_listing[(df_listing['bedrooms'] > lower) & (df_listing['bedrooms'] < upper)]
# +
# Dealing with outliers
# for every zipcode, flag the houses priced more than three standard
# deviations above that zipcode's mean price
houses_to_remove = []

# remove based on zipcode and price
for zipcode in df_listing['zipcode'].unique():
    prices = df_listing.loc[df_listing['zipcode'] == zipcode, 'price']
    cutoff = np.mean(prices) + 3.0 * np.std(prices)
    houses_to_remove.extend(prices[prices > cutoff].index)

print('')
print('# houses to remove', len(houses_to_remove))
df_listing = df_listing[~df_listing.index.isin(houses_to_remove)]
# -
# ### Merging Data Sets
# combine the listings with walk score and income data on the shared
# zipcode key; 'left' keeps every listing even when a zipcode has no
# matching score/income row
df_merge = df_listing.copy()
df_merge = df_merge.merge(df_walking_score, on='zipcode', how='left')
df_merge = df_merge.merge(df_income, on='zipcode', how='left')
print('Total # houses', len(df_merge))
# ### Saving the processed file
# persist the merged dataset; index=False keeps the row index out of the CSV
df_merge.to_csv('data/house_pricing.csv', index=False)
| week3/data-wrangling-analysis_ext.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# 1000x1000 grid of 3-component state vectors, uniform in [0, 10)
inputs = np.random.rand(1000,1000,3)*10
print(inputs.shape)
# +
# Lorenz-system parameters (the classic chaotic values)
sigma = 10.0
rho = 28.0
beta = 8.0/3.0

# element-wise Lorenz right-hand sides over the whole grid,
# with channels 0/1/2 playing the roles of x/y/z
pure1 = sigma*(inputs[:,:,1]-inputs[:,:,0])
pure2 = inputs[:,:,0]*(rho-inputs[:,:,2])-inputs[:,:,1]
# NOTE(review): the Lorenz dz/dt term is x*y - beta*z, but this computes
# x*z - beta*z (channel 2 instead of channel 1 in the product) -- confirm
# whether that is intentional
pure3 = inputs[:,:,0]*inputs[:,:,2] - beta*inputs[:,:,2]

# noise addition is currently disabled (commented out)
out1 = pure1#+np.random.random(out1.shape)
out2 = pure2#+np.random.random(out2.shape)
out3 = pure3#+np.random.random(out3.shape)
print(out1.shape, out2.shape, out3.shape)
# -
# flatten the grid so each row is one 3-component sample
print(inputs.reshape(-1, inputs.shape[-1]).shape)

from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn.linear_model import SGDRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

#reg = make_pipeline(StandardScaler(), SGDRegressor(max_iter=1000, tol=1e-3))
# plain SGD regressor without feature scaling
reg = SGDRegressor(max_iter=1000, tol=1e-3)
# +
# flatten once instead of re-computing the same reshape six times:
# each row of X is one (x, y, z) sample
X = inputs.reshape(-1, inputs.shape[-1])

# fit and predict each output component in turn
# NOTE: `reg` is re-fit for every target, so only the last fit survives in `reg`
reg.fit(X, out1.reshape(-1))
pred1 = reg.predict(X)

reg.fit(X, out2.reshape(-1))
pred2 = reg.predict(X)

reg.fit(X, out3.reshape(-1))
pred3 = reg.predict(X)
# -
import matplotlib.pyplot as plt

# predicted values on the x-axis vs. true values on the y-axis,
# one color per output component
plt.plot(pred1, out1.reshape(-1), '.', color='black')
plt.plot(pred2, out2.reshape(-1), '.', color='red')
# consistency fix: the original swapped the axes for the third component
# (true values on x, predictions on y); plot it like the other two
plt.plot(pred3, out3.reshape(-1), '.', color='blue')
import pickle

# serialize the fitted regressor to a bytes blob (could be written to disk/S3)
a = pickle.dumps(reg)
print(a)

# project-local helper: builds a property grid from a directory of scans
from src.GridMapper import process_grid
a = process_grid('example_data/Scans/Grid1_back/', 1)
a
a.shape

import numpy as np
# weight the grid channels randomly and average over them
# NOTE(review): assumes `a` has 7 channels on its last broadcastable axis -- confirm
b = (a*np.random.rand(7)).mean(2)
b.shape

from matplotlib.pyplot import imread, imsave
# save the averaged map as an 8-bit grayscale JPEG (values clipped to [0, 255])
imsave('./example_data/Properties/Grid1_back/PROP0003.jpg', b, cmap='gray', vmax=255, vmin=0)
b.max()
b.min()
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="gdoSGJp1fRAX"
# [Information Visualization Tutorials](https://infovis.fh-potsdam.de/tutorials/) · FH Potsdam · Summer 2020
#
#
# # Tutorial 2: Data wrangling
#
# Welcome back! This tutorial shows you a few tricks for preparing data for visualization. You will see how data can be loaded, parsed, and examined. For this we will continue to work with the **Pandas** package, in particular with the DataFrame data structure, and get to know a few additional helpers.
#
# *Just as in the first tutorial, you should be able to run the notebook yourself, edit the contents of all cells, and try things out. Here and there particular opportunities for such edits are highlighted with a pencil.*
# + colab_type="code" id="rGgKjuCxWTrc" colab={}
import pandas as pd
# + [markdown] colab_type="text" id="6BHcIjBEGbAa"
# ## Loading
#
# The first step is to bring the data into the purview of your notebook. So regardless of data structure and format, you need to have access to the data set. We will briefly cover four common ways of loading data into your Jupyter notebook.
# + [markdown] colab_type="text" id="ntNgWbFjAG7c"
# ### Enter data directly
#
# The simplest way to add data to your notebook is to enter it verbatim into the notebook as we have seen with the capital cities in the first tutorial:
# + colab_type="code" id="KCLYK8ZTSN0d" colab={"base_uri": "https://localhost:8080/", "height": 173} executionInfo={"status": "ok", "timestamp": 1598364864944, "user_tz": -120, "elapsed": 706, "user": {"displayName": "<NAME>\u00f6rk", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="1f3d1b29-0b30-44d5-83d1-9efaa901faab"
# Small hand-entered table of European capitals:
# name, area (km²), elevation (m), population.
city_records = [
    ("Athens", 39, 170, 664046),
    ("Bratislava", 367.6, 152, 429564),
    ("Copenhagen", 86.2, 14, 602481),
    ("Dublin", 115, 20, 553165),
]
cities = pd.DataFrame(city_records,
                      columns=["name", "area", "elevation", "population"])
cities
# + [markdown] colab_type="text" id="Y94kj2Doloen"
# ✏️ *Add a column for years when you have visited or plan to visit these cities*
# + [markdown] colab_type="text" id="-Q9g3MkXqlcz"
# ### Open a local file
#
# You might also want to open a local file. Because this notebook is hosted on Colab, you can use a few example files in the `sample_data` directory that you can browse on the left side under the folder icon.
#
# We can open a file using Python's built-in `open()` method, after which we can `read()` its contents into the variable `anscombe` and finally `close()` it again. In this case the data is in the JSON format, which we will need to parse. We'll get to this later. You can open all kinds of formats. Here we know that we are dealing with a JSON file because of its extension.
#
# (If you're running this notebook locally, you can download the `sample_data` directory from [here](http://infovis.fh-potsdam.de/tutorials/sample_data.zip). You need to unzip the directory at the same level where you placed this notebook, to do the following step).
#
# + colab_type="code" id="-zYvVtNatR0N" colab={"base_uri": "https://localhost:8080/", "height": 174} executionInfo={"status": "ok", "timestamp": 1598364864945, "user_tz": -120, "elapsed": 696, "user": {"displayName": "<NAME>00f6rk", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="cda41222-71c3-4c40-a507-9e8a3b1d604b"
# Read the Anscombe JSON file into a string. A context manager guarantees
# the file is closed even if read() raises — the original open/read/close
# sequence leaked the handle on error.
with open("sample_data/anscombe.json") as file:
    anscombe_json = file.read()
anscombe_json
# + [markdown] colab_type="text" id="DHDD0EivmpvJ"
# ✏️ *Take a look into the sample_data directory and try open any other file*
# + id="StaKncKIwSt9" colab_type="code" colab={}
# + [markdown] colab_type="text" id="NH5QGv2oqcZI"
# ### Get data via a URL
#
# There are some methods that can directly load a dataset via a URL, i.e., a web address. For others you might have to retrieve the file first to continue parsing it. The `requests` package helps you to send HTTP requests and retrieve the responses.
#
# In the following, the news feed of Tagesschau is retrieved via an HTTP GET request. Note that the news feed is made available as an XML format; of course you can retrieve all kinds of file formats using this method:
# + colab_type="code" id="Q_688jMurG0G" colab={"base_uri": "https://localhost:8080/", "height": 37} executionInfo={"status": "ok", "timestamp": 1598364866180, "user_tz": -120, "elapsed": 1918, "user": {"displayName": "<NAME>\u00f6rk", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="04d7f83c-4b93-494d-a481-f420fddf7883"
import requests

# Fetch the Tagesschau RSS feed (raw XML) over HTTP.
feed_url = 'https://www.tagesschau.de/xml/rss2/'
response = requests.get(feed_url)
tagesschau_xml = response.text
# Peek at the first 100 characters of the feed.
tagesschau_xml[:100]
# + [markdown] colab_type="text" id="1kp9bNB1m5P7"
# ✏️ *Find the news feed for another webpage and try to load it*
# + [markdown] colab_type="text" id="TSDRXTkhv009"
# ### Use an API
#
# Some web platforms require the use of an API (application programming interface) to get access to their data. Simply put, an API is a structured way to request and retrieve data. Oftentimes it is just a specific way to format the URL.
#
# The German National Library offers an [API to query Entity Facts](https://www.dnb.de/EN/Professionell/Metadatendienste/Datenbezug/Entity-Facts/entityFacts_node.html) contained in the GND (Gemeinsame Normdatei). In this case the API provides the data in the JSON format, which has become quite common for web APIs, but you will also encounter many other formats.
#
# To retrieve information for a given GND entity by its id, such as the GND entry for the artist [<NAME>](https://en.wikipedia.org/wiki/Käthe_Kollwitz) you have to put the `base_url` together with the `gnd_id`:
# + colab_type="code" id="LSomRaYI56aB" colab={"base_uri": "https://localhost:8080/", "height": 37} executionInfo={"status": "ok", "timestamp": 1598364867811, "user_tz": -120, "elapsed": 3538, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="522e619b-eca9-41c2-9cb6-d023b3a1c9c2"
# Query the GND Entity Facts API for one entity and peek at the JSON reply.
base_url = "https://hub.culturegraph.org/entityfacts/"
gnd_id = "118564943" # GND identifier from the Wikipedia page on Käthe Kollwitz
entity_url = base_url + gnd_id
gnd_response = requests.get(entity_url).text
gnd_response[:100]
# + [markdown] colab_type="text" id="1nXg3jrQm9K9"
# ✏️ *Prepare an API request with a GND id of another person of German history*
# + [markdown] colab_type="text" id="gE3bRFXPuteI"
# ## Parsing
#
# Apart from our little cities example, so far we have only loaded the data into unstructured strings. To be able to analyze the data, we have to turn the unstructured strings of symbols into a practical data structure of the DataFrame that we can work with. This process is commonly referred to as ‘parsing’.
#
# As we have seen above, data can come in various file formats, which are in turn more or less appropriate for particular data structures. We'll cover four typical ones in the following section, but we will see more over the course of the tutorials to come.
#
# The different ways of loading data (e.g., by file path or URL) are independent from the particular data formats provided. For example, you can load CSV data from a local file or from a web address. While the files typically indicate with the extension what format they have, URLs or APIs may not have these. If it is not clear, you may have to check the documentation or take a peek into the file.
# + [markdown] colab_type="text" id="qu1WltFiAL2H"
# ### CSV
#
# The CSV format is probably the most common file format in the context of data analysis and visualization. CSV files contain tabular data that can be viewed and edited in a spreadsheet software such as Excel. CSV stands for [comma-separated values](https://en.wikipedia.org/wiki/Comma-separated_values), which seems to say it all: the data values are separated by commas and each row represents one item. However, there are also CSV files that use separators other than commas, such as tabs and semicolons.
#
# Let's load a CSV file! Thankfully Pandas has the convenient `read_csv()` method ready for us, which can open CSV data via a file path or URL, and turns it directly into a DataFrame object.
#
# Here we're retrieving information about [childcare places in Potsdam](https://opendata.potsdam.de/explore/dataset/kitaboerse-20161108/export/) from Potsdam's Open Data portal. This file happens to be in a CSV format that uses the semicolon as a separator (hence `sep=";"` as the second parameter in the method call):
# + colab_type="code" id="O6qOWzNVQfQ_" colab={"base_uri": "https://localhost:8080/", "height": 479} executionInfo={"status": "ok", "timestamp": 1598364870740, "user_tz": -120, "elapsed": 6455, "user": {"displayName": "<NAME>\u00f6rk", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="3a11e43c-be32-4541-d2b9-adb292223e83"
# Childcare facilities from Potsdam's open-data portal; this export uses
# semicolons as field separators.
kitas_url = "https://opendata.potsdam.de/explore/dataset/kitaboerse-20161108/download/"
kitas = pd.read_csv(kitas_url, sep=";")
kitas.head()
# + [markdown] colab_type="text" id="RlHtXFWwnAk2"
# ✏️ *Try loading another CSV dataset from [Potsdam's Open Data Portal](https://opendata.potsdam.de/)*
# + id="2gpI-dOGxsZH" colab_type="code" colab={}
# + [markdown] colab_type="text" id="4_Z3eLmADxI9"
# ### JSON
#
# [JSON](https://en.wikipedia.org/wiki/JSON) is a format that grew in popularity in the context of web development. It stands for JavaScript Object Notation and makes for a compact, yet relatively human-readable format.
#
# Above we have already opened the contents of the local file `anscombe.json` using the `file.read()` method. To parse the contents into the structured form of a DataFrame, with which we can do some analysis, we can rely on Pandas and the handy `read_json()` method. Let's do this with the anscombe data we already opened:
# + colab_type="code" id="oeiqkg_GKQv3" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1598364870742, "user_tz": -120, "elapsed": 6441, "user": {"displayName": "<NAME>\u00f6rk", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="40034408-43e6-4902-be4a-15702b4eba74"
# Parse the JSON string loaded earlier into a structured DataFrame.
anscombe = pd.read_json(anscombe_json)
anscombe
# + [markdown] colab_type="text" id="q5A-ESb8dyrs"
# ### XML
#
# [XML](https://en.wikipedia.org/wiki/XML) (Extensible Markup Language) is a data format, which can have very different kinds of hierarchical structures. XML files are common in a wide variety of contexts, including libraries, and especially in situations, in which the interoperability of multiple systems by several vendors needs to be ensured.
#
# The 🌲 **ElementTree** module will help us to parse the elements contained in an XML file.
# + colab_type="code" id="ZZDFbj7zns35" colab={}
import xml.etree.ElementTree as ET
# + [markdown] colab_type="text" id="N7f3zyqLcCFR"
# As we have already retrieved the XML feed from Tagesschau (and saved it in the variable `tagesschau_xml`), we can now parse it directly from the string, i.e., using the method `ET.fromstring()`:
# + colab_type="code" id="a_tk2R4RcOUA" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1598364870744, "user_tz": -120, "elapsed": 6390, "user": {"displayName": "<NAME>\u00f6rk", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="f4d38296-54d4-45a1-c94a-cb640b069040"
# Parse the RSS XML string into an ElementTree Element (the feed's root node).
tagesschau = ET.fromstring(tagesschau_xml)
tagesschau
# + [markdown] colab_type="text" id="0dLGK8JZeMGQ"
# This gives the root element of the XML feed (and all its children) in the variable `tagesschau`.
#
# Going through all items with `findall` and within these with `find` for specific sub-elements, we can extract the publication date and time and the title of the respective item. In the following these elements are put together into the DataFrame `tagesschau_df`. Note that it helps to peek into the XML source of the feed to know the specific element names.
# + colab_type="code" id="2DOgw1FAy3k_" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1598364870745, "user_tz": -120, "elapsed": 6368, "user": {"displayName": "<NAME>\u00f6rk", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="e2d565ec-50b6-47af-d5cd-3a55b6de3ef0"
# Extract the publication date and the title of every <item> in the feed.
dates = [item.find('.//pubDate').text for item in tagesschau.findall('.//item')]
titles = [item.find('.//title').text for item in tagesschau.findall('.//item')]

# Combine the two parallel lists into a two-column DataFrame.
tagesschau_df = pd.DataFrame({'date': dates,
                              'title': titles,
                              })
tagesschau_df
# + [markdown] colab_type="text" id="XZrj9B2PnHLz"
# ✏️ *Each news item also contains a `description` element. Why not add a third column to the DataFrame?*
# + [markdown] colab_type="text" id="12tuthmXAbdo"
# ### HTML
#
# While not really a data format per se, many datasets are actually published via the document format of the web: [HTML](https://en.wikipedia.org/wiki/HTML) (hypertext markup language). All webpages are in one way or another provided in the form of HTML.
#
# Turning HTML content of webpages into structured data, also known as scraping, is necessary when information is published on webpages, while not (yet) available as structured data sets. For this scenario, the module 🍜 **Beautiful Soup** is your friend! To use it you can import the latest version by referring to `bs4`:
#
# + colab_type="code" id="Rhw1hPrUs14e" colab={}
import bs4
# + [markdown] colab_type="text" id="4EoD1npYhNKr"
# Suppose you want to extract the table of cities on the Wikipedia page [List of largest cities](https://en.wikipedia.org/wiki/List_of_largest_cities) you would first get the content of the page and then involve the magic powers of Beautiful Soup, some of which might look a bit cryptic at this point:
#
# + id="VaxnFGCobvx1" colab_type="code" colab={}
# The permalink pins a specific revision of the Wikipedia page so the table
# structure stays stable; fetch its raw HTML.
permalink = "https://en.wikipedia.org/w/index.php?title=List_of_largest_cities&oldid=952676625"
wiki = requests.get(permalink).text
# + [markdown] id="QVyBo7eWytqe" colab_type="text"
# In the variable `wiki` we now have the HTML content of the Wikipedia page. Next we initialize Beautiful Soup's `html.parser` and engage in a bit of data parsing:
# + colab_type="code" id="9GtwMof1i83I" colab={"base_uri": "https://localhost:8080/", "height": 419} executionInfo={"status": "ok", "timestamp": 1598364874496, "user_tz": -120, "elapsed": 10097, "user": {"displayName": "<NAME>\u00f6rk", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="d98f537e-f5cf-499d-bfa2-792d6207b3a4"
soup = bs4.BeautifulSoup(wiki, "html.parser")

# extract all table elements
tables = soup.find_all('table')

# Collect (city, population) pairs from every table on the page.
names = []
pops = []
for table in tables:
    # <tr> is the html element for table rows
    for row in table.find_all('tr'):
        # <td> is the html element for table cells
        cells = row.find_all('td')
        if len(cells) <= 1:
            continue  # header or otherwise too-short row
        # the population value lives in a <span> in the fourth column
        pop = cells[3].find('span')
        if pop is None:
            continue  # no value -> omit this city
        pops.append(pop.text.replace(",", ""))            # strip thousands separators
        names.append(cells[0].text.replace("\n", ""))     # city name, first column

# generate a dataframe based on the city names & populations
df = pd.DataFrame({"city": names, "population": pops})
df.population = df.population.astype(int)  # cast strings to integer values
df
# + [markdown] id="fblx_me5qOGL" colab_type="text"
# ✏️ *Try to extract another column*
# + [markdown] colab_type="text" id="adgiES-8GtPg"
# ## Examining
#
# You are now able to load and parse data from several formats. At this point, there are plenty of ways to inspect these datasets. We are going to try some simple methods to peek around the datasets. Once you have a tabular dataset ready as a DataFrame, there are quite a few convenient methods to view and explore its contents.
# + [markdown] colab_type="text" id="9Oqcw_IZEv3L"
# ### Head & tail
#
# You could start with looking at the beginning of the dataset with `head()`:
# + colab_type="code" id="-b-6QczauzyR" colab={"base_uri": "https://localhost:8080/", "height": 479} executionInfo={"status": "ok", "timestamp": 1598364874906, "user_tz": -120, "elapsed": 10495, "user": {"displayName": "<NAME>\u00f6rk", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="a4ad3feb-8211-4492-e1b5-e11d95f40d1b"
# First five rows of the kitas table.
kitas.head()
# + [markdown] colab_type="text" id="UjYbpgygmWFO"
# ✏️ *What do you think happens, when you replace `head()` with `tail()` ?*
# + id="ZhpO2D13zare" colab_type="code" colab={}
# + [markdown] colab_type="text" id="lfm3uCoTmTqZ"
# ### Describe & info
#
# You can also ask Pandas to provide some statistical descriptions (which are only applied to the columns containing numeric data):
#
# + colab_type="code" id="g7tmvt4Lmi5-" colab={"base_uri": "https://localhost:8080/", "height": 317} executionInfo={"status": "ok", "timestamp": 1598364874907, "user_tz": -120, "elapsed": 10482, "user": {"displayName": "<NAME>6rk", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="0016c7a5-2bb4-40f4-8ae4-9ce406235eb0"
# Summary statistics for the numeric columns only.
kitas.describe()
# + [markdown] colab_type="text" id="bZFFTr82nafl"
# ✏️ *Try any of the other datasets that we parsed above, e.g., `anscombe`*
# + [markdown] colab_type="text" id="XZoDMYJ_tfWB"
# This may not seem that useful to you yet. You may want to know what kind of datatypes the different columns contain and how many values are present. For this the `info()` method will be of help:
# + colab_type="code" id="Q74i1KdvoO7g" colab={"base_uri": "https://localhost:8080/", "height": 969} executionInfo={"status": "ok", "timestamp": 1598364874908, "user_tz": -120, "elapsed": 10470, "user": {"displayName": "<NAME>\u00f6rk", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="542ae020-82bb-4da4-dbe5-de66a136a3da"
# Column dtypes and non-null counts.
kitas.info()
# + [markdown] colab_type="text" id="Rk29BztPAVcX"
# With this it is now possible to access specific columns by using their names. But did you notice the long label for the first column? Let's rename the column `name_der_kindertagesbetreuungseinrichtung` into something short and sweet such as: `name`:
# + colab_type="code" id="4cb4w4FeAbQq" colab={}
# Shorten the unwieldy facility-name column label to just "name".
kitas = kitas.rename(columns={"name_der_kindertagesbetreuungseinrichtung": "name"})
# + [markdown] colab_type="text" id="aKnvJFExnhpL"
# ✏️ *Do you want to rename any other columns?*
# + id="p4He-0T8zppE" colab_type="code" colab={}
# + [markdown] id="DdG-stZV0JWz" colab_type="text"
# ### Select & query
# + [markdown] colab_type="text" id="kj_SJAC_0e4c"
# We can select an individual column using single [square brackets]:
# + colab_type="code" id="lsYAH1cu0foN" colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"status": "ok", "timestamp": 1598364874909, "user_tz": -120, "elapsed": 10439, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="7d9f6b00-5253-4c5b-ff77-7f511e3c5590"
# Single-column selection returns a Series.
kitas["name"]
# + [markdown] colab_type="text" id="BpOYViZF0WTC"
# … and we can select multiple columns using nested [[square brackets]]:
# + colab_type="code" id="gxxYaH_r0bWP" colab={"base_uri": "https://localhost:8080/", "height": 419} executionInfo={"status": "ok", "timestamp": 1598364874909, "user_tz": -120, "elapsed": 10412, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="12723375-ac11-44e0-a1ad-a8212b748fa8"
# Selecting a list of columns returns a DataFrame.
kitas[["name", "betrieb_e"]]
# + [markdown] colab_type="text" id="84LW2qoWnsrW"
# ✏️ *Which columns interest you? Replace `name` and `betrieb_e` with other column labels*
# + [markdown] colab_type="text" id="R-6AMfHUqYCd"
# Note that the `betrieb_e` columns contains a lot of `NaN` - this stands for "Not a Number" and it means here that values are missing.
#
# In order to focus on the rows which do have missing data, we can squeeze in a requirement that we only want those rows, where the values in column `betrieb_e` are not missing, i.e., `notnull()`:
# + colab_type="code" id="9csxhYgUqiDL" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1598364874910, "user_tz": -120, "elapsed": 10399, "user": {"displayName": "<NAME>00f6rk", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="e8d8ad7d-d131-4929-d1a0-e87be46c8018"
# Keep only rows where betrieb_e is present, then show the two columns.
has_betrieb = kitas["betrieb_e"].notnull()
kitas[has_betrieb][["name", "betrieb_e"]]
# + [markdown] id="VVrgqKBYlhJc" colab_type="text"
# ✏️ *Formulate a query on another column:*
# + id="jRPD_yB0z6Rh" colab_type="code" colab={}
# + [markdown] colab_type="text" id="GtlRXt_PsGOg"
# There are four related methods for accessing rows, columns, and specific values, either by integer positions (iloc and iat) or by the labels (that is what is displayed in bold above).
#
# - `loc`: access rows and columns by label
# - `iloc`: access rows and columns by integer position
# - `at`: access a single value for a row/column label pair
# - `iat`: access a single value for a row/column pair by integer position
#
# For example, this way we can get the first entry in the `kitas` DataFrame:
# + colab_type="code" id="bMWeSrCbtQp7" colab={"base_uri": "https://localhost:8080/", "height": 867} executionInfo={"status": "ok", "timestamp": 1598364874911, "user_tz": -120, "elapsed": 10382, "user": {"displayName": "<NAME>\u00f6rk", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="7147a158-695d-4d53-bb9e-db491110e826"
# First entry by index label.
kitas.loc[0]
# because the index here uses integers, iloc and loc do the same
# + [markdown] colab_type="text" id="odyKUoYOvqAv"
# Finally, you can also retrieve rows that match a query. With this we are retrieving the names of the kitas who are operated by AWO:
# + colab_type="code" id="vxE4SwnBvvHT" colab={"base_uri": "https://localhost:8080/", "height": 391} executionInfo={"status": "ok", "timestamp": 1598364874912, "user_tz": -120, "elapsed": 10368, "user": {"displayName": "<NAME>\u00f6rk", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="426239ec-fc58-4c37-f322-7c7c9af8705f"
# Names of all facilities whose name contains the substring "AWO".
kitas[kitas['name'].str.contains('AWO')] ["name"]
# + [markdown] colab_type="text" id="WfTvZuiIoCe0"
# ✏️ *How about all the entries that have "Hort" in their name?*
# + [markdown] colab_type="text" id="F78Vt2vd098-"
# … or those who have sparrows in their name. Because ‘Spatzen’ is occurring both by itself and as part of compound words, we are querying for uppercase and lowercase versions using the IGNORECASE flag of the regular expression module `re`:
# + colab_type="code" id="6ixit7S4xZxH" colab={"base_uri": "https://localhost:8080/", "height": 119} executionInfo={"status": "ok", "timestamp": 1598364874912, "user_tz": -120, "elapsed": 10357, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhqiajTdRS6NXELnqFzS3NNM3uxy6nUDGSMCjvAjw=s64", "userId": "05248840544598202773"}} outputId="5acbfa0b-8c92-4cb4-ba7e-c634f162cf65"
import re # regular expressions module, use for a case-insensitive query
# Case-insensitive substring match so 'Spatzen' is found inside compound words too.
kitas[kitas['name'].str.contains('Spatzen', flags=re.IGNORECASE, regex=True)] ["name"]
# + [markdown] colab_type="text" id="5vcAaftO1He8"
# ✏️ *Now it's time for you to wrangle data! If you have not done so yet, follow all the pencils and change things around!*
# + [markdown] colab_type="text" id="PyJgW6U-DtiX"
# ## Sources
# - [Pandas Tutorial: DataFrames in Python - DataCamp](https://www.datacamp.com/community/tutorials/pandas-tutorial-dataframe-python)
# - [The ElementTree XML API](https://docs.python.org/2/library/xml.etree.elementtree.html)
# - [Where do Mayors Come From? Querying Wikidata with Python and SPARQL - Towards Data Science](https://towardsdatascience.com/where-do-mayors-come-from-querying-wikidata-with-python-and-sparql-91f3c0af22e2)
# - [External data: Local Files, Drive, Sheets, and Cloud Storage - Colaboratory](https://colab.research.google.com/notebooks/io.ipynb)
# - [Loading data: Drive, Sheets, and Google Cloud Storage](https://colab.research.google.com/notebooks/io.ipynb)
# - [Examining Data Using Pandas | Linux Journal](https://www.linuxjournal.com/content/examining-data-using-pandas)
# - [Beautiful Soup Documentation](https://www.crummy.com/software/BeautifulSoup/bs4/doc/)
| infovis2data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Multiple Linear Regression
#
# Now you know how to build a model with one X (feature variable) and Y (response variable). But what if you have three feature variables, or may be 10 or 100? Building a separate model for each of them, combining them, and then understanding them will be a very difficult and next to impossible task. By using multiple linear regression, you can build models between a response variable and many feature variables.
#
# Let's see how to do that.
# ### Step_1 : Importing and Understanding Data
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# Importing advertising.csv: TV/Radio/Newspaper ad budgets with Sales as the response.
advertising_multi = pd.read_csv('./advertising.csv')
# Looking at the first five rows
advertising_multi.head()
# Looking at the last five rows
advertising_multi.tail()
# What type of values are stored in the columns?
advertising_multi.info()
# Let's look at some statistical information about our dataframe.
advertising_multi.describe()
# ### Step_2: Visualising Data
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Let's plot a pair plot of all variables in our dataframe
sns.pairplot(advertising_multi)
# Visualise the relationship between the features and the response using scatterplots
# NOTE(review): the `size` keyword was renamed to `height` in seaborn >= 0.9;
# update this call if running on a recent seaborn version.
sns.pairplot(advertising_multi, x_vars=['TV','Radio','Newspaper'], y_vars='Sales',size=7, aspect=0.7, kind='scatter')
# ### Step_3: Splitting the Data for Training and Testing
# +
# Putting feature variable to X
X = advertising_multi[['TV','Radio','Newspaper']]
# Putting response variable to y
y = advertising_multi['Sales']
# -
#random_state is the seed used by the random number generator. It can be any integer.
# 70/30 train/test split, reproducible via the fixed seed.
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7 , random_state=100)
# ### Step_4 : Performing Linear Regression
from sklearn.linear_model import LinearRegression
# Create the LinearRegression object (held in `lm`).
lm = LinearRegression()
# fit the model to the training data
lm.fit(X_train,y_train)
# ### Step_5 : Model Evaluation
# print the intercept
print(lm.intercept_)
# Let's see the coefficient
# NOTE(review): X_test.columns works only because it equals X_train.columns;
# X_train.columns would be the clearer choice here.
coeff_df = pd.DataFrame(lm.coef_,X_test.columns,columns=['Coefficient'])
coeff_df
# From the above result we may infer that if the TV budget increases by 1 unit it will affect sales by 0.045 units.
# ### Step_6 : Predictions
# Making predictions on the held-out test set using the fitted model.
y_pred = lm.predict(X_test)
# ### Step_7: Calculating Error Terms
from sklearn.metrics import mean_squared_error, r2_score
# Test-set error and goodness-of-fit for the three-feature model.
mse = mean_squared_error(y_test, y_pred)
r_squared = r2_score(y_test, y_pred)
print('Mean_Squared_Error :' ,mse)
print('r_square_value :',r_squared)
# ### Optional Step : Checking for P-value Using STATSMODELS
# +
import statsmodels.api as sm

# Refit with statsmodels to get p-values and a full regression summary.
X_train_sm = X_train
#Unlike SKLearn, statsmodels don't automatically fit a constant,
#so you need to use the method sm.add_constant(X) in order to add a constant.
X_train_sm = sm.add_constant(X_train_sm)
# create a fitted model in one line
lm_1 = sm.OLS(y_train,X_train_sm).fit()
# print the coefficients
lm_1.params
# -
print(lm_1.summary())
# From the above we can see that Newspaper is insignificant.
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Correlation heatmap across all variables, with values annotated.
plt.figure(figsize = (5,5))
sns.heatmap(advertising_multi.corr(),annot = True)
# ### Step_8 : Implementing the results and running the model again
# From the data above, you can conclude that Newspaper is insignificant.
# Removing Newspaper from our dataset (found insignificant above).
X_train_new = X_train[['TV','Radio']]
X_test_new = X_test[['TV','Radio']]
# Model building — note this refits the existing `lm` object in place.
lm.fit(X_train_new,y_train)
# Making predictions with the two-feature model.
y_pred_new = lm.predict(X_test_new)
#Actual vs Predicted
# Index 1..60 matches the 60 test rows (30% of the 200 samples).
c = [i for i in range(1,61,1)]
fig = plt.figure()
plt.plot(c,y_test, color="blue", linewidth=2.5, linestyle="-")
# Fixed: after refitting on TV+Radio only, the comparison must use the new
# predictions (y_pred_new); the original plotted the stale three-feature y_pred.
plt.plot(c,y_pred_new, color="red", linewidth=2.5, linestyle="-")
fig.suptitle('Actual and Predicted', fontsize=20) # Plot heading
plt.xlabel('Index', fontsize=18) # X-label
plt.ylabel('Sales', fontsize=16) # Y-label
# Error terms
c = [i for i in range(1,61,1)]
fig = plt.figure()
# Fixed here as well: residuals of the refitted model, not the old one.
plt.plot(c,y_test-y_pred_new, color="blue", linewidth=2.5, linestyle="-")
fig.suptitle('Error Terms', fontsize=20) # Plot heading
plt.xlabel('Index', fontsize=18) # X-label
plt.ylabel('ytest-ypred', fontsize=16) # Y-label
from sklearn.metrics import mean_squared_error, r2_score
# Error and R² for the two-feature (TV + Radio) model.
mse = mean_squared_error(y_test, y_pred_new)
r_squared = r2_score(y_test, y_pred_new)
print('Mean_Squared_Error :' ,mse)
print('r_square_value :',r_squared)
# +
# Statsmodels summary for the final two-feature model.
X_train_final = X_train_new
#Unlike SKLearn, statsmodels don't automatically fit a constant,
#so you need to use the method sm.add_constant(X) in order to add a constant.
X_train_final = sm.add_constant(X_train_final)
# create a fitted model in one line
lm_final = sm.OLS(y_train,X_train_final).fit()
print(lm_final.summary())
# -
# ### Model Refinement Using RFE
# The goal of recursive feature elimination (RFE) is to select features by recursively considering smaller and smaller sets of features. First, the estimator is trained on the initial set of features and the importance of each feature is obtained either through a coef_ attribute or through a feature_importances_ attribute. Then, the less important features are pruned from the current set of features. This procedure is recursively repeated on the pruned dataset until the desired number of features to select is reached.
from sklearn.feature_selection import RFE
# Keep the 2 best features by recursive elimination.
# Fixed: n_features_to_select is keyword-only in recent scikit-learn, so the
# positional call RFE(lm, 2) raises a TypeError there; the keyword form also
# works on older versions.
rfe = RFE(lm, n_features_to_select=2)
rfe = rfe.fit(X_train, y_train)
print(rfe.support_)   # boolean mask of selected features
print(rfe.ranking_)   # 1 = selected; higher = eliminated earlier
# ### Simple Linear Regression: Newspaper(X) and Sales(y)
# +
import pandas as pd
import numpy as np

# Importing dataset
advertising_multi = pd.read_csv('advertising.csv')

x_news = advertising_multi['Newspaper']
y_news = advertising_multi['Sales']

# Data Split
# Fixed: sklearn.cross_validation was removed in scikit-learn 0.20; the split
# function lives in sklearn.model_selection (consistent with the import used
# earlier in this notebook).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x_news, y_news,
                                                    train_size=0.7 ,
                                                    random_state=110)

# Required only in the case of simple linear regression: sklearn expects a
# 2-D feature matrix, so reshape the 1-D Series into an (n, 1) array.
# Fixed: multi-dimensional indexing on a Series (X_train[:, np.newaxis]) is
# not supported by modern pandas; convert to a NumPy array first.
X_train = X_train.to_numpy()[:, np.newaxis]
X_test = X_test.to_numpy()[:, np.newaxis]

# Linear regression from sklearn
from sklearn.linear_model import LinearRegression
lm = LinearRegression()

# Fitting the model
lm.fit(X_train,y_train)

# Making predictions
y_pred = lm.predict(X_test)

# Importing mean square error and r square from sklearn library.
from sklearn.metrics import mean_squared_error, r2_score

# Computing mean square error and R square value
mse = mean_squared_error(y_test, y_pred)
r_squared = r2_score(y_test, y_pred)

# Printing mean square error and R square value
print('Mean_Squared_Error :' ,mse)
print('r_square_value :',r_squared)
# -
| Multiple_Linear_Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
import shutil
import itertools
import imutils
# %matplotlib inline
# Load one positive MRI sample and report its dimensions.
img_path = 'brain_tumor_dataset/yes/Y1.jpg'
image = cv2.imread(img_path)
print('width: {} pixels'.format(image.shape[1]))
print('height: {} pixels'.format(image.shape[0]))
print('channels: {}'.format(image.shape[2]))
dim = (500, 590)  # target (width, height) in pixels
image = cv2.resize(image, dim)
plt.imshow(image)
plt.show()
# cvtColor takes (src, code) here; the original stray third argument (0.7)
# would occupy the `dst` output-buffer slot and is invalid, so it is dropped.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
plt.imshow(gray, cmap='gray')
# Binarize: everything brighter than 50 becomes white (255).
(T, thresh) = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)
plt.imshow(thresh, cmap='gray')
# +
#(T, threshInv) = cv2.threshold(gray, 115, 255,cv2.THRESH_BINARY_INV)
#plt.imshow(threshInv,cmap='gray')
# -
# Close small gaps in the thresholded mask, then erode away small blobs and
# dilate back so the large brain region survives as one connected component.
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 5))
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
closed = cv2.erode(closed, None, iterations = 6)
closed = cv2.dilate(closed, None, iterations = 10)
plt.imshow(closed,cmap='gray')
def auto_canny(image, sigma=0.33):
    """Run Canny edge detection with thresholds derived from the image.

    The lower/upper thresholds are set at (1 - sigma) and (1 + sigma)
    times the median pixel intensity, clamped to [0, 255].
    """
    med = np.median(image)
    lo = int(max(0, (1.0 - sigma) * med))
    hi = int(min(255, (1.0 + sigma) * med))
    return cv2.Canny(image, lo, hi)
canny = auto_canny(image)
plt.imshow(canny, cmap='gray')
# cv2.findContours returns (contours, hierarchy) in OpenCV 4 — the original
# three-value unpack only worked on OpenCV 3.  imutils.grab_contours picks
# the contour list from either return signature (same pattern used below).
cnts = imutils.grab_contours(
    cv2.findContours(canny.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
cnt_image = image.copy()
cv2.drawContours(cnt_image, cnts, -1, (0, 0, 255), 2)
plt.imshow(cnt_image, cmap='gray')
# +
# find contours in thresholded image, then grab the largest one
cnts = cv2.findContours(canny.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
c = max(cnts, key=cv2.contourArea)
# find the extreme points (min/max along x, then along y) of the contour
extLeft = tuple(c[c[:, :, 0].argmin()][0])
extRight = tuple(c[c[:, :, 0].argmax()][0])
extTop = tuple(c[c[:, :, 1].argmin()][0])
extBot = tuple(c[c[:, :, 1].argmax()][0])
# add contour on the image
img_cnt = cv2.drawContours(image.copy(), [c], -1, (0, 255, 255), 4)
plt.imshow(img_cnt,cmap='gray')
# +
# add extreme points (one coloured dot per extreme)
img_pnt = cv2.circle(img_cnt.copy(), extLeft, 8, (0, 0, 255), -1)
img_pnt = cv2.circle(img_pnt, extRight, 8, (0, 255, 0), -1)
img_pnt = cv2.circle(img_pnt, extTop, 8, (255, 0, 0), -1)
img_pnt = cv2.circle(img_pnt, extBot, 8, (255, 255, 0), -1)
# crop to the bounding box of the extreme points (ADD_PIXELS = margin)
ADD_PIXELS = 0
new_img = image[extTop[1]-ADD_PIXELS:extBot[1]+ADD_PIXELS, extLeft[0]-ADD_PIXELS:extRight[0]+ADD_PIXELS].copy()
plt.imshow(new_img,cmap='gray')
# -
plt.figure(figsize=(15,6))
# Show the four pipeline stages side by side in one row (subplots 141-144).
panels = [
    (image, 'Step 1. Get the original image'),
    (img_cnt, 'Step 2. Find the biggest contour'),
    (img_pnt, 'Step 3. Find the extreme points'),
    (new_img, 'Step 4. Crop the image'),
]
for pos, (im, caption) in zip(range(141, 145), panels):
    plt.subplot(pos)
    plt.imshow(im)
    plt.xticks([])
    plt.yticks([])
    plt.title(caption)
plt.show()
# +
def crop_brain_contour(image, plot=False):
    """Crop *image* to the bounding box of its largest contour (the brain).

    Grayscale + blur + fixed threshold (45) isolates the bright region,
    erosion/dilation removes small specks, the largest external contour is
    taken, and the image is cropped at that contour's four extreme points.
    If *plot* is True, the original and cropped images are displayed.
    """
    blurred = cv2.GaussianBlur(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), (5, 5), 0)
    mask = cv2.threshold(blurred, 45, 255, cv2.THRESH_BINARY)[1]
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # Largest external contour = the brain outline.
    contours = imutils.grab_contours(
        cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
    biggest = max(contours, key=cv2.contourArea)
    # Extreme points of that contour (x-min/max, y-min/max).
    left = tuple(biggest[biggest[:, :, 0].argmin()][0])
    right = tuple(biggest[biggest[:, :, 0].argmax()][0])
    top = tuple(biggest[biggest[:, :, 1].argmin()][0])
    bottom = tuple(biggest[biggest[:, :, 1].argmax()][0])
    # Crop the original image using the four extreme points.
    new_image = image[top[1]:bottom[1], left[0]:right[0]]
    if plot:
        plt.figure()
        for panel, (im, caption) in enumerate(
                [(image, 'Original Image'), (new_image, 'Cropped Image')], start=1):
            plt.subplot(1, 2, panel)
            plt.imshow(im)
            plt.tick_params(axis='both', which='both', top=False, bottom=False,
                            left=False, right=False, labelbottom=False,
                            labeltop=False, labelleft=False, labelright=False)
            plt.title(caption)
        plt.show()
    return new_image

x_crop_img = crop_brain_contour(image, True)
# -
| opencv BT projects/Brain MRI Images for Brain Tumor Detection/Example1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Tuples
# *Tuples* are sequences like lists, except immutable.
# +
my_tuple = (5, 10)
# can't do the following — tuples are immutable, so item assignment raises TypeError
# my_tuple[0] = 7
print(my_tuple[0])
also_a_tuple = (4, 8, 12)
print(also_a_tuple[1])
# -
# Why would we want such a type? Well, for instance, we might be assigning something like GPS coordinates to cities: since we don't expect the cities to be roaming around, we don't want the coordinates changed: tuples do the job! (Note: this is an actual use for mixing types in a sequence.)
NYC_COORDS = (40.7128, "N", 74.0060, "W")
MILFORD_COORDS = (41.3223, "N", 74.8024, "W")
# Or consider our earlier example of a list containing the names of the days of the week. Do we really think these might need to be changed *in the middle of a run of our program*? (Of course, in the future, we might rename some day, but it will be a *major* change announced way in advance.) So we might better write:
WEEKDAYS = ("Monday", "Tuesday", "Wednesday",
            "Thursday", "Friday")
# Parentheses without a comma inside are regarded as grouping, like `6 * (3 + 4)`, not as a tuple:
# +
is_a_tuple = (1, 0)
print(type(is_a_tuple))
# No comma inside the parentheses -> this is just a grouped string, not a tuple.
not_a_tuple = ("Hello")
print(type(not_a_tuple))
# The trailing comma is what makes a one-element tuple.
might_be_a_tuple = ("Hello",)
print(type(might_be_a_tuple))
print(len(might_be_a_tuple))
# +
# Multiple assignment unpacks the right-hand tuple into x and y...
x, y = 'a', 3.14159
# ...while a single target packs the comma-separated values into a tuple.
z = 'a', 3.14159
print(x)
print(y)
print(type(z))
# Packing also works without parentheses on the right-hand side.
z = x, y
print(z)
print(type(z))
# -
# ### Lists versus tuples
#
# - operators `+` and `*` work with tuples
# - slicing also works
# - membership (`in`) and `for` iteration are the same as with lists.
# - `len`, `min`, `max`, `>`, `<`, `>=`, `<=`, `sum` work on tuples
# - operations that change lists don't work on tuples (`extend`, `insert`, `remove`, `pop`, `reverse`, `sort`)
# Membership testing and iteration work on tuples just as on lists.
print("Tuesday" in WEEKDAYS)
for day in WEEKDAYS:
    print(day, "is a weekday.")
# `+` builds a brand-new tuple; both names below refer to the same object.
new_tuple = NYC_COORDS + WEEKDAYS
new_tuple_alt_name = new_tuple
print(new_tuple)
print(new_tuple_alt_name)
print("ID of new_tuple:", id(new_tuple),
      "ID of new_tuple_alt_name:",
      id(new_tuple_alt_name))
print("Is new_tuple same object as new_tuple_alt_name?",
      new_tuple is new_tuple_alt_name)
# Rebinding new_tuple creates a different object; the alias keeps the old one.
new_tuple = WEEKDAYS * 4
print(new_tuple)
print("*" * 40)
print(new_tuple_alt_name)
print("ID of new_tuple:", id(new_tuple),
      "ID of new_tuple_alt_name:",
      id(new_tuple_alt_name))
print("Is new_tuple same object as new_tuple_alt_name?",
      new_tuple is new_tuple_alt_name)
# Equal in value but (normally) not the identical object.
new_tuple_alt_name = WEEKDAYS * 4
print("Is new_tuple_alt_name equal in value to new_tuple?",
      new_tuple_alt_name == new_tuple)
print("Is new_tuple same object as new_tuple_alt_name?",
      new_tuple is new_tuple_alt_name)
# We can create tuples from lists:
a_list = [2, 4, 6, 8]
a_tuple = tuple(a_list)
# A tuple never compares equal to a list, even with the same elements.
print("tuple == list?", a_tuple == a_list)
a_list[0] = 0
print("tuple:", a_tuple)
print("list:", a_list)
print("tuple == list?", a_tuple == a_list)
# can't modify the tuple
# a_tuple[0] = 0
print(max(345, 56, 12, 678, 3, 0))
(345, 56, 12, 678, 3, 0).__len__()
# Deliberate demonstration: tuples have no reverse() method (it mutates),
# so this last line raises AttributeError.
(345, 56, 12, 678, 3, 0).reverse()
| notebooks/Tuples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# +
from __future__ import print_function
from pyspark.ml.regression import LinearRegression
from pyspark.sql import SparkSession
from pyspark.ml.linalg import Vectors
# -
# Create (or reuse) a SparkSession with an explicit warehouse directory.
spark = SparkSession.builder.config('spark.sql.warehouse.dir','file:///home/sudipto21048867/Pyspark/spark-warehouse')\
    .appName("LinearRegression")\
    .getOrCreate()
# +
# Load up the data and convert it to the (label, features-vector) form MLlib expects
inputLines = spark.sparkContext.textFile('/user/sudipto21048867/data/regression_data/')
# -
inputLines.count()
inputLines.take(5)
# Each CSV line "label,feature" -> (float label, dense 1-element feature vector).
data = inputLines.map(lambda data : data.split(",")).map(lambda x : (float(x[0]), Vectors.dense(float(x[1]))))
#Convert this RDD to DataFrame
colNames = ["label","features"]
df = data.toDF(colNames)
df.show(5)
# Note, there are lots of cases where you can avoid going from an RDD to a DataFrame.
# Perhaps you're importing data from a real database. Or you are using structured streaming
# to get your data.
# 50/50 random train/test split.
trainTest = df.randomSplit([0.5,0.5])
trainingDF = trainTest[0]
testingDF = trainTest[1]
# Now create our linear regression model (elastic-net regularised)
lir = LinearRegression(maxIter=10, regParam=0.3, elasticNetParam=0.8)
#Train the model with the training data
model = lir.fit(trainingDF)
# +
# Now see if we can predict values in our test data.
# Generate predictions using our linear regression model for all features in our
# test dataframe:
fullPredictions = model.transform(testingDF).cache()
# -
fullPredictions.show(5)
# Extract the predictions and the "known" correct labels.
predictions = fullPredictions.select("prediction").rdd.map(lambda x: x[0])
labels = fullPredictions.select("label").rdd.map(lambda x: x[0])
# Zip them together
predictionAndLabel = predictions.zip(labels).collect()
# Print out the predicted and actual values for each point
for prediction in predictionAndLabel:
    print(prediction)
| Linear_Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="bDCamVkMFW0V" outputId="52cbad89-a095-4f47-ab2e-0b717f6c0dba" colab={"base_uri": "https://localhost:8080/", "height": 34}
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="AP__nfLlT-Gp"
# #Importing Libraries
# + id="BRhpM8V7FXk5"
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import cv2
import math
from google.colab.patches import cv2_imshow
from skimage.util import random_noise
from PIL import Image
import random
import glob
# NOTE(review): skimage.measure.compare_ssim/compare_mse/compare_psnr were
# removed in scikit-image 0.18 (they moved to skimage.metrics); this import
# only works on older scikit-image versions.
from skimage.measure import compare_ssim,compare_mse,compare_psnr
# + [markdown] id="mD3ZY_-McAWa"
# # #Loading Image
# + id="5VC0OeYmcb5y"
# Load the reference image and convert BGR -> single-channel grayscale.
inp_img = cv2.imread('/content/drive/My Drive/Minor_6th_sem/black_n_white_image.jpeg')
gray_img = cv2.cvtColor(inp_img, cv2.COLOR_BGR2GRAY)
# + id="5dPzifGhFu8i" outputId="bae791c6-5b9e-49bd-decd-92dd08e42269" colab={"base_uri": "https://localhost:8080/", "height": 34}
gray_img.shape
# + id="JebH6p_-eCAG" outputId="6334cc47-a31e-4e17-b394-3efbb84b4f24" colab={"base_uri": "https://localhost:8080/", "height": 141}
gray_img
# + id="Q5saLRdqFzXP" outputId="6b7a318a-3e64-4d6d-b46a-245e0c9ba508" colab={"base_uri": "https://localhost:8080/", "height": 273}
cv2_imshow(gray_img)
# + [markdown] id="0jyRmeNBcD8H"
# #Adding Noise
# + id="yVmwy_nfF3iK"
noise_img = gray_img.copy()
# + id="YKC89zdYQxnU"
# Corrupt 50% of the pixels with salt-and-pepper noise.  random_noise
# returns a float image in [0, 1], so scale back to uint8 [0, 255].
noise = random_noise(noise_img, mode='s&p', amount=0.5)
noise = (255*noise).astype(np.uint8)
#noise_img = Image.fromarray(noise)
# + id="4UhizHiqjRMR" outputId="b9349da5-82c5-4cec-a2de-6176609580af" colab={"base_uri": "https://localhost:8080/", "height": 141}
noise
# + id="R8BW4NWtjOpy"
# Jitter pure salt/pepper values into small bands [0, m] and [255-m, 255]
# so the noise is "near-extreme" rather than exactly 0/255.
# NOTE(review): the loops run to shape-1, so the last row and column are
# never jittered — TODO confirm whether that exclusion is intentional.
m=4;
for x in range(0,noise.shape[0]-1):
    for y in range(0, noise.shape[1]-1):
        if(noise[x][y]==0):
            noise[x][y]=random.randint(0,m)
        elif(noise[x][y]==255):
            noise[x][y]=random.randint(255-m,255)
# + id="oH9a_yJEiBvm" outputId="9fb31170-6712-427c-c52e-048241287b00" colab={"base_uri": "https://localhost:8080/", "height": 141}
noise
# + id="VubfBPqlpJJL"
noise_img = Image.fromarray(noise)
# + id="YcmLM13IhjVs" outputId="94300ddf-71e0-4cb7-b88a-7d60abf319e5" colab={"base_uri": "https://localhost:8080/", "height": 273}
noise_img
# + id="xqW2CZFOR14o" outputId="e0604883-d1e5-42c7-a730-f1db370c0f8a" colab={"base_uri": "https://localhost:8080/", "height": 34}
noise_img.size
# + id="z_1vbo3NSLuL"
# Back to a numpy array for the rest of the pipeline.
noise_img = np.asarray(noise_img)
# + id="TZv30iQSW9t4" outputId="1e89e4b5-5565-4489-f8aa-733d70e434b4" colab={"base_uri": "https://localhost:8080/", "height": 141}
gray_img
# + id="0XUggpI_SmkK" outputId="977a51e5-8c05-4d79-cd4a-8359f03897a1" colab={"base_uri": "https://localhost:8080/", "height": 141}
noise_img
# + id="CV4-i9SnSmhg" outputId="65f7efe9-03eb-451a-cb18-206719de5685" colab={"base_uri": "https://localhost:8080/", "height": 34}
noise_img.shape
# + id="t1Dj7c7iSmfO" outputId="61f159e8-c7fd-4e55-e205-fbca89f7fcf6" colab={"base_uri": "https://localhost:8080/", "height": 34}
noise_img.size
# + id="GikavlAkFRNI" outputId="190c8ad2-6fd8-4d2b-ea56-608821d28f85" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Count how many pixels actually differ from the clean image.
count3=0
for x in range(0,noise_img.shape[0]-1):
    for y in range(0, noise_img.shape[1]-1):
        if(noise_img[x][y]!=gray_img[x][y]):
            count3+=1;
count3
# + [markdown] id="9Q353XKRcIP1"
# #Padding
# + id="SjQWPmxfGH6B"
# Replicate the border 3 pixels outward so the later 5x5/7x7 windows can be
# evaluated at every original pixel without going out of bounds.
#edge_padding for 3 extra edges
padded_img=np.pad(noise_img,3,mode='edge')
# + id="iworVxPSGRu3" outputId="e6eae923-74a3-46df-a4a6-acb47c416420" colab={"base_uri": "https://localhost:8080/", "height": 141}
padded_img
# + id="sLuJyUeVGUxY" outputId="6ee9f8f6-12a5-4596-9b1d-cbaf1af980df" colab={"base_uri": "https://localhost:8080/", "height": 34}
padded_img.shape
# + [markdown] id="vKc5c9rGcKxT"
# #Detection
# + id="a-lB12V1GBZR"
# noisy_pixels is the detection map: a pixel later set to 0 means "noisy".
noisy_pixels = padded_img.copy()
# + id="VeFlW41Q5MFl" outputId="9bece584-22ab-4548-e85c-7ecfff77c42e" colab={"base_uri": "https://localhost:8080/", "height": 34}
padded_img.shape[0]
# + id="caGHyo1O5PQB" outputId="715e74fe-a2a6-49f9-f84e-abd6c71f54c5" colab={"base_uri": "https://localhost:8080/", "height": 34}
padded_img.shape[1]
# + id="clK-UHznGXms" outputId="58226b5f-212f-46c4-82b4-3a00d046560b" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Three-level noise detection over every interior pixel of padded_img.
# Level 1: compare the pixel's mean absolute deviation (NS) against a
#   threshold T1 built from the 5x5 neighbourhood statistics.
# Level 2: check the pixel against mean_a +/- 0.5*std_a of the window.
# Level 3: check against the neighbourhood quartiles.
# A pixel flagged at any level is marked noisy (noisy_pixels[x][y] = 0).
# NOTE(review): every "near-extreme" test below uses `m` (cp<=m or
# cp>=255-m), but by that point `m` is the last value of the window loop
# variable (x+2 / y+2), not the noise margin constant m=4 set earlier —
# this looks like an accidental shadowing bug; confirm against the
# intended algorithm.
#LEVEL 1:
no_of_noisy_pixels=0;
no_of_noise_free_pixels=0;
i=0;
for x in range(3,padded_img.shape[0]-3):
    for y in range(3, padded_img.shape[1]-3):
        print('pixel',i);
        i+=1;
        cp=padded_img[x][y];   # current (central) pixel value
        #Step 1: mean and standard deviation of the 5x5 window, excluding cp.
        mean_a=0;
        standard_deviation_a=0;
        for m in range(x-2,x+3):
            for n in range(y-2,y+3):
                if(((x==m) and (y==n))):
                    mean_a=mean_a+0;
                else:
                    mean_a=mean_a+padded_img[m,n];
        mean_a=mean_a/24;
        for m in range(x-2,x+3):
            for n in range(y-2,y+3):
                if(((x==m) and (y==n))):
                    standard_deviation_a=standard_deviation_a+0;
                else:
                    standard_deviation_a=standard_deviation_a+pow((padded_img[m,n]-mean_a),2);
        standard_deviation_a=standard_deviation_a/24;
        standard_deviation_a=math.sqrt(standard_deviation_a);
        #***************************************************************************************************#
        #Step 2 and 3: mean/std of absolute deviations from mean_a -> threshold T1.
        mean_p=0;
        standard_deviation_p=0;
        for m in range(x-2,x+3):
            for n in range(y-2,y+3):
                if(((x==m) and (y==n))):
                    mean_p=mean_p+0;
                else:
                    mean_p=mean_p+abs((mean_a-padded_img[m,n]));
        mean_p=mean_p/24;
        for m in range(x-2,x+3):
            for n in range(y-2,y+3):
                if(((x==m) and (y==n))):
                    standard_deviation_p=standard_deviation_p+0;
                else:
                    standard_deviation_p=standard_deviation_p+pow((padded_img[m,n]-mean_p),2);
        standard_deviation_p=standard_deviation_p/24;
        standard_deviation_p=math.sqrt(standard_deviation_p);
        T1=mean_p+standard_deviation_p;
        #********************************************************************************************************#
        #Step 4 and 5: mean absolute deviation of the window from cp -> noise score NS.
        mean_q=0;
        for m in range(x-2,x+3):
            for n in range(y-2,y+3):
                if(((x==m) and (y==n))):
                    mean_q=mean_q+0;
                else:
                    mean_q=mean_q+abs((cp-padded_img[m,n]));
        mean_q=mean_q/24;
        NS=mean_q;
        #********************************************************************************************************#
        #Step 6: Level-1 decision (see shadowing NOTE above about `m`).
        if((NS>T1) and ((cp<=m) or (cp>=255-m))):
            print(cp,'is noisy');
            noisy_pixels[x][y]=0;
            no_of_noisy_pixels+=1;
        #Level 2:
        #Step 7: band around the window mean.
        else:
            T2_min=mean_a-(0.5*standard_deviation_a);
            T2_max=mean_a+(0.5*standard_deviation_a);
            #Step 8: Level-2 decision.
            if((cp<=T2_min or cp>=T2_max) and (cp<=m or cp>=255-m)):
                print(cp,'is noisy');
                noisy_pixels[x][y]=0;
                no_of_noisy_pixels+=1;
            #Step 9:
            else:
                #Level 3: quartiles of the sorted 24 neighbours.
                li=[];
                for m in range(x-2,x+3):
                    for n in range(y-2,y+3):
                        if((x==m) and (y==n)):
                            continue;
                        else:
                            li.append(padded_img[m][n]);
                li.sort();
                N=24;
                Q1=int((N+1)/4);
                Q3=int((3*(N+1))/4);
                T3_min=li[Q1];
                T3_max=li[Q3];
                #Step 10: Level-3 decision; otherwise the pixel is noise-free.
                if((cp<=T3_min or cp>=T3_max) and (cp<=m or cp>=255-m)):
                    print(cp,'is noisy');
                    noisy_pixels[x][y]=0;
                    no_of_noisy_pixels+=1;
                else:
                    print(cp,'is noise-free');
                    no_of_noise_free_pixels+=1;
# + id="2VpkrExIZWFm" outputId="8e4d782f-1689-4ef9-f2d5-79ea1b04da36" colab={"base_uri": "https://localhost:8080/", "height": 34}
no_of_noisy_pixels
# + id="W8iM1Lrhgj3K" outputId="2a74d1d3-9fad-4515-f320-2acef03cec2f" colab={"base_uri": "https://localhost:8080/", "height": 34}
no_of_noise_free_pixels
# + id="2CfVMin0UijI" outputId="b94c78f9-d171-4f40-b1d2-cf559d6d3682" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Cross-check: count the 0-markers in the detection map directly.
ct=0;
for x in range(3,padded_img.shape[0]-3):
    for y in range(3, padded_img.shape[1]-3):
        if(noisy_pixels[x][y]==0):
            ct+=1;
ct
# + id="h9T9fvVbdwG2" outputId="9838747a-9d21-4c5c-b672-5e441aa1ec19" colab={"base_uri": "https://localhost:8080/", "height": 34}
padded_img.shape[0]-3
# + [markdown] id="0MRKbYN-b3ph"
# #Filtering
# + id="LcucAQoGWZaD" outputId="60b5d02b-be5c-4648-a287-eeb656ccc830" colab={"base_uri": "https://localhost:8080/", "height": 141}
noisy_pixels
# + id="nJB7BW93jxzk"
def pixel_is_noisy(x,y):
    """Return 1 if pixel (x, y) is marked noisy in the detection map, else 0."""
    return int(noisy_pixels[x][y] == 0)
def check_noise_in_3b3(x,y):
    """Return 1 if the 3x3 window around (x, y) holds any noise-free pixel.

    (Despite the name, a nonzero detection-map entry means "noise-free".)
    """
    return int(any(noisy_pixels[m][n] != 0
                   for m in range(x-1, x+2)
                   for n in range(y-1, y+2)))
def check_noise_in_5b5(x,y):
    """Return 1 if the 5x5 window around (x, y) holds any noise-free pixel."""
    return int(any(noisy_pixels[m][n] != 0
                   for m in range(x-2, x+3)
                   for n in range(y-2, y+3)))
def check_noise_in_7b7(x,y):
    """Return 1 if the 7x7 window around (x, y) holds any noise-free pixel."""
    return int(any(noisy_pixels[m][n] != 0
                   for m in range(x-3, x+4)
                   for n in range(y-3, y+4)))
def weight(m,n,x,y):
    """Weight a neighbour (m, n) by its intensity closeness to (x, y): 3/2/1."""
    diff=abs(padded_img[m][n]-padded_img[x][y])
    if diff < 5:
        return 3
    if diff < 10:
        return 2
    return 1
# Fuzzy-membership thresholds for the maximum local intensity difference.
T1=10;
T2=30;
def fuzzy(max):
    """Piecewise-linear membership: 0 below T1, 1 at/above T2, ramp between."""
    if max >= T2:
        return 1
    if max < T1:
        return 0
    return (max - T1) / (T2 - T1)
# + id="MU_JgiblbaLP" outputId="20c32df7-ec35-4fb8-d3e0-9877ce8e3ad1" colab={"base_uri": "https://localhost:8080/", "height": 70}
# Fuzzy restoration: for every pixel flagged noisy, blend the pixel with the
# "median" of weighted noise-free neighbours, searching 3x3 -> 5x5 -> 7x7
# windows, falling back to four fixed neighbours if none is found.
# NOTE(review): `max` shadows the builtin, and the median is taken over
# weight*value products (not the raw values) — looks intentional per the
# weighting scheme, but worth confirming against the reference algorithm.
filtered_img=padded_img.copy();
#Step 1 and 2:
noise_free_pixels=[]
for x in range(3,padded_img.shape[0]-3):
    for y in range(3, padded_img.shape[1]-3):
        noise_free_pixels.clear();
        max=0;
        if(pixel_is_noisy(x,y)): #check central pixel
            # Largest absolute intensity difference to any 3x3 neighbour.
            for m in range(x-1,x+2):
                for n in range(y-1,y+2):
                    if((m==x) and (n==y)):
                        continue;
                    else:
                        diff=abs(padded_img[m][n]-padded_img[x][y]);
                        if(diff>max):
                            max=diff;
            #*************************************************************************************************#
            #Step 3: fuzzy factor ff in [0, 1] from that maximum difference.
            ff=fuzzy(max);
            if(check_noise_in_3b3(x,y)): #search for noise free pixels in 3*3 matrix
                for m in range(x-1,x+2):
                    for n in range(y-1,y+2):
                        if(pixel_is_noisy(m,n)):
                            continue;
                        else:
                            wt=weight(m,n,x,y);
                            noise_free_pixels.append(wt*padded_img[m][n]);
            elif(check_noise_in_5b5(x,y)): #search for noise free pixels in 5*5 matrix
                for m in range(x-2,x+3):
                    for n in range(y-2,y+3):
                        if(pixel_is_noisy(m,n)):
                            continue;
                        else:
                            wt=weight(m,n,x,y);
                            noise_free_pixels.append(wt*padded_img[m][n]);
            elif(check_noise_in_7b7(x,y)): #search for noise free pixels in 7*7 matrix
                for m in range(x-3,x+4):
                    for n in range(y-3,y+4):
                        if(pixel_is_noisy(m,n)):
                            continue;
                        else:
                            wt=weight(m,n,x,y);
                            noise_free_pixels.append(wt*padded_img[m][n]);
            else:
                # Fallback: no noise-free pixel within 7x7 — use the three
                # already-processed neighbours plus the pixel itself.
                wt=weight(x-1,y-1,x,y);
                noise_free_pixels.append(wt*padded_img[x-1][y-1]);
                wt=weight(x,y-1,x,y);
                noise_free_pixels.append(wt*padded_img[x][y-1]);
                wt=weight(x-1,y,x,y);
                noise_free_pixels.append(wt*padded_img[x-1][y]);
                wt=weight(x,y,x,y);
                noise_free_pixels.append(wt*padded_img[x][y]);
            # Middle element of the sorted weighted values, then blend:
            # (1-ff)*original + ff*MED.
            noise_free_pixels.sort();
            MED=noise_free_pixels[int(len(noise_free_pixels)/2)];
            restoration_term=((1-ff)*padded_img[x][y])+(ff*MED);
            filtered_img[x][y]=restoration_term;
# + id="Whjr5WlVkMpQ"
# Strip the 3-pixel padding so the filtered result aligns with gray_img.
unpadded_filtered_img=gray_img.copy()
for x in range(3,padded_img.shape[0]-3):
    for y in range(3, padded_img.shape[1]-3):
        unpadded_filtered_img[x-3][y-3]=filtered_img[x][y]
# + id="Dg9GEciTU2--" outputId="0c04428b-0491-4548-bac0-1b59e1a5939f" colab={"base_uri": "https://localhost:8080/", "height": 141}
unpadded_filtered_img
# + id="8mbIiaE5lDhz" outputId="dda9ae55-7cb4-4814-8bd4-bf10a6ce0992" colab={"base_uri": "https://localhost:8080/", "height": 273}
cv2_imshow(gray_img)
# + id="skNj2eRVlKUq" outputId="5302e0c2-239c-4a5e-b43c-479842c63aa1" colab={"base_uri": "https://localhost:8080/", "height": 273}
cv2_imshow(noise_img)
# + id="2sZ7SVWPWAQF" outputId="84276e54-6ebb-4fc6-954d-5137f4450853" colab={"base_uri": "https://localhost:8080/", "height": 273}
cv2_imshow(unpadded_filtered_img)
# + id="dGvleOIpTPPm" outputId="07399889-3d71-4eec-bf95-e0f459e9d00f" colab={"base_uri": "https://localhost:8080/", "height": 143}
# Quality metrics against the clean image.  NOTE(review): compare_* were
# removed from skimage.measure in 0.18 (use skimage.metrics on newer versions).
mse=(compare_mse(gray_img,unpadded_filtered_img))
psnr=(compare_psnr(gray_img,unpadded_filtered_img))
ssim=(compare_ssim(gray_img,unpadded_filtered_img))
# + id="8tV9zkhg6eYK" outputId="95abe365-1b18-44f0-9b89-d77d4fb018d1" colab={"base_uri": "https://localhost:8080/", "height": 34}
print("MSE:",mse,end=" ")
print("PSNR:",psnr,end=" ")
print("SSIM:",ssim)
# + id="RgFCKFLywWj-"
# + id="45lnS9ZKwWid"
# + id="mHzmo8LPwWeV"
# + id="fAiV-OoEwWcu"
# + id="aj7nw3fwwWbb"
# + id="82ob32cdwWXl"
# + id="WifXvL4CwWVw"
# + id="WrWSHtI_wWTz"
# + id="KtWMYEizwWSU"
# + id="AwZYgJ-0wWOq"
# + id="Oyzz1Pk5wWM4"
# + id="-SlASOUodKxi" outputId="5d1e3a3a-8197-4d99-b805-19735a88deed" colab={"base_uri": "https://localhost:8080/", "height": 90}
# Re-run the individual metrics to display their values in the notebook.
compare_mse(gray_img,unpadded_filtered_img)
# + id="vt6qoEsjdb2Z" outputId="b261ae9d-886f-46a3-c21f-72629fc3041f" colab={"base_uri": "https://localhost:8080/", "height": 90}
compare_psnr(gray_img,unpadded_filtered_img)
# + id="SCF_yhEYk591" outputId="0e33c8ba-4d21-4751-b400-aed6549a1f59" colab={"base_uri": "https://localhost:8080/", "height": 90}
# full=True also returns the per-pixel structural-difference image.
(score, diff) = compare_ssim(gray_img,unpadded_filtered_img,full=True)
diff = (diff * 255).astype("uint8")
print("SSIM: {}".format(score))
# + id="AThz0SZ6k58Y"
# + id="HoZlFhbqwRC9"
# + id="P0BPTMPGwQ-X"
# + id="JN3I2DaTwQ7k"
# + id="Ng3OaJj2wQ5C"
# + id="nd6CY7EjwQ3C"
# + id="jHuAuSGbwQ1H"
# + id="TY4CP0I4k54K"
#MSE
# sum=0
# M=gray_img.shape[0]
# N= gray_img.shape[1]
# for x in range(0,M):
# for y in range(0,N):
# sum=sum+pow(unpadded_filtered_img[x][y]-gray_img[x][y],2)
# MSE=sum/(M*N)
# MSE
# + id="eXOrPwpneF6o"
#PSNR
# PSNR=10*(math.log((pow(255,2)/MSE),10))
# PSNR
# + id="MvGs-EwlJdCC"
#SSIM
#(score, diff) = compare_ssim(gray_img,unpadded_filtered_img,full=True)
#diff = (diff * 255).astype("uint8")
#print("SSIM: {}".format(score),end=" ")
# + id="baF485K9baEn"
#cv2.imshow("Mean Fltered Image", inp_img1)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
# + id="-Zv20_X3-2dO" outputId="d95fa4b2-01f8-46ae-8c68-69dc7a7b2838" colab={"base_uri": "https://localhost:8080/", "height": 496}
# from google.colab import drive
# drive.mount('/content/drive')
# import numpy as np
# import matplotlib.pyplot as plt
# # %matplotlib inline
# import cv2
# import math
# from google.colab.patches import cv2_imshow
# from skimage.util import random_noise
# from PIL import Image
# import random
# import glob
# from skimage.measure import compare_ssim,compare_mse,compare_psnr
# count=0
# for img in glob.glob("/content/drive/My Drive/Minor_6th_sem/tid2008/*.bmp"):
# print(img,end=" ")
# count+=1
# inp_img= cv2.imread(img)
# gray_img = cv2.cvtColor(inp_img, cv2.COLOR_BGR2GRAY)
# print("shape : ",gray_img.shape,end=" ")
# print("size :", gray_img.size)
# #cv2_imshow(gray_img)
# print("Count :",count)
# + id="mulZeZg6vJdY"
| Minor_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import keras
import pickle
import random
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from keras.utils import to_categorical
from keras.models import Model
plt.ion()
import cv2
# +
# Load pre-pickled (label, image) pairs for train and test.
with open('train_data.pickle', 'rb') as f:
    train_data = pickle.load(f)
with open('test_data.pickle', 'rb') as f:
    test_data = pickle.load(f)
# +
# random.shuffle(train_data)
# random.shuffle(test_data)
# -
len(train_data)
len(test_data)
train_data_list = []
train_label = []
test_data_list = []
test_label = []
# +
# Split each (label, image) pair into parallel arrays; free the raw list.
for d in train_data:
    train_data_list.append(d[1])
    train_label.append(d[0])
train_data_list = np.array(train_data_list)
train_label = np.array(train_label)
del train_data
# -
train_data_list.shape
# +
for d in test_data:
    test_data_list.append(d[1])
    test_label.append(d[0])
test_data_list = np.array(test_data_list)
test_label = np.array(test_label)
del test_data
# -
test_label
# NOTE(review): ndarray.astype returns a NEW array — these two results are
# discarded, so the image arrays keep their original dtype.  If float64
# conversion was intended, the results must be assigned back (which would
# also change what imshow/fit below receive — confirm intent before fixing).
train_data_list.astype('float64')
test_data_list.astype('float64')
# One-hot encode labels over the 151 classes.
train_label = to_categorical(train_label, 151)
test_label = to_categorical(test_label, 151)
test_label[0]
train_data_list.dtype
plt.imshow(train_data_list[92])
print(np.argmax(train_label[92]))
np.max(train_data_list[0])
train_data_list.dtype
plt.imshow(test_data_list[0])
print(np.argmax(test_label[0]))
# Aliases used by model.fit below (train_y/test_y are the one-hot labels).
train_X, train_y, test_X, test_y = train_data_list, train_label, test_data_list, test_label
print(train_X.shape)
print(train_y.shape)
# Transfer learning: ImageNet-pretrained VGG19 backbone (no top) plus a new
# dropout-regularised dense head for the 151-class problem.
base = keras.applications.vgg19.VGG19(include_top=False, weights='imagenet', input_shape = (224, 224, 3))
x = base.output
x = Flatten()(x)
x = Dropout(0.3)(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.3)(x)
predict = Dense(151, activation='softmax')(x)
# +
model = Model(inputs=base.input, outputs=predict)
# Freeze the backbone so only the new head is trained.
for layer in base.layers:
    layer.trainable = False
# -
model.summary()
model.compile(optimizer=keras.optimizers.Adam(), loss='categorical_crossentropy', metrics=["accuracy"])
# train_X, train_y, test_X, test_y
batch_size = 128
epochs = 10
# Train the head, validating on the held-out test split each epoch.
model.fit(train_X, train_y,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(test_X, test_y),
          shuffle=True)
# +
# img = (test_data_list[0])
# print(img.dtype)
# cv2.imwrite('25.png', img)
# +
# np.max(img)
# -
((test_data_list[0]*255)).dtype  # inspect the dtype after scaling — NOTE(review): may wrap if the array is still uint8
# +
def data_name_dict():
    """Parse data_info.txt into a {number: name} mapping.

    Each tab-separated line carries the entry number in column 0 after a
    '#' (e.g. "No.#025"), and the name in column 3 before a '*'.
    """
    mapping = {}
    with open('data_info.txt', 'r', encoding='utf-8') as fh:
        for line in fh:
            columns = line.split('\t')
            key = int(columns[0].split('#')[-1])
            mapping[key] = columns[3].split('*')[0]
    return mapping
raw_data = data_name_dict()
def test(fn, model=model, raw_data=raw_data):
    """Load image *fn*, run it through *model*, and print the predicted name.

    The [:,:,::-1] flips OpenCV's BGR channels to RGB; pixel values are
    scaled to [0, 1] to match training.  Note: the model/raw_data defaults
    are bound once, at definition time.
    """
    img = cv2.imread(fn)[:,:,::-1]
    img = cv2.resize(img, (224,224))
    img = img / 255
    plt.imshow(img)
    img_tensor = np.expand_dims(img, axis=0)   # add the batch dimension
    output = model.predict(img_tensor)
    print(np.max(output))                      # top-class confidence
    print(np.argmax(output)+1)                 # classes are 1-indexed in raw_data
    print("output: {}".format(raw_data[np.argmax(output)+1]))
# +
test('pika.jpg', model, raw_data)
# -
test('jeni_2.png')
test('daja.jpg')
test('jeni_1.png')
test('pika2.jpg')
model.save('pokemon.h5')
# +
# class DataGenerator(keras.utils.Sequence):
# def __init__(self, datas, labels, batch_size=1, shuffle=True):
# self.batch_size = batch_size
# self.datas = datas
# self.labels = labels
# self.indexes = np.arange(len(self.datas))
# self.shuffle = shuffle
# def __len__(self):
# #计算每一个epoch的迭代次数
# return math.ceil(len(self.datas) / float(self.batch_size))
# def __getitem__(self, index):
# #生成每个batch数据,这里就根据自己对数据的读取方式进行发挥了
# # 生成batch_size个索引
# batch_indexs = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# # 根据索引获取datas集合中的数据
# batch_datas = [(self.labels[k], self.datas[k]) for k in batch_indexs]
# # 生成数据
# X, y = self.data_generation(batch_datas)
# return X, y
# def on_epoch_end(self):
# #在每一次epoch结束是否需要进行一次随机,重新随机一下index
# if self.shuffle == True:
# np.random.shuffle(self.indexes)
# def data_generation(self, batch_datas):
# images = []
# labels = []
# # 生成数据
# for data in batch_datas:
# images.append(data[1])
# labels.append(data[0])
# return np.array(images), np.array(labels)
# +
# training_generator = DataGenerator(train_datas)
# -
| train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bias
# ***
#
# **[Bias - HyperStat Online](http://davidmlane.com/hyperstat/A9257.html)**
#
# *[http://davidmlane.com/hyperstat/A9257.html](http://davidmlane.com/hyperstat/A9257.html)*
#
# ***
# %matplotlib inline
# +
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as ss
import seaborn as sns
# Notebook-wide plotting defaults: ggplot theme, large figures.
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = 14, 10
# -
# ## Location and scale
# +
x = np.linspace(-10.0, 10.0, 1000)
# Raw strings so the LaTeX escapes (\mu, \sigma) are not parsed as (invalid)
# Python string escapes; the rendered text is identical.
plt.fill(x, ss.norm.pdf(x, loc= 0.0, scale=1.0), label=r"$\mu = 0.0, \sigma = 1.0$", c='b', alpha=0.6, lw=3.0)
plt.fill(x, ss.norm.pdf(x, loc= 2.0, scale=1.0), label=r"$\mu = 2.0, \sigma = 1.0$", c='r', alpha=0.6, lw=3.0)
plt.fill(x, ss.norm.pdf(x, loc= 0.0, scale=2.0), label=r"$\mu = 0.0, \sigma = 2.0$", c='g', alpha=0.6, lw=3.0)
plt.fill(x, ss.norm.pdf(x, loc=-1.0, scale=0.6), label=r"$\mu =-1.0, \sigma = 0.6$", c='y', alpha=0.6, lw=3.0)
plt.title(r"Normal distribution for different $\mu$ and $\sigma$")
plt.legend();
# -
# ## Probability
# +
x = np.linspace(-3.0, 3.0, 1000)
y = ss.norm.pdf(x, loc= 0.0, scale=1.0)
# Restrict to the interval (a, b) = (-1.0, 1.4) to shade P(a <= X <= b).
xseg = x[np.logical_and(-1.0 < x, x < 1.4)]
yseg = y[np.logical_and(-1.0 < x, x < 1.4)]
plt.plot(x, y, color='k', alpha=0.5)
plt.fill_between(xseg, yseg, color='b', alpha=0.5)
# Dotted verticals mark the integration bounds a and b.
plt.axvline(x=-1.0, color='grey', linestyle=':')
plt.axvline(x= 1.4, color='grey', linestyle=':')
plt.text(0.2, 0.15,
         r'$P (a \leq X \leq b) = \int_a^b \frac{1}{\sqrt{2\pi\sigma^2} } e^{ -\frac{(x-\mu)^2}{2\sigma^2} } \, dx$',
         horizontalalignment='center', size=17)
plt.axhline(y= 0.0, color='black', linestyle='-')
plt.title("Probability of $x$ between $a$ and $b$");
# +
x = np.linspace(-5.0, 5.0, 10000)
plt.plot(x, ss.norm.pdf(x, loc=0.0, scale=1.0), 'k-', lw=1.0)
# Shade the 1/2/3-sigma bands of the standard normal.  Labels use raw
# strings so the LaTeX escapes (\sigma, \%) are not parsed as (invalid)
# Python string escapes; the rendered text is identical.
xseg = x[np.logical_and(-5.0 <= x, x <= -3.0)]
plt.fill_between(xseg, ss.norm.pdf(xseg), color='y', alpha=0.5, linewidth=0)
xseg = x[np.logical_and(-3.0 <= x, x <= -2.0)]
plt.fill_between(xseg, ss.norm.pdf(xseg), color='r', alpha=0.5, linewidth=0)
xseg = x[np.logical_and(-2.0 <= x, x <= -1.0)]
plt.fill_between(xseg, ss.norm.pdf(xseg), color='g', alpha=0.5, linewidth=0)
xseg = x[np.logical_and(-1.0 <= x, x <= 1.0)]
plt.fill_between(xseg, ss.norm.pdf(xseg), color='b', alpha=0.5, linewidth=0, label=r"$1 \sigma = 68.27\%$")
xseg = x[np.logical_and( 1.0 <= x, x <= 2.0)]
plt.fill_between(xseg, ss.norm.pdf(xseg), color='g', alpha=0.5, linewidth=0, label=r"$2 \sigma = 95.45\%$")
xseg = x[np.logical_and( 2.0 <= x, x <= 3.0)]
plt.fill_between(xseg, ss.norm.pdf(xseg), color='r', alpha=0.5, linewidth=0, label=r"$3 \sigma = 99.73\%$")
xseg = x[np.logical_and( 3.0 <= x, x <= 5.0)]
plt.fill_between(xseg, ss.norm.pdf(xseg), color='y', alpha=0.5, linewidth=0)
# Typo fixed: "Disribution" -> "Distribution".
plt.title("Normal Distribution - Probability Distribution Function")
plt.legend();
# -
# ## Sampling distribution
# +
np.set_printoptions(formatter={'float': lambda x: "{0:6.3f}".format(x)})
# Draw `nosamps` independent samples, each of size `sampsize`, from N(0, 1).
sampsize = 10
nosamps = 1000
samp = np.random.standard_normal((nosamps, sampsize))
print(samp)
# -
# Sample mean of each row (i.e. of each size-`sampsize` sample).
mean = samp.sum(axis=1) / sampsize
print(mean)
# +
# Calculate the variance.
vari = (samp - mean[:, np.newaxis])**2
vari = vari.sum(axis=1) / (sampsize)  # biased per-sample variance (divide by n)
# Without the correction.
mean_vari = vari.sum() / nosamps
print(mean_vari)
# Using Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
print(mean_vari * (sampsize / (sampsize - 1.0)))  # n/(n-1) rescales to the unbiased estimate
# -
# NOTE(review): seaborn's distplot is deprecated (removed in newer releases);
# consider sns.histplot(vari, kde=True) -- confirm against the installed version.
sns.distplot(vari);
# ## End
| 52465 Programming for Data Analysis/statistical bias.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# \section*{Project report - Integer Programming - <NAME>}
# \subsection*{Problem 1}
# Code up Formulation 0, Formulation 2, and Formulation 3 for Gurobi or for CPLEX, using a language like C++ or Python. You should not hard-code any of the problem data; your codes should work for any values of $n$ and $c$. If you have to recompile your codes between instances, you are doing it wrong. Your code should read data from an external file.
# The objective of this project is to assess the effect of different solver methodology on the time to find an optimal solution and the complexity of the algorithm. This report is presented as a combined code and report script. It is structured beginning with the import of the source data, the definition of the model formulation in the \texttt{pyomo} modeling interface package, the solving of instances using different parameter settings and finishing with a comparative analysis of the model outputs.
# \subsection*{Model formulations}
from pyomo.environ import *
import glob
import pandas as pd
import time
import numpy as np
# We begin by modeling the assigment relaxation formulation. The data, parameter, variable and constraint formulations correspond to the formulation as given in the project description.
# +
## Modeling framework assignment relaxation formulation (0)
def assignment_relax(dist):
    """Build the assignment-relaxation TSP model (Formulation 0).

    Parameters
    ----------
    dist : pandas.DataFrame
        Square distance matrix; entry (i, j) is the cost of arc i -> j.

    Returns
    -------
    ConcreteModel
        A pyomo model with binary arc variables and degree constraints
        only (no subtour elimination).
    """
    # Create model
    m = ConcreteModel()

    ## DATA
    # number of points; vertices are indexed 0..P
    m.P = Param(within=PositiveIntegers, initialize=len(dist) - 1)
    m.V = RangeSet(0, m.P)
    # Flatten the DataFrame into a {(i, j): cost} dictionary for pyomo.
    # (The original also called dist.to_dict() and discarded the result.)
    dist = dist.stack().to_dict()

    ## PARAMETERS
    # arc cost parameters
    m.c = Param(m.V, m.V, initialize=dist)

    ## VARIABLES
    # x[i, j] = 1 iff the tour uses arc i -> j
    m.x = Var(m.V, m.V, within=Binary, initialize=0)

    ## CONSTRAINTS
    # Constraint 1b: exactly one arc leaves every vertex.
    # Indexed over m.V only -- the original indexed it over m.V x m.V while
    # the rule ignored the second index, creating |V| identical copies of
    # each constraint.
    def single_connect1(model, i):
        return sum(model.x[i, j] for j in model.V) == 1
    m.s_con1 = Constraint(m.V, rule=single_connect1)

    # Constraint 1c: exactly one arc enters every vertex.
    def single_connect2(model, j):
        return sum(model.x[i, j] for i in model.V) == 1
    m.s_con2 = Constraint(m.V, rule=single_connect2)

    # Constraint 1d: no self-loops.
    def no_self_routes(model, i):
        return model.x[i, i] == 0
    m.zeros = Constraint(m.V, rule=no_self_routes)

    ## OBJECTIVE
    # total cost of the selected arcs
    def objective_rule(model):
        return sum(model.c[i, j] * model.x[i, j] for i in model.V for j in model.V)
    m.objective = Objective(rule=objective_rule, sense=minimize, doc='Define objective function')

    return m
# -
# We continue with the MTZ formulation. As for the assignment relaxation formulation, the definitions follow the definitions from the project description.
# +
## Modeling framework MTZ formulation
# For testing purposes of random starting vertex
np.random.seed(123)
def MTZ(dist):
    """Build the Miller-Tucker-Zemlin TSP model (Formulation 2).

    Parameters
    ----------
    dist : pandas.DataFrame
        Square distance matrix; entry (i, j) is the cost of arc i -> j.

    Returns
    -------
    ConcreteModel
        A pyomo model with binary arc variables, degree constraints and the
        MTZ subtour-elimination constraints (3a-3c) relative to a randomly
        drawn starting vertex.
    """
    # Create model
    m = ConcreteModel()

    ## DATA
    # number of points; vertices are indexed 0..P
    m.P = Param(within=PositiveIntegers, initialize=len(dist) - 1)
    m.V = RangeSet(0, m.P)
    n = len(dist)  # vertex count, captured before `dist` is rebound below
    # Flatten the DataFrame into a {(i, j): cost} dictionary for pyomo.
    dist = dist.stack().to_dict()

    ## PARAMETERS
    # arc cost parameters
    m.c = Param(m.V, m.V, initialize=dist)

    ## VARIABLES
    # x[i, j] = 1 iff the tour uses arc i -> j
    m.x = Var(m.V, m.V, within=Binary, initialize=0)
    # u[i]: position of vertex i in the tour (0 for the starting vertex)
    m.u = Var(m.V, within=NonNegativeReals, bounds=(0, n - 1), initialize=0)

    ## CONSTRAINTS
    # Random starting vertex drawn from the full vertex set.
    # (The original hard-coded the range 0..27, which excludes vertices
    # beyond index 27 on larger instances.)
    r = int(np.random.randint(0, n))

    # Constraint 1b: exactly one arc leaves every vertex.
    # Indexed over m.V only -- the original indexed it over m.V x m.V while
    # the rule ignored the second index, creating |V| identical copies of
    # each constraint.
    def single_connect1(model, i):
        return sum(model.x[i, j] for j in model.V) == 1
    m.s_con1 = Constraint(m.V, rule=single_connect1)

    # Constraint 1c: exactly one arc enters every vertex.
    def single_connect2(model, j):
        return sum(model.x[i, j] for i in model.V) == 1
    m.s_con2 = Constraint(m.V, rule=single_connect2)

    # Constraint 1d: no self-loops.
    def no_self_routes(model, i):
        return model.x[i, i] == 0
    m.zeros = Constraint(m.V, rule=no_self_routes)

    # Constraint 3a: MTZ ordering -- if arc (i, j) is used then j follows
    # i in the tour; skipped for arcs touching the starting vertex r.
    def order(model, i, j):
        if i == r or j == r:
            return Constraint.Skip
        return model.u[i] - model.u[j] + 1 <= (n - 1) * (1 - model.x[i, j])
    m.order_con = Constraint(m.V, m.V, rule=order)

    # Constraint 3b: the tour starts at r (position 0; Python-style indexing).
    def initial_vertex(model, i):
        if i == r:
            return model.u[i] == 0
        return Constraint.Skip
    m.start_at = Constraint(m.V, rule=initial_vertex)

    # Constraint 3c: every other vertex occupies a position in 1..n-1.
    def later_vertex(model, i):
        if i == r:
            return Constraint.Skip
        return inequality(1, model.u[i], n - 1)
    m.continue_at = Constraint(m.V, rule=later_vertex)

    ## OBJECTIVE
    # total cost of the selected arcs
    def objective_rule(model):
        return sum(model.c[i, j] * model.x[i, j] for i in model.V for j in model.V)
    m.objective = Objective(rule=objective_rule, sense=minimize, doc='Define objective function')

    return m
# -
# Lastly, we create the function for the MCF formulation, which also follows the same description as the project task.
# +
## Modeling framework MCF formulation (3)
# For testing purposes of random starting vertex
np.random.seed(123)
def MCF(dist):
    """Build the multi-commodity-flow TSP model (Formulation 3).

    dist : pandas.DataFrame -- square distance matrix; entry (i, j) is the
    cost of arc i -> j.  Returns a pyomo ConcreteModel with binary arc
    variables x, per-commodity flow variables f, degree constraints and the
    MCF subtour-elimination constraints (4a-4c) relative to a randomly
    drawn root vertex r.
    """
    # Create model
    m = ConcreteModel()
    ## DATA
    # initialize number of points in problem (vertices indexed 0..P)
    m.P = Param(within=PositiveIntegers, initialize=len(dist)-1)
    # initialize indices for points in problem
    m.V = RangeSet(0,m.P)
    #m.V.pprint()
    # Transform matrix to dictionary
    # NOTE(review): the next line is a no-op -- its return value is discarded.
    dist.to_dict()
    dist = dist.stack().to_dict()
    #print(dist)
    ## PARAMETERS
    # initialize cost parameters
    m.c = Param(m.V, m.V, initialize = dist)
    #m.c.pprint()
    ## VARIABLES
    # connection variable: x[i, j] = 1 iff the tour uses arc i -> j
    m.x = Var(m.V, m.V, within = Binary, initialize = 0)
    #m.x.pprint()
    # flow variable: f[v, i, j] is the flow of commodity v on arc (i, j)
    m.f = Var(m.V, m.V, m.V, within = NonNegativeReals, bounds = (0, 1), initialize = 0)
    ## CONSTRAINTS
    # generate random starting vertex
    # NOTE(review): the upper bound 28 is hard-coded -- on instances with
    # more than 28 vertices the root is never drawn from the tail;
    # presumably this should depend on len(m.V).
    r = int(np.random.uniform(0,28,1))
    #print(r)
    # generate a subset of vertices that excludes r
    m.sub_V = Set(dimen = 1)
    for k in m.V:
        if k != r:
            m.sub_V.add(k)
    #m.sub_V.pprint()
    # Constraint 1b: one arc leaves every vertex.
    # NOTE(review): indexed over m.V x m.V although the rule ignores the
    # second index, creating |V| duplicate copies of each degree constraint;
    # Constraint(m.V, ...) would suffice.  Same for 1c below.
    def single_connect1(model, i, j):
        return(sum(m.x[i,j] for j in m.V) == 1)
    m.s_con1 = Constraint(m.V, m.V, rule = single_connect1)
    #m.s_con1.pprint()
    # Constraint 1c: one arc enters every vertex.
    def single_connect2(model, i, j):
        return(sum(m.x[i,j] for i in m.V) == 1)
    m.s_con2 = Constraint(m.V, m.V, rule = single_connect2)
    #m.s_con2.pprint()
    # Constraint 1d: no self-loops.
    def no_self_routes(model, i):
        return(m.x[i,i] == 0)
    m.zeros = Constraint(m.V, rule = no_self_routes)
    #m.zeros.pprint()
    # Constraint 4a: one net unit of commodity v leaves the root r.
    def flows_from_r(model, v):
        if v == r:
            return(Constraint.Skip)
        else:
            return(sum(m.f[v,r,j] for j in m.sub_V) - sum(m.f[v,j,r] for j in m.sub_V) == 1)
    m.out_flows = Constraint(m.V, rule = flows_from_r)
    #m.out_flows.pprint()
    # Constraint 4b: flow of commodity v is conserved at every vertex i
    # other than the root r and the commodity's own vertex v.
    def flow_conservation(model, i, v):
        if i == r or v == r or i == v:
            return(Constraint.Skip)
        else:
            # generate a subset of vertices that excludes i
            # NOTE(review): building and then deleting a model Set inside a
            # constraint rule works, but is fragile; a plain Python
            # list/set comprehension would avoid mutating the model here.
            m.sub_V1 = Set(dimen = 1)
            for k in m.V:
                if k != i:
                    m.sub_V1.add(k)
            #m.sub_V1.pprint()
            constr = (sum(m.f[v,i,j] for j in m.sub_V1) - sum(m.f[v,j,i] for j in m.sub_V1) == 0)
            m.del_component(m.sub_V1)
            return(constr)
    m.flow_con = Constraint(m.V, m.V, rule = flow_conservation)
    #m.flow_con.pprint()
    # Constraint 4c: flow may only use arcs selected by x.
    def link(model, i, j, v):
        if v == r:
            return(Constraint.Skip)
        else:
            return(m.f[v,i,j] <= m.x[i,j])
    m.link_con = Constraint(m.V, m.V, m.V, rule = link)
    ## OBJECTIVE
    # total cost of the selected arcs
    def objective_rule(m):
        return(sum(sum(m.c[i,j] * m.x[i,j] for j in m.V) for i in m.V))
    m.objective = Objective(rule = objective_rule, sense = minimize, doc='Define objective function')
    return(m)
# -
# \subsection*{Data}
# Having defined the model formulations, we can continue with reading in the data and creating model formulation instances for each formulation for each testing dataset. We begin by reading in the data.
# +
# read in source files for test data
# NOTE(review): absolute, user-specific path -- must be edited per machine.
source = r'/Users/fietekrutein/Documents/University/University of Washington/Courses/2020 Q1/IND E 599 Integer Programming/Project' # use your path
all_files = glob.glob(source + "/*.txt")  # collected but unused below; only the four named files are read
# test datasets: tab-separated distance matrices, first row skipped (skiprows=1)
bays29 = pd.read_csv(source + '/bays29.txt', delimiter = "\t", skiprows = 1, header = None)
dantzig42 = pd.read_csv(source + '/dantzig42.txt', delimiter = "\t", skiprows = 1, header = None)
pr76 = pd.read_csv(source + '/pr76.txt', delimiter = "\t", skiprows = 1, header = None)
rat99 = pd.read_csv(source + '/rat99.txt', delimiter = "\t", skiprows = 1, header = None)
# -
# We now generate instances of the assignment relaxation formulation for each dataset.
# create assignment relaxation formulations for each dataset
bays29_assignment_relax_m = assignment_relax(bays29)
dantzig42_assignment_relax_m = assignment_relax(dantzig42)
pr76_assignment_relax_m = assignment_relax(pr76)
rat99_assignment_relax_m = assignment_relax(rat99)
# We further generate instances for the MTZ formulation.
# create MTZ formulations for each dataset
# NOTE(review): MTZ/MCF draw their random start vertex from the shared numpy
# RNG (seeded once at definition time), so the order in which instances are
# created determines which start vertex each instance receives.
bays29_MTZ_m = MTZ(bays29)
dantzig42_MTZ_m = MTZ(dantzig42)
pr76_MTZ_m = MTZ(pr76)
rat99_MTZ_m = MTZ(rat99)
# Lastly, we create model instances for the MCF formulation.
# create MCF formulations for each dataset
bays29_MCF_m = MCF(bays29)
dantzig42_MCF_m = MCF(dantzig42)
pr76_MCF_m = MCF(pr76)
rat99_MCF_m = MCF(rat99)
# \subsection*{Solver Definition}
# We can now proceed to the solver definition. It was decided to create a single solver function that once called iterates the solving process for the passed instance and changing the parameter settings on \textit{Presolve} and \textit{Cuts} on the fly. The function therefore returns four results:
# \begin{enumerate}
# \item Default settings
# \item No pre-solve
# \item No cuts
# \item No pre-solve and no cuts
# \end{enumerate}
# We further pass it an indicator whether to only solve the default solver, since we will not need the alternative solver configurations for the assignment relaxation formulation.
# +
# Define solving instance
from gurobipy import *

timelimit = 3600  # wall-clock limit per solve, in seconds

def solve(m, default):
    """Solve model *m* with Gurobi under several parameter configurations.

    Parameters
    ----------
    m : ConcreteModel
        The pyomo model instance to solve.
    default : bool
        When True only the default Gurobi configuration is run; when False
        the model is additionally solved with presolve disabled, with cuts
        disabled, and with both disabled.

    Results are printed (tee'd solver log plus the pyomo results object).
    """
    if __name__ == '__main__':  # original guard kept: no-op when imported as a module
        from pyomo.opt import SolverFactory
        import pyomo.environ
        # Independent copies per configuration.  (The original bound the
        # same model object to all four names, so every configuration
        # re-solved -- and potentially mutated -- one shared model.)
        m_default = m.clone()
        if default == False:
            m_no_presolve = m.clone()
            m_no_cuts = m.clone()
            m_no_presolve_no_cuts = m.clone()
        opt = SolverFactory('gurobi')
        opt.options['IntFeasTol'] = 10e-10  # tight integrality tolerance (1e-9)
        opt.options['MIPGap'] = 0           # solve to proven optimality
        opt.options['TimeLimit'] = timelimit
        # Solve with default parameter settings
        print('')
        print('DEFAULT')
        print('')
        results_default = opt.solve(m_default, load_solutions=False, tee = True)
        print(results_default)
        if default == False:
            # Solve without presolve
            print('')
            print('NO PRESOLVE')
            print('')
            opt.options['Presolve'] = 0  # disable presolve
            results_no_presolve = opt.solve(m_no_presolve, load_solutions=False, tee = True)
            print(results_no_presolve)
            # Solve without cuts
            print('')
            print('NO CUTS')
            print('')
            opt.options['Presolve'] = -1  # restore automatic presolve
            opt.options['Cuts'] = 0  # disable cuts
            results_no_cuts = opt.solve(m_no_cuts, load_solutions=False, tee = True)
            print(results_no_cuts)
            # Solve without cuts and without presolve
            print('')
            print('NO PRESOLVE NO CUTS')
            print('')
            opt.options['Presolve'] = 0  # disable presolve (Cuts still 0 from above)
            opt.options['Cuts'] = 0  # disable cuts
            results_no_presolve_no_cuts = opt.solve(m_no_presolve_no_cuts, load_solutions=False, tee = True)
            print(results_no_presolve_no_cuts)
# -
# \subsection*{Solutions}
# We continue by solving each model instance using the solver function defined above. Since the model output is quite large in size and would not fit the format of a report very well, we comment out the solver command in this script and report the model results in table format below. We start with the assignment relaxation formulation.
# +
## Assignment relaxation formulation
# solve each model instances
#print('Assignment relaxation formulation')
#print('Bays29 Dataset:')
#solve(bays29_assignment_relax_m, default = True)
# +
#print('Assignment relaxation formulation')
#print('Dantzig42 Dataset:')
#solve(dantzig42_assignment_relax_m, default = True)
# +
#print('Assignment relaxation formulation')
#print('Pr76 Dataset:')
#solve(pr76_assignment_relax_m, default = True)
# +
#print('Assignment relaxation formulation')
#print('Rat99 Dataset:')
#solve(rat99_assignment_relax_m, default = True)
# -
# We obtain the results as presented in Table 1 from the assignment relaxation formulation. (NOTE: the table below is still a placeholder template -- replace it with the actual results.)
# \begin{tabular}{l*{6}{c}r}
# Team & P & W & D & L & F & A & Pts \\
# \hline
# Manchester United & 6 & 4 & 0 & 2 & 10 & 5 & 12 \\
# Celtic & 6 & 3 & 0 & 3 & 8 & 9 & 9 \\
# Benfica & 6 & 2 & 1 & 3 & 7 & 8 & 7 \\
# FC Copenhagen & 6 & 2 & 1 & 3 & 5 & 8 & 7 \\
# \end{tabular}
# +
## MTZ formulation
# solve each model instances
#print('MTZ formulation')
#print('Bays29 Dataset:')
#solve(bays29_MTZ_m, default = False)
# +
#print('MTZ formulation')
#print('Dantzig42 Dataset:')
#solve(dantzig42_MTZ_m, default = False)
# -
#print('MTZ formulation')
#print('Pr76 Dataset:')
# NOTE(review): this call is live while every sibling call in this section
# is commented out -- presumably left uncommented by accident; running this
# cell solves the pr76 MTZ instance (time limit 3600 s).
solve(pr76_MTZ_m, default = False)
# +
#print('MTZ formulation')
#print('Rat99 Dataset:')
#solve(rat99_MTZ_m, default = False)
# +
## MCF formulation
# solve each model instances
#print('MCF formulation')
#print('Bays29 Dataset:')
#solve(bays29_MCF_m, default = False)
# +
#print('MCF formulation')
#print('Dantzig42 Dataset:')
#solve(dantzig42_MCF_m, default = False)
# +
#print('MCF formulation')
#print('Pr76 Dataset:')
#solve(pr76_MCF_m, default = False)
# +
#print('MCF formulation')
#print('Rat99 Dataset:')
#solve(rat99_MCF_m, default = False)
| 2020_02_25_IP_Project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import pickle
import numpy as np
# Read in data (c/o <NAME>)
# Python 3 fixes: pickle files must be opened in binary mode (the original
# used text mode 'r'), files are closed via context managers, and the
# Python 2 `print` statement (a SyntaxError under py3) is now a call.
with open('../data/open_clusters4_tags_dr13_30eB_nofilt_5026_norm8.pickle', 'rb') as a:  # can do with aspcap
    b = pickle.load(a)
dataout = b[0]
dataout_labels = ["teff", "logg", "[Fe/H]", "C", "N", "O", "Na", "Mg", "Al", "Si", "S", "K", "Ca", "Ti", "V", "Mn", "Ni", "P", "Cr", "Co", "Cu", "Rb", "FWHM"]
with open('../data/open_clusters_name4.txt', 'r') as a:
    al = a.readlines()
names = np.asarray([each.strip() for each in al])
c = np.genfromtxt('../data/NGC6791_xmatch.csv', delimiter=',', dtype=None, names=True)
inds = np.where(names == 'N6791')[0]  # select NGC 6791 members only
params = dataout[inds, 0:3]
abunds = dataout[inds, 3:-1]
twomass_ids = np.asarray(b[-1])[inds]
# Pull per-element errors from the diagonal entries of b[1].
abund_errs = np.empty_like(abunds)
for i in range(abunds.shape[1]):
    abund_errs[:, i] = b[1][inds][3+i, 3+i]
# TODO: add second error component in quadrature
param_errs = np.empty_like(params)
for i in range(params.shape[1]):
    param_errs[:, i] = b[1][inds][i, i]
print(param_errs)  # i don't think i'm doing this right, why are they so small??
# [Al/Fe] (abundance column 5) vs [Fe/H], labelled with each star's KIC id.
plt.errorbar(params[:,-1], abunds[:,5], xerr=param_errs[:,-1], yerr=abund_errs[:,5], fmt='o')
for i,n in enumerate(c['KIC']):
    plt.text(params[i,-1] + 0.005, abunds[i,5] + 0.005, n)
plt.ylabel('[Al/Fe]')
plt.xlabel('[Fe/H]')
# Same plot for [Cu/Fe] (abundance column 17).
plt.errorbar(params[:,-1], abunds[:,17], xerr=param_errs[:,-1], yerr=abund_errs[:,17], fmt='o')
for i,n in enumerate(c['KIC']):
    plt.text(params[i,-1] + 0.005, abunds[i,17] + 0.005, n)
plt.ylabel('[Cu/Fe]')
plt.xlabel('[Fe/H]')
# calculate condensation temperature fits (ignoring CNO because those are complicated!)
import warnings; warnings.simplefilter('ignore')
import q2
# Condensation temperature for each element (q2 lookup keyed on e.g. 'NaI').
Tc = np.asarray([q2.abundances.gettc(x+'I') for x in dataout_labels[3:-1]])
slopes = []
intercepts = []
for i in range(len(twomass_ids)):
    # Skip the first three abundance columns (C, N, O) per the note above.
    xs = Tc[3:]
    ys = abunds[i,3:]
    # Degree-1 least-squares fit via the normal equations on a Vandermonde matrix.
    A = np.vander(xs, 2)
    m = np.ones(len(xs), dtype=bool) # mask
    w = np.linalg.solve(np.dot(A[m].T, A[m]), np.dot(A[m].T, ys[m]))
    plt.scatter(xs, ys)
    mu = np.dot(A, w)
    slopes.append(w[0])
    intercepts.append(w[1])
    plt.plot(xs, mu)
plt.hist(np.asarray(slopes) * 1.e4, bins=np.arange(-1.0, 2.4, 0.2))
plt.axvline(0.0, c='r')  # zero slope marked as the solar reference
plt.text(0.02, 2.5, 'sun', color='r', fontsize=16)
plt.xlabel(r'T$_c$ slope ($\times 10^4$ dex K$^{-1}$)', fontsize=16)
| notebooks/.ipynb_checkpoints/melissa_clusters-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelEncoder
# Load the 2019 (train) and 2020 Q1 (test) loan datasets.
train_df = pd.read_csv(Path('Resources/2019loans.csv'))
test_df = pd.read_csv(Path('Resources/2020Q1loans.csv'))
# Convert categorical data to numeric and separate target feature for training data
train_df = pd.get_dummies(train_df)
X_train = train_df.drop(['target_high_risk', 'target_low_risk'], axis=1)
y_train = train_df['target_high_risk']
# Convert categorical data to numeric and separate target feature for testing data
test_df = pd.get_dummies(test_df)
X_test = test_df.drop(['target_high_risk', 'target_low_risk'], axis=1)
y_test = test_df['target_high_risk']
# Add missing dummy variables to the testing set BEFORE any fitting/scoring.
# (The original reindexed only after a first fit/score pass, so that pass
# scored on a test matrix whose columns could be misaligned with training;
# the redundant duplicate fit has been removed.)
X_test = X_test.reindex(columns=X_train.columns, fill_value=0)
# Train the Logistic Regression model on the unscaled data and print the model score
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(X_train, y_train)
print(f"Training Data Score: {classifier.score(X_train, y_train)}")
print(f"Testing Data Score: {classifier.score(X_test, y_test)}")
# Train a Random Forest Classifier model and print the model score
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(random_state=1, n_estimators=100).fit(X_train, y_train)
print(f'Training Score: {clf.score(X_train, y_train)}')
print(f'Testing Score: {clf.score(X_test, y_test)}')
# Scale the data
# (fit the scaler on the training split only, then apply it to both splits)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Train the Logistic Regression model on the scaled data and print the model score
classifier.fit(X_train_scaled, y_train)  # re-fits the classifier created earlier
print(f"Training Data Score: {classifier.score(X_train_scaled, y_train)}")
print(f"Testing Data Score: {classifier.score(X_test_scaled, y_test)}")
# Train a Random Forest Classifier model on the scaled data and print the model score
clf = RandomForestClassifier(random_state=1, n_estimators=100).fit(X_train_scaled, y_train)
print(f'Training Score: {clf.score(X_train_scaled, y_train)}')
print(f'Testing Score: {clf.score(X_test_scaled, y_test)}')
| .ipynb_checkpoints/There-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="LPRTZSJxHygW"
# ### **Presenting render() function alternatives for Google Colab**
#
# __The below code is taken from the documentation page of the OpenAI toolkit. And as you can see it is giving error with `env.render()`. The thing is render() uses `pyglet` that needs RGB color inputs from screen which is not present here. Hence, in this notebook we'll provide alternatives to this issue. So, that anyone of the options can be used as a replacement.__
#
# + id="G_q0sOaoHom2" colab={"base_uri": "https://localhost:8080/", "height": 382} outputId="677272e2-efea-48f6-bde6-887ab78a24bb"
# Don't worry about this error statement. This error points out that no display
# is connected to our Colab server to render the graphics.
'''
import gym
env = gym.make('CartPole-v0')
env.reset()
for _ in range(1000):
env.render()
env.step(env.action_space.sample()) # take a random action
env.close()
'''
# + [markdown] id="1CGhTi4brX3f"
# ### **First solution: Saving and Loading the Video as output.**
#
# __This solution is designed by <NAME> and it uses PyVirtualDisplay, python-opengl, xvfb & the ffmpeg encoder libraries for displaying outputs from OpenAI environments. Also, this rendering solution isn't as responsive as running OpenAI VM on your machine but it gets the job done.__
# + id="syUDg7X9ERYX"
# Prerequisite Information:
# " > /dev/null 2>&1" is used for dumping all the output messages while
# downloading the packages. Basically, in '/dev/null' everything is discarded.
# 2 is the file descriptor for Standard Error and 1 for Standard Out.
# Libraries required for generating a Colab display:
# xvfb, x11-utils, gym==0.17.*, pyvirtualdisplay==0.2.*,
# PyOpenGL==3.1.* PyOpenGL-accelerate==3.1.*
# apt-get update is important for adding package path of xvfb library.
# !sudo apt-get update > /dev/null 2>&1
# This specificity of version is very important for compatability reasons.
# !apt-get install -y xvfb x11-utils > /dev/null 2>&1
# gym version compatability with other libraries is required for rendering with this approach.
# !pip install pyvirtualdisplay==0.2.* PyOpenGL==3.1.* PyOpenGL-accelerate==3.1.* > /dev/null 2>&1
# Testing of xvfb can be done with ''
# + id="gOVjmwBXITLn"
# gym package related imports
import gym
from gym import logger as gymlogger
from gym.wrappers import Monitor
gymlogger.set_level(40) #error only
# Python display and environment video generation related imports
import math
import glob
import io
import base64
from IPython.display import HTML
from IPython import display as ipythondisplay
# + id="f8BlkXZhIoTy" colab={"base_uri": "https://localhost:8080/"} outputId="e770b229-1645-44b2-9249-50a31b08b6a6"
# This creates virtual display to send the frames for being rendered.
from pyvirtualdisplay import Display
display = Display(visible=0, size=(1366, 768))  # headless X display (no visible window)
display.start()
# + id="DQdRmdEUIYpV"
def show_video():
    '''
    Embed the most recent Monitor-recorded mp4 inline in the notebook.

    Reads the first video found under ./video (written by the Monitor
    wrapper) and displays it as a base64-encoded HTML5 <video> element.
    '''
    mp4list = glob.glob('video/*.mp4')
    if len(mp4list) > 0:
        mp4 = mp4list[0]
        # Read-only binary mode; the original opened with 'r+b' (update
        # mode), which needlessly requires write permission, and never
        # closed the file handle.
        with io.open(mp4, 'rb') as f:
            video = f.read()
        encoded = base64.b64encode(video)
        ipythondisplay.display(HTML(data='''<video alt="test" autoplay
                loop controls style="height: 256px;">
                <source src="data:video/mp4;base64,{0}" type="video/mp4" />
             </video>'''.format(encoded.decode('ascii'))))
    else:
        print("Could not find video")
def wrap_env(env):
    '''
    Wrap a gym environment in a Monitor that records each episode to an
    mp4 file under ./video (force=True clears any previous recordings).
    If the directory is not changed, videos end up under 'content/'.
    '''
    env = Monitor(env, './video', force=True)
    return env
# + id="cdjgSAJCIjGC"
# Wrapping the environment in Monitor object to store the environment video feed.
env = wrap_env(gym.make('MountainCar-v0'))
# + id="hfkhVV_bIo4Q" colab={"base_uri": "https://localhost:8080/", "height": 277} outputId="5efbb799-6bc8-4608-a9e1-03cd20f5b5ea"
# One methodology for rendering shown below: run a random policy until the
# episode terminates, letting the Monitor wrapper record every frame.
observation = env.reset()
while True:
    env.render()
    # your agent goes here
    action = env.action_space.sample()
    observation, reward, done, info = env.step(action)
    if done:
        break  # stray ';' after break removed
env.close()
# By executing the below function we present the agent's recorded behavior.
show_video()
# + [markdown] id="RDntr1d3VBBC"
# ### **Second solution: Using Matplotlib to display the screen.**
#
# __We plot the state of environment after each iteration episodes for the environment under analysis after taking an action. Also, this rendering solution isn't as responsive as running OpenAI VM on your machine but it gets the job done.__
# + id="f6pdEgu8VAEa"
# Before, executing this cell click on 'Runtime' -> 'Factory reset Runtime'.
# This will reset the previously stored packages.
# Note: Similar import packages as above but not the same.
# # !sudo apt-get update > /dev/null 2>&1
# # !apt-get install -y xvfb python-opengl > /dev/null 2>&1
# # !pip install pyvirtualdisplay > /dev/null 2>&1
# + id="ZX12fTvLyMoX"
import gym
import numpy as np
import matplotlib.pyplot as plt
from IPython import display as ipythondisplay
# + id="orHrmWiyyMye" colab={"base_uri": "https://localhost:8080/"} outputId="86932e78-21fa-4038-9a68-6ddf5d0071f4"
from pyvirtualdisplay import Display
display = Display(visible=0, size=(400, 300))  # headless X display for rendering
display.start()
# + id="BkmCYwRaZhsP" colab={"base_uri": "https://localhost:8080/", "height": 269} outputId="16f7a939-066c-4198-9fa1-1016e87e3469"
# This method of printing output is extremely slow and impractical.
# But, can be used as a last resort for rendering an environment output.
env = gym.make('MountainCar-v0')
env.reset()
# Grab the initial frame as an RGB array and show it.
prev_screen = env.render(mode='rgb_array')
plt.imshow(prev_screen)
for i in range(16):
    action = env.action_space.sample()  # random policy
    obs, reward, done, info = env.step(action)
    screen = env.render(mode='rgb_array')
    # Plotting the matplotlib graph in each iteration.
    plt.imshow(screen)
    ipythondisplay.clear_output(wait=True)
    ipythondisplay.display(plt.gcf())
    if done:
        break
ipythondisplay.clear_output(wait=True)
env.close()
# + id="WV9-x1rzZhpU" colab={"base_uri": "https://localhost:8080/"} outputId="7ebbe132-c8b9-4b5a-a069-209aa0416460"
display.stop()
# + [markdown] id="2Zg7hL2kLwSS"
# ### **Conclusion**
#
# __Based on the output expressibility with better frame rate we proceed with the first approach to store and display outputs for the environment of open-ai gym.__
| milestone-two/rendering_alternatives_for_open_ai_envs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import soundfile # to read audio file
import numpy as np
import librosa # to extract speech features
import glob
import os
import pickle # to save model after training
from sklearn.model_selection import train_test_split # for splitting training and testing
from sklearn.neural_network import MLPClassifier # multi-layer perceptron model
from sklearn.metrics import accuracy_score # to measure how good we are
def extract_feature(file_name, **kwargs):
    """
    Extract acoustic features from the audio file `file_name`.

    Boolean keyword flags select the features, which are concatenated
    (in this order) into a single 1-D numpy array:
        - MFCC (mfcc): 40 Mel-frequency cepstral coefficients, time-averaged
        - Chroma (chroma): chromagram from the short-time Fourier transform
        - MEL Spectrogram Frequency (mel)
        - Contrast (contrast): spectral contrast
        - Tonnetz (tonnetz): tonal centroids of the harmonic component
    e.g:
    `features = extract_feature(path, mel=True, mfcc=True)`
    """
    mfcc = kwargs.get("mfcc")
    chroma = kwargs.get("chroma")
    mel = kwargs.get("mel")
    contrast = kwargs.get("contrast")
    tonnetz = kwargs.get("tonnetz")
    with soundfile.SoundFile(file_name) as sound_file:
        X = sound_file.read(dtype="float32")
        sample_rate = sound_file.samplerate
        # The STFT is only needed for the chroma and contrast features.
        if chroma or contrast:
            stft = np.abs(librosa.stft(X))
        result = np.array([])
        if mfcc:
            mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
            result = np.hstack((result, mfccs))
        if chroma:
            chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
            result = np.hstack((result, chroma))
        if mel:
            # Pass the signal as a keyword: librosa >= 0.10 makes `y`
            # keyword-only, and the keyword form works on older releases too.
            mel = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T, axis=0)
            result = np.hstack((result, mel))
        if contrast:
            contrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T, axis=0)
            result = np.hstack((result, contrast))
        if tonnetz:
            tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T, axis=0)
            result = np.hstack((result, tonnetz))
    return result
# +
# all emotions on RAVDESS dataset
int2emotion = {
"01": "neutral",
"02": "calm",
"03": "happy",
"04": "sad",
"05": "angry",
"06": "fearful",
"07": "disgust",
"08": "surprised"
}
# we allow only these emotions ( feel free to tune this on your need )
AVAILABLE_EMOTIONS = {
"angry",
"sad",
"neutral",
"happy"
}
def load_data(test_size=0.2):
    """Load the RAVDESS wav files, extract features and split train/test.

    Uses the module-level `int2emotion`, `AVAILABLE_EMOTIONS` and
    `extract_feature`. Returns the 4-tuple from train_test_split.
    """
    feature_rows, labels = [], []
    for path in glob.glob("data/Actor_*/*.wav"):
        # The third dash-separated field of the filename encodes the emotion.
        emotion = int2emotion[os.path.basename(path).split("-")[2]]
        # Skip any emotion outside the subset we classify.
        if emotion not in AVAILABLE_EMOTIONS:
            continue
        feature_rows.append(extract_feature(path, mfcc=True, chroma=True, mel=True))
        labels.append(emotion)
    # Split into train/test partitions (fixed seed for reproducibility).
    return train_test_split(np.array(feature_rows), labels,
                            test_size=test_size, random_state=7)
# -
# load RAVDESS dataset, 75% training 25% testing
X_train, X_test, y_train, y_test = load_data(test_size=0.25)
# print some details
# number of samples in training data
print("[+] Number of training samples:", X_train.shape[0])
# number of samples in testing data
print("[+] Number of testing samples:", X_test.shape[0])
# number of features used
# this is a vector of features extracted
# using extract_features() function
print("[+] Number of features:", X_train.shape[1])
# best model, determined by a grid search
model_params = {
    'alpha': 0.01,  # L2 regularization strength
    'batch_size': 256,
    'epsilon': 1e-08,  # numerical stability term for the Adam solver
    'hidden_layer_sizes': (300,),  # a single hidden layer of 300 units
    'learning_rate': 'adaptive',
    'max_iter': 500,
}
# initialize Multi Layer Perceptron classifier
# with best parameters ( so far )
model = MLPClassifier(**model_params)
# train the model
print("[*] Training the model...")
model.fit(X_train, y_train)
# +
# predict the held-out 25% of the data to measure generalization
y_pred = model.predict(X_test)
# calculate the accuracy
accuracy = accuracy_score(y_true=y_test, y_pred=y_pred)
print("Accuracy: {:.2f}%".format(accuracy*100))
# +
# now we save the model
# make result directory if doesn't exist yet
if not os.path.isdir("result"):
    os.mkdir("result")
# pickle the trained classifier so it can be reused without retraining
pickle.dump(model, open("result/mlp_classifier.model", "wb"))
# -
| Speech Emotion Recognizer/Speech Emotion Recognizer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Selection of a calibrant
#
# In this tutorial we will see how to select a calibrant for a given experimental setup.
#
# ## Experimental setup
#
# The experimental setup is a classical protein crystallography setup with:
#
# * Large Pilatus 6M detector on a translation table
# * The small and intense beam of ~50 microns in size has a wavelength of 1 Angstrom
# * The detector is in *normal* condition: orthogonal to the beam and centered in the middle of the detector.
#
# The scientist in charge of this beamline wants to ensure all encoders are working properly and needs to validate the setup for distances between 10cm and 80cm.
# He will buy reference material from NIST but he does not know which calibrant is best suited for his setup.
# We will assume all reference materials sold by NIST are equally suitable for ray position (no issue with grain size, ...).
#
# The calibration works best in pyFAI if more than one Debye-Scherrer ring is seen on the detector.
#
# ## Define the detector
import time
start_time = time.time()
import pyFAI
# Report the pyFAI version. Bug fix: a separator is needed -- the original
# concatenation printed e.g. "PyFAI version0.20.0".
print("PyFAI version " + pyFAI.version)
# The detector used throughout this tutorial: a Pilatus 6M.
dete = pyFAI.detectors.Pilatus6M()
print(dete)
# ## Select reference materials
#
# NIST sells different [Standard Reference Materials](http://www.nist.gov/mml/mmsd/upload/852_08_81b.pdf), among them
# Silicon (SRM640), Lanthanum hexaboride (SRM660), Alumina (SRM676) and Ceria (SRM674) are commonly used.
# Many others exist: Cr2O3, TiO2, ZnO, SiO2, ... Evaluating them is left as an exercise.
#
import pyFAI.calibrant
# List every calibrant shipped with this version of pyFAI.
print(pyFAI.calibrant.ALL_CALIBRANTS)
# You may wonder where the names of the calibrants came from and how they have been established.
#
# The names of all calibrants available in your version of pyFAI can be listed by just printing out the content of ALL_CALIBRANTS. New calibrants may have been added in more recent pyFAI releases; have a look at the [development web page](https://github.com/silx-kit/pyFAI/tree/master/pyFAI/resources/calibration).
#
# Most of those calibrant files, which contain the *d-spacing* in Angstrom between Miller planes, have been prepared from the unit cell of the compound, found in a publication. This publication is referenced in the header of the file.
# If one wishes to regenerate those files, the *pyFAI.calibrant.Cell* class may be used.
#
# We will now focus on a subset of calibrants, instantiate them and put them into a dictionary. The Python construct used here is called a *dict-comprehension* and allows the creation and population of a dictionary in a single line.
cals = dict((name,pyFAI.calibrant.ALL_CALIBRANTS(name)) for name in ("Si", "LaB6", "CeO2", "alpha_Al2O3"))
print(cals)
# To be able to use those calibrants, one needs to define the wavelength used, here 1 Angstrom.
#
wl = 1e-10  # wavelength in meters (1 Angstrom)
for cal in cals.values():
    cal.wavelength = wl
print(cals)
# ## Short distance images
#
# The closest the detector can come to the sample is about 10cm (to leave space for the beamstop).
# We will generate images of diffraction at this distance.
#
# For the display of images we will use *matplotlib* inlined and some utilities from pyFAI to display images.
# Compute the geometric center of the detector, used as the
# point of normal incidence (PONI) for the integrator.
p1, p2, p3 = dete.calc_cartesian_positions()
poni1 = p1.mean()
poni2 = p2.mean()
print("Detector center at %s, %s"%(poni1, poni2))
ai_short = pyFAI.AzimuthalIntegrator(dist=0.1, poni1=poni1, poni2=poni2,detector=dete)
print(ai_short)
# %pylab nbagg
from pyFAI.gui import jupyter
# Render a fake calibration image for each candidate calibrant on a 2x2 grid.
fig, ax = subplots(2, 2, figsize=(10,10))
for idx, key in enumerate(cals):
    cal = cals[key]
    img = cal.fake_calibration_image(ai_short)
    jupyter.display(img, label=key, ax=ax[idx//2, idx%2])
# As one can see, there are plenty of rings on the image: it should be easy to calibrate.
# By moving the detector further away, the number of rings will decrease.
#
# ## Long distance images ##
#
# To keep a good calibration one should have at least two rings for the calibration.
# The longest distance from sample to the detector is 80cm.
ai_long = pyFAI.AzimuthalIntegrator(dist=0.8, poni1=poni1, poni2=poni2, detector=dete)
print(ai_long)
# Same 2x2 comparison of the candidate calibrants, now at 80 cm.
fig, ax = subplots(2, 2, figsize=(10,10))
for idx, key in enumerate(cals):
    cal = cals[key]
    img = cal.fake_calibration_image(ai_long)
    jupyter.display(img, label=key, ax=ax[idx//2, idx%2])
# The most adapted calibrant is probably *LaB6* as 2 rings are still visible at 80 cm from the detector.
#
# ## Integration of the pattern for the two extreme cases ##
# We can integrate the image for the two extreme cases:
# +
lab6 = cals["LaB6"]
# Both integrators need the wavelength to convert pixel positions to q/2-theta.
ai_short.wavelength = ai_long.wavelength = wl
fig, ax = subplots(2, 2, figsize=(10,10))
img_short = lab6.fake_calibration_image(ai_short)
jupyter.display(img_short, label="LaB6 d=0.1m", ax=ax[0,0])
jupyter.plot1d(ai_short.integrate1d(img_short,1000), ax=ax[0,1])
img_long = lab6.fake_calibration_image(ai_long)
jupyter.display(img_long, label="LaB6 d=0.8m", ax=ax[1,0])
jupyter.plot1d(ai_long.integrate1d(img_long,1000), ax=ax[1,1])
# -
# ## Conclusion ##
# The best calibrant in this case is probably LaB6.
print("Total execution time: %.3fs"%(time.time()-start_time))
| doc/source/usage/tutorial/Calibrant/Calibrant.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The random module
import random
random.random() # random float >= 0 and < 1.0
random.random() # random float >= 0 and < 1.0
random.uniform(1,10) # random float >= 1 and < 10.0
random.randrange(10) # random integer from 0 to 9, 10 excluded
random.randrange(0,101) # random integer from 0 to 100
random.randrange(0,101,2) # random integer from 0 to 100 in steps of 2, i.e. multiples of 2
random.randrange(0,101,5) # random integer from 0 to 100 in steps of 5, i.e. multiples of 5
c = '<NAME>'
random.choice(c) # random letter from the string
l = [1,2,3,4,5]
random.choice(l) # random element from the list
random.shuffle(l) # shuffle the list in place (the list itself is modified)
l
random.sample(l, 2) # random sample of 2 elements from the list
| MaterialCursoPython/Fase 4 - Temas avanzados/Tema 11 - Modulos/Apuntes/Leccion 07 (Apuntes) - Random.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="XA-1D1ZKe8d3"
import pandas as pd
import numpy as np
import PIL
import os
from PIL import Image
import json
import sys
from google.colab.patches import cv2_imshow
import torch
import torchvision
from torchvision import transforms
from torchvision import datasets
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/"} id="FcfHkRD3fLBT" outputId="dacd3446-ab5c-4f71-cae6-69570dd434e2"
# !pip install timm
from timm import create_model
# + id="jHpMAa5ysWBO"
# Install and configure google-drive-ocamlfuse so Google Drive can be
# mounted as a local filesystem inside the Colab VM.
# !apt-get install -y -qq software-properties-common python-software-properties module-init-tools
# !add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null
# !apt-get update -qq 2>&1 > /dev/null
# !apt-get -y install -qq google-drive-ocamlfuse fuse
from google.colab import auth
# Authenticate the current Colab user with Google.
auth.authenticate_user()
from oauth2client.client import GoogleCredentials
creds = GoogleCredentials.get_application_default()
import getpass
# Start the headless OAuth flow and prompt for the verification code.
# !google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} < /dev/null 2>&1 | grep URL
vcode = getpass.getpass()
# !echo {vcode} | google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret}
# Create the mount point and mount Drive at /content/drive/MyDrive.
# %cd /content
# !mkdir drive
# %cd drive
# !mkdir MyDrive
# %cd ..
# %cd ..
# !google-drive-ocamlfuse /content/drive/MyDrive
# + id="p77YYStDsfXv"
from torchvision.transforms.transforms import Resize
# Data augmentation and normalization for training;
# just resizing/cropping and normalization for validation.
# The mean/std values are the standard ImageNet channel statistics.
data_transforms = {
    'train': transforms.Compose([
        transforms.Resize((299,299)),
        transforms.RandomHorizontalFlip(),
        transforms.CenterCrop(299),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]),
    'val': transforms.Compose([
        transforms.Resize((299,299)),
        transforms.CenterCrop(299),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
}
# + id="NM1kUy4sso_R" colab={"base_uri": "https://localhost:8080/"} outputId="ba935d34-c4d9-4b92-a99f-a4313ac01a88"
# Root folder containing 'train' and 'val' subdirectories, one folder per class.
data_dir = '/content/drive/MyDrive/hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
                                             shuffle=True, num_workers=4)
              for x in ['train', 'val']}
# + id="jCoGnqNQtdPy"
# Number of images per split, used to average loss/accuracy during training.
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
# + colab={"base_uri": "https://localhost:8080/"} id="CChA4NUHtMaE" outputId="c3678c10-7944-41b9-902d-98913ee9161f"
class_names
# + id="7msWkKaWuMxb"
# def imshow(inp, title=None):
# """Imshow for Tensor."""
# inp = inp.numpy().transpose((1, 2, 0))
# mean = np.array([0.485, 0.456, 0.406])
# std = np.array([0.229, 0.224, 0.225])
# inp = std * inp + mean
# inp = np.clip(inp, 0, 1)
# plt.imshow(inp)
# if title is not None:
# plt.title(title)
# plt.pause(0.001) # pause a bit so that plots are updated
# # Get a batch of training data
# inputs, classes = next(iter(dataloaders['train']))
# # Make a grid from batch
# out = torchvision.utils.make_grid(inputs)
# imshow(out, title=[class_names[x] for x in classes])
# + id="i4TJ_k-Dylcj"
# Instantiate a pretrained ConvNeXt-Large backbone from timm.
model_name = "convnext_large"
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("device = ", device)
model = create_model(model_name, pretrained=True).to(device)
# + id="B1WAptDMnI9j"
# import torch.nn as nn
# classifier = nn.Sequential(
# nn.Linear(in_features=2048, out_features=1024),
# nn.ReLU(),
# nn.Dropout(p=0.4),
# nn.Linear(in_features=1024, out_features=64),
# nn.LogSoftmax(dim=1)
# )
# + id="B45QvjtctNYn"
import time
import copy
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Train `model` and return it loaded with the best-validation-accuracy weights.

    Relies on the module-level `dataloaders`, `dataset_sizes` and `device`.

    Args:
        model: network to train (already on `device`).
        criterion: loss function, e.g. nn.CrossEntropyLoss().
        optimizer: optimizer over the model's trainable parameters.
        scheduler: LR scheduler, stepped once per epoch after the train phase.
        num_epochs: number of passes over the training set.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics (loss is per-sample averaged, so re-weight by batch size)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model whenever validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
# + id="CH6rGDhvvCDh"
# def visualize_model(model, num_images=6):
# was_training = model.training
# model.eval()
# images_so_far = 0
# fig = plt.figure()
# with torch.no_grad():
# for i, (inputs, labels) in enumerate(dataloaders['val']):
# inputs = inputs.to(device)
# labels = labels.to(device)
# outputs = model(inputs)
# _, preds = torch.max(outputs, 1)
# for j in range(inputs.size()[0]):
# images_so_far += 1
# ax = plt.subplot(num_images//2, 2, images_so_far)
# ax.axis('off')
# ax.set_title('predicted: {}'.format(class_names[preds[j]]))
# imshow(inputs.cpu().data[j])
# if images_so_far == num_images:
# model.train(mode=was_training)
# return
# model.train(mode=was_training)
# + id="F0BsQxJmuQdl"
import torch.nn as nn
import torch.optim as optim
# model.num_features = nn.Linear(model.num_features, 2)
# + id="A9rxVLBjEXq8"
# criterion = nn.CrossEntropyLoss()
# optimizer = optim.SGD(resnet.parameters(), lr = 0.0001, momentum = 0.7 )
# exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
# Use the first GPU when available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# + id="EWQEZrQDKpZs" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["2cbdd21a93b542b182ce186cd9d48d25", "094884a0d8fe448483bd8e12b4541b62", "00216c77876a4e5f81c55128d540def8", "c6055438aad0404eb8b38fd0aa9cc692", "ea9e76ee1bdf4b288434f647803e3381", "8e0c736462f044beae302df8dcfdf514", "8347f0fdebbf423199939ada7ac6200b", "10f1b590352c4dcca84003a879400066", "bad039a96af24ebb981fb1790dd77c48", "3b2b29d2516c49429d249b25444d546f", "5d837f3e8e4b48229f89b6958689ccca"]} outputId="e3779234-973a-4898-e5d4-eb594d1de28f"
from torchvision import models
# ResNet-50 as a frozen feature extractor: only the new 2-class head trains.
model = models.resnet50(pretrained=True).to(device)
for param in model.parameters():
    param.requires_grad = False
# Replace the 2048-d final fully-connected layer with a small 2-class head.
model.fc = nn.Sequential(
               nn.Linear(2048, 128),
               nn.ReLU(inplace=True),
               nn.Linear(128, 2)).to(device)
# + id="BfDmjfmKOtyF" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["6624b0c8c2bc41fd840dd0b8d0a7e08d", "692014ce9d49498f8622c7516e8f54a1", "4e34b24eedb94f4e86731cb45e41b78d", "f9e2d5cadc294e05abb568767dabfe3d", "10d7a9b8be2247dbb0f7aaf156aa7f43", "589b9415b99a4320b2783b4fc0093125", "b5126c92eee742f6999805244e7fec1a", "8203e9143d9040a0b67a795674e552c8", "acf899a3cb5a46b9aac915359cb8c033", "bb9517f0523e4fc58e5693f1d0ed4ad0", "8f6f9dd932cd4ebcaf3221f6f5473864"]} outputId="2f99eb78-3cc9-42df-85ba-57fbe8d7998a"
from torch.nn.modules import dropout
from torchvision import models
# Inception-v3 as a frozen feature extractor with a new 2-class head.
incept = models.inception_v3(pretrained=True).to(device)
# Disable the auxiliary classifier so forward() returns a single tensor.
incept.aux_logits = False
# incept.AuxLogits = False
for param in incept.parameters() :
    param.requires_grad = False
incept.fc =nn.Sequential(
    nn.Linear(incept.fc.in_features, 128),
    nn.ReLU(inplace=True),
    nn.Linear(128, 2)).to(device)
# + colab={"base_uri": "https://localhost:8080/"} id="fYj_FuGehf0Z" outputId="73f57909-6660-4aeb-af4d-c79d8a930f74"
incept.fc
# + id="bZp3TFMYOvMk" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["302325974f7047ef97e129b199c874f6", "525ee8b0363146e8863f0a1187268a44", "4deaada55831450c9bd5de7c7452cd4a", "<KEY>", "d6fc1f789d4640009549c1194a409364", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "c4666c8f79f64661ae93e58c5679a7cf", "6ad7da4132cc4f528f6aab9878ff29bc"]} outputId="31ee2ef3-9d7a-4d49-d8b7-8b9787e3c2c7"
squeezenet = models.squeezenet1_0(pretrained = True ).to(device)
# NOTE(review): torchvision's squeezenet1_0 exposes neither `aux_logits` nor
# an `fc` head (its head is `classifier`) -- the two assignments below appear
# to only attach unused attributes, leaving the pretrained 1000-class
# classifier in the forward path. Confirm intent before relying on this model.
squeezenet.aux_logits = False
for param in squeezenet.parameters() :
    param.requires_grad = True
squeezenet.fc = nn.Sequential(
    nn.Linear(2048, 1024),
    nn.Dropout(p=0.5),
    nn.ReLU(inplace = True ),
    nn.Linear(1024,128),
    nn.Dropout(p=0.5),
    nn.ReLU(inplace=True),
    nn.Linear(128, 2)).to(device)
# + id="gY6CVhUHeHkk" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["cc83f09789d1404e960565b9a757b7d3", "c3e700b353ee42febe4ab6a8bf74da06", "5d1b2831881f411da1af6845376a5cf0", "<KEY>", "<KEY>", "fd6227bb397b405ca45d92607a99cbfb", "<KEY>", "8356b757528743238394801a81f0df9f", "fec04cd108484793a81c4f50503edf51", "e4ac1b73d67144ff9e1a6430d3f901ea", "89ddfbe728844883a51b9c8c01853199"]} outputId="1fa70952-ab93-4b38-9dd2-5641cf774f73"
# ShuffleNet-v2 (x0.5) as a frozen backbone with a new 2-class head.
shufflenet = models.shufflenet_v2_x0_5(pretrained = True ).to(device)
# NOTE(review): shufflenet has no `aux_logits`; this assignment just attaches
# an unused attribute -- confirm it can be removed.
shufflenet.aux_logits = False
for param in shufflenet.parameters() :
    param.requires_grad = False
shufflenet.fc = nn.Sequential(
    nn.Linear(1024, 512),
    nn.Dropout(p=0.5),
    nn.ReLU(inplace = True ),
    nn.Linear(512,256),
    nn.Dropout(p=0.5),
    nn.ReLU(inplace=True),
    nn.Linear(256, 2)).to(device)
# + id="1iydzVSjQmUn"
# RMSprop over only the trainable (unfrozen) parameters of the Inception model.
optimizer1 = torch.optim.RMSprop(filter(lambda p: p.requires_grad, incept.parameters()), lr=0.001, weight_decay= 1e-4)
criterion = nn.CrossEntropyLoss()
# Decay the learning rate by 10x every 7 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer1, step_size=7, gamma=0.1)
# + id="kO3EUgLUEe5n"
# resnet,
model = train_model(incept, criterion, optimizer1, exp_lr_scheduler, num_epochs=70 )
# + id="42603g5W1IIZ"
# Persist the trained model and copy it to Drive.
torch.save(model, '/content/model_incept_new.pt')
# + id="vATodN2YEjWz"
# !cp /content/model_incept_new.pt /content/drive/MyDrive/pytorch_models
# + id="2Kd0YAShCxUD"
import torch.nn as nn
import torch
import torch.nn.functional as f


class MyEnsemble(nn.Module):
    """Ensemble that concatenates the features of three backbone networks.

    The final linear layer (`fc`) of each sub-model is replaced by
    `nn.Identity`, so every backbone emits its raw feature vector; the three
    vectors are concatenated and fed through a single linear classifier.

    Args:
        modelA, modelB, modelC: backbones exposing a replaceable `.fc` head.
        nb_classes: number of output classes of the ensemble classifier.
    """

    def __init__(self, modelA, modelB, modelC, nb_classes=10):
        super(MyEnsemble, self).__init__()
        self.modelA = modelA
        self.modelB = modelB
        self.modelC = modelC
        # Remove each backbone's last linear layer so forward() yields features.
        self.modelA.fc = nn.Identity()
        self.modelB.fc = nn.Identity()
        self.modelC.fc = nn.Identity()
        # Classifier over the concatenated features: 6144 = 3 x 2048
        # (the feature width of the Inception-v3 backbones used here).
        self.classifier = nn.Linear(6144, nb_classes)

    def forward(self, x):
        # clone so in-place ops inside modelA cannot affect the other branches
        x1 = self.modelA(x.clone())
        x1 = x1.view(x1.size(0), -1)
        x2 = self.modelB(x)
        x2 = x2.view(x2.size(0), -1)
        x3 = self.modelC(x)
        # Bug fix: flatten x3 using its own batch dimension (was x2.size(0),
        # which only worked by accident when both batches matched).
        x3 = x3.view(x3.size(0), -1)
        x = torch.cat((x1, x2, x3), dim=1)
        x = self.classifier(f.relu(x))
        return x
# + id="YkFZhXJnemi2"
import torch

def _make_inception_branch():
    """Build a frozen pretrained Inception-v3 with a fresh 2-class head.

    Bug fix: the original loops set `param.required_grads = False` -- a typo
    that merely attached an unused attribute, so every backbone weight stayed
    trainable. The correct flag is `param.requires_grad`. The three identical
    copy-pasted cells are also deduplicated into this helper.
    """
    branch = models.inception_v3(pretrained = True ).to(device)
    # Freeze the pretrained backbone; only the new head below will train.
    for param in branch.parameters():
        param.requires_grad = False
    # Disable the auxiliary classifier so forward() returns a single tensor.
    branch.aux_logits = False
    branch.fc = nn.Sequential(
        nn.Linear(branch.fc.in_features, 512),
        nn.Dropout(p = 0.2 ),
        nn.ReLU(inplace = True),
        nn.Linear(512, 128),
        nn.Dropout(p = 0.2 ),
        nn.ReLU(inplace = True),
        nn.Linear(128, 2)).to(device)
    return branch

modelA = _make_inception_branch()
# + id="JrLX5JzWrFeL"
modelB = _make_inception_branch()
modelC = _make_inception_branch()
# + id="RjziGhjnKp1V"
# Ensemble of the three Inception branches with a 2-class classifier.
model_final = MyEnsemble(modelA, modelB, modelC, 2).to(device)
# + id="Sgq4wj9Nm51-" colab={"base_uri": "https://localhost:8080/"} outputId="e9e97038-959e-433e-c78a-fbf90317a444"
model = train_model(model_final, criterion, optimizer1, exp_lr_scheduler, num_epochs= 80 )
# + id="q4h7PDZHssFb"
import torch
# Save the trained ensemble and copy it to Drive.
torch.save(model,'/content/model_incept_ensembled_1.pt')
# !cp /content/model_incept_ensembled_1.pt /content/drive/MyDrive/pytorch_models
# + id="42O7c6nZnB3f"
# Preprocessing applied to single images at inference time (no normalization,
# matching how predict_image() is used below).
test_transforms = transforms.Compose([
                                      transforms.Resize((299,299)),
                                      transforms.ToTensor()
                                      ])
# + id="-Byf6e4sm1es"
from torch.autograd import Variable
def predict_image(image):
    """Return the index of the most probable class for one PIL image.

    Uses the module-level `test_transforms`, `device` and `model`.
    """
    batch = test_transforms(image).float().unsqueeze_(0)
    batch = Variable(batch).to(device)
    scores = model(batch)
    return scores.data.cpu().numpy().argmax()
# + id="tAte2F9AzZhP"
# Reload the saved single-Inception model from Drive.
model_ = torch.load('/content/drive/MyDrive/pytorch_models/model_incept_new.pt')
type(model_)
# + id="HtFgR5x5tUd8"
# Reload the saved ensemble; `model` is what predict_image() uses.
model = torch.load('/content/drive/MyDrive/pytorch_models/model_incept_ensembled_1.pt')
type(model)
# + id="a6F3-OM7o7yT"
import matplotlib.pyplot as plt
import cv2
from google.colab.patches import cv2_imshow
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
to_pil = transforms.ToPILImage()
# NOTE(review): cv2.imread returns BGR while the training pipeline read RGB
# images -- channels are not swapped here; confirm this matches training.
image = cv2.imread('/content/drive/MyDrive/hymenoptera_data/train/ants/1099452230_d1949d3250.jpg')
cv2_imshow(image)
image = to_pil(image)
# The four lines below duplicate work done inside predict_image(); only the
# predict_image(image) call actually determines the printed label.
image_tensor = test_transforms(image).float()
image_tensor = image_tensor.unsqueeze_(0)
input = Variable(image_tensor)
input = input.to(device)
index = predict_image(image)
if index == 0 :
    print('it is an ant')
elif index == 1 :
    print('it is a bee')
else :
    print('Wrong input image')
# + id="3UUU65IVqJEk"
# Human-readable labels indexed by the model's predicted class index.
classes = ['IT IS AN ANT' , 'IT IS A BEE']
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="3E0ajDgWt-Zt" outputId="57209d06-4b7b-4de0-c0cd-680937d098a5"
classes[1]
# + id="M5O9nv2iv5Nm"
| Squeezenet_Transfer_convnext2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/EVA4-RS-Group/Phase2/blob/master/S13_SpeechRecognition/EVA4P2_Session13_Speech_recognition_model1_inference_v1a.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="QVuKGv1pvVQB"
# # 1. Loading the required libraries and dataset
# + colab={"base_uri": "https://localhost:8080/"} id="XV0Qb8sqyhsc" outputId="9443e1a7-f1fe-4cf9-cdf1-1154e0a898a7"
# !pip install torchaudio -q
# + id="Sk4SbCnKzgbG"
# !rm -rf ./*
# !wget https://github.com/EVA4-RS-Group/Phase2/releases/download/S13/sample_test_data.zip -q
# !unzip -q sample_test_data.zip
# !rm -rf /content/__MACOSX
# !rm -rf /content/sample_test_data.zip
# !wget -q https://github.com/EVA4-RS-Group/Phase2/releases/download/S13/weights_cpu_voicerec.pt
# + id="xfxu_1koAupf"
import glob
# Collect the bundled sample .wav files; one is picked at random for inference.
sample_file_list = list(glob.iglob('/content/sample_test_data/*.wav', recursive=True))
# + colab={"base_uri": "https://localhost:8080/"} id="AI7Xgs6Ovbni" outputId="f3af933a-b3a7-4d75-dd4e-2fe357ccdd7a"
import torch
import torchaudio
import random
# + [markdown] id="1niHG1Uwtw8u"
# # 2. Define the network
# + id="u-Uw0f25RhEp"
class SpeechRNN(torch.nn.Module):
    """Two-layer GRU over 12-dim MFCC frames, classifying into 30 keywords.

    Input: a (batch, time, 12) feature tensor.
    Output: (batch, 30) log-probabilities.
    """

    def __init__(self):
        super(SpeechRNN, self).__init__()
        # Despite the name, this is a GRU; the attribute is kept as `lstm`
        # so saved checkpoints keep matching the state-dict keys.
        self.lstm = torch.nn.GRU(
            input_size=12, hidden_size=256, num_layers=2, batch_first=True)
        self.out_layer = torch.nn.Linear(256, 30)
        self.softmax = torch.nn.LogSoftmax(dim=1)

    def forward(self, x):
        # Classify from the hidden state at the final time step only.
        sequence_out, _ = self.lstm(x)
        final_step = sequence_out[:, -1, :]
        return self.softmax(self.out_layer(final_step))
# Keyword labels in training order: index i of the network's output
# corresponds to classes[i].
classes = ('cat dog six bird eight no tree marvin left down off on five three '
           'go seven sheila right four happy bed zero one wow two yes house up '
           'nine stop').split()
# + [markdown] id="n6cWG01zyRXD"
#
# + colab={"base_uri": "https://localhost:8080/"} id="vQioLWGBvSwT" outputId="0d9c92c3-f47f-4d4e-d01b-27028cfde060"
DEVICE=torch.device('cpu')
model = SpeechRNN()
model = model.to(DEVICE)
# Load the CPU-exported weights into the network.
model.load_state_dict(torch.load('/content/weights_cpu_voicerec.pt', map_location=DEVICE))
# + id="nvl7KI1vw88R" colab={"base_uri": "https://localhost:8080/"} outputId="a51a4733-a2ed-432e-a276-33dccb1caf47"
wav_file = random.choice(sample_file_list)
waveform,_ = torchaudio.load(wav_file, normalization=True)
# if the waveform is too short (less than 1 second at 16 kHz) we right-pad it
# with zeroes. Bug fix: the original called F.pad, but torch.nn.functional was
# never imported as F in this notebook, so short clips raised a NameError.
if waveform.shape[1] < 16000:
    waveform = torch.nn.functional.pad(input=waveform, pad=(0, 16000 - waveform.shape[1]), mode='constant', value=0)
# 12 MFCC coefficients per frame, matching the network's input_size.
mfcc_transform = torchaudio.transforms.MFCC(n_mfcc=12, log_mels=True)
# (1, n_mfcc, time) -> (time, n_mfcc), then add a batch dim: (1, time, 12).
mfcc = mfcc_transform(waveform).squeeze(0).transpose(0,1)
x = mfcc.unsqueeze(0)
model.eval()
y = model(x)
# argmax over the 30 log-probabilities gives the predicted keyword.
predicted_label = classes[y.max(1)[1].numpy().item()]
# + colab={"base_uri": "https://localhost:8080/"} id="FiVq0GixBlfh" outputId="30969bb3-eb1f-4190-9e10-46398442e67c"
print(f'Prediction of input file {wav_file.split("/")[-1]} is {predicted_label}.')
| S13_SpeechRecognition/EVA4P2_Session13_Speech_recognition_model1_inference_v1a.ipynb |