code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ryanro97/player-detector/blob/master/PlayerDetectorTrainer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="eJrHbtyZ8qXj" colab_type="text"
# # Player Detector Trainer
# ---
# Trainer for frame by frame player detection, using a custom dataset. Implemented using an untrained [PyTorch's Faster R-CNN model with a ResNet-50-FPN Backbone](https://pytorch.org/docs/stable/torchvision/models.html#faster-r-cnn).
#
# <br />
#
# ### Directory Hierarchy:
# ```
# PlayerDetector
# ├── data
# ├── train
# ├── images
# ├── *.jpg
# ├── targets
# ├── classes.txt
# ├── *.txt
# ├── predict
# ├── video
# ├── *.mp4
# ├── PlayerDetectorTrainer.ipynb
# ├── PlayerDetectorPredictor.ipynb
# ```
#
# <br />
#
# ### Target Labeling
# Using Tzuta Lin's [LabelImg](https://github.com/tzutalin/labelImg) tool, bounding boxes were hand labeled using the YOLO format.
#
# + [markdown] id="0J3sbAiIGjuS" colab_type="text"
# ### Install Dependencies for Google Colab
# + id="OpJwvBy58rni" colab_type="code" colab={}
# !pip3 install pillow torch torchvision
# + [markdown] id="58n5kRVO85m3" colab_type="text"
# ### Mount Google Drive
# + id="rlFq0FwN_aeL" colab_type="code" colab={}
# Mount Google Drive so the dataset and saved weights persist across Colab sessions.
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="iGBIhZf6GwLk" colab_type="text"
# ### Imports
# + id="OyFXwQ9j_HBD" colab_type="code" colab={}
import os
import random
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import functional, ToTensor
from torchvision.models.detection import fasterrcnn_resnet50_fpn
from PIL import Image
# + [markdown] id="Zx4mbsakGzEL" colab_type="text"
# ### Custom PyTorch Dataset
# Handles necessary conversions for YOLO to PyTorch's Faster R-CNN model input format. Also performs augmentation with each image having a 50% probability of being horizontally flipped.
#
# <br />
#
# #### Errors
# ```
# IndexError: Image and Target count mismatch
# TypeError: Image and Target file name mismatch
# IOError: Target read error
# ```
# + id="B2biq9E--h5-" colab_type="code" colab={}
class PlayerTrainerDataset(Dataset):
    """Dataset pairing training images with YOLO-format bounding-box targets.

    Expects `data/train/images/*.jpg` and `data/train/targets/*.txt`
    (one YOLO line per file: `class cx cy w h`, normalized to [0, 1])
    relative to the current working directory.  Each sample is converted
    to the `[x0, y0, x1, y1]` pixel-coordinate format expected by
    torchvision's Faster R-CNN, with a 50% chance of a horizontal flip.

    Raises:
        IndexError: Image and Target count mismatch
        TypeError: Image and Target file name mismatch
        IOError: Target read error
    """

    def __init__(self):
        root = os.getcwd()
        self.images_dir = os.path.join(root, 'data/train/images')
        self.targets_dir = os.path.join(root, 'data/train/targets')
        self.images = sorted(os.listdir(self.images_dir))
        # classes.txt is LabelImg metadata, not a per-image target file.
        self.targets = [target for target in
                        sorted(os.listdir(self.targets_dir))
                        if target != 'classes.txt']
        if len(self.images) != len(self.targets):
            raise IndexError('Image and Target count mismatch')

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        image_path = os.path.join(self.images_dir, self.images[idx])
        target_path = os.path.join(self.targets_dir, self.targets[idx])
        image_name = os.path.splitext(os.path.basename(image_path))[0]
        # BUG FIX: this name was previously derived from image_path too,
        # so the mismatch check below could never fire; derive it from the
        # target file instead.
        target_name = os.path.splitext(os.path.basename(target_path))[0]
        if image_name != target_name:
            raise TypeError('Image and Target file name mismatch')
        image = Image.open(image_path).convert("RGB")
        target = None
        with open(target_path) as f:
            target = f.readline().strip().split()
        if not target:
            raise IOError('Target read error')
        # YOLO stores (class, cx, cy, w, h) normalized; convert to pixel corners.
        w, h = image.size
        center_x = float(target[1]) * w
        center_y = float(target[2]) * h
        bbox_w = float(target[3]) * w
        bbox_h = float(target[4]) * h
        x0 = round(center_x - (bbox_w / 2))
        x1 = round(center_x + (bbox_w / 2))
        y0 = round(center_y - (bbox_h / 2))
        y1 = round(center_y + (bbox_h / 2))
        boxes = [x0, y0, x1, y1]
        labels = torch.as_tensor(1, dtype=torch.int64)
        # Augmentation: flip the image and mirror the box horizontally.
        if random.random() < 0.5:
            image = functional.hflip(image)
            boxes = [w - x1 - 1, y0, w - x0 - 1, y1]
        boxes = torch.as_tensor(boxes, dtype=torch.float32)
        image = ToTensor()(image)
        target = [{'boxes': boxes, 'labels': labels}]
        return image, target
# + [markdown] id="t97ua-r2Kn0t" colab_type="text"
# ### Train Function
# Takes in the working directory (root directory in the data hierarchy diagram), the number of classes to detect, the learning rate, momentum, and weight decay of the optimizer (using stochastic gradient descent), the number of epochs, and trains the model, then saves the weights in the working directory.
#
# <br />
#
# #### Parameters
# ```
# working_dir: String representation of the working directory
# num_classes: Integer representation of the number of classes to detect
# opt_lr: Float representation of the optimizers learning rate
# opt_mom: Float representation of the optimizers momentum
# opt_wd: Float representation of the optimziers weight decay
# num_epochs: Integer representation of the number of epochs to train for
# ```
# + id="itvWU3I9-pfw" colab_type="code" colab={}
def train_model(working_dir, num_classes, opt_lr, opt_mom, opt_wd, num_epochs):
    """Train a Faster R-CNN player detector and save its weights.

    Args:
        working_dir: path to the project root (root of the data hierarchy).
        num_classes: number of classes the detector should predict.
        opt_lr: SGD learning rate.
        opt_mom: SGD momentum.
        opt_wd: SGD weight decay.
        num_epochs: number of passes over the training set.

    Side effects: changes the process working directory and writes
    `weights.pt` into it.
    """
    os.chdir(working_dir)
    model = fasterrcnn_resnet50_fpn(num_classes=num_classes)
    # Prefer the GPU whenever one is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)
    trainable = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(trainable, lr=opt_lr, momentum=opt_mom,
                                weight_decay=opt_wd)
    loader = DataLoader(PlayerTrainerDataset(), shuffle=True)
    model.train()
    for epoch_idx in range(num_epochs):
        epoch_loss = 0.0
        for images, targets in loader:
            images = [img.to(device) for img in images]
            targets = [{key: val.to(device) for key, val in tgt.items()}
                       for tgt in targets]
            # In train mode the model returns a dict of component losses.
            loss_dict = model(images, targets)
            total_loss = sum(loss_dict.values())
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()
            epoch_loss += total_loss.item()
        print('epoch:%d loss:%.3f' %
              (epoch_idx + 1, epoch_loss / len(loader)))
    torch.save(model.state_dict(), 'weights.pt')
# + [markdown] id="IUwW3DUdc29e" colab_type="text"
# ### Training the Model
# + id="yjl36ow7_BBU" colab_type="code" colab={}
train_model('/content/drive/My Drive/PlayerDetector', 2, 0.005, 0.9, 0.0005, 25)
|
PlayerDetectorTrainer.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.1 64-bit
# name: python3
# ---
# !pip install plotly
import pandas as pd
import plotly.express as px
# Load the Kyoto patient records sorted chronologically.
df = pd.read_csv('../data/kyoto_patients.csv', parse_dates=['date']).sort_values('date').reset_index(drop=True)
df.head()
# Daily case counts: number of rows per date (the 'age' column is dropped by size()).
aged_df = pd.DataFrame(df[['date', 'age']].groupby('date').size())
aged_df.columns = ['counts']
#aged_df = aged_df.reset_index()
aged_df
pd.DataFrame(df.groupby('date').size())
# Inspect the alternative patient file for comparison/fixing.
df = pd.read_csv('../data/kyoto_covid_patient.csv')
df[:30000]
df = pd.read_csv('../data/kyoto_covid_patient.csv')
df
|
test/data_fix.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="VAYu3ISwwGks"
import numpy as np
import pandas as pd
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from matplotlib import pyplot as plt
# %matplotlib inline
# + [markdown] id="y4ZKaqzq_vcD"
# # load mosaic data
# + colab={"base_uri": "https://localhost:8080/", "height": 124, "referenced_widgets": ["3aba31e272de4a109741b73375bc52aa", "623d2d20109649a6ac118adc7ef52395", "170ed07e32364ad1866b4dd8d0b0a9cb", "17070844136f44ddb01a662c46e486ad", "5693a26f68fa4dc28c4edb8d551e2a34", "48ddcde7614743c29d2a39cb13bd5556", "b5d3d0e3f7b14d5f90149043360c5edd", "455ab03f12c14c73b5e49ccb217d7924"]} id="UfVhmNGhh3aq" outputId="adf6bb46-5970-4593-9a2d-cd5852a091be"
# Standard CIFAR-10 pipeline: to tensor, then normalise each channel to
# mean 0.5 / std 0.5.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
# + id="c__YslqEi6bP"
trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# First three classes are "foreground" (the signal), the rest "background" (clutter).
foreground_classes = {'plane', 'car', 'bird'}
#foreground_classes = {'bird', 'cat', 'deer'}
background_classes = {'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'}
#background_classes = {'plane', 'car', 'dog', 'frog', 'horse','ship', 'truck'}
# Foreground class ids; labels are re-based by subtracting fg1 later.
fg1,fg2,fg3 = 0,1,2
# + id="AzjzM6Dki50y"
# Split the 50,000 CIFAR-10 training images into foreground
# (plane/car/bird) and background (all other classes) pools.
dataiter = iter(trainloader)
background_data = []
background_label = []
foreground_data = []
foreground_label = []
batch_size = 10
for i in range(5000):
    # FIX: use the builtin next(); the `.next()` method was removed from
    # DataLoader iterators in recent PyTorch releases.
    images, labels = next(dataiter)
    for j in range(batch_size):
        if classes[labels[j]] in background_classes:
            background_data.append(images[j].tolist())
            background_label.append(labels[j])
        else:
            foreground_data.append(images[j].tolist())
            foreground_label.append(labels[j])
# Materialise the pools as tensors for fast indexing later.
foreground_data = torch.tensor(foreground_data)
foreground_label = torch.tensor(foreground_label)
background_data = torch.tensor(background_data)
background_label = torch.tensor(background_label)
# + id="YLm-TB9-h11L"
def create_mosaic_img(bg_idx, fg_idx, fg):
    """Assemble one 9-tile mosaic sample.

    Args:
        bg_idx: indexes into background_data for the 8 background tiles.
        fg_idx: index into foreground_data for the single foreground tile.
        fg: position (0-8) at which the foreground tile is placed.

    Returns:
        (tiles, label): a stacked (9, C, H, W) tensor of tiles and the
        foreground label re-based so foreground classes start at 0.
    """
    tiles = []
    bg_pos = 0
    for slot in range(9):
        if slot == fg:
            tiles.append(foreground_data[fg_idx])
        else:
            tiles.append(background_data[bg_idx[bg_pos]])
            bg_pos += 1
    # Shift the label so class ids starting at fg1 become 0, 1, 2.
    label = foreground_label[fg_idx] - fg1
    return torch.stack(tiles), label
# + id="GSxB4JfcjHgX"
# Build 30,000 training mosaics: 8 background tiles + 1 foreground tile each.
desired_num = 30000
mosaic_list_of_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images
fore_idx =[] # list of indexes at which foreground image is present in a mosaic image i.e from 0 to 9
mosaic_label=[] # label of mosaic image = foreground class present in that mosaic
for i in range(desired_num):
    # Seed per sample so the generated dataset is reproducible.
    np.random.seed(i)
    # NOTE(review): index ranges 0..35000 / 0..15000 assume at least that
    # many background/foreground images were collected above — confirm.
    bg_idx = np.random.randint(0,35000,8)
    fg_idx = np.random.randint(0,15000)
    fg = np.random.randint(0,9)
    fore_idx.append(fg)
    image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
    mosaic_list_of_images.append(image_list)
    mosaic_label.append(label)
# + id="tQ8xfxc5h1tW"
class MosaicDataset(Dataset):
    """In-memory dataset of mosaic images.

    Each item is the triple (mosaic, label, foreground position index).
    """

    def __init__(self, mosaic_list_of_images, mosaic_label, fore_idx):
        """Store the parallel lists of mosaics, labels and focus positions."""
        self.mosaic = mosaic_list_of_images
        self.label = mosaic_label
        self.fore_idx = fore_idx

    def __len__(self):
        # One label per mosaic sample.
        return len(self.label)

    def __getitem__(self, idx):
        return self.mosaic[idx], self.label[idx], self.fore_idx[idx]
batch = 250
# Wrap the generated training mosaics in a shuffled DataLoader.
msd = MosaicDataset(mosaic_list_of_images, mosaic_label , fore_idx)
train_loader = DataLoader( msd,batch_size= batch ,shuffle=True)
# + id="Xh1vCTZYjFLN"
# Build 10,000 test mosaics with seeds disjoint from the training set (offset 30000).
test_images =[] #list of mosaic images, each mosaic image is saved as laist of 9 images
fore_idx_test =[] #list of indexes at which foreground image is present in a mosaic image
test_label=[] # label of mosaic image = foreground class present in that mosaic
for i in range(10000):
    np.random.seed(i+30000)
    bg_idx = np.random.randint(0,35000,8)
    fg_idx = np.random.randint(0,15000)
    fg = np.random.randint(0,9)
    fore_idx_test.append(fg)
    image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
    test_images.append(image_list)
    test_label.append(label)
# + id="bvP4f8f3jiG5"
test_data = MosaicDataset(test_images,test_label,fore_idx_test)
test_loader = DataLoader( test_data,batch_size= batch ,shuffle=False)
# + [markdown] id="ARLPPASQ_2gB"
# # models
# + id="T1Y0mCQzjSV0"
class Module1(nn.Module):
    """'Where' network: scores each of the 9 mosaic tiles with a shared
    CNN and returns the attention-weighted average image plus the weights.
    """

    def __init__(self):
        super(Module1, self).__init__()
        # Small LeNet-style CNN shared across all 9 tiles.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.fc4 = nn.Linear(10, 1)

    def forward(self, z):
        """Score the tiles of a (B, 9, 3, 32, 32) mosaic batch.

        Returns:
            y: (B, 3, 32, 32) attention-weighted average of the tiles.
            x: (B, 9) softmax attention weights (alphas).
        """
        # FIX: size the buffers from the actual batch (the original used
        # the global `batch`, which breaks on a final partial batch) and
        # allocate on the input's device instead of hard-coding "cuda"
        # (identical behavior when the inputs are on the GPU).
        n = z.shape[0]
        x = torch.zeros([n, 9], dtype=torch.float64, device=z.device)
        y = torch.zeros([n, 3, 32, 32], dtype=torch.float64, device=z.device)
        for i in range(9):
            x[:, i] = self.helper(z[:, i])[:, 0]
        x = F.softmax(x, dim=1)  # attention weights (alphas)
        # FIX: dropped two dead statements (`x1 = x[:,0]` and a torch.mul
        # whose result was discarded); they had no effect on the output.
        for i in range(9):
            alpha_i = x[:, i]
            y = y + torch.mul(alpha_i[:, None, None, None], z[:, i])
        return y, x

    def helper(self, x):
        """Score one (B, 3, 32, 32) tile with the shared CNN -> (B, 1)."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return x
# + id="zt9YwV5rjSK_"
class Module2(nn.Module):
    """'What' network: LeNet-style classifier mapping a (B, 3, 32, 32)
    averaged mosaic image to 3 foreground-class logits.
    """

    def __init__(self):
        super(Module2, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.fc4 = nn.Linear(10, 3)

    def forward(self, y):
        """Return 3-way class logits for the averaged image batch `y`."""
        out = self.pool(F.relu(self.conv1(y)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 5 * 5)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = F.relu(self.fc3(out))
        return self.fc4(out)
# + id="ehAfQnNwgFYX"
def calculate_attn_loss(dataloader, what, where, criter):
    """Evaluate the what/where networks over a dataloader.

    Args:
        dataloader: yields (inputs, labels, foreground-index) batches.
        what: classifier network (Module2).
        where: attention network (Module1).
        criter: loss criterion (e.g. CrossEntropyLoss).

    Returns:
        (mean loss per batch, analyse_data() focus/prediction stats,
         number correct, total examples, accuracy)
    """
    what.eval()
    where.eval()
    r_loss = 0
    alphas = []
    lbls = []
    pred = []
    fidices = []
    correct = 0
    tot = 0
    num_batches = 0
    with torch.no_grad():
        for i, data in enumerate(dataloader, 0):
            inputs, labels, fidx = data
            lbls.append(labels)
            fidices.append(fidx)
            inputs = inputs.double()
            inputs, labels = inputs.to("cuda"), labels.to("cuda")
            avg, alpha = where(inputs)
            outputs = what(avg)
            _, predicted = torch.max(outputs.data, 1)
            correct += sum(predicted == labels)
            tot += len(predicted)
            pred.append(predicted.cpu().numpy())
            alphas.append(alpha.cpu().numpy())
            loss = criter(outputs, labels)
            r_loss += loss.item()
            num_batches += 1
    alphas = np.concatenate(alphas, axis=0)
    pred = np.concatenate(pred, axis=0)
    lbls = np.concatenate(lbls, axis=0)
    fidices = np.concatenate(fidices, axis=0)
    analysis = analyse_data(alphas, lbls, pred, fidices)
    # FIX: average over the number of batches; the original divided by the
    # last enumerate index (num_batches - 1), which is off by one and
    # raises ZeroDivisionError for a single-batch loader.
    return r_loss / num_batches, analysis, correct.item(), tot, correct.item() / tot
# + id="6e9HQJMzxBhp"
def analyse_data(alphas, lbls, predicted, f_idx):
    """Tally focus/prediction outcomes for a batch of attention outputs.

    Returns [ftpt, ffpt, ftpf, ffpf, amth, alth] where "focus true/false"
    means the argmax attention weight landed on the foreground tile or
    not, "pred true/false" means the classification was right or wrong,
    and amth/alth count samples whose peak weight is >= 0.5 vs < 0.5.
    """
    ftpt = ffpt = ftpf = ffpf = amth = alth = 0
    for k in range(len(predicted)):
        focus = np.argmax(alphas[k])
        # Was the attention confident (peak weight at least 0.5)?
        if alphas[k][focus] >= 0.5:
            amth += 1
        else:
            alth += 1
        focus_true = focus == f_idx[k]
        pred_true = predicted[k] == lbls[k]
        if focus_true and pred_true:
            ftpt += 1
        elif (not focus_true) and pred_true:
            ffpt += 1
        elif focus_true and (not pred_true):
            ftpf += 1
        else:
            ffpf += 1
    return [ftpt, ffpt, ftpf, ffpf, amth, alth]
# + id="UJbdW7cijRsr"
# Seed both networks identically so their shared-architecture layers start
# from the same initialization, then move them to the GPU in float64.
torch.manual_seed(1234)
where_net = Module1().double()
where_net = where_net.to("cuda")
# print(net.parameters)
torch.manual_seed(1234)
what_net = Module2().double()
what_net = what_net.to("cuda")
# + [markdown] id="a2AlGgRa_6_H"
# # training
# + id="MOfxUJZ_eFKw" colab={"base_uri": "https://localhost:8080/"} outputId="a7482518-31d9-4309-925c-09d76354eb36"
# instantiate optimizer
# Two optimizers so where_net and what_net can be updated on alternating epochs.
optimizer_where = optim.RMSprop(where_net.parameters(),lr =0.001)#,momentum=0.9)#,nesterov=True)
optimizer_what = optim.RMSprop(what_net.parameters(), lr=0.001)#,momentum=0.9)#,nesterov=True)
# Halve the learning rate when the monitored loss plateaus for 3 epochs.
scheduler_where = optim.lr_scheduler.ReduceLROnPlateau(optimizer_where, mode='min', factor=0.5, patience=3,min_lr=5e-6,verbose=True)
scheduler_what = optim.lr_scheduler.ReduceLROnPlateau(optimizer_what, mode='min', factor=0.5, patience=3,min_lr=5e-6, verbose=True)
criterion = nn.CrossEntropyLoss()
acti = []
analysis_data_tr = []   # per-epoch focus/prediction stats on the training set
analysis_data_tst = []  # per-epoch focus/prediction stats on the test set
loss_curi_tr = []       # per-epoch mean training loss
loss_curi_tst = []      # per-epoch mean test loss
epochs = 130
every_what_epoch = 1    # swap which network is updated every N epochs
# calculate zeroth epoch loss and FTPT values
running_loss,anlys_data,correct,total,accuracy = calculate_attn_loss(train_loader,what_net,where_net,criterion)
print('training epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(0,running_loss,correct,total,accuracy))
loss_curi_tr.append(running_loss)
analysis_data_tr.append(anlys_data)
running_loss,anlys_data,correct,total,accuracy = calculate_attn_loss(test_loader,what_net,where_net,criterion)
print('test epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(0,running_loss,correct,total,accuracy))
loss_curi_tst.append(running_loss)
analysis_data_tst.append(anlys_data)
# training starts
for epoch in range(epochs): # loop over the dataset multiple times
    ep_lossi = []
    running_loss = 0.0
    what_net.train()
    where_net.train()
    # Announce which sub-network is being optimized this epoch.
    if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
        print(epoch+1,"updating where_net, what_net is freezed")
        print("--"*40)
    elif ((epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
        print(epoch+1,"updating what_net, where_net is freezed")
        print("--"*40)
    for i, data in enumerate(train_loader, 0):
        # get the inputs
        inputs, labels,_ = data
        inputs = inputs.double()
        inputs, labels = inputs.to("cuda"),labels.to("cuda")
        # zero the parameter gradients
        optimizer_where.zero_grad()
        optimizer_what.zero_grad()
        # forward + backward + optimize
        avg, alpha = where_net(inputs)
        outputs = what_net(avg)
        loss = criterion(outputs, labels)
        # print statistics
        running_loss += loss.item()
        loss.backward()
        # Gradients flow through both nets, but only one optimizer steps.
        if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
            optimizer_where.step()
        elif ( (epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
            optimizer_what.step()
    # Re-evaluate on both splits after this epoch's updates.
    running_loss_tr,anls_data,correct,total,accuracy = calculate_attn_loss(train_loader,what_net,where_net,criterion)
    analysis_data_tr.append(anls_data)
    loss_curi_tr.append(running_loss_tr) #loss per epoch
    print('training epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(epoch+1,running_loss_tr,correct,total,accuracy))
    running_loss_tst,anls_data,correct,total,accuracy = calculate_attn_loss(test_loader,what_net,where_net,criterion)
    analysis_data_tst.append(anls_data)
    loss_curi_tst.append(running_loss_tst) #loss per epoch
    print('test epoch: [%d ] loss: %.3f correct: %.3f, total: %.3f, accuracy: %.3f' %(epoch+1,running_loss_tst,correct,total,accuracy))
    # Early stop once training loss is essentially zero.
    if running_loss_tr<=0.01:
        break
    # NOTE(review): the scheduler stepped here belongs to the *other*
    # network than the optimizer stepped above — confirm this
    # cross-scheduling is intentional.
    if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
        scheduler_what.step(running_loss_tst)
    elif ( (epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
        scheduler_where.step(running_loss_tst)
print('Finished Training run ')
analysis_data_tr = np.array(analysis_data_tr)
analysis_data_tst = np.array(analysis_data_tst)
# + id="dwRqaz3Wxc23" colab={"base_uri": "https://localhost:8080/", "height": 513} outputId="6a625cce-94ff-405b-ddfb-2ec2974e59a4"
fig = plt.figure(figsize = (12,8) )
# Dotted vertical lines mark the epochs where the updated network swaps.
vline_list = np.arange(every_what_epoch, epoch + every_what_epoch, every_what_epoch )
# train_loss = np.random.randn(340)
# test_loss = np.random.randn(340)
epoch_list = np.arange(0, epoch+2)
plt.plot(epoch_list,loss_curi_tr, label='train_loss')
plt.plot(epoch_list,loss_curi_tst, label='test_loss')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("CE Loss")
plt.vlines(vline_list,min(min(loss_curi_tr),min(loss_curi_tst)), max(max(loss_curi_tst),max(loss_curi_tr)),linestyles='dotted')
plt.title("train loss vs test loss")
plt.show()
fig.savefig("train_test_loss_plot.pdf")
# + id="EZtPJ-rnRr-x" colab={"base_uri": "https://localhost:8080/"} outputId="1ec0cb8b-baa3-4119-9136-747c04ccfc2e"
analysis_data_tr
# + id="vjUvEFpv0R0h"
# Re-conversion is idempotent (these are already arrays if training
# completed normally above).
analysis_data_tr = np.array(analysis_data_tr)
analysis_data_tst = np.array(analysis_data_tst)
# + id="GR7i12R_QymL"
# Tabulate the per-epoch focus statistics for the train and test splits.
columns = ["epochs", "argmax > 0.5" ,"argmax < 0.5", "focus_true_pred_true", "focus_false_pred_true", "focus_true_pred_false", "focus_false_pred_false" ]
df_train = pd.DataFrame()
df_test = pd.DataFrame()
df_train[columns[0]] = np.arange(0,epoch+2)
# analyse_data() returns [ftpt, ffpt, ftpf, ffpf, amth, alth]; the last
# two entries are the argmax>=0.5 / argmax<0.5 counts.
df_train[columns[1]] = analysis_data_tr[:,-2]
df_train[columns[2]] = analysis_data_tr[:,-1]
df_train[columns[3]] = analysis_data_tr[:,0]
df_train[columns[4]] = analysis_data_tr[:,1]
df_train[columns[5]] = analysis_data_tr[:,2]
df_train[columns[6]] = analysis_data_tr[:,3]
df_test[columns[0]] = np.arange(0,epoch+2)
df_test[columns[1]] = analysis_data_tst[:,-2]
df_test[columns[2]] = analysis_data_tst[:,-1]
df_test[columns[3]] = analysis_data_tst[:,0]
df_test[columns[4]] = analysis_data_tst[:,1]
df_test[columns[5]] = analysis_data_tst[:,2]
df_test[columns[6]] = analysis_data_tst[:,3]
# + id="oiT9cXV6RjCf" colab={"base_uri": "https://localhost:8080/", "height": 424} outputId="5a28cdc1-8601-4907-b9d6-922fbb9cae47"
df_train
# + id="WtlIwUd8SdRW" colab={"base_uri": "https://localhost:8080/", "height": 424} outputId="379bfa37-72a3-49eb-f182-169c8bbbbd88"
df_test
# + id="gYIlDhfYSg4y" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="552a3df9-6fe7-489e-ead2-8b3dbe36e5a3"
# Focus statistics over epochs, evaluated on the TRAINING set.
plt.figure(figsize=(12,8))
plt.plot(df_train[columns[0]],df_train[columns[1]], label='argmax > 0.5')
plt.plot(df_train[columns[0]],df_train[columns[2]], label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.title("On Training set")
plt.vlines(vline_list,min(min(df_train[columns[1]]),min(df_train[columns[2]])), max(max(df_train[columns[1]]),max(df_train[columns[2]])),linestyles='dotted')
plt.show()
# FTPT / FFPT / FTPF / FFPF counts per epoch (training set).
plt.figure(figsize=(12,8))
plt.plot(df_train[columns[0]],df_train[columns[3]], label ="focus_true_pred_true ")
plt.plot(df_train[columns[0]],df_train[columns[4]], label ="focus_false_pred_true ")
plt.plot(df_train[columns[0]],df_train[columns[5]], label ="focus_true_pred_false ")
plt.plot(df_train[columns[0]],df_train[columns[6]], label ="focus_false_pred_false ")
plt.title("On Training set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("training data")
plt.vlines(vline_list,min(min(df_train[columns[3]]),min(df_train[columns[4]]),min(df_train[columns[5]]),min(df_train[columns[6]])), max(max(df_train[columns[3]]),max(df_train[columns[4]]),max(df_train[columns[5]]),max(df_train[columns[6]])),linestyles='dotted')
plt.show()
# + id="UPvuau_Id_Fi" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="8cd23854-8124-4fd2-cba3-eea970e415f2"
# Focus statistics over epochs, evaluated on the TEST set.
plt.figure(figsize=(12,8))
plt.plot(df_test[columns[0]],df_test[columns[1]], label='argmax > 0.5')
plt.plot(df_test[columns[0]],df_test[columns[2]], label='argmax < 0.5')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
# FIX: the axis label and title said "training data"/"On Training set",
# copy-pasted from the training plots above, although these plots are
# drawn from df_test — relabel them for the test set.
plt.ylabel("test data")
plt.title("On Test set")
plt.vlines(vline_list,min(min(df_test[columns[1]]),min(df_test[columns[2]])), max(max(df_test[columns[1]]),max(df_test[columns[2]])),linestyles='dotted')
plt.show()
# FTPT / FFPT / FTPF / FFPF counts per epoch (test set).
plt.figure(figsize=(12,8))
plt.plot(df_test[columns[0]],df_test[columns[3]], label ="focus_true_pred_true ")
plt.plot(df_test[columns[0]],df_test[columns[4]], label ="focus_false_pred_true ")
plt.plot(df_test[columns[0]],df_test[columns[5]], label ="focus_true_pred_false ")
plt.plot(df_test[columns[0]],df_test[columns[6]], label ="focus_false_pred_false ")
plt.title("On Test set")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("epochs")
plt.ylabel("test data")
plt.vlines(vline_list,min(min(df_test[columns[3]]),min(df_test[columns[4]]),min(df_test[columns[5]]),min(df_test[columns[6]])), max(max(df_test[columns[3]]),max(df_test[columns[4]]),max(df_test[columns[5]]),max(df_test[columns[6]])),linestyles='dotted')
plt.show()
# + id="uC3v4I_LeviA"
|
1_mosaic_data_attention_experiments/3_stage_wise_training/alternate_minimization/on CIFAR data/old_notebooks/alternate_focus_first_classify_later_RMSprop_scheduling_every_1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import statements
import requests
from bs4 import BeautifulSoup
# fetch web page
r = requests.get('https://www.udacity.com/courses/all')
soup = BeautifulSoup(r.text, 'lxml')
# Find all course summaries
summaries = soup.find_all("li", {'class': "card-list_catalogCardListItem__aUQtx"})
print('Number of Courses:', len(summaries))
# +
import re
# Remove punctuation characters
# NOTE(review): `text` is not defined in this notebook as shown — it is
# presumably produced by an earlier lesson cell; confirm before running.
text = re.sub(r"[^0-9a-zA-Z]", " ", text)
print(text)
# +
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
# Tokenize, then drop English stopwords (requires the NLTK corpora downloads).
words = word_tokenize(text)
words = [word for word in words if word not in stopwords.words('english')]
# +
from nltk import pos_tag, ne_chunk
# Part-of-speech tag a sentence, then run named-entity chunking on the tags.
sentence = word_tokenize("Just a sentence.")
res = pos_tag(sentence)
ne_chunk(res)
# +
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
# Reduce each token to its stem (crude suffix stripping) and its lemma.
stemmed = [PorterStemmer().stem(w) for w in words]
# FIX: the WordNetLemmatizer method is spelled `lemmatize`; the original
# called `lematize`, which raises AttributeError at runtime.
lemmed = [WordNetLemmatizer().lemmatize(w) for w in words]
|
lessons/NLP Pipelines/notes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:ner]
# language: python
# name: conda-env-ner-py
# ---
# Read from a local copy of the ClaimReview Database
# ### Installation script
# ```
# conda create -n ner python=3.6
# source activate ner
# conda install -c conda-forge spacy -y
# conda install ipython jupyter nb_conda nltk numpy -y
# conda install -c conda-forge rake_nltk -y
# python -m spacy download en
# python -m spacy download en_core_web_sm
# python -m spacy download en_core_web_lg
# ```
# +
import spacy
# NOTE(review): the 'en' shortcut was removed in spaCy v3 — newer versions
# need spacy.load('en_core_web_sm') instead; confirm the pinned version.
nlp = spacy.load('en')
doc = nlp(u'Apple is looking at buying U.K. startup for $1 billion')
# Print each named entity with its character span and label.
for ent in doc.ents:
    print(ent.text, ent.start_char, ent.end_char, ent.label_)
# +
import spacy
from spacy.symbols import nsubj, VERB
nlp = spacy.load('en_core_web_sm')
doc = nlp(u"Autonomous cars shift insurance liability toward manufacturers")
# Finding a verb with a subject from below — good
verbs = set()
for possible_subject in doc:
    # Keep the head verb of every token that is a nominal subject.
    if possible_subject.dep == nsubj and possible_subject.head.pos == VERB:
        verbs.add(possible_subject.head)
print(verbs)
# +
# Load the local ClaimReview dump: one JSON-LD claim per line.
fc_path = "fact_checks_20180502.txt"
with open(fc_path) as f:
    fc_raw = f.readlines()
# -
print("No. of Claims:", len(fc_raw))
# Functions to normalise the text
# +
import re
import json
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
lemmatizer = WordNetLemmatizer()
def strip_html(data):
    """Remove HTML tags from *data* using a non-greedy tag regex."""
    return re.compile(r'<.*?>').sub('', data)
def clean_text(data):
    """Lowercase, strip punctuation/non-letters and English stopwords.

    Returns a single space-separated string of the surviving words.
    """
    # NOTE(review): the repeated .replace(" ", " ") calls look like
    # double-space collapses whose whitespace was mangled in transit —
    # confirm against the original notebook.
    text = re.sub(r'[^\w\s]', ' ', data.lower()).replace("\n", "").replace(" ", " ")
    # Keep letters and spaces only (drops digits and underscores).
    text = "".join([c for c in text if (c.isalpha() or c == " ")])
    text = text.split(" ")
    output = ""
    for word in text:
        # stopwords.words() is re-fetched on every call; slow but simple.
        if word not in stopwords.words("english"):
            output = output + " " + word
    return output.strip().replace(" ", " ")
def nltk2wn_tag(nltk_tag):
    """Map a Penn Treebank POS tag prefix to the WordNet POS constant.

    Returns None for tags with no WordNet equivalent.
    """
    prefix_map = {
        'J': wordnet.ADJ,
        'V': wordnet.VERB,
        'N': wordnet.NOUN,
        'R': wordnet.ADV,
    }
    for prefix, wn_pos in prefix_map.items():
        if nltk_tag.startswith(prefix):
            return wn_pos
    return None
def lemmatize_sentence(sentence):
    """Lemmatize *sentence* word by word using POS-aware WordNet tags."""
    tagged = nltk.pos_tag(nltk.word_tokenize(sentence))
    lemmas = []
    for word, penn_tag in tagged:
        wn_tag = nltk2wn_tag(penn_tag)
        if wn_tag is None:
            # No WordNet POS: keep the surface form unchanged.
            lemmas.append(word)
        else:
            lemmas.append(lemmatizer.lemmatize(word, wn_tag))
    return " ".join(lemmas)
def norm_text(data,):
    """Full normalisation pipeline: strip HTML, clean, then lemmatize."""
    return lemmatize_sentence(clean_text(strip_html(data)))
# +
from rake_nltk import Rake
# Uses stopwords for english from NLTK, and all puntuation characters by
# default
r = Rake()
# Extraction given the text.
r.extract_keywords_from_text("In August 2018, adhesive replicas of President <NAME>'s recently-destroyed star were placed on the Walk of Fame on Hollywood Boulevard.")
# # Extraction given the list of strings where each string is a sentence.
# r.extract_keywords_from_sentences(<list of sentences>)
# To get keyword phrases ranked highest to lowest.
print(r.get_ranked_phrases())
# To get keyword phrases ranked highest to lowest with scores.
print(r.get_ranked_phrases_with_scores())
# +
# # ATTEMPTED TO EXTRACT THE VERBS WHICH IS NOT VERY HELPFUL
# import numpy as np
# random_point = np.random.randint(len(fc_raw)-200)
# print(random_point)
# for fc in fc_raw[random_point:random_point+10]:
# fc = fc.strip("\n")
# fc = fc.replace("</script>", "").replace('<script type="application/ld+json">', "")
# fc = json.loads(fc)
# claim = fc["claimReviewed"]
# # date_published = fc["datePublished"]
# review_url = fc["url"]
# print("Claim:", claim)
# r.extract_keywords_from_text(claim)
# r.get_ranked_phrases()
# print(r.get_ranked_phrases_with_scores())
# doc = nlp(claim)
# # EXTRACTING VERBS WHICH IS NOT VERY HELPFUL
# print([(e.text, e.start_char, e.end_char, e.label_) for e in doc.ents])
# verbs = set()
# for possible_subject in doc:
# if possible_subject.dep == nsubj and possible_subject.head.pos == VERB:
# verbs.add(possible_subject.head)
# print(verbs)
# print()
# -
# # DEMO
# Candidate demo inputs; all but one are commented out for this run.
sentence_array = [
    # verbatim
    # 'Actor <NAME> said that the United States is "morally upside down."',
    # 'Liberty University students "were mandated" to attend <NAME>’s presidential \
    # announcement "or they would be fined.',
    # 'A photograph shows a baseball stadium repurposed as a residential neighborhood.',
    # 'Folding a $5 bill in a specific pattern will reveal a secret image of a stack of pancakes.',
    # paraphrase
    # '<NAME> mocked "someone with a disability."',
    'The fact that President Trump mocked someone with a disability shows how idiotic he is.',
    # 'We can see how <NAME> is a coward when he blocked all incoming calls and faxes.',
    # 'Being tough on drug is stupid when you know that more people die from prescription \
    # narcotic painkillers than from heroin and cocaine combined.',
    # '<NAME> was arrested for bringing drugs to Japan.'
]
# here I am comparing the text with every claim.
# If the keyword is spread across sentences they need to be retested again.
text = "".join(sentence_array)
from rake_nltk import Rake
# Extract RAKE-scored keyword phrases from the demo text.
r1 = Rake()
r1.extract_keywords_from_text(text)
r1_scored = r1.get_ranked_phrases_with_scores()
print(r1_scored)
# +
import spacy
import numpy as np
import matplotlib.pyplot as plt
from rake_nltk import Rake
# Large English model: ships real word vectors, needed for similarity below.
nlp = spacy.load('en_core_web_lg')
def calculate_similarity_score(value_mesh, r1_scored, r2_scored):
    """Score how well the text's keywords cover the claim's keywords.

    Rows of value_mesh[0] hold, per text keyword, the similarity to every
    claim keyword; take each claim keyword's best match across the text,
    then average those maxima weighted by the claim keywords' RAKE scores.
    `r2_scored` is unused but kept for interface compatibility.
    """
    best_match = np.maximum.reduce(list(value_mesh[0]))
    weights = np.array([pair[0] for pair in r1_scored])
    return np.sum(best_match * weights) / np.sum(weights)
def phrase_vect(text):
    """Max-pool the spaCy token vectors of *text* into a single vector."""
    vectors = [token.vector for token in nlp(text)]
    return np.maximum.reduce(vectors)
    # Alternative (concatenated min+max pooling) left from experimentation:
    # minpool = np.minimum.reduce([token.vector for token in tokens])
    # # https://stackoverflow.com/questions/21816433/
    # result = np.concatenate((minpool,maxpool))
    # return result
def meshgrid(x, y):
    """Pair every element of x with every element of y, grid-style.

    Returns (X, Y): X repeats the row `x` once per element of y, and Y
    holds a row of each y element repeated len(x) times — mirroring
    numpy.meshgrid for arbitrary Python objects.
    """
    xs = [list(x) for _ in y]
    ys = [[val] * len(x) for val in y]
    return (xs, ys)
def cosine(u, v):
    """Cosine similarity between vectors u and v."""
    denom = np.linalg.norm(u) * np.linalg.norm(v)
    return np.dot(u, v) / denom
def calculate_claim_text(claim, text):
    """Score how similar a fact-check *claim* is to a body of *text*.

    Both inputs are reduced to RAKE keyword phrases; every claim phrase is
    compared to every text phrase via cosine similarity of max-pooled word
    vectors (phrase_vect), and the per-claim-phrase maxima are combined into
    one weighted score by calculate_similarity_score().
    """
    # RAKE keywords for the claim: list of (score, phrase) tuples.
    r1 = Rake()
    r1.extract_keywords_from_text(claim)
    r1_scored = r1.get_ranked_phrases_with_scores()
    print(r1_scored)
    # RAKE keywords for the reference text.
    r2 = Rake()
    r2.extract_keywords_from_text(text)
    r2_scored = r2.get_ranked_phrases_with_scores()
    # print(r2_scored)
    # assign vectors
    # Each entry becomes [score, phrase, pooled word vector].
    r1_scored_vec = [[claim_keyword_tuple[0],
                      claim_keyword_tuple[1],
                      phrase_vect(claim_keyword_tuple[1])] for claim_keyword_tuple in r1_scored]
    # print(len(r1_scored_vec))
    # print(len(r1_scored_vec[0]))
    r2_scored_vec = [[text_keyword_tuple[0],
                      text_keyword_tuple[1],
                      phrase_vect(text_keyword_tuple[1])] for text_keyword_tuple in r2_scored]
    # print(len(r1_scored_vec))
    # print(len(r1_scored_vec[0]))
    # compare statements
    # pair_mesh[0][i][j] is the j-th claim entry, pair_mesh[1][i][j] the
    # i-th text entry; value_mesh[0] holds their cosine similarities.
    pair_mesh = meshgrid(r1_scored_vec, r2_scored_vec)
    value_mesh = meshgrid([0]*len(r1_scored_vec), [0]*len(r2_scored_vec))
    import numpy as np  # redundant re-import (np is already module-level); kept as-is
    # i ranges over text keywords (rows), j over claim keywords (columns).
    for i,_ in enumerate(pair_mesh[0]):
        for j,_ in enumerate(pair_mesh[0][0]):
            vector1 = pair_mesh[0][i][j][2]
            vector2 = pair_mesh[1][i][j][2]
            value_mesh[0][i][j] = cosine(vector1, vector2)
    return calculate_similarity_score(value_mesh, r1_scored, r2_scored)
# +
import numpy as np
# NOTE(review): the random draw below is immediately overwritten by the
# hard-coded 100 — looks like a debugging leftover; remove one of the two.
random_point = np.random.randint(len(fc_raw)-200)
random_point = 100
print(random_point)
similarity_array = []
# fc_raw, json, and text are defined in earlier cells: fc_raw appears to be
# raw <script type="application/ld+json"> lines of ClaimReview markup —
# confirm against the loading cell.
for fc in fc_raw[random_point:random_point+200]:
    fc = fc.strip("\n")
    # Strip the surrounding <script> tags so only the JSON payload remains.
    fc = fc.replace("</script>", "").replace('<script type="application/ld+json">', "")
    fc = json.loads(fc)
    claim = fc["claimReviewed"]
    # date_published = fc["datePublished"]
    review_url = fc["url"]
    print("Claim:", claim)
    # Weighted keyword-similarity of this claim against the article text.
    similarity = calculate_claim_text(claim, text)
    print(similarity)
    similarity_array.append(similarity)
# Plot the similarity score for each of the 200 sampled claims.
plt.figure(figsize=(15,4))
plt.plot(similarity_array)
plt.show()
# -
|
browse-rake_keyword-wordvec_compare.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # OK Boomer, Are We Really Different?: <br>Comparing the Time Use of Americans of Various Generations.
# # Abstract
#
#    The term "Ok Boomer" arose in recent years as a counter to the prevailing image of Millennials as being the lazy or privileged generation. This clashing is a result of decades of a generation gap that exists between the Millennials who the youngest have been entering the work force, and the Baby Boomers who the youngest are leaving the work force. To verify whether or not there actually exists a difference in laziness or the time spent working or in leisure, a comparison was made in the 3 generations from Baby Boomers to Millennials. The data found in the Bureau of Labor Statistics’ American Time Use Survey (ATUS) yielded results showing no statistical significance in the difference in working hours or time spent in leisure for the 3 generations by means of the ANOVA test. While the veracity of laziness of one generation was not determined, it was concluded that one generation is not so different from other generations in their use of time at work, making the possible inference that laziness is not a trait of a single generation.
#
# # Motivation
# We experience a type of fencing across generations in Social Media. The slang phrase "Ok, Boomer" rose over recent years. By definition from [Dictionary.com](https://www.dictionary.com/e/slang/ok-boomer/?itm_source=parsely-api), Ok Boomer is a viral internet slang phrase used, often in a humorous or ironic manner, to call out or dismiss out-of-touch or closed-minded opinions associated with the baby boomer generation and older people more generally. This analysis will attempt seek whether the experiences in time use are different across generations, and also determine if there are other factors alongside generations responsible for widening this perceived gap. Through this research we would be able to really dtermine if there really is any significant or worthwhile argument or prejudice regarding one's age or generation. Finally, through this analysis, we may learn what was the norm of the current incoming work force and compare whether the incoming work force's parents had any significant differences.
# # Data Selected for Analysis
# Data was collected from the [American Time Use Survey](https://www.bls.gov/tus/datafiles-0319.htm) from the U.S. Bureau of Labor Statistics (BLS). More information regarding the American Time Use Survey can be found [here](https://www.bls.gov/tus/atususersguide.pdf).
# The Datasets used were the:
# * [ATUS 2003-2019 Activity Summary File](https://www.bls.gov/tus/special.requests/atussum-0319.zip)
# * Contains information about total time spent doing each activity on a day
# * [Data Dictionary](https://www.bls.gov/tus/atuscpscodebk0319.pdf)
# * Contains information on the contents of Data, and the coding of column names
# * [Activity Lexicon](https://www.bls.gov/tus/lexiconnoex0318.pdf)
# * Contains information on translating the coded activities
#
# ** Note Data has been transformed so that the .dat file is converted into a .txt file, separately, before it is read here
#
# <br>
# <br>
#
# The data from BLS is in the public domain and is free to use without specific permission. [(Link)](https://www.bls.gov/bls/linksite.htm)
# This data is suitable in that it has anonymized survey participants and we can obtain the correct demographics and generations by adjusting age with the year of survey to obtain generations of survey responders.
# The generations have been categorized by definition from [Pew Research](https://www.pewresearch.org/fact-tank/2019/01/17/where-millennials-end-and-generation-z-begins/).
#
# | Generation | Year Born |
# |--------------|-----------|
# | Boomers | 1946-1964 |
# | Generation X | 1965-1980 |
# | Millennials | 1981-1996 |
# | Generation Z | 1997-2012 |
#
# # Important to Note
# The results from this analysis is in no way definitive. The norm that is mentioned from this document to all times in this study refers to the averages within data. There may be bias introduced in the analysis through the aggregation of survey results, and bias may have been introduced in the collection of data. Participants in survey was voluntary, and the participants do not remain consistent through the various years of collection.
#
# # Hypothesis
# The hypothesis of this research study is that, overall, there are no differences between generations. The time spent on leisure versus time spent on work should have been consistent throughout the years across different generations.
# # Background
# Millennials are, according to critics mentioned in [Time](https://time.com/4417064/millennial-ceo-success/), are "lazy, work-shy narcissists who lack loyalty and jump ship at the drop of a hat". However, in the article linked above, mentions that the above definition does not really define Millennials, and that placing a group of people born across nearly 2 decades in a single generation does not really make much sense. The post also argues that the desired "Work Life Balance" that most millennials place as the top priority does not really mean that millennials are lazy as unlimited sick days or unlimited vacation days has not reduced the productivity of millennial workers.
# On an interview on [NPR](https://www.npr.org/2019/10/03/766926986/why-all-those-criticisms-about-millennials-arent-necessarily-fair), it is mentioned that Millennials may have been moving out of their parents' home at an older age than past generations, but that may be explained by the different economic circumstances of purchasing a home and the higher number of young adults pursuing a college degree, pushing back the average working age.
# On [Pew Research](https://www.pewresearch.org/fact-tank/2018/03/16/how-millennials-compare-with-their-grandparents/#!7), there are several points that compare the average demographics of the various generations. The post mentions that there are more minorities, more women in the work force, more educated members, and more living in urban settings than past generations. It would be interesting to see how the hypothesis changes according to gender, race, education, and whether a person lives in an urban or rural setting.
# # Methodology
# Each generation would be compared with its adjacent generations according to how many hours, on average, are spent on each activity. The individual activities will be aggregated to get a general idea of what is spent on leisure, work or self investment in human capital (education or training). The differences between generations would also be compared via ANOVA, to gauge a statistical significance. ANOVA will be used instead of a common t-test so that a test for significance of multiple groups may be done. The comparisons will be visualized in a side-by-side column chart with a fixed y axis to quickly compare results.
# ### Reading the Dataset and Loading Dependencies
# +
# Import Packages
import numpy as np
import pandas as pd
import re
import scipy.stats as stats
import matplotlib.pyplot as plt
from matplotlib import rcParams
# Default figure size for all plots in this notebook.
rcParams['figure.figsize'] = [20, 10]
# Reading Dataset
# Preprocessed ATUS data (see Data/Preprocessing.ipynb); presumably one row
# per survey respondent with per-activity hour columns — confirm upstream.
worker = pd.read_csv("Data/processed_atus_data.csv")
# -
# ### Preprocessing
# Preprocessing was done separately, transforming the original file. The file was not included in this repository due to file size. The code used for the Preprocessing steps, along with the writing of the file can be viewed here:
#
# [data-512-final/Data/Preprocessing.ipynb](https://github.com/jameslee0920/data-512-final/blob/main/Data/Preprocessing.ipynb)
# ### Results
# Resulting dataset (table form)
# Mean daily hours per activity for each generation (table form).
activity_columns = ['sleep', 'house_work', 'child_care', 'work', 'education', 'leisure']
generation_worker_df = worker.groupby('generation')[activity_columns].mean().reset_index()
generation_worker_df
# +
# Visualization of table
def _generation_means(label):
    """Mean daily hours for one generation, ordered to match the x-axis
    labels below: Sleep, House Work, Child Care, Education, Work, Leisure."""
    row = generation_worker_df[generation_worker_df.generation == label].iloc[0]
    return [row['sleep'], row['house_work'], row['child_care'],
            row['education'], row['work'], row['leisure']]


# Positional .iloc[0] replaces the original fragile label-based [0]/[1]/[2]
# lookups, which only worked because the boolean filter happened to preserve
# the reset index labels; it also removes 18 copy-pasted lines.
Boomer_means = _generation_means('Boomers')
genx_means = _generation_means('Generation X')
millennials_means = _generation_means('Millennials')

ind = np.arange(6)  # one slot per activity category
width = 0.25        # bar width; three generations fit side by side per slot
plt.bar(ind, Boomer_means, width, label='Boomers')
plt.bar(ind + width, genx_means, width, label='Gen X')
plt.bar(ind + 2 * width, millennials_means, width, label='Millennials')
plt.ylabel('Hours', fontdict={'fontsize': 20})
plt.title('Average Hours Spent', fontdict={'fontsize': 20})
plt.xticks(ind + width / 1, ('Sleep', 'House Work', 'Child Care', 'Education', 'Work', 'Leisure'), fontsize=15)
plt.legend(loc='best')
plt.show()
# -
# ### Statistical Tests
# #### Differences in Time Spent in Work
# One-way ANOVA across the three generations.
# H0: all generations share the same mean daily hours for the activity;
# a large p-value fails to reject H0 (no significant difference).
stats.f_oneway(worker[worker.generation=="Boomers"]['work'],
               worker[worker.generation=="Generation X"]['work'],
               worker[worker.generation=="Millennials"]['work']
               )
# #### Differences in Time Spent in Leisure
stats.f_oneway(worker[worker.generation=="Boomers"]['leisure'],
               worker[worker.generation=="Generation X"]['leisure'],
               worker[worker.generation=="Millennials"]['leisure']
               )
# #### Differences in Time Spent in Sleep
stats.f_oneway(worker[worker.generation=="Boomers"]['sleep'],
               worker[worker.generation=="Generation X"]['sleep'],
               worker[worker.generation=="Millennials"]['sleep']
               )
# #### Differences in Time Spent in Education
stats.f_oneway(worker[worker.generation=="Boomers"]['education'],
               worker[worker.generation=="Generation X"]['education'],
               worker[worker.generation=="Millennials"]['education']
               )
# #### Differences in Time Spent in House Work
stats.f_oneway(worker[worker.generation=="Boomers"]['house_work'],
               worker[worker.generation=="Generation X"]['house_work'],
               worker[worker.generation=="Millennials"]['house_work']
               )
# #### Differences in Time Spent in Child Care
stats.f_oneway(worker[worker.generation=="Boomers"]['child_care'],
               worker[worker.generation=="Generation X"]['child_care'],
               worker[worker.generation=="Millennials"]['child_care']
               )
# <br>
#
#
#
#
# # Findings
# According to the graph and table displayed above, all 3 generations spend similar amounts of time in both leisure and work. This is further supported by the ANOVA Statistical test where the p value is very high, failing to reject the null hypothesis. This means that the different generations are not really that different, and that one generation is not at all lazy. However, Millennials can be blamed for the lack of housework as that appears to be significantly lower than other generations. Millennials appear to sleep a lot and not do much housework when compared to other generations.
# # Discussion
# The results shown here do not indicate any matters of causality. The results in here for example, do not indicate that, for example, a person does not do house work because he or she is a Millennials. Secondly, some parts of the results may require scrutiny. For example, in the results, we see that the "Differences in Time Spent in Child Care" is statistically significant. However, this could be a result of the large age range of the dataset. Because the dataset runs from 2003 to 2019, there is a disproportionate number of higher 30s or 40 year old Baby Boomers and a disproportionate number of lower 30s or 30 year old Millennials in the dataset. Childcare would accompany greater time spent, the younger the child is, and for baby boomers, their children would be closer to an independent adult than that for Millennials'. This reasoning would apply similarly for the time spent on education or training.
#
#
#
# # Conclusion
# The result of my analysis was relieving. In a world where there is an emphasis on differences among humans, I was relieved that the year we are born in does not bring about or even correlate to a certain characteristic. As the youngest in my family with siblings and parents much older than me, I found that while there are cultural differences, we share similar work ethics of putting in effort in our tasks.
# Millennials may be less inclined to waking up early or making their beds up after sleeping, but Millennials put in the same amount of work as other generations. This shows that while individually, we are different, but as humans we are more similar than we are different.
# # Extra
# #### Sex Separated Table
# +
# Mean daily hours per activity, broken down by generation and sex.
activity_cols = ['sleep', 'house_work', 'child_care', 'work', 'education', 'leisure']
sex_separated_generation_worker_df = (
    worker.groupby(['generation', 'Sex'])[activity_cols]
    .mean()
    .reset_index()
)
sex_separated_generation_worker_df
# -
# Interestingly, we see a large gap in almost all activities except sleep when the data is separated by sex. However, there appears to be a trend in the number of hours spent for work and education in that the gap between each sex appears to be diminishing for later generations. The same trend appears on house work as well at a slower pace.
# # Future Work
# The separation of sex brought an interesting point. There is much work to be done in separating by the unique demographics available to this data. Place of Residence (whether the place of residence is urban or rural) appears to be an interesting factor to work with and to see if there are any changes to the result.
|
Project.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Tce3stUlHN0L"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="form" id="tuOe1ymfHZPu"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="qFdPvlXBOdUN"
# # Introduction to Variables
# + [markdown] id="MfBg1C5NB3X0"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/guide/variable"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/variable.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/variable.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/variable.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] id="AKhB9CMxndDs"
# A TensorFlow **variable** is the recommended way to represent shared, persistent state your program manipulates. This guide covers how to create, update, and manage instances of `tf.Variable` in TensorFlow.
#
# Variables are created and tracked via the `tf.Variable` class. A `tf.Variable` represents a tensor whose value can be changed by running ops on it. Specific ops allow you to read and modify the values of this tensor. Higher level libraries like `tf.keras` use `tf.Variable` to store model parameters.
# + [markdown] id="xZoJJ4vdvTrD"
# ## Setup
#
# This notebook discusses variable placement. If you want to see on what device your variables are placed, uncomment this line.
# + id="7tUZJk7lDiGo"
import tensorflow as tf
# Uncomment to see where your variables get placed (see below)
# tf.debugging.set_log_device_placement(True)
# + [markdown] id="vORGXDarogWm"
# ## Create a variable
#
# To create a variable, provide an initial value. The `tf.Variable` will have the same `dtype` as the initialization value.
# + id="dsYXSqleojj7"
# A Variable is initialized from a tensor and adopts its dtype and shape.
my_tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]])
my_variable = tf.Variable(my_tensor)
# Variables can be all kinds of types, just like tensors
bool_variable = tf.Variable([False, False, False, True])
complex_variable = tf.Variable([5 + 4j, 6 + 1j])
# + [markdown] id="VQHwJ_Itoujf"
# A variable looks and acts like a tensor, and, in fact, is a data structure backed by a `tf.Tensor`. Like tensors, they have a `dtype` and a shape, and can be exported to NumPy.
# + id="GhNfPwCYpvlq"
print("Shape: ", my_variable.shape)
print("DType: ", my_variable.dtype)
print("As NumPy: ", my_variable.numpy())
# + [markdown] id="eZmSBYViqDoU"
# Most tensor operations work on variables as expected, although variables cannot be reshaped.
# + id="TrIaExVNp_LK"
print("A variable:", my_variable)
print("\nViewed as a tensor:", tf.convert_to_tensor(my_variable))
print("\nIndex of highest value:", tf.argmax(my_variable))
# This creates a new tensor; it does not reshape the variable.
print("\nCopying and reshaping: ", tf.reshape(my_variable, [1,4]))
# + [markdown] id="qbLCcG6Pc29Y"
# As noted above, variables are backed by tensors. You can reassign the tensor using `tf.Variable.assign`. Calling `assign` does not (usually) allocate a new tensor; instead, the existing tensor's memory is reused.
# + id="yeEpO309QbB2"
a = tf.Variable([2.0, 3.0])
# This will keep the same dtype, float32
a.assign([1, 2])
# Not allowed as it resizes the variable:
# assign() reuses the existing backing tensor, so the new value's shape
# must match the variable's shape.
try:
    a.assign([1.0, 2.0, 3.0])
except Exception as e:
    # Print just the exception type and message instead of a traceback.
    print(f"{type(e).__name__}: {e}")
# + [markdown] id="okeywjLdQ1tY"
# If you use a variable like a tensor in operations, you will usually operate on the backing tensor.
#
# Creating new variables from existing variables duplicates the backing tensors. Two variables will not share the same memory.
# + id="2CnfGc6ucbXc"
a = tf.Variable([2.0, 3.0])
# Create b based on the value of a
b = tf.Variable(a)
a.assign([5, 6])
# a and b are different
print(a.numpy())
print(b.numpy())
# There are other versions of assign
print(a.assign_add([2,3]).numpy()) # [7. 9.]
print(a.assign_sub([7,9]).numpy()) # [0. 0.]
# + [markdown] id="ZtzepotYUe7B"
# ## Lifecycles, naming, and watching
#
# In Python-based TensorFlow, `tf.Variable` instance have the same lifecycle as other Python objects. When there are no references to a variable it is automatically deallocated.
#
# Variables can also be named which can help you track and debug them. You can give two variables the same name.
# + id="VBFbzKj8RaPf"
# Create a and b; they will have the same name but will be backed by
# different tensors.
a = tf.Variable(my_tensor, name="Mark")
# A new variable with the same name, but different value
# Note that the scalar add is broadcast
b = tf.Variable(my_tensor + 1, name="Mark")
# These are elementwise-unequal, despite having the same name
print(a == b)
# + [markdown] id="789QikItVA_E"
# Variable names are preserved when saving and loading models. By default, variables in models will acquire unique variable names automatically, so you don't need to assign them yourself unless you want to.
#
# Although variables are important for differentiation, some variables will not need to be differentiated. You can turn off gradients for a variable by setting `trainable` to false at creation. An example of a variable that would not need gradients is a training step counter.
# + id="B5Sj1DqhbZvx"
step_counter = tf.Variable(1, trainable=False)
# + [markdown] id="DD_xfDLDTDNU"
# ## Placing variables and tensors
#
# For better performance, TensorFlow will attempt to place tensors and variables on the fastest device compatible with its `dtype`. This means most variables are placed on a GPU if one is available.
#
# However, you can override this. In this snippet, place a float tensor and a variable on the CPU, even if a GPU is available. By turning on device placement logging (see [Setup](#scrollTo=xZoJJ4vdvTrD)), you can see where the variable is placed.
#
# Note: Although manual placement works, using [distribution strategies](distributed_training.ipynb) can be a more convenient and scalable way to optimize your computation.
#
# If you run this notebook on different backends with and without a GPU you will see different logging. *Note that logging device placement must be turned on at the start of the session.*
# + id="2SjpD7wVUSBJ"
# Pin both tensor creation and the matmul to the CPU, even if a GPU exists.
with tf.device('CPU:0'):
    # Create some tensors
    a = tf.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    c = tf.matmul(a, b)
print(c)
# + [markdown] id="PXbh-p2BXKcr"
# It's possible to set the location of a variable or tensor on one device and do the computation on another device. This will introduce delay, as data needs to be copied between the devices.
#
# You might do this, however, if you had multiple GPU workers but only want one copy of the variables.
# + id="dgWHN3QSfNiQ"
# Variables live on the CPU while the multiply runs on the GPU, so the
# values are copied between devices for the computation.
with tf.device('CPU:0'):
    a = tf.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    b = tf.Variable([[1.0, 2.0, 3.0]])
with tf.device('GPU:0'):
    # Element-wise multiply
    k = a * b
print(k)
# + [markdown] id="fksvRaqoYfay"
# Note: Because `tf.config.set_soft_device_placement` is turned on by default, even if you run this code on a device without a GPU, it will still run. The multiplication step will happen on the CPU.
#
# For more on distributed training, refer to the [guide](distributed_training.ipynb).
# + [markdown] id="SzCkWlF2S4yo"
# ## Next steps
#
# To understand how variables are typically used, see our guide on [automatic differentiation](autodiff.ipynb).
|
site/en-snapshot/guide/variable.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lecture 06 - Strings and Dictionaries
#
# This lesson will be a double-shot of essential Python types: **strings** and **dictionaries**.
# # Strings
# One place where the Python language really shines is in the manipulation of strings.
# This section will cover some of Python's built-in string methods and formatting operations.
#
# Such string manipulation patterns come up often in the context of data science work, and is one big perk of Python in this context.
#
# ## String syntax
#
# You've already seen plenty of strings in examples during the previous lessons, but just to recap, strings in Python can be defined using either single or double quotations. They are functionally equivalent.
# Single- and double-quoted literals create identical str values.
x = 'DataCracy is meaningful!'
y = "DataCracy is meaningful!"
x == y  # -> True
# Double quotes are convenient if your string contains a single quote character (e.g. representing an apostrophe).
#
# Similarly, it's easy to create a string that contains double-quotes if you wrap it in single quotes:
print("Let's join DataCracy!")
print('DataCracy stands for "Data & Democracy"')
# If we try to put a single quote character inside a single-quoted string, Python gets confused:
# + tags=["raises-exception"]
'Let's join DataCracy!'
# -
"Let's join DataCracy!"
# We can fix this by **"escaping"** the single quote with a backslash.
'Let\'s join DataCracy!'
# The table below summarizes some important uses of the **backslash(escape)** `\` character.
#
# | What you type... | What you get | example | `print(example)` |
# |--------------|----------------|-------------------------|------------------------------|
# | `\'` | `'` | `'What\'s up?'` | `What's up?` |
# | `\"` | `"` | `"That's \"cool\""` | `That's "cool"` |
# | `\\` | `\` | `"Look, a mountain: /\\"` | `Look, a mountain: /\` |
# | `\n` | new line | `"1\n2 3"` | `1`<br/>`2 3` |
# The last sequence, `\n`, represents the ***newline character***. It causes Python to start a new line.
hello = "Hello\nDataCracy"
print(hello)
# In addition, Python's **triple quote** syntax for strings lets us include newlines literally (i.e. by just hitting `Enter` on our keyboard, rather than using the special '\n' sequence). We've already seen this in the docstrings we use to document our functions, but we can use them anywhere we want to define a string.
triplequoted_hello = """hello
DataCracy"""
print(triplequoted_hello)
triplequoted_hello == hello
# The `print()` function automatically adds a newline character unless we specify a value for the keyword argument **`end`** other than the default value of `'\n'`:
print("hello")
print("DataCracy")
print("hello", end='')
print("DataCracy", end='')
# ## Strings are Sequences
#
# Strings can be thought of as **sequences of characters**. Almost everything we've seen that we can do to a **list**, we can also do to a string.
# Indexing
program = 'DataCracy'
program[0]  # first character: 'D'
# Slicing
program[-3:]  # last three characters
# How long is this string?
len(program)
# Yes, we can even loop over them
[char+'! ' for char in program]
# But a major way in which they differ from lists is that they are ***immutable***. We can't modify them.
# + tags=["raises-exception"]
# Strings don't support item assignment, so this raises a TypeError.
program[0] = 'B'
# program.append doesn't work either
# -
# ## String Methods
#
# Like `list`, the type `str` has lots of very useful methods. I'll show just a few examples here.
# ALL CAPS
claim = "DataCracy is fun!"
claim.upper()
# all lowercase
claim.lower()
# Searching for the first index of a substring
claim.index('fun')
claim.startswith(program)
claim.endswith('fun!')
# ### Going between strings and lists: `.split()` and `.join()`
#
# **`str.split()`** turns a string into a list of smaller strings, breaking on whitespace by default. This is super useful for taking you from one big string to a list of words.
words = claim.split()  # split on runs of whitespace
words
# Occasionally you'll want to split on something other than whitespace:
datestr = '1956-01-31'
# Unpack the three '-'-separated fields straight into named variables.
year, month, day = datestr.split('-')
print(year)
print(month)
print(day)
# **`str.join()`** takes us in the other direction, sewing a list of strings up into one long string, using the string it was called on as a separator.
'/'.join([month, day, year])
# Yes, we can put unicode characters right in our string literals :)
' 👏 '.join([word.upper() for word in words])
# ## Building Strings with `.format()`
#
# Python lets us concatenate strings with the `+` operator.
program + ' means Data and Democracy'
# If we want to throw in any non-string objects, we have to be careful to call **`str()`** on them first!
# + tags=["raises-exception"]
planet = 'Pluto'
position = 9
planet + " is the " + position + "th planet in the solar system."
# -
planet + " is the " + str(position) + "th planet in the solar system."
# This is getting hard to read and annoying to type. **`str.format()`** to the rescue.
"{} is the {}th planet in the solar system.".format(planet, position)
# So much cleaner! We call `.format()` on a "format string", where the Python values we want to insert are represented with `{}` placeholders.
#
# Notice how we didn't even have to call `str()` to convert `position` from an int. `format()` takes care of that for us.
#
# If that was all that `format()` did, it would still be incredibly useful. But as it turns out, it can do a *lot* more. Here's just a taste:
pluto_mass = 1.303 * 10**22 # 1.3 x 10^22
earth_mass = 5.9722 * 10**24 # 5.9 x 10^24
population = 52910390
# (2 decimal points) (3 decimal points, format as percent) (separate with commas)
"{} weighs about {:.2} kilograms ({:.3%} of Earth's mass). It is home to {:,} Plutonians.".format(
planet, pluto_mass, pluto_mass / earth_mass, population,
)
# Referring to format() arguments by index, starting from 0
s = """
Pluto is a {0}.
No, it is a {1}.
{0}!
{1}!
""".format('planet', 'dwarf planet')
print(s)
# There are much more about the capability of **`str.format`**, but it is extra for the purpose of the course.
#
# In case you want to go further, [pyformat.info](https://pyformat.info/) and [the official docs](https://docs.python.org/3/library/string.html#formatstrings) is worth being referenced.
# # Dictionaries
#
# Dictionaries are a built-in Python data structure for mapping **keys** to **values**.
#
# To define a Dictionary, we use curly braces `{}`
# Dict literal mapping number names to their integer values.
numbers = {'one':1, 'two':2, 'three':3}
# In this case `'one'`, `'two'`, and `'three'` are the **keys**, and 1, 2 and 3 are their corresponding **values**.
#
# Values are accessed via square bracket syntax `[]` similar to indexing into lists and strings.
numbers['one']
# We can use the same syntax to add another `key-value` pair
numbers['eleven'] = 11
numbers
# Or to change the value associated with an existing key
# (values may be of any type — here a str replaces an int)
numbers['one'] = 'Pluto'
numbers
# Python has ***dictionary comprehensions*** with a syntax similar to the list comprehensions we saw in the previous tutorial.
planets = ['Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn', 'Uranus', 'Neptune']
planet_to_initial = {planet: planet[0] for planet in planets}
planet_to_initial
# The **`in`** operator tells us whether something is a key in the dictionary
'Saturn' in planet_to_initial
'Betelgeuse' in planet_to_initial
# A for loop over a dictionary will loop over its keys
numbers = {'one':1, 'two':2, 'three':3}
for k in numbers:
print("{} = {}".format(k, numbers[k]))
# ## Dictionary Methods
#
# We can access a collection of all the keys or all the values with **`dict.keys()`** and **`dict.values()`**, respectively.
# +
# Get all the initials, sort them alphabetically, and put them in a space-separated string.
initial_keys = planet_to_initial.keys()
initial_vals = planet_to_initial.values()
print(initial_keys)
print(initial_vals)
' '.join(sorted(initial_vals))
# -
# The very useful **`dict.items()`** method lets us iterate over the keys and values of a dictionary simultaneously.
#
# (In Python jargon, an **item** refers to a **`key-value`** pair)
for planet, initial in planet_to_initial.items():
print("{} begins with \"{}\"".format(planet, initial))
# To read a full inventory of dictionaries' methods, check out the [official online documentation](https://docs.python.org/3/library/stdtypes.html#dict).
# + _kg_hide-output=true
help(dict)
# -
# # Your Turn 👋
|
python-for-data/Lec06 - Strings and Dictionaries.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"""Pymongo is driver for MongoDB related persistence"""
from pymongo import MongoClient # For cluster connections, also requires dnspython package
from urllib.parse import urlparse
import pandas as pd
import os
"""Set environment variable for connection string"""
# %env MONGODB_CONNECTION=
class MongoDBConnect:
    """Context-managed MongoDB connector.

    Args:
        host: MongoDB connection string; the database name is taken from
            the URI path (e.g. ``mongodb+srv://host/<database>``).
    """
    def __init__(self, host):
        self.host = host
        self.connection = None

    def __enter__(self):
        # Open the client lazily so constructing the object has no side effects.
        self.connection = MongoClient(self.host)
        print('Mongo connection created: {0}'.format(self.connection))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Guard against __enter__ never having run (or having failed).
        if self.connection is not None:
            self.connection.close()

    def get_all(self, collection, limit=0, order=1):
        """
        MongoDB get all items

        Args:
            collection: name of the collection to get from
            limit: integer limit of items to retrieve, ie, 1000, 2000, etc.
                (0 means no limit)
            order: 'created_at' sort direction: asc 1, desc -1

        Returns:
            A pymongo cursor over the matching documents.
        """
        try:
            # Database name comes from the URI path ('/dbname' -> 'dbname').
            coll = self.connection[urlparse(
                self.host).path[1:]][collection]
            items = coll.find().sort('created_at', order).limit(limit)  # oldest default, no limit default
            # Cursor.count() was removed in PyMongo 4; count on the
            # collection instead and apply the limit manually.
            total = coll.count_documents({})
            found = total if limit == 0 else min(total, limit)
            print('Successfully found {0} items'.format(found))
            return items
        except Exception as e:
            print('PyMongo database error: {0}'.format(str(e)))
            # Bare raise preserves the original traceback.
            raise
conn = os.getenv("MONGODB_CONNECTION")
with MongoDBConnect(conn) as db:
items = db.get_all(collection="tweets", limit=1000)
df = pd.DataFrame.from_dict(items)
print(df.head())
# Change dtypes as needed (default as object)
|
jupyter-notebooks/Import-MongoDB-Example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 수정사항
# * 계산식을 코드로 바로 입력
# * Nan data table 수정
# * 추가데이터와 비교 (ex. 3월, 12월 \
# 코로나 확진자 증가, 거리두기 단계 변화와 비교 )
# * plot -> pointplot 로 변경
# * heatmap(corr)
# *시간 -> 지역
# *요일 -> 월~일
# ----------------------------------
# *ppt 제작
# *
# - 주제 선정 동기, 개요, 목적등
# * 대중교통이 감소하였고, 그럼 일반 자동차 통행량이 증가하였을까?
#
#
# - 데이터 수집 방법
#
# - 데이터 처리 및 분석
#
# - 분석 방법 : 빈도분석, 기술통계분석, 상관계수분석.
#
# 가설 1
# 데이터 설명 및 인사이트 도출 과정 (아주 간단히) 및 시각화
# 결론 1
#
# 가설 2
# 데이터 설명 및 인사이트 도출 과정 (아주 간단히) 및 시각화
# 결론 2
#
# …
#
# - 추후 개선 및 느낀점
pwd
# %cd
# %cd seoultraffic/
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import pyplot
import seaborn as sns
import matplotlib.font_manager as fm
import missingno as msno #결측데이터 검색
# %matplotlib inline
# %config InlineBackend.figure_formats = {'png', 'retina'}
# 폰트설정
from matplotlib import font_manager
plt.rcParams["font.family"] = 'NanumGothicCoding'
from matplotlib import rc
rc('font', family="Arial Unicode MS")
### 2019 pub_transpor data road
df1 = pd.read_csv("2019.csv")
### 2020 pub_transpor data road
df2 = pd.read_csv("2020.csv")
### 2019 Traffic data road
#df_2019_url = './data_2019_seoultraffic.csv'
### 2019 Traffic data road
#df_2020_url = './data_2020_seoultraffic.csv'
ddf1 = pd.read_csv("data_2019_seoultraffic.csv")
ddf2 = pd.read_csv("data_2020_seoultraffic.csv")
# ### Pub_transpor data 2019
# Monthly aggregate of the 'total' column (pivot_table defaults to mean).
all_traffic_m_2019 = ddf1.pivot_table("total", "month")
all_traffic_m_2019.tail(2)
all_traffic_m_2019.loc[:, "total"] = all_traffic_m_2019.sum(axis=1)
all_traffic_m_2019.tail(2)
all_traffic_m_2020 = ddf2.pivot_table("total", "month")
all_traffic_m_2020.tail(2)
# Fixed typo: the column was written as "tatal", which added a junk
# column and left "total" out of sync with the 2019 frame; later cells
# read all_traffic_m_2020["total"].
all_traffic_m_2020.loc[:, "total"] = all_traffic_m_2020.sum(axis=1)
all_traffic_m_2020.tail(2)
# ### Pub_transpor data 2020
all_traffic_m_2019 = ddf1.pivot_table("total", "month", aggfunc=np.sum)
all_traffic_m_2019.tail(2)
all_traffic_m_2020 = ddf2.pivot_table("total", "month", aggfunc=np.sum)
all_traffic_m_2020.tail(2)
all_traffic_r_2019 = ddf2.pivot_table("total", "구", aggfunc=np.sum)
all_traffic_r_2019.tail(2)
all_traffic_r_2020 = ddf2.pivot_table("total", "구", aggfunc=np.sum)
all_traffic_r_2020.tail(2)
# ## 1-1 2019_2020 서울 전체 대중교통량 비교
y1 = all_traffic_m_2019["total"].sum()
y2 = all_traffic_m_2020["total"].sum()
all_rate_2019 = round(((y2 - y1) / y1 * 100), 2) # 감소량
all_rate_2019
# +
x1 = 0
x2 = 1
#y1 = all_traffic_m_2019["total"].sum() #2019 Total 교통량
#y2 = all_traffic_m_2020["total"].sum() #2020 Total 교통량
y1 = all_traffic_m_2019["total"].sum()
y2 = all_traffic_m_2020["total"].sum()
plt.bar(x1,y1, label='2019 traffic', color='b')
plt.bar(x2,y2, label='2020 traffic', color='g')
plt.legend()
plt.ylim([0,70*1000000000/6])
plt.text(0.17,85*100000000,all_rate_2019,fontsize=20)
plt.text(0.5,85*100000000,"% 감소",fontsize=20)
plt.title("2019_2020 서울 전체 교통량 비교", fontsize = 15)
plt.show()
# -
# ## 1-2 2019_2020 서울 전체 차량 통행량 비교
# # 2-1 2019-2020 월별 서울 교통량 비교
# Mean monthly traffic for 2019 and the months above that mean.
all_tans_2019 = round(all_traffic_m_2019["total"].mean())
print("2019년 평균 통행량 :", all_tans_2019,"대")
compare_2019 = all_traffic_m_2019[all_traffic_m_2019["total"] > all_traffic_m_2019["total"].mean()]
# Fixed NameError: the frame must be built from compare_2019
# (compare_df_2019 did not exist yet), mirroring the 2020 cell below.
compare_df_2019 = pd.DataFrame(compare_2019, columns=["total"])
compare_df_2019
# Same computation for 2020.
all_tans_2020 = round(all_traffic_m_2020["total"].mean())
print("2020년 평균 통행량 :", all_tans_2020,"대")
compare_2020 = all_traffic_m_2020[all_traffic_m_2020["total"] > all_traffic_m_2020["total"].mean()]
compare_df_2020 = pd.DataFrame(compare_2020, columns=["total"])
compare_df_2020
# +
X = ["1월","2월","3월","4월","5월","6월","7월","8월","9월","10월","11월","12월"]
y1 = all_traffic_m_2019.total
y2 = all_traffic_m_2020.total
plt.figure(figsize=(20, 5))
plt.plot(X,y1, label='2019 traffic(month)', color='b', marker='o')
plt.plot(X,y2, label='2020 traffic(month)', color='g', marker='*')
plt.legend()
plt.title("2019-2020 Traffic Counting", fontsize=20)
plt.axhline(y=all_tans_2019, color='r', linewidth=1)
plt.axhline(y=all_tans_2020, color='r', linewidth=1)
plt.show()
# -
# # 2-2 2019-2020 월별 차량 통행량 비교
# ## 3-0. 요일별 대중 교통량 출력
df3 = pd.read_csv("2019.csv")
day_pub_traffic_2019 = df3.drop(columns=["시간", "지명", "발생량", "도착량", "월"])
day_pub_traffic_2019
data, weeks = [], ["월", "화", "수", "목", "금", "토", "일"]
for week in weeks:
data.append(day_pub_traffic_2019[day_pub_traffic_2019["날짜"].str.contains(week)]["count"].sum())
day_pub_traffic_2019 = pd.DataFrame(data=data, index=weeks).reset_index()
day_pub_traffic_2019.columns = ["요일", "count"]
day_pub_traffic_2019
df4 = pd.read_csv("2020.csv")
day_pub_traffic_2020 = df4.drop(columns=["시간", "지명", "발생량", "도착량", "월"])
day_pub_traffic_2020
data, weeks = [], ["월", "화", "수", "목", "금", "토", "일"]
for week in weeks:
data.append(day_pub_traffic_2020[day_pub_traffic_2020["날짜"].str.contains(week)]["count"].sum())
day_pub_traffic_2020 = pd.DataFrame(data=data, index=weeks).reset_index()
day_pub_traffic_2020.columns = ["요일", "count"]
day_pub_traffic_2020
# # 3 요일별 대중교통 이용량 비교
# ### 3.1.1주말 대중교통 이용량 비교(토,일)
ddf3 = pd.read_csv("./data_2019_seoultraffic.csv")
day_all_traffic_2019 = pd.DataFrame(ddf3, columns=["요일", "total"])
day_all_traffic_2019
data, weeks = [], ["월", "화", "수", "목", "금", "토", "일"]
for week in weeks:
data.append(day_all_traffic_2019[day_all_traffic_2019["요일"].str.contains(week)]["total"].sum())
day_all_traffic_2019 = pd.DataFrame(data=data, index=weeks).reset_index()
day_all_traffic_2019.columns = ["요일", "total"]
day_all_traffic_2019
ddf4 = pd.read_csv("./data_2020_seoultraffic.csv")
day_all_traffic_2020 = pd.DataFrame(ddf4, columns=["요일", "total"])
day_all_traffic_2020
data, weeks = [], ["월", "화", "수", "목", "금", "토", "일"]
for week in weeks:
data.append(day_all_traffic_2020[day_all_traffic_2020["요일"].str.contains(week)]["total"].sum())
day_all_traffic_2020 = pd.DataFrame(data=data, index=weeks).reset_index()
day_all_traffic_2020.columns = ["요일", "total"]
day_all_traffic_2020
# ### 3.1.2 주말 차량 통행량 비교(토,일)
(np.sum(ytraffic_2020["count"][-2:]) - np.sum(ytraffic_2019["count"][-2:]))\
/ np.sum(ytraffic_2019["count"][-2:]) * 100
# +
X1 = ["2019년"]
X2 = ["2020년"]
y1 = week_2019
y2 = week_2020
plt.bar(X1, y1)
plt.bar(X2, y2)
plt.ylim([300000000, 120*10000000])
plt.text(X1, 60*10000000, week_2019, ha="center", fontsize = 20)
plt.text(X2, 60*10000000, week_2020, ha="center" ,fontsize = 20)
plt.title("rate : -10.27.%", fontsize = 20)
plt.show()
plt.show()
# -
# ### 3.2.1 평일 대중교통 이용량 비교(월-금)
(np.sum(ytraffic_2020["count"][:-2]) - np.sum(ytraffic_2019["count"][:-2]))\
/ np.sum(ytraffic_2019["count"][:-2]) * 100
y1 = sum(ytraffic_2019["count"][:-2])
y2 = sum(ytraffic_2020["count"][:-2])
y1, y2
# +
plt.bar("2019년 평일 교통량", y1)
plt.bar("2020년 평일 교통량", y2)
a = np.sum(ytraffic_2019["count"][:-2])
b = np.sum(ytraffic_2020["count"][:-2])
plt.text("2019년 평일 교통량",80*10000000, a, ha="center", fontsize = 20)
plt.text("2020년 평일 교통량", 80*10000000, b, ha="center", fontsize = 20)
plt.title("-23.57%", fontsize = 20)
plt.show()
# -
# ### 3.2.2 평일 차량통행량 비교(월-금)
dayofthweek_2020
# Weekday (Mon-Thu + the trailing day) totals for each year.
yy1 = (np.sum(dayofthweek_2019[0:4])+np.sum(dayofthweek_2019[-1:])).values
yy2 = (np.sum(dayofthweek_2020[0:4])+np.sum(dayofthweek_2020[-1:])).values
# Fixed: percent change is relative to the 2019 baseline (yy1), as in
# every other rate computation in this notebook, not to yy2.
cc = ((yy2 - yy1) / yy1 ) * 100
# +
plt.bar("2019년 평일 교통량", yy1)
plt.bar("2020년 평일 교통량", yy2)
yy1 = (np.sum(dayofthweek_2019[0:4])+np.sum(dayofthweek_2019[-1:])).values
yy2 = (np.sum(dayofthweek_2020[0:4])+np.sum(dayofthweek_2020[-1:])).values
plt.text("2019년 평일 교통량", 50*10000000, yy1, ha="center", fontsize = 13)
plt.text("2020년 평일 교통량", 50*10000000, yy2, ha="center", fontsize = 13)
plt.title(cc, fontsize = 20)
plt.show()
# -
# ## 4.지역별 비교
# ### 4.1지역별 대중교통량 비교
# +
traffic_r_2019.loc[:,'region'] = ['강남구', '강동구', '강북구', '강서구', '관악구', '광진구', '구로구', '금천구', \
'노원구', '도봉구', '동대문구', '동작구', '마포구', '서대문구', '서초구', '성동구', \
'성북구', '송파구', '양천구', '영등포구', '용산구', '은평구', '종로구', '중구', '중랑구' ]
traffic_r_2019.tail(2)
# -
traffic_r_2019['total'].mean()
traffic_r_2019_a = round(traffic_r_2019['total'].mean())
print("2019년 평균 통행량 :", traffic_r_2019_a,"대")
# +
# Fixed: the frame-wide comparison produced a NaN-filled mask and
# aaa.drop() raised (drop() requires labels).  Filter on the 'total'
# column instead, matching the 2020 cell below.
aa = traffic_r_2019[traffic_r_2019["total"] > traffic_r_2019["total"].mean()]
aaa = pd.DataFrame(aa, columns=["total"])
aaa
# +
traffic_r_2020.loc[:,'region'] = ['강남구', '강동구', '강북구', '강서구', '관악구', '광진구', '구로구', '금천구', \
'노원구', '도봉구', '동대문구', '동작구', '마포구', '서대문구', '서초구', '성동구', \
'성북구', '송파구', '양천구', '영등포구', '용산구', '은평구', '종로구', '중구', '중랑구' ]
traffic_r_2020.tail(2)
# -
traffic_r_2020['total'].mean()
traffic_r_2020_a = round(traffic_r_2020['total'].mean())
print("2020년 평균 통행량 :", traffic_r_2020_a,"대")
traffic_m_2019.tail()
traffic_r_2019.tail()
dfdf_2019 = traffic_m_2019[traffic_m_2019["total"] > traffic_m_2019["total"].mean()]
dfdfdf_2019 = pd.DataFrame(dfdf_2019, columns=["total"])
dfdfdf_2019
bb = traffic_r_2020[traffic_r_2020["total"] > traffic_r_2020["total"].mean()]
bbb = pd.DataFrame(bb, columns=["total"])
bbb
# +
plt.figure(figsize=(20, 5))
X = ['강남구', '강동구', '강북구', '강서구', '관악구', '광진구', '구로구', '금천구', \
'노원구', '도봉구', '동대문구', '동작구', '마포구', '서대문구', '서초구', '성동구', \
'성북구', '송파구', '양천구', '영등포구', '용산구', '은평구', '종로구', '중구', '중랑구' ]
plt.bar(X, traffic_r_2019["total"])
plt.bar(X, traffic_r_2020["total"])
#plt.bar(traffic_r_2019["region"], traffic_r_2019["total"])
#plt.bar(traffic_r_2020["region"], traffic_r_2020["total"])
#plt.legend()
plt.axhline(y=traffic_r_2019['total'].mean(), color='g', linewidth=1) #label="2019")
plt.axhline(y=traffic_r_2020['total'].mean(), color='r', linewidth=1) #label="2020")
plt.show()
# +
#4.2 지역별 차량 교통량 비교
# -
ddf_2019 = traffic_2019
ddf_2019 = ddf_2019.pivot_table("총합","구",aggfunc=np.sum)
ddf_2019 = ddf_2019.reset_index()
ddf_2019.tail()
gein_tans_2019 = round(ddf_2019["총합"].mean())
print("2019년 평균 통행량 :", gein_tans_2019,"대")
traffic_r_2020_a = round(traffic_r_2020['total'].mean())
print("2020년 평균 통행량 :", traffic_r_2020_a,"대")
ddff_2019 = ddf_2019[ddf_2019["총합"] > ddf_2019["총합"].mean()]
ddff_2019
ddf_2020 = traffic_2020
ddf_2020 = ddf_2020.pivot_table("총합","구",aggfunc=np.sum)
ddf_2020 = ddf_2020.reset_index()
ddf_2020.tail()
gein_tans_2020 = round(ddf_2020["총합"].mean())
print("2020년 평균 통행량 :", gein_tans_2020,"대")
ddff_2020 = ddf_2020[ddf_2020["총합"] > ddf_2020["총합"].mean()]
ddff_2020
# +
plt.figure(figsize=(20, 8))
plt.bar(ddf_2019["구"], ddf_2019["총합"])
plt.bar(ddf_2020["구"], ddf_2020["총합"])
#plt.legend()
plt.axhline(y=gein_tans_2019, color='g', linewidth=1) #label="2019")
plt.axhline(y=gein_tans_2020, color='r', linewidth=1) #label="2020")
plt.show()
# +
import seaborn as sns
ddff_2019["연도"] = 2019
ddff_2020["연도"] = 2020
dddfff = pd.concat([ddff_2019, ddff_2020])
x = dddfff["구"]
y = dddfff["총합"]
plt.figure(figsize =(15, 8))
sns.barplot(x=x, y=y, hue=dddfff["연도"])
plt.axhline(y=gein_tans_2019, color='g', linewidth=2) #label="2019"
plt.axhline(y=gein_tans_2020, color='r', linewidth=2) #label="2020"
# -
|
beta_seoultraffic_rev_2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import tensorflow as tf
import keras
from keras.layers import Activation
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import Input
from keras.models import Model
from keras_pyramid_pooling_module import PyramidPoolingModule
np.random.seed(222)
tf.random.set_seed(222)
def plot_image_grid(data, figsize=(15, 15), cmap=None, cbar=True):
    """
    Plot the data as a grid of images.

    Args:
        data: the tensor of image data to plot in
            (M, N, H, W, C) format where M is the
            height of the image grid, N is the width
            of the image grid, H is the height of the
            image, W is the width of the image, and C
            is the channel dimensionality of the image
        figsize: the matplotlib figure size for the whole grid
        cmap: the color map to use for the data
        cbar: whether to include a color bar legend

    Returns:
        None
    """
    # NOTE(review): 2-D ax indexing assumes M > 1 and N > 1; plt.subplots
    # returns a 1-D array otherwise — confirm callers.
    M, N = data.shape[0], data.shape[1]
    fig, ax = plt.subplots(nrows=M, ncols=N, sharex=True, sharey=True, figsize=figsize)
    im = None
    for i in range(M):
        for j in range(N):
            # (removed an unused, incorrectly computed flat index here)
            im = ax[i, j].imshow(data[i, j], cmap=cmap)
            # Hide tick marks; pixel coordinates carry no meaning here.
            ax[i, j].axes.xaxis.set_major_locator(plt.NullLocator())
            ax[i, j].axes.yaxis.set_major_locator(plt.NullLocator())
    if cbar:
        # Attach the color bar to the last image drawn (all share one cmap).
        cb_ax = fig.add_axes([1., 0.2, 0.02, 0.6])
        fig.colorbar(im, cax=cb_ax)
# # Dataset
(X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data()
# normalize images into [0, 1]
X_train = X_train[..., None] / 255.0
X_test = X_test[..., None] / 255.0
# get the target size of the images and number of classes
TARGET_SIZE = X_train.shape[1:]
NUM_CLASSES = np.max(y_train) + 1
# convert discrete labels to one-hot vectors
y_train = np.eye(NUM_CLASSES)[y_train.flatten()]
y_test = np.eye(NUM_CLASSES)[y_test.flatten()]
X_train.shape, y_train.shape
X_test.shape, y_test.shape
plot_image_grid(X_train[:25].reshape(5, 5, 28, 28, 1), cbar=False, cmap='bone')
# # Model
# ## Baseline
input_layer = Input(TARGET_SIZE)
x = input_layer
x = Conv2D(64, (2, 2), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Conv2D(64, (2, 2), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Conv2D(64, (2, 2), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Flatten()(x)
x = Dense(10, activation='softmax')(x)
model = Model(inputs=input_layer, outputs=x)
model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
history1 = model.fit(X_train, y_train,
epochs=5,
batch_size=10,
validation_split=0.3,
shuffle=True,
)
df1 = pd.DataFrame(history1.history)
df1
ax = df1[[c for c in df1.columns if 'accuracy' in c]].plot()
ax.set_ylabel('Accuracy')
ax.set_xlabel('Epoch')
ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
ax = df1[[c for c in df1.columns if 'loss' in c]].plot()
ax.set_ylabel('Loss')
ax.set_xlabel('Epoch')
ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
loss1, accuracy1 = model.evaluate(X_test, y_test)
# ## Pyramid Pooling Near Output
input_layer = Input(TARGET_SIZE)
x = input_layer
x = Conv2D(64, (2, 2), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Conv2D(64, (2, 2), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Conv2D(64, (2, 2), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = PyramidPoolingModule(1, (3, 3), padding='same')(x)
x = Flatten()(x)
x = Dense(10, activation='softmax')(x)
model = Model(inputs=input_layer, outputs=x)
model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
history2 = model.fit(X_train, y_train,
epochs=5,
batch_size=10,
validation_split=0.3,
shuffle=True,
)
df2 = pd.DataFrame(history2.history)
df2
ax = df2[[c for c in df2.columns if 'accuracy' in c]].plot()
ax.set_ylabel('Accuracy')
ax.set_xlabel('Epoch')
ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
ax = df2[[c for c in df2.columns if 'loss' in c]].plot()
ax.set_ylabel('Loss')
ax.set_xlabel('Epoch')
ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
loss2, accuracy2 = model.evaluate(X_test, y_test)
# ## Pyramid Pooling Near Input
input_layer = Input(TARGET_SIZE)
x = input_layer
x = PyramidPoolingModule(1, (3, 3), padding='same')(x)
x = Conv2D(64, (2, 2), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Conv2D(64, (2, 2), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Conv2D(64, (2, 2), padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Flatten()(x)
x = Dense(10, activation='softmax')(x)
model = Model(inputs=input_layer, outputs=x)
model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
history3 = model.fit(X_train, y_train,
epochs=5,
batch_size=10,
validation_split=0.3,
shuffle=True,
)
df3 = pd.DataFrame(history3.history)
df3
ax = df3[[c for c in df3.columns if 'accuracy' in c]].plot()
ax.set_ylabel('Accuracy')
ax.set_xlabel('Epoch')
ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
ax = df3[[c for c in df3.columns if 'loss' in c]].plot()
ax.set_ylabel('Loss')
ax.set_xlabel('Epoch')
ax.xaxis.set_major_locator(plt.MaxNLocator(integer=True))
loss3, accuracy3 = model.evaluate(X_test, y_test)
# ## Comparisons
# Collect test loss/accuracy of the three architectures into one frame.
df = pd.DataFrame([[loss1, loss2, loss3], [accuracy1, accuracy2, accuracy3]]).T
df.columns = ['Loss', 'Accuracy']
# Fixed display-label typo: 'Pyramind' -> 'Pyramid'.
df.index = ['Baselines', 'Pyramid Near Output', 'Pyramid Near Input']
df
ax = df.plot(kind='bar', subplots=True)
ax[0].set_ylabel('Loss')
ax[1].set_ylabel('Accuracy')
ax[1].set_xlabel('Architecture')
#
|
CIFAR10Classifier.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Canada Trade Data DP Demo - Data Scientist
# Import libraries
import syft as sy
import torch
import numpy as np
from syft.core.adp.entity import DataSubject
# # Step 1: Login and Find Data
# Login to new Domain
canada = sy.login(email="<EMAIL>", password="<PASSWORD>", port=8081)
# Look at data already in the Domain
canada.store
data_ptr = canada.store[9]
data_ptr
# # Step 2: Calculate Trade Flow Sum
s_ptr = data_ptr.sum(0)
s_ptr.request(reason="I really really want to see it.")
# +
# s_ptr.get()
# -
out = s_ptr.publish(client=canada,sigma=1000)
out.get()
canada.approx_budget
canada.request_more_budget()
for i in range(10):
s_ptr = data_ptr.sum(0) / 10
out = s_ptr.publish(client=uk,sigma=1)
print("done")
data_ptr.get()
o = ac.send(uk)
uk.store
import syft as sy
blob = sy.serialize(ac)
sy.deserialize(blob)
out = target_ptr.publish(sigma=uk)
out
sy.serialize(uk)
data_ptr.request("I need this data to solve a case")
data = data_ptr.get()
weights = sy.Tensor(np.random.rand(3, 3)).autograd(requires_grad=True).tag("my_weights")
weights_ptr = weights.send(uk)
# + tags=[]
print(weights)
# -
autograd_tensor = sy.Tensor(np.random.randn(1, 3)).autograd(requires_grad=True).tag("autograd")
autograd_ptr = autograd_tensor.send(uk)
# +
for i in range(1):
pred = data_ptr.dot(weights_ptr)
diff = target_ptr - pred
pre_loss = diff * diff
loss = np.mean(pre_loss, axis=1)
loss.backward()
wdiff = weights_ptr.grad * 0.01
weights_ptr = -wdiff + weights_ptr
gamma_ptr = weights_ptr.gamma
# -
uk.store.pandas
|
notebooks/trade_demo/archive/21_08_13/Demo_DataScientist.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %cd ..
from lib.run.args import Args
from lib.run.plot import Plot, set_theme
# %config InlineBackend.figure_format = 'svg'
n = 4
set_theme([n,n])
aa = []
a = Args(
splitter = 'Sequential',
splitter_pp = dict(
n_days_in_sample = 30,
bank_quantile = 0.9,
rtk_quantile = 0.9,
),
n_folds = 3,# 1000 == 'full train'
fold = 0,
fit_limit = 1.,
val_limit = 1.,
batch_size = 32,
lr = 2e-3,
n_epochs = 10,
check_val_every_n_epoch = 1,
bb_pp = dict(
block_size = 16,
hidden_size = 128,
intermediate_size = 128,
num_attention_heads = 1,
num_hidden_layers = 1,
num_random_blocks = 1,
),
loss = 'MarginLoss',
loss_pp = dict(),
use_unmatched = bool(0),
miner = None,
miner_pp = dict(),
avg_loss = 'mean',
avg_pred = 'mean',
)
a.update()
aa.append(a)
plot = Plot(aa[:],0,20)
plot('R1')
plot('MRR')
plot('P')
plot('loss')
|
ipynb/plot.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
# +
filepath_dict = {'yelp': 'data/sentiment_analysis/yelp_labelled.txt',
'amazon': 'data/sentiment_analysis/amazon_cells_labelled.txt',
'imdb': 'data/sentiment_analysis/imdb_labelled.txt'}
df_list = []
for source, filepath in filepath_dict.items():
df = pd.read_csv(filepath, names=['sentence', 'label'], sep='\t')
df['source'] = source # Add another column filled with the source name
df_list.append(df)
df = pd.concat(df_list)
print(df.iloc[0])
# -
df_yelp = df[df['source'] == 'yelp']
sentences = df_yelp['sentence'].values
y = df_yelp['label'].values
sentences_train, sentences_test, y_raw, y_test = train_test_split(sentences, y, test_size=0.25, random_state=1000)
# +
vectorizer = CountVectorizer()
vectorizer.fit(sentences_train)
X_raw = vectorizer.transform(sentences_train)
X_test = vectorizer.transform(sentences_test)
X_test.shape
# +
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
# Fixed NameError: the train_test_split above bound the training data to
# X_raw / y_raw — X_train / y_train were never defined in this notebook.
classifier.fit(X_raw, y_raw)
score = classifier.score(X_test, y_test)
print("Accuracy:", score)
# -
# # Shapely
# %reload_ext autoreload
# %autoreload 2
import os
import sys
import time
import numpy as np
from Shapley import ShapNN
from DShap import DShap
import matplotlib.pyplot as plt
import sklearn
from shap_utils import *
# %matplotlib inline
MEM_DIR = './'
problem, model = 'classification', 'logistic'
hidden_units = [] # Empty list in the case of logistic regression.
train_size = 500
num_test = 240
num_test
X, y = X_raw[:train_size], y_raw[:train_size]
#X_test, y_test = X_raw[train_size:], y_raw[train_size:]
print(X_train.toarray().shape)
print(y_test.shape)
model = 'logistic'
problem = 'classification'
# num_test = 600
directory = './temp1'
dshap = DShap(X.toarray(), y, X_test.toarray(), y_test, num_test,
sources=None,
sample_weight=None,
model_family=model,
metric='accuracy',
overwrite=True,
directory=directory, seed=0)
dshap.run(100, 0.1, g_run=False)
X, y = X_raw[:train_size], y_raw[:train_size]
# X_test, y_test = X_raw[100:], y_raw[100:]
model = 'logistic'
problem = 'classification'
# num_test = 600
directory = './temp1'
dshap = DShap(X.toarray(), y, X_test.toarray(), y_test, num_test, model_family=model, metric='accuracy',
directory=directory, seed=1)
dshap.run(100, 0.1)
dshap.sources[0]
X, y = X_raw[:train_size], y_raw[:train_size]
# X_test, y_test = X_raw[100:], y_raw[100:]
model = 'logistic'
problem = 'classification'
# num_test = 600
directory = './temp1'
dshap = DShap(X.toarray(), y, X_test.toarray(), y_test, num_test, model_family=model, metric='accuracy',
directory=directory, seed=2)
dshap.run(100, 0.1)
dshap.merge_results()
convergence_plots(dshap.marginals_tmc)
convergence_plots(dshap.marginals_g)
# +
#removing high value data point
dshap.performance_plots([dshap.vals_tmc, dshap.vals_g, dshap.vals_loo], num_plot_markers=20,
sources=dshap.sources)
# -
|
Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demographic labeling
#
# ## Goal
#
# In this notebook, we will be associating our cleaned ads data with demographics. Associating a demographic that the IRA attempted to target with their ads is a very subjective process. Studies which inspired this analysis have not clearly communicated how they labeled their ads. From browsing the ads, it is very clear that most of the ads targeting is based on implicit biases and stereotypes about certain demographics.
#
# Labelling is also difficult as:
#
# * There is overlap between possible target categories (You can be both LGBT and African-American)
# * Some ad campaign didn't seem to have clear targets
#
# ## Summary of strategy
#
# My first attempt at labeling was to use a [well known document clustering approach](https://iksinc.online/2016/05/16/topic-modeling-and-document-clustering-whats-the-difference/) using tf-idf followed by k-means. tf-idf calculates the frequency of terms in a document and compares it with their frequency across all documents. Terms that are seen more frequently are less important, whereas terms seen less frequently are more important.
#
# K-Means is then used to identify clusters. To associate a cluster with a demographic, the different targeting interests will be listed by frequency of appearance in the ads of the cluster.
#
# This first approach was not successful because a lot of the ads target the same demographic. We can see this by looking at the frequency of terms in each cluster. Most clusters contain 'African-American' when we would really like for this to be in its own cluster. This is also due to ads targeting 'African-American' having an overlap with other categories. The [\[TEST\]\_k\_means_demographic_labeling]([TEST]_k_means_demographic_labeling.ipynb) notebook file contains code for this approach.
#
# Inspired by the Oxford study which used graph algorithms to identify communities within a graph, the second approach (and the one which will be shown in this notebook) was to do a manual "Breadth first search" within the data. The criteria for belonging to a group are described below, as well as the process used.
#
# ### Criterion
#
# * Similar words are used
# * Similar social and/or cultural interests are shared
#
# As nothing forbids overlap between demographics such as "Enjoy memes", "LGBT" or "Mexican-American", whenever it is unclear why a term was tied to a demographic, I have left a note indicating why, along with the ad number referring to the pdf ad that was consulted to make the decision.
#
# ### Process
#
# 1. Create the list of interests as arrays of interest strings.
# 2. List the top 25 most frequent interests.
# 3. Attempt to create a demographic group with the top 25 interests.
# 4. Label rows based on the demographic group
# 5. Analyse whether terms related to those used to label the group should also be used as part of the interests labelling. If so add these to the labeling for the group and repeat 4-5, otherwise go to 6.
# 6. Stop listing interest frequencies once the number of ads in a group stabilizes. Go to 3 with data not including the group identified.
#
# We first import the packages required and load our "clean_data" dataframe which contains the fields detailed below.
#
# | Field name | Type | Description |
# |------------------------|----------|-------------------------------------|
# | ad_targeting_interests | string | Interests used to target users |
# | ad_impressions | int | Number of users who saw the ads |
# | ad_clicks | int | Number of times the ads was clicked |
# | ad_spend | float | Money spent on the ad in RUB |
# | ad_creation_date | datetime | Creation date of the ad |
# | ad_end_date | datetime | Date at which the ad stopped |
# +
import pandas as pd
import numpy as np
import re
ads_df = pd.read_csv('../clean_data/clean_data.csv', parse_dates=['ad_creation_date', 'ad_end_date'])
ads_df.head(3)
# -
# ## Splitting the "ad_targeting_interests" string
#
# We need to do a bit more work before we can use the interests to identify demographics. First we need to split the long ad_targeting_interests into an array of interest strings. The interests are separated by a comma and an 'or' statement between the last two elements like so:
#
# "interest1, interest2, interest3 or interest4"
#
# We use a regular expression to do this transformation
# +
# We compile the regular expression to improve performance
comma_separation = re.compile(r'(?u)(?:^|,)([^\",\n]*)')

# Returns an array of interests found in the ad_targeting_interests column
def get_arr_of_interests(interest_string):
    """Split a raw interest string into a list of individual interests.

    Interests are comma separated, with the last two usually joined by
    ' or ' (e.g. 'a, b, c or d' -> ['a', 'b', 'c', 'd']).
    """
    arr = []
    # Iterate through matches found by regular expression
    for matches in re.finditer(comma_separation, interest_string):
        # Remove whitespace from both sides of the string
        match_value = matches.group(1).strip()
        # If " or " is present in the string split it.
        if match_value and ' or ' in match_value:
            arr.extend(match_value.split(' or '))
        elif match_value:
            arr.append(match_value)
    # Fallback: nothing matched, keep the whole string as one interest.
    # Fixed AttributeError: Python lists have append(), not push().
    if len(arr) == 0:
        arr.append(interest_string)
    return arr
# Obtain the interests array for each row using apply
ads_df['ad_interests_array'] = ads_df.ad_targeting_interests.apply(get_arr_of_interests)
# Add a column for demographic initialized as nan
ads_df['demographic'] = np.nan
# Show the first three entries
ads_df.head(3)
# -
# ## Outputting most frequent words
#
# We create a function which we can use to output the most frequent words found in the ad_interests_arrays.
# +
# We use a default dictionary which simplifies our code
# by skipping checks for entries not in our dictionary
from collections import defaultdict
# Given the ad_interests_arrays column (a pandas' series)
# Output the top n most frequent words and their associated row count
def print_top_words_from_arrays(series, n):
    """Print the n most frequent interests across all interest arrays.

    Args:
        series: iterable of lists of interest strings
        n: how many of the most frequent interests to print
    """
    from collections import Counter
    # Counter replaces the manual defaultdict + sorted + counted break:
    # most_common(n) already yields (word, count) by descending count,
    # with insertion order preserved on ties, exactly as sorted() did.
    counts = Counter(word for arr in series for word in arr)
    for word, count in counts.most_common(n):
        print(word, count)
# Output top 25 words
print_top_words_from_arrays(ads_df.ad_interests_array, 25)
# -
# ## Create demographic, add similar words, rinse, repeat
#
# This is the very repetitive part of the process where we follow the steps below:
#
# 3. Attempt to create a demographic group with the top 25 interests.
# 4. Label rows based on the demographic group
# 5. Analyse whether terms related to those used to label the group should also be used as part of the interests labelling. If so add these to the labeling for the group and repeat 4-5, otherwise go to 6.
# 6. Stop listing interests frequency once the number of ads in a group stablelizes. Go to 3 with data not including the group identified.
#
# Notes behind the choice to include or exclude words are included in the process below.
# +
# Labels a single row with a demographic given:
# the row, the themes associated with the group, the name of the group
def label_demographic(row, themes, group_name):
    """Tag one row with group_name when it is still unlabelled and shares at least one interest with themes."""
    # Rows already assigned to a demographic are left untouched.
    if pd.isnull(row['demographic']):
        interests = row['ad_interests_array']
        if any(theme in interests for theme in themes):
            row['demographic'] = group_name
    return row
# Labels rows given the dataframe, themes and group name for the demographic
# Outputs the total number of rows labeled with the demographic given the themes
def label_demographic_rows(df, themes, group_name):
    """Apply label_demographic to every row, report how many rows now carry group_name, and return the updated frame."""
    labelled = df.apply(label_demographic, args=(themes, group_name), axis=1)
    n_in_group = (labelled['demographic'] == group_name).sum()
    print('This labelled %d rows!' % n_in_group)
    return labelled
# -
# ## First demographic: African-American
#
# From this list of words above, many seem to target African-Americans lets make this a first grouping.
# +
african_american_themes = {
'African-American history',
'<NAME>',
'<NAME>',
'African-American Civil Rights Movement (1954-68)',
'Black (Color)',
'African-American culture',
'Pan-Africanism',
'Black Consciousness Movement',
'Black Matters',
'Black nationalism',
'African-American Civil Rights Movement(1954-68)',
'HuffPost Black Voices',
'BlackNews.com'
}
ads_df = label_demographic_rows(ads_df, african_american_themes, 'African-American')
# -
# We now look at the frequency of interests referenced by these rows. To do so, we create a function that prints the top 25 interests and their frequency for ads tagged with the demographic group.
# Given the dataframe, the themes for the demographic and its name
# Output the top 25 interests related to these ads that are not yet part of the themes array.
def print_top_references_for_theme(df, themes, group_name):
    """Print the 25 most frequent interests among rows labelled group_name, excluding interests already in themes."""
    group_rows = df.loc[df['demographic'] == group_name]
    remaining = group_rows.ad_interests_array.apply(lambda interests: list(set(interests) - themes))
    print_top_words_from_arrays(remaining, 25)
print_top_references_for_theme(ads_df, african_american_themes, 'African-American')
# +
african_american_themes = african_american_themes | {
'Jr.',
'Stop Police Brutality',
'Police misconduct',
'Black history',
'Martin Luther King Ill',
'African-American Civil Rights Movement ( 1954-68)',
'AfricanAmerican culture',
'AfricanAmerican history',
'Jr.; African-American Civil Rights Movement (1954-68)',
'Black Power',
'Black History Month',
'Black Panther Party',
'Martin Luther King III',
'Black is beautiful',
'Say To No Racism',
'<NAME>',
'Stop Racism!!',
'African American',
'African-American Civil Rights Movement (1954--68)',
'AfricanAmerican Civil Rights Movement(1954-68)',
'Black Girls Rock!',
'My Black is Beautiful',
'Anti-discrimination',
'African-American Civil Rights Movement (1954-68). African-American history',
'Human rights'
}
ads_df = label_demographic_rows(ads_df, african_american_themes, 'African-American')
# -
# In this case, we might wonder why 'Stop Police Brutality' was accepted in the list of African-American themes. One way to test if this interest should belong in this group is to output the list of ads which have this interest and are not already labeled African-American.
# Makes sure the interest string contains 'Stop Police Brutality' and that the row is not already tagged 'African-American'
ads_df[ads_df['ad_targeting_interests'].str.contains('Stop Police Brutality') & pd.isnull(ads_df['demographic'])]
# From this output we see that the first row belongs in the group by the mention of "I'm Black ..." the second mentions [<NAME>](https://en.wikipedia.org/wiki/Rodney_King) an African-American activist against police brutality. The third row is more ambiguous, but after looking at file P(1)0000483.txt we find that another part of this entry references African-Americans.
#
# A similar exercise reveals similar results for "Police misconduct", "Say To No Racisms", "Anti-discrimination", "Human rights" etc...
print_top_references_for_theme(ads_df, african_american_themes, 'African-American')
# +
african_american_themes = african_american_themes | {
'Cop Block',
'Social justice',
'Racism in the United States',
'<NAME>; Jr.',
'<NAME>',
'Police brutality in the United States',
'Police Brutality is a Crime',
'<NAME>',
'Malcolm X Memorial Foundation',
'Black Enterprise',
'Jr.; African-American culture',
'African National Congress',
'HuffPost Politics',
'Black Business Works',
'Jr.; African-American Civil Rights Movement (1954-68). African-American history',
'Pan Africanist Congress of Azania',
'Violence prevention'
}
ads_df = label_demographic_rows(ads_df, african_american_themes, 'African-American')
# -
print_top_references_for_theme(ads_df, african_american_themes, 'African-American')
# +
african_american_themes = african_american_themes | {
'St. Louis',
'Union of Huffington Post Writers and Bloggers',
'Baptism',
'Afrocentrism',
'Fight the Power',
'United States presidential election',
'Black Tea Patriots',
'The Raw Story',
'<NAME>',
'National Museum of American History',
'<NAME>',
'<NAME>',
'Gospel',
'BLACK BUSINESS GLOBAL'
}
ads_df = label_demographic_rows(ads_df, african_american_themes, 'African-American')
# -
print_top_references_for_theme(ads_df, african_american_themes, 'African-American')
# +
african_american_themes = african_american_themes | {
'<NAME>',
'Black Business Builders Club',
'Medgar Evers',
'I Have a Dream',
'African-American history. <NAME>',
'Baltimore',
'Civil and political rights',
'AfricanAmerican culture. African-American Civil Rights Movement (1954-68)',
}
ads_df = label_demographic_rows(ads_df, african_american_themes, 'African-American')
# -
# At this point the number of labeled rows changes very little, we now identify the next demographic by only examining rows not previously labeled. A new heading is added to the next demographic:
#
# ## Second demographic: Mexican-American
print_top_words_from_arrays(ads_df.ad_interests_array[pd.isnull(ads_df['demographic'])], 25)
# Many of the terms are part of Mexican-American identity politics notably:
#
# * [La Raza](https://en.wikipedia.org/wiki/La_Raza)
# * [Chicano / Chicana](https://en.wikipedia.org/wiki/Chicano)
# * [Lowrider](https://en.wikipedia.org/wiki/Lowrider)
# * [Hispanidad](https://en.wikipedia.org/wiki/Hispanidad)
#
# This will be the second manually-labelled group.
# +
mexican_american_themes = {
    'La Raza',
    'Chicano rap',
    'Lowrider',
    'Mexico',
    '. Hispanidad',
    'Latin hip hop',
    'Hispanidad',
    'Maxico. Latin hip hop. Chicano Movement',
    'Mexican Pride',
    'Chicano Movement',
    'Hispanic and latino american culture',
    # BUG FIX: a missing comma after 'Hispanic culture' used to concatenate
    # the next two literals into 'Hispanic cultureMexican american culture',
    # so neither interest ever matched an ad.
    'Hispanic culture',
    'Mexican american culture',
    'Latino culture',
    'Chicano'
}
ads_df = label_demographic_rows(ads_df, mexican_american_themes, 'Mexican-American')
# -
print_top_references_for_theme(ads_df, mexican_american_themes, 'Mexican-American')
# +
mexican_american_themes = mexican_american_themes | {
'Mexico. Latin hip hop. Chicano Movement',
'Hispanic culture',
'Mexican american culture',
'Being Chicano',
'Chicano. Chicano Movement',
'Chicano Movement. Hispanidad',
'Mexico. Latin hip hop',
'Culture of Mexico',
'Being Chicano. Mexican american culture',
'Latin hip hop. Chicano',
'Being Mexican',
'Mexican American Pride',
'So Mexican',
'Lowrider; Chicano rap',
'Chicano Movement. Being Latino',
'Hispanic american culture',
'Latin hip hop. Chicano Movement',
'Mexican american culture; Hispanic american culture',
'Mexico; Latin hip hop. Chicano Movement. Hispanidad',
'Hispanic american culture. Chicano Movement',
'Being Latino'
}
ads_df = label_demographic_rows(ads_df, mexican_american_themes, 'Mexican-American')
# -
# At this point the number of labeled rows no longer changes, we now identify the next demographic by only examining rows not previously labeled.
#
# ## Third demographic: Memes
print_top_words_from_arrays(ads_df.ad_interests_array[pd.isnull(ads_df['demographic'])], 25)
# The most popular three terms describe their own category: people who enjoy memes.
memes_themes = {
'BuzzFeed',
'CollegeHumor',
'9GAG',
'iFunny',
'Imgur',
'Funny Pics',
'Funny Photo\'s',
'Funny Pictures',
'LOL'
}
ads_df = label_demographic_rows(ads_df, memes_themes, 'Memes')
print_top_references_for_theme(ads_df, memes_themes, 'Memes')
memes_themes = memes_themes | {
'Meme',
'Internet meme',
'Reddit',
'Fail Blog',
'NBA Memes',
'Meme Center',
'lmgur',
'Humour',
'Reddit; BuzzFeed',
'Meme Center; NBA Memes',
'Imgur; CollegeHumor'
}
ads_df = label_demographic_rows(ads_df, memes_themes, 'Memes')
# At this point, very few new rows have been labeled. We find the next category.
#
# ## Fourth demographic: LGBT
print_top_words_from_arrays(ads_df.ad_interests_array[pd.isnull(ads_df['demographic'])], 25)
lgbt_themes = {
'LGBT United',
'LGBT community',
'Homosexuality'
}
ads_df = label_demographic_rows(ads_df, lgbt_themes, 'LGBT')
print_top_references_for_theme(ads_df, lgbt_themes, 'LGBT')
lgbt_themes = lgbt_themes | {
'Same-sex marriage',
'LGBT culture',
'Gay pride',
'Love',
'LGBT rights by country',
'Lesbian community',
'LGBT social movements',
'Politics and social issues',
'Yoga',
'Gay Rights',
'Human Sexuality',
'Bisexuality'
}
ads_df = label_demographic_rows(ads_df, lgbt_themes, 'LGBT')
# At this point, very few new rows have been labeled. We find the next category.
#
# ## Fifth demographic: Right wing
print_top_words_from_arrays(ads_df.ad_interests_array[pd.isnull(ads_df['demographic'])], 15)
right_wing_themes = {
'Patriotism',
'Being Patriotic',
'Independence',
'Donald Trump for President',
'Gun Owners of America',
'Donald Trump',
'Republican Party (United States)'
}
ads_df = label_demographic_rows(ads_df, right_wing_themes, 'Right wing')
print_top_references_for_theme(ads_df, right_wing_themes, 'Right wing')
right_wing_themes = right_wing_themes | {
'<NAME>r.',
'Concealed carry in the United States',
'National Rifle Association',
'The Tea Party',
'Gun Rights',
'Right to keep and bear arms',
'National Association for Gun Rights',
'Conservatism',
'The Second Amendment',
'2nd Amendment',
'Guns & Ammo',
'Young Republicans',
'Second Amendment to the United States Constitution',
'Confederate States of America',
'Dixie',
'conservative daily',
'dead hands',
'From my cold',
'lvanka Trump Fine Jewelry',
'donald j trump',
'Gun Rights Across America',
'Second Amendment Sisters',
'Veterans Day'
}
ads_df = label_demographic_rows(ads_df, right_wing_themes, 'Right wing')
print_top_references_for_theme(ads_df, right_wing_themes, 'Right wing')
right_wing_themes = right_wing_themes | {
'Flags of the Confederate States of America',
'Guns & Patriots',
'Immigration',
'Hart of Dixie',
'Tea Party Patriots',
'Anything About Guns',
'American Guns',
'Guns.com',
'Second Amendment Supporters',
'Proud to be an American',
'Redneck Nation',
'Mud & Trucks',
'Conservative Tribune',
'100 Percent FED Up',
'Chicks On The Right',
'Sons of Confederate Veterans',
'Southern United States',
'Support Our Veterans',
'Students for Concealed Carry',
'Protect the Second Amendment',
'AR-15',
'ForAmerica',
'Confederate Flag',
}
ads_df = label_demographic_rows(ads_df, right_wing_themes, 'Right wing')
print_top_references_for_theme(ads_df, right_wing_themes, 'Right wing')
right_wing_themes = right_wing_themes | {
'Confederate States Army',
'Support our troops',
'Supporting Our Veterans',
'Southern Pride',
'The Invaders',
'Veterans For America',
'Iraq and Afghanistan Veterans of America',
'Vietnam Veterans of America Foundation',
'Vietnam Veterans of America',
'American patriotism',
'Flag of the United States',
'American Patriots',
'American Patriot',
'Human migration',
'Illegal immigration',
'Disabled American Veterans',
'US Military Veterans',
'Concerned Veterans for America',
'Patriot Nation',
'confederate states america',
'<NAME>',
'Redneck Social Club',
'United Daughters of the Confederacy'
}
ads_df = label_demographic_rows(ads_df, right_wing_themes, 'Right wing')
print_top_references_for_theme(ads_df, right_wing_themes, 'Right wing')
right_wing_themes = right_wing_themes | {
'Veterans',
'United States Department of Veterans Affairs',
'Vietnam Veterans Memorial Fund',
'Stop Illegal Immigration',
'vietnam veterans america',
'Vietnam Veterans Against the War',
'Institute for Veterans and Military Families',
'Vietnam Veterans Memorial',
'<NAME>',
'Wounded Warrior Project',
'Deportation',
'Immigration law',
'Fox News Politics',
'American History',
'Patriot(American Revolution)',
'Conservative News Today',
'The Conservative',
'College Republicans',
'Conservatism in the United States',
'Concealed carry',
'Tea Party movement',
'Ted Cruz',
'Conservative Republicans of Texas'
}
ads_df = label_demographic_rows(ads_df, right_wing_themes, 'Right wing')
print_top_references_for_theme(ads_df, right_wing_themes, 'Right wing')
right_wing_themes = right_wing_themes | {
'Fox News Channel',
'Military',
'<NAME> (political commentator)',
'<NAME>',
'Jesus; TheBlaze',
'Christianity',
'<NAME>',
'<NAME>',
'<NAME>',
'Bible',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME> Jr.; Politics US politics (very conservative)',
'La<NAME>',
'Conservatism in the United States; Sean Hannity',
'Thank A Soldier',
'U.S. Patriot Tactical',
'The Patriot Post',
'AMVETS',
'Emigration',
}
ads_df = label_demographic_rows(ads_df, right_wing_themes, 'Right wing')
# Row count has stopped increasing we move on to the next group.
#
# ## New entries to: African-American
print_top_words_from_arrays(ads_df.ad_interests_array[pd.isnull(ads_df['demographic'])], 5)
# A web search revealed that 'Don't Shoot' was a group constructed by the IRA after the [shooting of <NAME>](https://en.wikipedia.org/wiki/Internet_Research_Agency). This was most likely targeting African-Americans. We add it to this category before proceeding.
# +
african_american_themes = african_american_themes | {
'Don\'t Shoot'
}
ads_df = label_demographic_rows(ads_df, african_american_themes, 'African-American')
# -
print_top_references_for_theme(ads_df, african_american_themes, 'African-American')
# +
african_american_themes = african_american_themes | {
'Jr.. Stop Racism!!. AfricanAmerican culture. African-American Civil Rights Movement (1954-68)',
'Black history. AfricanAmerican Civil Rights Movement(1954-68)',
'Filming Cops',
'Melanin',
'Black panther',
'Slavery in the United States'
}
ads_df = label_demographic_rows(ads_df, african_american_themes, 'African-American')
# -
# The number of rows has stabilized, we move on to the next group.
#
# ## Sixth demographic: Native-American
print_top_words_from_arrays(ads_df.ad_interests_array[pd.isnull(ads_df['demographic'])], 15)
# Out of the top values of 18 rows, many have to do with Native-American hence our new category.
native_american_themes = {
'Native American Indian Wisdom',
'Cherokee language',
'Cherokee Nation',
'American Indian Movement'
}
ads_df = label_demographic_rows(ads_df, native_american_themes, 'Native-American')
print_top_references_for_theme(ads_df, native_american_themes, 'Native-American')
native_american_themes = native_american_themes | {
'Native News Online',
'Indian Country Today Media Network',
'Native American civil rights',
'All Things Cherokee',
'Cherokee',
'American Indian Wars',
'Native american culture in the united states',
'National Congress of American Indians',
'Native American Times',
'Native American music'
}
ads_df = label_demographic_rows(ads_df, native_american_themes, 'Native-American')
# No new rows were added, we move to the next demographic.
#
# ## Seventh demographic: Muslim-American
print_top_words_from_arrays(ads_df.ad_interests_array[pd.isnull(ads_df['demographic'])], 15)
# +
muslim_american_themes = {
'Muslim-Brotherhood',
'Islam'
}
ads_df = label_demographic_rows(ads_df, muslim_american_themes, 'Muslim-American')
# -
print_top_references_for_theme(ads_df, muslim_american_themes, 'Muslim-American')
muslim_american_themes = muslim_american_themes | {
'Quran',
'Allah',
'Muslim Brotherhood',
'Islamism',
'Muhammad',
'Islam in the United States',
'Muslim world',
'All-american muslim culture',
'Muslim American Society',
'State of Palestine',
'Mosque',
'Sunnah',
'Glossary of Islam',
'Sharia',
'Muslim Students\' Association',
'Religion',
'Al Jazeera',
'ProductiveMuslim',
'Muslims Are Not Terrorists',
'Muhammad al-Baqir',
'Hajj',
'Hasan ibn Ali',
'Assalamu alaykum',
'Ahl al-Bayt'
}
ads_df = label_demographic_rows(ads_df, muslim_american_themes, 'Muslim-American')
print_top_references_for_theme(ads_df, muslim_american_themes, 'Muslim-American')
muslim_american_themes = muslim_american_themes | {
'Hijra (Islam)',
'Mecca',
'Prophets and messengers in Islam',
'As-salamu alaykum',
'Proud to be A Muslim',
'Abu Eesa Niamatullah',
'Zaid Shakir',
'State of Palestine; Muslim world; Mosque',
'Islam ; Quran',
'Haram',
'Hadith',
'Muslim Youth',
'Zakat',
'Medina',
'Muslim League (Pakistan)',
'Imam Ali Mosque',
'History of Islam',
'All Pakistan Muslim League',
'Islam Book',
'Ana muslim',
'Fiqh',
'Ja\'far al-Sadiq',
'Muslims Are Not Terrorists. Islamism',
'Arab world'
}
ads_df = label_demographic_rows(ads_df, muslim_american_themes, 'Muslim-American')
print_top_references_for_theme(ads_df, muslim_american_themes, 'Muslim-American')
muslim_american_themes = muslim_american_themes | {
'Muslims for America',
'Current events',
'Prod uctiveMuslim',
'<NAME>',
'Islam ism',
'Ramadan',
'<NAME>'
}
ads_df = label_demographic_rows(ads_df, muslim_american_themes, 'Muslim-American')
# The number of rows has stabilized we move on to the next demographic.
#
# ## Eighth demographic: Self-Defense
print_top_words_from_arrays(ads_df.ad_interests_array[pd.isnull(ads_df['demographic'])], 15)
self_defense = {
'Mixed martial arts',
'Martial arts',
'The Women\'s Self Defense Institute',
'PERSONAL & HOME DEFENSE',
'Self-defense',
'Selfdefense (United States)',
'Personal Defense',
'Right of self-defense',
'Self Defense Family',
'Active Self Protection'
}
ads_df = label_demographic_rows(ads_df, self_defense, 'Self-Defense')
print_top_references_for_theme(ads_df, self_defense, 'Self-Defense')
# No new terms where related with these rows. We move on to the next demographic.
#
# ## Adding to Muslim-American and Right wing
print_top_words_from_arrays(ads_df.ad_interests_array[pd.isnull(ads_df['demographic'])], 15)
# Libertarianism seems to belong in the right-wing category, while United Muslims of America should be put in Muslim-American.
muslim_american_themes = muslim_american_themes | {
'United Muslims of America'
}
ads_df = label_demographic_rows(ads_df, muslim_american_themes, 'Muslim-American')
print_top_references_for_theme(ads_df, muslim_american_themes, 'Muslim-American')
# No new rows where found!
right_wing_themes = right_wing_themes | {
'Libertarianism',
'Williams&Kalvin'
}
ads_df = label_demographic_rows(ads_df, right_wing_themes, 'Right wing')
# Although we have seen the number of 'Right wing' entries increase, we also have very few rows left and will use a manual process for these last entries.
print('At this point, only ' + str(pd.isnull(ads_df['demographic']).sum()) + ' ads are unlabelled.')
for v in ads_df[pd.isnull(ads_df['demographic'])]['ad_targeting_interests']:
print(v)
# After looking at ads from the output above we identify the following trends:
#
# * Ads containing Grooveshark, Music and Free software were aimed at people willing to install free software.
#
# * African-American entries containing (Black, Blacktivist)
#
# * Right-wing politics (Politics, Veterans, jesus, Secured Borders, Texas) seem to be aim at republican leaning demographic and will be included under the "Right-wing" umbrella category.
#
# * Left-wing politics (Bernie Sanders, Innocence Project, Born Liberal)
# +
# Reading is related to a group called South United
# Stop A.1. is about illegal immigration
# History is about a confederate group
# Fitness and wellness, Sports is about a pro police movement
# Automobiles as the ad stated "drive like a patriot"
right_wing_themes = right_wing_themes | {
'Police',
'Texas',
'Heart of Texas',
'Secured Borders',
'Politics',
'Veterans',
'Jesus',
'jesus love u',
'Right-wing politics',
'National Police Wives Association',
'Police; Law Enforcement Today',
'Right Wing News',
'Syria',
'State police',
'Iraq War Veterans',
'jesus love u. I Am a Child of God. Jesus Daily',
'Jesus Daily',
'Reading',
'Stop A.1.',
'Stop A. I.',
'History',
'Fitness and wellness',
'Sports',
'Automobiles'
}
ads_df = label_demographic_rows(ads_df, right_wing_themes, 'Right wing')
# +
free_music_software_themes = {
'Grooveshark',
'Free software',
'Music'
}
ads_df = label_demographic_rows(ads_df, free_music_software_themes, 'Free music software')
# +
# Antelope Valley College is about stop police brutality
# Facism is also about police brutality
# Tax is targeted at African-American through the Behavior filter
# BM is for Black Matters
african_american_themes = african_american_themes | {
'Blacktivist',
'Black Economic Empowerment',
'Racism in the United States Interest',
'Understanding racial segregation in the united states',
'African Methodist Episcopal Zion Church',
'Copwatch Rodney King; Police brutality in the United States; Stop Police Brutality; Cop Block',
'Detroit',
'Stop Police Brutality',
'Antelope Valley College',
'Fascism',
'Tax',
'Trayv<NAME>',
'BM'
}
ads_df = label_demographic_rows(ads_df, african_american_themes, 'African-American')
# +
left_wing_themes = {
'<NAME>',
'Innocence Project',
'Born Liberal',
'Liberalism',
'Homeless shelter',
}
ads_df = label_demographic_rows(ads_df, left_wing_themes, 'Left wing')
# -
native_american_themes = native_american_themes | {
'Standing Rock Indian Reservation'
}
ads_df = label_demographic_rows(ads_df, native_american_themes, 'Native-American')
memes_themes = memes_themes | {
'Memopolis'
}
ads_df = label_demographic_rows(ads_df, memes_themes, 'Memes')
# At this point, the entry below is the only entry remaining and even after looking at the file it is difficult to identify what demographic the group was targeting. We will simply drop this row.
ads_df[pd.isnull(ads_df['demographic'])]
ads_df['demographic'].value_counts()
ads_df = ads_df[~pd.isnull(ads_df['demographic'])]
# We have now added a demographic tag to nearly all our rows and taken the time to periodically assess whether the associations we were making were correct by verifying the related ads' keywords.
ads_df.to_csv('../clean_data/labeled_clean_data.csv', index=None, header=True)
# Shortcomings of this approach:
#
# * Order matters
# * Labeled data is not visited after it is labeled the first time. This technique may not work as well if ads had belonged strongly to more than 1 category.
#
# * Not as thorough as a manual review
# * Some studies used Mechanical Turks to review their labeling. Ideally we should also have a third party review our result.
#
# * Subjective
# * Although choosing whether an ad targeted a demographic is a subjective exercise when using this method, I believe this manual labeling gave us a better understanding of the way the IRA picked the interests themselves. In comparison, a purely computational approach result's were difficult to tie back with the ads and interpret even though the method used (tfidf and kmeans) were very simple. Additionally, the name of the demographic group is also subjective and can influence the perception and understanding of the IRA's intent when used in graph. To reduce this influence, I have tried to keep away from names that are politically charged as much as possible.
#
# This concludes demographic labeling. We can move on to the [analysis](analysis.ipynb) notebook.
|
src/demographic_labeling.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Germeval 2019: Pre-processing & baseline
#
# ```
# pip install scikit-multilearn
# ```
#
# Information for Subtask B: Exactly one parent is assigned to a child genre. The underlying hierarchy is a forest. The most specific writing genre of a book is not necessarily a leaf node.
# Change this
DATASET_DIR = '/Volumes/data/repo/data/germeval2019-1/GermEval2019T1_final_test_phase'
# +
import os
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import codecs
import re
from collections import defaultdict
import spacy
from spacy.util import minibatch, compounding
import pickle
from lxml import etree
from datetime import datetime
from sklearn.metrics import f1_score
from nltk.stem.snowball import SnowballStemmer
# -
np.random.seed(0)
class Book(object):
    """One blurb entry: text, metadata, and its hierarchical genre labels.

    `categories` maps topic name -> {'d': depth, ...}; depth 0 is the root
    genre level (subtask A), deeper levels belong to subtask B.
    """
    def __init__(self, title, body, _copyright, categories, authors, publish_date, isbn, url):
        self.title = title
        self.text = body
        # Some entries ship without a body; substitute a placeholder so the
        # text statistics below never see None. (Was `body == None`; identity
        # comparison is the correct idiom for None.)
        if body is None:
            self.text = "empty"
        self._copyright = _copyright
        self.categories = categories
        self.authors = authors
        self.publish_date = publish_date
        self.isbn = isbn
        self.url = url
    # method 2 in Blurb: get labels at a given hierarchy depth
    def get_task_labels(self, level=0):
        """Yield all topics assigned at hierarchy depth `level`."""
        if isinstance(self.categories, dict):
            for topic in self.categories:
                if self.categories[topic]['d'] == level:
                    yield topic
    def get_task1_labels(self):
        """Yield depth-0 (root genre) topics — the subtask A labels."""
        for topic in self.categories:
            if self.categories[topic]['d'] == 0:
                yield topic
    def get_task2_labels(self):
        """Yield depth-1 topics."""
        for topic in self.categories:
            if self.categories[topic]['d'] == 1:
                yield topic
    def to_df_item(self, with_text=True):
        """Return a dict of meta features (optionally including the raw text) for one DataFrame row."""
        # Age of the book in whole years since publication, floored at 1.
        age_in_years = max(round((datetime.today() - datetime.strptime(self.publish_date, '%Y-%m-%d')).days / 365), 1)
        words = self.text.split()
        words_len = np.array([len(w) for w in words])
        authors_str = ';'.join(self.authors)
        doc = {
            'isbn': self.isbn,
            'title': self.title,
            'title_words': len(self.title.split()),
            'author_count': len(self.authors),
            'authors': authors_str,
            # Crude academic-author flag based on honorifics in the name list.
            'authors_academic': 1 if 'dr.' in authors_str.lower() or 'prof.' in authors_str.lower() else 0,
            'copyright': self._copyright,
            'publish_date': self.publish_date,
            'age': age_in_years,
            'text_words': len(words),
            'text_words_10': round(len(words) / 10),
            'word_len_mean': round(np.mean(words_len)),
            'word_len_median': round(np.median(words_len)),
            'word_len_max': np.max(words_len),
        }
        if with_text:
            doc['text'] = self.text
        return doc
    def to_df_item_labels(self, categories_level):
        """Return a {label: 0/1} dict; label keys are prefixed with one '-' per hierarchy level."""
        labels = {('-' * level) + label: 0 for label, level in categories_level.items()}  # init with 0
        for cat, v in self.categories.items():
            k = ('-' * v['d']) + cat
            if k in labels:
                labels[k] = 1
        return labels
# +
def getMatrix(books):
    """Build one row per book: [blurb text] followed by one-hot task-1 label indicators.

    Relies on the module-level `task1_labels` list and `task1label2pos`
    position mapping being populated first.
    """
    rows = []
    for book in books:
        # One indicator column per task-1 label, in task1label2pos order.
        indicators = [0] * len(task1_labels)
        for label in book.get_task1_labels():
            indicators[task1label2pos[label]] = 1
        rows.append([book.text] + indicators)
    return rows
def parseXML(xml, task_labels=None):
    """Parse a blurbs XML file into Book objects.

    Args:
        xml: path to the (possibly malformed) blurbs file.
        task_labels: optional dict mapping label name -> hierarchy level;
            pass the dict returned by a previous call to accumulate labels
            across train/dev/test splits.

    Returns:
        (books, task_labels) — the parsed Book list and the updated mapping.
    """
    # BUG FIX: the default used to be `task_labels=dict()`, a mutable default
    # argument shared across calls — two argument-less calls would silently
    # share (and pollute) the same dict.
    if task_labels is None:
        task_labels = dict()
    tree = None
    xmlParser = etree.XMLParser(strip_cdata=False, resolve_entities=False, encoding='utf-8', remove_comments=True)
    try:
        tree = etree.parse(xml, parser=xmlParser).getroot()
    except etree.XMLSyntaxError:
        # Not well-formed XML (no single root): wrap the file in a root node,
        # escape bare ampersands, and re-parse from the string.
        # BUG FIX: the substitution was a no-op `re.sub('&', '&', ...)`;
        # the intended replacement is the XML entity '&amp;'.
        with codecs.open(xml, 'r') as fh:
            lines = fh.readlines()
        lines = ['<rootnode>'] + lines + ['</rootnode>']
        xmlstring = re.sub('&', '&amp;', '\n'.join(lines))
        tree = etree.fromstring(xmlstring)
    maxNrAuthors = 0
    books = []
    for book in tree:
        # Default every field to '_' so books with missing tags still parse.
        title, body, _cr, auths, pubdata, isbn, url = ['_'] * 7
        cats = defaultdict(lambda: defaultdict(str))
        for node in book:
            if node.tag == 'title':
                title = node.text
            elif node.tag == 'body':
                body = node.text
            elif node.tag == 'copyright':
                _cr = node.text
            elif node.tag == 'categories':
                # Layout: <categories><category><topic d="...">name</topic>...</category></categories>
                for subnode in node:
                    for ssubnode in subnode:
                        if ssubnode.tag == 'topic':
                            cats[ssubnode.text]['d'] = int(ssubnode.get('d'))
                            if 'label' in ssubnode.attrib:
                                cats[ssubnode.text]['label'] = ssubnode.get('label')
            elif node.tag == 'authors':
                if node.text:  # was empty for some...
                    auths = [x.strip() for x in node.text.split(',')]
                    if len(auths) > maxNrAuthors:
                        maxNrAuthors = len(auths)
            elif node.tag == 'published':
                pubdata = node.text
            elif node.tag == 'isbn':
                isbn = node.text
            elif node.tag == 'url':
                url = node.text
        b = Book(title, body, _cr, cats, auths, pubdata, isbn, url)
        # Record every label seen, tagged with its hierarchy level (0-2).
        task_labels.update({label: 0 for label in b.get_task_labels(0)})
        task_labels.update({label: 1 for label in b.get_task_labels(1)})
        task_labels.update({label: 2 for label in b.get_task_labels(2)})
        books.append(b)
    return books, task_labels
# -
# Parse the train and dev splits; the dev call is handed the task_labels
# dict from the train call so the label->level mapping accumulates over both.
trainbooks, task_labels = parseXML(os.path.join(DATASET_DIR, 'blurbs_train.txt'))
validationbooks, task_labels = parseXML(os.path.join(DATASET_DIR, 'blurbs_dev.txt'), task_labels)
# Root-level (subtask A) labels only.
task1_labels = [label for label, level in task_labels.items() if level == 0]
# Inspect the label sets and one sample book.
task1_labels
task_labels
trainbooks[0].__dict__
# +
# Build feature/label DataFrames for both splits. `with_text` controls whether
# the raw blurb text is carried along in a 'text' column.
with_text = True
doc_cols = list(trainbooks[0].to_df_item(with_text).keys())
label_cols = list(trainbooks[0].to_df_item_labels(task_labels).keys())
train_df = pd.DataFrame([{**d.to_df_item(with_text), **d.to_df_item_labels(task_labels)} for d in trainbooks])
val_df = pd.DataFrame([{**d.to_df_item(with_text), **d.to_df_item_labels(task_labels)} for d in validationbooks])
# Fix the column order: meta features first, then the label indicator columns.
train_df = train_df.reindex(columns=doc_cols + label_cols)
val_df = val_df.reindex(columns=doc_cols + label_cols)
val_df.head()
# -
#df.groupby('author_count')['col_A'].agg(['min', 'max', 'mean','median']).transpose().plot(kind='bar')
#df.groupby('author_count')['author_count'].agg(['count'])
train_df['author_count'].value_counts().sort_index().plot(kind='bar')
val_df['author_count'].value_counts().sort_index().plot(kind='bar')
val_df['authors_academic'].value_counts().sort_index().plot(kind='bar')
train_df['age'].value_counts().sort_index()[:50].plot(kind='bar',figsize=(15,8))
val_df['age'].value_counts().sort_index()[:50].plot(kind='bar',figsize=(15,8))
train_df['title_words'].value_counts().sort_index().plot(kind='bar',figsize=(15,8))
# +
# BERT sequence limit? only 63 items > 256 ==> no problem
words_limit = 300
ds = train_df[train_df['text_words'] > words_limit]
print(f'DOCS with words > {words_limit}: {len(ds)} / {len(train_df)}')
ds['text_words'].value_counts().sort_index() #.plot(kind='bar',figsize=(15,8))
# -
train_df['copyright'].value_counts().sort_index().plot(kind='bar')
val_df['word_len_mean'].value_counts().sort_index().plot(kind='bar')
val_df['word_len_median'].value_counts().sort_index().plot(kind='bar')
val_df['word_len_max'].value_counts().sort_index().plot(kind='bar')
# Most frequent labels
val_df[task1_labels].sum().plot(kind='bar')
val_df[label_cols].sum().sort_values(ascending=False)[:10].plot(kind='bar')
# 2nd level
val_df[[l for l in label_cols if l.startswith('-') and not l.startswith('--')]].sum().sort_values(ascending=False)[:20].plot(kind='bar')
# 3rd level
val_df[[l for l in label_cols if l.startswith('--')]].sum().sort_values(ascending=False)[:20].plot(kind='bar')
# +
# Save with text + labels
#train_df = pd.DataFrame([d.to_df_item(task_labels, True) for d in trainbooks])
#val_df = pd.DataFrame([d.to_df_item(task_labels, True) for d in validationbooks])
# Pickle for later use
with open('germeval_train_df_meta.pickle', 'wb') as f:
pickle.dump((train_df, doc_cols, label_cols, task1_labels), f)
with open('germeval_val_df_meta.pickle', 'wb') as f:
pickle.dump((val_df, doc_cols, label_cols, task1_labels), f)
#########
# -
# # Low Resource: Number of available training data per label
# +
task2_labels = label_cols
#task1_labels
len(task2_labels)
print(f'Total labels: {len(task2_labels)} / total training samples: {len(train_df)}')
label2sample_count = {}
groups_f = {
'1': lambda x: x == 1,
'< 5': lambda x: 1 < x < 5,
'< 10': lambda x: 5 <= x < 10,
'< 25': lambda x: 10 <= x < 25,
'< 50': lambda x: 25 <= x < 50,
'< 100': lambda x: 50 <= x < 100,
'< 250': lambda x: 100 <= x < 250,
'≥ 250': lambda x: x > 250,
}
groups_f = {
'1-9': lambda x: 1 <= x < 10,
'10-19': lambda x: 10 <= x < 20,
'20-29': lambda x: 20 <= x < 30,
'30-39': lambda x: 30 <= x < 40,
'40-49': lambda x: 40 <= x < 50,
'≥ 50': lambda x: x >= 50,
#'50-99': lambda x: 50 <= x < 100,
#'100-199': lambda x: 100 <= x < 200,
#'≥ 200': lambda x: x >= 200,
}
groups_v = {v: 0 for v in groups_f}
for label in task2_labels:
c = sum(train_df[label] == 1)
label2sample_count[label] = c
for g in groups_f:
if groups_f[g](c):
if g in groups_v:
groups_v[g] += 1
else:
groups_v[g] = 1
print(groups_v)
#print(groups_v['1'] + groups_v['< 5'] + groups_v['< 10'])
# Bar chart: how many label classes fall into each sample-count bucket.
plt.bar(groups_v.keys(), groups_v.values(), align='center')
plt.xticks(list(groups_v.keys()))
plt.ylabel('Number of label classes')
plt.xlabel('Available number of samples per label in training set')
#plt.title('Programming language usage')
plt.savefig('samples_per_labels.pdf')
plt.show()
# groups_v maps bucket-name -> int (all scalar values).  The original call
# pd.DataFrame.from_dict(groups_v) raises
# "ValueError: If using all scalar values, you must pass an index",
# so build the frame row-wise instead.
pd.DataFrame.from_dict(groups_v, orient='index', columns=['label classes'])
# -
# # Do author stick to their genre?
# +
author2labels = {} # name => set(labels)
#for idx, row in pd.concat([train_df, val_df]).iterrows():
# for author in row['author'].split(';'):
# if author not in author2labels:
#
for b in trainbooks:
for a in b.authors:
if a not in author2labels:
author2labels[a] = set()
author2labels[a].add('/'.join(list(b.categories.keys())))
# -
author2labels
# +
#global task1label2pos
#global task1pos2label
task1label2pos = {}
task1pos2label = {}
for i, l in enumerate(task1labels):
task1label2pos[l] = i
task1pos2label[i] = l
train_m = getMatrix(trainbooks)
validation_m = getMatrix(validationbooks)
headers = ['blurb'] + [task1pos2label[x] for x in sorted(task1pos2label.keys())]
train_df = pd.DataFrame(train_m, columns=headers)
val_df = pd.DataFrame(validation_m, columns=headers)
print('INFO: Preprocessing data... completed\n')
# -
train_df.head()
val_df.head()
# +
# Pickle for later use
with open('germeval_train_df.pickle', 'wb') as f:
pickle.dump(train_df, f)
with open('germeval_val_df.pickle', 'wb') as f:
pickle.dump(val_df, f)
# +
def cleanPunc(sentence):
    """Clean a sentence of punctuation and special characters.

    Removes ?, !, ', ", # (and |, see note), replaces ., ,, (, ), \\, /
    with spaces, strips surrounding whitespace, and flattens embedded
    newlines to single spaces.
    """
    # BUG FIX (idiom): the original classes were written like [?|!|'|"|#],
    # where '|' inside [...] is a *literal* pipe, not alternation — so the
    # pipe character itself was being stripped too.  The rewritten classes
    # make that explicit and keep the behavior identical.
    cleaned = re.sub(r'[?!\'"#|]', r'', sentence)
    cleaned = re.sub(r'[.,()\\/]', r' ', cleaned)
    cleaned = cleaned.strip()
    cleaned = cleaned.replace("\n", " ")
    return cleaned
def removeStopWords(sentence, re_stop_words):
    """Replace every stop-word match of *re_stop_words* in *sentence* with a space.

    A None or empty sentence is reported with a warning and mapped to ''.
    """
    if not sentence:
        print('WARNING: null sentence')
        return ''
    return re_stop_words.sub(" ", sentence)
def stemming(sentence, stemmer):
    """Return *sentence* with every whitespace-separated token stemmed.

    Tokens are joined back together with single spaces; leading/trailing
    whitespace and runs of whitespace in the input are therefore collapsed.
    """
    stems = [stemmer.stem(token) for token in sentence.split()]
    return " ".join(stems)
# +
#nlp = spacy.load('de')
nlp = spacy.load('de_core_news_md') # sm models don’t ship with word vectors
tok = spacy.lang.de.German().Defaults.create_tokenizer(nlp)
stopwords = nlp.Defaults.stop_words
re_stop_words = re.compile(r"\b(" + "|".join(stopwords) + ")\\W", re.I)
stemmer = SnowballStemmer("german")
# +
train_df['clean_blurb'] = train_df['blurb']\
.apply(lambda x: stemming(x, stemmer))\
.apply(lambda x: removeStopWords(x, re_stop_words))\
.apply(lambda x: cleanPunc(x))
val_df['clean_blurb'] = val_df['blurb']\
.apply(lambda x: stemming(x, stemmer))\
.apply(lambda x: removeStopWords(x, re_stop_words))\
.apply(lambda x: cleanPunc(x))
# -
def displayInfo1(df):
    """Bar-plot the number of blurbs in each label category.

    Assumes every column of *df* after the first holds 0/1 label
    indicators (the first column is the blurb text) — TODO confirm
    against the caller's dataframe layout.
    """
    categories = list(df.iloc[:, 1:].columns.values)
    counts = df.iloc[:, 1:].sum().values
    sns.set(font_scale=1)
    plt.figure(figsize=(15, 8))
    # BUG FIX: seaborn >= 0.12 no longer accepts positional data arguments
    # to barplot (which is why this function "does not work", see the
    # caller's note); pass x=/y= explicitly.
    ax = sns.barplot(x=categories, y=counts)
    plt.title("Blurbs in each category", fontsize=24)
    plt.ylabel('Number of blurbs', fontsize=18)
    plt.xlabel('Blurb Type ', fontsize=18)
    # annotate each bar with its count
    rects = ax.patches
    for rect, label in zip(rects, counts):
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom', fontsize=18)
    plt.show()
def displayInfo2(df):
    """Bar-plot the distribution of blurbs over their number of labels.

    Assumes every column of *df* after the first is a 0/1 label indicator,
    so the per-row sum is the blurb's label count.
    NOTE(review): value_counts() sorts by frequency and iloc[1:] drops the
    most frequent bin — presumably the single-label blurbs, but that is
    only guaranteed if they really are the most frequent; confirm.
    """
    rowSums = df.iloc[:, 1:].sum(axis=1)
    multiLabel_counts = rowSums.value_counts()
    multiLabel_counts = multiLabel_counts.iloc[1:]
    sns.set(font_scale=1)
    plt.figure(figsize=(15, 8))
    # BUG FIX: seaborn >= 0.12 requires keyword data arguments to barplot;
    # the original positional call fails on current seaborn versions.
    ax = sns.barplot(x=multiLabel_counts.index, y=multiLabel_counts.values)
    plt.title("Blurbs having multiple labels ")
    plt.ylabel('Number of blurbs', fontsize=18)
    plt.xlabel('Number of labels', fontsize=18)
    # annotate each bar with its count
    rects = ax.patches
    labels = multiLabel_counts.values
    for rect, label in zip(rects, labels):
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
    plt.show()
# +
#displayInfo1(val_df) # does not work
displayInfo2(val_df)
# -
# # Baseline
#
# Seems like there is no randomness anywhere: ran fit and predict 10 times, 10 times the exact same value:
# INFO: Average weighted f1 over 10 runs: 0.729600
# +
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(strip_accents='unicode', analyzer='word', ngram_range=(1,4), norm='l2', max_features=10000)
#train_x = train_df.blurb
train_x = train_df.clean_blurb
train_y = train_df.iloc[:,1:-1]
#val_x = val_df.blurb
val_x = val_df.clean_blurb
val_y = val_df.iloc[:,1:-1]
# BUG FIX: fit the vocabulary / IDF weights on the *training* texts only.
# The original code called fit() a second time on val_x, which replaced
# the training fit with one computed on the validation set (information
# leakage, and x_train was transformed with the wrong vocabulary).
vectorizer.fit(train_x)
x_train = vectorizer.transform(train_x)
y_train = train_y
x_val = vectorizer.transform(val_x)
y_val = val_y
# -
val_y[:1]
# +
# using classifier chains
from skmultilearn.problem_transform import ClassifierChain
from sklearn.linear_model import LogisticRegression#SGDClassifier
# initialize classifier chains multi-label classifier
classifier = ClassifierChain(LogisticRegression(solver='lbfgs'))
print('INFO: Training classifier...\n')
# -
np.random.seed(None)
# +
fscores = []
numIterations = 10
for i in range(numIterations):
print('INFO: Running iteration %d...\n' % (i+1))
classifier.fit(x_train, y_train)
predictions = classifier.predict(x_val)
f1 = f1_score(y_val,predictions, average='weighted')
print('INFO: weighted f1: %f\n' % f1)
fscores.append(f1)
print('INFO: Average weighted f1 over %d runs: %f\n' % (numIterations, (np.mean(fscores))))
# -
# INFO: weighted f1: 0.660669
# # Test data
#
# ### Sample submission
#
# ```
# # filename: Funtastic4__SVM_NAIVEBAYES_ensemble1.txt
# subtask_a
# 9783809436690 Literatur & Unterhaltung
# 9783453702653 Literatur & Unterhaltung
# 9783453407343 Ratgeber
# 9783424631388 Literatur & Unterhaltung
# 9783641058142 Literatur & Unterhaltung
# subtask_b
# 9783809436690 Literatur & Unterhaltung Romane & Erzählungen
# ...
# ```
# +
# join train + dev
fulltrain_books, task_labels = parseXML(os.path.join(DATASET_DIR, 'blurbs_train_and_dev.txt'))
# test without labels
test_books, _ = parseXML(os.path.join(DATASET_DIR, 'blurbs_test_nolabel.txt'))
# +
with_text = True
task1_labels = [label for label, level in task_labels.items() if level == 0]
doc_cols = list(fulltrain_books[0].to_df_item(with_text).keys())
label_cols = list(fulltrain_books[0].to_df_item_labels(task_labels).keys())
fulltrain_df = pd.DataFrame([{**d.to_df_item(with_text), **d.to_df_item_labels(task_labels)} for d in fulltrain_books])
test_df = pd.DataFrame([{**d.to_df_item(with_text), **d.to_df_item_labels(task_labels)} for d in test_books])
fulltrain_df = fulltrain_df.reindex(columns=doc_cols + label_cols)
test_df = test_df.reindex(columns=doc_cols)
test_df.head()
# +
# Pickle for later use
with open('germeval_fulltrain_df_meta.pickle', 'wb') as f:
pickle.dump((fulltrain_df, doc_cols, label_cols, task1_labels), f)
with open('germeval_test_df_meta.pickle', 'wb') as f:
pickle.dump((test_df, doc_cols, label_cols, task1_labels), f)
# -
# +
# authors to file: external script with retrieve additional author info
authors = list(fulltrain_df['authors'].values) + list(test_df['authors'].values) # TODO test data
with open('authors.pickle', 'wb') as f:
    # list -> ';'-delimited str
pickle.dump(authors, f)
# without test: 16627
len(authors)
# -
authors[:10]
|
germeval-data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1 align="center"> Machine learning-based prediction of early recurrence in glioblastoma patients: a glance towards precision medicine <br><br> [Statistical Analysis]</h1>
# <h2>[1] Library</h2>
# +
# OS library
import os
import sys
import argparse
import random
from math import sqrt
# Analysis
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from scipy import stats
import statsmodels.api as sm
from statsmodels.stats.proportion import proportion_confint
import pingouin as pg
# %matplotlib inline
# -
# <h2>[2] Data Preprocessing</h2>
# <h4>[-] Load the database</h4>
# +
file = os.path.join(sys.path[0], "db.xlsx")
db = pd.read_excel(file)
print("N° of patients: {}".format(len(db)))
print("N° of columns: {}".format(db.shape[1]))
db.head()
# -
# <h4>[-] Drop unwanted columns + create <i>'results'</i> column</h4>
# +
df = db.drop(['Name_Surname','SURVIVAL', 'OS', '...'], axis = 'columns')
print("Effective features to consider: {} ".format(len(df.columns)-1))
print("Creating 'result' column...")
# 0 = No relapse
df.loc[df['PFS'] > 6, 'outcome'] = 0
# 1 = Early relapse (within 6 months)
df.loc[df['PFS'] <= 6, 'outcome'] = 1
# -
# <h2>[3] Count and Frequency</h2>
df.groupby(['outcome', '...']).count()
df['...'].describe()
# <h2>[4] Statistical Association</h2>
# <ul>
# <li>Levene's test is an inferential statistic used to assess the equality of variances for a variable calculated for two or more groups. If p-value >> 0.05, no difference in variances between the groups</li>
# <li>F-one way ANOVA test is performed if the variance is the same</li>
# </ul>
# +
non_early = df[df['outcome'] == 0]['...']
early_relapse = df[df['outcome'] == 1]['...']
print(non_early.shape)
print(stats.levene(non_early, early_relapse))
print(stats.f_oneway(non_early, early_relapse))
## Change equal_var to False if Levene p-value is below 0.05
print(stats.ttest_ind(non_early, early_relapse, equal_var=True))
# +
sex_ct = pd.crosstab(df['...'], df['outcome'])
print("--- *** Contingency Table *** --- \n",sex_ct)
print("\n--- *** Chi-Square *** ---")
stat, p, dof, expected = stats.chi2_contingency(sex_ct, correction = False)
print("DOF=%d" % dof)
print("Expected values = ", expected)
print("p-value = ", p)
print("stat = ", stat)
prob = 0.95
critical = stats.chi2.ppf(prob, dof)
if abs(stat) >= critical:
print('\nDependent (reject H0), [Critical: {}]'.format(critical))
else:
print('\nIndependent (fail to reject H0), [Critical: {}]'.format(critical))
# -
# <h4>[-] Holm-Bonferroni correction</h4>
pvals = [...]
significant, adjusted = pg.multicomp(pvals, alpha=0.05, method='holm')
tab = {'Uncorrected':pvals, 'Adjusted':adjusted, 'Significant':significant}
df = pd.DataFrame(tab)
df
# <h2>[5] Multivariable Analysis</h2>
# <h4>[-] Label encoding</h4>
dummy_v = ['localization', '...']
df = pd.get_dummies(df, columns = dummy_v, prefix = dummy_v)
df[['..']].astype(float)
df.head(5)
# +
cols_to_keep = ['...']
data = df[cols_to_keep]
# manually add the intercept
data['intercept'] = 1.0
data.head()
data.columns
# -
train_cols = ['...']
logit = sm.Logit(data['outcome'], data[train_cols], missing = 'drop')
result = logit.fit()
result.summary(alpha = 0.05)
# +
coef = result.params
p = result.pvalues
conf = result.conf_int(alpha = 0.05)
conf['OR'] = coef
conf.columns = ['2.5%', '97.5%', 'OR']
conf = np.exp(conf)
conf['p-value'] = p
# -
# <h4>[-] Export Multivariable as Excel file</h4>
conf.to_excel("multivariable.xlsx")
|
Statistical Analysis.ipynb
|
from time import time
import numpy as np
import matplotlib.pyplot as plt
from HARK.utilities import plotFuncs
from HARK.ConsumptionSaving.ConsAggShockModel import (
AggShockConsumerType,
CobbDouglasEconomy,
AggShockMarkovConsumerType,
CobbDouglasMarkovEconomy,
KrusellSmithType,
KrusellSmithEconomy
)
from HARK.distribution import DiscreteDistribution
from scipy.stats import linregress
from copy import deepcopy
def mystr(number):
    """Format *number* with exactly four decimal places."""
    return f"{number:.4f}"
# Solve an AggShockConsumerType's microeconomic problem
solve_agg_shocks_micro = False
# Solve for the equilibrium aggregate saving rule in a CobbDouglasEconomy
solve_agg_shocks_market = False
# Solve an AggShockMarkovConsumerType's microeconomic problem
solve_markov_micro = False
# Solve for the equilibrium aggregate saving rule in a CobbDouglasMarkovEconomy
solve_markov_market = False
# Solve a simple Krusell-Smith-style two state, two shock model
solve_krusell_smith = True
# Solve a CobbDouglasEconomy with many states, potentially utilizing the "state jumper"
solve_poly_state = False
# ### Example implementation of AggShockConsumerType
if solve_agg_shocks_micro or solve_agg_shocks_market:
# Make an aggregate shocks consumer type
AggShockExample = AggShockConsumerType()
AggShockExample.cycles = 0
# Make a Cobb-Douglas economy for the agents
EconomyExample = CobbDouglasEconomy(agents=[AggShockExample])
EconomyExample.makeAggShkHist() # Simulate a history of aggregate shocks
# Have the consumers inherit relevant objects from the economy
AggShockExample.getEconomyData(EconomyExample)
if solve_agg_shocks_micro:
# Solve the microeconomic model for the aggregate shocks example type (and display results)
t_start = time()
AggShockExample.solve()
t_end = time()
print(
"Solving an aggregate shocks consumer took "
+ mystr(t_end - t_start)
+ " seconds."
)
print(
"Consumption function at each aggregate market resources-to-labor ratio gridpoint:"
)
m_grid = np.linspace(0, 10, 200)
AggShockExample.unpack('cFunc')
for M in AggShockExample.Mgrid.tolist():
mMin = AggShockExample.solution[0].mNrmMin(M)
c_at_this_M = AggShockExample.cFunc[0](m_grid + mMin, M * np.ones_like(m_grid))
plt.plot(m_grid + mMin, c_at_this_M)
plt.ylim(0.0, None)
plt.show()
if solve_agg_shocks_market:
# Solve the "macroeconomic" model by searching for a "fixed point dynamic rule"
t_start = time()
print("Now solving for the equilibrium of a Cobb-Douglas economy. This might take a few minutes...")
EconomyExample.solve()
t_end = time()
print(
'Solving the "macroeconomic" aggregate shocks model took '
+ str(t_end - t_start)
+ " seconds."
)
print("Aggregate savings as a function of aggregate market resources:")
plotFuncs(EconomyExample.AFunc, 0, 2 * EconomyExample.kSS)
print(
"Consumption function at each aggregate market resources gridpoint (in general equilibrium):"
)
AggShockExample.unpack('cFunc')
m_grid = np.linspace(0, 10, 200)
AggShockExample.unpack('cFunc')
for M in AggShockExample.Mgrid.tolist():
mMin = AggShockExample.solution[0].mNrmMin(M)
c_at_this_M = AggShockExample.cFunc[0](m_grid + mMin, M * np.ones_like(m_grid))
plt.plot(m_grid + mMin, c_at_this_M)
plt.ylim(0.0, None)
plt.show()
# ### Example Implementations of AggShockMarkovConsumerType
if solve_markov_micro or solve_markov_market:
# Make a Markov aggregate shocks consumer type
AggShockMrkvExample = AggShockMarkovConsumerType()
AggShockMrkvExample.IncomeDstn[0] = 2 * [AggShockMrkvExample.IncomeDstn[0]]
AggShockMrkvExample.cycles = 0
# Make a Cobb-Douglas economy for the agents
MrkvEconomyExample = CobbDouglasMarkovEconomy(agents=[AggShockMrkvExample])
MrkvEconomyExample.DampingFac = 0.2 # Turn down damping
MrkvEconomyExample.makeAggShkHist() # Simulate a history of aggregate shocks
AggShockMrkvExample.getEconomyData(
MrkvEconomyExample
) # Have the consumers inherit relevant objects from the economy
if solve_markov_micro:
# Solve the microeconomic model for the Markov aggregate shocks example type (and display results)
t_start = time()
AggShockMrkvExample.solve()
t_end = time()
print(
"Solving an aggregate shocks Markov consumer took "
+ mystr(t_end - t_start)
+ " seconds."
)
print("Consumption function at each aggregate market resources-to-labor ratio gridpoint (for each macro state):")
m_grid = np.linspace(0, 10, 200)
AggShockMrkvExample.unpack('cFunc')
for i in range(2):
for M in AggShockMrkvExample.Mgrid.tolist():
mMin = AggShockMrkvExample.solution[0].mNrmMin[i](M)
c_at_this_M = AggShockMrkvExample.cFunc[0][i](
m_grid + mMin, M * np.ones_like(m_grid)
)
plt.plot(m_grid + mMin, c_at_this_M)
plt.ylim(0.0, None)
plt.show()
if solve_markov_market:
# Solve the "macroeconomic" model by searching for a "fixed point dynamic rule"
t_start = time()
MrkvEconomyExample.verbose = True
print("Now solving a two-state Markov economy. This should take a few minutes...")
MrkvEconomyExample.solve()
t_end = time()
print(
'Solving the "macroeconomic" aggregate shocks model took '
+ str(t_end - t_start)
+ " seconds."
)
print("Consumption function at each aggregate market resources-to-labor ratio gridpoint (for each macro state):")
m_grid = np.linspace(0, 10, 200)
AggShockMrkvExample.unpack('cFunc')
for i in range(2):
for M in AggShockMrkvExample.Mgrid.tolist():
mMin = AggShockMrkvExample.solution[0].mNrmMin[i](M)
c_at_this_M = AggShockMrkvExample.cFunc[0][i](
m_grid + mMin, M * np.ones_like(m_grid)
)
plt.plot(m_grid + mMin, c_at_this_M)
plt.ylim(0.0, None)
plt.show()
if solve_krusell_smith:
# Make default KS agent type and economy
KSeconomy = KrusellSmithEconomy()
KStype = KrusellSmithType()
KStype.cycles = 0
KStype.getEconomyData(KSeconomy)
KSeconomy.agents = [KStype]
KSeconomy.makeMrkvHist()
# Solve the Krusell-Smith economy
t0 = time()
print("Now solving for the equilibrium of the Krusell-Smith (1998) model. This might take a few minutes...")
KSeconomy.solve()
t1 = time()
print('Solving the Krusell-Smith model took ' + str(t1-t0) + ' seconds.')
state_names = ['bad economy, unemployed', 'bad economy, employed',
'good economy, unemployed', 'good economy, employed']
# Plot the consumption function for each discrete state
for j in range(4):
plt.xlabel(r'Idiosyncratic market resources $m$')
plt.ylabel(r'Consumption $c$')
plt.title('Consumption function by aggregate market resources: ' + state_names[j])
plotFuncs(KStype.solution[0].cFunc[j].xInterpolators, 0., 50.)
# Extract history of aggregate capital and run a serial autoregression
mystr = lambda x : '{:.4f}'.format(x)
mystr2 = lambda x : '{:.7f}'.format(x)
K_hist = np.array(KSeconomy.history['Aprev'])[KSeconomy.T_discard:]
Mrkv_hist = KSeconomy.MrkvNow_hist[KSeconomy.T_discard:]
bad = Mrkv_hist[:-1] == 0
good = Mrkv_hist[:-1] == 1
logK_t = np.log(K_hist[:-1])
logK_tp1 = np.log(K_hist[1:])
results_bad = linregress(logK_t[bad], logK_tp1[bad])
results_good = linregress(logK_t[good], logK_tp1[good])
print('')
print('Equilibrium dynamics of aggregate capital:')
print("Bad state: log k' = " + mystr(results_bad[1]) + ' + ' + mystr(results_bad[0]) + ' log k (r-sq = ' + mystr2(results_bad[2]**2) + ')')
print("Good state: log k' = " + mystr(results_good[1]) + ' + ' + mystr(results_good[0]) + ' log k (r-sq = ' + mystr2(results_good[2]**2) + ')')
print('')
print("Krusell & Smith's published results (p877):")
print("Bad state: log k' = 0.085 + 0.965 log k (r-sq = 0.999998)")
print("Good state: log k' = 0.095 + 0.962 log k (r-sq = 0.999998)")
if solve_poly_state:
StateCount = 15 # Number of Markov states
GrowthAvg = 1.01 # Average permanent income growth factor
GrowthWidth = 0.02 # PermGroFacAgg deviates from PermGroFacAgg in this range
Persistence = 0.90 # Probability of staying in the same Markov state
PermGroFacAgg = np.linspace(
GrowthAvg - GrowthWidth, GrowthAvg + GrowthWidth, num=StateCount
)
# Make the Markov array with chosen states and persistence
PolyMrkvArray = np.zeros((StateCount, StateCount))
for i in range(StateCount):
for j in range(StateCount):
if i == j:
PolyMrkvArray[i, j] = Persistence
elif (i == (j - 1)) or (i == (j + 1)):
PolyMrkvArray[i, j] = 0.5 * (1.0 - Persistence)
PolyMrkvArray[0, 0] += 0.5 * (1.0 - Persistence)
PolyMrkvArray[StateCount - 1, StateCount - 1] += 0.5 * (1.0 - Persistence)
# Make a consumer type to inhabit the economy
PolyStateExample = AggShockMarkovConsumerType()
PolyStateExample.MrkvArray = PolyMrkvArray
PolyStateExample.PermGroFacAgg = PermGroFacAgg
PolyStateExample.IncomeDstn[0] = StateCount * [PolyStateExample.IncomeDstn[0]]
PolyStateExample.cycles = 0
# Make a Cobb-Douglas economy for the agents
# Use verbose=False to remove printing of intercept
PolyStateEconomy = CobbDouglasMarkovEconomy(agents=[PolyStateExample], verbose=False)
PolyStateEconomy.MrkvArray = PolyMrkvArray
PolyStateEconomy.PermGroFacAgg = PermGroFacAgg
PolyStateEconomy.PermShkAggStd = StateCount * [0.006]
PolyStateEconomy.TranShkAggStd = StateCount * [0.003]
PolyStateEconomy.slope_prev = StateCount * [1.0]
PolyStateEconomy.intercept_prev = StateCount * [0.0]
PolyStateEconomy.update()
PolyStateEconomy.makeAggShkDstn()
PolyStateEconomy.makeAggShkHist() # Simulate a history of aggregate shocks
PolyStateExample.getEconomyData(
PolyStateEconomy
) # Have the consumers inherit relevant objects from the economy
# Solve the many state model
t_start = time()
print(
"Now solving an economy with "
+ str(StateCount)
+ " Markov states. This might take a while..."
)
PolyStateEconomy.solve()
t_end = time()
print(
"Solving a model with "
+ str(StateCount)
+ " states took "
+ str(t_end - t_start)
+ " seconds."
)
|
examples/ConsumptionSaving/example_ConsAggShockModel.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Statistical analysis
# In this notebook we use _pandas_ and the _stats_ module from _scipy_ for some basic statistical analysis.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import pandas as pd
from matplotlib import pyplot as plt
plt.style.use("ggplot")
# -
# First we need some data. Let's use pandas to load the _'adult'_ data set from the _UC Irvine Machine Learning Repository_ into our dataframe.
# + jupyter={"outputs_hidden": false}
df = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", names=["Age", "Workclass", "fnlwgt", "Education", "Education-Num", "Martial Status",
"Occupation", "Relationship", "Race", "Sex", "Capital Gain", "Capital Loss",
"Hours per week", "Country", "Target"])
# some data cleaning remove leading and trailing spaces
df['Sex'] = df['Sex'].str.strip()
df.head()
# -
# ### Descriptive statistics
# Let's have a first look at the shape of our dataframe.
# + jupyter={"outputs_hidden": false}
df.shape
# -
# What are the column names.
df.columns
# We can calculate the mean, median, standard error of the mean (sem), variance, standard deviation (std) and the quantiles for every column in the dataframe
# + jupyter={"outputs_hidden": false}
df.mean()
# + jupyter={"outputs_hidden": false}
df.median()
# + jupyter={"outputs_hidden": false}
df.sem()
# + jupyter={"outputs_hidden": false}
df.var()
# + jupyter={"outputs_hidden": false}
df.std()
# + jupyter={"outputs_hidden": false}
df.quantile(q=0.5)
# + jupyter={"outputs_hidden": false}
df.quantile(q=[0.05, 0.95])
# -
# In the next sample we replace a value with _None_ so that we can show how to handle missing values in a dataframe.
# ## Basic visualization
# First let's create a pair plot
_ = sns.pairplot(df, hue="Target")
_ = sns.displot(df, x="Age" ,hue="Sex", label="male", kind="kde", log_scale=False)
# ## Inferential statistics
female = df[df.Sex == 'Female']
male = df[df.Sex == 'Male']
# T-Test
# + jupyter={"outputs_hidden": false}
t, p = stats.ttest_ind(female['Age'], male['Age'])
print("test statistic: {}".format(t))
print("p-value: {}".format(p))
# -
# Wilcoxon rank-sum test
z, p = stats.ranksums(female['Age'], male['Age'])
print("test statistic: {}".format(z))
print("p-value: {}".format(p))
|
notebooks/basics/statistical_analysis.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# Uso interactivo
# ===
# #### Contenido
# > * [Cálculos numéricos](#Cálculos-numéricos)
# * [Funciones](#Funciones)
# * [Funciones matemáticas](#Funciones-matemáticas)
# * [Cadenas de caracteres](#Cadenas-de-caracteres)
# * [Vectores](#Vectores)
# * [Listas](#Listas)
# # Cálculos numéricos
# [Contenido](#Contenido)
# R puede ser usado de forma interactiva como una calculadora. Esto permite que el análisis de datos pueda ser realizado de forma interactiva, de forma similar a como pueden usarse otras herramientas como el lenguaje Python o Matlab. A continuación se ejemplifican los cálculos aritméticos básicos.
2 + 2
50 - 5 * 6
(50 - 5 * 6) / 4 # aritmética entera
8 / 5 # resultado real
8 %% 5 # residuo de la division
8 %/% 5 # parte entera de la división
5 ** 2 # potencia
5 ^ 2 # potencia
# La asignación puede hacerse de diferentes formas.
x <- 1 # asignación típica en R
x = 1 # tradicional en muchos lenguajes de programación
1 -> x # posible, pero poco frecuente
# También se pueden declarar y usar variables en la ejecución interactiva.
width <- 20
height <- 5 * 9
width * height
tax <- 12.5 / 100
price <- 100.50
price * tax
# # Funciones
# [Contenido](#Contenido)
# Las funciones son definidas mediante la palabra reservada `function`. La siguiente función devuelve el cuadrado de su argumento.
square <- function(x) {
return (x**2) # lo que se retorna tiene que ir obligatoriamente entre paréntesis
}
square(2)
square <- function(x) {
x**2 # el último cálculo antes de salir de la función es lo que se retorna
}
square(2)
square(1+2)
square(square(2))
square(1) + square(2)
sum_of_squares <- function(x, y) square(x) + square(y) # las funciones puden ser llamadas dentro de otra
sum_of_squares(1, 2)
# # Funciones matemáticas
# [Contenido](#Contenido)
cos(3.141516)
pi
# **Ejercicio.** Calcule el valor de la siguiente expresión:
#
# $$\frac{5-(1 -(3 - \exp(\frac{1}{8}))}{3(4-2)(2-\frac{3}{8})} - 2!(-1)^3 + \sin (0.98\pi)$$
# # Cadenas de caracteres
# [Contenido](#Contenido)
# En R también pueden usarse cadenas de caracteres (strings). Ellas pueden delimitarse usando comillas simples o dobles.
'hola mundo' # comillas simples
"hola mundo" # comillas dobles
'--"--' # uso alternado de comillas.
"--'--"
'--\'--' # En estos dos casos se requiere usar el `\` para indicar que la comilla intermedia no es el delimitador.
"--\"--"
# El caracter de escape `\n` indica retorno-de-carro o nueva-linea.
s = 'Primera linea.\nsegunda linea.'
s
print(s) # se debe usar la función print para imprimir con formato.
cat('Primera linea.\nsegunda linea.')
" Los strings de varias lineas pueden \
escribirse indicando la continuación de \
dobles y son usados corrientemente como \
comentarios \
"
cat("
Los strings de varias lineas pueden \
escribirse deli pormitandolos tres comillas \
dobles y son usados corrientemente como \
comentarios
")
paste('Py', 'thon', sep = '')
paste(paste(rep('abc', 3), collapse = ' '), '012', sep=' ')
# borra los caracteres '-' de la cadena de texto
gsub(pattern='-', replacement='', x='h-o-l-a- -m-u-n-d-o')
# cambia los '-' por '='
gsub(pattern='-', replacement='=', x='h-o-l-a- -m-u-n-d-o')
# convierte a mayusculas
toupper('hola mundo')
# convierte a minusculas
tolower('HOLA MUNDO')
# En R, los strings son vectores de caracteres; el primer caracter ocupa la posición 1, el segundo la 2 y así sucesivamente.
#
# +---+---+---+---+---+---+
# | P | y | t | h | o | n |
# +---+---+---+---+---+---+
# 1 2 3 4 5 6
word <- 'Python'
substr(word, start = 1,stop = 1) # caracter en la posición 1
substr(word, start = 6, stop = 6) # caracter en la posición 6
nchar('abcde') # la función nchar calcula la longitud de una cadena de caracteres.
substr(word, start = nchar(word), stop = nchar(word)) # último caracter
substr(word, start = nchar(word)-2, stop = nchar(word)-2) # antepenúltimo caracter
substr(word, start = 1, stop = 2)
substr(word, start = 3, stop = nchar(word)) # desde la posición 3 hasta el final
# # Vectores
# [Contenido](#Contenido)
# Los vectores en R son la principal estructura para realizar cálculos numéricos.
squares <- c(1, 4, 9, 16, 25) # las listas se crean con la función `c()`
squares
1:5 # forma para crear secuencias
seq(5) # generación de secuencias
seq(from=1, to=10, by=2)
squares[1] # Sus elementos se indexan desde uno
1:10-1 # generación de secuencias (resta 1 a todos los elementos de la secuencia)
1:(10-1)
rep(c(1, 2), times = 3) # repetición de secuencias
rep(c(1, 2), each = 2) # repite cada elemento del vector 2 veces
rep(c(1, 2), times = 3, each = 2) # repite cada elemento del vector 2 veces en 3 repeticios
squares[-1] # los indices negativos indican exclusión del elemento. (excluye el primer elemento)
squares[(length(squares)-2):length(squares)] # ultimos tres elementos
tail(squares, n = 3) # ultimos tres elementos
# +
tail(squares) # desde el primer hasta el último elemento.
# -
head(squares, n = 3) # primeros tres elementos
x <- 1:3 # concatenacion de vectores
c(x, 0, x)
cubes = c(1, 8, 27, 65, 125) # lista de cubos con un elemento malo
4 ** 3 # el cubo de 4 es 64, no 65!
cubes[4] = 64 # se reemplaza el valor erróneo
cubes
cubes <- c(cubes, 216) # se agrega el cubo de 6 al final de la lista.
cubes <- append(cubes, 7 ** 3) # y nuevamente se agrega el cubo de 7 al final
cubes
letters = c('a', 'b', 'c', 'd', 'e', 'f', 'g')
letters
letters[3:5] = c('C', 'D', 'E') # se puede reemplazar un rango de posiciones
letters
letters[-(2:5)] # elimina los elementos en las posiciones 2, 3, 4, 5
letters[seq(from=1, to=7, by=2)] # selecciona los elementos en posiciones impares
letters = c('a', 'b', 'c', 'd') # la función lenght retorna la longitud de la lista
length(letters)
a = c('a', 'b', 'c') # los elementos de las listas pueden ser de cualquier tipo.
n = c(1, 2, 3)
x = c(a, n) # x es una lista
x
c(1, 3, 5, 7) + 1 # operaciones aritméticas. Suma 1 a todos los elementos
c(1, 3, 5, 7) + c(2, 4, 6, 8) # suma los elementos en las mismas posiciones
1 / c(1, 2, 3) # divide cada elemento por 1
sum(c(1, 2, 3, 4)) # suma de los elementos de un vector
sum(1:4)
prod(1:4)
cumsum(1:4) # suma acumulada
cumprod(1:4) # producto acumulado
# # Listas
# [Contenido](#Contenido)
# Las listas son una de las principales estructuras para almacenar información en R. Se usan comunmente para almacenar información. Las listas en lenguaje Python son equivalentes a los vectores en R. Las listas en R no tienen equivalente en Python; sobre ellas no se pueden realizar operaciones matemáticas.
squares = list(1, 4, 9, 16, 25) # las listas se crean con la palabra list
squares
squares = list(a=1, b=4, c=9, d=16, e=25)
squares
squares$a = 2
squares
squares['b'] # acceso por nombre.
squares[2] # acceso por posición.
print(list(list(1, 2, 3), list(4, 5, 6))) # lista de listas
list(list(1, 2, 3), list(4, 5, 6)) # lista de listas
x <- list(list(1, 2, 3), list(4, 5, 6))
x[1] # elemento en posición 1, es una lista con 1,2,3
x[[1]] # elemento en posición 1, pero obtiene también la lista interna
x[[1]][2] # elemento en posición 1, trae la lista 1,2,3 y luego saca el elemento 2
# [Contenido](#Contenido)
|
R01-uso-interactivo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="9nkDv5dppU6B"
# # HEX algorithm **Kopuru Vespa Velutina Competition**
#
# **XGBoost model**
#
# Purpose: Predict the number of Nests in each of Biscay's 112 municipalities for the year 2020.
#
# Output: *(WaspBusters_20210609_batch_XGBy_48019prodigal.csv)*
#
# @authors:
# * <EMAIL>
# * <EMAIL>
# * <EMAIL>
# * <EMAIL>
# -
# ## Libraries
# +
# Base packages -----------------------------------
import numpy as np
import pandas as pd
# Visualization -----------------------------------
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (15, 10)
import seaborn as sns
plt.style.use("seaborn-notebook")
# Scaling data ------------------------------------
from sklearn import preprocessing
# Grid search -------------------------------------
from sklearn.model_selection import GridSearchCV
# Confusion matrix --------------------------------
#from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# XGBoost -----------------------------------------
from xgboost import XGBRegressor
from xgboost import plot_importance
# -
# ## Functions
# +
# Function that checks if final Output is ready for submission or needs revision
# Function that checks if the final output is ready for submission or needs revision
def check_data(HEX):
    """Validate the submission dataframe ``HEX`` against the competition template.

    Prints the submission shape, the number of municipalities and the total
    predicted nests, asserts the expected shape (112 rows x 3 columns) and
    uniqueness of the municipality codes, then compares every
    code+name pair with the official template (module-level global
    ``template``).

    Returns a confirmation string when everything matches, or a dataframe
    listing the mismatching rows otherwise.

    Raises:
        AssertionError: when the shape or the municipality count is wrong.
    """
    def template_checker(HEX):
        # Concatenate code+name into a single comparable key per row.
        submission_df = (HEX["CODIGO MUNICIPIO"].astype("string") + HEX["NOMBRE MUNICIPIO"]).sort_values().reset_index(drop=True)
        template_df = (template["CODIGO MUNICIPIO"].astype("string") + template["NOMBRE MUNICIPIO"]).sort_values().reset_index(drop=True)
        check_df = pd.DataFrame({"submission_df": submission_df, "template_df": template_df})
        check_df["check"] = check_df.submission_df == check_df.template_df
        if (~check_df.check).any():
            # Raise the display limit so all 112 rows can be inspected.
            pd.options.display.max_rows = 112
            return check_df.loc[~check_df.check, :]
        else:
            return "All Municipality Names and Codes to be submitted match the Template"
    print("Submission form Shape is", HEX.shape)
    print("Number of Municipalities is", HEX["CODIGO MUNICIPIO"].nunique())
    print("The Total 2020 Nests' Prediction is", int(HEX["NIDOS 2020"].sum()))
    assert HEX.shape == (112, 3), "Error: Shape is incorrect."
    # BUG FIX: the original failure message said "is correct", which
    # contradicted the failing assertion.
    assert HEX["CODIGO MUNICIPIO"].nunique() == 112, "Error: Number of unique municipalities is incorrect."
    return template_checker(HEX)
# -
# ## Get the data
# +
# Load the monthly train/predict feeder datasets, the nest-count cluster
# assignments, and the official competition template.
QUEEN_train = pd.read_csv('../Feeder_months/WBds03_QUEENtrainMONTHS.csv', sep=',')
QUEEN_predict = pd.read_csv('../Feeder_months/WBds03_QUEENpredictMONTHS.csv', sep=',')
clustersMario = pd.read_csv("../auxiliary_files/WBds_CLUSTERSnests.csv")
template = pd.read_csv("../../../Input_open_data/ds01_PLANTILLA-RETO-AVISPAS-KOPURU.csv",sep=";", encoding="utf-8")
# + tags=[]
#QUEEN_predict.isnull().sum()
# -
QUEEN_train.shape
QUEEN_predict.shape
# ### Add in more Clusters (nest amount clusters)
# Left-join the cluster labels onto both frames by municipality.
QUEEN_train = pd.merge(QUEEN_train, clustersMario, how = 'left', on = ['municip_code', 'municip_name'])
QUEEN_predict = pd.merge(QUEEN_predict, clustersMario, how = 'left', on = ['municip_code', 'municip_name'])
# Municipalities with no cluster assignment get bucket 4
# (presumably a catch-all cluster id — TODO confirm against the cluster file).
QUEEN_train.fillna(4, inplace=True)
QUEEN_predict.fillna(4, inplace=True)
QUEEN_train.shape
QUEEN_predict.shape
QUEEN_predict.Cluster.value_counts()
# ## Get hyperparameters with GridsearchCV using 2018's features (i.e. 2019's nests) as the test year
# + tags=[]
# The target variable: nests per (year, month, municipality).
# 2017 feature rows -> hyperparameter-search training set.
hyper_y_train = QUEEN_train.loc[QUEEN_train.year_offset.isin([2017]), ['municip_code', 'year_offset', 'month', 'NESTS']]
hyper_y_train = hyper_y_train.sort_values(by=['year_offset', 'month', 'municip_code'], ascending=True)
hyper_y_train.set_index(['year_offset', 'month', 'municip_code'], inplace=True)
# 2018 feature rows (i.e. 2019's nests) -> hyperparameter-search test set.
hyper_y_test = QUEEN_train.loc[QUEEN_train.year_offset.isin([2018]), ['municip_code', 'year_offset', 'month', 'NESTS']]
hyper_y_test = hyper_y_test.sort_values(by=['year_offset', 'month', 'municip_code'], ascending=True)
hyper_y_test.set_index(['year_offset', 'month', 'municip_code'], inplace=True)
# The features matrix: drop identifier/text columns and the target,
# and index by the same (year, month, municipality) triple as y.
hyperXtrain = QUEEN_train.loc[QUEEN_train.year_offset.isin([2017]), :].drop(['municip_name', 'station_code', 'station_name', 'NESTS'], axis=1)
hyperXtrain = hyperXtrain.sort_values(by=['year_offset', 'month', 'municip_code'], ascending=True)
hyperXtrain.set_index(['year_offset', 'month', 'municip_code'], inplace=True)
hyperXtest = QUEEN_train.loc[QUEEN_train.year_offset.isin([2018]), :].drop(['municip_name', 'station_code', 'station_name', 'NESTS'], axis=1)
hyperXtest = hyperXtest.sort_values(by=['year_offset', 'month', 'municip_code'], ascending=True)
hyperXtest.set_index(['year_offset', 'month', 'municip_code'], inplace=True)
# +
# Grid-search XGBoost hyperparameters with 3-fold CV on the 2017 data.
xgb1 = XGBRegressor(random_state=23)
parameters = {'nthread':[4], # when using hyperthreading, xgboost may become slower
              'objective':['reg:linear'],
              'learning_rate': [.03, 0.05, .07], # so called `eta` value
              'max_depth': [5, 6, 7],
              'min_child_weight': [4],
              'silent': [1],
              'subsample': [0.7],
              'colsample_bytree': [0.7],
              'n_estimators': [500]}
xgb_grid = GridSearchCV(xgb1,
                        parameters,
                        cv = 3,
                        n_jobs = 5,
                        verbose=True)
xgb_grid.fit(hyperXtrain, hyper_y_train)
print(xgb_grid.best_score_)
# -
print(xgb_grid.best_params_)
# Predict the 2018 hold-out with the best estimator found.
y_xgb_grid = xgb_grid.best_estimator_.predict(hyperXtest)
# +
#matrix = confusion_matrix(hyper_y_test, y_xgb_grid)
# +
#ax = sns.heatmap(
# matrix.T, square=True, annot=True, fmt="d", cbar=False, cmap="viridis",
# xticklabels=["0", "1"], yticklabels=["0", "1"]
#)
#ax.set_xlabel("True label")
#ax.set_ylabel("Predicted label");
# +
#print(classification_report(hyper_y_test, y_xgb_grid))
# -
# ## Prediction time!
# ### 1. Choose the model class
XGBRegressor  # bare expression: just displays the chosen model class in the notebook
# ### 2. Instantiate the model
xgb = xgb_grid.best_estimator_  # reuse the estimator tuned by the grid search above
# ### 3. Prepare Feature matrix and Target variable
# + tags=[]
# The target variable
y_train = QUEEN_train.loc[:, ['municip_code', 'year_offset', 'month', 'NESTS']]
#y_train = y_train.sort_values(by=['year_offset', 'month', 'municip_code'], ascending=True)
y_train.set_index(['year_offset', 'month', 'municip_code'], inplace=True)
y_predict = QUEEN_predict.loc[:, ['municip_code', 'year_offset', 'month', 'NESTS']]
#y_predict = y_predict.sort_values(by=['year_offset', 'month', 'municip_code'], ascending=True)
y_predict.set_index(['year_offset', 'month', 'municip_code'], inplace=True)
# The features matrix
X_train = QUEEN_train.drop(['municip_name', 'station_code', 'station_name', 'NESTS'], axis=1)
#X_train = X_train.sort_values(by=['year_offset', 'month', 'municip_code'], ascending=True)
X_train.set_index(['year_offset', 'month', 'municip_code'], inplace=True)
X_predict = QUEEN_predict.drop(['municip_name', 'station_code', 'station_name', 'NESTS'], axis=1)
#X_predict = X_predict.sort_values(by=['year_offset', 'month', 'municip_code'], ascending=True)
X_predict.set_index(['year_offset', 'month', 'municip_code'], inplace=True)
# -
X_train.shape
y_train.shape
X_predict.shape
y_predict.shape
# ### 4. Fit the model to the training data sets
# #### Scale and get feature importance
# +
#X = X_train
#y = y_train
#scalators = X.columns
#X[scalators] = preprocessing.minmax_scale(X[scalators])
# +
# define the model
#model_fi = XGBRegressor(random_state=23)
# fit the model
#model_fi.fit(X, y)
# + tags=[]
# get importance
#importance = model_fi.feature_importances_
# summarize feature importance
#for i,v in enumerate(importance):
# print('Feature: %0d, Score: %.5f' % (i,v))
# plot feature importance
#plot_importance(model_fi, height=0.5, xlabel="F-Score", ylabel="Feature Importance", grid=False)
#plt.show()
# -
# #### Now, do fit the model but only with the relevant features
# +
# Keep only the features found relevant by the (commented-out) importance study above.
X_train = X_train.loc[:, ['population', 'weath_humidity', 'food_fruit', 'weath_maxLevel', 'food_txakoli', 'weath_midLevel', 'weath_minLevel', 'colonies_amount', 'weath_maxWindM', 'weath_meanWindM', 'weath_accuRainfall', 'weath_10minRainfall', 'food_kiwi', 'food_apple', 'weath_days_rain1mm', 'weath_meanDayMaxWind', 'weath_meanTemp']]
X_predict = X_predict.loc[:, ['population', 'weath_humidity', 'food_fruit', 'weath_maxLevel', 'food_txakoli', 'weath_midLevel', 'weath_minLevel', 'colonies_amount', 'weath_maxWindM', 'weath_meanWindM', 'weath_accuRainfall', 'weath_10minRainfall', 'food_kiwi', 'food_apple', 'weath_days_rain1mm', 'weath_meanDayMaxWind', 'weath_meanTemp']]
# -
xgb.fit(X_train, y_train)
# ### 5. Predict the labels for new data
y_predict = xgb.predict(X_predict)
accuracy_train = xgb.score(X_train, y_train)  # R^2 on the training data
print(f"Accuracy on the training set: {accuracy_train:.0%}")
# NOTE(review): this scores the model against its OWN predictions
# (y_predict was just produced by xgb), so R^2 is trivially ~1 and the
# printed "test accuracy" is not a real evaluation metric — confirm intent.
accuracy_predict = xgb.score(X_predict, y_predict)
print(f"Accuracy on the test set: {accuracy_predict:.0%}")
y_predict.shape
QUEEN_predict['NESTS'] = y_predict
QUEEN_predict.NESTS.sum()
# Clamp negative nest predictions to zero
# (chained indexing; pandas may emit a SettingWithCopyWarning here).
QUEEN_predict.NESTS[QUEEN_predict.NESTS < 0] = 0
QUEEN_predict.NESTS.sum()
# ## Prepare the dataset for submission
# Aggregate the monthly predictions into one yearly total per municipality.
HEX = QUEEN_predict.loc[:,['municip_code', 'municip_name', 'NESTS']].groupby(by=['municip_code', 'municip_name'], as_index=False).sum()
# ## Adjust manually for Bilbao 48020 and generate the output
HEX.loc[HEX.municip_code.isin([48020]), 'NESTS'] = 0  # Bilbao forced to zero nests
HEX.loc[HEX.municip_code.isin([48022, 48071, 48088, 48074, 48051, 48020]), :]  # inspect the adjusted rows
HEX.columns = ["CODIGO MUNICIPIO", "NOMBRE MUNICIPIO", "NIDOS 2020"] # change column names to Spanish (Competition template)
check_data(HEX)
# ### Export dataset for submission
HEX.to_csv('WaspBusters_20210609_136-mXGB-prodigal-GSCV-noSort-FI-no0s.csv', index=False)
# ## VERSION Manual adjustments
HEX.columns = ['municip_code', 'municip_name', 'NESTS'] # back to the internal English names for the manual edits
# NOTE(review): assigning a list via .loc matches rows in dataframe order,
# not the order of the codes in isin([...]) — verify each value lands on the
# intended municipality.
HEX.loc[HEX.municip_code.isin([48022, 48071, 48088, 48074, 48051]), 'NESTS'] = [0,0,1,0,1]
HEX.loc[HEX.municip_code.isin([48022, 48071, 48088, 48074, 48051, 48020]), :]
HEX.columns = ["CODIGO MUNICIPIO", "NOMBRE MUNICIPIO", "NIDOS 2020"] # change column names to Spanish (Competition template)
check_data(HEX)
# ### Export dataset for submission
HEX.to_csv('WaspBusters_20210609_135-mXGB-prodigal-GSCV-noSort-FI-0s.csv', index=False)
|
B_Submissions_Kopuru_competition/2021-06-09_submit FINAL/XGBoost_48019prodigalberriz/submitted/workerbee05_HEX-135-mXGB-prodigal-GSCV-noSort-FI.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
# Import Scikit-Learn library for the regression models
import sklearn
from sklearn import linear_model, datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error, accuracy_score
# Note - you will need version 0.24.1 of scikit-learn to load this library (SequentialFeatureSelector)
from sklearn.feature_selection import f_regression, SequentialFeatureSelector
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
# Import numpy
import numpy as np
# Another statistic model library
import statsmodels.api as sm
import statsmodels.formula.api as smf
import scipy.stats as stats
import scipy
from scipy import interpolate
from scipy.interpolate import interp1d
# Import plotting libraries
import seaborn as sns
import matplotlib
from matplotlib import pyplot as plt
# Set larger fontsize for all plots
matplotlib.rcParams.update({'font.size': 20})
# Command to automatically reload modules before executing cells
# not needed here but might be if you are writing your own library
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# +
# Load the breast-cancer CSV and encode the label: B (benign) -> 0, M (malignant) -> 1.
data = pd.read_csv('data.csv')
data.pop('Unnamed: 32')  # drop the trailing all-NaN column produced by the CSV export
data['diagnosis'] = data['diagnosis'].replace('B',0);
data['diagnosis'] = data['diagnosis'].replace('M',1);
y = data.diagnosis
# Feature matrix: every column from position 2 onwards
# (double transpose is used to slice columns positionally).
X = data.T
x = X[2:]
x = x.T
# -
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
# +
# NOTE(review): LinearRegression is fit on a 0/1 label and R^2 is later
# labeled "accuracy" — it is not classification accuracy; LogisticRegression
# would be the usual choice here.
model = LinearRegression().fit(X_train,y_train)
y_predict = model.predict(X_test)
print(r2_score(y_test,y_predict))
print(mean_squared_error(y_test,y_predict))
accuracy_1 = np.round(r2_score(y_test,y_predict)*100)  # R^2 as a percentage
# +
# Forward stepwise selection of 4 features.
# NOTE(review): the selector is fit on the full (x, y) including the rows
# later used for testing — this leaks test information into feature
# selection; confirm whether that is intended.
sfs_forward = SequentialFeatureSelector(LinearRegression(),
                                        n_features_to_select=4,
                                        direction='forward').fit(x, y)
f_names = np.array(data.columns)
selected = sfs_forward.get_support(indices=True)
print("Selected input features using Forward Stepwise Selection:\n", f_names[selected])
columns_names = list(f_names[selected])
# Guard: the selector indices are positional in x but are mapped onto
# data.columns, which also contains 'id' and 'diagnosis'.
if 'diagnosis' in columns_names:
    columns_names.remove('diagnosis')
if 'id' in columns_names:
    columns_names.remove('id')
print(columns_names)
# +
# Refit using only the selected features.
x_manip = data[columns_names]
y = data.diagnosis
X_train_manip, X_test_manip, y_train_2, y_test_2 = train_test_split(x_manip, y, test_size=0.2, random_state=42)
# +
model_2 = LinearRegression().fit(X_train_manip,y_train_2)
y_predict_2 = model_2.predict(X_test_manip)
print(y_predict_2)
accuracy_2 = np.round(r2_score(y_test_2,y_predict_2)*100)
# +
# Residual plots for the full-feature model (left) and the reduced model (right);
# marker size shrinks with the magnitude of the residual.
# NOTE(review): the left plot sizes its markers with the model-2 residuals
# (y_predict_2 - y_test_2); presumably it should use y_predict - y_test — confirm.
fig,ax = plt.subplots(1,2,figsize=(12.5,5))
ax[0].scatter(y_test, y_predict - y_test, marker='o',s=(200-(100*abs(y_predict_2-y_test_2))),alpha=0.1,c= 'FireBrick',label = "R^2: "+str(accuracy_1)+"%")
ax[0].set_title('Residual plot of all features after Linear Regression', fontsize=15)
ax[0].set_xlabel('Diagnosis')
ax[0].set_ylabel('Correct Label')
ax[0].legend()
ax[1].scatter(y_test_2, y_predict_2 - y_test_2,s=(200-(100*abs(y_predict_2-y_test_2))),alpha=0.1,c= 'DarkCyan',label = "R^2: "+str(accuracy_2)+"%")
ax[1].set_title('Residual plot after Stepwise Selection after Linear Regression', fontsize=15)
ax[1].set_xlabel('Diagnosis')
ax[1].set_ylabel('Correct Label')
ax[1].legend()
plt.tight_layout()
|
old_jupyter_notes/Linear_Regression_SW_updated.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # After Training
#
# After the on-the-fly training is complete, we can play with the force field we obtained.
# We are going to do the following things:
#
# 1. Parse the on-the-fly training trajectory to collect training data
# 2. Reconstruct the GP model from the training trajectory
# 3. Build up Mapped GP (MGP) for accelerated force field, and save coefficient file for LAMMPS
# 4. Use LAMMPS to run fast simulation using MGP pair style
#
# ## Parse OTF log file
#
# After the on-the-fly training is complete, we have a log file and can use the `otf_parser` module to parse the trajectory.
# +
import numpy as np
from flare import otf_parser
# Parse the on-the-fly (OTF) training trajectory from the sample log file.
logdir = '../../../tests/test_files'
file_name = f'{logdir}/AgI_snippet.out'
hyp_no = 2 # use the hyperparameters from the 2nd training step
otf_object = otf_parser.OtfAnalysis(file_name)
# -
# ## Construct GP model from log file
#
# We can reconstruct GP model from the parsed log file (the on-the-fly training trajectory). Here we build up the GP model with 2+3 body kernel from the on-the-fly log file.
# +
# Rebuild the GP model from the parsed trajectory at the chosen hyperparameter step.
gp_model = otf_object.make_gp(hyp_no=hyp_no)
gp_model.parallel = True
gp_model.hyp_labels = ['sig2', 'ls2', 'sig3', 'ls3', 'noise']  # 2-body + 3-body kernel hyps
# Serialize the model to disk. Despite the '.gp' stem this writes JSON
# (format='json'), producing 'AgI.gp.json' — loaded back below.
gp_model.write_model('AgI.gp', format='json')
# -
# The last step `write_model` is to write this GP model into a binary file,
# so next time we can directly load the model from the pickle file as
# +
from flare.gp import GaussianProcess
# Reload the GP model saved above (note the '.json' suffix appended on write).
gp_model = GaussianProcess.from_file('AgI.gp.json')
# -
# ## Map the GP force field & Dump LAMMPS coefficient file
#
# To use the trained force field with accelerated version MGP, or in LAMMPS, we need to build MGP from GP model.
# Since 2-body and 3-body are both included, we need to set up the number of grid points for 2-body and 3-body in `grid_params`.
# We build up energy mapping, thus set `map_force=False`.
# See [MGP tutorial](https://flare.readthedocs.io/en/latest/tutorials/mgp.html) for more explanation of the MGP settings.
# +
from flare.mgp import MappedGaussianProcess
# Grid resolution for the 2-body (distance) and 3-body (triplet) maps.
grid_params = {'twobody': {'grid_num': [64]},
               'threebody': {'grid_num': [20, 20, 20]}}
data = gp_model.training_statistics
lammps_location = 'AgI_Molten'  # stem of the LAMMPS coefficient file written by the map
mgp_model = MappedGaussianProcess(grid_params, data['species'],
        var_map=None, lmp_file_name='AgI_Molten', n_cpus=1)
mgp_model.build_map(gp_model)
# -
# The coefficient file for LAMMPS mgp pair_style is automatically saved once the mapping is done.
# Saved as `lmp_file_name`.
#
# ## Run LAMMPS with MGP pair style
#
# With the above coefficient file, we can run LAMMPS simulation with the mgp pair style.
# First download our mgp pair style files, compile your lammps executable with mgp pair style following our [instruction](https://flare.readthedocs.io/en/latest/installation/lammps.html) in the *Installation* section.
#
# 1. One way to use it is running `lmp_executable < in.lammps > log.lammps`
# with the executable provided in our repository.
# When creating the input file, please note to set
#
# ```
# newton off
# pair_style mgp
# pair_coeff * * <lmp_file_name> <chemical_symbols> yes/no yes/no
# ```
#
# An example is using coefficient file `AgI_Molten.mgp` for AgI system,
# with two-body (the 1st `yes`) together with three-body (the 2nd `yes`).
#
# ```
# pair_coeff * * AgI_Molten.mgp Ag I yes yes
# ```
#
# 2. Another way is to use the ASE LAMMPS interface
# +
import os
from flare.utils.element_coder import _Z_to_mass, _element_to_Z
from flare.ase.calculator import FLARE_Calculator
from ase.calculators.lammpsrun import LAMMPS
from ase import Atoms
# create a test structure from the last frame of the OTF trajectory
species = otf_object.gp_species_list[-1]
positions = otf_object.position_list[-1]
forces = otf_object.force_list[-1]
otf_cell = otf_object.header['cell']
structure = Atoms(symbols=species, cell=otf_cell, positions=positions)
# get chemical symbols and per-type masses ("type mass" strings for LAMMPS)
species = gp_model.training_statistics['species']
specie_symbol_list = " ".join(species)
masses=[f"{i} {_Z_to_mass[_element_to_Z[species[i]]]}" for i in range(len(species))]
# set up input params for the ASE LAMMPS calculator
parameters = {'command': os.environ.get('lmp'), # LAMMPS executable path taken from the $lmp env var
        'newton': 'off',
        'pair_style': 'mgp',
        'pair_coeff': [f'* * {lammps_location + ".mgp"} {specie_symbol_list} yes yes'],  # 2-body yes, 3-body yes
        'mass': masses}
files = [lammps_location + ".mgp"]
# create the ASE calculator and attach it to the structure
lmp_calc = LAMMPS(label=f'tmp_AgI', keep_tmp_files=True, tmp_dir='./tmp/',
        parameters=parameters, files=files, specorder=species)
structure.calc = lmp_calc
# To compute energy, forces and stress:
# energy = structure.get_potential_energy()
# forces = structure.get_forces()
# stress = structure.get_stress()
# -
# 3. The third way to run LAMMPS is using our LAMMPS interface, please set the
# environment variable `$lmp` to the executable.
# +
from flare import struc
from flare.lammps import lammps_calculator
# (the lmp coefficient file is automatically written every time the MGP is constructed)
# create a test structure from the last OTF frame
species = otf_object.gp_species_list[-1]
positions = otf_object.position_list[-1]
forces = otf_object.force_list[-1]
otf_cell = otf_object.header['cell']
structure = struc.Structure(otf_cell, species, positions)
# LAMMPS atom typing: type 1 = Ag (mass 108), type 2 = I (mass 127)
atom_types = [1, 2]
atom_masses = [108, 127]
atom_species = [1, 2] * 27  # alternating Ag/I types for the 54-atom cell
# create the LAMMPS data file
data_file_name = 'tmp.data'
data_text = lammps_calculator.lammps_dat(structure, atom_types,
                                         atom_masses, atom_species)
lammps_calculator.write_text(data_file_name, data_text)
# create the LAMMPS input script
style_string = 'mgp'
# NOTE(review): unlike the ASE section above, this coeff string uses
# lammps_location without the '.mgp' suffix — confirm which form the
# generated input expects.
coeff_string = '* * {} Ag I yes yes'.format(lammps_location)
lammps_executable = '$lmp'
dump_file_name = 'tmp.dump'
input_file_name = 'tmp.in'
output_file_name = 'tmp.out'
input_text = \
    lammps_calculator.generic_lammps_input(data_file_name, style_string,
                                           coeff_string, dump_file_name)
lammps_calculator.write_text(input_file_name, input_text)
# To run lammps and get forces:
# lammps_calculator.run_lammps(lammps_executable, input_file_name,
#                              output_file_name)
# lammps_forces = lammps_calculator.lammps_parser(dump_file_name)
|
docs/source/tutorials/after_training.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Estadística con Python
# ## Introducción a Pandas
#
# ### GitHub repository: https://github.com/jorgemauricio/python_statistics
#
# ### Instructor: <NAME>
#
# ### Base de datos: Stanford Open Policing Project ([Montana](https://openpolicing.stanford.edu/data))
# libraries
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# read the CSV (low_memory=False avoids mixed-dtype chunk inference)
df = pd.read_csv("data/Montana.csv", low_memory=False)
# first rows of the Montana dataFrame
df.head()
# number of rows and columns
df.shape
# dtype of each column
df.dtypes
# count null values per column
df.isnull().sum()
# ## Remove columns that contain only null values
# drop the "police_department" column
df.drop("police_department", axis="columns", inplace=True)
# rows and columns after the drop
df.shape
# check the remaining columns
df.columns
# alternative method: drop every all-null column at once
df.dropna(axis='columns', how='all').shape
# NOTE:
# - There is more than one way to drop null data
# - Check the arguments of each method
# ## Which gender is stopped most for speeding?
# number of women and men stopped for speeding
df[df["violation"] == 'Speeding']["driver_gender"].value_counts()
# percentage of women and men stopped for speeding
df[df["violation"] == 'Speeding']["driver_gender"].value_counts(normalize=True)
# alternative form
df.loc[df["violation"] == 'Speeding', "driver_gender"].value_counts(normalize=True)
# when a man is stopped, what percentage is for speeding?
df[df['driver_gender'] == "M"]["violation"].value_counts(normalize=True)
# when a woman is stopped, what percentage is for speeding?
df[df['driver_gender'] == "F"]["violation"].value_counts(normalize=True)
# percentage grouped by gender
df.groupby("driver_gender")["violation"].value_counts(normalize=True)
# inspect the columns before building a datetime field
df.columns
# combine the date and time columns
combined = df["stop_date"] + " " + df['stop_time']
# convert to a datetime column
df['fecha'] = pd.to_datetime(combined)
# confirm the new column's dtype
df.dtypes
# year with the largest number of stops
df['fecha'].dt.year.value_counts()
# month with the most stops in 2010
df[df['fecha'].dt.year == 2010]['fecha'].dt.month.value_counts()
# sort by index (calendar month order)
df[df['fecha'].dt.year == 2010]['fecha'].dt.month.value_counts().sort_index()
# plot the data (line)
df[df['fecha'].dt.year == 2010]['fecha'].dt.month.value_counts().sort_index().plot()
# plot the data (bar)
df[df['fecha'].dt.year == 2010]['fecha'].dt.month.value_counts().sort_index().plot(kind="bar")
# plot the data (horizontal bar)
df[df['fecha'].dt.year == 2010]['fecha'].dt.month.value_counts().sort_index().plot(kind="barh")
# apply a plotting style
plt.style.use("seaborn")
# plot the data (horizontal bar)
df[df['fecha'].dt.year == 2010]['fecha'].dt.month.value_counts().sort_index().plot(kind="barh")
# plot the data (bar with a title)
df[df['fecha'].dt.year == 2010]['fecha'].dt.month.value_counts().sort_index().plot(kind="barh", title="Detenciones 2010")
# histogram of driver ages
df['driver_age'].hist(bins=20)
|
Intro_Pandas.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>CNN MNIST</h1>
# <h6><NAME></h6>
# Date: 28 Jun 2019
# +
#We will apply convolutional layer in which we will pass many filters
# (10x10) -> 5 filters (3x3) -> output is a (8x8) image channeled with 5 layers -> 5 filters (2x2) ->
# -> output is a (7x7) image channeled with 5 layers and each pixel is sum of all the pixels of 5 layers -> flatten the image >
# apply neural network
# -
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import keras
from keras.utils import np_utils
from keras.layers import Dense, Activation, Flatten, Convolution2D, Dropout, MaxPooling2D
#MaxPooling is used to control the dimension of the image -> puts the maximum value at the output of filter
from keras.models import Sequential
# -
from keras.datasets import mnist

# Load MNIST: 28x28 grayscale digit images with labels 0-9.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
np.unique(y_train)
plt.imshow(X_train[0], cmap='gray')
# Conv layers expect a channel axis: reshape to (N, 28, 28, 1).
X_train = X_train.reshape(-1, 28, 28, 1)
X_test = X_test.reshape(-1, 28, 28, 1)
print(X_train.shape, X_test.shape)
# +
# One-hot encode the 10 digit classes.
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
print(y_train.shape, y_test.shape)
# +
# Subsample to keep training fast.
X_train = X_train[:3600]
y_train = y_train[:3600]
X_test = X_test[:900]
y_test = y_test[:900]
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
# -
# +
model = Sequential()
model.add(Convolution2D(32, 3, 3, input_shape=(28, 28, 1)))
model.add(Activation('relu'))
# output of 1st conv -> (26, 26, 32)
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
# output of 2nd conv -> (24, 24, 64)
# the feature count is now very large, so reduce the spatial size with max-pooling
model.add(MaxPooling2D(pool_size=(2, 2)))
# output after pooling -> (12, 12, 64)
model.add(Convolution2D(16, 3, 3))
model.add(Activation('relu'))
# output of 3rd conv -> (10, 10, 16)
model.add(Flatten())
# 1600 flattened features
# regularisation via Dropout
model.add(Dropout(0.25))
# output layer: 10 = number of classes
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()
# -
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=8, epochs=10, validation_data=(X_test, y_test))
# BUG FIX: Model.save() requires a file path; the original bare call raised a
# TypeError after the full training run completed.
model.save('cnn_mnist.h5')
|
Deep Learning/Convolutional Neural Networks/CNN MNIST.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HTTP协议理解
# ## 网络协议结构与HTTP协议
# ### 网络OSI模型
# 网络OSI(Open System Interconnection)协议规范分成7层:
# |-物理层
# |-链路层
# |-网络层
# |-传输层
# |-会话层
# |-表示层
# |-应用层
#
# 一般从开发协议来说分成3层(链路层与网络层看成一起,也是常见的高层,中层与低层)或者4层。
# |-链路层(ETHERNET协议:ARP)
# |-网络层(IP等)
# |-传输层(TCP,UDP等)
# |-应用层(HTTP,FTP,SMTP,TELNET等)
# ### HTTP协议层
#
# HTTP协议是应用协议,他下面依赖的是TCP传输协议,TCP依赖的是IP协议,IP协议依赖的是链路层ARP(Address Resolution Protocol)协议。
# HTTP
# |
# TCP
# |
# IP
# |
# ARP
# 1. HTTP协议特点:
#
# HTTP协议是基于TCP协议的应用协议,是属于应用层的面向对象的协议,浏览器实现的就是HTTP协议。它于1990年提出,目前使用的是HTTP/1.0与1.1,HTTP-NG(Next Generation of HTTP)的建议已经提出。
#
# HTTP协议的主要特点:
#
# 1.基于TCP协议。
#
# 2.简单快速,协议是基于文本的:
# 客户向服务器请求服务时,只需传送请求方法和路径。由于HTTP协议简单,使得HTTP服务器的程序规模小,因而通信速度很快。
#
# 3.灵活:
# HTTP允许传输任意类型的数据对象。正在传输的类型由Content-Type加以标记。
#
# 4.无连接:
# 服务器处理完客户的请求,并收到客户的应答后,即断开连接。
#
# 5.无状态:
# HTTP协议是无状态协议。不识别连接者的状态。
#
#
# ## HTTP协议格式
#
# HTTP协议是Web浏览器与Web服务器之间的对话协议,HTTP协议可以根据分工分成请求协议与响应协议两个部分:
# |- 请求协议:Web浏览器向Web服务器发起请求使用的协议;
# |- 响应协议:Web服务器对Web浏览器的请求进行响应的协议;
#
# 不管是请求协议还是响应协议,HTTP协议格式从结构上分成4格部分,下面我们使用Socket通信程序来说明。
# 1. HTTP协议的,主要有如下三个部分构成。
# |-
# |- HTTP URL
# |- HTTP请求
# |- HTTP响应
#
#
# 2. HTTP/1.0与1.1
#
# |- HTTP/1.0
# 这是第一个在通讯中指定版本号的 HTTP 协议版本,至今仍被广泛采用,特别是在代理服务器中。
#
# |- HTTP/1.1
# 当前版本。持久连接被默认采用,并能很好地配合代理服务器工作。还支持以管道方式在同时发送多个请求,以便降低线路负载,提高传输速度。
# 1.1对比1.0的主要区别特点
# |- 1 缓存处理
# |- 2 带宽优化及网络连接的使用
# |- 3 错误通知的管理
# |- 4 消息在网络中的发送
# |- 5 互联网地址的维护
# |- 6 安全性及完整性
#
# 3. HTTP URL介绍
#
# 也称:通用资源定位符
#
# 格式组成:协议,主机,资源路径
# |- http://host[":"port] [abs_path]
# |- 协议:常见的协议:HTTP,HTTPS,FTP,本地文件,RMI等。
# |- 主机:包含IP或者域名,端口
# |-资源路径:
# 使用 / 分隔
# 资源路径可以后缀?开始的字符串,称为querystring(查询字符串,类似SQL语句作用)。Querystring可以使用&合并多个值对。
#
#
# 例子:一般浏览器都支持
# |- WEB:http://baidu.com/index.html
# |- WEB:http:// 192.168.3.11:80/index.html?name=louis&favor=football。
# |- FTP:ftp:// 192.168.3.11/publish/a.txt
# |- FILE:file:///root/codes
#
# ### HTTP协议请求格式
# 下面使用socket编程来实现一个程序模拟web服务器,并抓取浏览器发送请求数据,通过这些请求数据来认知HTTP协议的请求格式。
# 下面例子使用的技术是socket通信编程技术,因为HTTP是建立在TCP之上,所以下面在用TCP编程模式,其中对多用户处理,采用多路并发的编处理技术。
# #### 接收显示浏览器请求数据的程序实现
# +
# coding =utf-8
import socket  # socket programming module
import select  # I/O multiplexing module
import sys  # system calls
import signal
import os
# Print the PID so the process can be killed externally
# (handy when it is awkward to stop from inside ipython).
print('本进程ID:%d,可以使用kill灭之,如果在ipython中不方便结束的话!' % os.getpid())
# handle the Ctrl+C signal (allows a clean exit with Ctrl+C)
def handle_int(signum, handler):
    print('程序中断退出,信号:%d' % signum, '处理器是:{}'.format(handler))
    sys.exit(0)
# bind the handler for the Ctrl+C signal
signal.signal(signalnum=signal.SIGINT, handler=handle_int)
# For clarity, the code below performs no exception handling at all.
# select() bookkeeping lists
io_inputs = []
io_outputs = []
io_error = []
# create the listening socket
server_socket = socket.socket(
    socket.AF_INET,  # address family: internet (IP) addresses
    socket.SOCK_STREAM,  # communication style: stream (vs datagram)
    socket.IPPROTO_TCP)  # protocol: TCP
# bind the address (IP + port)
server_address = ('', 9999)
server_socket.bind(server_address)
# listen with a backlog of 2
server_socket.listen(2)
# register server_socket with the multiplexer
io_inputs.append(server_socket)
# monitor the multiplexed IO (accept new connections, drop closed ones)
while True:
    print('开始多路复用IO监控......')
    ready_inputs, ready_outputs, ready_error = select.select(
        io_inputs, io_outputs, io_error, None)
    # we only receive and never send, so the output IO set is ignored
    for fd in ready_inputs:
        # the server socket and client sockets are handled differently
        if fd == server_socket:  # server IO
            # server side: accept the incoming client connection
            client_socket, client_address = fd.accept()  # should handle errors: if the server dies, exit the app
            print('客户连接:IP=%s,PORT=%d' % client_address)
            # add the new client to the multiplexed set
            io_inputs.append(client_socket)
        else:  # per-client IO
            # receive the client's HTTP request data (an HTTP request message)
            # NOTE(review): this inner loop blocks on recv() until the peer
            # closes, stalling the select() loop for other clients —
            # presumably acceptable for this demo; confirm before reuse.
            while True:
                buffer = fd.recv(1024*4, 0)  # receive buffer size and recv flags
                if not buffer:
                    fd.close()  # close the connection
                    io_inputs.remove(fd)  # client left
                    print('客户连退出')
                    break
                else:
                    print(buffer.decode('UTF-8'))
# The code above can be pasted into an IDE such as PyCharm, or run right here.
# -
# #### HTTP请求协议的格式说明
# >GET / HTTP/1.1
# >Host: 127.0.0.1:9999
# >Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
# >Upgrade-Insecure-Requests: 1
# >Cookie: _xsrf=2|f97c87ab|aa4e0381b08224325486cb739b6d5c6a|1547777015
# >User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.2 Safari/605.1.15
# >Accept-Language: zh-cn
# >Accept-Encoding: gzip, deflate
# >Connection: keep-alive
# >空行
# >请求体
#
#
# HTTP请求协议由4各部分构成:
#
# |- 请求行:GET / HTTP/1.1
#
# |- 请求头:Host: 127.0.0.1:9999
#
# |- 空行: 表示请求头结束
#
# |- 响应体: 表示发送给服务器的数据
#
#
# 实际上,上面每一行结束都是使用两个非可视化字符表示结束:'\r\n'。这是属于HTTP协议的一部分。
# 1. 请求行
#
# GET / HTTP/1.1
#
# 由三个部分构成
# |- GET: 请求方法
# |- /: 请求资源
# |- HTTP/1.1: 请求协议与版本
#
# 浏览器默认发起的请求方法都是GET。
# 用户可以根据自身情况,指定请求资源:
# 2. 请求头:
#
# Host: 127.0.0.1:9999
# Accept: text/html,application/xhtml+xml,application/xml;q=0.9,/;q=0.8
# Upgrade-Insecure-Requests: 1
# Cookie: xsrf=2|f97c87ab|aa4e0381b08224325486cb739b6d5c6a|1547777015
# User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 1012_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.2 Safari/605.1.15
# Accept-Language: zh-cn
# Accept-Encoding: gzip, deflate
# Connection: keep-alive
#
# 由多行构成,每行由两个部分构成:
# |- HEADER键:表示数据的用途
# |- HEADER值:发起请求的数据
#
# 所有的HEADER的名称由HTTP协议规定,HEADER值的格式由协议规定。
# 3. 空行
#
# 请求头后面紧跟一个空行,表示请求数据描述完毕;空行后面紧跟的是请求的用户数据。
# 4. 用户数据
#
# 用户数据紧跟在空行后面,比如用户登录的用户名与密码等。
# 用户数据在某些请求方法是没有的,比如GET请求方法的请求协议就没有数据行。因为GET主要目的是GET数据,不是发送数据。GET方法传递数据使用QueryString格式。
# POST方法的数据行就存在,因为POST请求方式就是上传数据,比如用户登录信息,上传文件等。POST方法的目的就是POST数据。
# **作业:**
#
# 使用HTML写一个页面,使用GET,POST,PUT,TRACE等方法发起请求,并观察HTTP请求协议的格式。
# #### HTTP请求方法说明
#
# 下面介绍的请求方法,其中GET,POST与HEAD是必须实现的,其他的可选实现。
# 1. GET方法
#
# GET:请求指定的页面信息,并返回实体主体。
# 2. POST方法
#
# POST: 请求服务器接受所指定的文档作为对所标识的URI的新的从属实体。
# 3. HEAD方法
#
# HEAD: 只请求页面的首部。
# 4. PUT方法
#
# PUT: 从客户端向服务器传送的数据取代指定的文档的内容。
# 4. DELETE方法:
#
# DELETE: 请求服务器删除指定的页面。
# 5. OPTIONS方法:
#
# OPTIONS: 允许客户端查看服务器的性能。
# 6. TRACE方法:
#
# TRACE: 请求服务器在响应中的实体主体部分返回客户端发送的数据。 (回显数据,便于跟踪与测试)
# 7. 其他方法:
#
# PATCH: 实体中包含一个表,表中说明与该URI所表示的原内容的区别。
# MOVE: 请求服务器将指定的页面移至另一个网络地址。
# COPY: 请求服务器将指定的页面拷贝至另一个网络地址。
# LINK: 请求服务器建立链接关系。
# UNLINK: 断开链接关系。
# WRAPPED: 允许客户端发送经过封装的请求。
# Extension-mothed:在不改动协议的前提下,可增加另外的方法。
# ### HTTP协议响应格式
# #### 模拟浏览器发起HTTP请求,实现响应协议数据获取的代码实现
# +
# coding = utf-8
import socket
import signal
import sys
import re
# Handle Ctrl+C (SIGINT) so the socket is closed before the process exits.
def handle_int(signum, frame):
    """Signal handler: report the interrupt, close the socket, and exit."""
    print('程序中断退出,信号:%d' % signum, '处理器是:{}'.format(frame))
    client_socket.close()
    sys.exit(0)

# Register the SIGINT handler.
signal.signal(signalnum=signal.SIGINT, handler=handle_int)

server_name = 'www.huanqiu.com'

# Resolve the host name to addresses (optional: connect() also accepts a host name).
list_info = socket.getaddrinfo(
    host=server_name,
    port=80,
    family=socket.AF_INET,
    type=socket.SOCK_STREAM,
    proto=socket.IPPROTO_TCP,
    flags=socket.AI_ALL)  # one of the socket.AI_* flag values
for _, _, _, _, address in list_info:
    print('IP:%s,PORT:%d' % address)

# Create a TCP socket over IPv4.
client_socket = socket.socket(
    socket.AF_INET,      # address family: internet (IP) addresses
    socket.SOCK_STREAM,  # communication style: byte stream (vs. datagram)
    socket.IPPROTO_TCP)  # protocol: format of the packets on the wire

# Connect to the first resolved address (returns None; may be interrupted by a signal).
print(list_info[0][-1])
client_socket.connect(list_info[0][-1])

# Build the HTTP request by hand; each line ends with '\r\n' as required by HTTP.
request_string = ''
request_string += 'GET / HTTP/1.1\r\n'
request_string += 'Host: %s:9999\r\n' % server_name  # the host name can be replaced with an IP address
request_string += 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n'
request_string += 'Upgrade-Insecure-Requests: 1\r\n'
request_string += 'Cookie: _xsrf=2|f877d065|146c6a9838e67ba203776913fae34f45|1547796259\r\n'
request_string += 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.2 Safari/605.1.15\r\n'
request_string += 'Accept-Language: zh-cn\r\n'
# request_string += 'Accept-Encoding: gzip, deflate\r\n'  # omitted so the server sends an uncompressed body
request_string += 'Connection: keep-alive\r\n'
request_string += '\r\n'
request_string += '\r\n'
bytes_num = client_socket.send(request_string.encode('UTF-8'))
print('发送成功的字节数:%d' % bytes_num)

# Read the response header one byte at a time until the blank line ('\r\n\r\n').
header_buffers = b''
while True:
    buffer = client_socket.recv(1, 0)  # second argument: MSG_* flags
    if not buffer:
        # BUG FIX: the peer closed the connection before the header terminator
        # arrived; without this check the loop would spin forever on empty reads.
        break
    header_buffers += buffer
    # The last four bytes being '\r\n\r\n' marks the end of the response header.
    if header_buffers[-4:] == b'\r\n\r\n':
        break

# Decode the header bytes into a string for display and parsing.
header_string = header_buffers.decode('utf-8')
print('响应协议头:\n', header_string)

# Extract the body length from the Content-Length response header.
regex = r'Content-Length: (\d*?)\r\n'
len_content = re.findall(regex, header_string, re.MULTILINE)
len_content = int(len_content[0])
print('响应正文长度为:{}'.format(len_content))

# Read the response body in chunks until Content-Length bytes have arrived.
body_buffers = b''
while len(body_buffers) < len_content:
    # BUG FIX: the original tested len(body_buffers) == len_content after each
    # append, which loops forever if the server closes early or a read ever
    # overshoots the exact boundary; '<' plus an empty-read check is safe.
    buffer = client_socket.recv(1024*10, 0)  # MSG_* flags
    if not buffer:
        break
    body_buffers += buffer
print('响应体读取完毕!')
body_string = body_buffers.decode('utf-8')
# print(body_string)
# -
# #### HTTP响应协议的格式说明
# >HTTP/1.1 200 OK
# >Server: NWS_TCloud_S1
# >Connection: keep-alive
# >Date: Sat, 19 Jan 2019 14:41:09 GMT
# >Cache-Control: max-age=600
# >Expires: Sat, 19 Jan 2019 14:51:09 GMT
# >Last-Modified: Sat, 19 Jan 2019 14:30:00 GMT
# >Content-Type: text/html
# >Content-Length: 145971
# >X-NWS-LOG-UUID: 1950543609143247235 07102197e13b4c6b15335e94859e887e
# >X-Cache-Lookup: Hit From Disktank3
# >X-Daa-Tunnel: hop_count=1
# >X-Cache-Lookup: Hit From Inner Cluster
#
#
# HTTP响应协议也是由4个部分构成:
#
# |- 响应行:HTTP/1.1 200 OK
#
# |- 响应头:Server: NWS_TCloud_S1
#
# |- 空行: 表示响应头结束
#
# |- 响应体
#
# 与请求协议格式一样,每行结束都是使用'\r\n'表示。
# 1. 响应行
#
# 响应行由三部分构成:
#
# |- HTTP协议版本:HTTP/1.1
#
# |- 状态码:200
#
# |- 状态描述:OK
#
# 其中状态码也是HTTP协议规范的一部分。
# 2. 响应头
#
# 响应头由多行构成,直到空行表示结束。每行由两个部分构成:
#
# |- 响应头Key: 响应值说明
# |- 响应头Value:响应值
#
# 响应头也是HTTP协议的一部分。
# 3. 空行
#
# 空行表示响应头结束。加上最后一个响应头末尾的'\r\n',一般这个地方有两个'\r\n',一般作为识别响应头结束的位置或者标记。
# 4. 响应体
#
# 响应体就是响应的正文内容,一般是HTML页面,CSS,JS等,也可以是图片等。
# 响应头的长度什么时候结束,一般都在响应头中说明,响应正文的长度使用Content_Length说明:
#
# |- Content-Length: 145971
#
# 一般服务器为了传输的速度,都会响应体进行压缩,为了防止压缩,可以在请求的时候,使用请求头中说明,或者取消压缩相关的请求头说明,比如我们可以取消如下请求头,服务器就不会发送压缩数据。
#
# |- Accept-Encoding: gzip, deflate # 在请求头就是表示支持gzip压缩或者deflate压缩格式,可以不指定这个请求,服务器就不在发送压缩数据。
#
# 注意:
# 有的服务器因为数据太大,会采用分包的方式,分包格式采用独有的分包标记来实现。这里不详细介绍。大家今后碰见了稍微注意下。
# #### HTTP响应码说明
#
# HTTP响应码使用3位数表示
#
# HTTP响应码分成5类:
# |- 1xx :报告的 -请求被接收到,继续处理
# |- 2xx :成功 - 被成功地接收(received),理解(understood),接受(accepted)的动作 。
# |- 3xx :重发 - 为了完成请求必须采取进一步的动作。
# |- 4xx :客户端出错 - 请求包括错的语法或不能被满足。
# |- 5xx :服务器出错 - 服务器无法完成显然有效的请求。
#
# 1. 1XX响应码
#
# 100 继续
# 101 转换协议
#
# 2. 2XX响应码
#
# 200 OK
# 201 已创建
# 202 接受
# 203 非权威信息
# 204 无内容
# 205 重置内容
# 206 部分内容
#
# 3. 3XX响应码
#
# 300 多个选择
# 301 永久移动
# 302 发现
# 303 见其它
# 304 没有被改变
# 305 使用代理
# 307 临时重发
#
# 4. 4XX响应码
#
# 400 坏请求
# 401 未授权的
# 402 必要的支付
# 403 禁用
# 404 没有找到
# 405 方式不被允许
# 406 不接受的
# 407 需要代理验证
# 408 请求超时
# 409 冲突
# 410 不存在
# 411 长度必需
# 412 先决条件失败
# 413 请求实体太大
# 414 请求URI太大
# 415 不被支持的媒体类型
# 416 请求的范围不满足
# 417 期望失败
#
# 5. 5XX响应码
#
# 500 服务器内部错误
# 501 不能实现
# 502 坏网关
# 503 服务不能获得
# 504 网关超时
# 505 HTTP版本不支持
#
# ## HTTP常见头说明
# ### HTTP请求头说明
# 1. Accept
#
#
# >Accept: text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,
# > text/plain;q=0.8, video/x-mng,image/png,image/jpeg,image/gif;q=0.2,
# > text/css,*/*;q=0.1 s
#
# Accept消息报头:告诉服务器,请求者可以接受的内容类型与格式。
#
# |- text/xml表示请求内容是text文本,子类型是xml格式。子类型使用+指定多个混合, */*表示其他任意其他内容类型与格式, 每个类型之间使用逗号分隔。
#
# |- 在内容类型与格式后可以使用分号隔开指定内容类型提供的优先级别q=优先级别,优先级别在0到1之间的数。字母q表示quality 品质。
#
#
# 其中类型的支持由浏览器决定。但是类型的定义由MIMETYPE定义有兴趣可以扩展MIME-TYPE的知识点。
# MIME-TYPE的知识扩展可以参考:
# |- http://www.w3school.com.cn/media/media_mimeref.asp
# 不过常用的MIME-TYPE有:
# |- Text
# |- Image
# |- Audio
# |- Video
# |- Application
#
# 2. Accept-Charset
#
# 指定请求数据的字符编码
# ```
# Accept-Charset: ISO-8859-1, utf-8;q=0.66, *;q=0.66
# ```
#
# 对我们来说常见的字符编码:
# |- GB2312:标准汉字编码规范
# |- GBK:扩展汉字编码规范
# |- UTF-8:是一种针对Unicode的可变长度字符编码,又称万国码
# |- ISO-8859-1:西文编码
#
#
# 3. Accept-Encoding
#
# 指定请求的数据压缩格式
# ```
# Accept-Charset: ISO-8859-1, utf-8;q=0.66, *;q=0.66
# ```
#
# 用于优化数据传输的大小。
# 可以指定的压缩编码格式:
# identity:默认值,不采用压缩
# gzip:用于UNIX系统的文件压缩,扩展名.gz
# deflate: 同时使用了LZ77算法与哈夫曼编码的一个无损数据压缩算法
# compress: 古老的 unix 档案压缩指令,压缩后的档案会加上 .Z扩展名。
# 4. Accept-Language
#
# 告诉服务器,请求端可以使用的语言参考
# ```
# Accept-Language: zh-CN
# ```
# 语言的格式:
# 语言-地区
#
# 对我们来说比较常用的语言:
# en :英文 en-US :美国英语
# zh :汉语 zh-CN :大陆地区汉语
#
# 5. Authorization/Proxy-Authorization
#
# 传递授权信息给服务器/代理服务器。
#
# ```
# Authorization: Basic bXluYW1lOm15cGFzcw==
# ```
#
# Basic表示授权方式,授权信息的加密算法是Base64,后面紧跟加密的信息。加密信息为name:password格式,使用冒号分隔授权名与授权密码。
# 常见加密算法:
# |- Basic授权方式
# |- Digest授权方式(容易暴露用户名)
#
# ```
# Authorization: Digest username=“myuser”, //用户名
# realm=“HTTP Developer‘s Handbook”, //提示领域
# uri=“/”, //请求资源
# nonce=“a4b8c8d7e0f6a7b2c3d2e4f5a4b7c5d2e7f”, //用来产生digest消息
# response=“47d5aaf1b20e5b3483901267a3944737” //加密信息
# ```
# 6. Cookie
#
# 传递非用户的数据,用来管理HTTP状态
#
# ```
# Cookie: fname=chris; lname=shiflett
# ```
# Cookie的值使用分号分隔,每个值是name:value的值对
#
# 7. Expect
#
# 用来告诉服务器,是否需要继续传递消息主体
#
# ```
# Expect:100-Continue
# ```
# 目前Expect的值只能是100-continue,或者其他扩展。这个选项用来提升访问性能,用于客户端判定服务器是否愿意接受客户端发来的消息主体,(在有些情况下,如果服务器拒绝查看消息主体,这时客户端发送消息主体是不合适的或会降低效率。 )
# 注意:
# 如果服务器不能满足任何expectation值,服务器必须以417(期望失败)状态码响应,或者如果服务器满足请求时遇到其它问题,服务器必须发送4xx状态码。
# 该HEAD在有的代理服务器没有实现,会导致显示不正常,所有选择WEB服务器的一个很重要的测试,就是测试服务器是否支持Expect头。
#
# 8. From
#
# 用来转发一个电子邮件地址:
# ```
# From: <EMAIL>
# ```
# 现在很少有WEB服务器支持该HEAD。
#
# 9. Host
#
# 用来提供多寻址方式
# ```
# Host: www.google.com:80
# ```
#
# 拥有一个IP地址对应有多个主机名服务器,允许源服务器或网关去区分有内在歧义的URLS。
#
# 端口使用冒号分隔,默认端口80可以省略。
# 10. If-Match/ If-None-Match
#
# 发送一个实体与服务器当前实体比较
# ```
# If-Match: "xyzzy"
# ```
#
# 设置条件请求。如果一个客户端已经从一个资源获得一个或多个实体(entity),那么它可以通过在If-Match头域里包含相应的实体标签(entity tag)来验证实体是否就是服务器当前实体。与响应的Etag有关。
#
# 
#
# A客户端先发送IF-match,服务器匹配后修改实体资源,B客户端发If-match就无法匹配,导致412响应
#
# 11. If-Modified-Since /If-Unmodified-Since
#
# 指定最后修改的时间
# ```
# If-Modified-Since: Fri, 12 May 2013 18:53:33 GMT
# ```
#
# 由客户端往服务器发送的头,可以看到,再次请求本地存在的 cache 页面时,客户端会通过 If-Modified-Since 头将先前服务器端发过来的 Last-Modified 最后修改时间戳发送回去,这是为了让服务器端进行验证,通过这个时间戳判断客户端的页面是否是最新的,如果不是最新的,则返回新的内容,如果是最新的,则 返回 304 告诉客户端其本地 cache 的页面是最新的,于是客户端就可以直接从本地加载页面了,这样在网络上传输的数据就会大大减少,同时也减轻了服务器的负担。
#
# 12. If-Range
#
# 条件请求部分数据重传,条件是实体匹配或者时间匹配。
# ```
# If-Range: "df6b0-b4a-3be1b5e1"
# If-Range: Tue, 21 May 2002 12:34:56 GMT
# ```
#
# IF-Range头部需配合Range,如果没有Range参数,则If-Range会被视为无效。
# 如果If-Range匹配上,那么客户端已经存在的部分是有效的,服务器将返回缺失部分,也就是Range里指定的,然后返回206(Partial content),否则证明客户端的部分已无效(可能已经更改),那么服务器将整个实体内容全部返回给客户端,同时返回200OK
#
# 13. Range :
#
# 用来指定部分数据请求
# ```
# Range: bytes 0-499, 1000-1499,1500-
# ```
# 其中bytes表示单位,目前只能是bytes。
# 其他例子说明:假设请求10000个字节数据。
# -- 第一个500字节: bytes=0-499
# -- 第二个500字节: bytes=500-999
# -- 最后500字节: bytes=-500 或 bytes=9500-
# -- 仅仅第一个和最后一个字节: bytes=0-0,-1
#
# 14. Max-Forwards :
#
# 为TRACE和OPTIONS提供一种机制去限制转发请求的代理或网关的数量。
# 用来解决代理服务器阻止返回的响应问题。
#
# ```
# Max-Forwards: 0
# ```
# 15. Referer :
#
# 告诉服务器我是从哪个页面链接过来的,服务器籍此可以获得一些信息用于处理。
#
# 指定请求URI来源的资源URI允许服务器为了个人兴趣,记录日志,优化缓存等来产生回退链接列表。它照样允许服务器为维护而跟踪过时或写错的链接。
#
# ```
# Referer: http://www.w3.org/hypertext/DataSources/Overview.html
# ```
# 16. TE:
#
# 使用语法与Accept-Encoding与Content-Encoding一样,但该HEADER用于传输的编码指定。
#
# 17. User-Agent:
#
# 用来指定用户的代理请求工具。服务器可以根据这个特性来提供更适合客户的响应。
#
# ```
# User-Agent: Mozilla/5.0 (X11; U; Linux i686; zh-CN; rv:1.9.2.13)
# ```
# ### HTTP请求与响应都使用的头说明
# 1. Connection:
#
# 用于服务器对客户端连接的管理
# Connect常用的值
# |- Keep-Alive:保持连接。
# |- Close:在传输完成后关闭连接。
# |- Upgrade:用于代理服务器更新响应。
#
# ```
# Connection: Keep-Alive
# Connection: Close
# ```
# 2. Keep-Alive:
#
# 这个HEAD可能今后不被推荐使用。用于说明保持连接的时间,单位是秒数。
# 这个HEAD的值没有明确定义,有的服务器用来表示TCP连接打开的最大数量。
#
# ```
# Keep-Alive:300
# ```
# 3. Cache-Control
#
# 用来管理服务器的缓冲,包含代理服务器。
#
# ```
# Cache-Control: max-age=600, no-cache="Set-Cookie"
# ```
#
# 其中包含的设置项有:
# |- no-cache:不是用来禁止缓冲,而是用来重新校验缓冲的回送给客户的数据。如果no-cache缓存控制指令没有指定一个field-name,那么一个缓存不能在没有通过源服务器对它进行成功重验证的情况下,利用此响应去满足后续的请求。这允许源服务器去防止响应被缓存保存,即使此缓存已被设置可以返回陈旧响应给客户端。
# |- no-store:no-store缓存控制指令的目的在于防止不经意地释放或保留敏感信息
# |- max-age:值为秒数,表示请求时间长度不超过max-age的缓存。
# |- max-stale=600 :指定缓存的过期时间。
# |- min-fresh=600:表示请求时间长度超过min-fresh的缓存。
# |- no-transform:告知服务器不要修改响应的缓存数据。
# |- only-if-cached:告知服务器如果缓存存在,则直接使用缓存。
# |- cache-extension:缓存扩展。代理服务器不支持会直接丢弃该HEAD。
#
# 4. Date
#
# 指定消息产生的时间。
# 注意:时间格式必须按照Tue, 21 May 2002 12:34:56 GMT 指定。
#
# ```
# Date: Tue, 21 May 2002 12:34:56 GMT
# ```
# 5. Pragma
#
# 在HTTP/1.0用来控制缓冲,在HTTP1.1中使用Cache-Control。
#
# ```
# Pragma: no-cache
# ```
# 6. Trailer
#
# 用来确定在消息正文后,添加HEAD的值。
#
# ```
# Trailer: Date
# ```
#
# 产生的效果如下,必须与Transfer-Encoding:chunked使用。
#
# ```
# HTTP/1.1 200 OK
# Content-Type: text/html
# Transfer-Encoding: chunked
# Trailer: Date
#
# 7f
# <html>
#
# </html>
# 0
# Date: Tue, 21 May 2002 12:34:56 GMT
# ```
#
# 7. Transfer-Encoding:
#
# Transfer-Encoding: chunked 表示输出的内容长度不能确定,普通的静态页面、图片之类的基本上都用不到这个。但动态页面就有可能会用到。
#
# 使用这个HEAD表示数据的开始行不是数据,而是表示数据开始,数据结束使用行内容为0的单独行。
#
# ```
# Transfer-Encoding: chunked
# ```
#
# 8. Upgrade:
#
# 用来告知服务器与客户端协议或者协议版本的变化。响应会发送101 Switching Protocols 响应状态。
#
# ```
# Upgrade: HTTP/2.0, HTTPS/1.3, IRC/6.9, RTA/x11, websocket
# ```
# 9. Via:
#
# 被代理服务器使用,用来标识自己的信息,与User-Agent一样。
#
# ```
# Via: 1.0 fred, 1.1 example.com (Apache/1.1)
# ```
# 10. Warning
#
# 实体可能会发生的问题的通用警告
#
# ```
# Warning: 199 Miscellaneous warning
# ```
# ### HTTP非标准请求头
# 1. X-Requested-With
#
# 标识Ajax请求,大部分js框架发送请求时都会设置它为XMLHttpRequest
#
# ```
# X-Requested-With: XMLHttpRequest
# ```
#
# 2. DNT
#
# 请求web应用禁用用户追踪
#
# ```
# DNT: 1 (Do Not Track Enabled)
# DNT: 0 (Do Not Track Disabled)
# ```
#
# 3. X-Forwarded-For
#
# 一个事实标准,用来标识客户端通过HTTP代理或者负载均衡器连接的web服务器的原始IP地址
#
# ```
# X-Forwarded-For: client1, proxy1, proxy2
# X-Forwarded-For: 192.168.127.12, 192.168.3.11
# ```
#
# 4. X-Forwarded-Host
#
# 一个事实标准,用来标识客户端在HTTP请求头中请求的原始host,因为主机名或者反向代理的端口可能与处理请求的原始服务器不同
#
# ```
# X-Forwarded-Host: en.wikipedia.org:8080
# X-Forwarded-Host: en.wikipedia.org
# ```
#
# 5. X-Forwarded-Proto
#
# 一个事实标准,用来标识HTTP原始协议,因为反向代理或者负载均衡器和web服务器可能使用http,但是请求到反向代理使用的是https
#
# ```
# X-Forwarded-Proto: https
# ```
#
# 6. Front-End-Https
#
# 微软应用程序和负载均衡器使用的非标准header字段 Front-End-Https: on
#
# 7. X-Http-Method-Override
#
# 请求web应用时,使用header字段中给定的方法(通常是put或者delete)覆盖请求中指定的方法(通常是post),如果用户代理或者防火墙不支持直接使用put或者delete方法发送请求时,可以使用这个字段
#
# ```
# X-HTTP-Method-Override: DELETE
# ```
#
# 8. X-ATT-DeviceId
#
# 允许更简单的解析用户代理在AT&T设备上的MakeModel/Firmware
#
# ```
# X-Att-Deviceid: GT-P7320/P7320XXLPG
# ```
#
# 9. X-Wap-Profile
#
# 设置描述当前连接设备的详细信息的xml文件在网络中的位置
#
# ```
# x-wap-profile: http://wap.samsungmobile.com/uaprof/SGH-I777.xml
# ```
#
# 10. Proxy-Connection
#
# 早期HTTP版本中的一个误称,现在使用标准的connection字段
#
# ```
# Proxy-Connection: keep-alive
# ```
#
# 11.X-UIDH
#
# 服务端深度包检测插入的一个唯一ID标识Verizon Wireless的客户
#
# ```
# X-UIDH: ...
# ```
#
# 12. X-Csrf-Token,X-CSRFToken,X-XSRF-TOKEN
#
# 防止跨站请求伪造
#
# ```
# X-Csrf-Token: <KEY>
# ```
#
# 13. X-Request-ID,X-Correlation-ID
#
# 标识客户端和服务端的HTTP请求
#
# ```
# X-Request-ID: f058ebd6-02f7-4d3f-942e-904344e8cde5
# ```
#
#
# ### HTTP通用响应头说明
# 1. Accept-Ranges
#
# 说明服务器是否支持指定范围的数据重发或者发送。
#
# HEAD的值说明:
#
# |- bytes表示支持
# |- none表示不支持。
#
#
# ```
# Accept-Ranges: bytes
# Accept-Ranges: none
# ```
#
# 2. Age
#
# 说明用户请求资源的存在年龄。
# 值的单位为秒,使用4字节整数表示,注意不要溢出。
#
# ```
# Age:2500
# ```
# 3. Authentication-Info
#
# 包含用户的授权信息
#
# ```
# HTTP/1.1 200 OK
# Authentication-Info: qop="auth-int", rspauth="<PASSWORD>",
# cnonce="f5e2d7c0b6a7f2e3d2c4b5a4f7e4d8c8b7a", nc="00000001"
# ```
#
# 4. Content-Disposition:
#
# Content-disposition 是 MIME 协议的扩展,MIME 协议指示 MIME 用户代理如何显示附加的文件。Content-disposition其实可以控制用户请求所得的内容存为一个文件的时候提供一个默认的文件名,文件直接在浏览器上显示或者在访问时弹出文件下载对话框。
# 可以解决如下问题:
# |- 希望某类或者某已知MIME 类型的文件(比如:*.gif;*.txt;*.htm)能够在访问时弹出“文件下载”对话框。
# |- 希望客户端下载时以指定文件名显示。
# |- 希望某文件直接在浏览器上显示而不是弹出文件下载对话框。
#
# ```
# Content-Disposition: attachment; filename="example.pdf"
# ```
#
# 5. Etag
#
# 提供一个值唯一指定请求资源的版本(分块发送的资源边界标识)
# 客户一般用来重发给服务器,用来请求对缓存数据的状态管理。
#
# ```
# ETag: "1cdb-4efedbb8"
# ```
# 6. Location:
#
# 告知浏览器重新请求的位置。
# 用于为了完成请求或识别一个新资源,使接收者能重定向于Location指明的URI而不是请求URI。
# 其中必须使用绝对位置。
#
# ```
# Location: http://httphandbook.org/
# ```
# 7. Proxy-Authenticate/WWW-Authenticate
#
# 要求客户提供授权信息
# |- Basic是加密算法
# |- Realm指定提示域信息。
#
#
# ```
# WWW-Authenticate: Basic realm="<EMAIL>"
# ```
# 8. Refresh
#
# 告知浏览器刷新的参数
#
# 等价于
# <meta http-equiv="refresh" content="3; url=http://httphandbook.org/">
#
#
# ```
# Refresh: 3; url=http://httphandbook.org/
# ```
# 7. Retry-After
#
# 指定重试时间间隔或者时间
#
# ```
# Retry-After: Tue, 21 May 2002 12:34:56 GMT
# Retry-After: 600
# ```
# 8. Server
#
# 指定服务器相关信息
#
# ```
# Server: Apache
# ```
# 9. Set-Cookie
#
# 用来指示客户端浏览器存储数据到cookie。
# Cookie格式说明:
# fname=chris :存储文件名
# domain=httphandbook.org :存储的域
# path=/ :存储路径
# expires=Tue, 21 May 2002 12:34:56 GMT:失效时间
# Secure :表明使用安全连接发送,比如SSL。
#
#
# ```
# Set-Cookie: fname=chris; domain=.httphandbook.org; path=/; expires=Tue, 21 May 2002 12:34:56 GMT; secure
# ```
# 10. Vary:
#
# vary的意义在于告诉代理服务器缓存,如何判断请求是否一样,vary中的组合就是服务器判断的依据,
# 比如Vary中有User-Agent,那么即使相同的请求,如果用户使用IE打开了一个页面,再用Firefox打开这个页面的时候,CDN/代理会认为是不同的页面,如果Vary中没有User-Agent,那么CDN/代理会认为是相同的页面,直接给用户返回缓存的页面,而不会再去web服务器请求相应的页面。
#
# ```
# Vary: Accept-Language, User-Agent
# ```
#
# ### HTTP用于响应体的头说明
# 1. Allow:
#
# 对请求而言,是告诉服务器只能使用指定的方法请求。
# 对响应而言,是告诉浏览器,服务只能支持的方法。
#
# ```
# Allow: GET, HEAD, POST
# ```
# 2. Content-Encoding
#
# 指定实体内容的压缩编码格式:
# 一般编码格式是:
# |- gzip
# |- compress
# |- deflate
#
# ```
# Content-Encoding:gzip,deflate,compress
# ```
#
# 3. Content-Language
#
# 实体内容支持的语言。
#
# 4. Content-Length
#
# 实体内容的长度
#
# 5. Content-Location
#
# 指定资源的位置
# 6. Content-MD5
#
# 指定实体内容的MD5信息
#
# ```
# Content-MD5: ZTFmZDA5MDYyYTMzZGQzMDMxMmIxMjc4YThhNTMyM2I=
# ```
# 7. Content-Range
#
# 服务器用来返回请求资源的一部分。
#
# ```
# Content-Range: 600-900/1234
# ```
#
# 说明返回总的1234字节的600-900字节。
#
# 8. Content-Type
#
# 指定内容实体的文件格式/子格式。
#
# ```
# Content-Type: text/html
# ```
#
# 9. Expires
#
# 指定内容实体过期的时间
#
# ```
# Expires: Tue, 21 May 2002 12:34:56 GMT
# ```
# 10. Last-Modified
#
# 资源最后修改的时间,往往用来计算资源的Age。
#
# ```
# Last-Modified: Tue, 21 May 2002 12:34:56 GMT
# ```
# ### HTTP非标准响应头
# 1. X-XSS-Protection
#
# 过滤跨站脚本
#
# ```
# X-XSS-Protection: 1; mode=block
# ```
#
# 2. Content-Security-Policy, X-Content-Security-Policy,X-WebKit-CSP
#
# 定义内容安全策略
# ```
# X-WebKit-CSP: default-src 'self'
# ```
#
# 3. X-Content-Type-Options
#
# 唯一的取值是"",阻止IE在响应中嗅探定义的内容格式以外的其他MIME格式
#
# ```
# X-Content-Type-Options: nosniff
# ```
#
# 4. X-Powered-By
#
# 指定支持web应用的技术
# ```
# X-Powered-By: PHP/5.4.0
# ```
#
# 5. X-UA-Compatible
#
# 推荐首选的渲染引擎来展示内容,通常向后兼容,也用于激活IE中内嵌chrome框架插件
# <meta http-equiv="X-UA-Compatible" content="chrome=1" />
#
# ```
# X-UA-Compatible: IE=EmulateIE7
# X-UA-Compatible: IE=edge
# X-UA-Compatible: Chrome=1
# ```
#
# 6. X-Content-Duration
#
# 提供音视频的持续时间,单位是秒,只有Gecko内核浏览器支持
#
# ```
# X-Content-Duration: 42.666
# ```
#
# 7. Upgrade-Insecure-Requests
#
# 标识服务器是否可以处理HTTPS协议
#
# ```
# Upgrade-Insecure-Requests: 1
# ```
#
# 8. X-Request-ID,X-Correlation-ID
#
# 标识一个客户端和服务端的请求
#
# ```
# X-Request-ID: f058ebd6-02f7-4d3f-942e-904344e8cde5
# ```
# ### 响应头的使用例子:
#
# #### 要求浏览器使用basic登录的例子
#
# 这里使用响应头,发送一个用户需要登录的协议沟通:
#
# ````
# "HTTP/1.1 401 Unauthorized\r\n"
# "WWW-Authenticate: Basic realm=\"Louis Young Examples for Response!\"\r\n"
# "Connection: Keep-Alive\r\n"
# "Keep-Alive: 115\r\n"
# "\r\n\r\n");
# ````
#
# 代码略
# #### 获取浏览器的发送的cookie
# 代码略
# #### 获取服务器发送的set-cookie
# 代码略
# ## HTTP Cookie与Session
# ### Cookie的格式
#
# 注意: 在HTTP协议中,一般Cookie支持大小最多4K所以使用Cookie处理数据存在局限性。
# 一般Cookie采用明文信息,所以极度不安全。一般浏览器中可以设置是否存储Cookie。
# #### 几个浏览器发送的cookie例子
#
# 有的站点在访问时,浏览器不会发起cookie。
#
#
# 1. 自己写的程序抓取到的cookie
#
# ```
# Cookie: _xsrf=2|f877d065|146c6a9838e67ba203776913fae34f45|1547796259
# ```
#
# 2. 使用浏览器向百度翻译请求时发送的cookie
# ```
# Cookie: Hm_lpvt_64ecd82404c51e03dc91cb9e8c025574=1547803048; Hm_lvt_64ecd82404c51e03dc91cb9e8c025574=1547545982,1547720682,1547775577,1547803048; from_lang_often=%5B%7B%22value%22%3A%22fra%22%2C%22text%22%3A%22%u6CD5%u8BED%22%7D%2C%7B%22value%22%3A%22zh%22%2C%22text%22%3A%22%u4E2D%u6587%22%7D%2C%7B%22value%22%3A%22en%22%2C%22text%22%3A%22%u82F1%u8BED%22%7D%5D; to_lang_often=%5B%7B%22value%22%3A%22en%22%2C%22text%22%3A%22%u82F1%u8BED%22%7D%2C%7B%22value%22%3A%22zh%22%2C%22text%22%3A%22%u4E2D%u6587%22%7D%5D; FANYI_WORD_SWITCH=1; HISTORY_SWITCH=1; REALTIME_TRANS_SWITCH=1; SOUND_PREFER_SWITCH=1; SOUND_SPD_SWITCH=1; locale=zh; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; H_PS_PSSID=1430_21119_28329_28132_26350_28267_27245_22158; PSINO=1; delPer=0; pgv_pvi=5448934400; pgv_si=s1851237376; BDUSS=hsVWV6czh0a1hOQ3BaYkhTM0FrOXhNYnBCUWFsMlY0clhlYkNvTkRKdENDUGxiQUFBQUFBJCQAAAAAAAAAAAEAAAAFyMhmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEJ70VtCe9FbY0; BIDUPSID=C53590ACE23DAC88DBE0C3D65AEBAA30; PSTM=1539535646; BAIDUID=DB00283B42FBC875B67496A00F47ABAB:FG=1
# ```
#
# 使用浏览器解析出来是一个表格:
#
# cookie名称|Cookie值
# -|-
# BAIDUID|DB00283B42FBC875B67496A00F47ABAB:FG=1
# BDORZ|B490B5EBF6F3CD402E515D22BCDA1598
# BDUSS|hsVWV6czh0a1hOQ3BaYkhTM0FrOXhNYnBCUWFsMlY0clhlYkNvTkRKdENDUGxiQUFBQUFBJCQAAAAAAAAAAAEAAAAFyMhmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEJ70VtCe9FbY0
# BIDUPSID|C53590ACE23DAC88DBE0C3D65AEBAA30
# delPer|0
# FANYI_WORD_SWITCH|1
# from_lang_often|%5B%7B%22value%22%3A%22fra%22%2C%22text%22%3A%22%u6CD5%u8BED%22%7D%2C%7B%22value%22%3A%22zh%22%2C%22text%22%3A%22%u4E2D%u6587%22%7D%2C%7B%22value%22%3A%22en%22%2C%22text%22%3A%22%u82F1%u8BED%22%7D%5D
# H_PS_PSSID|1430_21119_28329_28132_26350_28267_27245_22158
# HISTORY_SWITCH|1
# Hm_lpvt_64ecd82404c51e03dc91cb9e8c025574|1547972748
# Hm_lvt_64ecd82404c51e03dc91cb9e8c025574|1547545982,1547720682,1547775577,1547803048
# locale|zh
# pgv_pvi|5448934400
# pgv_si|s1851237376
# PSINO|1
# PSTM|1539535646
# REALTIME_TRANS_SWITCH|1
# SOUND_PREFER_SWITCH|1
# SOUND_SPD_SWITCH|1
# to_lang_often|%5B%7B%22value%22%3A%22en%22%2C%22text%22%3A%22%u82F1%u8BED%22%7D%2C%7B%22value%22%3A%22zh%22%2C%22text%22%3A%22%u4E2D%u6587%22%7D%5D
#
# 3. 抓取到百度首页响应的Cookie如下:
#
# ```
# HTTP/1.1 200 OK
# Content-Type: text/html;charset=utf-8
# Set-Cookie: BD_HOME=1; path=/
# Set-Cookie: BDSVRTM=321; path=/
# Set-Cookie: H_PS_PSSID=1430_21119_28206_28132_26350_28267_27245_22158; path=/; domain=.baidu.com
# Content-Encoding: gzip
# Expires: Sun, 20 Jan 2019 08:36:16 GMT
# Transfer-Encoding: Identity
# Cache-Control: private
# Date: Sun, 20 Jan 2019 08:36:16 GMT
# Connection: Keep-Alive
# Server: BWS/1.1
# Bdqid: 0x8878399100134524
# Bdpagetype: 2
# Strict-Transport-Security: max-age=172800
# X-Ua-Compatible: IE=Edge,chrome=1
#
# ```
#
# 其中Cookie是:
#
# ```
# Set-Cookie: BD_HOME=1; path=/
# Set-Cookie: BDSVRTM=321; path=/
# Set-Cookie: H_PS_PSSID=1430_21119_28206_28132_26350_28267_27245_22158; path=/; domain=.baidu.com
#
# ```
# #### Cookie的格式
#
# Cookie就是使用Key:Value格式构成的值对。
#
# 其中的Key有通用约定用途的Key,也可以用户自己定义的Key,因为Cookie既用于浏览器与Web服务器之间数据约定,也用于用户的数据约定。
#
# Cookie约定的Key有:
# Set-Cookie: <name>=<value>[; <name>=<value>]...
# [; expires=<date>][; domain=<domain_name>]
# [; path=<some_path>][; secure][; httponly]
#
# 下面是百度的Cookie的例子:
# Set-Cookie: BAIDUID=801BAD1D5CB256E7448139623D0AE089:FG=1; expires=Thu, 31-Dec-37 23:55:55 GMT; max-age=2147483647; path=/; domain=.baidu.com
# Set-Cookie: BIDUPSID=801BAD1D5CB256E7448139623D0AE089; expires=Thu, 31-Dec-37 23:55:55 GMT; max-age=2147483647; path=/; domain=.baidu.com
# Set-Cookie: PSTM=1547976171; expires=Thu, 31-Dec-37 23:55:55 GMT; max-age=2147483647; path=/; domain=.baidu.com
# Set-Cookie: BD_LAST_QID=15551781544627683897; path=/; Max-Age=1
# 1. name=value
#
# 指定cookie的名称和值,名称大小写不敏感,值必须经过URL编码。
# 2. domain=<domain_name>
#
# 域,表示当前cookie所属于哪个域或子域下面。
#
# 对于服务器返回的Set-Cookie中,如果没有指定Domain的值,那么其Domain的值是默认为当前所提交的http的请求所对应的主域名的。
#
# 比如访问 http://www.baidu.com,返回一个cookie,没有指名domain值,那么其为值为默认的www.baidu.com。
#
# 注:
# |- 临时cookie(没有expires参数的cookie)不能带有domain选项。
#
# |- 当客户端发送一个http请求时,会将有效的cookie一起发送给服务器。
#
# |- 如果一个cookie的domain和path参数和URL匹配,那么这个cookie就是有效的。一个URL中包含有domain和path
# 3. path=<some_path>
#
# 表示cookie的存储的路径。
# 4. secure
#
# 表示该cookie只能用https传输。一般用于包含认证信息的cookie,要求传输此cookie的时候,必须用https传输。
# 5. httponly :
#
# 表示cookie不能被客户端脚本获取到。
# 表示此cookie必须用于http或https传输。这意味着,浏览器脚本,比如javascript中,是不允许访问操作此cookie的。
# 6.expires=<date>:
#
# 如果cookie超过date所表示的日期时,cookie将失效。
# 如果没有设置这个选项,那么cookie将在浏览器关闭时失效。
#
# 注意:date是格林威治时间(GMT),使用如下格式表示:
#
# DAY, DD MMM YYYY HH:MM:SS GMT
#
# |- DAY:星期 (Sun, Mon, Tue, Wed, Thu, Fri, Sat)
# |- DD:每个月的天数(从 01 到每个月最大天数)
# |- MMM:月份(Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, Nov, Dec)
# |- YYYY:年(2019)
# |- HH:小时(24小时制:22 表示 10:00 P.M.)
# |- MM:分钟(00-59)
# |- SS:秒:(00-59)
# 7. max-age=<seconds>
#
# 用max-age指定当前cookie是在多长时间之后而失效(单位是秒数,从请求开始计时)
# ### 发送Cookie
# ### 获取Cookie
# ### 浏览器中Cookie的存放
# 1. Chrome的存放位置
#
# Mac OS X: ~/Library/Application Support/Google/Chrome/Default
# Windows XP: Documents and SettingsusernameLocal SettingsApplication DataGoogleChromeUser DataDefault
# Windows 7: C:\Users\XXXX\AppData\Local\Google\Chrome\User Data\Default\
# Linux: ~/.config/google-chrome/Default
# 在Mac下的截图:
# >
# 2. Safari的存放位置
#
# Mac : /Users/yangqiang/Library/Safari/Databases
#
# 注意:Safari的Cookie保存采用的是Sqlite数据保存。
# 在Mac下截图
# >
#
# 3. Firefox的存放位置
#
# /Users/yangqiang/Library/Application Support/Firefox/Profiles/bqyl1a0p.default
# 在Mac下截图
# >
# ### Session机制
#
# HTTP协议本身是无状态的,客户端的每一次请求之间都是独立的,因为无论是客户端还是服务器都没有必要纪录彼此过去的行为。
#
# 然而随着互联网技术的发展,很快这种无状态协议满足不了应用需求:
# |- DHTML技术开始出现(Javascript + DOM )
# |- 服务器端则出现了CGI规范以响应客户端的动态请求
# |- cookie的出现解决HTTP协议无状态的缺陷。
# |- session出现提供客户端与服务器之间保持状态的最终解决方案。
# #### Session的作用
#
#
# Session是用来识别用户状态的一种机制或者解决方案。
#
# Session包含两个部分:
# |- 状态存储
# |- 状态数据
#
# #### Sesson状态存储
#
# Session机制是一种服务器端的机制,在服务器就是一个类似Hash的结构,这种类似散列表的结构来保存用户ID信息。
#
#
# #### Session状态数据
#
# |
# |- 当客户端第一次请求的时候,服务器没有该客户状态,则为此客户端生成一个Session ID(该ID与客户端唯一绑定),并且创建一条存储纪录;
# |
# |- 当客户端再次访问,服务器首先检查这个客户端的请求里是否已包含了一个Session ID,从而识别用户状态(比如用户是否登录等),用户状态就是绑定在ID上的数据(这种数据通常使用Key-Value的方式存放,就是Hash表结构)
#
#
# #### Session的实现技术
#
# Session的实现使用的Cookie。
# |
# |- 当第一次访问,客户端没有Session,服务器自动生成一个Session ID,并在在响应Cookie中返回给浏览器,浏览器一般会保存到本地Cookie数据库。下载请求会自动把本地Cookie打包到请求协议中。
#
# |- 但是Cookie因为自身的不安全性,一般浏览器会提供Cookie存储选项:是否存储Cookie到本地。如果浏览器不保存Cookie,就无法实现浏览器发送Cookie到服务器验证Session ID。所以为了解决Cookie局限带来的问题,一般会采用URL重写的方式来实现Session ID传递。重写技术就是创建一个QueryString或者直接在URL后添加Session ID(用来区分QueryString)。
# |- http://...../xxx;jsessionid=xxx
# |- http://...../xxx?jsessionid=xxx
#
# 注意:上面其中;?就是QueryString格式规范。
# 解决Cookie禁用的问题,还有其他技术:比如隐藏表单等(该技术因为要创建HTML隐藏表单类型,所以一般不再使用)
#
# 同时其中的Session ID在不同的服务器中由不同的命名方式。
# #### Session的失效问题
#
# 是不是服务器保存了Session ID信息,这样用户状态就一直存在呢?这样服务器的负载就太大了,这种机制明显不行的。一般服务器会对Session ID对应的活跃度进行检测,一定时间后,服务器会删除客户Session ID对应的数据。
# 这样当用户一段时间不访问Web服务器,Session就会失效,需要重新建立Session,比如用户登录就需要重新登录。
#
# 一般Session ID设置的Cookie周期都是临时,当浏览器关闭后,Cookie都会删除,从而需要重新建立Session ID,如果我们自己写程序保留Cookie,那就另当别论(可以保存Session ID的)。
# ## HTTP请求方法
# ### GET请求方法
# 浏览器默认的请求方法都是GET方法。为了传递数据,提出了QueryString的规范。下面使用例子说明GET方法的使用:
# #### GET方法获取页面数据
# +
#
# -
# #### 在GET方法中使用QueryString传递数据
# +
#
# -
# #### 使用GET获取百度翻译的Cookie与Session状态
# +
#
# -
# ### POST请求方法
# #### 使用POST提交表单
# +
#
# -
# #### 使用POST上传文件
# +
#
# -
# #### 使用POST实现自动登录
# ### PUT请求方法
#
#
# 这里提出PUT方法,是因为知乎的登录验证码使用了PUT请求方法。
# #### 知乎验证码破解实现例子
|
东南大学/D01HTT协议与爬虫/3_HTTP协议与访问模块开发_HTTP协议.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### 자료형
# - 리스트
# - 튜플
# - 딕셔너리
# ### 리스트
# - 순서가 있는 데이터의 집합
# 리스트 변수 선언하는 방법
# Declare a list: an ordered collection that may mix element types.
ls = [1, "python", False]
print(type(ls), ls)

# join: concatenate the elements of a list of strings into a single string.
a = ["data", "science", "python"]
result = "@".join(a)
print(result, type(result))
# ### quiz 1
# - 리스트 데이터를 문자열로 합쳐서 문장으로 만들어 주세요.
# - 가장 앞글자는 대문자, 마지막에는 마침표를 찍어주세요.
# ls =["python","is", "best", "programming", "language"]
# Quiz 1, solution 1: capitalize the first word, then join the rest with spaces.
ls = ["python", "is", "best", "programming", "language"]
data1 = ls[0][0].upper() + ls[0][1:]
data2 = " ".join(ls[1:])
print(data1, data2, sep=" ", end=".\n")

# Quiz 1, solution 2: join everything first, then fix the first letter and
# append the final period.
ls = ["python", "is", "best", "programming", "language"]
result = " ".join(ls)
result = result[0].upper() + result[1:] + "."
print(result)
# #### 리스트의 오프셋
# - 문자열은 하나의 문자를 오프셋 단위로 인식
# - 리스트는 하나의 값을 오프셋 단위로 인식
# Offset (index/slice) demos: a list treats each element as one offset unit.
a = ["python", "data", 1, True, "fast"]
print(a[1])        # single element by index
print(a[2:])       # slice from index 2 through the end
print(a[::-1])     # reversed copy
print(a[-2:])      # last two elements
print(a[-2:][0])   # first element of that slice
# ### quiz 2
# - 홀수 데이터를 거꾸로 출력하세요.
# - 오프셋 사용
# '''
# ls = [0,1,2,3,4,5,6,7,8,9]
# '''
# -결과
# '''
# result => [9,7,5,3,1]
# '''
# Quiz 2, solution 1: step backwards by 2 from the end -> [9, 7, 5, 3, 1].
ls = list(range(10))
result = ls[::-2]
print(result)

# Quiz 2, solution 2: slice out the odd elements, then reverse that slice.
ls = list(range(10))
result = ls[1::2][::-1]
print(result)

# join turns a list of strings into one string; split does the reverse.
txt = "python data science"
ls = ["python", "data", "science"]
result = txt.split(" ")
print(result)
# ### 리스트 함수
# - append : 데이터 추가
# - sort : 데이터 정렬
# - reverse : 데이터를 역순으로 정렬
# append: add one element to the end of the list (in place).
ls = ["data", "science"]
ls.append("fastcampus")
print(ls)

# sort: order the elements in place (ascending by default).
ls = ["fast", "data", "campus", "science"]
ls.sort()
print(ls)

# reverse: flip the element order in place.
ls.reverse()
print(ls)

# Lists are mutable: replace the element at offset 1.
ls[1] = "slow"
print(ls)
# ### quiz 3
# - "Beautiful is better than ugly." 단어의 길이가 긴 순서대로 재정렬해서 문장을 다시 만드는 코드를 작성해주세요
# - <list변수>.sort(key=len)
#
# - 결과
# '''
# Beautiful better ugly than is
# '''
# Warm-up: sort(key=len) orders strings by length, shortest first.
data = ["abc", "bc", "d", "efgh"]
data.sort(key=len)
print(data)

# Quiz 3 on a word list: longest word first (sort ascending, then reverse).
data = ["Beautiful", "is", "better", "than", "ugly"]
data.sort(key=len)
data.reverse()
print(data)

# Attempt on the raw sentence.
# BUG FIX: the original called txt.split(" ") here, discarding the lower() and
# replace(".", "") results computed just above, so "ugly." kept its period.
# Split the processed string instead.
txt = "Beautiful is better than ugly."
result = txt.lower()
result = result.replace(".", "")
result = result.split(" ")
result.sort(key=len)
result.reverse()
print(result)

# Final solution: strip the period, sort words longest-first, rebuild the
# sentence, and restore the capitalization and punctuation.
txt = "Beautiful is better than ugly."
result = txt.lower()[:-1]  # lowercasing is optional for this input
result = result.split(" ")
result.sort(key=len)
result.reverse()
result = " ".join(result)
result = result[0].upper() + result[1:] + "."
print(result)
# ### 튜플
# - 리스트와 같지만 수정이 불가능한 데이터 타입
# - 리스트보다 저장공간을 적게 사용하는 특징
# Declare a tuple: like a list, but immutable.
tp = (1, 2, 3)
print(type(tp), tp)
# Deliberate error demo: tuples cannot be modified, so this raises TypeError.
# (As a plain script this line halts execution; in the notebook it is its own cell.)
tp[1] = 10
# Offset (slice) indexing works on tuples just as on lists.
tp[0::2]
# +
import sys
# A tuple uses less memory than a list holding the same elements.
ls = [1,2,3,4,5]
tp = (1,2,3,4,5)
print(sys.getsizeof(ls), "byte")
print(sys.getsizeof(tp), "byte")
# -
# ### 딕셔너리
# - 순서가 없는 데이터 집합
# - 키, 값으로 구성되어 있는 데이터 타입
# Declare a dictionary: key/value pairs (keys here are an int and strings).
dic = {
    1: "one",
    "A": ["data", "science"],
    "숫자": 1234,
}
print(type(dic), dic)

# Access values by key.
dic["A"], dic[1]

# Modify the value stored under an existing key.
dic[1] = "하나"
dic
# ### quiz 1
# - 아래의 테이블 데이터를 딕셔너리 데이터 타입으로 선언하세요.
#
# key value
# name python
# adddr seoul
# age 25
# +
# Quiz 1: the table as a dictionary (key -> value).
dic = {
    "name": "python",
    "addr": "seoul",
    "age": 25,
}
print(dic)
# -
# # 딕셔너리에서 키값으로 사용할 수 있는 데이터 타입은 문자열과 정수 데이터 타입만 사용 가능합니다.
# Dictionaries do not support offset (positional) indexing.
dic = {"data1":1, "data2":2, "data3":3}
dic
# Deliberate error demo: 1 is not a key of this dict, so this raises KeyError.
dic[1] # the brackets must contain a key, not a position
# #### 딕셔너리 함수
# - keys() : 키 데이터만 가져오는 함수
# - values() : 값 데이터만 가져오는 함수
# - items() : 키와 값을 가져오는 함수
# - update() : 두개의 딕셔너리를 합쳐주는 함수
dic = {
    "data1": 1,
    "data2": 2,
    "data3": 3,
}
dic

# keys(): a view over the dictionary's keys.
result = dic.keys()
print(type(result), result)

# values(): a view over the values.
result = dic.values()
result

# items(): a view over (key, value) pairs.
result = dic.items()
result

# update(): merge another dict in place; overlapping keys take the new value.
dic1 = {1: "a", 2: "b"}
dic2 = {2: "c", 3: "d"}
dic1.update(dic2)  # expected merge: {1: "a", 2: "c", 3: "d"}
dic3 = dic1
print(dic3)
# #### quiz 2
# - "국어 점수는 80점, 영어 점수는 90점, 수학 점수는 70점" 이 데이터를 리스트, 튜플, 딕셔너리 데이터 타입으로 나타내세요.
# Quiz 2: scores (kor 80, eng 90, mat 70) as a list, a tuple, and a dict.
ls = ["kor", 80, "eng", 90, "mat", 70]
tp = ("kor", 80, "eng", 90, "mat", 70)
dic = {"kor": 80, "eng": 90, "mat": 70}
print(ls, tp, dic)
# +
# Answer: several equivalent representations.
# Two parallel lists: subjects and scores.
sub = ["kor", "eng", "mat"]
sco = [80, 90, 70]
#print(sub, sco)
# A list of (subject, score) pairs.
ls = [("kor", 80), ("eng", 90), ("mat", 70)]
#print(ls)
# A tuple of pairs.
tp = (("kor", 80), ("eng", 90), ("mat", 70))
#print(tp)
# A dictionary mapping subject to score.
dic = {
    "kor": 80,
    "eng": 90,
    "mat": 70,
}
print(dic)
# -
|
python/sem0/04_datatype_2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib
# The backend must be selected before pyplot is imported; nbAgg renders an
# interactive figure inside the notebook. The alternatives are kept for reference.
#matplotlib.use('WebAgg')
#matplotlib.use('Qt4Cairo')
#matplotlib.use('Qt5Cairo')
matplotlib.use('nbAgg')
import matplotlib.pyplot as plt
# The iterative solution is, in fact, given by
# $$
# v^{(n+1)}=P^{T} v^{(n)} \; ,
# $$
# with $P$ is the transition matrix ($P^{T}v$ gives a "probabilistic state", which its entries are the probabilities of moving to its descrete state) $v^{(n+1)}$ the state after $n+1$ transformations of the initial state $v^{(0)}$.
# That is, we have a sequence of $v$'s given by:
#
# \begin{align}
# v^{(0)} & \;\;\;\;\;\;\;\;\;\;\;\;\;\;\; 0 \;\text{(initial state)}
# \\
# v^{(1)}=&P^{T}v^{(0)} \;\;\;\;\;\;\; 1
# \\
# v^{(2)}=&P^{T}v^{(1)} \;\;\;\;\;\;\; 2
# \\
# \vdots
# \\
# v^{(n)}=&P^{T} v^{(n-1)} \;\;\;\;\; n \;.
# \end{align}
#
# The idea is that as $v^{(\infty)}=v^{(n\to \infty)}$, tends to a constant.
# This constant vector is the equilibrium vector, i.e. the probability to
# find the system in a given state.
#
# For example, if the equilibrium vector is $W=(0.1,0.4,0.5)$, then there is $10\%$ probability
# to find the system in state $1$, $40\%$ in state $2$, and $50\%$ in state $3$.
# +
# Build a random Markov chain with N_dim states: each row of the transition
# matrix is drawn from a Dirichlet distribution, so every row is non-negative
# and sums to 1 (a valid probability distribution over next states). With
# random positive rows the chain is almost surely going to reach equilibrium.
N_dim = 11
Transition = np.random.dirichlet(np.ones(N_dim), N_dim)

# Number of steps for the iterative solution and (at most) for the simulation.
N_tot = 100000

# Initial state vector: all probability mass on state 0.
len_T = len(Transition[0])
init_s = np.zeros(len_T)
init_s[0] = 1
# -
# Iterative solution: repeatedly apply v <- P^T v starting from the initial
# state; after many applications v approaches the equilibrium distribution.
v = init_s[:]
for _ in range(N_tot):
    v = np.dot(Transition.T, v)
# +
'''
Simulate the Markov Chain
'''
#state=[1,0,0,0]#This is the initial sate vector, which indicates the current state. e.g [1,0,0] indicates that the system is in state 0 (I start counting from 0).
state=init_s[:]#start at state 0 automatically
#initialize _visits=[0,0,0,0,0,...,0]
_visits=np.zeros(len(state))
tolerance=1e-7
#The variable ac defines how often looks for convergence, and how many positions takes
#in order to get a mean and variance.
ac=500
fN_tot=float(N_tot)
means=[]
tot_means=[]#track all means, and show a trace plot.
print ('Transition \t' , ' Mean position \t', 'Variance ')
for transition in np.arange(1,N_tot+1):
    '''
    Simulation:
    The next state in the simulation is determined by the multinomial distribution,
    which is included in numpy ( you can find how to samlpe from the multinomial in misc/multinomial.py ).
    To determine if the chain has converged, get the mean positions every 'ac' transitions. This will give you
    a list N_dim*2 means. The chains has convered if the relative variance of the means is below some
    threshold (tolerance variable).
    '''
    # Draw the next one-hot state from the transition probabilities of the
    # current state (np.dot(Transition.T, state) selects the current row of P).
    state= np.random.multinomial(1,np.dot(Transition.T,state))
    #Fortunately, all notations click together. Since states are defined in "binary",
    #we can add them up to obtain the number of visits for each state.
    _visits+=state
    # Running empirical occupation probabilities after `transition` steps.
    equilibrium=_visits/float(transition)
    #get the mean position for 'ac' transitions
    if transition%(ac) != 0:
        #mean position. <i>:=\sum_{j} j*P_{j} (ie position of state * probability of being in this state )
        _m=np.sum( [ e*i for i,e in enumerate(equilibrium)] )
        means.append( _m )
        tot_means.append(_m)
    #after 'ac' transitions, check if it has converged. If not, reset the list of means and continue.
    else:
        _var=np.var(means)
        _mean=np.mean(means)
        # Relative variance of the batch of means as the convergence criterion.
        # NOTE(review): this divides by _mean — fine here since mean positions
        # are positive, but it would misbehave if the mean were ~0.
        if _var/_mean<tolerance:
            s=equilibrium
            print ('converged after: {} transitions. Equilibrium state: \n {}'.format(transition, equilibrium) )
            break
        print( '{0:}\t{3:>9}{1:0.5}\t{3:>9}{2:0.5}'.format(transition , _mean, _var,'') )
        #keep in mind that the variance is not entirely correct, because of autocorrelation.
        means=[]
# -
#The equilibrium one is the same as the iterative one
# Compare the simulated occupation frequencies against the iterated vector.
print( equilibrium)
print( v)
_trace=True #set True to see the plot
if _trace:
    # Trace plot of the running mean state on a log-x axis, to visualise
    # how quickly the simulation settles.
    #=================================== Plots ===================================#
    fig=plt.figure(figsize=(8.5,4))
    fig.subplots_adjust(bottom=0.1, left=0.1, top = 0.97, right=0.97)
    #=============================================================================#
    sub = fig.add_subplot(111)
    #===========================================================================================================================================#
    _len=len(tot_means)
    sub.plot(np.arange(_len), tot_means )
    sub.set_xlabel('step')
    sub.set_ylabel('Mean state')
    sub.yaxis.set_label_coords(-0.07, 0.5)
    sub.xaxis.set_label_coords(0.5, 0.05)
    sub.set_xscale('log')
    sub.set_xlim(1,_len)
    sub.set_ylim(0,N_dim-1)
    plt.show()
#===========================================================================================================================================#
|
Monte_Carlo/Markov-Chain/Discrete_MC.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from pyqubo import Spin, Vector, Placeholder, solve_ising, Constraint
import matplotlib.pyplot as plt
import networkx as nx
# ## グラフ分割問題
#
# 偶数の個数の頂点を持つグラフを2つに分割する。分割されるエッジが最小となる分割方法を見つけたい。
# この問題はIsingモデルにより次のように定式化される。
#
# $$H(s) = \alpha H_{A}(s) + H_{B}(s)$$
# $$H_{A}(s) = \left( \sum_{i \in V} s_{i}\right )^2$$
# $$H_{B}(s) = \sum_{(i, j) \in E} \frac{1-s_{i}s_{j}}{2}$$
#
# $H_{A}(s)$は2つの集合の頂点数が同じになる制約、$H_{B}(s)$は切断されるエッジの個数、$\alpha$はペナルティの強さである。
def plot_graph(E, colors=None):
    """Draw the undirected graph defined by the edge set *E*.

    If *colors* is given (indexable by node id), nodes are colored
    accordingly; otherwise the default node coloring is used.
    """
    graph = nx.Graph()
    graph.add_edges_from(E)
    plt.figure(figsize=(4, 4))
    layout = nx.spring_layout(graph)
    draw_kwargs = {}
    if colors:
        draw_kwargs["node_color"] = [colors[node] for node in graph.nodes]
    nx.draw_networkx(graph, layout, **draw_kwargs)
    plt.axis("off")
    plt.show()
# The edges are given as follows
E = {(0, 6), (2, 4), (7, 5), (0, 4), (2, 0),
     (5, 3), (2, 3), (2, 6), (4, 6), (1, 3),
     (1, 5), (7, 1), (7, 3), (2, 5)}
plot_graph(E)
# Prepare a spin vector s with 8 spins, one per node. Each spin indicates
# which of the two sets the corresponding node belongs to.
# +
# Declare the spin vector
s = Vector("s", 8, spin=True)
# Declare the placeholder for the penalty strength alpha
a = Placeholder("alpha")
# +
# Define the Hamiltonian H_{A} (equal-size-partition constraint)
HA =Constraint(sum(s) ** 2, "num_nodes")
# Define the Hamiltonian H_{B} (number of cut edges)
HB = sum((1.0 - s[i]*s[j]) / 2.0 for (i, j) in E)
H = a * HA + HB
# +
# Compile the model
model = H.compile()
# Obtain the Ising model with alpha=0.1
feed_dict={'alpha': 0.1}
linear, quad, offset = model.to_ising(feed_dict=feed_dict)
# +
# Solve the Ising model
solution = solve_ising(linear, quad)
# Decode the solution
decoded_sol, broken, energy = model.decode_solution(solution, vartype="SPIN", feed_dict=feed_dict)
print("#broken constraints: {}".format(len(broken)))
# -
# Color the graph according to the resulting partition
plot_graph(E, [solution[k]+1 for k in sorted(solution.keys())])
|
notebooks/japanese/graph_partition.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Generate simulated infrastructure telemetry
# Install requiered packages if needed (only once)
# !pip install pytimeparse
# !pip install -i https://test.pypi.org/simple/ v3io-generator --upgrade
# !pip install faker
# !pip install pyarrow --upgrade
# +
import os
import time
import yaml
import pandas as pd
import datetime
import itertools
# DB Connection
import v3io_frames as v3f
# Data generator
from v3io_generator import metrics_generator, deployment_generator
# -
# General definitions
# %env SAVE_TO_KV = True
# %env DEPLOYMENT_TABLE = netops_devices
# ## Create Metadata
# the following section will create a list of devices which are scattered in multiple datacenters
def _create_deployment():
    """Build a fresh device deployment hierarchy and return it as a DataFrame.

    The hierarchy is 2 companies x 2 data centers x 2 devices, with names
    produced by faker providers.
    """
    print('creating deployment')
    generator = deployment_generator.deployment_generator()
    fake = generator.get_faker()
    # Describe the hierarchy: each level gets a count and a faker provider
    # used to generate its names/ids.
    generator.add_level(name='company', number=2, level_type=fake.company)
    generator.add_level('data_center', number=2, level_type=fake.street_name)
    generator.add_level('device', number=2, level_type=fake.msisdn)
    return generator.generate_deployment()
def _is_deployment_exist(path):
# Checking shared path for the devices table
return os.path.exists(f'/v3io/bigdata/{path}')
def _get_deployment_from_kv(path):
    """Load the deployment table from the KV store at *path*.

    Uses the module-level ``client`` (created elsewhere in this notebook).
    """
    print(f'Retrieving deployment from {path}')
    df = client.read(backend='kv', table=path)
    # The device id is the KV key (the index); surface it as a regular column.
    df.index.name = 'device'
    return df.reset_index()
def _save_deployment_to_kv(path, df, client=v3f.Client('framesd:8081')):
    """Persist the deployment DataFrame to the KV store.

    Args:
        path: KV table to write to. BUG FIX: this argument was previously
            ignored and the table name 'netops_devices' was hard-coded.
        df: deployment DataFrame; the 'device' column becomes the KV key.
        client: v3io-frames client. NOTE(review): the default is evaluated
            once at function-definition time, so all default calls share it.
    """
    client.write(backend='kv', table=path, dfs=df, index_cols=['device'])
def get_or_create_deployment(path, save_to_cloud=False, client=v3f.Client('framesd:8081')):
    """Return the deployment table at *path*, creating it when absent.

    A newly created deployment is optionally persisted to the KV store
    when *save_to_cloud* is truthy.
    """
    if _is_deployment_exist(path):
        return _get_deployment_from_kv(path)
    deployment_df = _create_deployment()
    if save_to_cloud:
        _save_deployment_to_kv(path, deployment_df, client)
    return deployment_df
# Create our DB client
client = v3f.Client('framesd:8081')
# NOTE(review): os.environ values are strings, so SAVE_TO_KV='True' (and even
# 'False') is always truthy — save_to_cloud is effectively always enabled here.
deployment_df = get_or_create_deployment(os.environ['DEPLOYMENT_TABLE'], os.environ['SAVE_TO_KV'])
deployment_df
# Read from our KV to make sure we have backup
# verify the table is written
client.read(backend='kv', table='netops_devices')
# ## Add initial values
# Seed each device with baseline metric values used by the generator.
deployment_df['cpu_utilization'] = 70
deployment_df['latency'] = 0
deployment_df['packet_loss'] = 0
deployment_df['throughput'] = 290
deployment_df.head()
# ## Generate simulated metrics per device
# Metrics schema (describe simulated values) is read from `metrics_configuration.yaml`
# +
# Load metrics configuration from YAML file.
# FIX: use safe_load — yaml.load() without an explicit Loader is deprecated
# since PyYAML 5.1 and raises a TypeError on PyYAML >= 6; safe_load also
# avoids arbitrary Python object construction from the config file.
with open('metrics_configuration.yaml', 'r') as f:
    metrics_configuration = yaml.safe_load(f)
# Create metrics generator based on YAML configuration
met_gen = metrics_generator.Generator_df(metrics_configuration, user_hierarchy=deployment_df, initial_timestamp=time.time())
# Generate one hour of per-device metrics, yielded lazily as DataFrames.
metrics = met_gen.generate_range(start_time=datetime.datetime.now(),
                                 end_time=datetime.datetime.now()+datetime.timedelta(hours=1),
                                 as_df=True,
                                 as_iterator=True)
# -
# Materialize the lazily generated metric chunks into one DataFrame.
df = pd.concat(itertools.chain(metrics))
df.head(5)
# ## Save to Iguazio Time-series Database
# uncomment the line below if you want to reset the TSDB table
client.delete(backend='tsdb', table='netops_metrics_jupyter')
# create a new table, need to specify estimated sample rate
client.create(backend='tsdb', table='netops_metrics_jupyter', attrs={'rate': '1/m'})
# write the dataframe into the time-series DB, note the company,data_center,device indexes are automatically converted to search optimized labels
client.write(backend='tsdb', table='netops_metrics_jupyter', dfs=df)
# ## Verify that the data was written
client.read(backend='tsdb', query='select avg(cpu_utilization) as cpu_utilization, avg(latency) as latency, avg(packet_loss) as packet_loss, avg(throughput) as throughput from netops_metrics_jupyter group by company, data_center, device',
            start="now-1d", end='now+1d', multi_index=True, step='5m').head(10)
# ### Save the generated dataset to parquet for future reproducibility
# create directory if it doesn't exist
# !mkdir data
import pyarrow as pa
from pyarrow import parquet as pq
#write the dataframe into a parquet (on iguazio file system)
version = '1.0'
filepath = 'data/netops_metrics.v{}.parquet'.format(version)
pq.write_table(pa.Table.from_pandas(df), filepath)
# ### Reading the data from parquet into the time-series DB
# if we want to reproduce the same results we can rebuild the TSDB from the saved parquet file
# uncomment the line below if you want to reset the TSDB table
client.delete(backend='tsdb', table='netops_metrics_jupyter')
client.create(backend='tsdb', table='netops_metrics_jupyter', attrs={'rate': '1/m'})
# read the parquet into memory and print the head
pqdf = pq.read_table(filepath).to_pandas()
pqdf.head()
# write the dataframe into the time-series DB, uncomment the line below
client.write(backend='tsdb', table='netops_metrics_jupyter', dfs=pqdf)
# verify the table is written
client.read(backend='tsdb', query='select avg(cpu_utilization) as cpu_utilization, avg(latency) as latency, avg(packet_loss) as packet_loss, avg(throughput) as throughput from netops_metrics_jupyter group by company, data_center, device',
            start="now-1d", end='now+1d', multi_index=True, step='5m').head(10)
|
demos/netops/01-generator.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/fansha1994/DS-Unit-2-Kaggle-Challenge/blob/master/module4-classification-metrics/LS_DS18_224_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="zuG74G7mOhUJ" colab_type="text"
# Lambda School Data Science
#
# *Unit 2, Sprint 2, Module 4*
#
# ---
# + [markdown] colab_type="text" id="nCc3XZEyG3XV"
# # Classification Metrics
#
# ## Assignment
# - [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.
# - [ ] Plot a confusion matrix for your Tanzania Waterpumps model.
# - [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 70% accuracy (well above the majority class baseline).
# - [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_
# - [ ] Commit your notebook to your fork of the GitHub repo.
# - [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](http://archive.is/DelgE), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook.
#
#
# ## Stretch Goals
#
# ### Reading
#
# - [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_
# - [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)
# - [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415)
#
#
# ### Doing
# - [ ] Share visualizations in our Slack channel!
# - [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)
# - [ ] Stacking Ensemble. (See module 3 assignment notebook)
# - [ ] More Categorical Encoding. (See module 2 assignment notebook)
# + colab_type="code" id="lsbRiKBoB5RE" colab={}
# %%capture
import sys
# Pick the data location depending on the runtime environment.
# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
    # !pip install category_encoders==2.*
# If you're working locally:
else:
    DATA_PATH = '../data/'
# + colab_type="code" id="BVA1lph8CcNX" colab={}
import pandas as pd
# Merge train_features.csv & train_labels.csv
# (pd.merge with no `on=` joins on the columns the two frames share —
# presumably the waterpump id; verify against the CSVs.)
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
                 pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
# + id="OwkO-fJoOhUo" colab_type="code" colab={}
# %matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import plot_confusion_matrix, classification_report
# + id="Lv3FB598P341" colab_type="code" colab={}
def wrangle(X):
    """Wrangles train, validate, and test sets in the same way.

    Works on a copy of *X*; the input frame is never mutated.
    """
    X = X.copy()

    # Split date_recorded into year/month/day components, then discard it.
    recorded = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
    X['year_recorded'] = recorded.dt.year
    X['month_recorded'] = recorded.dt.month
    X['day_recorded'] = recorded.dt.day
    X = X.drop(columns='date_recorded')

    # Pump age at the time the row was recorded.
    X['years'] = X['year_recorded'] - X['construction_year']

    # Binary target: anything that is not fully functional needs repair.
    if 'status_group' in X.columns:
        X['needs_repair'] = (X['status_group'] != 'functional').astype(int)
        X = X.drop(columns='status_group')

    # recorded_by never varies, id always varies (random), and quantity_group
    # is flagged as a duplicate column — none of them carry signal.
    X = X.drop(columns=['recorded_by', 'id', 'quantity_group'])

    # About 3% of latitudes are tiny values near zero (outside Tanzania);
    # treat them as missing.
    X['latitude'] = X['latitude'].replace(-2e-08, np.nan)

    # In these columns a zero stands in for "unknown".
    for col in ['construction_year', 'longitude', 'latitude', 'gps_height', 'population']:
        X[col] = X[col].replace(0, np.nan)

    return X
# + id="XHZUKuXFP-CJ" colab_type="code" colab={}
# Apply the same cleaning to both splits.
test = wrangle(test)
train = wrangle(train)
# + id="upoArZOaQHBx" colab_type="code" colab={}
target = 'needs_repair'
# Carve a validation set the same size as the test set, stratified on the
# target so both splits keep the same class balance.
train, val = train_test_split(train, test_size=len(test),
                              stratify=train[target], random_state=42)
# Arrange data into X features matrix and y target vector
y_train = train[target]
X_train = train.drop(columns=target)
y_val = val[target]
X_val = val.drop(columns=target)
X_test = test
# + id="8kydWWv3QVl7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5c25e2ba-c01f-49ed-a748-fad69de8033b"
# Majority-class frequency — the accuracy of always predicting that class.
print('Baseline Accuracy:', y_train.value_counts(normalize=True).max())
# + id="u_CShiQcQYaw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="ca19fd58-ace6-49a9-9019-e03de5ceeaa7"
# Make pipeline!
# Encode categoricals as ordinals, mean-impute the NaNs introduced by
# wrangle(), then fit a random forest.
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='mean'),
    RandomForestClassifier(n_estimators=40, random_state=42, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
# Check Metrics
print('Training Accuracy', accuracy_score(y_train, pipeline.predict(X_train)))
print('Validation Accuracy', accuracy_score(y_val, y_pred))
# + id="F7Uvby-PQdn8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="a49ad04d-275f-4cae-9dba-db66bc4e4209"
# NOTE(review): plot_confusion_matrix was removed in scikit-learn 1.2 in
# favour of ConfusionMatrixDisplay.from_estimator — confirm the pinned version.
plot_confusion_matrix(pipeline, X_val, y_val,
                      values_format=".0f",
                      display_labels=['no repair needed', 'repair needed']);
# + id="AVU_dTQCQm5I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="0ea9ee27-10df-40a8-b333-85d6178152c3"
# Per-class precision/recall/F1 on the validation split.
print(classification_report(y_val, pipeline.predict(X_val)))
# + id="7hsIcd8zQsbS" colab_type="code" colab={}
|
module4-classification-metrics/LS_DS18_224_assignment.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn.datasets import load_breast_cancer
cancer=load_breast_cancer()
print(type(cancer))
# print(cancer)
# +
cancer_data=cancer['data']
# print(cancer_data)
# print(cancer_data.shape)
# Labels
cancer_target=cancer['target']
# print(cancer_target) [0,1]
# Feature names
cancer_feature_names=cancer['feature_names']
print(cancer_feature_names)
# Target (class) names
# cancer_names=cancer['target_names']
# print(cancer_names) ['malignant' 'benign']
# Dataset description
cancer_desc=cancer['DESCR']
# print(cancer_desc)
# +
# Shape of the raw data
print(cancer_data.shape) # (569, 30): 30 features for 569 cases; each row is one patient record
# Shape of the raw label vector
print(cancer_target.shape) # 569 rows, one label each: (569,)
# # Split the data into training and test sets
from sklearn.model_selection import train_test_split
cancer_data_train, cancer_data_test, cancer_target_train, cancer_target_test=train_test_split(cancer_data,cancer_target,test_size=0.25,random_state=10)
print('训练集数据的形状为:',cancer_data_train.shape)
print('训练集标签的形状为:',cancer_target_train.shape)
print('测试集数据的形状为:',cancer_data_test.shape)
print('测试集标签的形状为:',cancer_target_test.shape)
# -
import numpy as np
from sklearn.preprocessing import MinMaxScaler
# Fit the min-max scaling rule on the training data only
Scaler=MinMaxScaler().fit(cancer_data_train)
## Apply the rule to the training set
cancer_trainScaler=Scaler.transform(cancer_data_train)
## Apply the rule to the test set
cancer_testScaler=Scaler.transform(cancer_data_test)
# Minimum of the training data before min-max scaling
print(np.min(cancer_data_train))
# Minimum of the training data after min-max scaling
print(np.min(cancer_trainScaler))
# Maximum of the training data before min-max scaling
print(np.max(cancer_data_train))
# Maximum of the training data after min-max scaling
print(np.max(cancer_trainScaler))
# Why apply min-max (or z-score) standardization?
# 1. It removes differences in scale/units between features
# 2. It helps the learning algorithms run/converge faster
|
month06/DATASCIENCE/sklearn/sklearn_practice.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# Cell title (kept as-is): "Data analysis".
'''
数据分析
'''
import pandas as pd
# Read the training and test data.
train_data = pd.read_csv('../Datasets/ames/train.csv')
test_data = pd.read_csv('../Datasets/ames/test.csv')
# -
# Inspect dtypes and non-null counts for both splits.
train_data.info()
test_data.info()
# +
# Cell title (kept as-is): "Data preprocessing".
'''
数据预处理
'''
# Regression target: the house sale price.
y_train = train_data['SalePrice']
def data_preprocess(df):
    """Impute or drop columns based on how much data they are missing.

    Columns with at most 20% missing values are imputed: the most frequent
    value for object columns, the median for int64 columns and the mean for
    float64 columns.  Columns missing more than 20% are dropped.  Returns a
    new DataFrame; *df* itself is not modified.
    """
    for column in df.columns:
        series = df[column]
        if series.isna().sum() > series.size * 0.2:
            df = df.drop([column], axis=1)
            continue
        kind = series.dtype
        if kind == 'object':
            fill_value = series.value_counts().idxmax()
        elif kind == 'int64':
            # Note: an int64 column can only contain NaN after a cast, since
            # pandas promotes int columns with missing values to float64.
            fill_value = series.median()
        elif kind == 'float64':
            fill_value = series.mean()
        else:
            continue  # other dtypes are left untouched
        df = df.fillna({column: fill_value})
    return df
# NOTE(review): the test set is imputed with its own statistics here;
# reusing the training-set statistics would avoid test-set leakage — confirm
# whether this matters for the competition setup.
train_data = data_preprocess(train_data)
test_data = data_preprocess(test_data)
# -
# Feature matrices: drop the row id (and the target from the training set).
X_train = train_data.drop(['Id', 'SalePrice'], axis=1)
X_test = test_data.drop(['Id'], axis=1)
# +
cate_columns = []
num_columns = []
# Identify numeric and categorical features.
for column in X_train.columns:
    if X_train[column].dtype == 'object':
        cate_columns.append(column)
    elif X_train[column].dtype == 'int64' or X_train[column].dtype == 'float64':
        num_columns.append(column)
# -
# Select the numeric features.
num_X_train = X_train[num_columns].values
num_X_test = X_test[num_columns].values
# +
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder()
# One-hot encode the categorical features.
# NOTE(review): the default handle_unknown='error' will raise if the test set
# contains a category unseen during fit — confirm this cannot happen here.
cate_X_train = ohe.fit_transform(X_train[cate_columns]).todense()
cate_X_test = ohe.transform(X_test[cate_columns]).todense()
# +
import numpy as np
# Concatenate the numeric features with the one-hot encodings.
X_train = np.concatenate([num_X_train, cate_X_train], axis=1)
X_test = np.concatenate([num_X_test, cate_X_test], axis=1)
# +
# Gradient-boosted tree regressor with cross-validated hyper-parameter search.
'''
采用梯度提升树回归器,并且交叉验证、超参数寻优。
'''
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import GridSearchCV
# Only the ensemble size is searched; other hyper-parameters keep their defaults.
parameters = {'n_estimators':[50, 100, 200, 500, 1000]}
gbr = GradientBoostingRegressor()
reg = GridSearchCV(gbr, parameters, n_jobs=4, scoring='neg_root_mean_squared_error')
reg.fit(X_train, y_train)
print('最优超参数设定为:%s' %reg.best_params_)
print('交叉验证得到的最佳RMSE为:%f' %-reg.best_score_)
# +
# Use the best model to predict prices for the test features and write the
# Kaggle submission file.
'''
使用最优的模型,依据测试数据的特征进行数值回归。
'''
y_predict = reg.predict(X_test)
submission = pd.DataFrame({'Id': test_data['Id'], 'SalePrice': y_predict})
submission.to_csv('../Kaggle_submissions/ames_submission.csv', index=False)
# -
|
Chapter_8/Section_8.2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Imputing Data
#
# When a dataset has missing values, you can either remove those values or fill them in. In this exercise, you'll work with World Bank GDP (Gross Domestic Product) data to fill in missing values.
# run this code cell to read in the data set
import pandas as pd
# skiprows=4 skips the World Bank banner rows above the real column header.
df = pd.read_csv('../data/gdp_data.csv', skiprows=4)
df.drop('Unnamed: 62', axis=1, inplace=True)
# run this code cell to see what the data looks like
df.head()
# Run this code cell to check how many null values are in the data set
df.isnull().sum()
# There are quite a few null values. Run the code below to plot the data for a few countries in the data set.
# +
import matplotlib.pyplot as plt
# put the data set into long form instead of wide: one (country, year, GDP) row per observation
df_melt = pd.melt(df, id_vars=['Country Name', 'Country Code', 'Indicator Name', 'Indicator Code'], var_name='year', value_name='GDP')
# convert year to a date time
df_melt['year'] = pd.to_datetime(df_melt['year'])
def plot_results(column_name):
    """Plot *column_name* over time for Afghanistan, Albania and Honduras.

    Reads the module-level ``df_melt`` long-form frame.
    """
    fig, ax = plt.subplots(figsize=(8, 6))
    countries = ('Afghanistan', 'Albania', 'Honduras')
    subset = df_melt[df_melt['Country Name'].isin(countries)]
    subset.groupby('Country Name').plot('year', column_name, legend=True, ax=ax)
    ax.legend(labels=list(countries))
# Show the raw series — the gaps are the missing values to be imputed.
plot_results('GDP')
# -
# Afghanistan and Albania are missing data, which show up as gaps in the results.
#
# # Exercise - Part 1
#
# Your first task is to calculate mean GDP for each country and fill in missing values with the country mean. This is a bit tricky to do in pandas. Here are a few links that should be helpful:
# * https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.DataFrame.groupby.html
# * https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.transform.html
# * https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.fillna.html
# +
# TODO: Use the df_melt dataframe and fill in missing values with a country's mean GDP
# If you aren't sure how to do this,
# look up something like "how to group data and fill in nan values in pandas" in a search engine
# Put the results in a new column called 'GDP_filled'.
# transform() keeps the original row alignment, so each country's NaNs are
# replaced by that country's own mean.
df_melt['GDP_filled'] = df_melt.groupby('Country Name')['GDP'].transform(lambda x: x.fillna(x.mean()))
# -
# Plot the results
plot_results('GDP_filled')
# This is somewhat of an improvement. At least there is no missing data; however, because GDP tends to increase over time, the mean GDP is probably not the best way to fill in missing values for this particular case. Next, try using forward fill to deal with any missing values.
# plot_results('GDP_filled')
# # Exercise - Part 2
#
# Use the fillna forward fill method to fill in the missing data. Here is the [documentation](https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.fillna.html). As explained in the course video, forward fill takes previous values to fill in nulls.
#
# The pandas fillna method has a forward fill option. For example, if you wanted to use forward fill on the GDP dataset, you could execute `df_melt['GDP'].fillna(method='ffill')`. However, there are two issues with that code.
# 1. You want to first make sure the data is sorted by year
# 2. You need to group the data by country name so that the forward fill stays within each country
#
# Write code to first sort the df_melt dataframe by year, then group by 'Country Name', and finally use the forward fill method.
# +
# TODO: Use forward fill to fill in missing GDP values
# HINTS: use the sort_values(), groupby(), and fillna() methods
# Sort chronologically first, then forward-fill within each country so
# values never leak across country boundaries.
# NOTE(review): fillna(method=...) on a GroupBy is deprecated in recent
# pandas; .ffill() is the modern equivalent — confirm the pandas version.
df_melt['GDP_ffill'] = df_melt.sort_values('year').groupby('Country Name')['GDP'].fillna(method='ffill')
# -
# plot the results
plot_results('GDP_ffill')
# This looks better at least for the Afghanistan data; however, the Albania data is still missing values. You can fill in the Albania data using back fill. That is what you'll do next.
# # Exercise - Part 3
#
# This part is similar to Part 2, but now you will use backfill. Write code that backfills the missing GDP data.
# +
# TODO: Use back fill to fill in missing GDP values
# HINTS: use the sort_values(), groupby(), and fillna() methods
# BUG FIX: the original chain was split across three bare lines, which Python
# parses as three separate statements — the first assigned the bound `groupby`
# method object to the column, and the second crashed indexing the string
# 'Country Name'. Wrapping the chain in parentheses makes it one expression.
df_melt['GDP_bfill'] = (
    df_melt.sort_values('year')
           .groupby('Country Name')['GDP']
           .fillna(method='ffill')
           .fillna(method='bfill')
)
# -
# plot the results
plot_results('GDP_bfill')
# # Conclusion
#
# In this case, the GDP data for all three countries is now complete. Note that forward fill did not fill all the Albania data because the first data entry in 1960 was NaN. Forward fill would try to fill the 1961 value with the NaN value from 1960.
#
# To completely fill the entire GDP data for all countries, you might have to run both forward fill and back fill. Note as well that the results will be slightly different depending on if you run forward fill first or back fill first. Afghanistan, for example, is missing data in the middle of the data set. Hence forward fill and back fill will have slightly different results.
#
# Run this next code cell to see if running both forward fill and back fill end up filling all the GDP NaN values.
# +
# Run forward fill and backward fill on the GDP data
df_melt['GDP_ff_bf'] = df_melt.sort_values('year').groupby('Country Name')['GDP'].fillna(method='ffill').fillna(method='bfill')
# Check if any GDP values are null
# A result of 0 means every country's GDP series is now fully imputed.
df_melt['GDP_ff_bf'].isnull().sum()
# -
|
lessons/ETLPipelines/10_imputation_exercise/10_imputations_exercise.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
# Global plot defaults for this notebook.
matplotlib.rcParams['font.size'] = 12
matplotlib.rcParams['figure.figsize'] = (13, 3)
matplotlib.rcParams['figure.facecolor'] = '#00000000'
sns.set_style('darkgrid');
data = pd.read_csv("car_data_cleaned.csv")
data.head()
# ### Creating Train set and Test set
# +
# Random Sampling
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(data, test_size = 0.2, random_state = 42)
# ### Preparing the data for Machine Learning Algorithm
# Separate the target from the features.
car_label = train_set['selling_price'].copy()
car_features = train_set.drop('selling_price', axis = 1)
car_features_category = car_features[['car_company', 'variant', 'fuel_type', 'seller_type', 'transmission_type', 'num_of_ownership']]
car_features_category.head()
car_features_num = car_features.drop(['car_company', 'variant', 'fuel_type', 'seller_type', 'transmission_type', 'num_of_ownership'], axis = 1)
car_features_num.head()
# +
# Using Column Transformer to create a pipeline to use StandardScaler to perform feature scaling on num features
# and also converting categorical features into numerical feature using OneHot Encoder
# NOTE(review): OneHotEncoder defaults to handle_unknown='error', so transforming
# a test row with an unseen category will raise — confirm the data rules this out.
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
num_attribs = list(car_features_num)
cat_attribs = ['car_company', 'variant', 'fuel_type', 'seller_type', 'transmission_type', 'num_of_ownership']
pipeline = ColumnTransformer([
    ('std_scaler', StandardScaler(), num_attribs),
    ('cat', OneHotEncoder(), cat_attribs)
])
data_prepared = pipeline.fit_transform(car_features)
# -
data_prepared
# +
# data size after OneHot Encoding
data_prepared.shape
# -
# ### Select and Train a Model
# +
# Multiple Linear Regression
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(data_prepared, car_label)
# +
# Using k-fold cross validation for checking the performance of the linear regression model using r² metric
from sklearn.model_selection import cross_val_score
lin_score = cross_val_score(lin_reg, data_prepared, car_label, scoring='r2', cv = 10).mean()
lin_score
# +
# Decision Tree Regression
# NOTE(review): no random_state is set here (unlike the forest below), so the
# tree's CV score is not reproducible across runs.
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor()
tree_reg.fit(data_prepared, car_label)
# +
# Using k-fold cross validation for checking the performance of the Decision Tree regression model using r² metric
tree_score = cross_val_score(tree_reg, data_prepared, car_label, scoring = 'r2', cv = 10).mean()
tree_score
# +
# Random Forest Regressor
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor(n_estimators=10, random_state=42)
forest_reg.fit(data_prepared, car_label)
# +
# Using k-fold cross validation for checking the performance of the Random Forest regression model using r² metric
forest_score = cross_val_score(forest_reg, data_prepared, car_label, scoring = 'r2', cv = 10).mean()
forest_score
# +
# SVR using linear kernel
from sklearn.svm import SVR
svm_linear_reg = SVR(kernel = 'linear')
svm_linear_reg.fit(data_prepared, car_label)
# +
# Using k-fold cross validation for checking the performance of the model using r² metric
svm_lin_score = cross_val_score(svm_linear_reg, data_prepared, car_label, scoring = 'r2', cv = 10).mean()
svm_lin_score
# -
# As from the above results we can see that Random Forest Regression Model gives the best score on K-Fold cross validation using r² metric. Therefore, Random Forest Regression model will be used to predict the prices
# ### Hyperparameter Tuning of the model
# +
# searching for best parameters of the model using RandomizedSearchCV
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint
# NOTE(review): high=347 appears to hard-code the number of one-hot-encoded
# feature columns; it will silently go stale if the feature set changes.
param_distribs = {
    'n_estimators': randint(low=1, high=200),
    'max_features': randint(low=1, high=347),
}
forest_reg = RandomForestRegressor(random_state=42)
rnd_search = RandomizedSearchCV(forest_reg, param_distributions=param_distribs,
                                n_iter=10, cv=5, scoring='r2', random_state=42)
rnd_search.fit(data_prepared, car_label)
# -
rnd_search.best_estimator_
# +
# checking final score of the model in the test set
final_model = rnd_search.best_estimator_
y_test = test_set['selling_price'].copy()
X_test = test_set.drop('selling_price', axis = 1)
# Reuse the fitted preprocessing pipeline (transform only — never refit on test data).
X_test_prepared = pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)
from sklearn.metrics import r2_score
r2_score(y_test, final_predictions)
# +
# visualization of error rate in the model
# Residual distribution: centred near zero indicates an unbiased model.
sns.displot(y_test - final_predictions, kde = True);
# -
# -
# ### Final Pipeline for prediction
# +
# creating a full pipeline to predict the price of a car
# Bundling preprocessing + model means raw feature rows can be fed straight in.
from sklearn.pipeline import Pipeline
full_pipeline_with_predictor = Pipeline([
    ('preparation', pipeline),
    ('final_model', final_model)
])
full_pipeline_with_predictor.fit(car_features, car_label)
# -
model = full_pipeline_with_predictor
# +
# storing the final model as a pkl
import joblib
joblib.dump(model, 'model.pkl')
|
Modelling.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from tidynamics import msd, acf
from scipy.optimize import curve_fit
import numpy as np
import h5py
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
matplotlib.rcParams['xtick.labelsize']=20
matplotlib.rcParams['ytick.labelsize']=20
matplotlib.rcParams['font.size']=25
matplotlib.rcParams['legend.fontsize']=20
matplotlib.rcParams['axes.labelsize']=20
# Fix: 'text.latex.preamble' must be a single string — list values were
# deprecated in Matplotlib 3.3 and removed in later releases, so the old
# list assignment breaks on current Matplotlib. Join the packages into one
# newline-separated preamble string instead.
matplotlib.rcParams['text.latex.preamble'] = '\n'.join([
    r'\usepackage{bm}',
    r'\usepackage{textgreek}',
    r'\usepackage{sfmath}',
    r'\usepackage{siunitx}',
    r'\sisetup{per-mode=reciprocal} ',
    r'\DeclareSIUnit\Molar{M}',
])
matplotlib.rc('text',usetex=True)
# -
# Job IDs of the simulation runs to analyse — presumably one trajectory
# directory per ID under data_dir (TODO confirm layout).
job_names = ['50591797', '50593010', '50593011']
# Machine-specific mount point; adjust when running on another host.
data_dir = '/Volumes/_work/genius/scratch/'
# +
def fit_shear(time, D):
    # Theoretical MSD along the shear (flow) direction under linear shear:
    # 2*D*t*(1 + (shear_rate*t)^2 / 3).
    # NOTE: reads the module-level global `shear_rate`, which is reassigned
    # inside the analysis loop below — each curve_fit call therefore uses
    # whatever value was set most recently.
    return 2.0 * D * time * (1+ 1./3. * (shear_rate * time) ** 2.0)
def fit(time, D):
    """Ordinary diffusive MSD model, 2*D*t (used for the vorticity direction)."""
    slope = 2.0 * D
    return slope * time
# Analysis parameters: drop the last `cutoff` frames of each trajectory.
cutoff = 1000
particles = 10000
# Per-run storage: 5 shear velocities x 1000 time points (assumes the
# trimmed trajectories have exactly 1000 frames — TODO confirm).
msd_x = np.empty([5,1000])
msd_z = np.empty([5,1000])
# Fitted diffusion coefficients: 5 shear velocities x 3 jobs (column = run).
fit_params = np.empty([5,3])
fit_params_shear = np.empty([5,3])
run = 0
for job_name in job_names:
    for i in range(5):
        j = i+1
        # Row j of the CSV holds the shear parameters for this sub-run.
        with open('./params_dpd_vel.csv') as f:
            line = f.readlines()[j]
        line = line.split(',')
        shear_vel = float(line[0]) / 2.0
        print("Shear velocity:", shear_vel)
        h5file = h5py.File(data_dir + 'le_traj_' + job_name + '[' + str(j) +']/trajectory.h5', 'r')
        # H5MD layout: positions, velocities, periodic images, Lees-Edwards
        # offsets, box edges and timestamps; all trimmed by `cutoff`.
        r = h5file['particles/atoms/position/value'][:-cutoff]
        vel = h5file['particles/atoms/velocity/value'][:-cutoff]
        image = h5file['particles/atoms/image/value'][:-cutoff]
        le_offset = h5file['particles/atoms/lees_edwards_offset/value'][:-cutoff]
        edges = h5file['particles/atoms/box/edges/'][:]
        r_time = h5file['particles/atoms/position/time'][:-cutoff]
        time = r_time-r_time[0]
        shear_rate = shear_vel / edges[1]
        print("Shear rate:", shear_rate)
        # Get trajectory of all particles and unwrap them
        # Add the LE offset
        pos = r + image*edges[None,None,:]
        pos[:,:,0] -= le_offset
        # Pick one random particle for the 3D trajectory plot below.
        selected_part = np.random.randint(0, particles)
        x_traj_single = pos[:, selected_part, 0]
        y_traj_single = pos[:, selected_part, 1]
        z_traj_single = pos[:, selected_part, 2]
        x_traj_all = pos[:, :, 0]
        y_traj_all = pos[:, :, 1]
        z_traj_all = pos[:, :, 2]
        plt.figure()
        ax = plt.axes(projection = '3d')
        ax.plot(x_traj_single, y_traj_single, z_traj_single)
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')
        msd_x_all = np.empty_like(x_traj_all)
        msd_y_all = np.empty_like(y_traj_all)
        msd_z_all = np.empty_like(z_traj_all)
        # Squared displacement from frame 0; the x component subtracts the
        # affine advection due to shear before squaring.
        # NOTE(review): pure-Python double loop over 10000 particles — slow;
        # could be vectorized, left as-is to avoid behavior changes.
        for k in range(particles):
            for l in range(len(x_traj_single)):
                msd_x_all[l,k] = (x_traj_all[l,k] - x_traj_all[0,k] - time[l] * shear_vel * (y_traj_all[0,k] / edges[1] - 0.5)) ** 2.
                msd_y_all[l,k] = (y_traj_all[l,k] - y_traj_all[0,k]) ** 2.
                msd_z_all[l,k] = (z_traj_all[l,k] - z_traj_all[0,k]) ** 2.
        msd_x_mean = np.mean(msd_x_all, axis=1)
        msd_y_mean = np.mean(msd_y_all, axis=1)
        msd_z_mean = np.mean(msd_z_all, axis=1)
        # NOTE(review): msd_x/msd_z rows are overwritten on every job
        # iteration, so after the loops they hold only the LAST job's curves.
        msd_x[i,:] = msd_x_mean
        msd_z[i,:] = msd_z_mean
        opt3, cov3 = curve_fit(fit_shear, time, msd_x_mean)
        print("Diffusion coefficient directly from MSD in shearing dir:", opt3[0])
        opt4, cov4 = curve_fit(fit, time, msd_z_mean)
        print("Diffusion coefficient in the vorticity direction", opt4[0])
        print("Ratio:", opt4[0]/opt3[0])
        # NOTE(review): opt3 (shear-direction fit) is stored in `fit_params`
        # while opt4 (vorticity fit) goes into `fit_params_shear` — the names
        # look swapped relative to the contents; confirm before reusing these
        # arrays elsewhere.
        fit_params[i, run] = opt3
        fit_params_shear[i, run] = opt4
        plt.figure()
        plt.plot(time, msd_x_mean, 'o', color="tab:blue", markersize='2', label="MSD x")
        plt.plot(time, msd_z_mean, 'o', color="tab:orange", markersize='2', label="MSD y")
        plt.plot(time, fit_shear(time, *opt4), color='tab:blue', label = "MSD fit shearing dir \n(D from vorticity direction)")
        plt.legend()
        plt.loglog()
    run += 1
plt.show()
# +
# Summary figure: MSD curves for all shear velocities (top panel) and the
# fitted diffusion coefficients vs shear velocity (bottom panel).
fig, axs = plt.subplots(2,1, figsize=(2*3.375,3*3.375), gridspec_kw={'height_ratios': [2, 1]})
colors=['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple']
for i in range(5):
    # Re-read the shear velocity for row i+1 of the parameter table.
    with open('./params_dpd_vel.csv') as f:
        line = f.readlines()[i+1]
    line = line.split(',')
    shear_vel = float(line[0]) / 2.0
    shear_rate = shear_vel / edges[1]
    axs[0].plot(time, msd_x[i,:], 'o', markersize='1', color=colors[i])
    axs[0].plot(time, fit_shear(time, fit_params_shear[i,0]), 'k:')
    axs[0].plot(time, msd_z[i,:], 'ko', markersize='1')
    # Mean +/- sample std over the three jobs, per direction.
    axs[1].errorbar(2.0*shear_vel, np.mean(fit_params[i,:]), yerr=np.std(fit_params[i,:], ddof=1), marker='o', color=colors[i], capsize = 5)
    axs[1].errorbar(2.0*shear_vel, np.mean(fit_params_shear[i,:]), yerr=np.std(fit_params_shear[i,:], ddof=1), marker='s', color=colors[i], capsize = 5)
axs[0].loglog()
axs[0].set_xlabel('time')
# Fixed typo in the axis label: "displacment" -> "displacement".
axs[0].set_ylabel('mean square displacement')
# Empty plots create legend proxies for the black markers/linestyle.
axs[1].plot([],[], 'ko', label = 'neutral direction')
axs[1].plot([],[], 'ks', label = 'shear direction')
axs[1].plot([],[], 'k:', label = 'fitting curves')
axs[1].legend(loc='lower center', bbox_to_anchor=(0.4, -.9), ncol=2)
axs[1].set_xlim(0.0,1.6)
axs[1].locator_params(axis='x', nbins=8)
axs[1].set_ylabel('diffusion coefficient $D$')
axs[1].set_ylim(0.29,0.31)
axs[1].locator_params(axis='y', nbins=5)
# Reference D value with +/- band — presumably from an unsheared run; the
# magic numbers are undocumented here, confirm their origin.
axs[1].axhline(y=0.29689698, color='k')
axs[1].axhline(y=0.29689698 + 0.00543089, c='k', linestyle = ':')
axs[1].axhline(y=0.29689698 - 0.00543089, c='k', linestyle = ':')
axs[1].set_xlabel('shear velocity $v$')
plt.tight_layout()
plt.savefig("adv_diff_contishear.pdf")
# -
|
notebooks/08-plot_enhanced_diffusion_lin_shear.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
import pickle
# Train a small random-forest classifier on the Iris dataset and persist it.
iris = pd.read_csv("./Data/Iris.csv")
iris.drop("Id", axis=1, inplace = True)
X = iris[['SepalLengthCm','SepalWidthCm','PetalLengthCm','PetalWidthCm']].values
y = iris['Species'].values
# Unseeded 80/20 split — accuracy below varies between runs.
X_train,X_test,y_train,y_test = train_test_split(X,y, test_size=0.2)
iris.head()
model = RandomForestClassifier(n_estimators = 6)
model.fit(X_train, y_train)
prediction = model.predict(X_test)
# Fix: the message wrongly said "Logistic Regression" — the model is a
# random forest. Also pass y_true first per the sklearn convention.
print('The accuracy of Random Forest is: ', metrics.accuracy_score(y_test, prediction))
# Persist the fitted model, then reload it to verify the round trip.
with open('./Model/model.pkl','wb') as fp:
    pickle.dump(model,fp)
with open('./Model/model.pkl','rb') as fp:
    model = pickle.load(fp)
model.predict([[0.2,0.2,0.2,0.2]])[0]
|
Model_Train.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.display import display, Image, HTML, FileLink, FileLinks
# NOTE(review): this github.com URL serves an HTML page rather than the raw
# image, and is missing the `blob/<branch>` path segment — the image likely
# fails to render. A raw.githubusercontent.com URL would be needed; confirm
# against the repository layout.
i = Image(url='https://github.com/UC-Berkeley-I-School/mids-w200-assignments-upstream-spring2021/week_02/week-02-extras/ucb.png')
i
# <h1>Week 2</h1><p>This is an <u>optional</u> notebook version of the week’s material. A different way of seeing the material in action.</p><font size="-2">Jan 13, 2021</font>
# <ul><li>Today we look at the basics of <u>control-of-flow</u> and how python is an object-oriented language. These themes are expanded upon in much more depth and much more rapidly in the coming weeks.</li>
# <li>We look at String <u>object</u> and will use it as an example of how “methods” (essentially functions that are built into an object, tho we don’t always see them) are accessed using the dot operator and we’ll work on the very common/important technique of <u>splicing</u>.</li>
# <li>Last week we touched on the operating system and how we interact with it directly through the terminal window and how we might through Jupyter Notebook. For the fun of it, let's use a python library ("sys" for system) that asks python to tell us what version is loaded on the machine.</li>
# <li>Breakout rooms to practice will follow.</li></ul>
# <h1>control of flow</h1> and python...
# +
import sys

# Guard: this notebook relies on Python 3.6+ features.
# Fix: the original condition `if not sys.version_info.major == 3 and
# sys.version_info.minor >= 6:` parses as `(not major == 3) and (minor >= 6)`
# due to operator precedence, so the error branch never fired on old Python
# 3.x versions. Tuple comparison against (3, 6) expresses the intent directly.
if sys.version_info < (3, 6):
    print("Python 3.6 or higher is required.")
    print("\t\t You are using Python {}.{}.".format(sys.version_info.major, sys.version_info.minor))
    sys.exit(1)
else:
    print("You are using Python {}.{}.".format(sys.version_info.major, sys.version_info.minor))
# -
# <h2>Operators and Assignments</h2>
# <p>A variable, as you know, is a container to a value, e.g., <code>x = 5</code>. <br />
# A var actually consists of two parts: a <u>declaration</u> and an <u>assignment</u>. "<code>x</code>" declares a variable in the <code>name space</code> and the assignment <code>=</code> directs the program to assign a value in the <code>object</code>. If you've programmed in C++, you know we can save a variable the usual way ... and also we can have a variable that points to the memory address of that variable (called a <code>pointer</code>). Pointers are much faster to use - python doesn't use them in the API but behind the scenes it does.</p>
# <p>From here it's just building on a similar theme. = means "assign a value." Other symbols are<br />
# <code>==</code> equivalency (returns a true or a false value)<br />
# <code>+</code> addition<br />
# <code>-</code> subtraction<br />
# <code>*</code> multiplication<br />
# <code>/</code> division<br />
# <code>**</code> exponent<br />
# <code>%</code> modulus<br />
# <code>//</code> integer division
# </p><p>And the fun short cuts for accumulators: <code>+=</code> and <code>-=</code>, such as x += 5 is the same as x = x + 5.</p>
# +
# Demo of the basic arithmetic and comparison operators on an int.
print("-"*50)
print("the many shades of x!")
x = 50
print(x)
print("_"*30, "\n")
# NOTE(review): the result of `x - 5` is discarded (x stays 50) — possibly
# intentional for the demo, but `x -= 5` may have been meant; confirm.
x - 5
print(x)
print(x / 5)
print(x * 5)
print(x ** 5)
print("_"*30, "\n")
x += 5
print(x)
# a nice shortcut is to test equivalency; rather than ask "if == 5, then ... "
print("equivalency is a useful too. Does x == 5?", x == 5)
print("_"*30, "\n")
x -= 5
print(x)
# floor division of an int by a float yields a float (50 // 2.5 == 20.0)
print("integer division: ", (x // 2.5) )
print("modulus: ", (x % 2))
print("-"*50)
# -
# <h2>A first note about objects</h2>
# <p style="border-radius:4px;padding:10px; color:cornflowerblue;">Note that this is an optional discussion to show that our basic python language set are objects and to prepare us for longer discussion a few weeks from now. Do note the parallels between python’s structure of object-hierarchy and the classes we'll create later with the same kind of hierarchy.</p>
#
# <p>First, let’s think about an object-oriented language from the computer's p.o.v. Pretty much everything in a computer is designed to be an hierarchy. Objects are no different; tho we use objects in non-linear ways. At the base of python as a language is the root of all things - an object called <code>object</code>.</p>
# <p>That object has lots of <code>subclasses</code> (or child classes). Here’s an example using just a <code>str</code> object.
#
# <ul><li>
# If we create a variable called "s" and assign it a value "Hello", we've instantiated a copy of python's <code>str</code> class. We can check this by issuing the command <code>type(s)</code> and see <class 'str'> as the output. We can use a similar command with a different syntax, that's reserved for accessing data inside an object (double underscore or <i>dunder</i>), e.g., <code>s.__class__</code>; and see the same result <code><class 'str'></code></li>
# <li>
# Next we can ask who the base (parent) is of the str class: <code>str.__bases__</code> and the result is <code><class 'object'></code>
# </li></ul>
# <pre style="color:maroon">
# [metadata] [class] [instance]
# type object object()
# |__ bases
# |__str .str()
# |__s
# # this is our variable "s"
# </pre>
# <blockquote>
# If you want to see all the subclasses of the parent class object, type the command <code>object.__subclasses__()</code> in your python window and look out! If you want to see all the modules you can import, enter <code>help('modules')</code>.
# <p>Python has <code>metaclasses</code> (like the "type" command that gets data about a class), <code>class</code> as we’ve seen with object (and int, str, list, module and hundreds more), and a behind-the-scenes object called <code>object</code> that in turn starts the hierarchy of data types and modules we use from the python language. This is how python is an OOP language. The syntax of python allows us to be pretty flexible with lots of shortcuts that mayn’t look like other OOPs’. Compare Java’s String constructor (<code>String s = new String("hello")</code> with python's down-and-dirty <code>s = "hello"</code>.
# </blockquote>
# <p>Python is an object-oriented language but is kinda loose - we can use syntax that's commonly employed in procedural languages and use more strict object approaches. We'll tackle this in a couple of weeks.</p>
# <hr>
# <p>For the moment note that python language itself is object-oriented and we see that in its syntax.</p>
# <p>Consider the String object and notice the <span style="color:cornflowerblue;">method</span>s used to affect an instance of a String.</p>
# <p>Think of a String as a contiguous block of characters:
# <pre style="color:maroon;">
# h e l l o
# [0] [1] [2] [3] [4]
# </pre>
# +
s = "Huzzah"
print(s)
"""
The variable s is an instance of the
class String and it has a value. Strings are "immutable"
sequences of Unicode code points - in other words a sequence
of characters identified by the position in RAM of the first letter.
Let's confirm using the type() command. """
print(type(s))
""" Let's use some methods in the string to alter the string """
# upper()/lower() return NEW strings; s itself is unchanged (immutability).
print(s.upper())
print(s.lower())
""" how long is the string? """
# In a notebook the bare expression below echoes its value (6).
len(s)
# -
""" what's the first letter? """
s[0] # remember we start counting at 0
""" what's the 4th letter? """
s[3]
""" Can we find if a particular char is in there? """
# Membership test — False here, since "Huzzah" contains no "i".
"i" in s
# +
""" What if we have a menu of options for people to use? """
legit_options = "123q" # 1, 2, 3, and q (for quit)
user_choice = input("We have 3 options and q to quit:")
""" is the user's choice in the legit_options? """
# NOTE: the empty string "" is `in` every string, so just pressing Enter
# would also count as a "legit" choice here.
user_choice in legit_options
# -
# <p>Great. Now let's apply it and move on to <code>if</code> and <code>while</code> loop.</p>
# +
# Validate a menu choice against the allowed option characters.
legit_options = "123q"
print("-"*50)
print("Welcome to the Lab. Enter your choice from 1, 2, 3 and q[uit]")
user_choice = input("I'm waiting ... ")
if user_choice not in legit_options:
    print("\n\tSorry, bud, that wasn't an option. Bye.")
else:
    print("Hey, that's legit.")
# Follow-up check for the quit option.
if (user_choice == "q"):
    print("you want to quit?")
# +
""" String Slicing """
""" if our string is 'python' we have actually a sequence of
letters in spaces 0, 1, 2, 3, ... n...
The syntax is usually START : STOP : STEP
and there's lots of shorthand techniques to look at. """
s = "<NAME>"
print("start only s[0]: ", s[0]) # from the START
print("one letter from end: s[-1]:", s[-1]) # one letter from the end of the string
print("range 0:3 ",s[0:3]) # from the start up to (not including) index 3
print("from 1 to next-to-last: s[1:-1] ", s[1:-1]) # from 1 to the next-to-last letter
print("extract a chunk: ", s[1:5:2]) # every 2nd letter of s[1:5] (step = 2)
print("use shortcut : ", s[:]) # shortcut for the entire string
print("_"*30)
""" why go forward? Let's go backwards ... """
print(s[::-1]) # a step of -1 walks the string in reverse
# -
# <p> </p>
# <hr/><h2>Starting off with the <code>if</code> statement</h2>
# +
""" python comments use 3 quote marks. """
# and can use hashes, though this notebook prefers """ """ blocks
# if statement, most basic...
# an if statement - think of bigger examples ...
if 5 > 3:
    print("five is indeed bigger than 3")
# an if statement using variables
x = 5
y = 3
if x > y:
    print("and it works with variables")
# -
""" if-elif statement """
# an if-elif statement using variables; with x=5 > y=3 only the first
# branch fires — the elif is skipped once a branch matches.
x = 5
y = 3
if x > y:
    print("Five is indeed bigger!")
elif x == y:
    print("Well, x and y are equal! Weird.")
# +
""" if - else """
# an if-else statement using variables; string comparison with == checks
# character-by-character equality.
x = "Tom"
y = "Turkey"
if x == y:
    print("The two variables are equal.")
else:
    print("No, the two variables are not the same.")
# +
""" There are also lots of shortcuts in python """
x = 82
y = 92
# Single-line if: the body sits on the same line as the condition.
if y > x: print("You scored an A")
""" in the above statement notice the syntax.
If the value of y is greater than that of x,
then print 'You score and A.' """
# Conditional (ternary) expression: value-if-true, condition, value-if-false.
print("Grade A") if y > x else print("B")
# notice the syntax about the outcome (the print)
# appears first, then the comparison - the if statement
# followed by the else. Get familiar with this
# notation because it's used in 'lambda functions'.
# +
""" Combine comparison with 'and' """
a = 5
b = 10
c = 15
# NOTE(review): with these values `a > b and a > c` is False, so the else
# branch runs and "Both statements are true." is unreachable. Possibly
# `b > a and c > a` was intended — confirm the teaching intent before changing.
if a > b and a > c:
    print("Both statements are true.")
else:
    print("something is fishy...")
# +
""" using this kind of syntax is great for ranges """
x = 3
# Chained comparison: equivalent to (1 < x) and (x < 4).
if 1 < x < 4:
    print("she warrants an A+")
# The middle term may be any expression — here x**2 == 9, which is in range.
if 1 < x**2 < 20:
    print("can we do that?! What is x now? ",x**2)
# +
a = 5
b = 10
c = 15
# 'or' succeeds if at least one side is True (here both are).
if 3 < a or 3 < c:
    print("at least one of these is true.")
# +
""" Sometimes we want to check a condition and see if it is true.
If that condition is true, then we might want to do tests on other conditions.
For example, (if the student is in class 100) we want to assign grades.
"""
student_in_class = True
score = 93
# Grade bands: A > 90, B (80, 90], C (70, 80], D otherwise.
if student_in_class:
    if score > 90:
        print("Grade A")
    # Fix: the upper bound was 89, which left score == 90 ungraded
    # (it fell all the way through to D).
    elif score > 80 and score <= 90:
        print("B.")
    # Fix: the condition was `score > 70 and score <= 70`, which can never
    # be true, so the Grade C branch was unreachable.
    elif score > 70 and score <= 80:
        print("Grade C")
    else:
        print("Grade D, sorry.")
# +
# -*- coding: utf-8 -*-
"""There are times when you're coding and you kind of
know what you want to include but you're not finished.
The pass statement allows us to use the code even
if we're not ready to finish the code and not get an error.
"""
# Non-ASCII string values are fine under UTF-8; the variable is unused so far.
student_Name = "明娃"
is_enrolled = True
if is_enrolled:
    pass  # placeholder body — to be filled in later
# -
# <div style="background-color:lightblue;color:black;border-radius:4px;padding:6px;">
# <h2>Examples?</h2>
# <ul><li>You're given a giant string of DNA data and you're looking for a single instance of a mutation (say T where there should be a G): <code>ATTTATATTTAAAGUGAUAA<b>T</b>A</code></ul>
# <li>You've an encryption key at work that needs to select a set of symbols of the day (like an Enigma machine).</li>
# <li>Nightly your company receives a giant, unchecked stream of data from its regional offices. Would you just accept the (dirty) data?</li>
# <li>Don't forget the end-user: a rule of thumb is to do <code>error correction on input</code>, meaning keep data that aren't good from being submitted - otherwise your code would have to check for everything making a lot of extraneous coding and the chance for bugs to get in anyway.</li>
# <li>If statements are awfully useful, even nested-ifs. But what if you have a whole bunch of if-statements (say 7 in a row)? Options?</li></ul></div>
# <h2>While</h2> loops
# <p>are great fun. With a while-loop, we need some condition to be met in order to stop running the script. Here's an example:</p>
# +
# Classic countdown demo: print 5 down to 1, then a closing message.
count = 5
while count:
    print(count)
    count -= 1
print("Go, Buffalo Bills!")
# +
""" Compare the output of these two while loops """
row = int(input("Enter an integer: "))
# while row >= 0:
j = 0
# Prints 0..row on one line (end="" suppresses the newline).
while j <= row:
    print(j, end="")
    j += 1
# +
row = int(input("Enter an integer: "))
# outer loop
# Each outer pass prints 0..row, then shrinks row by one — a shrinking
# triangle of number lines.
while row >= 0:
    j = 0
    #inner loop
    while j <= row:
        print(j, end = " <NAME> ")
        j += 1
    print(" ")
    row -= 1
# -
# <p></p><hr /><h2>Basic Data Types</h2>
# <p>Depending on the kind of data we have we use different kinds of variables to hold those data. Here’s the basics.</p>
# <p>
# <code>integer</code> counting numbers, e.g., 0,2,3,6,333,142396<br />
# <code>floating point</code> 3.141458372<br />
# <code>char</code> ‘a’, ‘b’<br />
# <code>string</code> “Hi, folk” can use single quotes, too, ‘Hey!’<br />
#
# <code>str</code> for text, e.g., x = “Smith”<br />
# <code>int, float, complex</code> for numbers, e.g., i = 3.1432<br />
# <code>sequence</code> types list, tuple, range<br />
# <code>mapping</code> types dict<br />
# <code>boolean</code> types bool<br />
# </p>
# <p>
# There are others but we’ll not use them now. Notice that strings are wrapped in quotes. Numbers are not. This is how python determines the data type.
# </p>
# Examples of the core built-in types.
i = 5
j = 3.1234
c = 'a'
l = ['a','b','c','d'] # list- we're not there yet (grin)
is_cool = True
i = 20
type(i)
# why not use it in data cleaning or checking?
# NOTE(review): `isinstance(input_stream, int)` is the idiomatic check —
# `type(x) == int` also rejects int subclasses.
input_stream = 338293784723892384716
if type(input_stream) == int:
    print("all clear.")
else:
    print("sorry, only ints allowed.")
type(j)
type(c)
type(is_cool)
# <h3>Using variables</h3>
#
# <p>Variables can have <b>global</b> or <b>local</b> scope. A global var can be used anywhere in the code. A local variable is used only in the code block where it is declared.</p>
# +
""" Notice that the quotes can be double or single when
wrapping a string. That’s how we can use the ‘ in
the word let’s. If the quotes are unbalanced then the
code will fail.
"""
s = "Let's go to the beach!"
# `+` on two str objects concatenates them (str.__add__).
print("It's such a great day ... " + s)
# notice that python is loose about concatenation:
# most oop languages won't let us use the + sign to add
# two strings,
# but python is down with it
# -
# NOTE(review): these github.com page URLs are not raw-image URLs, so the
# images likely fail to render inside the notebook; raw.githubusercontent.com
# links would be needed — confirm against the repository.
i = Image(url='https://github.com/UC-Berkeley-I-School/mids-w200-assignments-upstream-spring2021/blob/master/week_02/week-02-extras/namespace.png')
i
# <img src="week-02-extras/namespace.png" />
# <h1>Another look at objects</h1>
# <p>Objects may be hierarchical in that there's a parent class and then we can create copies of that parent <i>and</i> create children of the parent class and add more behaviors to that child class. </p>
# <p>We have then an hierarchy. At the root of all oop is an object called Object. We'll see this more in play when we look at property decorators and get/set methods. Just a heads-up.</p>
# <p>When <code>casting</code> a variable from one data type to another we're actually going up the chain of code and then down another branch. For instance if we cast a string into an integer we're going Up the String chain 'til we hit a parent of both and then the chain in the Integer string.</p>
# <p>Objects are self-contained: they have a name (starting with a capital letter), and the box that is a class there are the properties (kinda like the variables) and the actions that the class can do (kinda like functions; called <b>methods</b>).
# <img src="week-02-extras/object-hierarchy.png"/>
# <hr/>
# <h3>Objects representing things in the physical world</h3>
# <br />
#
i = Image(url="https://github.com/UC-Berkeley-I-School/mids-w200-assignments-upstream-spring2021/blob/master/week_02/week-02-extras/object-hierarchy.png")
i
i = Image(url='https://github.com/UC-Berkeley-I-School/mids-w200-assignments-upstream-spring2021/blob/master/week_02/week-02-extras/babykitty.png')
i
# How might we create a class to represent by cat, BabyKitty?<br />
# <br />Properties of BabyKitty: has fur (<code>Color</code> Orange); has <code>legs</code> (four of 'em, use an integer); has a <code>name</code> (BabyKitty, so use a String) ... She <code>purrs()</code>, <code>eats()</code>, <code>sleeps()</code> and more ... in fact all cats (usually) have these properties so we can construct a conceptual cat! Grouping all these properties and actions together is called <span style="color:red">encapsulation</span>. From our original Cat class we can make copies all day long and add new properties to our spin-off cats - using <span style="color:red">inheritance</span>. All coming up in a few weeks.
# <br />
i = Image(url='https://github.com/UC-Berkeley-I-School/mids-w200-assignments-upstream-spring2021/blob/master/week_02/week-02-extras/catconcept.png')
i
# +
# -*- coding: utf-8 -*-
""" a little nested if script """
""" since python defaults to ASCII unless otherwise informed ... we like UTF-8 """
""" can you get a loan to work remotely in Tahiti? I sure would like to! """
min_salary = 50000.0 # min salary to get loan
min_years = 2 # min years on the job
# get the annual salary
salary = float(input("Enter the annual salary: $ "))
# years on the job
years_on_job = int(input("and the number of years on the job? "))
# check conditions: both thresholds must be met to qualify
# NOTE(review): the consolation message prints "peña colada" — presumably
# "piña colada" was meant; left as-is since it is runtime output.
if salary >= min_salary:
    if years_on_job >= min_years:
        print("\n\tJoy! Tahiti here we come!\n")
    else:
        print('Sorry! The minimum is ', min_years,' to qualify.')
        print("Treat yourself to a consolation peña colada on us.")
else:
    print("Annual income must be at least $", format(min_salary, ',.2f')," to qualify.")
# -
# <div style="background-color:skyblue; border-radius:4px; padding:8px;">
# <b>Notes:</b>
# <ol><li>Encoding directive utf-8. Often automatically added by some IDEs.</li>
# <li>Python """ style comments.</li>
# <li>Defining some global variables (min_salary and min_years)</li>
# <li>Get input from the stdin (keyboard) and converting the input into a float or int type.</li>
# <li>Nested-if statements. Notice the range checking, use of escape-sequences \n\t and format commands.</li>
# <li>print() sends data to the currently selected stdout.</li>
# <li>BTW, note that <code>print()</code> can take no parameters or arguments, or it can take a string e.g., <code>print("Hello, Gunnar")</code>, or it can take a bunch of arguments, separated by a comma: <code>print("Hello, " , name, " what's up? ").</code> This is called <span style="color:red">polymorphism</span>.</li></ol>
# </div>
# <h1>Breakout Rooms</h1>
# <p>Visit the Activity folder in this week's upstream.</p>
# <p style="color:cornflowerblue;">Tribbles, Spiders, and Calculators</p>
# Use what you downloaded for week 2 in Jupyter to create a string variable that prints exactly
# <blockquote>
# <pre>The "trouble with
# Tribbles" is that they
# \\\EAT/// too many MREs.
# </pre>
# </blockquote>
# <p>
# Using <u>one</u> line of Python code, make your
# variable from part 1 <u>print Tribbles backwards</u>
# "<code>selbbirT</code>" 300 times.</p>
# <p>Make a calculator - the purpose is to practice conditions
# (“if” statements) and saving your work as a .py and executing it.</p>
# <p></p>
# <hr/>
# Starter variable for the breakout exercise described in the markdown above.
t = "tribbles"
# answer in class
# <h1>That's it for today. Be well and stay well. Cheers </h1>
# +
# Simple four-function calculator (breakout-room exercise answer).
a = float(input("Enter first number: "))
b = float(input("Enter second number: "))
operator = input("Enter operator: ")
if operator == "+":
    calc = a + b
elif operator == "-":
    calc = a - b
elif operator == "*":
    calc = a * b
elif operator == "/":
    # Fix: guard against division by zero instead of crashing with
    # ZeroDivisionError; reuse the existing "invalid" path.
    calc = a / b if b != 0 else "invalid"
else:
    calc = "invalid"
if calc == "invalid":
    print("Invalid operator")
else:
    print(a, operator, b, "=", calc)
|
SUBMISSIONS/week_02/week-02-extras/Week-02-ExtraNB.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Density and Contour Plots
# Sometimes it is useful to display three-dimensional data in two dimensions using contours or color-coded regions.
# There are three Matplotlib functions that can be helpful for this task: ``plt.contour`` for contour plots, ``plt.contourf`` for filled contour plots, and ``plt.imshow`` for showing images.
# This section looks at several examples of using these. We'll start by setting up the notebook for plotting and importing the functions we will use:
# + tags=[]
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# -
# ## Visualizing a Three-Dimensional Function
# We'll start by demonstrating a contour plot using a function $z = f(x, y)$, using the following particular choice for $f$ (we've seen this before in numpy broadcasting, when we used it as a motivating example for array broadcasting):
def f(x, y):
    """Sample scalar field z = f(x, y) used for the contour-plot demos."""
    ripple = np.cos(10 + y * x) * np.cos(x)
    return np.sin(x) ** 10 + ripple
# A contour plot can be created with the ``plt.contour`` function.
# It takes three arguments: a grid of *x* values, a grid of *y* values, and a grid of *z* values.
# The *x* and *y* values represent positions on the plot, and the *z* values will be represented by the contour levels.
# Perhaps the most straightforward way to prepare such data is to use the ``np.meshgrid`` function, which builds two-dimensional grids from one-dimensional arrays:
# +
# Evaluate f on a 50x40 grid spanning [0, 5] x [0, 5].
x = np.linspace(0, 5, 50)
y = np.linspace(0, 5, 40)
X, Y = np.meshgrid(x, y)
Z = f(X, Y)
# -
# Now let's look at this with a standard line-only contour plot:
plt.contour(X, Y, Z, colors='black');
# Notice that by default when a single color is used, negative values are represented by dashed lines, and positive values by solid lines.
# Alternatively, the lines can be color-coded by specifying a colormap with the ``cmap`` argument.
# Here, we'll also specify that we want more lines to be drawn—20 equally spaced intervals within the data range:
plt.contour(X, Y, Z, 20, cmap='RdGy');
# Here we chose the ``RdGy`` (short for *Red-Gray*) colormap, which is a good choice for centered data.
# Matplotlib has a wide range of colormaps available, which you can easily browse in IPython by doing a tab completion on the ``plt.cm`` module:
# ```
# plt.cm.<TAB>
# ```
#
# Our plot is looking nicer, but the spaces between the lines may be a bit distracting.
# We can change this by switching to a filled contour plot using the ``plt.contourf()`` function (notice the ``f`` at the end), which uses largely the same syntax as ``plt.contour()``.
#
# Additionally, we'll add a ``plt.colorbar()`` command, which automatically creates an additional axis with labeled color information for the plot:
plt.contourf(X, Y, Z, 20, cmap='RdGy')
plt.colorbar();
# # Histograms, Binnings
# A simple histogram can be a great first step in understanding a dataset.
# + tags=[]
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# 1000 samples from a standard normal distribution (unseeded — output varies
# between runs).
data = np.random.randn(1000)
# -
plt.hist(data);
# The ``hist()`` function has many options to tune both the calculation and the display;
# here's an example of a more customized histogram:
plt.hist(data, bins=30, density=True, alpha=0.5,
         histtype='stepfilled', color='steelblue',
         edgecolor='none');
# The ``plt.hist`` docstring has more information on other customization options available.
# I find this combination of ``histtype='stepfilled'`` along with some transparency ``alpha`` to be very useful when comparing histograms of several distributions:
# +
x1 = np.random.normal(0, 0.8, 1000)
x2 = np.random.normal(-2, 1, 1000)
x3 = np.random.normal(3, 2, 1000)
# Shared styling so the three overlaid distributions stay comparable.
kwargs = dict(histtype='stepfilled', alpha=0.3, density=True, bins=40)
plt.hist(x1, **kwargs)
plt.hist(x2, **kwargs)
plt.hist(x3, **kwargs);
# -
# If you would like to simply compute the histogram (that is, count the number of points in a given bin) and not display it, the ``np.histogram()`` function is available:
counts, bin_edges = np.histogram(data, bins=5)
print(counts)
|
day2/10. matplotlib - density, contour, histogram.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import joblib
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import matplotlib.colors as colors
from matplotlib_venn import venn2
import statistics
import random
import csv
# %matplotlib inline
# -
# Silence pandas' SettingWithCopyWarning globally — later cells assign into
# filtered frames on purpose.
pd.options.mode.chained_assignment = None
# +
# Resolve project-relative data paths.
HERE = os.path.dirname(os.path.abspath('__file__'))
ROOT = os.path.abspath(os.path.join(HERE, os.pardir))
DATA = os.path.join(ROOT, 'data')
# ssGSEA pathway scores for the PRAD cohort (KEGG gene sets).
SSGSEA_PRAD_KEGG = os.path.join(
    DATA,
    "kegg_prad.tsv"
)
# Serialized classifier trained on the PRAD data.
TRAINED_MODEL = os.path.join(
    DATA,
    'trained_models',
    "prad_trained_model.joblib"
)
# Phenotype labels (.cls format: header rows followed by space-separated labels).
PRAD_LABELS = os.path.join(
    DATA,
    "phenotype_classes_prad.cls"
)
# DrugCentral interactions mapped to DrugBank identifiers.
DRUGCENTRAL_TO_GENES_ID = os.path.join(
    DATA,
    "drugcentral_mapped_to_drugbank.tsv"
)
# Full HGNC gene-symbol list.
ALL_HGNC_GENES = os.path.join(
    DATA,
    "hgnc_symbols.csv"
)
# KEGG gene sets in .gmt format.
GENE_SETS_KEGG = os.path.join(
    DATA,
    "kegg_geneset_final.gmt"
)
# +
## Label preparation
prad_labels = pd.read_csv(PRAD_LABELS, sep = "\t")
# Drop the .cls header row, name the remaining row 'label', and transpose
# so samples become rows.
prad_labels.drop(prad_labels.index[0], inplace=True)
prad_labels = prad_labels.rename(index={1:'label'})
prad_labels = prad_labels.transpose()
temp_lable = []
# NOTE(review): temp_lable is REASSIGNED (not extended) on every iteration,
# so only the last row's space-split labels survive. This is correct only if
# prad_labels.label holds a single space-separated entry — confirm against
# the .cls file format.
for lable in prad_labels.label:
    temp_lable = lable.split(' ')
# +
## Patient_Pathway dataframe preparation
# Transpose the dataframe's columns and rows (samples become rows).
raw_data = pd.read_csv(SSGSEA_PRAD_KEGG, sep = "\t", index_col=0).transpose()
# Append the label column at position 311 — presumably after the 311 pathway
# score columns; TODO confirm the column count matches.
raw_data.insert(311, "label", temp_lable, True)
# Convert the text labels into numerical values (Normal=0, Tumor=1).
num_labels = {"Normal": 0, "Tumor": 1}
raw_data.label = [num_labels[item] for item in raw_data.label]
lable_list = list(raw_data.label.values)
# # Removing the index column
raw_data.reset_index(drop=True, inplace=True)
# -
def parse_gmt_file(gmt_path: str, min_size=3, max_size=3000):
    """Parse a GSEA .gmt file into a {pathway: [genes]} dictionary.

    Each .gmt line is tab-separated: gene-set name, description, then
    the member genes.  The description column (index 1) is discarded.

    :param gmt_path: path to the .gmt file.
    :param min_size: currently unused -- TODO(review): either apply it
        as a lower bound on gene-set size or drop the parameter.
    :param max_size: currently unused (see above).
    :return: dict mapping each pathway name to its list of gene symbols.
    """
    genesets_dict = {}
    with open(gmt_path) as f:
        for line in f:
            # Split each line once (the original split twice per line).
            fields = line.strip().split("\t")
            genesets_dict[fields[0]] = fields[2:]
    return genesets_dict
# Map each KEGG pathway to its member genes.
pathway_genes_dict = parse_gmt_file(GENE_SETS_KEGG)
#HGNC_ID_map_to_gene_name = pd.read_csv(HGNC_ID_MAP_TO_GENE_NAME, sep = "\t")
# Pool of all HGNC gene symbols used to draw random drug targets below.
all_hgnc_genes = pd.read_csv(ALL_HGNC_GENES, sep = "\t")
# +
## Read the drug->gene network (fetched from the drug2ways repository);
## it is filtered down to the DrugBank rows in the next cell.
drugbank_to_genes_ID = pd.read_csv('https://raw.githubusercontent.com/drug2ways/results/master/networks/data/custom_network.tsv',sep = "\t")
# +
## Keep only DrugBank rows and strip the "drugbank:" prefix from the IDs.
# .copy() detaches the subset from the source frame so the assignment
# below is a plain write, not a chained assignment into a view.
drugbank_to_genes_ID_keep_drugbank = drugbank_to_genes_ID.loc[drugbank_to_genes_ID['source_database'] == "drugbank"].copy()
# Vectorized replacement for the previous per-row .iloc loop: split each
# "drugbank:DBxxxxx" value on the first ':' and keep the identifier.
drugbank_to_genes_ID_keep_drugbank["source"] = (
    drugbank_to_genes_ID_keep_drugbank["source"].str.split(':', n=1).str[1]
)
# +
# DrugCentral drug-target table, reordered and renamed to match the
# DrugBank network columns (source drug, target gene, relation).
drugcentral = pd.read_csv(DRUGCENTRAL_TO_GENES_ID,sep = "\t")
columnsTitles = ['drugbank_id', 'hgnc_symbol', 'relation']
drugcentral = drugcentral.reindex(columns=columnsTitles)
drugcentral.rename(columns={'drugbank_id': 'source', 'hgnc_symbol': 'target', 'relation': 'relation' }, inplace=True)
# +
#drugbank = drugbank_to_genes_ID_keep_drugbank.drop('source_database', 1)
drugbank = drugbank_to_genes_ID_keep_drugbank['source']
#drugcentral = drugcentral['source']
#mixed_df = drugbank.append(drugcentral)
# -
# NOTE(review): only the DrugBank drug IDs reach mixed_df; merging in
# the DrugCentral drugs is currently commented out above.
mixed_df = pd.DataFrame(drugbank)
# +
## Drug dataframe preparation for scoring a pathway from all of its involved genes
# Group the targeting genes based on the drugs
mixed_df_groupby_drug = mixed_df.groupby('source')
# Unique drugs, used below to build the drug -> gene -> pathway tables.
unique_drug = mixed_df["source"].unique()
# +
## Load the trained classifier
# NOTE(review): the open() handle is never closed; joblib.load also
# accepts a path directly -- consider a with-block.
trained_model = joblib.load(open(TRAINED_MODEL, "rb"))
# +
## Build 100 synthetic (drug, gene, relation) tables: every drug gets
## one randomly drawn HGNC gene and a random relation sign in {-1, +1}.
score_list = [-1,1]
synthetic_data_frame = {}
# Hoist the loop-invariant gene pool out of the loops.
gene_pool = list(all_hgnc_genes["hgnc_symbol"])
for i in tqdm(range(100)):
    num_data_frame = str(i)
    # Sampling the full length yields a random permutation of the drugs.
    random_selected_drug = random.sample(list(unique_drug), len(unique_drug))
    # Accumulate rows in a plain list: DataFrame.append copied the whole
    # frame on every call (O(n^2)) and was removed in pandas >= 2.0.
    rows = []
    for drug in random_selected_drug:
        rows.append({
            'drug': drug,
            'Gene': random.sample(gene_pool, 1)[0],
            'relation': random.sample(score_list, 1)[0],
        })
    synthetic_data_frame[num_data_frame] = pd.DataFrame(rows, columns=["drug", 'Gene', 'relation'])
# +
## For each synthetic table, map every (drug, gene) pair onto the KEGG
## pathways containing that gene, carrying the relation as the score.
synthetic_pathway_to_score_data_frame = {}
# Hoist: one set per pathway gives O(1) gene-membership tests instead of
# scanning the gene list for every (pathway, gene) pair.
pathway_gene_sets = {p: set(genes) for p, genes in pathway_genes_dict.items()}
for df_num, df in tqdm(synthetic_data_frame.items()):
    rows = []
    temp_df = df.groupby('drug')
    for drug in df["drug"].unique():
        # Subset of the synthetic table for this drug, as {gene: score}.
        # drop(columns=...) replaces the deprecated positional axis form.
        temp_drug_gene_relation_df = temp_df.get_group(drug).drop(columns="drug")
        temp_gene_score_dict = dict(temp_drug_gene_relation_df.values.tolist())
        for pathway, genes in pathway_gene_sets.items():
            for gene, score in temp_gene_score_dict.items():
                # Record every pathway that contains a targeted gene.
                if gene in genes:
                    rows.append({'drug_ID': drug, 'pathway': pathway, 'affection_rate': score, 'gene_name': gene})
    # Build the frame once per table; DataFrame.append (removed in
    # pandas >= 2.0) copied the entire frame on every call.
    synthetic_pathway_to_score_data_frame[df_num] = pd.DataFrame(rows, columns=['drug_ID', 'pathway', 'affection_rate', 'gene_name'])
# +
## Collapse gene-level scores to one mean score per (drug, pathway).
synthetic_pathway_scores_data_frame = {}
for df_num, df in tqdm(synthetic_pathway_to_score_data_frame.items()):
    rows = []
    pathway_to_score_groupby = df.groupby(['drug_ID','pathway'])
    # NOTE(review): iterating over every input row means a (drug,
    # pathway) pair with k genes is appended k times.  Downstream code
    # collapses the duplicates via dict construction, so the duplicate
    # rows are preserved here on purpose.
    for drug, path, score, gene in df.values:
        # All gene rows for this (drug, pathway) pair.
        group = pathway_to_score_groupby.get_group((drug, path))
        # Mean relation score over the pathway's targeted genes.
        finall_affected_score = group['affection_rate'].sum() / group.shape[0]
        rows.append({'drug_ID': drug, 'Pathway': path, 'Finall_affected_score': finall_affected_score})
    # Build once per table instead of the quadratic DataFrame.append
    # pattern (removed in pandas >= 2.0).
    synthetic_pathway_scores_data_frame[df_num] = pd.DataFrame(rows, columns=['drug_ID', 'Pathway', 'Finall_affected_score'])
# +
## Split the sample table into desired-label vs. remaining patients.
def splite_samples(raw_data, desired_label):
    """Return (matching, non-matching) row subsets of *raw_data*.

    The first frame holds the rows whose 'label' equals
    *desired_label*; the second frame holds every other row.
    """
    mask = raw_data['label'] == desired_label
    matching = raw_data.loc[mask]
    # Concatenating the full table with the matching rows and dropping
    # every row that now appears twice leaves exactly the complement.
    remainder = pd.concat([raw_data, matching]).drop_duplicates(keep=False)
    return matching, remainder
# +
## Per-pathway mean ssGSEA score for tumour vs. normal samples, plus the
## absolute difference used below to rank how discriminative a pathway is.
desired_label_sample, undesired_label_sample = splite_samples(raw_data,1)
desired_label_sample.drop('label', axis=1, inplace=True)
undesired_label_sample.drop('label', axis=1, inplace=True)
# DataFrame.mean() computes all column means at once, replacing the
# previous per-column Python loops; to_dict() preserves column order.
patients_mean_pathway = desired_label_sample.mean().to_dict()
healthy_mean_pathway = undesired_label_sample.mean().to_dict()
patients_mean_pathway_df = pd.DataFrame(patients_mean_pathway.items(), columns=['pathway', 'mean_patient'])
healthy_mean_pathway_df = pd.DataFrame(healthy_mean_pathway.items(), columns=['pathway', 'mean_healthy'])
mean_patient_healthy_pathway = pd.merge(healthy_mean_pathway_df, patients_mean_pathway_df, on=["pathway"])
#mean_patient_healthy_pathway = pd.merge(mean_patient_healthy_pathway, path_coef, on=["pathway"])
# -
# Absolute tumour/normal gap per pathway; largest (most discriminative)
# first.
mean_patient_healthy_pathway["diff_mean"] = abs(mean_patient_healthy_pathway["mean_healthy"] - mean_patient_healthy_pathway["mean_patient"])
mean_patient_healthy_pathway = mean_patient_healthy_pathway.sort_values(by = 'diff_mean',ascending = False)
# +
## Modify the pathway score of each patient with regards to each drug available in drugbank
def path_score_modification(drug_name, raw_data, desired_label,mean_patient_healthy_pathway):
    """Return the desired-label samples with pathway scores rescaled for *drug_name*.

    For every pathway the drug targets, each patient's ssGSEA score is
    replaced by +/-5, +/-10 or +/-20 times its absolute value, depending
    on (a) the sign of the drug's aggregate effect on the pathway and
    (b) which 'diff_mean' quantile band the pathway falls in.

    WARNING(review): this reads the module-level variable ``df`` (the
    synthetic pathway-score table bound by the caller's loop), NOT a
    parameter -- it only works when called from inside that loop.
    Consider passing the table explicitly.
    """
    desired_path_score_changed_sample, undesired_path_score_changed_sample = splite_samples(raw_data, desired_label)
    # Get subset of dataframe with a score per drug per pathway with regards to selected drug
    temp_pathway_drug_all_gene_score = df.groupby('drug_ID')
    pathway_drug_including_all_gene_score = temp_pathway_drug_all_gene_score.get_group(drug_name)
    # Dictionary of pathways affected by the drug to their respective scores
    # (duplicate (drug, pathway) rows from the upstream table collapse here).
    affected_pathway_to_score = {
        pathway: score
        for _, pathway, score in pathway_drug_including_all_gene_score.values
    }
    # For each sample id
    for sample in range(len(desired_path_score_changed_sample)):
        # For each pathway that we have to modify a score in all patients since it is targetted by the drug
        for pathway in affected_pathway_to_score:
            # Get related affection score calculated per drug per pathway
            affection_score = affected_pathway_to_score[pathway]
            if pathway in desired_path_score_changed_sample.columns:
                pathway_column = desired_path_score_changed_sample.columns.get_loc(pathway)
                current_score = desired_path_score_changed_sample.iloc[sample, pathway_column]
                temp_mean_patient_healthy_pathway = mean_patient_healthy_pathway.loc[mean_patient_healthy_pathway['pathway'] == pathway]
                # .iloc[0, 3] is this pathway's 'diff_mean' (columns are
                # pathway, mean_healthy, mean_patient, diff_mean).
                # NOTE(review): the mid-quartile branch multiplies by 5
                # while the below-median branch multiplies by 10 -- the
                # factors may be swapped; confirm the intended ordering.
                if affection_score > 0:
                    if temp_mean_patient_healthy_pathway.iloc[0,3] > np.quantile(mean_patient_healthy_pathway['diff_mean'], 0.75):
                        desired_path_score_changed_sample.iloc[sample, pathway_column] = 20 * abs(current_score)
                    elif np.quantile(mean_patient_healthy_pathway['diff_mean'], 0.75) >= temp_mean_patient_healthy_pathway.iloc[0,3] >= np.quantile(mean_patient_healthy_pathway['diff_mean'], 0.5):
                        desired_path_score_changed_sample.iloc[sample, pathway_column] = 5 * abs(current_score)
                    else:
                        desired_path_score_changed_sample.iloc[sample, pathway_column] = 10 * abs(current_score)
                elif affection_score == 0:
                    desired_path_score_changed_sample.iloc[sample, pathway_column] = current_score
                else:
                    if temp_mean_patient_healthy_pathway.iloc[0,3] > np.quantile(mean_patient_healthy_pathway['diff_mean'], 0.75):
                        desired_path_score_changed_sample.iloc[sample, pathway_column] = -20 * abs(current_score)
                    elif np.quantile(mean_patient_healthy_pathway['diff_mean'], 0.75) >= temp_mean_patient_healthy_pathway.iloc[0,3] >= np.quantile(mean_patient_healthy_pathway['diff_mean'], 0.5):
                        desired_path_score_changed_sample.iloc[sample, pathway_column] = -5 * abs(current_score)
                    else:
                        desired_path_score_changed_sample.iloc[sample, pathway_column] = -10 * abs(current_score)
    return desired_path_score_changed_sample
# -
def auc_per_drug(drug_data_set,model,data,desired_lable, raw_data_set_lable,mean_patient_healthy_pathway):
    """Fraction of tumour samples whose predicted label flips, per drug.

    For every drug in the (module-level) table ``df``, rescale the
    tumour samples' pathway scores with path_score_modification,
    re-predict with the trained classifier, and record the fraction of
    true-tumour samples (label == 1) whose prediction changed.

    WARNING(review): the parameters drug_data_set, model, data and
    raw_data_set_lable are never used -- the body reads the globals
    ``df``, ``raw_data``, ``trained_model`` and ``lable_list`` instead.
    Either wire the parameters through or remove them.
    """
    pathway_drug_score_prad = pd.DataFrame(columns=['drug','label_changed_ratio'])
    for drug in df["drug_ID"].unique():
        ratio = 0
        temp_data_set = path_score_modification(drug,raw_data,desired_lable,mean_patient_healthy_pathway)
        # Keep only the 311 pathway-score columns expected by the model.
        prepared_data_set_for_prediction = temp_data_set.iloc[:,:311]
        prediction = trained_model.predict(prepared_data_set_for_prediction)
        # Count tumour samples (true label 1) whose prediction flipped.
        for l_o,l_p in zip(raw_data.label,prediction):
            if l_o != l_p and l_o == 1:
                ratio = ratio + 1
        # Normalize by the total number of tumour samples.
        ratio = (ratio/lable_list.count(1))
        pathway_drug_score_prad = pathway_drug_score_prad.append({'drug': drug,'label_changed_ratio': ratio},ignore_index=True)
    return pathway_drug_score_prad
# +
## Permutation distribution: for each synthetic table, count the drugs
## that flip >= 80% of the tumour predictions.
permut_dist_rows = []
# NOTE(review): the loop variable MUST stay named ``df`` --
# path_score_modification (via auc_per_drug) reads it as a module-level
# global.
for num_df,df in tqdm(synthetic_pathway_scores_data_frame.items()):
    pathway_drug_score_lihc_df = auc_per_drug(df,trained_model,raw_data,1,raw_data.label,mean_patient_healthy_pathway)
    # sum() over a generator replaces the manual counting loop.
    number_of_prioritized_drugs = sum(
        1 for drug, label_changed_ratio in pathway_drug_score_lihc_df.values
        if label_changed_ratio >= 0.8
    )
    print(num_df,number_of_prioritized_drugs)
    # Accumulate rows in a list; DataFrame.append (quadratic, removed in
    # pandas >= 2.0) copied the whole frame on every call.
    permut_dist_rows.append({'DataFrame_num': num_df, 'Prioritized_drugs_num': number_of_prioritized_drugs})
permut_dist = pd.DataFrame(permut_dist_rows, columns=['DataFrame_num', 'Prioritized_drugs_num'])
# -
permut_dist.to_csv('/home/skhatami/Projects/drug-pathway-revert/drug-pathways-revert/ssGSEA_Results/permut_dist_prad.txt', sep = '\t', index = False)
|
scripts_and_notebooks/Synthetic-drug-pathway_score_dataset_prad.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Part 1 Analysis
# One observable trend is the confirmation that the closer you get to the equator, the hotter it tends to get. The interesting thing is that the hottest temperatures appear around the 20 degrees latitude line; this could be due to the way the earth's axis is tilted. It would be interesting to see if -20 latitude showed similar trends when summertime rolls around for them.
#
# A second observation is that temperatures in the southern hemisphere at this time are overall cooler than in the northern hemisphere, which makes sense as the northern hemisphere is experiencing summer while the southern hemisphere is experiencing winter; the differences are not as stark as they would be at the height of summer and winter due to how close fall and spring are for each hemisphere, but there is still a noticeable difference.
#
# A third observation is wind speed does not appear to be correlated to latitude; this is probably due to other factors affecting wind speed such as mountains, valleys, and ocean currents. Additionally, there is not a correlation between cloudiness and latitude either as their correlation values are not high enough to indicate they are correlated.
# ## Part 2 Analysis
# 
|
Analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # School demographics
# This notebook loads and cleans school enrollment and demographic data from the annual enrollment snapshots produced by the New York City Department of Education.
# ## Import Python libraries and set working directories
import os
import feather
import numpy as np
import pandas as pd
# Resolve the data directories relative to the notebook's parent folder.
input_dir = os.path.join(os.path.dirname(os.getcwd()), 'data', 'input')
intermediate_dir = os.path.join(os.path.dirname(os.getcwd()), 'data', 'intermediate')
output_dir = os.path.join(os.path.dirname(os.getcwd()), 'data', 'output')
# ## Load data and select relevant variables
# The [raw file](http://schools.nyc.gov/NR/rdonlyres/77954FB0-FD24-476B-AB81-3E9BBE8655D9/213559/DemographicSnapshot201213to201617Public_FINAL1.xlsx) comes from the NYC Department of Education (NYCDOE), available [here](http://schools.nyc.gov/Accountability/data/default.htm).
# +
## Load the NYCDOE demographic snapshot and keep the 2013-14 school year.
demographics = pd.read_excel(
    os.path.join(input_dir, 'DemographicSnapshot201213to201617Public_FINAL1.xlsx'),
    # 'sheetname' was renamed to 'sheet_name' in pandas 0.21 and removed
    # in pandas 1.0.
    sheet_name = 'School'
)
# Normalize column names: lowercase, snake_case, '%' -> 'perc'.
demographics.columns = (
    demographics.columns.str.lower()
    .str.replace(' ', '_')
    .str.replace('%', 'perc')
)
# Drop per-grade counts, raw '#' counts and index columns.
demographics.drop([c for c in demographics.columns if ('grade' in c) | ('#' in c) | ('index' in c)], axis = 1, inplace = True)
# Express all percentage variables on a 0-100 scale in one vectorized
# assignment (replaces the previous per-column loop).
percent_vars = [c for c in demographics.columns if 'perc' in c]
demographics[percent_vars] = demographics[percent_vars] * 100
demographics = demographics.loc[demographics['year'] == '2013-14']
demographics.rename(columns = {'perc_multiple_race_categories_not_represented':'perc_multiple_other',
                               'perc_poverty':'perc_free_lunch'}, inplace = True)
demographics.drop(['year'], axis = 1, inplace = True)
demographics.reset_index(inplace = True, drop = True)
# -
# ## Save data
# Save the `demographics` dataframe to a [feather](https://blog.cloudera.com/blog/2016/03/feather-a-fast-on-disk-format-for-data-frames-for-r-and-python-powered-by-apache-arrow/) file in the `data/intermediate` folder.
demographics.to_feather(os.path.join(intermediate_dir, 'df_demographics.feather'))
|
processing/schools_demographics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from sklearn import preprocessing
# Toy feature matrix: 4 samples x 3 features.
x = np.array([[1., -1., 2.0],
              [2., 0., 0.],
              [0., 1., -1.],
              [1.0, 2.0, 0.7]])
# +
x_scaled = preprocessing.scale(x)
## scale() standardizes each feature to zero mean and unit variance.
## The scaling statistics must come from the training data and the SAME
## transform must then be applied to any test data -- scaling train and
## test independently would mismatch the feature distributions.
## StandardScaler (below) keeps the fitted factors around so future
## test data can be transformed consistently.
x_scaled
# -
# axis=0 aggregates down the rows, i.e. one statistic per column/feature
# (axis=1 would aggregate across each row).
x_scaled.mean(axis = 0)
x_scaled.std(axis = 0)
# StandardScaler remembers the per-feature mean/std from fit() so the
# identical transformation can be applied to unseen data later.
scaler = preprocessing.StandardScaler()
scaler.fit(x)
scaler.transform(x)
x_test = [[1,1,0]]
scaler.transform(x_test)
### example: fit on one dataset, reuse the fitted scaler on another
import numpy as np
data = [[0 , 3], [9 , 4], [2 , 7], [1 , 1]]
scaler = preprocessing.StandardScaler()
scaler.fit(data)
# 'first' is the training data standardized with its own statistics, so
# its column means are 0 and first.sum() is ~0; 'second' reuses those
# SAME statistics, so its sum is generally nonzero.
first = scaler.transform(data)
second = scaler.transform([[2 , 13] , [1 , 4] , [10 , 7] , [1 , 9]])
print(first.sum())
print(second.sum())
|
13. Feature Scaling ..... [abhishek201202].ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.2
# language: julia
# name: julia-1.7
# ---
# + [markdown] iooxa={"id": {"block": "kyx6JSIepxhH7qX4neN7", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# # Introduction to JUDI
#
# [JUDI.jl](https://github.com/slimgroup/JUDI.jl) is a framework for large-scale seismic modeling and inversion and designed to enable rapid translations of algorithms to fast and efficient code that scales to industry-size 3D problems. The focus of the package lies on seismic modeling as well as PDE-constrained optimization such as full-waveform inversion ([FWI](https://slimgroup.github.io/JUDI.jl/dev/tutorials/03_constrained_fwi/)) and imaging ([LS-RTM](https://github.com/slimgroup/JUDI.jl/blob/master/examples/scripts/splsrtm_2D.jl)). Wave equations in JUDI are solved with [Devito](https://www.devitoproject.org), a Python domain-specific language for automated finite-difference (FD) computations. JUDI's modeling operators can also be used as layers in (convolutional) neural networks to implement physics-augmented deep learning algorithms. For this, check out JUDI's deep learning extension [JUDI4Flux](https://github.com/slimgroup/JUDI4Flux.jl).
#
# The JUDI software is published in [Witte, P. A., et al. "A large-scale framework for symbolic implementations of seismic inversion algorithms in Julia." Geophysics 84.3 (2019): F57-F71.](https://library.seg.org/doi/10.1190/geo2018-0174.1). For more information on usage of JUDI, you can check the [JUDI reference guide](https://slimgroup.github.io/JUDI.jl/dev/).
#
# This tutorial covers the following topics:
#
# 1. How to set up the geometry/acquisition in a seismic experiment
# 2. How software abstraction in Julia plays in role in the modeling via the abstracted linear operators
# 3. How to set up a mini-cluster to run seismic experiments parallel over shots
# + iooxa={"id": {"block": "naWAqZ0CtFjFcqJw1dBj", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": null}
using JUDI, PyPlot, LinearAlgebra
# + [markdown] iooxa={"id": {"block": "Cfww3LpTSJd2ae4k0nX6", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# # Physical problem setup
# ## Grid
# JUDI relies on a cartesian grid for modeling and inversion. We start by defining the parameters needed for a cartesian grid:
# - A shape
# - A grid spacing in each direction
# - An origin
# + iooxa={"id": {"block": "kB9KKCNF3LuU6csPt5ld", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "OVMzvxZgcQjXevNweT3d", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Cartesian grid definition (2D): point counts, spacing, and origin.
shape = (201, 201) # Number of gridpoints nx, nz
spacing = (10.0, 10.0) # in meters here
origin = (0.0, 0.0) # in meters as well
# + [markdown] iooxa={"id": {"block": "gsjye8bJqSIBBSELPcXp", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# ## Physical object
#
# JUDI defines a few basic types to handle physical object such as the velocity model. The type `PhyisicalParameter` is an [AbstractVector](https://docs.julialang.org/en/v1/manual/interfaces/#man-interface-array) and behaves as a standard vector.
# A `PhysicalParameter` can be constructed in various ways but always require the origin `o` and grid spacing `d` that
# cannot be infered from the array.
#
# ```julia
#
# PhysicalParameter(v::Array{vDT}, d, o) where `v` is an n-dimensional array and n=size(v)
#
# PhysicalParameter(n, d, o; vDT=Float32) Creates a zero PhysicalParameter
#
# PhysicalParameter(v::Array{vDT}, A::PhysicalParameter) Creates a PhysicalParameter from the Array `v` with n, d, o from `A`
#
# PhysicalParameter(v::Array{vDT, N}, n::Tuple, d::Tuple, o::Tuple) where `v` is a vector or nd-array that is reshaped into shape `n`
#
# PhysicalParameter(v::vDT, n::Tuple, d::Tuple, o::Tuple) Creates a constant (single number) PhyicalParameter
#
# ```
#
# Let's make a simple 3-layer velocity model
# + iooxa={"id": {"block": "K15A1ihvqxzquI0Ad9b3", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": null}
# Define the velocity (in km/s = m/ms): a 1.5 background over the whole
# grid, overwritten from depth indices 66 and 134 downward to form a
# three-layer model.
vp = 1.5f0 * ones(Float32, shape)
vp[:, 66:end] .= 2.0f0
vp[:, 134:end] .= 2.5f0
# Wrap the raw array with its grid metadata (spacing, origin).
VP = PhysicalParameter(vp, spacing, origin);
# + [markdown] iooxa={"id": {"block": "nBkywHpS97WDVGlnu4kU", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Let's plot the velocities. Because we adopt a standard cartesian dimension ordering for generality (X, Z) in 2D and (X, Y, Z) in 3D, we plot the transpose of the velocity for proper visualization.
# + iooxa={"id": {"block": "cVIkSWIa4FYOpxGaVvM2", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "j3d9KuFeRSRMcOmNKguZ", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Side-by-side plot of the raw array and the PhysicalParameter
# (transposed because storage order is (X, Z) while imshow expects
# rows = depth).
figure(figsize=(12, 8));
subplot(121);
imshow(vp', cmap="jet", extent=[0, (shape[1]-1)*spacing[1], (shape[2]-1)*spacing[2], 0]);
xlabel("X [m]");ylabel("Depth [m]");
title("vp");
subplot(122);
imshow(VP', cmap="jet", extent=[0, (shape[1]-1)*spacing[1], (shape[2]-1)*spacing[2], 0]);
xlabel("X [m]");ylabel("Depth [m]");
title("vp as a physical parameter");
# + [markdown] iooxa={"id": {"block": "1xcXi80TGcZQvo86kCWX", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Because the physical parameter behaves as vector, we can easily perform standard operations on it.
# + iooxa={"id": {"block": "TlXr2bfUoupI9WyO08fu", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "bKUtISoI6vABjOZPAJNz", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Vector-like algebra works directly on the PhysicalParameter.
norm(VP), extrema(VP), 2f0 .* VP, VP .^ 2
# + [markdown] iooxa={"id": {"block": "gcgBbfFFX2b96Rh9AKhr", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# ## Model
#
# JUDI then provide a `Model` structure that wraps multiple physical parameters together.
# + iooxa={"id": {"block": "eKqQ6mozUNcRNrQsTPyt", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "pXfzOCCXDq75eUKSwMLQ", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Bundle the grid and the squared slowness m = 1/vp^2 into a JUDI Model
# (nb = 80 absorbing-boundary points).
model = Model(shape, spacing, origin, 1f0./vp.^2f0; nb = 80)
# + [markdown] iooxa={"id": {"block": "sbuPS0jnoQHjRKR3Ty3l", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# # Modeling
# Now that we have a seismic model, we will generate a few shot records.
# + [markdown] iooxa={"id": {"block": "5jdosOeoih4YNTM53lGW", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# ## Acquisition Geometry
# The first thing we need is an acquisition geometry. In JUDI, there are two ways to create a Geometry.
# - By hand, as we will show here
# - From a SEGY file, as we will show in a follow-up tutorial
#
# We create a split-spread geometry with sources at the top and receivers at the ocean bottom (top of second layer).
#
# **Note**:
# - For 2D simulation (i.e. wave propagation in a 2D plane), JUDI still takes y-coordinate of source/receiver locations but we can just put them to 0 anyway.
# + iooxa={"id": {"block": "ST3cHT7v3zSTs33HOhci", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": null}
# Source positions: nsrc shots spread evenly across the top of the model.
nsrc = 11
xsrc = range(0f0, (shape[1] -1)*spacing[1], length=nsrc)
ysrc = 0f0 .* xsrc # 2D case, so y is fixed at 0
zsrc = 12.5f0*ones(Float32, nsrc);
# + [markdown] iooxa={"id": {"block": "1UeoKh7KaZZk7tV7h6Mq", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Now this definition creates a single Array of positions, which would correspond to a single simultaneous source (firing at the same time). Since we are interested in single source experiments here, we convert these positions into an Array of Array
# of size `nsrc` where each sub-array is a single source position
# + iooxa={"id": {"block": "iwKcxKxBa8EHfJfSAkBr", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": null}
# One coordinate sub-array per shot: split the flat position arrays into
# per-source cells so each source fires independently.
xsrc, ysrc, zsrc = convertToCell.([xsrc, ysrc, zsrc]);
# + iooxa={"id": {"block": "ZcWScBYo0V8bz6MHZDGu", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": null}
# Ocean-bottom-node (OBN) receiver positions, laid out at the depth of
# the top of the second layer (index 66 of the velocity model).
nrec = 101
xrec = range(0f0, (shape[1] -1)*spacing[1], length=nrec)
yrec = 0f0 # 2D case; a single number is shared by all receivers
zrec = (66*spacing[1])*ones(Float32, nrec);
# + [markdown] iooxa={"id": {"block": "w8FyDx8jn4Omxw1Rbo50", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# The last step needed to create an acquisition geometry is to define a recording time and sampling rate
# + iooxa={"id": {"block": "SDqpqtbGm3eYqDreGuKY", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": null}
# Recording length and sample interval, shared by sources and receivers.
record_time = 2000f0 # Recording time in ms (since we have m/ms for the velocity)
sampling_rate = 4f0; # Let's use a standard 4ms sampling rate
# + [markdown] iooxa={"id": {"block": "5sLox45e2ylqt8ClkerP", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Now we can create the source and receivers geometry
# + iooxa={"id": {"block": "44Mlpm0tBn1H6aVss3Q7", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": null}
# Source-side acquisition geometry (per-shot coordinate cells).
src_geom = Geometry(xsrc, ysrc, zsrc; dt=sampling_rate, t=record_time)
# For the receiver geometry, passing nsrc tells JUDI to reuse the same
# receiver layout for every source.
rec_geom = Geometry(xrec, yrec, zrec, dt=sampling_rate, t=record_time, nsrc=nsrc);
# + [markdown] iooxa={"id": {"block": "sCmEbwcgKqjwqUGscYc5", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Let's visualize the geometry onto the model
# + iooxa={"id": {"block": "e7ApxAHnJdOz0W6EeU8Y", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "EN3H3zRdZVHrh12FrBwI", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Overlay the source and receiver positions on the velocity model.
figure();
imshow(vp', cmap="jet", extent=[0, (shape[1]-1)*spacing[1], (shape[2]-1)*spacing[2], 0]);
scatter(xsrc, zsrc, label=:sources);
scatter(xrec, zrec, label="OBN");
xlabel("X [m]");ylabel("Depth [m]");
legend();
title("acquisition geometry")
# + [markdown] iooxa={"id": {"block": "LxXvTnhtfJnzzqHiFISo", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# ### Source wavelet
#
# For the source wavelet, we will use a standard Ricker wavelet at 10Hz for this tutorial. In practice, this wavelet would be read from a file or estimated during inversion.
# + iooxa={"id": {"block": "X4ZB2XX49RI7KtdjsUF9", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "fr9rzy916GaPybtzxNDa", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# 10 Hz Ricker source wavelet (frequencies are in kHz since time is ms).
f0 = 0.010 # Since we use ms, the frequency is in KHz
wavelet = ricker_wavelet(record_time, sampling_rate, f0);
plot(wavelet)
# + [markdown] iooxa={"id": {"block": "RINEAVfbtUNbhJxcr6Lx", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# ## judiVector
#
# In order to represent seismic data, JUDI provide the `judiVector` type. This type wraps a geometry with the seismic data corresponding to it. Note that `judiVector` works for both shot records at the receiver locations and for the wavelet at the source locations. Let's create one for the source
# + iooxa={"id": {"block": "vPnsedvYv8N3peXDRNcl", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "6YXsI2g1HET04OxI5WBB", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Wrap the wavelet with the source geometry as a JUDI data vector.
q = judiVector(src_geom, wavelet)
# + [markdown] iooxa={"id": {"block": "QNg9ppRu7D24W8paBOKI", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# # Linear operator
#
# Following the math, the seismic data recorded at the receiver locations can be calculated by
#
# $$\mathbf{d}_{obs} = \mathbf{P}_r \mathbf{A}(\mathbf{m})^{-1} \mathbf{P}_s^{\top}\mathbf{q},$$
#
# Here, $\mathbf{q}$ is the source wavelet set up at the firing locations (defined above). $\mathbf{P}_s^{\top}$ injects $\mathbf{q}$ to the entire space. $\mathbf{A}(\mathbf{m})$ is the discretized wave equation parameterized by squared slowness $\mathbf{m}$. By applying the inverse of $\mathbf{A}(\mathbf{m})$ on source $\mathbf{q}$, we acquire the entire wavefield in the space. Finally, $\mathbf{P}_r$ restricts the wavefield at the receiver locations at the recording time.
#
# Now let's define these linear operators, which behave as "matrices".
# + iooxa={"id": {"block": "wJJTb4IHsESS0VM9Niy8", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "VzZYOhdoo71hP0SkotzN", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Receiver restriction operator P_r.
Pr = judiProjection(rec_geom) # receiver interpolation
# + iooxa={"id": {"block": "EHDiTYVOeCEudHKrUDyx", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "WLOVQMIGHMvOKnhAJ0e7", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Source injection/restriction operator P_s.
Ps = judiProjection(src_geom) # Source interpolation
# + iooxa={"id": {"block": "pHAEf89T7Jr1sczHa3vP", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "xs2iC1p8i2q7e8VpZfXn", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
Ainv = judiModeling(model) # Inverse of the discrete wave equation, A(m)^-1
# + [markdown] iooxa={"id": {"block": "6spuEznPNQ3oW0ZWiZQB", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# **WARNING**
# While these three operator are well defined in JUDI, `judiProjection` is a no-op operator and cannot be used by itself but only in combination with a `judiModeling` operator
# + [markdown] iooxa={"id": {"block": "GAxA99A2jS4Z3sTCycg4", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# # Seismic data generation
#
# With the set-up operators above, we can finally generate synthetic data with simple mat-vec product thanks to the abstraction.
# + iooxa={"id": {"block": "mwansk3VCAKldyxRgkXb", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "MWwz6Su2ElVuO8ZIbT5I", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Forward modeling: inject q, solve the wave equation, restrict the
# wavefield to the receiver locations -- one shot record per source.
d_obs = Pr * Ainv * Ps' * q
# + [markdown] iooxa={"id": {"block": "KyvfBXD7IIIoc16Nsghy", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Similarly, we can define a modeling operator ``F`` that combines the modeling operator with source/receiver restriction operators, as
# + iooxa={"id": {"block": "HKdUzzdjY5XNaXctk0yx", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "whWWOZeYRN9RhkbTukMV", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Combined modeling operator mapping a source vector to shot records.
F = Pr * Ainv * Ps'
# + [markdown] iooxa={"id": {"block": "AB5n8XCTzaSCRGZOcMwO", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# We can run the same code to generate the data
# + iooxa={"id": {"block": "4DVJ8YtXP8wcWjpabubG", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "hwBKQfxxtJE46CnBULEW", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Equivalent data generation through the combined operator.
d_obs = F * q
# + [markdown] iooxa={"id": {"block": "PBaqnkddbJ02L73yFvLn", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# There are 11 sources in this experiment, and the data is generated for each shot independently. If we only want to acquire the data for the 6th source for example, then we can run
# + iooxa={"id": {"block": "ID3X7P9AC32ZKs1EDe6P", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "5YSGqIUB7CDWdsOu1R0a", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Index the operator and source alike to model only the 6th shot.
d_obs6 = F[6] * q[6]
# + iooxa={"id": {"block": "LNoTRwZQIUGb9RtNBwwS", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "ioNfqx6wQ8oyFfB52g9s", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Plot every other shot record (sources 2, 4, ..., 10); the 1f-3 factors
# convert ms -> s and m -> km for the axis labels.
data_extent = [xrec[1], xrec[end], 1f-3*record_time, 0]
figure(figsize=(20, 5))
for i=1:5
    subplot(1, 5, i)
    imshow(d_obs.data[2*i], vmin=-1, vmax=1, cmap="Greys", extent=data_extent, aspect="auto")
    xlabel("Receiver position (m)")
    ylabel("Recording time (s)")
    title("xsrc=$(1f-3xsrc[2*i][1])km")
end
tight_layout()
# + [markdown] iooxa={"id": {"block": "gBVL5W2fKEWjPlirkj0Z", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# ## Parallelization -- workers and threads
# + [markdown] iooxa={"id": {"block": "xcxaqs87gR06TXiZwHuf", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# JUDI (based on Devito) uses [shared memory OpenMP parallelism for solving PDEs](https://slimgroup.github.io/JUDI.jl/dev/installation/#Configure-compiler-and-OpenMP). Here, we show how to build up a small 2-worker cluster by calling `addprocs(2)`. Because we set the environment variable `ENV["OMP_DISPLAY_ENV"] = "true"`, we will see the OMP environment printed out on each worker.
#
# We set 4 environment variables related to OpenMP:
#
# `OMP_DISPLAY_ENV` prints out the OpenMP environment on each worker
# `OMP_PROC_BIND` specifies that threads should be bound to physical cores
# `OMP_NUM_THREADS` specifies the number of threads per workers is `1/nw` the number of physical cores
# `GOMP_CPU_AFFINITY` specifies which physical cores the threads run on for each worker
#
# If you run the shell command top during execution, you will see 3 julia processes: the main process and 2 workers. The two workers should generally have about 50% of the system, and load average should tend towards the physical number of cores. When running with multiple workers, we gain the parallelism over sources.
# + iooxa={"id": {"block": "2A133VxRrnWxVHP2Wbcx", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "YA0IVPRCDFN8qJQJg3fG", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
using Distributed
# System information: hardware thread count, and the number of workers to spawn.
nthread = Sys.CPU_THREADS
nw = 2
# OpenMP configuration, set before spawning workers so they inherit it:
# print the OMP environment on startup, bind threads to nearby cores, and
# split the hardware threads evenly between the workers.
ENV["OMP_DISPLAY_ENV"] = "true"
ENV["OMP_PROC_BIND"] = "close"
ENV["OMP_NUM_THREADS"] = "$(div(nthread, nw))"
addprocs(nw; lazy=false)
@show workers()
# Pin each worker's OpenMP threads to a disjoint, contiguous range of cores.
for k in 1:nworkers()
    place1 = (k - 1) * div(nthread,nworkers())
    place2 = (k + 0) * div(nthread,nworkers()) - 1
    @show place1, place2, div(nthread, nw)
    @spawnat workers()[k] ENV["GOMP_CPU_AFFINITY"] = "$(place1)-$(place2)";
end
# Make the packages available on every worker for source-parallel modeling.
@everywhere using Distributed
@everywhere using JUDI
# + iooxa={"id": {"block": "lZ1gf4LaYL8qniTS8pqT", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "OHcCZybOkhcZVhZGHjwv", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# Re-run the forward modeling; sources are now distributed over the workers.
d_obs = F * q
# + [markdown] iooxa={"id": {"block": "krEcPVpS80pS0expxuBd", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# ## Reverse time migration
#
# Reverse time migration ([RTM](https://wiki.seg.org/wiki/Reverse_time_migration)) is a seismic migration method to move the dipping events in the data domain to their supposedly true subsurface positions in the image domain. This is also a linear operation, as shown in our abstraction below.
# + iooxa={"id": {"block": "KY0p428IZbiY0QcJxgIp", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "PJfwAVzKlnS19AaDGRss", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
# judiJacobian linearizes the modeling operator F around source q:
# applying J is Born modeling; applying the adjoint J' is RTM.
J = judiJacobian(F, q) # forward is linearized born modeling, adjoint is reverse time migration
rtm = J' * d_obs
# + iooxa={"id": {"block": "ebVjzcQ2QhNPkbMGgtlf", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}, "outputId": {"block": "5lPkeZ3R0x8LaW3qPEZe", "project": "4qWjnDwuuPUd70Df3LsC", "version": 1}}
figure();
# Transpose the image and give the axes physical extents in meters.
imshow(rtm', cmap="Greys", extent=[0, (shape[1]-1)*spacing[1], (shape[2]-1)*spacing[2], 0]);
xlabel("X [m]");ylabel("Depth [m]");
title("RTM");
|
00_intro_JUDI.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Rationale
# A 'carpet plot' is a 2d representation of fMRI data (voxels x time), very similar to 'The Plot' described by <NAME> ([Power 2017](https://www.sciencedirect.com/science/article/abs/pii/S1053811916303871?via%3Dihub)).
# This visual representation of fMRI data is suited for identifying wide-spread signal fluctuations
# ([Aquino et al., 2020](https://www.sciencedirect.com/science/article/pii/S1053811920301014)),
# which often come from non-neural sources (e.g. head motion).
#
# That said, the carpet plot can also reveal 'real' neural activity, especially when the activity is slow and synchronous, as is the case for **anesthesia-induced burst-suppression** ([Sirmpilatze et al., 2021](https://www.biorxiv.org/content/10.1101/2021.10.15.464515)).
# The `pcarpet` package implements the analytical pipeline used in the [Sirmpilatze et al., 2021](https://www.biorxiv.org/content/10.1101/2021.10.15.464515) paper to identify instances of burst-suppression in anesthetized humans, nonhuman primates, and rats.
#
# ## How it works
# The pipeline consists of the following steps:
#
# 1. First, the necessary data is imported, consisting of a preprocessed fMRI scan (4d NIFTI file) and a mask (3d NIFTI file) defining a single region-of-interest.
# 2. A carpet plot is generated from within the mask. To make wide-spread fluctuations more visually prominent, the voxel time-series (carpet rows) are normalized (z-score) and re-ordered according to their correlation with the mean time-series.
# 3. Principal Component Analysis (PCA) is applied to the carpet matrix (using the `scikit-learn` implementation) and a given number (`ncomp`, default is 5) of first Principal Components - hereafter referred to as 'fPCs' - is extracted. The fPCs (e.g. PC1 - PC5) represent the temporal patterns of activity with the highest explained variance ratios.
# 4. The fPCs are correlated with all voxel time-series within the carpet to get a distribution of Pearson's correlation coefficients (*r*) per fPC.
# 5. The fPCs are also correlated with the entire fMRI scan, including areas outside the mask, to get the brain-wide spatial distribution of each fPC.
# 6. A visual summary of results from steps 1-4 is plotted (example below).
#
# 
#
# The above image corresponds to an instance of burst-suppression in a female long-tailed macaque (*Macaca fascicularis*) anesthetized with isoflurane. The carpet plot (using a cortical mask) shows a wide-spread, slow, quasi-periodic signal fluctuation, which is well captured by PC1. PC1 is positively correlated with most cortical voxel timeseries, resulting in a heavily asymmetric distribution of correlation coefficients (*r*), while PCs 2-4 show symmetric *r* histograms centered on zero. This property can be quantified by taking the median of carpet-wide *r* values (bottom right). According to the terminology introduced in Sirmpilatze et al. 2021, PC1 is an 'asymmetric PC'. Under the right circumstances, the presence of an asymmetric PC in a cortical carpet plot can be an fMRI signature of burst-suppression, with the brain-wide distribution of the asymmetric PC representing a map of burst-suppression (see manuscript for details).
# # Installation
#
# ## a. pip
# You can install the latest release from PyPI via
#
# ```python
# pip install pcarpet
# ```
#
# Pip will try to ensure that the following requirements are satisfied:
#
# 1. Python 3.6 or higher
# 2. [numpy](https://numpy.org/)
# 3. [scipy](https://scipy.org/)
# 4. [matplotlib](https://matplotlib.org/)
# 5. [pandas](https://pandas.pydata.org/)
# 6. [scikit-learn](https://scikit-learn.org/stable/)
# 7. [nibabel](https://nipy.org/nibabel/)
# 8. [ipython](https://ipython.org/)
#
# ## b. Anaconda
# If you are having issues with resolving package dependencies, you can create a virtual environment using [Anaconda](https://www.anaconda.com/products/individual):
#
# 1. Install an Anaconda distribution of python 3, choosing your operating system.
# 2. Download the `environment.yml` file from this repository. You can clone the repository or copy-paste the file contents into a text document on your local computer.
# 3. Open a terminal/anaconda prompt with conda for python 3 in the path.
# 4. Navigate to the directory where the `environment.yml` is stored and run `conda env create -f environment.yml`
# 5. Activate the environment with `conda activate pcarpet-env` (Note: you will always have to activate `pcarpet-env` before using `pcarpet`)
#
|
doc/readme.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="W89KmCZNW_vr" colab_type="text"
# <table style="font-size: 1em; padding: 0; margin: 0;">
#
# <tr style="vertical-align: top; padding: 0; margin: 0;background-color: #ffffff">
# <td style="vertical-align: top; padding: 0; margin: 0; padding-right: 15px;">
# <p style="background: #182AEB; color:#ffffff; text-align:justify; padding: 10px 25px;">
# <strong style="font-size: 1.0em;"><span style="font-size: 1.2em;"><span style="color: #ffffff;">Deep Learning </span> for Satellite Image Classification</span> (Manning Publications)<br/>by <em><NAME></em></strong><br/><br/>
# <strong>> Chapter 1: Deliverable Solution </strong><br/>
# </p>
# + [markdown] id="doNBqBMRW_vu" colab_type="text"
# Load an image. I'm using one from the inbuilt CIFAR10 dataset. I take the first image and delete the rest
# + id="hEO4otSOW_vw" colab_type="code" colab={}
# Load CIFAR-10 and keep only the first training image; the full training
# array is deleted immediately to free memory.
import tensorflow as tf
(x_train, _), (_, _) = tf.keras.datasets.cifar10.load_data()
img = x_train[0]
del x_train
# + [markdown] id="mFEna9dMW_v3" colab_type="text"
# Check the dimensions of the image
# + id="6aG0scwXW_v5" colab_type="code" colab={} outputId="96482278-f518-4d07-f93a-bedbf01a1764"
# Inspect the image dimensions (CIFAR-10 images are 32x32 RGB).
img.shape
# + [markdown] id="uFeQlveKW_wF" colab_type="text"
# Use ```tf.image``` commands, first to double the size of the image
# + id="YzUZE0ToW_wR" colab_type="code" colab={}
# Upscale the image to 64x64. Note tf.image.resize returns float values, so
# the result is cast back to uint8 before plotting further below.
img_array = tf.image.resize(
    img,
    (64,64),
    preserve_aspect_ratio=True,
    antialias=False,
    name=None)
# + [markdown] id="3PRWw4D0W_wW" colab_type="text"
# ... then to rotate the image 90 degrees
# + id="rovZSYFIW_wX" colab_type="code" colab={}
# Rotate the resized image by 90 degrees.
img_r = tf.image.rot90(img_array)
# + [markdown] id="mjP8jfQPW_wc" colab_type="text"
# Plot the result
# + id="RGGrukdCW_wc" colab_type="code" colab={}
# %matplotlib inline
import matplotlib.pyplot as plt
# + id="Qqtnix0YW_wd" colab_type="code" colab={} outputId="2c892f2b-611d-4a45-b338-f2a53bd83aab"
# Show the original and the resized/rotated images side by side; the float
# resize output is cast back to uint8 for display.
plt.subplot(121)
plt.imshow(img, cmap='gray')
plt.subplot(122)
plt.imshow(img_r.numpy().astype('uint8'), cmap='gray')
|
dl-satellite-image/p1_intro/project_data/1. Getting Started.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="I9gUzvnVPCoy"
# ##### Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# + colab={} colab_type="code" id="wuUgEPFW9-V7"
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="Vm7hH0EC97mD"
# # TensorFlow 2.0: Train and save a model
# + [markdown] colab_type="text" id="SW83ZEZg8BN5"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/2/guide/train_and_save"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/guide/train_and_save.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/guide/train_and_save.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="NRXR0hJKPCo2"
# This notebook trains a simple MNIST model to demonstrate the basic workflow for using TensorFlow 2.0 APIs:
#
# 1. Define a model
# 2. Preprocessing your data into a `tf.data.Dataset`
# 3. Train the model with the dataset
# - Use `tf.GradientTape` to compute gradients
# - Use stateful `tf.keras.metrics.*` to collect metrics of interest
# - Log metrics with `tf.summary.*` APIs to view in TensorBoard
# - Use `tf.train.Checkpoint` to save and restore weights
# 4. Export a `SavedModel` with `tf.saved_model` (this is a portable representation of the model that can be imported into C++, JS, Python without knowledge of the original TensorFlow code.)
# 5. Re-import the `SavedModel` and demonstrate its usage in Python.
# + [markdown] colab_type="text" id="vNM_jwND8-PY"
# ## Setup
#
# Import TensorFlow 2.0 Preview Nightly and enable TF 2.0 mode:
# + colab={} colab_type="code" id="LPYWfsC09BJU"
from __future__ import absolute_import, division, print_function
import os
import time
import numpy as np
# + colab={} colab_type="code" id="Z-T4T8IEoQRf"
# !pip install tf-nightly-2.0-preview
import tensorflow as tf
# + colab={} colab_type="code" id="yo5oaJ-hAnk_"
from tensorflow.python.ops import summary_ops_v2
# + [markdown] colab_type="text" id="c3iafiz9PCpA"
# ## Define a model with the tf.Keras API
#
# + [markdown] colab_type="text" id="PtOPPajN6WI3"
# Build a convolutional model using the [tf.Keras API](https://www.tensorflow.org/guide/keras). This model uses the `channel_last` [data format](https://www.tensorflow.org/guide/performance/overview#data_formats).
# + colab={} colab_type="code" id="Z_cEHfTdPCpB"
from tensorflow.keras import layers
def create_model():
    """Build a small MNIST convnet as a tf.keras.Sequential model.

    Returns logits (no softmax); the loss is configured with from_logits=True.
    """
    # Stateless 2x2 max-pooling layer, reused after each convolution.
    max_pool = layers.MaxPooling2D((2, 2), (2, 2), padding='same')
    # The model consists of a sequential chain of layers, so tf.keras.Sequential
    # (a subclass of tf.keras.Model) makes for a compact description.
    return tf.keras.Sequential([
        # Add a channels dimension: (28, 28) -> (28, 28, 1).
        layers.Reshape(
            target_shape=[28, 28, 1],
            input_shape=(28, 28,)),
        layers.Conv2D(2, 5, padding='same', activation=tf.nn.relu),
        max_pool,
        layers.Conv2D(4, 5, padding='same', activation=tf.nn.relu),
        max_pool,
        layers.Flatten(),
        layers.Dense(32, activation=tf.nn.relu),
        layers.Dropout(0.4),
        # Raw logits for the 10 digit classes.
        layers.Dense(10)])
# Loss pairs integer class labels with raw logits; accuracy is a stateful
# metric object shared by the train and test loops below.
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
# + [markdown] colab_type="text" id="UY2CZgNW7_Jy"
# Create the model and optimizer:
# + colab={} colab_type="code" id="YsaXPR6OPCpE"
# Instantiate the network and a plain SGD-with-momentum optimizer.
model = create_model()
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.5)
# + [markdown] colab_type="text" id="7bgV8B1wPCpJ"
# ## Download and create datasets
#
# Load the MNIST dataset into a [tf.data.Dataset](https://www.tensorflow.org/guide/datasets). This provides useful transformations like batching and shuffling.
#
# Note: Keras models can train directly on numpy arrays for small datasets (see [basic classification](../keras/basic_classification.ipynb)). The use of `tf.data` here is to demonstrate the API for applications that need more scalability.
# + colab={} colab_type="code" id="zxmeEGyhPCpL"
# Set up datasets
def mnist_datasets():
    """Load MNIST and return (train, test) as tf.data.Dataset objects."""
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    # Numpy defaults to dtype=float64; TF defaults to float32. Stick with float32.
    x_train, x_test = x_train / np.float32(255), x_test / np.float32(255)
    # Integer labels, as required by the sparse categorical loss/metric.
    y_train, y_test = y_train.astype(np.int64), y_test.astype(np.int64)
    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    return train_dataset, test_dataset
# + colab={} colab_type="code" id="YtB65OO0PCpP"
# Shuffle across the full 60k training examples, then batch both splits.
train_ds, test_ds = mnist_datasets()
train_ds = train_ds.shuffle(60000).batch(100)
test_ds = test_ds.batch(100)
# NOTE(review): Dataset.output_shapes is TF2-preview API; released TF2
# replaced it with Dataset.element_spec — confirm against the pinned version.
print('Dataset will yield tensors of the following shape: {}'.format(train_ds.output_shapes))
# + [markdown] colab_type="text" id="uB_C1r9PPCpU"
# ## Configure training
#
# Note: Keras models include a complete training loop (see [basic classification](../keras/basic_classification.ipynb)). The training process is only defined manually here as a starting point for applications that need deeper customization.
#
# The `train()` function iterates over the training dataset, computing the gradients for each batch and then applying them to the model variables. It periodically outputs summaries.
# + colab={} colab_type="code" id="G8EfprJ1PCpU"
@tf.function
def train_step(model, optimizer, images, labels):
    """Run a single optimization step and return the batch loss tensor."""
    # Record the operations used to compute the loss, so that the gradient
    # of the loss with respect to the variables can be computed.
    with tf.GradientTape() as tape:
        logits = model(images, training=True)
        loss = compute_loss(labels, logits)
        # Side effect: accumulate into the module-level stateful accuracy metric.
        compute_accuracy(labels, logits)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss
def train(model, optimizer, dataset, log_freq=50):
    """Trains model on `dataset` using `optimizer`."""
    start = time.time()
    # Metrics are stateful. They accumulate values and return a cumulative
    # result when you call .result(). Clear accumulated values with .reset_states()
    avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
    # Datasets can be iterated over like any other Python iterable.
    for images, labels in dataset:
        loss = train_step(model, optimizer, images, labels)
        avg_loss(loss)
        # TensorBoard summary logging is disabled; it was guarded by:
        # if tf.equal(optimizer.iterations % log_freq, 0):
        #     summary_ops_v2.scalar('loss', avg_loss.result(), step=optimizer.iterations)
        #     summary_ops_v2.scalar('accuracy', compute_accuracy.result(), step=optimizer.iterations)
        # NOTE(review): with the guard commented out, the resets and the
        # progress print below now run every batch rather than every
        # `log_freq` steps — confirm this is intended.
        avg_loss.reset_states()
        compute_accuracy.reset_states()
        rate = log_freq / (time.time() - start)
        print('Step #%d\tLoss: %.6f (%d steps/sec)' % (optimizer.iterations, loss, rate))
        start = time.time()
# + colab={} colab_type="code" id="6zrAPkdEPCpa"
def test(model, dataset, step_num):
    """Perform an evaluation of `model` on the examples from `dataset`."""
    avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
    for (images, labels) in dataset:
        logits = model(images, training=False)
        avg_loss(compute_loss(labels, logits))
        compute_accuracy(labels, logits)
    print('Model test set loss: {:0.4f} accuracy: {:0.2f}%'.format(
        avg_loss.result(), compute_accuracy.result() * 100))
    # step_num is only consumed by the disabled summary logging below.
    # summary_ops_v2.scalar('loss', avg_loss.result(), step=step_num)
    # summary_ops_v2.scalar('accuracy', compute_accuracy.result(), step=step_num)
# + [markdown] colab_type="text" id="kox8FEeNPCpd"
# ## Configure model directory
#
# Use one directory to save the relevant artifacts—summary logs, checkpoints, and `SavedModel` exports.
# + colab={} colab_type="code" id="bLwFfkYhPCpe"
# Where to save checkpoints, tensorboard summaries, etc.
MODEL_DIR = '/tmp/tensorflow/mnist'
def apply_clean():
    """Delete MODEL_DIR (if it exists) so training starts from scratch."""
    if tf.io.gfile.exists(MODEL_DIR):
        print('Removing existing model dir: {}'.format(MODEL_DIR))
        tf.io.gfile.rmtree(MODEL_DIR)
# + colab={} colab_type="code" id="5DUL7OVYPCph"
# Optional: wipe the existing directory
apply_clean()
# + [markdown] colab_type="text" id="SL51Mdg9PCpj"
# You can configure the output location for the training summaries. Previously, we called `tf.summary.scalar(...)` in the `train()` function, by using the `summary_writer` in a `with` block, you can catch those generated summaries and direct them to a file. View the summaries with `tensorboard --logdir=<model_dir>`
# + colab={} colab_type="code" id="YZgxx95-PCpk"
# Separate TensorBoard summary directories for the train and eval runs.
train_dir = os.path.join(MODEL_DIR, 'summaries', 'train')
test_dir = os.path.join(MODEL_DIR, 'summaries', 'eval')
# Writers are commented out; re-enable together with the scalar logging calls.
# train_summary_writer = summary_ops_v2.create_file_writer(
#     train_dir, flush_millis=10000)
# test_summary_writer = summary_ops_v2.create_file_writer(
#     test_dir, flush_millis=10000, name='test')
# + [markdown] colab_type="text" id="pLSsDST8PCpn"
# ## Configure checkpoints
#
# The `tf.train.Checkpoint` object helps manage which `tf.Variable`s are saved and restored from the checkpoint files.
#
# A checkpoint differs from a `SavedModel` because it additionally keeps track of training-related state, such as momentum variables for a momentum-based optimizer or things like the global step. A checkpoint only stores weights so you'll need the original code to define the computation with those weights.
# + colab={} colab_type="code" id="Xab3feHXPCpp"
checkpoint_dir = os.path.join(MODEL_DIR, 'checkpoints')
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')
# Track both model weights and optimizer state (e.g. momentum slots).
checkpoint = tf.train.Checkpoint(
    model=model, optimizer=optimizer)
# Restore variables on creation if a checkpoint exists.
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
# + [markdown] colab_type="text" id="U0ZNI7rFPCps"
# ## Train
#
# Now that `train()` and `test()` are set up, create a model and train it for some number of epochs:
# + colab={} colab_type="code" id="CagzdcoRFrN9"
NUM_TRAIN_EPOCHS = 1
for i in range(NUM_TRAIN_EPOCHS):
    start = time.time()
    # with train_summary_writer.as_default():
    train(model, optimizer, train_ds)
    end = time.time()
    print('\nTrain time for epoch #{} ({} total steps): {}'.format(
        i + 1, optimizer.iterations, end - start))
    # Per-epoch evaluation is disabled along with the summary writers:
    # with test_summary_writer.as_default():
    #     test(model, test_ds, optimizer.iterations)
    # Save a checkpoint after every epoch.
    checkpoint.save(checkpoint_prefix)
# + [markdown] colab_type="text" id="dc1hV_21PCpw"
# ## Export a SavedModel
# + colab={} colab_type="code" id="NGCnOqMTPCpy"
# Export a language-neutral SavedModel for re-import/serving.
export_path = os.path.join(MODEL_DIR, 'export')
tf.saved_model.save(model, export_path)
# + [markdown] colab_type="text" id="uECIhO7aPCp1"
# ## Restore and run the SavedModel
#
# Restore any `SavedModel` and call it without reference to the original source code. APIs for importing and transforming `SavedModel`s exist for a variety of languages. See the [SavedModel guide](https://www.tensorflow.org/guide/saved_model) for more.
# + colab={} colab_type="code" id="go-t0CvMzrSi"
def import_and_eval():
    """Reload the exported SavedModel and report accuracy on the MNIST test split."""
    # NOTE(review): `tf.saved_model.restore` was TF2-preview API; released TF2
    # uses tf.saved_model.load — confirm before enabling the call below.
    restored_model = tf.saved_model.restore(export_path)
    _, (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    x_test = x_test / np.float32(255)
    y_predict = restored_model(x_test)
    accuracy = compute_accuracy(y_test, y_predict)
    print('Model accuracy: {:0.2f}%'.format(accuracy.result() * 100))
# TODO(brianklee): Activate after v2 import is implemented.
# import_and_eval()
# + colab={} colab_type="code" id="PAZJZ_jbscO4"
|
site/en/r2/tutorials/beginner/tf2_overview.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="jrQq3qzV7piH"
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import time
# %matplotlib inline
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 513} colab_type="code" executionInfo={"elapsed": 10341, "status": "ok", "timestamp": 1531759444556, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-TW70oLGKdm4/AAAAAAAAAAI/AAAAAAAAB9A/uaMFIQDez-s/s50-c-k-no/photo.jpg", "userId": "111963640914363981554"}, "user_tz": -330} id="vAExho-j7piL" outputId="0d22757c-3e23-4d07-e2fe-99995c068dcd"
# Load MNIST with one-hot labels via the TF1 tutorial helper (deprecated in
# later TF releases) and print the split shapes.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print('Shape of training set:', mnist.train.images.shape)
print('Shape of training set labels:', mnist.train.labels.shape)
print('Shape of validation set:', mnist.validation.images.shape)
print('Shape of validation set labels:', mnist.validation.labels.shape)
print('Shape of test set:', mnist.test.images.shape)
print('Shape of test set labels:', mnist.test.labels.shape)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="EP-8nWT37piS"
def create_placeholders():
    """Create the graph inputs.

    Returns:
        A pair (features, labels): float32 placeholders of shape
        (784, m) for inputs and (m, 10) for one-hot labels.
    """
    features = tf.placeholder(tf.float32, shape=(784, None))
    labels = tf.placeholder(tf.float32, shape=(None, 10))
    return features, labels
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="SkTbWxfZ7piW"
def initialize_parameters(layers_dims):
    """Create weight and bias variables for each layer in `layers_dims`.

    Weights use Xavier initialization (seed=1); biases start at zero.
    Keys are 'W1', 'b1', 'W2', 'b2', ...
    """
    parameters = {}
    for layer in range(1, len(layers_dims)):
        w_name = 'W' + str(layer)
        b_name = 'b' + str(layer)
        parameters[w_name] = tf.get_variable(
            shape=[layers_dims[layer], layers_dims[layer - 1]],
            initializer=tf.contrib.layers.xavier_initializer(seed=1),
            name=w_name)
        parameters[b_name] = tf.get_variable(
            shape=[layers_dims[layer], 1],
            initializer=tf.zeros_initializer(),
            name=b_name)
    return parameters
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="_8BRaD-C7piZ"
def forward_propagation(X, parameters):
    """Forward pass of an L-layer network: ReLU hidden layers, linear output.

    Args:
        X: input tensor of shape (n_features, m).
        parameters: dict of 'Wl'/'bl' tensors from initialize_parameters.

    Returns:
        The output-layer logits Z_L of shape (n_classes, m); softmax is
        applied later inside the loss.
    """
    L = len(parameters) // 2  # two entries (W, b) per layer
    values = {'A0': X}
    for l in range(1, L):
        values['Z' + str(l)] = tf.add(tf.matmul(parameters['W' + str(l)], values['A' + str(l - 1)]), parameters['b' + str(l)])
        values['A' + str(l)] = tf.nn.relu(values['Z' + str(l)])
    # Final layer is linear (no ReLU); the loss applies the softmax.
    values['Z' + str(L)] = tf.add(tf.matmul(parameters['W' + str(L)], values['A' + str(L - 1)]), parameters['b' + str(L)])
    return values['Z' + str(L)]
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="Aug65EKv7pid"
def compute_cost(Y, Z):
    """Mean softmax cross-entropy between logits Z (classes, m) and one-hot Y (m, classes)."""
    logits = tf.transpose(Z)  # align logits with Y's (examples, classes) layout
    return tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="yzY78PWq7pig"
def model(X_train, Y_train, X_valid, Y_valid, layers_dims, learning_rate, epochs, minibatch_size=64, print_costs=False):
    """Build, train, and evaluate a fully-connected softmax classifier.

    Args:
        X_train: training features, shape (784, m).
        Y_train: one-hot training labels, shape (m, 10).
        X_valid, Y_valid: validation split in the same layout.
        layers_dims: hidden-layer sizes; input (784) and output (10) are
            prepended/appended inside this function.
        learning_rate: SGD step size.
        epochs: number of passes over the training set.
        minibatch_size: examples per gradient step.
        print_costs: if True, print every 50th epoch cost and plot the curve.

    Returns:
        The trained parameters as a dict of numpy arrays.
    """
    tf.reset_default_graph()  # start from a clean graph on every call
    m = X_train.shape[1]
    X, Y = create_placeholders()
    layers_dims = [784] + layers_dims + [10]
    parameters = initialize_parameters(layers_dims)
    Z = forward_propagation(X, parameters)
    cost = compute_cost(Y, Z)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
    costs = []
    num_minibatches = m // minibatch_size
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        initial_time = time.time()
        for num in range(epochs):
            # Deterministic per-epoch shuffle, seeded by the epoch number.
            permutation = np.random.RandomState(num).permutation(m)
            # NOTE(review): the .reshape looks like a no-op for (m, 10) labels.
            X_train, Y_train = X_train[:, permutation], Y_train[permutation, :].reshape(m, Y_train.shape[1])
            epoch_cost = 0
            for mb in range(num_minibatches):
                _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: X_train[:, mb * minibatch_size : (mb + 1) * minibatch_size], Y: Y_train[mb * minibatch_size : (mb + 1) * minibatch_size, :]})
                epoch_cost += minibatch_cost
            if print_costs and num % 50 == 0:
                print('Cost after epoch', num, '=', epoch_cost / num_minibatches)
            if print_costs and num > 1:
                costs.append(epoch_cost / num_minibatches)
        print("Time taken:", time.time()-initial_time, 'seconds')
        if print_costs:
            plt.plot(np.squeeze(costs))
            plt.xlabel('Epoch number')
            plt.ylabel('Cost')
            plt.show()
        # Materialize the trained variables as numpy arrays.
        parameters = sess.run(parameters)
        # Z has shape (10, m), so tf.argmax's default axis=0 picks the
        # predicted class per example; Y is (m, 10), hence axis=1.
        correct_predictions = tf.equal(tf.argmax(Y, axis=1), tf.argmax(Z))
        accuracy = tf.reduce_mean(tf.cast(correct_predictions, 'float'))
        print('Training Accuracy:', accuracy.eval({X: X_train, Y: Y_train}))
        print('Validation Accuracy:', accuracy.eval({X: X_valid, Y: Y_valid}))
    return parameters
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 466} colab_type="code" executionInfo={"elapsed": 578913, "status": "ok", "timestamp": 1531760669549, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-TW70oLGKdm4/AAAAAAAAAAI/AAAAAAAAB9A/uaMFIQDez-s/s50-c-k-no/photo.jpg", "userId": "111963640914363981554"}, "user_tz": -330} id="5rYPvTQ87pik" outputId="e2d93f60-fc77-405f-c5a3-c4e489b74654"
parameters = model(mnist.train.images.T, mnist.train.labels, mnist.validation.images.T, mnist.validation.labels,learning_rate=0.01, layers_dims=[100, 20], epochs=400, print_costs=True)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 763, "status": "ok", "timestamp": 1531760702298, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-TW70oLGKdm4/AAAAAAAAAAI/AAAAAAAAB9A/uaMFIQDez-s/s50-c-k-no/photo.jpg", "userId": "111963640914363981554"}, "user_tz": -330} id="DF23FqEG7pip" outputId="e76b5f04-688f-4501-dea7-a817d619a423"
def test(X_test, Y_test, parameters):
    """Evaluate test-set accuracy using trained (numpy) parameters.

    Y_test is baked into the graph as a constant; only X is fed at run time.
    """
    X = tf.placeholder(tf.float32, shape=X_test.shape)
    Z = forward_propagation(X, parameters)
    correct_predictions = tf.equal(tf.argmax(Y_test, axis=1), tf.argmax(Z))
    accuracy = tf.reduce_mean(tf.cast(correct_predictions, 'float'))
    with tf.Session() as sess:
        print('Test Accuracy:', accuracy.eval({X: X_test}))
test(mnist.test.images.T, mnist.test.labels, parameters)
|
Neural Network.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Corona status in Finland
# <i>Calculations by <NAME>, <EMAIL>, 2020-03-15</i>
# Almost all COVID-19 cases in Finland were tested before 14.3.2020. It is known that not all cases have been tested, even when the person had symptoms of the disease but the other criteria were not met. However, the data is still useful. <BR>
#
# The disease will spread if preventive measures are not taken, ie social contacts are not minimized.<BR>
#
# ### Comments of this study
# This study is based purely on math. No machine learning is used. It is also good to know that when 60% of the population has had the virus, the spread speed will decrease, and when the immunity level is about 90%, the virus no longer spreads.
#
# Sivun tiedot pohjautuvat Helsingin Sanomien julkaisemaan avoimeen dataan Suomen koronavirus-tartunnoista. HS on kerännyt aineiston julkisista lähteistä: tiedotustilaisuuksista, mediasta ja haastatteluista. Dataa päivitetään aina kun uusia tietoja tulee. https://github.com/HS-Datadesk/koronavirus-avoindata
# %matplotlib inline
# Plotting style and the libraries used throughout the analysis below.
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import pandas as pd
import numpy as np
import urllib.request as request
import json
import datetime
from scipy.optimize import curve_fit
from datetime import datetime, date, timedelta
import time
import matplotlib.dates as mdates
def func(x, a, b, c):
    """Exponential growth model a * exp(b * x) + c used for curve fitting."""
    return c + a * np.exp(b * x)
# ### Finnish Corona data from open data
# Fetch the confirmed Finnish COVID-19 cases from the HS open-data API.
url = 'https://w3qa5ydb4l.execute-api.eu-west-1.amazonaws.com/prod/finnishCoronaData'
with request.urlopen(url) as response:
    source = response.read()
data = json.loads(source)
newdata = data['confirmed']
df = pd.DataFrame.from_dict(newdata)
# Keep only the calendar date; drop the time-of-day component.
df['date'] = pd.to_datetime(df['date'])
df['date'] = df['date'].dt.date
# ### Chinese case in Lapland was separate case. We can drop that.
df = df[df['infectionSourceCountry'] != 'CHN']
df.sort_values('date',inplace = True)
df.tail()
# Daily new-case counts and their cumulative sum, indexed by date.
df1 = df.groupby('date').size().reset_index(name='New Cases')
df1 = df1.set_index(['date'])
df1['Cum'] = df1.cumsum()
# ### Drop current day, statistics for the current day is not ready
#df1 = df1[:-1]
ax = df1['Cum'].plot(style='ro--', title='New cases')
ax.set_xlabel('date')
plt.xticks(rotation=90)
# Turn the date index into a running day count ('day') since the first case.
df1.reset_index(inplace = True)
df1['days'] = df1['date'].shift(-1) - df1['date']
df1['Cumdays'] = df1['days'].cumsum()
df1['Cumdays2'] = df1['Cumdays'].shift(1)
df1['Cumdays2'] = df1['Cumdays2'] / np.timedelta64(1, 'D')
df1 = df1.fillna(0.0)
df1.drop(['days', 'Cumdays'], axis=1, inplace=True)
df1.columns = ['date','New Cases','Cum','day']
df1.head(20)
# Fit the model on the first 18 days only ('malli' = model); later days
# ('havainnot' = observations) are held aside, as testing was reduced.
df_malli = df1[df1['day']<18]
df_havainnot = df1[df1['day']>=18]
df1.plot(kind='bar',x='date',y='New Cases')
df_malli.dtypes
# Least-squares fit of func(x, a, b, c) to the cumulative counts.
x = df_malli['day']
y = df_malli['Cum']
popt, pcov = curve_fit(func, x, y)
popt
# ### Cumulative cases fitted to function
# Plot the fitted curve against the observed cumulative cases.
plt.figure()
plt.plot(df_malli['date'], y, 'ko', label="Total Cases")
plt.plot(df1['date'], func(df1['day'], *popt), 'r-', label="Prediction")
plt.plot(df_havainnot['date'],df_havainnot['Cum'], 'bo', label="Testing reduced")
plt.title('Corona situation in Finland')
plt.xticks(rotation=90)
plt.legend()
plt.show()
# +
def predictTotal(Date, model, start='26.02.2020'):
    """Predict the cumulative case count for a given calendar day.

    Parameters:
        Date (str): target day in ``dd.mm.yyyy`` format.
        model: fitted ``(a, b, c)`` parameters for ``func`` (i.e. ``popt``).
        start (str): day zero of the fitted series, ``dd.mm.yyyy``.
            Defaults to 26.02.2020, which was previously hard-coded;
            parameterized so the model can be re-anchored without editing
            the function.

    Returns:
        The model value at the number of days between ``start`` and
        ``Date``, rounded to the nearest integer.
    """
    date_format = "%d.%m.%Y"
    start_date = datetime.strptime(start, date_format)
    pred_date = datetime.strptime(Date, date_format)
    x = (pred_date - start_date).days
    return round(func(x, *model))
# -
# ### How many cases there will be on some specific day?
note = ""
today = date.today()
# Print a 15-day forecast. "note" attaches a historical reference event
# once the predicted total crosses the matching threshold (highest
# matching branch wins; note persists from earlier iterations when no
# branch matches).
for i in range(15):
    end_date = today + timedelta(days=i)
    day2 = end_date.strftime("%d.%m.%Y")
    prediction = predictTotal(day2,popt)
    if prediction > 74000:
        # NOTE(review): "18.2.2018" in the message below looks like a typo
        # for 18.2.2020 - confirm before relying on it.
        note = " China 18.2.2018. deaths 2000"
    elif prediction > 20000:
        note = " "
    elif prediction > 14800:
        note = " Hubei for Feb. 12: 14,840 new cases and 242 new deaths."
    elif prediction > 12800:
        note = " Italy 12.3.20200, deaths 1000."
    elif prediction > 12000:
        note = " Italy shut down 11.3.2020."
    elif prediction > 9000:
        note = " Italy strict quarantine measures extended to all regions. 9.3.2020"
    elif prediction > 6000:
        note = " Northern Italy under lockdown. 8.3.2020"
    elif prediction > 5000:
        note = " Entire region of Lombardy is in lockdown. 7.3.2020"
    elif prediction > 1000:
        note = ""
    elif prediction > 700:
        note = " First death has happen at least when there has been over 800 cases."
    print(day2 + " cumulative cases: " + str(prediction) + note)
# ### How many people are not aware having Corona today?
# +
# Difference between the 5-days-ahead prediction and today's prediction,
# used as a rough proxy for currently undetected carriers.
today = date.today()
end_date = today + timedelta(days=5)
day1 = today.strftime("%d.%m.%Y")
day2 = end_date.strftime("%d.%m.%Y")
print("Wash your hands, because " + str(predictTotal(day2,popt) - predictTotal(day1,popt)) + " people are out there without knowing having COVID-19!")
# -
#
# |
# Corona in Finland.ipynb
# |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] lc_cell_meme={"current": "a28f51c6-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f532e-495a-11e8-8017-0242ac130002", "previous": null}
# # About: KVM - Set! CentOS 6
#
# ---
#
# Prepare CentOS6 image for KVM using libvirt. KVM and libvirt has been installed already.
#
# CentOS 6 VMイメージを作成するためのNotebook。
# + [markdown] lc_cell_meme={"current": "a28f532e-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f5464-495a-11e8-8017-0242ac130002", "previous": "a28f51c6-495a-11e8-8017-0242ac130002"}
# ## *Operation Note*
#
# *This is a cell for your own recording. ここに経緯を記述*
# + [markdown] lc_cell_meme={"current": "a28f5464-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f55a4-495a-11e8-8017-0242ac130002", "previous": "a28f532e-495a-11e8-8017-0242ac130002"}
# # Notebookと環境のBinding
#
# Inventory中のgroup名でBind対象を指示する。
#
# **VMを起動したいホスト(KVMがインストールされた物理マシン)**を示すInventory中の名前を以下に指定する。
# + lc_cell_meme={"current": "a28f55a4-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f56d0-495a-11e8-8017-0242ac130002", "previous": "a28f5464-495a-11e8-8017-0242ac130002"}
# Inventory group name of the hypervisor host(s) - the physical machines
# with KVM/libvirt installed - that this notebook drives via Ansible.
target_group = 'test-hypervisor'
# + [markdown] lc_cell_meme={"current": "a28f56d0-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f57e8-495a-11e8-8017-0242ac130002", "previous": "a28f55a4-495a-11e8-8017-0242ac130002"}
# Bind対象への疎通状態を確認する。
# + lc_cell_meme={"current": "a28f57e8-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f590a-495a-11e8-8017-0242ac130002", "previous": "a28f56d0-495a-11e8-8017-0242ac130002"}
# !ansible -m ping {target_group}
# + [markdown] lc_cell_meme={"current": "a28f590a-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f5a2c-495a-11e8-8017-0242ac130002", "previous": "a28f57e8-495a-11e8-8017-0242ac130002"}
# Bind対象は以下の条件を満たしている必要がある。**満たしていない場合は、このお手本の操作をBind対象にそのまま適用することはできず、適宜セルの改変が必要。**
# + [markdown] lc_cell_meme={"current": "a28f5a2c-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f5b44-495a-11e8-8017-0242ac130002", "previous": "a28f590a-495a-11e8-8017-0242ac130002"}
# ## 仮想マシン用ブリッジが作成されていること
#
# 仮想マシン用のブリッジが作成されていること。お手本を作成している環境においては、以下のようなインタフェース構成となることを想定している。
#
# - ブリッジ br-eth1 インタフェース ... ここにはサービス用IPアドレスが設定される
# - eth1インタフェース ... Promiscuousモードでサービス用NICと対応付け、br-eth1インタフェースに接続される
# + lc_cell_meme={"current": "a28f5b44-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f5c5c-495a-11e8-8017-0242ac130002", "previous": "a28f5a2c-495a-11e8-8017-0242ac130002"}
# NIC names on the hypervisor: the physical service NIC and the bridge
# the VMs attach to (the bridge carries the service IP address).
external_nic = 'eth1'
bridge_nic = 'br-eth1'
# + lc_cell_meme={"current": "a28f5c5c-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f5d74-495a-11e8-8017-0242ac130002", "previous": "a28f5b44-495a-11e8-8017-0242ac130002"}
# !ansible -a "/sbin/ip addr show {bridge_nic}" {target_group}
# !ansible -a "/sbin/ip addr show {external_nic}" {target_group}
# !ansible -a "/usr/sbin/brctl show {bridge_nic}" {target_group}
# + [markdown] lc_cell_meme={"current": "a28f5d74-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f5e8c-495a-11e8-8017-0242ac130002", "previous": "a28f5c5c-495a-11e8-8017-0242ac130002"}
# ブリッジ用NIC名として br-eth1 を利用する。
# + [markdown] lc_cell_meme={"current": "a28f5e8c-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f5fa4-495a-11e8-8017-0242ac130002", "previous": "a28f5d74-495a-11e8-8017-0242ac130002"}
# **br-eth1, eth1が定義されており、br-eth1にサービス用IPアドレスが定義されていれば**OK。
# + [markdown] lc_cell_meme={"current": "a28f5fa4-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f60bc-495a-11e8-8017-0242ac130002", "previous": "a28f5e8c-495a-11e8-8017-0242ac130002"}
# ## libvirtのNetwork設定が無効化されていること
#
# defaultのNetwork設定が無効化されているかどうかを確認する。
# + lc_cell_meme={"current": "a28f60bc-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f61de-495a-11e8-8017-0242ac130002", "previous": "a28f5fa4-495a-11e8-8017-0242ac130002"}
# !ansible -b -a 'virsh net-list --all' {target_group}
# + [markdown] lc_cell_meme={"current": "a28f61de-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f62f6-495a-11e8-8017-0242ac130002", "previous": "a28f60bc-495a-11e8-8017-0242ac130002"}
# **defaultのstateがinactiveになっていて、かつautostartがnoになっていれば**OK。
# + [markdown] lc_cell_meme={"current": "a28f62f6-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f640e-495a-11e8-8017-0242ac130002", "previous": "a28f61de-495a-11e8-8017-0242ac130002"}
# ## dnsmasqが起動していること
#
# 同じホストで、IPアドレス配布用のdnsmasqが実行されていることを前提としている。
# + lc_cell_meme={"current": "a28f640e-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f6526-495a-11e8-8017-0242ac130002", "previous": "a28f62f6-495a-11e8-8017-0242ac130002"}
# !ansible -b -a 'service dnsmasq status' {target_group}
# + [markdown] lc_cell_meme={"current": "a28f6526-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f6648-495a-11e8-8017-0242ac130002", "previous": "a28f640e-495a-11e8-8017-0242ac130002"}
# **dnsmasq (pid XXXXX) is running と表示されれば**OK。
# + [markdown] lc_cell_meme={"current": "a28f6648-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f676a-495a-11e8-8017-0242ac130002", "previous": "a28f6526-495a-11e8-8017-0242ac130002"}
# ## libvirtが動作していること
#
# libvirtが動作しており、仮想マシン一覧が取得できるかどうかを確認する。
# + lc_cell_meme={"current": "a28f676a-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f6896-495a-11e8-8017-0242ac130002", "previous": "a28f6648-495a-11e8-8017-0242ac130002"}
# !ansible -b -a 'virsh list' {target_group}
# + [markdown] lc_cell_meme={"current": "a28f6896-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f69ae-495a-11e8-8017-0242ac130002", "previous": "a28f676a-495a-11e8-8017-0242ac130002"}
# **エラーメッセージが表示されなければ**OK。
# + [markdown] lc_cell_meme={"current": "a28f69ae-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f6ad0-495a-11e8-8017-0242ac130002", "previous": "a28f6896-495a-11e8-8017-0242ac130002"}
# ## virt-installがインストールされていること
#
# 仮想マシンの作成には、virt-installコマンドを利用する。
# + lc_cell_meme={"current": "a28f6ad0-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f6be8-495a-11e8-8017-0242ac130002", "previous": "a28f69ae-495a-11e8-8017-0242ac130002"}
# !ansible -a 'which virt-install' {target_group}
# + [markdown] lc_cell_meme={"current": "a28f6be8-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f6d00-495a-11e8-8017-0242ac130002", "previous": "a28f6ad0-495a-11e8-8017-0242ac130002"}
# **エラーメッセージが表示されなければ**OK。
# + [markdown] lc_cell_meme={"current": "a28f6d00-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f6e18-495a-11e8-8017-0242ac130002", "previous": "a28f6be8-495a-11e8-8017-0242ac130002"}
# # パラメータの決定
#
# イメージ作成により、以下の2つのファイルがBinding対象ホストに作成される。
#
# - base.img
# - libvirt-base.xml
#
# このファイルを作成するディレクトリのパスと、イメージのサイズ(GB)を指定する。
# + lc_cell_meme={"current": "a28f6e18-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f6f3a-495a-11e8-8017-0242ac130002", "previous": "a28f6d00-495a-11e8-8017-0242ac130002"}
# Directory on the hypervisor where base.img and libvirt-base.xml are
# created, and the VM disk image size in gigabytes.
image_base_dir = '/mnt/centos6-base-vm'
size_gb = 100
# + [markdown] lc_cell_meme={"current": "a28f6f3a-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f7052-495a-11e8-8017-0242ac130002", "previous": "a28f6e18-495a-11e8-8017-0242ac130002"}
# `size_gb` で指定した空き容量がBind対象ホストにあるかどうかを確認する。
# + lc_cell_meme={"current": "a28f7052-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f7160-495a-11e8-8017-0242ac130002", "previous": "a28f6f3a-495a-11e8-8017-0242ac130002"}
# !ansible -a 'df -H' {target_group}
# + [markdown] lc_cell_meme={"current": "a28f7160-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f7278-495a-11e8-8017-0242ac130002", "previous": "a28f7052-495a-11e8-8017-0242ac130002"}
# # イメージ取得用VMの新規作成
#
# Binding対象ホストにイメージ保存用のディレクトリを作成する。
# + lc_cell_meme={"current": "a28f7278-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f7390-495a-11e8-8017-0242ac130002", "previous": "a28f7160-495a-11e8-8017-0242ac130002"}
# !ansible -b -m file -a 'path={image_base_dir} state=directory' {target_group}
# + [markdown] lc_cell_meme={"current": "a28f7390-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f74a8-495a-11e8-8017-0242ac130002", "previous": "a28f7278-495a-11e8-8017-0242ac130002"}
# スナップショット用のVM名を決める。
# + lc_cell_meme={"current": "a28f74a8-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f75ca-495a-11e8-8017-0242ac130002", "previous": "a28f7390-495a-11e8-8017-0242ac130002"}
# Name of the temporary VM used to build the snapshot image.
new_vmname = 'snapshot-vm-20160609'
# + [markdown] lc_cell_meme={"current": "a28f75ca-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f76e2-495a-11e8-8017-0242ac130002", "previous": "a28f74a8-495a-11e8-8017-0242ac130002"}
# 仮想マシンの作成は、virt-installを使い、CentOS 6のMinimal ISOを使ってインストールする。
# + [markdown] lc_cell_meme={"current": "a28f76e2-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f77fa-495a-11e8-8017-0242ac130002", "previous": "a28f75ca-495a-11e8-8017-0242ac130002"}
# ## インストール用ISOの準備
#
# インストールディスクのダウンロードをおこなう。
#
# MD5チェックサムが `0ca12fe5f28c2ceed4f4084b41ff8a0b` であることを確認すること。*(2016/06/17)*
# + lc_cell_meme={"current": "a28f77fa-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f7912-495a-11e8-8017-0242ac130002", "previous": "a28f76e2-495a-11e8-8017-0242ac130002"}
# !ansible -b -m get_url -a 'url=http://ftp.riken.jp/Linux/centos/6/isos/x86_64/CentOS-6.8-x86_64-minimal.iso \
# dest=/tmp/CentOS-6.8-x86_64-minimal.iso' {target_group}
# + [markdown] lc_cell_meme={"current": "a28f7912-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f7a34-495a-11e8-8017-0242ac130002", "previous": "a28f77fa-495a-11e8-8017-0242ac130002"}
# ## Kickstartファイルの準備
#
# インストール手順は Kickstartを使って定義する。
#
# 念のため、VMにはrootパスワードを指定しておく。このパスワードはスナップショット処理の最後にロックする。
# + lc_cell_meme={"current": "a28f7a34-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f7b4c-495a-11e8-8017-0242ac130002", "previous": "a28f7912-495a-11e8-8017-0242ac130002"}
from getpass import getpass
# Temporary root password for the VM install; prompted interactively so
# it is never stored in the notebook. The root account is locked again
# at the end of the snapshot flow (see the playbook's "passwd -l root").
rootpw = getpass()
# + [markdown] lc_cell_meme={"current": "a28f7b4c-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f7c5a-495a-11e8-8017-0242ac130002", "previous": "a28f7a34-495a-11e8-8017-0242ac130002"}
# CentOS6のインストールをおこない、public keyをInjectionするようなKickstartファイルを生成する。
#
# まずローカルに一時ディレクトリを作り、そこにファイルを作成する。
# + lc_cell_meme={"current": "a28f7c5a-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f7d7c-495a-11e8-8017-0242ac130002", "previous": "a28f7b4c-495a-11e8-8017-0242ac130002"}
import tempfile
# Local scratch directory for generated artifacts (kickstart, inventory
# files, playbook, libvirt XML) before they are uploaded to the target.
work_dir = tempfile.mkdtemp()
work_dir
# + lc_cell_meme={"current": "a28f7d7c-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f7e94-495a-11e8-8017-0242ac130002", "previous": "a28f7c5a-495a-11e8-8017-0242ac130002"} run_control={"marked": false}
import os
# Read the Ansible public key; it is injected into the VM's
# /root/.ssh/authorized_keys by the Kickstart %post section below.
pub_key = None
with open(os.path.expanduser('~/.ssh/ansible_id_rsa.pub'), 'r') as f:
    pub_key = f.readlines()[0].strip()
# Render the Kickstart file: unattended minimal CentOS 6 install (DHCP on
# eth0, single ext4 root partition, SELinux/firewall disabled) that powers
# the VM off when the install finishes, so the poll loop later in this
# notebook can detect completion.
with open(os.path.join(work_dir, 'centos6.ks.cfg'), 'w') as f:
    f.write('''#version=RHEL6
cmdline
cdrom
install
lang en_US.UTF-8
keyboard jp106
network --device eth0 --onboot yes --bootproto dhcp --noipv6
zerombr
bootloader --location=mbr --append="crashkernel=auto rhgb quiet"
clearpart --all --initlabel
part / --fstype=ext4 --grow --size=1 --asprimary
rootpw --plaintext {rootpw}
authconfig --enableshadow --passalgo=sha512
selinux --disabled
firewall --disabled
firstboot --disabled
timezone --utc Asia/Tokyo
poweroff
%packages --nobase
%end
%post
cd /root
mkdir --mode=700 .ssh
cat >> .ssh/authorized_keys << "PUBLIC_KEY"
{pub_key}
PUBLIC_KEY
chmod 600 .ssh/authorized_keys
%end'''.format(rootpw=rootpw, pub_key=pub_key))
# !grep -v rootpw {work_dir}/centos6.ks.cfg
# + [markdown] lc_cell_meme={"current": "a28f7e94-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f7fac-495a-11e8-8017-0242ac130002", "previous": "a28f7d7c-495a-11e8-8017-0242ac130002"}
# なお、Kickstartの設定では、最後にpoweroffすることでインストール成功後、VMを停止するようにしている。
# + [markdown] lc_cell_meme={"current": "a28f7fac-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f80c4-495a-11e8-8017-0242ac130002", "previous": "a28f7e94-495a-11e8-8017-0242ac130002"}
# Bind対象にアップロードする。
# + lc_cell_meme={"current": "a28f80c4-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f81dc-495a-11e8-8017-0242ac130002", "previous": "a28f7fac-495a-11e8-8017-0242ac130002"}
# !ansible -b -m copy -a 'src={work_dir}/centos6.ks.cfg dest=/tmp/centos6.ks.cfg' {target_group}
# + [markdown] lc_cell_meme={"current": "a28f81dc-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f82f4-495a-11e8-8017-0242ac130002", "previous": "a28f80c4-495a-11e8-8017-0242ac130002"}
# ## インストールの実行
#
# virt-installを実行する。なお、AnsibleのSSH処理の関係で、 `process.error: Cannot run interactive console without a controlling TTY` と出力されるが、ここでは無視する。
# + lc_cell_meme={"current": "a28f82f4-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f840c-495a-11e8-8017-0242ac130002", "previous": "a28f81dc-495a-11e8-8017-0242ac130002"}
# !ansible -b -a 'virt-install --name {new_vmname} \
# --hvm \
# --virt-type kvm \
# --ram 1024 \
# --vcpus 1 \
# --arch x86_64 \
# --os-type linux \
# --os-variant rhel6 \
# --boot hd \
# --disk path\={image_base_dir}/base.img,size\={size_gb},format\=raw \
# --network bridge\={bridge_nic} \
# --graphics none \
# --serial pty \
# --console pty \
# --noreboot \
# --location /tmp/CentOS-6.8-x86_64-minimal.iso \
# --initrd-inject /tmp/centos6.ks.cfg \
# --extra-args "ks\=file:/centos6.ks.cfg console\=ttyS0"' {target_group}
# + [markdown] lc_cell_meme={"current": "a28f840c-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f8524-495a-11e8-8017-0242ac130002", "previous": "a28f82f4-495a-11e8-8017-0242ac130002"}
# VMの状態確認は以下で行える。
# + lc_cell_meme={"current": "a28f8524-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f863c-495a-11e8-8017-0242ac130002", "previous": "a28f840c-495a-11e8-8017-0242ac130002"}
# !ansible -b -m shell -a 'virsh dominfo {new_vmname} | grep State' {target_group}
# + [markdown] lc_cell_meme={"current": "a28f863c-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f8754-495a-11e8-8017-0242ac130002", "previous": "a28f8524-495a-11e8-8017-0242ac130002"}
# 具体的なコンソール出力の確認は、 `virsh console ${new_vmname}` でもおこなえる。
# + [markdown] lc_cell_meme={"current": "a28f8754-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f886c-495a-11e8-8017-0242ac130002", "previous": "a28f863c-495a-11e8-8017-0242ac130002"}
# poweroffされるまで待つ・・・
# + lc_cell_meme={"current": "a28f886c-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f8984-495a-11e8-8017-0242ac130002", "previous": "a28f8754-495a-11e8-8017-0242ac130002"}
# vm_status = !ansible -b -m shell -a 'virsh dominfo {new_vmname} | grep State' {target_group}
import time
# Poll once a minute until the installer powers the VM off (the kickstart
# ends with "poweroff"). NOTE(review): the "!" lines are Jupyter shell
# magics that define vm_status; they only execute inside a notebook
# session, not when this file is run as a plain script.
while vm_status[1].split()[-1] == 'running':
    time.sleep(60)
    # vm_status = !ansible -b -m shell -a 'virsh dominfo {new_vmname} | grep State' {target_group}
# + [markdown] lc_cell_meme={"current": "a28f8984-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f8a92-495a-11e8-8017-0242ac130002", "previous": "a28f886c-495a-11e8-8017-0242ac130002"}
# 以下の出力が `shut off` となっていればOK。
# + lc_cell_meme={"current": "a28f8a92-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f8baa-495a-11e8-8017-0242ac130002", "previous": "a28f8984-495a-11e8-8017-0242ac130002"}
# !ansible -b -m shell -a 'virsh dominfo {new_vmname} | grep State' {target_group}
# + [markdown] lc_cell_meme={"current": "a28f8baa-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f8cc2-495a-11e8-8017-0242ac130002", "previous": "a28f8a92-495a-11e8-8017-0242ac130002"}
# 起動してみる。
# + lc_cell_meme={"current": "a28f8cc2-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f8dda-495a-11e8-8017-0242ac130002", "previous": "a28f8baa-495a-11e8-8017-0242ac130002"}
# !ansible -b -a 'virsh start {new_vmname}' {target_group}
# + [markdown] lc_cell_meme={"current": "a28f8dda-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f8ef2-495a-11e8-8017-0242ac130002", "previous": "a28f8cc2-495a-11e8-8017-0242ac130002"}
# ## 仮想マシンの情報確認
#
# VMにふられたIPアドレスの確認
# + lc_cell_meme={"current": "a28f8ef2-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f900a-495a-11e8-8017-0242ac130002", "previous": "a28f8dda-495a-11e8-8017-0242ac130002"}
# !ansible -b -a "virsh domiflist {new_vmname}" {target_group}
# + [markdown] lc_cell_meme={"current": "a28f900a-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f9122-495a-11e8-8017-0242ac130002", "previous": "a28f8ef2-495a-11e8-8017-0242ac130002"}
# 上記で確認できたMACアドレスを、以下の変数に代入。
# + lc_cell_meme={"current": "a28f9122-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f923a-495a-11e8-8017-0242ac130002", "previous": "a28f900a-495a-11e8-8017-0242ac130002"}
import re
# domiflist_stdio = !ansible -b -a "virsh domiflist {new_vmname}" {target_group}
# Extract the VM's MAC address from the "virsh domiflist" output: match
# the bridge row and capture its last hex:colon field; [0] takes the
# first matching row (assumes a single bridge interface - TODO confirm).
mac_pattern = re.compile(r'.*bridge.*\s([0-9a-f\:]+)\s*')
vmmac = [mac_pattern.match(line).group(1) for line in domiflist_stdio if mac_pattern.match(line)][0]
vmmac
# + [markdown] lc_cell_meme={"current": "a28f923a-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f9348-495a-11e8-8017-0242ac130002", "previous": "a28f9122-495a-11e8-8017-0242ac130002"}
# dnsmasqのlease情報を確認する。
# + lc_cell_meme={"current": "a28f9348-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f9460-495a-11e8-8017-0242ac130002", "previous": "a28f923a-495a-11e8-8017-0242ac130002"}
# !ansible -b -a "grep {vmmac} /var/lib/dnsmasq/dnsmasq.leases" {target_group}
# + lc_cell_meme={"current": "a28f9460-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f9578-495a-11e8-8017-0242ac130002", "previous": "a28f9348-495a-11e8-8017-0242ac130002"}
# leases_stdio = !ansible -b -a "grep {vmmac} /var/lib/dnsmasq/dnsmasq.leases" {target_group}
# Extract the IP address leased to the VM's MAC from the dnsmasq leases
# output: group 1 is the MAC, group 2 the dotted-quad address following
# it; [0] takes the first (presumably only) matching lease line.
ip_pattern = re.compile(r'.*\s([0-9a-f\:]+)\s+([0-9\.]+)\s.*')
ipaddr = [ip_pattern.match(line).group(2) for line in leases_stdio if ip_pattern.match(line)][0]
ipaddr
# + [markdown] lc_cell_meme={"current": "a28f9578-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f9690-495a-11e8-8017-0242ac130002", "previous": "a28f9460-495a-11e8-8017-0242ac130002"}
# このIPアドレスに対して操作すればよい・・・疎通しているか、確認する。
#
# (VMには、このNotebook環境から疎通するIPアドレスが振られることを想定している。)
# + lc_cell_meme={"current": "a28f9690-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f97a8-495a-11e8-8017-0242ac130002", "previous": "a28f9578-495a-11e8-8017-0242ac130002"}
# !ping -c 4 {ipaddr}
# + [markdown] lc_cell_meme={"current": "a28f97a8-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f98c0-495a-11e8-8017-0242ac130002", "previous": "a28f9690-495a-11e8-8017-0242ac130002"}
# ## 仮想マシンの設定変更
#
#
# + [markdown] lc_cell_meme={"current": "a28f98c0-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f99f6-495a-11e8-8017-0242ac130002", "previous": "a28f97a8-495a-11e8-8017-0242ac130002"}
# ### Ansible操作用ユーザの作成
#
# ユーザ `ansible` でAnsibleの操作が可能なよう、設定変更をおこなう。
# + lc_cell_meme={"current": "a28f99f6-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f9b0e-495a-11e8-8017-0242ac130002", "previous": "a28f98c0-495a-11e8-8017-0242ac130002"}
import os

# Bootstrap inventory for the first configuration pass: a single host,
# logged into as root (the "ansible" user does not exist on the VM yet).
snapshot_hosts = os.path.join(work_dir, 'init-hosts')
entry = '{address} ansible_ssh_user=root\n'.format(address=ipaddr)
with open(snapshot_hosts, 'w') as inventory:
    inventory.write(entry)
# !cat { snapshot_hosts }
# + [markdown] lc_cell_meme={"current": "a28f9b0e-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f9c26-495a-11e8-8017-0242ac130002", "previous": "a28f99f6-495a-11e8-8017-0242ac130002"}
# Ansible経由でpingできるかの確認をする。
# + lc_cell_meme={"current": "a28f9c26-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f9d3e-495a-11e8-8017-0242ac130002", "previous": "a28f9b0e-495a-11e8-8017-0242ac130002"}
# !ansible -m ping -i { snapshot_hosts } all
# + [markdown] lc_cell_meme={"current": "a28f9d3e-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f9e56-495a-11e8-8017-0242ac130002", "previous": "a28f9c26-495a-11e8-8017-0242ac130002"}
# 設定変更用のPlaybookを生成する。
# + lc_cell_meme={"current": "a28f9e56-495a-11e8-8017-0242ac130002", "history": [], "next": "a28f9f6e-495a-11e8-8017-0242ac130002", "previous": "a28f9d3e-495a-11e8-8017-0242ac130002"}
# Re-read the Ansible public key for injection into the new user account.
pub_key = None
with open(os.path.expanduser('~/.ssh/ansible_id_rsa.pub'), 'r') as f:
    pub_key = f.readlines()[0].strip()
# Generate a one-shot playbook that: creates the "ansible" user, installs
# the public key, grants it passwordless sudo via /etc/sudoers, and
# finally locks the root account (undoing the temporary install password).
playbook_inject_key = os.path.join(work_dir, 'playbook_inject-key.yml')
with open(playbook_inject_key, 'w') as f:
    f.write('- hosts: all\n')
    f.write('  become: yes\n')
    f.write('  tasks:\n')
    f.write('  - user: name=ansible state=present\n')
    f.write('  - authorized_key: user=ansible key="{}"\n'.format(pub_key))
    f.write('  - lineinfile: "dest=/etc/sudoers backup=yes state=present regexp=\'^ansible\' line=\'ansible ALL=(ALL) NOPASSWD: ALL\'"\n')
    f.write('  - command: passwd -l root\n')
# !cat { playbook_inject_key }
# + [markdown] lc_cell_meme={"current": "a28f9f6e-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fa086-495a-11e8-8017-0242ac130002", "previous": "a28f9e56-495a-11e8-8017-0242ac130002"}
# Playbookを実行する。
# + lc_cell_meme={"current": "a28fa086-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fa19e-495a-11e8-8017-0242ac130002", "previous": "a28f9f6e-495a-11e8-8017-0242ac130002"} run_control={"marked": false}
# !ansible-playbook -i { snapshot_hosts } { playbook_inject_key }
# + [markdown] lc_cell_meme={"current": "a28fa19e-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fa2ac-495a-11e8-8017-0242ac130002", "previous": "a28fa086-495a-11e8-8017-0242ac130002"}
# これで、ユーザ `ansible` でSSH可能な状態になった。
# + lc_cell_meme={"current": "a28fa2ac-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fa3c4-495a-11e8-8017-0242ac130002", "previous": "a28fa19e-495a-11e8-8017-0242ac130002"}
# Final inventory: the same host, now reachable as the default
# (key-authenticated ansible) user instead of root.
snapshot_hosts = os.path.join(work_dir, 'hosts')
entry = '{address}\n'.format(address=ipaddr)
with open(snapshot_hosts, 'w') as inventory:
    inventory.write(entry)
# !cat { snapshot_hosts }
# + [markdown] lc_cell_meme={"current": "a28fa3c4-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fa4d2-495a-11e8-8017-0242ac130002", "previous": "a28fa2ac-495a-11e8-8017-0242ac130002"}
# 以下のpingに成功すればOK。
# + lc_cell_meme={"current": "a28fa4d2-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fa5ea-495a-11e8-8017-0242ac130002", "previous": "a28fa3c4-495a-11e8-8017-0242ac130002"}
# !ansible -m ping -i { snapshot_hosts } all
# + [markdown] lc_cell_meme={"current": "a28fa5ea-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fa702-495a-11e8-8017-0242ac130002", "previous": "a28fa4d2-495a-11e8-8017-0242ac130002"}
# ### ifcfgの修正
#
# インタフェースの定義にスナップショット時のMACアドレスが含まれていると、新規VMとして起動したときに正しくNIC設定が適用されないので、修正しておく。
# + lc_cell_meme={"current": "a28fa702-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fa810-495a-11e8-8017-0242ac130002", "previous": "a28fa5ea-495a-11e8-8017-0242ac130002"}
# !ansible -a 'cat /etc/sysconfig/network-scripts/ifcfg-eth0' -i { snapshot_hosts } all
# + [markdown] lc_cell_meme={"current": "a28fa810-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fa91e-495a-11e8-8017-0242ac130002", "previous": "a28fa702-495a-11e8-8017-0242ac130002"}
# `HWADDR`, `UUID`の定義を削除する。
# + lc_cell_meme={"current": "a28fa91e-495a-11e8-8017-0242ac130002", "history": [], "next": "a28faa36-495a-11e8-8017-0242ac130002", "previous": "a28fa810-495a-11e8-8017-0242ac130002"}
# !ansible -b -m lineinfile -a "dest=/etc/sysconfig/network-scripts/ifcfg-eth0 regexp='^HWADDR=' state=absent" -i { snapshot_hosts } all
# !ansible -b -m lineinfile -a "dest=/etc/sysconfig/network-scripts/ifcfg-eth0 regexp='^UUID=' state=absent" -i { snapshot_hosts } all
# + [markdown] lc_cell_meme={"current": "a28faa36-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fab44-495a-11e8-8017-0242ac130002", "previous": "a28fa91e-495a-11e8-8017-0242ac130002"}
# `HWADDR`, `UUID`の定義が削除されていればよい。
# + lc_cell_meme={"current": "a28fab44-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fac5c-495a-11e8-8017-0242ac130002", "previous": "a28faa36-495a-11e8-8017-0242ac130002"}
# !ansible -a 'cat /etc/sysconfig/network-scripts/ifcfg-eth0' -i { snapshot_hosts } all
# + [markdown] lc_cell_meme={"current": "a28fac5c-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fad6a-495a-11e8-8017-0242ac130002", "previous": "a28fab44-495a-11e8-8017-0242ac130002"}
# ### udevのネットワーク定義の修正
#
# udevの定義も削除しておく。
# + lc_cell_meme={"current": "a28fad6a-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fae82-495a-11e8-8017-0242ac130002", "previous": "a28fac5c-495a-11e8-8017-0242ac130002"}
# !ansible -a 'cat /etc/udev/rules.d/70-persistent-net.rules' -i { snapshot_hosts } all
# + lc_cell_meme={"current": "a28fae82-495a-11e8-8017-0242ac130002", "history": [], "next": "a28faf90-495a-11e8-8017-0242ac130002", "previous": "a28fad6a-495a-11e8-8017-0242ac130002"}
# !ansible -b -m file -a 'path=/etc/udev/rules.d/70-persistent-net.rules state=absent' -i { snapshot_hosts } all
# + [markdown] lc_cell_meme={"current": "a28faf90-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fb0a8-495a-11e8-8017-0242ac130002", "previous": "a28fae82-495a-11e8-8017-0242ac130002"}
# 削除されているかを確認する。
# + lc_cell_meme={"current": "a28fb0a8-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fb1ca-495a-11e8-8017-0242ac130002", "previous": "a28faf90-495a-11e8-8017-0242ac130002"}
# !ansible -a 'ls -la /etc/udev/rules.d/' -i { snapshot_hosts } all
# + [markdown] lc_cell_meme={"current": "a28fb1ca-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fb2e2-495a-11e8-8017-0242ac130002", "previous": "a28fb0a8-495a-11e8-8017-0242ac130002"}
# ## VMイメージファイルへの同期
# + lc_cell_meme={"current": "a28fb2e2-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fb3fa-495a-11e8-8017-0242ac130002", "previous": "a28fb1ca-495a-11e8-8017-0242ac130002"}
# !ansible -a 'sync' -i { snapshot_hosts } all
# + [markdown] lc_cell_meme={"current": "a28fb3fa-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fb508-495a-11e8-8017-0242ac130002", "previous": "a28fb2e2-495a-11e8-8017-0242ac130002"}
# # VM定義の保存
#
# VM複製用に、XML定義を得ておく。
# + lc_cell_meme={"current": "a28fb508-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fb620-495a-11e8-8017-0242ac130002", "previous": "a28fb3fa-495a-11e8-8017-0242ac130002"}
import xml.etree.ElementTree as ET
# vmxml_s = !ansible -b -a "virsh dumpxml {new_vmname}" {target_group}
# NOTE: vmxml_s comes from the notebook-magic ansible call above; its first
# line is ansible's host/status header, so drop it before parsing the XML.
vmxml_s = vmxml_s[1:]
vmxml = ET.fromstring('\n'.join(vmxml_s))
# Strip instance-specific data so the dump can serve as a reusable template:
# the runtime domain id, the uuid, and the interface's target/alias elements.
del vmxml.attrib['id']
vmxml.remove(vmxml.find('uuid'))
intrElem = vmxml.find('devices').find('interface')
intrElem.remove(intrElem.find('target'))
intrElem.remove(intrElem.find('alias'))
# Blank out the per-VM fields (name, disk image path, MAC address); they are
# filled in when a clone is created from this template.
vmxml.find('name').text = ''
vmxml.find('devices').find('disk').find('source').attrib['file'] = ''
vmxml.find('devices').find('interface').find('mac').attrib['address'] = ''
# work_dir is defined earlier in the notebook -- TODO confirm when reviewing.
ET.ElementTree(vmxml).write(os.path.join(work_dir, 'libvirt-base.xml'))
# !cat {work_dir}/libvirt-base.xml
# + [markdown] lc_cell_meme={"current": "a28fb620-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fb738-495a-11e8-8017-0242ac130002", "previous": "a28fb508-495a-11e8-8017-0242ac130002"}
# リモートのイメージと同じパスに保存しておく。
# + lc_cell_meme={"current": "a28fb738-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fb846-495a-11e8-8017-0242ac130002", "previous": "a28fb620-495a-11e8-8017-0242ac130002"}
# !ansible -b -m copy -a 'src={work_dir}/libvirt-base.xml dest={image_base_dir}' {target_group}
# + [markdown] lc_cell_meme={"current": "a28fb846-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fb95e-495a-11e8-8017-0242ac130002", "previous": "a28fb738-495a-11e8-8017-0242ac130002"}
# # イメージ取得用VMの停止
#
# 停止してBaseの作業完了・・・
# + lc_cell_meme={"current": "a28fb95e-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fba76-495a-11e8-8017-0242ac130002", "previous": "a28fb846-495a-11e8-8017-0242ac130002"}
# !ansible -b -a "virsh destroy {new_vmname}" {target_group}
# + [markdown] lc_cell_meme={"current": "a28fba76-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fbb8e-495a-11e8-8017-0242ac130002", "previous": "a28fb95e-495a-11e8-8017-0242ac130002"}
# しばらく待ってから再度 virsh listを実行すると、仮想マシンが停止してリストから消えたことがわかる。
# + lc_cell_meme={"current": "a28fbb8e-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fbca6-495a-11e8-8017-0242ac130002", "previous": "a28fba76-495a-11e8-8017-0242ac130002"}
# !ansible -b -a "virsh list" {target_group}
# + [markdown] lc_cell_meme={"current": "a28fbca6-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fbdbe-495a-11e8-8017-0242ac130002", "previous": "a28fbb8e-495a-11e8-8017-0242ac130002"}
# VMの定義も削除しておく。
# + lc_cell_meme={"current": "a28fbdbe-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fbed6-495a-11e8-8017-0242ac130002", "previous": "a28fbca6-495a-11e8-8017-0242ac130002"}
# !ansible -b -a "virsh undefine {new_vmname}" {target_group}
# + [markdown] lc_cell_meme={"current": "a28fbed6-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fbfee-495a-11e8-8017-0242ac130002", "previous": "a28fbdbe-495a-11e8-8017-0242ac130002"}
# ## dnsmasqの後始末
#
# dnsmasqのリース情報の後始末。VM用IPアドレスが潤沢にある場合は不要。
# + lc_cell_meme={"current": "a28fbfee-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fc110-495a-11e8-8017-0242ac130002", "previous": "a28fbed6-495a-11e8-8017-0242ac130002"}
# !ansible -a "cat /var/lib/dnsmasq/dnsmasq.leases" {target_group}
# + lc_cell_meme={"current": "a28fc110-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fc21e-495a-11e8-8017-0242ac130002", "previous": "a28fbfee-495a-11e8-8017-0242ac130002"}
# !ansible -b -m lineinfile -a "dest=/var/lib/dnsmasq/dnsmasq.leases regexp='^.*\s+{ ipaddr }\s+.*' state=absent" {target_group}
# + lc_cell_meme={"current": "a28fc21e-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fc336-495a-11e8-8017-0242ac130002", "previous": "a28fc110-495a-11e8-8017-0242ac130002"}
# !ansible -a "cat /var/lib/dnsmasq/dnsmasq.leases" {target_group}
# + lc_cell_meme={"current": "a28fc336-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fc44e-495a-11e8-8017-0242ac130002", "previous": "a28fc21e-495a-11e8-8017-0242ac130002"}
# !ansible -b -m service -a "name=dnsmasq state=restarted" {target_group}
# + [markdown] lc_cell_meme={"current": "a28fc44e-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fc566-495a-11e8-8017-0242ac130002", "previous": "a28fc336-495a-11e8-8017-0242ac130002"}
# # イメージファイルの確認
#
# イメージファイルとXML定義が生成されていることを確認する。以下の2つのファイルがホストに作成されていればOK。
#
# - base.img
# - libvirt-base.xml
# + lc_cell_meme={"current": "a28fc566-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fc67e-495a-11e8-8017-0242ac130002", "previous": "a28fc44e-495a-11e8-8017-0242ac130002"}
# !ansible -b -a "ls -la {image_base_dir}" {target_group}
# + [markdown] lc_cell_meme={"current": "a28fc67e-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fc796-495a-11e8-8017-0242ac130002", "previous": "a28fc566-495a-11e8-8017-0242ac130002"}
# 完了。
# + [markdown] lc_cell_meme={"current": "a28fc796-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fc8a4-495a-11e8-8017-0242ac130002", "previous": "a28fc67e-495a-11e8-8017-0242ac130002"}
# # 後始末
#
# 一時ディレクトリを削除する。
# + lc_cell_meme={"current": "a28fc8a4-495a-11e8-8017-0242ac130002", "history": [], "next": "a28fc9bc-495a-11e8-8017-0242ac130002", "previous": "a28fc796-495a-11e8-8017-0242ac130002"}
# !rm -fr {work_dir}
# + lc_cell_meme={"current": "a28fc9bc-495a-11e8-8017-0242ac130002", "history": [], "next": null, "previous": "a28fc8a4-495a-11e8-8017-0242ac130002"}
|
D03b_KVM - Set! CentOS6.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a>
# $ \newcommand{\bra}[1]{\langle #1|} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
# $ \newcommand{\dot}[2]{ #1 \cdot #2} $
# $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
# $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
# $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
# $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
# $ \newcommand{\mypar}[1]{\left( #1 \right)} $
# $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
# $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
# $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
# $ \newcommand{\onehalf}{\frac{1}{2}} $
# $ \newcommand{\donehalf}{\dfrac{1}{2}} $
# $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
# $ \newcommand{\vzero}{\myvector{1\\0}} $
# $ \newcommand{\vone}{\myvector{0\\1}} $
# $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
# $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
# $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
# $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
# $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $
# $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
# $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
# $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
# $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
# $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
# $ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $
# $ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $
# $ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $
# $ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $
# $ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $
# <font style="font-size:28px;" align="left"><b> Basics of Python: Loops </b></font>
# <br>
# _prepared by <NAME>_
# <br><br>
# We review using loops in Python here.
#
# Run each cell and check the results.
# <h3> For-loop </h3>
# print the integers 0 through 9, first as a two-line loop with an indented body
for i in range(10):
    print(i)
# range(n) is the sequence 0, 1, ..., n-1, and i takes each value in turn
# the very same loop can also be squeezed onto a single line
for i in range(10): print(i)
# the second line is indented
# this means that the command in the second line will be executed inside the for-loop
# any other code executed inside the for-loop must be intented in the same way
#my_code_inside_for-loop_2 will come here
#my_code_inside_for-loop_3 will come here
#my_code_inside_for-loop_4 will come here
# now I am out of the scope of for-loop
#my_code_outside_for-loop_1 will come here
#my_code_outside_for-loop_2 will come here
# +
# sum the integers 1+2+...+10, accumulating into total
total = 0
for i in range(11):
    total += i  # add the current value of i on every pass
# the loop is done; report the result
print(total)
# -
# sum the even numbers 10+12+14+...+44
# range(start, stop, step) generates them directly: start 10, stop past 44, step 2
total = 0
for j in range(10,45,2):
    total = total + j  # long form of total += j
print(total)
# sum the powers of two 1+2+4+...+256, i.e. 2**0 through 2**8
total = 0
current_number = 1  # the power of two for the current pass; doubled each time
for k in range(9):
    total += current_number
    current_number = 2 * current_number
# out of the loop; show the grand total
print(total)
# a short explicit list works just as well as range
for i in [1,10,100,1000,10000]:
    print(i)
# parentheses give a tuple instead of a list -- iteration looks the same,
# but remember that a tuple's contents cannot be changed
for i in (1,10,100,1000,10000):
    print(i)
# multiples of 7 between 10 and 91: the first is 14, and stopping at 92 keeps 91
for j in range(14,92,7):
    print(j)
# every integer from 11 through 22 (the stop value 23 itself is excluded)
for i in range(11,23):
    print(i)
# the bounds of a range may be expressions over variables
n = 5
for j in range(n,2*n):  # yields n, n+1, ..., 2n-1
    print(j)
# strings iterate just like numbers
for name in ("Asja","Balvis","Fyodor"):
    print("Hello",name,":-)")
# +
# a range can be materialised into an actual list
L1 = [*range(10)]
print(L1)
L2 = [*range(55,200,11)]
print(L2)
# -
# <h3> Task 1 </h3>
#
# Calculate the value of summation $ 3+6+9+\cdots+51 $, and then print the result.
#
# Your result should be 459.
#
# your solution is here
#
# <a href="Python12_Basics_Loops_Solutions.ipynb#task1">click for our solution</a>
# <h3> Task 2 </h3>
#
# $ 3^k $ means $ 3 \cdot 3 \cdot \cdots \cdot 3 $ ($ k $ times) for $ k \geq 2 $.
#
# Moreover, $ 3^0 $ is 1 and $ 3^1 = 3 $.
#
# Calculate the value of summation $ 3^0 + 3^1 + 3^2 + \cdots + 3^8 $, and then print the result.
#
# Your result should be 9841.
#
# your solution is here
#
# <a href="Python12_Basics_Loops_Solutions.ipynb#task2">click for our solution</a>
# <h3> While-loop </h3>
# +
# sum 1+2+4+...+256 again, this time with a while-loop
# the general shape is:
#   while condition(s):
#       body...
total = 0
i = 1
while i < 257:  # keep going as long as i has not passed 256
    total += i
    i *= 2  # doubling i guarantees the loop terminates
print(total)
# compare with the for-loop version of the same summation above
# we do the same summation by using for-loop above
# +
L = [0,1,2,3,4,5,11]  # seven integers; note that 6 is missing
i = 0
while i in L:  # runs while i is still a member of L
    print(i)
    i += 1
# the loop stops once i becomes 6, the first value absent from L
# -
# the membership test can also be negated in the condition
L = [10]  # a single-element list
i = 0
while i not in L:  # iterate until i finally equals 10
    print(i)
    i += 1
# the same loop, written with a direct inequality test
i = 0
while i != 10:  # "!=" reads "not equal to"
    print(i)
    i += 1
# +
# once more, negating an equality test this time
i = 0
while not (i == 10):  # "==" reads "equal to"
    print(i)
    i += 1
# while-loops are flexible and fun :-)
# but their conditions deserve extra care!
# -
# Consider the summation $ S(n) = 1+ 2+ 3 + \cdots + n $ for some natural number $ n $.
#
# Let's find the minimum value of $ n $ such that $ S(n) \geq 1000 $.
#
# While-loop works very well for this task.
# <ul>
# <li>We can iteratively increase $ n $ and update the value of $ S(n) $.</li>
# <li>The loop iterates as long as $S(n)$ is less than 1000.</li>
# <li>Once it hits 1000 or a greater number, the loop will be terminated.</li>
# </ul>
# find the least n whose partial sum S(n) = 1+2+...+n reaches 1000
S = 0
n = 0
while S < 1000:  # terminates at the first n with S(n) >= 1000
    n += 1
    S += n
# report both the index and the sum
print("n =",n," S =",S)
# <h3> Task 3 </h3>
#
# Consider the summation $ T(n) = 1 + \dfrac{1}{2} + \dfrac{1}{4}+ \dfrac{1}{8} + \cdots + \dfrac{1}{2^n} $ for some natural number $ n $.
#
# Remark that $ T(0) = \dfrac{1}{2^0} = \dfrac{1}{1} = 1 $.
#
# This summation can be arbitrarily close to $2$.
#
# Find the minimum value of $ n $ such that $ T(n) $ is close to $2$ by $ 0.01 $, i.e., $ 2 - T(n) < 0.01 $.
#
# In other words, we find the minimum value of $n$ such that $ T(n) > 1.99 $.
#
# The operator for "less than or equal to" in python is "$ < = $".
# +
# three examples for the operator "less than or equal to"
#print (4 <= 5)
#print (5 <= 5)
#print (6 <= 5)
# you may comment out the above three lines and see the results by running this cell
#
# your solution is here
#
# -
# <a href="Python12_Basics_Loops_Solutions.ipynb#task3">click for our solution</a>
# <h3> Task 4 </h3>
#
# Randomly pick number(s) between 0 and 9 until hitting 3, and then print the number of attempt(s).
#
# We can use <i>randrange</i> function from <i>random</i> module for randomly picking a number in the given range.
# +
# this is the code for including function randrange into our program
from random import randrange
# randrange(n) picks a number from the list [0,1,2,...,n-1] randomly
#r = randrange(100)
#print(r)
#
# your solution is here
#
# -
# <a href="Python12_Basics_Loops_Solutions.ipynb#task4">click for our solution</a>
# <h3> Task 5 </h3>
#
# This task is challenging.
#
# It is designed for the usage of double nested loops: one loop inside of the other loop.
#
# In the fourth task above, the expected number of attempt(s) to hit number 3 is 10.
#
# Do a series of experiments by using your solution for Task 4.
#
# Experiment 1: Execute your solution 20 times, and then calculate the average attempts.
#
# Experiment 2: Execute your solution 200 times, and then calculate the average attempts.
#
# Experiment 3: Execute your solution 2000 times, and then calculate the average attempts.
#
# Experiment 4: Execute your solution 20000 times, and then calculate the average attempts.
#
# Experiment 5: Execute your solution 200000 times, and then calculate the average attempts.
#
# <i>Your experimental average should get closer to 10 when the number of executions is increased.</i>
#
# Remark that all five experiments may also be automatically done by using triple loops.
# <a href="Python12_Basics_Loops_Solutions.ipynb#task5">click for our solution</a>
# +
# here is a schematic example for double nested loops
#for i in range(10):
# your_code1
# your_code2
# while j != 7:
# your_code_3
# your_code_4
#
# your solution is here
#
|
python/Python12_Basics_Loops.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/butchland/fastai_nb_explorations/blob/master/fastai_scratch_with_tpu_mnist_4_experiment4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="ws0aMBh7SjXF" colab_type="code" colab={}
import os
# Fail fast with a readable message when no TPU runtime is attached.
# Use .get() rather than indexing: os.environ['COLAB_TPU_ADDR'] raises a bare
# KeyError when the variable is absent, which is exactly the case this check
# is meant to explain to the user.
assert os.environ.get('COLAB_TPU_ADDR'), 'Make sure to select TPU from Edit > Notebook settings > Hardware accelerator'
# + id="P4jifTAqTJ5z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="44a7bf29-d218-4428-cf09-dea942b0e9a9"
# !curl https://course.fast.ai/setup/colab | bash
# + id="f4t2znf8TdDY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f0c759c2-7bca-41d0-9689-b1571c3995ef"
VERSION = "20200325" #@param ["1.5" , "20200325", "nightly"]
# !curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
# !python pytorch-xla-env-setup.py --version $VERSION
# + id="CJ-TSJDATxiP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="23285082-38ec-4719-c064-ac0bb59cd812"
# !pip freeze | grep torchvision
# !pip freeze | grep torch-xla
# + id="G7k6LMYgUQmE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="19ffa27e-7672-4519-fa77-f57a4135227f"
# !pip install fastcore --upgrade
# + id="SljFZBZXUWJc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 801} outputId="77919434-d7a1-4836-a7b8-acc0121b92d2"
# !pip install fastai2 --upgrade
# + id="SkJcrhthUcFs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 766} outputId="19976555-d252-4989-b235-38034e48e63f"
# Commented shell magic, consistent with the "# !pip install ..." cells above;
# the previous bare "pip install fastai --upgrade" line is a SyntaxError in the
# jupytext .py form and only worked via IPython's automagic.
# !pip install fastai --upgrade
# + id="OjwXTFa80tdg" colab_type="code" colab={}
from google.colab import drive
drive.mount('/content/drive')
# + id="fIdqQny7UmrL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="201d5b23-709e-4b54-c4fd-8816b296fbbf"
# %cd /content/drive/My\ Drive/course-v4/
# !pwd
# + id="B_Io5kRfVmTY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f5d03a57-0c2c-4225-a87f-1e96143e26ea"
# !pip install -r requirements.txt
# + id="nZJWqOmdVzWY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1375b45c-0af8-4061-e73a-a6e9b176af29"
# %cd nbs
# + id="6yNVBZ74koKV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2e9a61de-8253-47f7-e70f-9d775a77e362"
# !pwd
# + [markdown] id="-180nJSB1KkJ" colab_type="text"
# ### Start of import libraries
# + id="PCYsUhyUV7e2" colab_type="code" colab={}
from fastai2.vision.all import *
# + id="T-WEKRGrVf-p" colab_type="code" colab={}
from utils import *
# + id="d9cf9kAmWLa1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="e39e8459-6158-4598-d62a-4d5dea98c716"
path = untar_data(URLs.MNIST_SAMPLE)
# + id="CZww5QnlVcqA" colab_type="code" colab={}
Path.BASE_PATH = path
# + id="BDG9B6sNWf18" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="db3c2808-7a51-492f-da19-319b26a1d0e8"
path.ls()
# + [markdown] id="zRWl_ZPk1cpK" colab_type="text"
# ### Import torch xla libraries
# + id="hGtKpUptYL0h" colab_type="code" colab={}
import torch
# + id="ClQR3UOuYYw7" colab_type="code" colab={}
import torch_xla
# + id="athAR2e3Ycu7" colab_type="code" colab={}
import torch_xla.core.xla_model as xm
# + id="SOYZ_9XiWVT4" colab_type="code" colab={}
# OptimWrapper?
# + id="BO23Ka0yW_bj" colab_type="code" colab={}
class WrapperOpt:
    """Wrap a fastai optimizer factory so stepping goes through the XLA runtime.

    Calling the instance builds the underlying optimizer, wraps it in
    OptimWrapper, and replaces the wrapper's step() with one that calls
    xm.optimizer_step(..., barrier=True) so the pending TPU graph is executed.
    """

    def __init__(self, f):
        # f: the optimizer factory (e.g. Adam, SGD) to defer to
        self.f = f

    def __call__(self, *args, **kwargs):
        inner_opt = self.f(*args, **kwargs)
        wrapped = OptimWrapper(inner_opt)

        def my_step():
            # barrier=True makes XLA materialise the step immediately
            xm.optimizer_step(inner_opt, barrier=True)

        wrapped.step = my_step
        return wrapped
def wrap_xla_optim(opt):
    """Return a WrapperOpt around *opt*, usable as a fastai opt_func on TPU."""
    return WrapperOpt(opt)
# + [markdown] id="9wD5DwTqbw9x" colab_type="text"
# ### Get TPU Device
# + id="N9_bVKZ_YmKi" colab_type="code" colab={}
tpu_dev = xm.xla_device()
# + id="s6lttXjaZo95" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e670fb1c-66fa-49eb-ae91-25c11b0fbd4c"
tpu_dev
# + id="qqHQ4Fnmb2u3" colab_type="code" colab={}
# Build the MNIST datablock: black & white images (PILImageBW) labelled by
# their parent folder name, split into train/valid by grandparent directory
# (train/ vs valid/), and resized to 28x28 at the item level.
datablock = DataBlock(
    blocks=(ImageBlock(cls=PILImageBW),CategoryBlock),
    get_items=get_image_files,
    splitter=GrandparentSplitter(),
    get_y=parent_label,
    item_tfms=Resize(28),
    batch_tfms=[])  # no batch transforms
# + id="DN8Y66jdcHps" colab_type="code" colab={}
# DataLoaders created directly on the XLA device obtained earlier (tpu_dev)
dls = datablock.dataloaders(path,device=tpu_dev)
# + id="8cHZEySuYlNx" colab_type="code" colab={}
adam_xla_opt = wrap_xla_optim(Adam)
# + id="exrxhsv2ikNO" colab_type="code" colab={}
sgd_xla_opt = wrap_xla_optim(SGD)
# + id="HXW9t0uaYbNy" colab_type="code" colab={}
learner = cnn_learner(dls, resnet18, metrics=accuracy,
loss_func=F.cross_entropy, opt_func=adam_xla_opt)
# + id="uGt-qD7uZrCc" colab_type="code" colab={}
from fastai2.callback.tensorboard import *
# + id="sTtNlgnAYERQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 143} outputId="c181631f-5274-4943-8005-a1a49360144f"
learner.fit_one_cycle(3)
# + id="tPm7GDIjYox0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="a17c9e80-1b8e-423d-b9ff-25e9394135ef"
# !pip freeze | grep tensorboard
# + id="q5gYGgQYZPFZ" colab_type="code" colab={}
|
fastai_scratch_with_tpu_mnist_4_experiment4.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table>
# <tr><td align="right" style="background-color:#ffffff;">
# <img src="../images/logo.jpg" width="20%" align="right">
# </td></tr>
# <tr><td align="right" style="color:#777777;background-color:#ffffff;font-size:12px;">
# Prepared by <NAME><br>
# <NAME> | August 23, 2019 (updated)<br>
# <NAME> | December 05, 2019 (updated)
# </td></tr>
# <tr><td align="right" style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;">
# This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros.
# </td></tr>
# </table>
# $ \newcommand{\bra}[1]{\langle #1|} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
# $ \newcommand{\dot}[2]{ #1 \cdot #2} $
# $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
# $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
# $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
# $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
# $ \newcommand{\mypar}[1]{\left( #1 \right)} $
# $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
# $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
# $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
# $ \newcommand{\onehalf}{\frac{1}{2}} $
# $ \newcommand{\donehalf}{\dfrac{1}{2}} $
# $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
# $ \newcommand{\vzero}{\myvector{1\\0}} $
# $ \newcommand{\vone}{\myvector{0\\1}} $
# $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $
# $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
# $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
# $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
# $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
# $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
# $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
# $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
# <h2>Quantum Teleportation</h2>
# Asja wants to send a qubit to Balvis by using only classical communication.
#
# Let $ \ket{v} = \myvector{a\\ b} \in \mathbb{R}^2 $ be the quantum state of Asja's qubit.
#
# If Asja has many copies of this qubit, then she can collect the statistics based on these qubits and obtain an approximation of $ a $ and $ b $, say $ \tilde{a} $ and $\tilde{b}$, respectively. After this, Asja can send $ \tilde{a} $ and $\tilde{b}$ by using many classical bits, the number of which depends on the precision of the amplitudes.
# On the other hand, if Asja and Balvis share the entangled qubits in state $ \sqrttwo\ket{00} + \sqrttwo\ket{11} $ in advance, then it is possible for Balvis to create $ \ket{v} $ in his qubit after receiving two bits of information from Asja.
# <h3>What is quantum teleportation?</h3>
#
# It is the process of transmission of quantum information, that is the state of a qubit, using classical communication and previously entangled qubits.
#
# The state of a qubit is transfered onto another qubit, while the state of the source qubit is destroyed.
#
# Note that we never obtain multiple copies of the same qubit - "No Cloning Theorem".
#
# <a href = "https://www.nature.com/news/quantum-teleportation-is-even-weirder-than-you-think-1.22321">Read more </a>
# <h3> Protocol </h3>
# Asja has two qubits and Balvis has one qubit.
#
# Asja wants to send her first qubit which is in state $ \ket{v} = \myvector{a\\b} = a\ket{0} + b\ket{1} $.
#
# Asja's second qubit and Balvis' qubit are entangled. The quantum state of Asja's second qubit and Balvis' qubit is $ \sqrttwo\ket{00} + \sqrttwo\ket{11} $.
#
# So, the state of the three qubits is
#
# $$ \mypar{a\ket{0} + b\ket{1}}\mypar{\sqrttwo\ket{00} + \sqrttwo\ket{11}}
# = \sqrttwo \big( a\ket{000} + a \ket{011} + b\ket{100} + b \ket{111} \big). $$
# <h4> CNOT operator by Asja </h4>
#
# Asja applies CNOT gate to her qubits where her first qubit is the control qubit and her second qubit is the target qubit.
# <h3>Task 1</h3>
#
# Calculate the new quantum state after this CNOT operator.
# <a href="B54_Quantum_Teleportation_Solutions.ipynb#task1">click for our solution</a>
# <h3>Hadamard operator by Asja</h3>
#
# Asja applies Hadamard gate to her first qubit.
# <h3>Task 2</h3>
#
# Calculate the new quantum state after this Hadamard operator.
#
# Verify that the resulting quantum state can be written as follows:
#
# $$
# \frac{1}{2} \ket{00} \big( a\ket{0}+b\ket{1} \big) +
# \frac{1}{2} \ket{01} \big( a\ket{1}+b\ket{0} \big) +
# \frac{1}{2} \ket{10} \big( a\ket{0}-b\ket{1} \big) +
# \frac{1}{2} \ket{11} \big( a\ket{1}-b\ket{0} \big) .
# $$
# <a href="B54_Quantum_Teleportation_Solutions.ipynb#task2">click for our solution</a>
# <h3> Measurement by Asja </h3>
#
# Asja measures her qubits. With probability $ \frac{1}{4} $, she can observe one of the basis states.
#
# Depending on the measurement outcomes, Balvis' qubit is in the following states:
# <ol>
# <li> "00": $ \ket{v_{00}} = a\ket{0} + b \ket{1} $ </li>
# <li> "01": $ \ket{v_{01}} = a\ket{1} + b \ket{0} $ </li>
# <li> "10": $ \ket{v_{10}} = a\ket{0} - b \ket{1} $ </li>
# <li> "11": $ \ket{v_{11}} = a\ket{1} - b \ket{0} $ </li>
# </ol>
# As can be observed, the amplitudes $ a $ and $ b $ are "transferred" to Balvis' qubit in any case.
#
# If Asja sends the measurement outcomes, then Balvis can construct $ \ket{v} $ exactly.
# <h3>Task 3</h3>
#
# Asja sends the measurement outcomes to Balvis by using two classical bits: $ x $ and $ y $.
#
# For each $ (x,y) $ pair, determine the quantum operator(s) that Balvis can apply to obtain $ \ket{v} = a\ket{0}+b\ket{1} $ exactly.
# <a href="B54_Quantum_Teleportation_Solutions.ipynb#task3">click for our solution</a>
# <h3> Task 4 </h3>
#
# Create a quantum circuit with three qubits and two classical bits.
#
# Assume that Asja has the first two qubits and Balvis has the third qubit.
#
# Implement the protocol given above until Asja makes the measurements (included).
# <ul>
# <li>The state of Asja's first qubit can be set by a rotation with randomly picked angle.</li>
# <li>Balvis does not make the measurement.</li>
# </ul>
#
# At this point, read the state vector of the circuit by using "statevector_simulator".
#
# <i> When a circuit having measurement is simulated by "statevector_simulator", the simulator picks one of the outcomes, and so we see one of the states after the measurement.</i>
#
# Verify that the state of Balvis' qubit is in one of these: $ \ket{v_{00}}$, $ \ket{v_{01}}$, $ \ket{v_{10}}$, and $ \ket{v_{11}}$.
#
# <i> Remark that, the qubits are combined in reverse order in qiskit.</i>
# +
#
# your code is here
#
# -
# <a href="B54_Quantum_Teleportation_Solutions.ipynb#task4">click for our solution</a>
# <i>Classically controlled</i> recovery operations are also added as follows. Below, the state vector is used to confirm that quantum teleportation is completed.
# +
# Full teleportation demo: prepare a random qubit on q0, teleport it to q2,
# and confirm the transfer via the state vector.
from qiskit import QuantumCircuit,QuantumRegister,ClassicalRegister,execute,Aer
from random import randrange
from math import sin,cos,pi
# We start with 3 quantum registers
qreg=QuantumRegister(3)
# All the quantum registers are in |0> state. We create a random qubit to teleport and an entangled state.
# We pick a random angle.
d=randrange(360)
r=2*pi*d/360
print("Picked angle is "+str(d)+" degrees, "+str(round(r,2))+" radians.")
# The amplitudes of the angle.
x=cos(r)
y=sin(r)
print("Cos component of the angle: "+str(round(x,2))+", sin component of the angle: "+str(round(y,2)))
print("So to be teleported state is "+str(round(x,2))+"|0>+"+str(round(y,2))+"|1>.")
print("Summation of probabilities: "+str(round(x**2,2))+"+"+str(round(y**2,2))+"="+str(round(x**2+y**2,2)))
# One classical bit per measured qubit, kept separate so each can control a correction.
c0=ClassicalRegister(1)
c1=ClassicalRegister(1)
qcir=QuantumCircuit(qreg,c0,c1)
# Generation of random qubit by rotating the quantum register at the amount of picked angle.
# ry(2*r) maps |0> to cos(r)|0> + sin(r)|1>.
qcir.ry(2*r,qreg[0])
qcir.barrier()
# Generation of the entangled state (shared between Asja's 2nd qubit and Balvis' qubit).
qcir.h(qreg[1])
qcir.cx(qreg[1],qreg[2])
qcir.barrier()
# Asja's teleportation operations: CNOT (q0 controls q1), then Hadamard on q0.
qcir.cx(qreg[0],qreg[1])
qcir.h(qreg[0])
qcir.barrier()
# Asja measures her two qubits into the classical bits.
qcir.measure(qreg[0],c0)
qcir.measure(qreg[1],c1)
print()
result=execute(qcir,Aer.get_backend('qasm_simulator')).result()
print("The values of classical registers by qasm_simulator:")
print(result.get_counts()) # The circuit runs 1024 times by default; shows how often each classical bit pattern was observed.
print()
# Balvis' classically controlled corrections: X if Asja's 2nd outcome was 1, Z if her 1st was 1.
qcir.x(qreg[2]).c_if(c1,1)
qcir.z(qreg[2]).c_if(c0,1)
result2=execute(qcir,Aer.get_backend('statevector_simulator')).result()
print("The values of classical registers by statevector_simulator:")
print(result2.get_counts()) # The quantum circuit runs 1 time. It gives the values of classical registers.
print()
print("The statevector represents all the registers.")
for i in result2.get_statevector():
    print(i)
print()
qcir.draw(output='mpl')
# -
|
bronze/B54_Quantum_Teleportation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # MultiLexScaled - sentiment analysis (file-based) (2021-12-10)
#
# _by <NAME>_
#
# _Modification history:_
# _2021-12-03 - Convert to csv lexica; use newest versions of lexica, as publicly available_
# _2021-12-10 - Clean up & streamline for GitHub repo_
#
# This notebook applies sentiment analysis to a corpus. The corpus file-based and does not need to fit into memory all at once. Intermediate stages (cleaned text, individual valences, individual calibrated valences) are stored as separate output files.
#
# ### 0. Set-up
#
# Import necessary code modules; specify location of sentiment analysis lexica and associated files; specify corpus location.
#
# Root folder of the local STAIR installation; every path below is built from it.
STAIRfolder = '/Users/username/STAIR/'
# +
# Code files to import
import sys
sys.path.append(STAIRfolder + 'Code')
import os
import csv
import numpy as np
from datetime import datetime
# local code modules -> these should be in the folder just specified (or otherwise locatable by python)
import tokenization
import valence
import calibrate
# Print summary version info (for fuller info, simply print sys.version)
print('You are using python version {}.'.format(sys.version.split()[0]))
# -
# Next, specify where to find the sentiment analysis lexica and the calibration file, along with their names.
#
SAfolder = STAIRfolder + 'Corpora/Lexica/English/MultiLexScaled/'
# +
lexica = {'HuLiu': SAfolder + 'HuLiu/opinion-lexicon-English/HuLiu_lexiconX.csv',
'LabMT_filtered': SAfolder + 'labMT/labMT_lexicon_filtered.csv',
'LexicoderSD': SAfolder + 'Lexicoder/LSDaug2015/LSD_lexiconX.csv',
'MPQA': SAfolder + 'MPQA 2.0/opinionfinderv2.0/lexicons/MPQA_lexicon.csv',
'NRC': SAfolder + 'NRC/NRC-Emotion-Lexicon-v0.92/NRC_lexicon.csv',
'SOCAL': SAfolder + 'SO-CAL/English (from GitHub)/SO-CAL_lexiconX.csv',
'SWN_filtered': SAfolder + 'SWN/SWN_lexicon_filtered0.1.csv',
'WordStat': SAfolder + 'WordStat/WSD 2.0/WordStat_lexicon2X.csv',
}
lexnames = sorted(lexica.keys())
# If not using modifiers, just set modifierlex to None
modifierlex = SAfolder + 'SO-CAL/English (from GitHub)/SO-CAL_modifiersX.csv'
# -
# Load lexica & modifier info
lexica_used = [valence.load_lex(lexfile) for lexname, lexfile in sorted(lexica.items())]
mods = valence.load_lex(modifierlex) if len(modifierlex) > 0 else {}
# Identify the calibration pathname
calibrationfolder = SAfolder + 'Calibration/'
calibrationfile = calibrationfolder + 'Calibration_US_2021-12-10.csv'
# #### 0.1 Corpus location & file names
#
# Identify the folder in which the corpus is to be found (and into which any new files will be saved).
#
# Specify corpus location
# FIX: was `STAIRfolder1`, which is never defined anywhere in this notebook and would
# raise NameError; the project root defined at the top of the notebook is `STAIRfolder`.
projectfolder = STAIRfolder + 'Corpora/Media/Neutral/Corpus/US/'
corpusfilestem = projectfolder + 'US'
# ### 1. Preprocess text
#
# Pre-tokenize text to make sure punctuation does not affect sentiment calculation.
# +
# Generate clean file(s) from dataset
# The default output is a file with the suffix _clean that contains 2 columns (id, cleanedtext) and no headers
# See the code in tokenization.py for other options
# NOTE(review): column indices are corpus-specific — confirm columns 10 and 12 hold the
# text fields for this dataset.
textcols = (10, 12) # columns containing text (will be combined)
rawsuffix = '_dedup'
cleansuffix = '_clean'
# Reads <stem>_dedup.csv, writes <stem>_clean.csv (see tokenization.py for parameters).
tokenization.preprocess_texts(corpusfilestem + rawsuffix + '.csv',
                              corpusfilestem + cleansuffix + '.csv',
                              textcols=textcols, inheader=True, lang='english',
                              stripspecial=False, stripcomma=False)
# -
# ### 2. Calculate valence
#
# #### 2.1. Specify parameters
#
# We can specify words to ignore (for example, key search terms that might also appear in a valence lexicon), as well as special punctuation to skip (standard punctuation will be skipped automatically). The latter will not be included in the word count; the former will.
# +
ignorewords = set() # Valenced words to ignore, if any, but include in wordcount
words2skip = set(('.', ',', '...')) # Words to skip altogether (usually just punctuation)
# Negation words, to combine with modifiers/intensifiers such as 'very' or 'hardly' in adjusting valence
negaters = ('not', 'no', 'neither', 'nor', 'nothing', 'never', 'none',
            'nowhere', 'noone', 'nobody',
            'lack', 'lacked', 'lacking', 'lacks', 'missing', 'without')
# -
# #### 2.2 Valence calculation
#
cleansuffix = '_clean'
cleantextcols = (1,) # the cleaned file has 2 columns (id, cleanedtext); text is column 1
valencesuffix = '_vals' # suffix for file to contain text-level valence data
# +
corpusfile = corpusfilestem + cleansuffix + '.csv'
valencefile = corpusfilestem + valencesuffix + '.csv'
# Compute per-text valence for every lexicon; writes <stem>_vals.csv.
# nrjobs=4 parallelizes across processes — see valence.py for details.
valence.calc_corpus_valence(corpusfile, valencefile,
                            lexnames, lexica_used, mods,
                            textcols=cleantextcols, modify=True, negaters=negaters,
                            ignore=ignorewords, skip=words2skip, header=False,
                            need2tokenize=False, makelower=True, skippunct=True,
                            nrjobs=4)
# -
# ### 3. Calibrate
#
# Now we calibrate our valences. We can either calibrate against the parameters calculated from another corpus we assume to be neutral, or we can calibrate against ourselves, simply standardizing to have a mean of 0 and a standard deviation of 1.
#
# To calibrate against an existing set of calibration parameters, set `extcalibrate` to be `True`, and specify the calibrationfile. The calibration file will contain the scaling parameters (mean, std. dev.) for each individual lexicon, as well as the standard deviation of their average, which we need to divide by as the final calibration step. The code snippet below loads the scaler and displays some information about it.
#
# To calibrate a corpus against itself (as here), set `extcalibrate` to `False`. If we want to use the resulting calibration parameters for additional corpora, set savescaler to `True`.
#
# +
# Load calibration file, as needed. Set to False to calibrate based on each corpus itself
extcalibrate = False
if extcalibrate:
    # Load calibration data
    neutralscaler, featurenames, nrfeatures, nravailable, stdev_adj, descriptor = \
        calibrate.load_scaler_fromcsv(calibrationfile, includevar=True, displayinfo=True)
else:
    # Self-calibration: downstream code treats '' as "no external scaler".
    neutralscaler = '' # dummy value
    stdev_adj = 1 # if not using pre-set calibration, also don't do any scale adjustment
    # NOTE(review): scalersuffix is set here but not used elsewhere in this notebook — confirm.
    scalersuffix = '_newscaler'
    print('No scaler loaded -> will calibrate corpus against itself.')
# -
# Now perform the calibration.
# +
calibratedsuffix = '_cal' # suffix for file containing calibrated valence data
idcol = 'id'
wordcountcol = 'nrwords'
keepcols = [idcol,] # word count info is automatically retained, because used as a filter/scaler
valencefile = corpusfilestem + valencesuffix + '.csv'
# Standardize per-lexicon valences and average them; writes <stem>_vals_cal.csv.
# Returns the fitted scaler, the std.-dev. adjustment, and the number of texts processed.
scaler, new_stdev_adj, nrtexts = \
    calibrate.calibrate_features(valencefile, lexnames,
                                 neutralscaler, stdev_adj=stdev_adj,
                                 filtercol=wordcountcol, keepcols=keepcols,
                                 missing=-999, outsuffix=calibratedsuffix)
# -
# +
# Optionally, save this scaler
# (give temporary name & descriptor; can always change later)
savescaler = False # set to True to save the newly generated scaler
scalername = 'newcorpus'
newcalibrationfile = calibrationfolder + 'Calibration_new_temporary.csv'
if savescaler:
    descriptor = 'New scaler for {} based on {} texts. Generated: {}'.format(
        scalername, nrtexts, datetime.now())
    # FIX: was `newscaler`, which is undefined and raised NameError whenever
    # savescaler was True; `scaler` is what calibrate_features returned above.
    calibrate.write_scaler_tocsv(newcalibrationfile, scaler, featurenames=lexnames,
                                 name=scalername, descriptor=descriptor, stdev_adj=new_stdev_adj)
# Describe the scaler (by reloading it)
# NOTE(review): this reloads `calibrationfile` (the pre-existing external calibration),
# not `newcalibrationfile` — confirm that is the intent when self-calibrating.
print('\nScaler pathname: {}\n'.format(calibrationfile))
neutralscaler, featurenames, nrfeatures, nravailable, stdev_adj, descriptor = \
    calibrate.load_scaler_fromcsv(calibrationfile, includevar=True, displayinfo=True)
# -
# ### Done!
|
Notebooks/MultiLexScaled - sentiment analysis (file-based) (2021-12-10).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Antennal neural network with intrinsic set-point (simulation)
#
# Here, we will simulate the antennal neural network based on both the anatomy and the control theoretic models. We will begin by simulating the network with an intrinsic set-point that arises due to the network connectivity. Later on (in another notebook) we will simulate it with varying levels of set-point inputs.
# Import necessary libraries
import nest
import numpy as np
from copy import deepcopy
from pprint import pprint
from collections import defaultdict
from pprint import pprint
# #%matplotlib inline
# %pylab inline
# ## Bristle field neural network
#
# A class that defines the underlying connectivity of a single bristle field. Sensory neurons underlying the bristle field are either active or inactive, defined by the cuticle position. These neurons feed onto the antennal motor neurons (along with interneurons carrying set-point), which are simply integrate and fire neurons (iaf_psc_alpha). The motor neurons control muscle firing, which in turn is a low pass (exponential moving average) filter with a time constant of mTau. The muscle firing in turn indirectly controls cuticle position, based on other antennal muscle activity.
# +
class bbnn():
    '''The bbnn class defines the Bohm's bristle neural network.
    It initializes sensory and motor neurons, and changes its rate
    based on bristle activation. It saves all the data in a dict'''
    # NOTE(review): uses the NEST 2.x API ("spike_detector", nest.PrintNetwork) —
    # these names changed in NEST 3.x; confirm the installed NEST version.
    def __init__(self, sim_t, Bnum, delay, r_act,
                 r_inact, r_in, weight, in_weight,
                 mTau, CutPos):
        '''
        sim_t - Simulation step (default = 1 ms);
        Bnum - Bristle number per field (default = 100 bristles);
        delay - Synaptic delay in transmission (default = 1 ms);
        r_act - Activated sensory neuron firing rate (default = 50.0);
        r_inact - Inactivated sensory neuron firing rate (default = 10.0);
        r_in - Interneuron firing rate;
        weight - synaptic weight between sensory and motor neurons;
        in_weight - synaptic weight between interneuron and motor neurons;
        mTau - integration time of the muscle;
        CutPos - starting cuticle (/antenna) position;
        '''
        # Save constants
        self.sim_t = sim_t
        self.Bnum = Bnum
        self.weight = weight
        self.delay = delay
        self.in_weight = in_weight
        self.mTau = mTau
        # Initialize network
        self.initNetwork(CutPos, r_act, r_inact, r_in)
    def initNetwork(self, CutPos, r_act, r_inact, r_in):
        '''Set up the network in NEST
        CutPos - starting cuticle (/antenna) position;
        r_act - firing rate of activated bristles;
        r_inact - firing rate of inactivated bristles
        r_in - firing rate of set-point interneuron'''
        # Save act, inact rates
        self.r_act = r_act
        self.r_inact = r_inact
        self.r_in = r_in
        # Obtain bristle activity rate (one {"rate": ...} dict per bristle)
        sn_rate = self.getFieldRate(CutPos, r_act, r_inact)
        # Declare Nodes
        ## Initialize poisson generators (one per bristle, plus one set-point input)
        self.sn_pgs = nest.Create("poisson_generator", self.Bnum, sn_rate)
        self.jo = nest.Create("poisson_generator", 1,
                              {"rate": float(self.r_in)})
        ## Initialize neurons
        self.sn = nest.Create("parrot_neuron", self.Bnum) # sensory neuron
        self.mn = nest.Create("iaf_psc_alpha") # motor neuron
        self.ms_in = nest.Create("parrot_neuron", 1) # interneuron
        ## Intialize spike detector
        self.sn_sd = nest.Create("spike_detector", 1)
        self.in_sd = nest.Create("spike_detector", 1)
        self.mn_sd = nest.Create("spike_detector", 1)
        # Setup Connections
        ## Poisson generators to parrot neurons
        nest.Connect(self.sn_pgs, self.sn, 'one_to_one')
        nest.Connect(self.jo, self.ms_in, 'one_to_one')
        ## Sensory to motor neurons
        nest.Connect(self.sn, self.mn, 'all_to_all', {
            "model": "static_synapse",
            "weight": self.weight,
            "delay": self.delay
        })
        # Interneuron weight is scaled relative to the sensory weight by in_weight.
        nest.Connect(self.ms_in, self.mn, 'all_to_all', {
            "model": "static_synapse",
            "weight": self.in_weight * self.weight,
            "delay": self.delay
        })
        ## Neurons to spike detectors
        nest.Connect(self.sn, self.sn_sd)
        nest.Connect(self.ms_in, self.in_sd)
        nest.Connect(self.mn, self.mn_sd)
        nest.PrintNetwork() # Not working for some reason
        # NEST network setup complete
        # Muscle activity related datastructure: list of (time, rate) tuples
        self.time = 0.0
        self.curr_ind = 0
        self.muscle_activity = []
        self.muscle_activity.append((self.time, 0.0))
        # Initialization complete
    def getMuscleActivity(self):
        ''' Obtain muscle firing rate based on the simulation time '''
        # Advance the local clock by one simulation step; moving_avg_rate (defined at
        # module level below) turns the motor-neuron spike train into a windowed rate.
        self.time += self.sim_t
        self.curr_ind += 1
        muscle_rate = moving_avg_rate(
            self.time,
            nest.GetStatus(self.mn_sd)[0]['events']['times'], self.mTau)
        self.muscle_activity.append((self.time, muscle_rate))
        return muscle_rate
    def changeRates(self, CutPos):
        ''' Change the firing rate of the poisson generators
        based on bristle activity (cuticle position)'''
        # Get Rate
        rate = self.getFieldRate(CutPos, self.r_act, self.r_inact)
        # Set rate
        nest.SetStatus(self.sn_pgs, rate)
    def getFieldRate(self, CutPos, r_act, r_inact):
        ''' Obtain activity rate of each of the sensory neurons underneath the bristles '''
        # The field is split at CutPos: the last CutPos bristles are "closed" (active),
        # the first Bnum - CutPos are "open" (inactive).
        Bfield_closed = np.concatenate(
            (np.zeros(self.Bnum - CutPos),
             np.ones(CutPos))) # 1 to CutPos are active bristles
        Bfield_open = np.concatenate(
            (np.ones(self.Bnum - CutPos),
             np.zeros(CutPos))) # CutPos to end are inactive bristles
        # Arbitary defintion of active and inactive. Simply change r_act, r_inact to inverse it
        field = ((Bfield_open * r_inact) + (Bfield_closed * r_act))
        return [{"rate": m} for m in field]
def moving_avg_rate(t, spike_times, tau):
    '''Return the firing rate at time t, estimated as a moving average
    of the spikes falling inside the window (t - tau, t].
    Each qualifying spike contributes 1000/tau (i.e. spikes-per-window
    converted to Hz, with times in ms). Assumes spike_times is sorted
    in ascending order; scanning stops at the first spike (from the
    end) that falls outside the window.'''
    rate = 0
    for spike in reversed(spike_times):
        # Window check: original loop counts spikes while t - spike < tau.
        if t - spike >= tau:
            break
        rate += 1000 / tau
    return rate
# -
# ## Antennal neural network
#
# A overarching class representing the antennal neural network. This class creates and connects bristle field neural network objects, two in this simulation, and updates cuticle/antennal position based on muscle activity of each of the bristle field neural network.
class ann():
    '''Antennal neural network class.
    Couples two bbnn bristle fields with opposed activation profiles and
    closes the loop: muscle-rate difference moves the cuticle, which in
    turn changes the bristle (sensory) firing rates.'''
    def __init__(self, CutLoc, sim_t=10.0, Bnum=100, weight=50.0,
                 delay=1.0, in_weight=1.0, mTau=50.0, mKp=50.0,
                 r_act=50.0, r_inact=5.0, r_in=50.0):
        '''Set defaults for the antennal neural network
        and initialize the network.
        Inputs:
        CutLoc - starting cuticle location'''
        # Set defaults
        self.sim_t = sim_t
        self.Bnum = Bnum
        self.weight = weight
        self.delay = delay
        self.in_weight = in_weight
        # muscle properties (mKp converts a rate difference into a position shift)
        self.mTau = mTau
        self.mKp = mKp
        # neuron characteristics
        self.r_act = r_act
        self.r_inact = r_inact
        self.r_in = r_in
        # cuticle position history: list of (time, position) tuples
        self.CutLoc = CutLoc
        self.cuticle_position = []
        # Initialize field 1
        self.field1 = bbnn(self.sim_t, self.Bnum, self.delay, self.r_act,
                           self.r_inact, self.r_in, self.weight,
                           self.in_weight, self.mTau, self.CutLoc)
        # Initialize field 2 (flipped activation and inactivation rates
        self.field2 = bbnn(self.sim_t, self.Bnum, self.delay, self.r_inact,
                           self.r_act, self.r_in, self.weight, self.in_weight,
                           self.mTau, self.CutLoc)
    def Simulate(self, simulation_time):
        '''Simulate the neural network.
        Runs NEST in steps of sim_t, updating the cuticle position each
        step from the two muscle rates; returns the per-field activity
        tuples produced by getBristleFieldActivity.'''
        time = np.arange(0, simulation_time, self.sim_t)
        CutLoc = self.CutLoc
        # Simulate till end of time (literally :P)
        for t in time:
            # Simulate all networks
            nest.Simulate(self.sim_t)
            # Obtain muscle activity
            firing1 = self.field1.getMuscleActivity()
            firing2 = self.field2.getMuscleActivity()
            # Update position based on activity; difference of antagonistic
            # muscle rates scaled by the gain mKp, clamped to [0, Bnum]
            CutShift = np.round((firing1 - firing2) / self.mKp)
            CutLoc = int(CutLoc - CutShift)
            if CutLoc < 0: CutLoc = 0
            if CutLoc > self.Bnum: CutLoc = self.Bnum
            # Change rates
            self.field1.changeRates(CutLoc)
            self.field2.changeRates(CutLoc)
            # Append cuticle location
            self.cuticle_position.append((t, CutLoc))
            # Continue simulation
        # Simulation done; Extract data
        field1_activity = self.getBristleFieldActivity(self.field1)
        field2_activity = self.getBristleFieldActivity(self.field2)
        return field1_activity, field2_activity
    def getBristleFieldActivity(self, field):
        '''Extract relevant activity details from the field.
        Returns (sn_spikes, mn_spikes, in_spikes, muscle_activity) where the
        spike dicts map sender id -> list of spike times.'''
        # Extract sensory neuron spike trains
        sn_spikes = defaultdict(list)
        sn_spikes_raw = zip(
            nest.GetStatus(field.sn_sd)[0]['events']['times'],
            nest.GetStatus(field.sn_sd)[0]['events']['senders'])
        for time, sender in sn_spikes_raw:
            sn_spikes[sender].append(time)
        # Extract motor neuron spike trains
        mn_spikes = defaultdict(list)
        mn_spikes_raw = zip(
            nest.GetStatus(field.mn_sd)[0]['events']['times'],
            nest.GetStatus(field.mn_sd)[0]['events']['senders'])
        for time, sender in mn_spikes_raw:
            mn_spikes[sender].append(time)
        # Extract interneuron spike trains
        in_spikes = defaultdict(list)
        in_spikes_raw = zip(
            nest.GetStatus(field.in_sd)[0]['events']['times'],
            nest.GetStatus(field.in_sd)[0]['events']['senders'])
        for time, sender in in_spikes_raw:
            in_spikes[sender].append(time)
        # Extract muscle activity
        muscle_activity = field.muscle_activity
        return (sn_spikes, mn_spikes, in_spikes, muscle_activity)
# +
# Run 1: coarse time step (10 ms), moderate gain.
# NOTE(review): this notebook targets Python 2 (see kernelspec) — the zip(*...)
# results below are subscripted as lists, which fails on Python 3 where zip is lazy.
nest.ResetKernel()
network_1 = ann(0, sim_t=10.0, weight=50, mKp=25)
field1_activity, field2_activity = network_1.Simulate(500)
sn_spikes1, mn_spikes1, in_spikes1, muscle_activity1 = field1_activity
muscle_activity1 = zip(*muscle_activity1)  # transpose to ([times], [rates])
sn_spikes2, mn_spikes2, in_spikes2, muscle_activity2 = field2_activity
muscle_activity2 = zip(*muscle_activity2)
cuticle_position = zip(*network_1.cuticle_position)
# -
# Plot: position, the two muscle rates, and their difference (pylab globals from %pylab).
subplot(311)
plot(cuticle_position[0], cuticle_position[1])
subplot(312)
plot(muscle_activity1[0], muscle_activity1[1])
plot(muscle_activity2[0], muscle_activity2[1])
subplot(313)
plot(muscle_activity2[0], np.array(muscle_activity2[1]) - np.array(muscle_activity1[1]))
# +
# Run 2: fine time step (1 ms), higher gain.
nest.ResetKernel()
network_1 = ann(0, sim_t=1.0, mKp=125)
field1_activity, field2_activity = network_1.Simulate(500)
sn_spikes1, mn_spikes1, in_spikes1, muscle_activity1 = field1_activity
muscle_activity1 = zip(*muscle_activity1)
sn_spikes2, mn_spikes2, in_spikes2, muscle_activity2 = field2_activity
muscle_activity2 = zip(*muscle_activity2)
cuticle_position = zip(*network_1.cuticle_position)
# -
subplot(311)
plot(cuticle_position[0], cuticle_position[1])
subplot(312)
plot(muscle_activity1[0], muscle_activity1[1])
plot(muscle_activity2[0], muscle_activity2[1])
subplot(313)
plot(muscle_activity2[0], np.array(muscle_activity2[1]) - np.array(muscle_activity1[1]))
|
Neural Circuit Simulation/.ipynb_checkpoints/nn-simulation-intrinsic-setpoint-rate-model-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc" style="margin-top: 1em;"><ul class="toc-item"></ul></div>
# -
# # Generative Adversarial Networks <a class="tocSkip">
# +
# Environment setup: seed NumPy and TF for reproducibility and print library versions.
# NOTE(review): tf.set_random_seed / the TF1 API below require TensorFlow 1.x.
import os
import numpy as np
np.random.seed(123)
print("NumPy:{}".format(np.__version__))
import pandas as pd
print("Pandas:{}".format(pd.__version__))
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.pylab import rcParams
rcParams['figure.figsize']=15,10
print("Matplotlib:{}".format(mpl.__version__))
import tensorflow as tf
tf.set_random_seed(123)
print("TensorFlow:{}".format(tf.__version__))
import keras
print("Keras:{}".format(keras.__version__))
# +
# Make the book's local datasetslib package importable and point it at ~/datasets.
DATASETSLIB_HOME = '../datasetslib'
import sys
if not DATASETSLIB_HOME in sys.path:
    sys.path.append(DATASETSLIB_HOME)
# %reload_ext autoreload
# %autoreload 2
import datasetslib
from datasetslib import util as dsu
datasetslib.datasets_root = os.path.join(os.path.expanduser('~'),'datasets')
# -
# # Get the MNIST data
# +
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(os.path.join(datasetslib.datasets_root,'mnist'), one_hot=False)
x_train = mnist.train.images
x_test = mnist.test.images
y_train = mnist.train.labels
y_test = mnist.test.labels
pixel_size = 28
def norm(x):
    # Rescale pixel values from [0, 1] to [-1, 1] to match the generator's tanh output.
    return (x-0.5)/0.5
# -
n_z = 256  # dimensionality of the latent (noise) vector
# Fixed noise batch reused across epochs so generated samples are comparable over training.
z_test = np.random.uniform(-1.0,1.0,size=[8,n_z])
# Function to display the images and labels
def display_images(images):
    """Render a batch of images side by side in a single row of 8 subplot slots.

    Expects `images` with shape (batch, height, width); each row of the
    batch is drawn into its own subplot, axes hidden, then the figure is shown.
    """
    for idx, image in enumerate(images, start=1):
        plt.subplot(1, 8, idx)
        plt.imshow(image)
        plt.axis('off')
    plt.tight_layout()
    plt.show()
# # Simple GAN in TensorFlow
# Start from a clean TF1 graph/session so the cell can be re-run safely.
tf.reset_default_graph()
keras.backend.clear_session()
# +
# graph hyperparameters
g_learning_rate = 0.00001
d_learning_rate = 0.01
n_x = 784 # number of pixels in the MNIST image as number of inputs
# number of hidden layers for generator and discriminator
g_n_layers = 3
d_n_layers = 1
# neurons in each hidden layer
g_n_neurons = [256, 512, 1024]
d_n_neurons = [256]
# define parameter dictionaries (needed later to give each optimizer its own var_list)
d_params = {}
g_params = {}
activation = tf.nn.leaky_relu
w_initializer = tf.glorot_uniform_initializer
b_initializer = tf.zeros_initializer
# define generator: z (latent, n_z-dim) -> 3 leaky-ReLU layers -> tanh image in [-1, 1]
z_p = tf.placeholder(dtype=tf.float32, name='z_p', shape=[None, n_z])
layer = z_p
# add generator network weights, biases and layers
with tf.variable_scope('g'):
    for i in range(0, g_n_layers):
        w_name = 'w_{0:04d}'.format(i)
        g_params[w_name] = tf.get_variable(
            name=w_name,
            shape=[n_z if i == 0 else g_n_neurons[i - 1], g_n_neurons[i]],
            initializer=w_initializer())
        b_name = 'b_{0:04d}'.format(i)
        g_params[b_name] = tf.get_variable(
            name=b_name, shape=[g_n_neurons[i]], initializer=b_initializer())
        layer = activation(
            tf.matmul(layer, g_params[w_name]) + g_params[b_name])
    #output (logit) layer
    i = g_n_layers
    w_name = 'w_{0:04d}'.format(i)
    g_params[w_name] = tf.get_variable(
        name=w_name,
        shape=[g_n_neurons[i - 1], n_x],
        initializer=w_initializer())
    b_name = 'b_{0:04d}'.format(i)
    g_params[b_name] = tf.get_variable(
        name=b_name, shape=[n_x], initializer=b_initializer())
    g_logit = tf.matmul(layer, g_params[w_name]) + g_params[b_name]
    # tanh keeps generated pixels in [-1, 1], matching norm() applied to real images
    g_model = tf.nn.tanh(g_logit)
# define discriminator(s): one set of weights, applied twice (to real and fake input)
# add discriminator network weights, biases
with tf.variable_scope('d'):
    for i in range(0, d_n_layers):
        w_name = 'w_{0:04d}'.format(i)
        d_params[w_name] = tf.get_variable(
            name=w_name,
            shape=[n_x if i == 0 else d_n_neurons[i - 1], d_n_neurons[i]],
            initializer=w_initializer())
        b_name = 'b_{0:04d}'.format(i)
        d_params[b_name] = tf.get_variable(
            name=b_name, shape=[d_n_neurons[i]], initializer=b_initializer())
    #output (logit) layer
    i = d_n_layers
    w_name = 'w_{0:04d}'.format(i)
    d_params[w_name] = tf.get_variable(
        name=w_name, shape=[d_n_neurons[i - 1], 1], initializer=w_initializer())
    b_name = 'b_{0:04d}'.format(i)
    d_params[b_name] = tf.get_variable(
        name=b_name, shape=[1], initializer=b_initializer())
# define discriminator_real: reuses d_params, so both towers share weights
# input real images
x_p = tf.placeholder(dtype=tf.float32, name='x_p', shape=[None, n_x])
layer = x_p
with tf.variable_scope('d'):
    for i in range(0, d_n_layers):
        w_name = 'w_{0:04d}'.format(i)
        b_name = 'b_{0:04d}'.format(i)
        layer = activation(
            tf.matmul(layer, d_params[w_name]) + d_params[b_name])
        # NOTE(review): in TF1 the second positional arg of tf.nn.dropout is
        # keep_prob, so 0.7 means "keep 70%" — confirm intended rate.
        layer = tf.nn.dropout(layer,0.7)
    #output (logit) layer
    i = d_n_layers
    w_name = 'w_{0:04d}'.format(i)
    b_name = 'b_{0:04d}'.format(i)
    d_logit_real = tf.matmul(layer, d_params[w_name]) + d_params[b_name]
    d_model_real = tf.nn.sigmoid(d_logit_real)
# define discriminator_fake
# input generated fake images
z = g_model
layer = z
with tf.variable_scope('d'):
    for i in range(0, d_n_layers):
        w_name = 'w_{0:04d}'.format(i)
        b_name = 'b_{0:04d}'.format(i)
        layer = activation(
            tf.matmul(layer, d_params[w_name]) + d_params[b_name])
        layer = tf.nn.dropout(layer,0.7)
    #output (logit) layer
    i = d_n_layers
    w_name = 'w_{0:04d}'.format(i)
    b_name = 'b_{0:04d}'.format(i)
    d_logit_fake = tf.matmul(layer, d_params[w_name]) + d_params[b_name]
    d_model_fake = tf.nn.sigmoid(d_logit_fake)
# Standard (non-saturating for D) GAN losses; var_list restricts each optimizer
# to its own network so D updates don't touch G and vice versa.
g_loss = -tf.reduce_mean(tf.log(d_model_fake))
d_loss = -tf.reduce_mean(tf.log(d_model_real) + tf.log(1 - d_model_fake))
g_optimizer = tf.train.AdamOptimizer(g_learning_rate)
d_optimizer = tf.train.GradientDescentOptimizer(d_learning_rate)
g_train_op = g_optimizer.minimize(g_loss, var_list=list(g_params.values()))
d_train_op = d_optimizer.minimize(d_loss, var_list=list(d_params.values()))
# +
# training hyperparameters
n_epochs = 400
batch_size = 100
n_batches = int(mnist.train.num_examples / batch_size)
n_epochs_print = 50
with tf.Session() as tfs:
    tfs.run(tf.global_variables_initializer())
    for epoch in range(n_epochs+1):
        epoch_d_loss = 0.0
        epoch_g_loss = 0.0
        for batch in range(n_batches):
            # Discriminator step: real batch (normalized to [-1, 1]) + fresh noise.
            x_batch, _ = mnist.train.next_batch(batch_size)
            x_batch = norm(x_batch)
            z_batch = np.random.uniform(-1.0,1.0,size=[batch_size,n_z])
            feed_dict = {x_p: x_batch,z_p: z_batch}
            _,batch_d_loss = tfs.run([d_train_op,d_loss], feed_dict=feed_dict)
            # Generator step: new noise batch; only g_params are updated (var_list).
            z_batch = np.random.uniform(-1.0,1.0,size=[batch_size,n_z])
            feed_dict={z_p: z_batch}
            _,batch_g_loss = tfs.run([g_train_op,g_loss], feed_dict=feed_dict)
            epoch_d_loss += batch_d_loss
            epoch_g_loss += batch_g_loss
        # Periodically report average losses and show samples from the fixed z_test.
        if epoch%n_epochs_print == 0:
            average_d_loss = epoch_d_loss / n_batches
            average_g_loss = epoch_g_loss / n_batches
            print('epoch: {0:04d} d_loss = {1:0.6f} g_loss = {2:0.6f}'
                  .format(epoch,average_d_loss,average_g_loss))
            # predict images using generator model trained
            x_pred = tfs.run(g_model,feed_dict={z_p:z_test})
            display_images(x_pred.reshape(-1,pixel_size,pixel_size))
# -
# # Simple GAN in Keras
import keras
from keras.layers import Dense, Input, LeakyReLU, Dropout
from keras.models import Sequential, Model
tf.reset_default_graph()
keras.backend.clear_session()
# +
# graph hyperparameters
g_learning_rate = 0.00001
d_learning_rate = 0.01
n_x = 784 # number of pixels in the MNIST image as number of inputs
# number of hidden layers for generator and discriminator
g_n_layers = 3
d_n_layers = 1
# neurons in each hidden layer
g_n_neurons = [256, 512, 1024]
d_n_neurons = [256]
# define generator: z (n_z-dim) -> Dense/LeakyReLU stack -> tanh image in [-1, 1]
g_model = Sequential()
g_model.add(Dense(units=g_n_neurons[0],
                  input_shape=(n_z,),
                  name='g_0'))
g_model.add(LeakyReLU())
for i in range(1,g_n_layers):
    g_model.add(Dense(units=g_n_neurons[i],
                      name='g_{}'.format(i)
                      ))
    g_model.add(LeakyReLU())
g_model.add(Dense(units=n_x, activation='tanh',name='g_out'))
print('Generator:')
g_model.summary()
# NOTE(review): the generator is only ever trained through gan_model below;
# this compile appears unused — confirm before removing.
g_model.compile(loss='binary_crossentropy',
                optimizer=keras.optimizers.Adam(lr=g_learning_rate)
                )
# define discriminator: image -> Dense/LeakyReLU/Dropout -> sigmoid real/fake score
d_model = Sequential()
d_model.add(Dense(units=d_n_neurons[0],
                  input_shape=(n_x,),
                  name='d_0'
                  ))
d_model.add(LeakyReLU())
d_model.add(Dropout(0.3))
for i in range(1,d_n_layers):
    d_model.add(Dense(units=d_n_neurons[i],
                      name='d_{}'.format(i)
                      ))
    d_model.add(LeakyReLU())
    d_model.add(Dropout(0.3))
d_model.add(Dense(units=1, activation='sigmoid',name='d_out'))
print('Discriminator:')
d_model.summary()
d_model.compile(loss='binary_crossentropy',
                optimizer=keras.optimizers.SGD(lr=d_learning_rate)
                )
# define GAN network: stacked G -> D; D is frozen here so gan_model.train_on_batch
# updates only the generator's weights (the trainable flag is toggled per-step below).
d_model.trainable=False
z_in = Input(shape=(n_z,),name='z_in')
x_in = g_model(z_in)
gan_out = d_model(x_in)
gan_model = Model(inputs=z_in,outputs=gan_out,name='gan')
print('GAN:')
gan_model.summary()
gan_model.compile(loss='binary_crossentropy',
                  optimizer=keras.optimizers.Adam(lr=g_learning_rate)
                  )
# +
# training hyperparameters
n_epochs = 400
batch_size = 100
n_batches = int(mnist.train.num_examples / batch_size)
n_epochs_print = 50
for epoch in range(n_epochs+1):
    epoch_d_loss = 0.0
    epoch_g_loss = 0.0
    for batch in range(n_batches):
        # Discriminator step: one combined batch of real + generated images,
        # with smoothed labels (0.9 for real, 0.1 for fake) instead of hard 1/0.
        x_batch, _ = mnist.train.next_batch(batch_size)
        x_batch = norm(x_batch)
        z_batch = np.random.uniform(-1.0,1.0,size=[batch_size,n_z])
        g_batch = g_model.predict(z_batch)
        x_in = np.concatenate([x_batch,g_batch])
        y_out = np.ones(batch_size*2)
        y_out[:batch_size]=0.9
        y_out[batch_size:]=0.1
        d_model.trainable=True
        batch_d_loss = d_model.train_on_batch(x_in,y_out)
        # Generator step: fresh noise labeled as "real" (all ones), with D frozen
        # so only G's weights move inside gan_model.
        z_batch = np.random.uniform(-1.0,1.0,size=[batch_size,n_z])
        x_in=z_batch
        y_out = np.ones(batch_size)
        d_model.trainable=False
        batch_g_loss = gan_model.train_on_batch(x_in,y_out)
        epoch_d_loss += batch_d_loss
        epoch_g_loss += batch_g_loss
    # Periodically report average losses and show samples from the fixed z_test.
    if epoch%n_epochs_print == 0:
        average_d_loss = epoch_d_loss / n_batches
        average_g_loss = epoch_g_loss / n_batches
        print('epoch: {0:04d} d_loss = {1:0.6f} g_loss = {2:0.6f}'
              .format(epoch,average_d_loss,average_g_loss))
        # predict images using generator model trained
        x_pred = g_model.predict(z_test)
        display_images(x_pred.reshape(-1,pixel_size,pixel_size))
|
Chapter14/ch-14a_SimpleGAN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise Notebook (DS)
# ` Make sure to finish DAY-4 of WEEK-1 before continuing here!!!`
# this code conceals irrelevant warning messages
import warnings
warnings.simplefilter('ignore', FutureWarning)
# ## Exercise 1: Numpy
#
#
# ### Numpy
#
# NumPy, which stands for Numerical Python, is a library consisting of multidimensional array objects and a collection of routines for processing those arrays. Using NumPy, mathematical and logical operations on arrays can be performed.
# #### Operations using NumPy (IMPORTANCE)
# Using NumPy, a developer can perform the following operations:
#
# 1. Mathematical and logical operations on arrays.
# 2. Fourier transforms (In mathematics, a Fourier series (/ˈfʊrieɪ, -iər/) is a periodic function composed of harmonically related sinusoids, combined by a weighted summation. ... The process of deriving the weights that describe a given function is a form of Fourier analysis.) and routines for shape manipulation.
# 3. Operations related to linear algebra. NumPy has in-built functions for linear algebra and random number generation.
# The most important object defined in NumPy is an N-dimensional array type called ndarray. It describes the collection of items of the same type. Items in the collection can be accessed using a zero-based index.
# `An instance of ndarray class can be constructed by different array creation routines described later in the tutorial. The basic ndarray is created using an array function in NumPy as follows`
import numpy
numpy.array  # just displays the function object; no call is made here
# It creates an ndarray from any object exposing array interface, or from any method that returns an array.
# NOTE(review): this line is meant to illustrate the array() signature, but it actually
# executes, passing the builtin `object` class as the first argument (yielding a 0-d
# object array) — illustrative only, not a meaningful call.
numpy.array(object, dtype = None, copy = True, order = None, subok = False, ndmin = 0)
# ### The above constructor takes the following parameters
#
# #### Sr.No. Parameter & Description:
# 1. object
#
# Any object exposing the array interface method returns an array, or any (nested) sequence.
#
# 2. dtype
#
# Desired data type of array, optional
#
# 3. copy
#
# Optional. By default (true), the object is copied
#
# 4. order
#
# C (row major) or F (column major) or A (any) (default)
#
# 5. subok
#
# By default, returned array forced to be a base class array. If true, sub-classes passed through
#
# 6. ndmin
#
# Specifies minimum dimensions of resultant array
# Note: All arithmetic operations can be performed elementwise on a NumPy array
import numpy as np
# `Examples`
# #### Operations on Numpy Array
# Example 2D array (2 rows x 3 columns), e.g. baseball players' heights
a = np.array([[1,2,3], [4,1,5]])
print (a)
# Addition (applied elementwise to every entry)
a+3
# Multiplication
a*2
# Subtraction
a-2
# Division (true division; result is a float array)
a/3
# ### Task
#
# 1. Write a NumPy program to test whether none of the elements of a given array is zero.
a = np.array([2,3,1,0,6,7])
a
# Element-by-element report: prints the index of each zero and confirms non-zeros.
# NOTE(review): the idiomatic one-line answer to "none of the elements is zero"
# is np.all(a) (False here, because a contains a 0).
for index,item in enumerate(a):
    if item==0:
        print('Zero value found at Index',index)
    else:
        print(item," is not zero")
# 2. Write a NumPy program to test whether any of the elements of a given array is non-zero.
import numpy as np
a = np.array([10,33,56,89,0,3,8,9,0,6])
a
# np.any(a) is True if at least one element is non-zero (truthy).
# NOTE(review): the next two print blocks are duplicates of each other
# (differing only in punctuation) — one could be removed.
a = np.array([10,33,56,89,0,3,8,9,0,6])
print("Original array")
print(a)
print("Test whether any of the elements of a given array is non-zero")
print(np.any(a))
a = np.array([10,33,56,89,0,3,8,9,0,6])
print("Original array:")
print(a)
print("Test whether any of the elements of a give array is non-zero")
print(np.any(a))
# Per-element report of which entries are zero (indices 4 and 8 here).
for index,item in enumerate(a):
    if item==0:
        print('Zero value found at Index',index)
    else:
        print(item," is not zero")
|
Anita Mburu-WT-21-022-Week -4-Assessment/8.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# This quickstart guide explains how to join two tables A and B using Jaccard similarity measure. First, you need to import the required packages as follows (if you have installed **py_stringsimjoin** it will automatically install the dependencies **py_stringmatching** and **pandas**):
# Import libraries
import py_stringsimjoin as ssj
import py_stringmatching as sm
import pandas as pd
import os, sys
print('python version: ' + sys.version)
print('py_stringsimjoin version: ' + ssj.__version__)
print('py_stringmatching version: ' + sm.__version__)
print('pandas version: ' + pd.__version__)
# Joining two tables using Jaccard measure typically consists of four steps:
# 1. Loading the input tables
# 2. Profiling the tables
# 3. Creating a tokenizer
# 4. Performing the join
# # 1. Loading the input tables
# We begin by loading the two tables. For the purpose of this guide,
# we use the sample dataset that comes with the package.
# +
# construct the path of the tables to be loaded. Since we are loading a
# dataset from the package, we need to access the data from the path
# where the package is installed. If you need to load your own data, you can directly
# provide your table path to the read_csv command.
table_A_path = os.sep.join([ssj.get_install_path(), 'datasets', 'data', 'person_table_A.csv'])
table_B_path = os.sep.join([ssj.get_install_path(), 'datasets', 'data', 'person_table_B.csv'])
# -
# Load csv files as dataframes.
A = pd.read_csv(table_A_path)
B = pd.read_csv(table_B_path)
print('Number of records in A: ' + str(len(A)))
print('Number of records in B: ' + str(len(B)))
A
B
# # 2. Profiling the tables
# Before performing the join, we may want to profile the tables to
# know about the characteristics of the attributes. This can help identify:
#
# a) unique attributes in the table which can be used as key attribute when performing
# the join. A key attribute is needed to uniquely identify a tuple.
#
# b) the number of missing values present in each attribute. This can
# help you in deciding the attribute on which to perform the join.
# For example, an attribute with a lot of missing values may not be a good
# join attribute. Further, based on the missing value information you
# need to decide on how to handle missing values when performing the join
# (See the section below on 'Handling missing values' to know more about
# the options available for handling missing values when performing the join).
#
# You can profile the attributes in a table using the following command:
# profile attributes in table A
ssj.profile_table_for_join(A)
# profile attributes in table B
ssj.profile_table_for_join(B)
# If the input tables do not contain any key attribute, then you need
# to create a key attribute. In the current example, both the input tables
# A and B have key attributes, and hence you can proceed to the next step.
# In case a table does not have a key attribute, you can
# add one using the following command:
B['new_key_attr'] = range(0, len(B))
B
# For the purpose of this guide, we will now join tables A and B on
# 'name' attribute using Jaccard measure. Next, we need to decide on what
# threshold to use for the join. For this guide, we will use a threshold of 0.3.
# Specifically, the join will now find tuple pairs from A and B such that
# the Jaccard score over the 'name' attributes is at least 0.3.
# # 3. Creating a tokenizer
# Since Jaccard measure treats input strings as sets of tokens, we
# need to select a tokenizer which can be used to tokenize each string
# into a set of tokens. Currently, we support tokenizers from **py_stringmatching**
# package which provides five different tokenizer types: alphabetical tokenizer,
# alphanumeric tokenizer, delimiter-based tokenizer, qgram tokenizer,
# and whitespace tokenizer.
#
# For the purpose of this guide, we will use a whitespace tokenizer. Once
# we have selected a tokenizer type, we need to create a tokenizer object as
# shown below:
# +
# create whitespace tokenizer for tokenizing 'name' attribute. The return_set flag should be set to True since
# Jaccard is a set based measure.
ws = sm.WhitespaceTokenizer(return_set=True)
# a whitespace tokenizer will tokenize the input string using whitespace
ws.tokenize('William Bridge')
# -
# # 4. Performing the join
# The next step after creating a tokenizer is to perform the join.
# The Jaccard join can be performed using the following command:
# +
# find all pairs from A and B such that the Jaccard score
# on 'name' is at least 0.3.
# l_out_attrs and r_out_attrs denote the attributes from the
# left table (A) and right table (B) that need to be included in the output.
output_pairs = ssj.jaccard_join(A, B, 'A.id', 'B.id', 'A.name', 'B.name', ws, 0.3,
l_out_attrs=['A.name'], r_out_attrs=['B.name'])
# -
len(output_pairs)
# examine the output pairs
output_pairs
# # Handling empty values
# By default, the pairs with empty sets of tokens are included
# in the output. This is because Jaccard of two empty sets is not
# well defined and we do not want to miss any possible matches.
# As you can see from the previous output, the tuple pair (a6, b7)
# is included in the output with a similarity score of 1. If you do
# not want to allow pairs containing empty sets of tokens in the
# output, then you need to set the **allow_empty** flag to False as
# shown below:
output_pairs = ssj.jaccard_join(A, B, 'A.id', 'B.id', 'A.name', 'B.name', ws, 0.3, allow_empty=False,
l_out_attrs=['A.name'], r_out_attrs=['B.name'])
output_pairs
# As you can see, the tuple pair (a6, b7) is not present in the output.
# # Handling missing values
# By default, pairs with missing values are not included
# in the output. This is because a string with a missing value
# can potentially match with all strings in the other table and
# hence the number of output pairs can become huge. If you want
# to include pairs with missing value in the output, you need to
# set the **allow_missing** flag to True, as shown below:
output_pairs = ssj.jaccard_join(A, B, 'A.id', 'B.id', 'A.name', 'B.name', ws, 0.3, allow_missing=True,
l_out_attrs=['A.name'], r_out_attrs=['B.name'])
output_pairs
# # Enabling parallel processing
# If you have multiple cores which you want to exploit for performing the
# join, you need to use the **n_jobs** option. If n_jobs is -1, all CPUs
# are used. If 1 is given, no parallel computing code is used at all,
# which is useful for debugging and is the default option. For n_jobs below
# -1, (n_cpus + 1 + n_jobs) are used (where n_cpus is the total number of
# CPUs in the machine). Thus for n_jobs = -2, all CPUs but one are used. If
# (n_cpus + 1 + n_jobs) becomes less than 1, then no parallel computing code
# will be used (i.e., equivalent to the default).
#
# The following command exploits all the cores available to perform the join:
output_pairs = ssj.jaccard_join(A, B, 'A.id', 'B.id', 'A.name', 'B.name', ws, 0.3,
l_out_attrs=['A.name'], r_out_attrs=['B.name'], n_jobs=-1)
len(output_pairs)
# You need to set n_jobs to 1 when you are debugging or you do not want
# to use any parallel computing code. If you want to execute the join as
# fast as possible, you need to set n_jobs to -1 which will exploit all
# the CPUs in your machine. In case there are other concurrent processes
# running in your machine and you do not want to halt them, then you may
# need to set n_jobs to a value below -1.
# # Additional options
# You can find all the options available for the Jaccard join
# function using the **help** command as shown below:
help(ssj.jaccard_join)
# # More information
# Similar to Jaccard measure, you can use the package to perform
# join using other measures such as cosine, Dice, edit distance, overlap and
# overlap coefficient. For measures such as TF-IDF which are not
# directly supported, you can perform the join using the filters provided
# in the package. To know more about other join methods as well as how to
# use filters, refer to the how-to guide (available from the
# [package homepage](https://sites.google.com/site/anhaidgroup/projects/py_stringsimjoin)).
|
notebooks/Joining two tables using Jaccard measure.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="artistic-ferry" active=""
# Program1.
# Theory: -
# List: The list is the most versatile datatype available in Python; it is written as comma-separated values (items) between square brackets. An important property of a list is that its items need not all be of the same type. It consists of a group of elements.
# A single list may contain DataTypes like Integers, Strings, as well as Objects. They are dynamic, mutable & ordered. It is represented by square brackets.
#
# Operation on List used
# 1.append(x) - To add value x at end of the list
# 2.sort() -To sort items in a list in ascending order
#
# Question 1: -
# Write a menu driven program to demonstrate use of list in python
# ●Put even and odd elements in two different list
# ●Merge and sort two list
# ●Update the first element with a value X
# ●Print middle element of list
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="reflected-mistake" outputId="0249e3a5-6aea-41bf-b335-ab2efcf4f6c3"
print("Output:-")
choice = 1
while choice != 0:
print("1.Put even and odd elemnts in two different list")
print("2.Merge and sort two list")
print("3.Update the first element with a value X")
print("4.Print middle element of list")
print("5.Exit")
choice = int(input("Enter choice: "))
if choice == 1:
a = []
n = int(input("Enter number of elements:"))
for i in range(1, n + 1):
b = int(input("Enter element:"))
a.append(b)
even = []
odd = []
for j in a:
if j % 2 == 0:
even.append(j)
else:
odd.append(j)
print("The even list", even)
print("The odd list", odd)
if (choice == 2):
c = []
d = []
new = []
n1 = int(input("Enter number of elements:"))
for k in range(1, n1 + 1):
e = int(input("Enter element:"))
c.append(e)
n1 = int(input("Enter number of elements:"))
for i in range(1, n1 + 1):
f = int(input("Enter element:"))
d.append(f)
new = c + d
new.sort()
print('Sorted list is:', new)
if choice==3:
h=[]
n2=int(input("Enter number of elements:"))
for i in range(1,n2+1):
k=int(input("Enter element:"))
h.append(k)
print("original list ",h)
elmnt=int(input("Enter the Element which is to be added"))
h[0]=elmnt
print("The updated list",h)
if choice==4:
y=[]
n3=int(input("Enter number of elements:"))
for i in range(1,n3+1):
x=int(input("Enter element:"))
y.append(x)
print("list is ",y)
print("mid value is ",y[int(len(y)/2)])
if choice==5:
break
else:
print("Enter a valid choice")
# + id="intensive-attraction" active=""
# Program 2: -
#
# Tuples
# A tuple is a collection of objects which ordered and immutable. Tuples are sequences, just like lists. The differences between tuples and lists are, the tuples cannot be changed unlike lists and tuples use parentheses, whereas lists use square brackets.
# A tuple in Python is similar to a list.
# <br>The difference between the two is that we cannot change the elements of a tuple once it is assigned whereas we can change the elements of a list.
# <br>A tuple is created by placing all the items (elements) inside parentheses (), separated by commas. The parentheses are optional, however, it is a good practice to use them.
#
#
# Question 2: -
# Write a menu driven program to demonstrate use of tuple in python
# ●Add and show details i.e roll no, name and marks of three subjects of N students in a list of tuple
# ●Display details of a student whose name is X
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="animal-dublin" outputId="60ed293e-dadf-42f7-8d0d-342fdb021473"
students=[]
ch=0
while(ch!=3):
print("MENU\n")
print("1. Add new student\n2. Display student info\n3. Exit\nEnter your choice:")
ch=int(input())
if (ch==1):
list=[]
rollno = input("Enter student roll no:")
list.append(rollno)
name = input("Enter student name:")
list.append(name)
marks1= input("Enter student physics marks:")
list.append(marks1)
marks2= input("Enter student chemistry marks:")
list.append(marks2)
marks3= input("Enter student maths marks:")
list.append(marks3)
students.append(list)
elif (ch==2):
flag=0
target=input("Enter name of student whose details are to be displayed:")
for index, tuple in enumerate(students):
name=tuple[1]
if(name==target):
print(students[index])
flag=1
break
if (flag==0):
print("Student does not exist in the list")
elif (ch==3):
print("Thank you for using the system!")
# + id="single-queen" active=""
# Program 3: -
#
# Sets
# Sets are unordered
# Set elements are unique.
# Duplicate elements are not allowed
# A set itself is mutable i.e. you can add or remove elements from set
# Elements contained in the sets must be of immutable type. (strings, tuples, Numeric)
# Sets can have different type of elements that is they can be heterogeneous in nature
#
# Set Methods: -
# 1.intersection(): - Returns a set, that is the intersection of two other sets
# 2.union(): - Return a set containing the union of sets
# 3.difference(): - Returns a set containing the difference between two or more sets
# 4.symmetric_difference(): - Returns a set with the symmetric differences of two sets
#
# Question 3: -
# Write a menu driven program to demonstrate use of set in python
# ●Read two sets A and B from user and display set A and B
# ●Perform intersection A ∩ B of two sets A and B
# ●Perform union A UB of two sets A and B
# ●Perform set difference A -B of two sets A and B
# ●Perform symmetric difference A ^ B of two sets A and B
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="failing-farming" outputId="e2317be7-fcaf-4f69-c3cc-306478aede00"
print("Output:-")
A={}
B={}
a=input('Enter the set A:').split(' ')
A=set(a)
print(A)
b=input('Enter the set B:').split(' ')
B=set(b)
print(B)
choice=1
while choice!=0:
print("1.Perform intersection A ∩ B of two sets A and B")
print("2.Perform union A U B of two sets A and B")
print("3.Perform set difference A U B of two sets A and B")
print("4.Perform symmetric difference A ^ B of two sets A and B")
print("5.Exit")
choice=int(input("Enter choice: "))
if choice==1:
print(A & B)
print()
elif choice==2:
print(A | B)
print()
elif choice==3:
print(A - B)
print()
elif choice==4:
print(A ^ B)
print()
elif choice==5:
break
else:
print("Enter a valid choice")
print()
# + id="alternative-speed" active=""
# Program 4: -
#
# Dictionary
# A dictionary represents a group of elements arranged in the form of key-value pairs.
# In the dictionary, the first element is considered as 'key' and immediate next element is taken as its 'value'.
# The key and values are separated by colon ( : )
# All the key-value pairs in a dictionary are inserted in curly braces
# sorted( ) function can be used to sort the dictionary.
# By default, the elements are sorted in ascending order.
# The format of sorted function is: -
# sorted(elements, key = <function> )
# Elements of the dictionary can be accessed using method d.items()
# key can be assigned with lambda function which will determine whether data to be sorted using Keys or Values.
# Following function will consider keys for sorting the elements
# key = lambda t : t[0]
#
# Question 4:-
# Write a program to demonstrate use of dictionary in python
# ●Read a dictionary from the user and display.
# ●To sort a dictionary by key
# ●Concatenate two Python dictionaries into a new one
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="failing-circus" outputId="7d69cb56-2906-436d-e69b-f8f68f9a88f8"
print("Output:-")
a={}
b={}
n=int(input("Enter the no of elements for a "))
for i in range(n):
k=input("Enter the key ")
v=input("Enter the value ")
a.update({k:v})
print()
print("The dictionary a is: ",a)
print()
n1=int(input("Enter the no of elements for b "))
for i in range(n1):
t=input("Enter the key ")
w=input("Enter the value ")
b.update({t:w})
print()
print("The dictionary b is:",b)
print()
print("The sorted dictionary a according to the key is ", sorted(a.items()))
print()
print("The sorted dictionary b according to the key is ", sorted(b.items()))
print()
a.update(b)
print("The concatenated dictionary is ",a)
3
|
Experiment4.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
from tftb.generators import noisecu
from scipy import signal
def simple_sin(frequency, amplitude, phase, time):
    """Return (time, samples) of a sinusoid evaluated at the given times.

    samples = amplitude * sin(2*pi*frequency*time + phase)
    """
    angular = 2 * np.pi * frequency
    samples = amplitude * np.sin(angular * time + phase)
    return time, samples
def chirp(init_freq, chirp_rate, amplitude, time):
    """Return (time, samples) of a linear chirp scaled by `amplitude`.

    Sweeps from `init_freq` at t=0 to `chirp_rate` at t=1 s — note that
    `chirp_rate` is the frequency reached at t=1, not a rate in Hz/s.
    """
    sweep = signal.chirp(time, f0=init_freq, t1=1, f1=chirp_rate)
    return time, amplitude * sweep
# +
# Sampling parameters and composite test signal (sine + chirp).
fs = 10e3             # sampling frequency [Hz]
T = 1/float(fs)       # sampling period [s] — bug fix: was `Fs`, an undefined name
N = 1e5               # number of samples
f1 = 20               # sine frequency [Hz]
a1 = 20               # sine amplitude
phase = 0
f2 = 0                # chirp start frequency [Hz]
rate = 2              # chirp end frequency at t=1 s [Hz]
a2 = 20               # chirp amplitude
time_points = np.arange(N)/float(fs)
input_signals = []
_, time_series1 = simple_sin(f1, a1, phase, time_points)
_, time_series2 = chirp(f2, rate, a2, time_points)
time_series = np.array(time_series1) + np.array(time_series2)
print(len(time_series))
"""
# Analytic complex uniform white noise.
noise = noisecu(L)
S = s1 + s2 + s3 + s4 + s5# + np.absolute(noise)
plt.figure(1)
plt.suptitle("Signals")
plt.subplot(7,1,6)
plt.ylabel("3 Hz")
plt.plot(s1)
plt.subplot(7,1,5)
plt.ylabel("7 Hz")
plt.plot(s2)
plt.subplot(7,1,4)
plt.ylabel("12 Hz")
plt.plot(s3)
plt.subplot(7,1,3)
plt.ylabel("15 Hz")
plt.plot(s4)
plt.subplot(7,1,2)
plt.ylabel("18 Hz")
plt.plot(s5)
plt.subplot(7,1,1)
plt.ylabel("Noise")
plt.plot(noise)
plt.subplot(7,1,7)
plt.ylabel("Sum")
plt.plot(S)
"""
# +
# FFT amplitude spectrum of the composite signal (positive frequencies only).
result = np.fft.fft(time_series)
freq = np.fft.fftfreq(int(N), 1/float(fs))
plt.figure(2)
plt.grid()
plt.xlabel("Hz")
# Bug fix: was `np.absolute(result//int(N))` — floor division of a complex
# array is invalid on modern NumPy and destroys the amplitude; take the
# magnitude first, then scale by 2/N for a single-sided spectrum.
plt.plot(freq[0:int(N)//2], 2*np.absolute(result)[0:int(N)//2]/int(N))
#plt.plot(freq, 2*np.absolute(result))
# +
# Short-time Fourier Transform
from scipy.signal import hamming
from tftb.processing.linear import ShortTimeFourierTransform
# Bug fix: the analysed signal is `time_series`; `S` only appears inside the
# commented-out example above and would raise NameError here.
Nx = len(time_series)
nsc = int(np.floor(Nx/4.5))      # window length (~Nx/4.5 samples)
window = hamming(nsc)
nov = int(np.floor(nsc/2))       # number of frequency bins (half the window)
stft = ShortTimeFourierTransform(time_series, n_fbins=nov, fwindow=window)
stft.run()
stft.plot(show_tf=True, cmap=plt.cm.gray)
# -
# Wigner-Ville Distribution of the composite signal.
from tftb.processing.cohen import WignerVilleDistribution
# Bug fix: analyse `time_series`; `S` was never defined in live code.
wvd = WignerVilleDistribution(time_series)
wvd.run()
wvd.plot(kind='contour')
# Continuous Wavelet Transform
from scipy.signal import cwt, ricker
widths = np.arange(1, 71)
# Bug fix: transform `time_series` (the signal built above); `S` is undefined.
cwtmatr = cwt(time_series, ricker, widths)  # ricker is the Mexican-hat wavelet
plt.figure(5)
plt.xlabel("Time [s]")
plt.ylabel("Scale [a] of Wavelet")
plt.imshow(cwtmatr, aspect='auto')
# +
# Hilbert Transform: amplitude envelope and instantaneous frequency.
# from https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.signal.hilbert.html
from scipy.signal import hilbert
# Bug fixes: `S`, `t` and `Fs` were undefined in live code — the signal is
# `time_series`, the time axis is `time_points`, the sample rate is `fs`.
analytic_signal = hilbert(time_series)
amplitude_envelope = np.abs(analytic_signal)
instantaneous_phase = np.unwrap(np.angle(analytic_signal))
instantaneous_frequency = (np.diff(instantaneous_phase) / (2.0*np.pi) * fs)
fig = plt.figure(6)
ax0 = fig.add_subplot(211)
ax0.plot(time_points, time_series, label='signal')
ax0.plot(time_points, amplitude_envelope, label='envelope')
ax0.set_xlabel("time in seconds")
ax0.legend()
ax1 = fig.add_subplot(212)
# diff() shortens the array by one, hence the [1:] on the time axis.
ax1.plot(time_points[1:], instantaneous_frequency)
ax1.set_xlabel("time in seconds")
ax1.set_ylim(0.0, 120.0)
|
analysis/timefreqanalysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/eyesimk/CS412-MachineLearning/blob/main/hw1_kebabci_eceyesim.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="MM-wwHR8qL0M"
# ## 1) Load training dataset
#
# * Read from Keras library.
#
#
# + id="Iz3iMpjVfa5I"
# Load the Pandas libraries with alias 'pd'
import pandas as pd
import numpy as np
from keras.datasets import mnist
# Read data
(x_train, y_train) , (x_test, y_test) = mnist.load_data()
# + id="CA_AjGQasjvS" colab={"base_uri": "https://localhost:8080/", "height": 263} outputId="a0ed9457-7335-42a8-b25c-a662e50eb6b1"
# print shape
print('Data Dimensionality: ')
print ("Train X, Y shape: ", x_train.shape, y_train.shape )
print ("Test X, Y shape: ", x_test.shape, y_test.shape)
# print first 5 rows in your dataset
train_df = pd.DataFrame(x_train.reshape(60000, -1))
train_df['label'] = y_train
print('Head of Data: ')
print(train_df.head())
# print attribute names
print ("Attribute Names: ", train_df.columns.tolist())
# + [markdown] id="Vop4rwZVxh9Z"
# ##2) Shuffle and split training data as train(80%) and validation(20%)
# + id="KEhk8R24xhdY" colab={"base_uri": "https://localhost:8080/", "height": 332} outputId="56c6cba3-4337-402a-adb0-7c853d64f59a"
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
# Shuffle the training data
x_train,y_train = shuffle (x_train, y_train, random_state=42)
# Split 80-20
x_ntrain, x_valid, y_ntrain, y_valid = train_test_split(x_train,y_train, test_size=2/10, random_state=42)
print("Train data shape:", x_ntrain.shape, "Train label shape:",y_ntrain.shape, "Validation data shape:", x_valid.shape, "Validation label shape:", y_valid.shape)
ntrain_df = pd.DataFrame(x_train.reshape(60000, -1))
valid_df = pd.DataFrame(x_valid.reshape(len(x_valid), -1))
print("New train set",ntrain_df.head())
print("Validation set",ntrain_df.head())
# + [markdown] id="xR1oMsPu0AV_"
# ##3) Training a decision tree classifier on train data and model selection using the validation data
#
#
# + id="Nv6oac-T3Wy5" colab={"base_uri": "https://localhost:8080/", "height": 383} outputId="9a5d179d-1de8-411b-e390-8d4813132be0"
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from sklearn import metrics
# Train decision tree classifiers
# random state parameter to have the same random value each run
class_2 = DecisionTreeClassifier(min_samples_split = 2, random_state = 42)
class_5 = DecisionTreeClassifier(min_samples_split = 5, random_state = 42)
class_10 = DecisionTreeClassifier(min_samples_split = 10, random_state = 42)
x_ntrain_reshaped = x_ntrain.reshape((48000,28*28))
class_2.fit(x_ntrain_reshaped,y_ntrain)
class_5.fit(x_ntrain_reshaped,y_ntrain)
class_10.fit(x_ntrain_reshaped,y_ntrain)
# Evaluate on validation set
valid_2 = class_2.predict(x_valid.reshape(len(x_valid),-1))
valid_5 = class_5.predict(x_valid.reshape(len(x_valid),-1))
valid_10 = class_10.predict(x_valid.reshape(len(x_valid),-1))
val_acc1 = metrics.accuracy_score(y_valid,valid_2)
val_acc2 = metrics.accuracy_score(y_valid,valid_5)
val_acc3 = metrics.accuracy_score(y_valid,valid_10)
y_train1 = class_2.predict(x_ntrain.reshape(len(x_ntrain),-1))
y_train2 = class_5.predict(x_ntrain.reshape(len(x_ntrain),-1))
y_train3 = class_10.predict(x_ntrain.reshape(len(x_ntrain),-1))
train_acc1 = metrics.accuracy_score(y_ntrain,y_train1)
train_acc2 = metrics.accuracy_score(y_ntrain,y_train2)
train_acc3 = metrics.accuracy_score(y_ntrain,y_train3)
print("Accuracy for minimum samples split 2 for validation: ", val_acc1)
print("Accuracy for minimum samples split 5 for validation: ",val_acc2)
print("Accuracy for minimum samples split 10 for validation: ", val_acc3)
print("Accuracy for minimum samples split 2 for training: ", train_acc1)
print("Accuracy for minimum samples split 5 for training: ",train_acc2)
print("Accuracy for minimum samples split 10 for training: ", train_acc3)
# Plot errors
x_axis = ['model1', 'model2', 'model3']
train_acc = [train_acc1, train_acc2, train_acc3]
val_acc = [val_acc1, val_acc2, val_acc3]
plt.scatter(x_axis, val_acc)
plt.scatter(x_axis, train_acc)
plt.plot(x_axis, val_acc, label = "Validation acc")
plt.plot(x_axis, train_acc, label = "Train acc")
plt.xlabel('Models')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
# + [markdown] id="boqe46St1--f"
# ## 4) Testing with the chosen classifier on test set
#
# + id="IPLke8jyFGng" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="7a62d60f-5f27-48c4-af4a-7809f32a1bed"
from sklearn.metrics import accuracy_score
# Load test data
# already loaded in the first section
test_df = pd.DataFrame(x_test.reshape(len(x_test), -1))
test_df['label'] = y_test
print(test_df.head())
# test prediction using a decision tree with all default parameters and ..... min-split value
x_test_reshape = x_test.reshape((10000,28*28))
#test_1 = class_2.predict(x_test_reshape)
test_2 = class_5.predict(x_test_reshape)
#test_3 = class_10.predict(x_test_reshape)
# Report your accuracy
test_acc = metrics.accuracy_score(y_test,test_2)
print("Accuracy for minimum samples split 5 for test: ", test_acc)
# + [markdown] id="WG3473I9dGE8"
# ##5) Report
#
#
# + [markdown] id="nfeKfkooMhHG"
# In this assignment, I aimed to develop a Decision Tree Classifier to find the most accurate classifier that will predict the digits in the images in the MNIST dataset. For the dataset, the train set and test set are loaded separately from Keras. The size of the train set is 60000, while the size of the test set is 10000. To generate a validation set, the training set is firstly shuffled, then split with the respected percentage, %80-%20. For data preprocessing, I benefitted from 3 main libraries; Pandas, numPy, and matplotlib. I used "numpy.reshape" to make the dataset structure compatible with the functions, panda for converting my dataset into a Dataframe so that the head of the data can be printed & observed, I used Matplotlib to plot the accuracies,split the data for validation, as I mentioned before. After training the decision tree classifier on training data, I used the validation dataset to select a model between 3 classifiers. I received three different accuracy values for three different decision tree classifier values: 2,5 and 10(min_samples_split).
#
# Min Samples Split Validation Accuracies
# 2 0.8640833333333333
# 5 0.8688333333333333
# 10 0.86825
#
# I obtained the best result with the classifier (min_samples_split = 5), with an accuracy rate of 86.88% on the validation set. Therefore I decided to use this model on the test data. The chosen model, the decision tree classifier with min_samples_split = 5, gave a classification accuracy of 86.6% on the test data.
#
#
|
hw1_kebabci_eceyesim.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
df = pd.read_csv('healthcare-dataset-stroke-data.csv')
df.head()
df.shape
# __Target Variable: Stroke__
df['stroke'].value_counts()
df.isnull().sum()
# __Gender, ever_married, work_type, Residence_type, smoking_status, hypertension, heart_disease : Categorical Variables__
# __age , avg_glucose_level, bmi : Numerical Variables__
df['gender'].value_counts()
df['hypertension'].value_counts()
df['heart_disease'].value_counts()
df['ever_married'].value_counts()
df['work_type'].value_counts()
df['smoking_status'].value_counts()
df['Residence_type'].value_counts()
# #### Data Preprocessing
df = df[df['gender']!='Other'].copy()
df.head()
df.columns
df = pd.get_dummies(columns=['gender','ever_married',
'work_type', 'Residence_type','smoking_status'], data = df, prefix_sep= '_', drop_first= True)
df.head()
df.info()
df.isnull().sum()
# bmi has ~200 missing values, which can be imputed.
from sklearn.impute import KNNImputer
knn_impute = KNNImputer(n_neighbors= 10, weights= 'distance')
imputed_bmi = knn_impute.fit_transform(df[['bmi']])
df = df.drop(columns=['bmi'], axis= 1)
df.head()
df['bmi_imputed'] = imputed_bmi
df.head()
# #### Modeling
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
df.columns
X = df.drop(columns=['id','stroke'])
y = df['stroke']
X.shape, y.shape
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=420)
X_train.shape , X_test.shape
# Instantiate the four candidate classifiers.
model_rf = RandomForestClassifier(n_estimators= 100 , n_jobs= -1, max_depth=2, criterion= 'entropy')
model_svm = SVC()
model_dt = DecisionTreeClassifier(criterion='gini', splitter= 'best')
model_lr = LogisticRegression()
model_rf.fit(X_train, y_train)
model_svm.fit(X_train, y_train)
model_dt.fit(X_train, y_train)
# Bug fix: this fit was commented out, yet model_lr.predict(X_test) is
# called below — predicting with an unfitted model raises NotFittedError.
model_lr.fit(X_train, y_train)
y_pred_rf = model_rf.predict(X_test)
y_pred_svm = model_svm.predict(X_test)
y_pred_dt = model_dt.predict(X_test)
y_pred_lr = model_lr.predict(X_test)
print('Accuracy of Random Forest', np.round(accuracy_score(y_pred= y_pred_rf, y_true = y_test),3))
print('Accuracy of SVM', np.round(accuracy_score(y_pred= y_pred_svm, y_true = y_test),3))
print('Accuracy of Decision Tree', np.round(accuracy_score(y_pred= y_pred_dt, y_true = y_test),3))
print('Accuracy of Logistic Regression', np.round(accuracy_score(y_pred= y_pred_lr, y_true = y_test),3))
# __We will go with Random Forest Classifier__
|
FastAPI-Deployment/fastapi-ml-demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Vectors, matrices and norms
# The notebook demonstrate the computation and use of some important concepts in linear algebra. NumPy is used for the numerical computations.
# ## Vector norms
# The $l_{p}$-norm,of a vector $\boldsymbol{x} \in \mathbb{C}^{n}$ is
#
# $$
# \| \boldsymbol{x} \|_{p} = \left( \sum_{i=1}^{n} |x_{i}|^{p} \right)^{1/p}
# $$
#
# Recall that when $p = \infty$, we have the maximum norm:
#
# $$
# \| \boldsymbol{x} \|_{\infty} = \max(|x_{1}|, \ldots , |x_{n}|)
# $$
#
#
#
# NumPy can compute $l_{p}$ norms of vectors. To see how, we first import NumPy and create a random vectors of length 10:
# +
import numpy as np
np.random.seed(2)
x = np.random.rand(10) + 1j*np.random.rand(10)
print(x)
# -
# We can now compute a number of $l_{p}$ norms of $\boldsymbol{x}$:
for p in range(1, 5):
x_norm = np.linalg.norm(x, p)
print("The l_{} norm of x is: {}".format(p, x_norm))
# For the $l_{\infty}$ norm:
x_inf = np.linalg.norm(x, np.inf)
print("The max norm of x is: {}".format(x_inf))
# ## Matrix norms
# Norms of matrices can also be computed. The more interesting (and abstract) norms are *operator* norms. These are also known as *induced* norms.
# ### Operator norms
# For an $n \times n$ matrix $\boldsymbol{A}$, the norm of the matrix is a measure of the 'maximum change' in relative length it can induce when applied to a vector. If we consider:
#
# $$
# \| \boldsymbol{A} \boldsymbol{x} \| \le C \| \boldsymbol{x}\| \quad \forall \boldsymbol{x} \in \mathbb{C}^{d},
# $$
#
# then the smallest possible $C$ is the norm of $\boldsymbol{A}$. The norm of $\boldsymbol{A}$ is denoted by $\|\boldsymbol{A}\|$:
#
# $$
# \| \boldsymbol{A} \boldsymbol{x} \| \le \| \boldsymbol{A}\| \| \boldsymbol{x}\| \quad \forall \boldsymbol{x} \in \mathbb{C}^{d},
# $$
#
# This can be rearranged to provide the usual definition of a matrix norm:
#
# $$
# \| \boldsymbol{A} \| = \max_{\boldsymbol{x} \in \mathbb{C}^{n} \backslash \boldsymbol{0}}
# \frac{\| \boldsymbol{A} \boldsymbol{x}\|}{\|\boldsymbol{x}\| }
# $$
#
# To compute actual norms of a matrix, we need to choose how we measure the length of a vector, i.e. which norm to use. If we choose the $l_{2}$-norm, then:
#
# $$
# \| \boldsymbol{A} \|_{2} = \max_{\boldsymbol{x} \in \mathbb{C}^{n} \backslash \boldsymbol{0}}
# \frac{\| \boldsymbol{A} \boldsymbol{x}\|_{2}}{\|\boldsymbol{x}\|_{2} }
# $$
#
# As discussed in the lectures, some norms are relatively inexpensive to compute for large matrices, and others are expensive. We can again use NumPy to compute some matrix norms. We first create a matrix filled with random numbers:
A = np.random.rand(5, 5) + 1j*np.random.rand(5, 5)
print(A)
# and then compute some norms:
print("The 1-norm of A is: {}".format(np.linalg.norm(A, 1)))
print("The 2-norm of A is: {}".format(np.linalg.norm(A, 2)))
print("The max-norm of A is: {}".format(np.linalg.norm(A, np.inf)))
# ### Vector-like norms
# It is sometimes convenient to work with matrix norms that are similar to vector norms. A commonly used matrix norm is the Frobenius norm. It is analogous to the $l_{2}$ norm of a vector, and is defined by:
#
# $$
# \|\boldsymbol{A} \|_{F} = \left( \sum_{i}\sum_{j} |a_{ij}|^{2} \right)^{1/2}.
# $$
#
# To compute the Frobenius norm:
A_frobenius = np.linalg.norm(A, 'fro')
print("The Frobenius norm of A is: {}".format(A_frobenius))
# ## Condition number
# The condition number of a matrix is important when working with matrices numerically because it tells us something about how stable algorithms will be with respect to round-off errors, and how fast some iterative techniques will converge. Recall that the condition number $\kappa$ of a matrix $\boldsymbol{A}$ is defined as:
#
# $$
# \kappa(\boldsymbol{A}) = \| \boldsymbol{A} \| \|\boldsymbol{A}^{-1}\|
# $$
#
# If we use the 2-norm, it was shown that:
#
# $$
# \kappa_{2}(\boldsymbol{A}) = \frac{\sqrt{\lambda_{\max}(\boldsymbol{A}^{T}\boldsymbol{A})}}{\sqrt{\lambda_{\min}(\boldsymbol{A}^{T}\boldsymbol{A})}}
# $$
# ### Effect of poor conditioning on errors
# It was shown in lectures that when solving $\boldsymbol{A} \boldsymbol{x} = \boldsymbol{b}$, if the condition number of $\boldsymbol{A}$ is large then small errors in $\boldsymbol{b}$ can manifest themselves as large errors in the solution, $\boldsymbol{x}$. We explore this now for the notoriously ill-conditioned *Hilbert matrix*. Entries of the Hilbert matrix $\boldsymbol{H}$ are given by
#
# $$
# H_{ij} = \frac{1}{i + j - 1}.
# $$
#
# We can use a SciPy function to create a $n \times n$ Hilbert matrix:
# +
import scipy.linalg as la

# Build the 6x6 Hilbert matrix — a classic example of an ill-conditioned matrix.
H = la.hilbert(6)
print(H)

# Report the l_2 condition number of the Hilbert matrix itself.
# Bug fix: the original computed np.linalg.cond(A, 2) on the unrelated
# random matrix A, while the surrounding text discusses H.
print("Condition number is: {}".format(np.linalg.cond(H, 2)))
# -
# Even for this small Hilbert matrix, the condition number is large.
#
# We now experiment with solving $\boldsymbol{A} (\boldsymbol{x}+ \delta \boldsymbol{x}) = \boldsymbol{b} + \delta \boldsymbol{b}$, and compare the error $\|\delta{\boldsymbol{x}}\| / \|\boldsymbol{x}\|$ to $\|\delta{\boldsymbol{b}}\| / \|\boldsymbol{b}\|$. We will presume that the NumPy linear solvers can cope with the exact system $\boldsymbol{A}\boldsymbol{x} =\boldsymbol{b}$ (in practice this will be an issue).
#
# We first construct $\boldsymbol{b}$, $\delta\boldsymbol{b}$ and $\boldsymbol{b} + \delta\boldsymbol{b}$:
# +
b = np.ones(H.shape[0])
b_delta = 1.0e-6*np.random.rand(H.shape[0])
# Perturbed RHS
b1 = b + b_delta
# -
# We now solve for $\boldsymbol{A} \boldsymbol{x}= \boldsymbol{b}$ and $\boldsymbol{A} (\boldsymbol{x}+ \delta \boldsymbol{x}) = \boldsymbol{b} + \delta \boldsymbol{b}$:
x = np.linalg.solve(H, b)
x1 = np.linalg.solve(H, b1)
# We now compare $\|\delta{\boldsymbol{x}}\| / \|\boldsymbol{x}\|$ and $\|\delta{\boldsymbol{b}}\| / \|\boldsymbol{b}\|$ using the $l_{2}$-norm:
error_x = np.linalg.norm(x - x1, 2)/np.linalg.norm(x, 2)
error_b = np.linalg.norm(b_delta, 2)/np.linalg.norm(b, 2)
print("Relative error in x and b: {}, {}".format(error_x, error_b))
# Even for this small Hilbert matrix, a small error in $\boldsymbol{b}$ leads to a much larger error in $\boldsymbol{x}$. This will get worse with problem size. We'll now put the test inside a loop to test larger matrix sizes:
for n in (20, 100, 1000):
H = la.hilbert(n)
print("- For {} x {} matrix, the condition number is: {}".format(n, n, np.linalg.cond(H, 2)))
b = np.ones(H.shape[0])
b_delta = 1.0e-5*np.random.rand(H.shape[0])
b1 = b + b_delta
x = np.linalg.solve(H, b)
x1 = np.linalg.solve(H, b1)
error_x = np.linalg.norm(x - x1, 2)/np.linalg.norm(x, 2)
error_b = np.linalg.norm(b_delta, 2)/np.linalg.norm(b, 2)
print(" Relative error in x and b: {}, {}".format(error_x, error_b))
# ### Condition number versus determinant
# It was discussed in lectures that the condition number of a matrix and its determinant are not necessarily related. Some small examples were presented. Here we consider some larger problems.
#
# We consider an $n \times n$ upper triangular matrix filled with two, and one on the diagonal:
#
# $$
# \boldsymbol{A} = \begin{bmatrix}
# 1 & 2 & \ldots & 2
# \\
# & 1 & \ddots & \vdots
# \\
# & & \ddots & 2
# \\
# & & & 1
# \end{bmatrix}
# $$
#
# This matrix has a determinant of one, and a condition number that grows with $n$. We can explore this with NumPy for increasing $n$.
# +
def test_matrix(n):
A = np.zeros((n, n))
A[np.triu_indices(n)] = 2.0
np.fill_diagonal(A, 1.0)
return A
# Show that the determinant stays at 1 while the l_2 condition number
# blows up as n grows — they are not related.
for n in (2, 10, 100, 500):
    A = test_matrix(n)
    print("- Matrix size: {} x {}".format(n, n))
    print("  * l_2 condition number is: {}".format(np.linalg.cond(A, 2)))
    print("  * determinant is: {}".format(np.linalg.det(A)))
|
01-Fundamentals.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import cv2
from PIL import Image, ImageOps
import imgaug as ia
import imgaug.augmenters as iaa
import requests
from io import BytesIO
from keras.models import load_model, Model
import keras.models
import keras.optimizers
from keras.layers import Flatten, Dense, GlobalAveragePooling2D, Reshape
from keras.models import load_model
import tensorflow as tf
import glob
import warnings
warnings.simplefilter('ignore')
import datetime
# # Train Dataset
# Build the training set: for every class folder under latih/, letterbox each
# .jpg to 224x224 (aspect ratio preserved, black padding) and save the images
# and integer class labels as per-class .npy files.
idx_class = 0
for folder in glob.glob('latih/*'):
    image_list = []
    target_list = []
    for filename in glob.glob(folder + '/*.jpg'):
        # OpenCV reads BGR; convert to RGB before handing off to PIL.
        im = cv2.imread(filename)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        im = Image.fromarray(im)
        desired_size = 224
        old_size = im.size
        # Scale so the longest edge becomes desired_size.
        ratio = float(desired_size) / max(old_size)
        new_size = tuple([int(x * ratio) for x in old_size])
        # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
        # same resampling filter and has been available for many versions.
        im = im.resize(new_size, Image.LANCZOS)
        # Paste the resized image centered on a black square canvas.
        new_im = Image.new("RGB", (desired_size, desired_size))
        new_im.paste(im, ((desired_size - new_size[0]) // 2,
                          (desired_size - new_size[1]) // 2))
        image_list.append(np.array(new_im))
        target_list.append(idx_class)
    image_list = np.array(image_list)
    target_list = np.array(target_list)
    # folder[6:] strips the 'latih/' prefix, leaving the class name.
    np.save('dataset_latih/' + folder[6:] + '_image.npy', image_list)
    np.save('dataset_latih/' + folder[6:] + '_target.npy', target_list)
    print(folder[6:], idx_class)
    idx_class += 1
# +
# Merge the per-class .npy files into single training arrays.
# Collect the arrays in a list and concatenate once, instead of calling
# np.append repeatedly — np.append copies the whole accumulated array on
# every iteration (accidentally quadratic).
image_parts = [np.zeros([0, 224, 224, 3], dtype=np.uint8)]
for file in glob.glob('dataset_latih/*_image.npy'):
    image_parts.append(np.load(file))
image_list = np.concatenate(image_parts, axis=0)

target_parts = [np.zeros([0])]
for file in glob.glob('dataset_latih/*_target.npy'):
    target_parts.append(np.load(file))
target_list = np.concatenate(target_parts, axis=0)

np.save('image_train.npy', image_list)
np.save('target_train.npy', target_list)
# -
image_list.shape
# # Test Dataset
# Build the test set: same letterboxing pipeline as the training set, but
# reading from uji/ and writing to dataset_uji/.
idx_class = 0
for folder in glob.glob('uji/*'):
    image_list = []
    target_list = []
    for filename in glob.glob(folder + '/*.jpg'):
        # OpenCV reads BGR; convert to RGB before handing off to PIL.
        im = cv2.imread(filename)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        im = Image.fromarray(im)
        desired_size = 224
        old_size = im.size
        # Scale so the longest edge becomes desired_size.
        ratio = float(desired_size) / max(old_size)
        new_size = tuple([int(x * ratio) for x in old_size])
        # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
        # same resampling filter and has been available for many versions.
        im = im.resize(new_size, Image.LANCZOS)
        # Paste the resized image centered on a black square canvas.
        new_im = Image.new("RGB", (desired_size, desired_size))
        new_im.paste(im, ((desired_size - new_size[0]) // 2,
                          (desired_size - new_size[1]) // 2))
        image_list.append(np.array(new_im))
        target_list.append(idx_class)
    image_list = np.array(image_list)
    target_list = np.array(target_list)
    # folder[4:] strips the 'uji/' prefix, leaving the class name.
    np.save('dataset_uji/' + folder[4:] + '_image.npy', image_list)
    np.save('dataset_uji/' + folder[4:] + '_target.npy', target_list)
    print(folder[4:], idx_class)
    idx_class += 1
# +
# Merge the per-class .npy files into single test arrays.
# Collect the arrays in a list and concatenate once, instead of calling
# np.append repeatedly — np.append copies the whole accumulated array on
# every iteration (accidentally quadratic).
image_parts = [np.zeros([0, 224, 224, 3], dtype=np.uint8)]
for file in glob.glob('dataset_uji/*_image.npy'):
    image_parts.append(np.load(file))
image_list = np.concatenate(image_parts, axis=0)

target_parts = [np.zeros([0])]
for file in glob.glob('dataset_uji/*_target.npy'):
    target_parts.append(np.load(file))
target_list = np.concatenate(target_parts, axis=0)

np.save('image_test.npy', image_list)
np.save('target_test.npy', target_list)
# -
|
load_image_to_array.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Copyright (c) 2017 <NAME> & <NAME>
# MIT License -- see LICENSE for details
#
# This file is part of the code to reproduce the core results of:
# <NAME>., and <NAME>. (2017). Continual Learning Through
# Synaptic Intelligence. In Proceedings of the 34th International Conference on
# Machine Learning, D. Precup, and <NAME>, eds. (International Convention
# Centre, Sydney, Australia: PMLR), pp. 3987–3995.
# http://proceedings.mlr.press/v70/zenke17a.html
# +
# %load_ext autoreload
# %autoreload 2
# %pylab inline
import tensorflow as tf
slim = tf.contrib.slim
graph_replace = tf.contrib.graph_editor.graph_replace
import sys, os
sys.path.extend([os.path.expanduser('..')])
from pathint import utils
import seaborn as sns
sns.set_style("white")
from tqdm import trange, tqdm
import matplotlib.colors as colors
import matplotlib.cm as cmx
# -
# ## Parameters
# +
# Data params
input_dim = 784
output_dim = 10
# Network params
n_hidden_units = 2000
activation_fn = tf.nn.relu
# Optimization params
batch_size = 256
epochs_per_task = 20
learning_rate=1e-3
xi = 0.1
# Reset optimizer after each age
reset_optimizer = False
# -
# ## Construct datasets
n_tasks = 10
full_datasets, final_test_datasets = utils.construct_permute_mnist(num_tasks=n_tasks)
# training_datasets, validation_datasets = utils.mk_training_validation_splits(full_datasets, split_fractions=(0.9, 0.1))
training_datasets = full_datasets
validation_datasets = final_test_datasets
# ## Construct network, loss, and updates
tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.InteractiveSession(config=config)
sess.run(tf.global_variables_initializer())
from keras.models import Sequential
from keras.layers import Dense, Dropout
model = Sequential()
model.add(Dense(n_hidden_units, activation=activation_fn, input_dim=input_dim))
model.add(Dense(n_hidden_units, activation=activation_fn))
model.add(Dense(output_dim, activation='softmax'))
# +
from pathint import protocols
from pathint.optimizers import KOOptimizer
from keras.optimizers import SGD, Adam, RMSprop
from keras.callbacks import Callback
from pathint.keras_utils import LossHistory
protocol_name, protocol = protocols.PATH_INT_PROTOCOL(omega_decay='sum', xi=xi)
# protocol_name, protocol = protocols.FISHER_PROTOCOL(omega_decay='sum')
opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999)
opt_name = 'adam'
# opt = SGD(lr=learning_rate)
# opt_name = 'sgd'
oopt = KOOptimizer(opt, model=model, **protocol)
model.compile(loss="categorical_crossentropy", optimizer=oopt, metrics=['accuracy'])
history = LossHistory()
callbacks = [history]
# -
file_prefix = "data_%s_opt%s_lr%.2e_bs%i_ep%i_tsks%i"%(protocol_name, opt_name, learning_rate, batch_size, epochs_per_task, n_tasks)
datafile_name = "%s.pkl.gz"%(file_prefix)
# ## Train!
# +
diag_vals = dict()
all_evals = dict()
# all_evals = utils.load_zipped_pickle(datafile_name)
# returns empty dict if file not found
def run_fits(cvals, training_data, valid_data, eval_on_train_set=False):
    """Sequentially train `model` on all tasks for each consolidation
    strength c in `cvals`, storing a per-age accuracy matrix in
    `all_evals[c]` and checkpointing the results to disk after each c.

    Relies on module-level state: sess, model, oopt, callbacks, n_tasks,
    batch_size, epochs_per_task, reset_optimizer, all_evals,
    datafile_name.
    """
    for cidx, cval_ in enumerate(cvals):
        fs = []
        evals = []
        # Fresh network weights for every consolidation strength.
        sess.run(tf.global_variables_initializer())
        cstuffs = []
        cval = cval_
        print( "setting cval")
        oopt.set_strength(cval)
        print("cval is %e"%sess.run(oopt.lam))
        for age, tidx in enumerate(range(n_tasks)):
            print("Age %i, cval is=%f"%(age,cval))
            oopt.set_nb_data(len(training_data[tidx][0]))
            stuffs = model.fit(training_data[tidx][0], training_data[tidx][1], batch_size, epochs_per_task, callbacks=callbacks,
                               verbose=0)
            # Consolidate parameter-importance estimates after this task.
            oopt.update_task_metrics(training_data[tidx][0], training_data[tidx][1], batch_size)
            oopt.update_task_vars()
            # Evaluate accuracy on every task (both already-seen and future).
            ftask = []
            for j in range(n_tasks):
                if eval_on_train_set:
                    f_ = model.evaluate(training_data[j][0], training_data[j][1], batch_size, verbose=0)
                else:
                    f_ = model.evaluate(valid_data[j][0], valid_data[j][1], batch_size, verbose=0)
                # f_[1] is the accuracy metric from model.evaluate.
                ftask.append(np.mean(f_[1]))
            evals.append(ftask)
            cstuffs.append(stuffs)

            # Re-initialize optimizer variables
            if reset_optimizer:
                oopt.reset_optimizer()

        # diag_vals[cval_] = oopt.get_numvals('omega')
        evals = np.array(evals)
        all_evals[cval_] = evals

        # backup all_evals to disk
        utils.save_zipped_pickle(all_evals, datafile_name)
# -
cvals = [0, 0.01, 0.1, 1.0]
print(cvals)
# # %%capture
run_fits(cvals, training_datasets, validation_datasets)
# backup all_evals to disk
# all_evals = dict() # uncomment to delete on disk
utils.save_zipped_pickle(all_evals, datafile_name)
o = oopt.get_numvals_list('omega')
cmap = plt.get_cmap('cool')
cNorm = colors.Normalize(vmin=-4, vmax=np.log(np.max(list(all_evals.keys()))))
# cNorm = colors.Normalize()
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmap)
print(scalarMap.get_clim())
# +
figure(figsize=(14, 4))
axs = [subplot(1,n_tasks+1,1)]#, None, None]
for i in range(1, n_tasks + 1):
axs.append(subplot(1, n_tasks+1, i+1, sharex=axs[0], sharey=axs[0]))
keys = list(all_evals.keys())
sorted_keys = np.sort(keys)
for cval in sorted_keys:
evals = all_evals[cval]
for j in range(n_tasks):
colorVal = scalarMap.to_rgba(np.log(cval))
axs[j].plot(evals[:, j], c=colorVal)#, label="t%d, c%g"%(j, cval))
label = "c=%g"%cval
average = evals.mean(1)
axs[-1].plot(average, c=colorVal, label=label)
for i, ax in enumerate(axs):
ax.legend(bbox_to_anchor=(1.0,1.0))
ax.set_title((['task %d'%j for j in range(n_tasks)] + ['average'])[i])
gcf().tight_layout()
# +
for cval in sorted_keys:
stuff = []
for i in range(len(all_evals[cval])):#n_tasks):
stuff.append(all_evals[cval][i][:i+1].mean())
plot(range(1,n_tasks+1), stuff, 'o-', label="c=%g"%cval)
axhline(all_evals[cval][0][0], linestyle='--', color='k')
xlabel('Number of tasks')
ylabel('Fraction correct')
legend(loc='best')
ylim(0.9, 1.02)
xlim(0.5, 10.5)
grid('on')
savefig("%s.pdf"%(file_prefix))
# -
|
fig_permuted_mnist/Permuted MNIST.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### 1 RNN
# +
import tensorflow as tf
import numpy as np

# A vanilla RNN cell with a hidden state of 128 units (TF1 API).
cell = tf.nn.rnn_cell.BasicRNNCell(num_units=128)  # state_size = 128
print(cell.state_size)  # 128

inputs = tf.placeholder(np.float32, shape=(32, 100))  # 32 is the batch_size
h0 = cell.zero_state(32, np.float32)  # zero_state yields an all-zero initial state of shape (batch_size, state_size)
output, h1 = cell.__call__(inputs, h0)  # invoke the cell's call function (one time step)
print(h1.shape)  # (32, 128)
# +
import tensorflow as tf
import numpy as np

# An LSTM cell; its state is a (c, h) tuple, each part of 128 units.
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=128)
inputs = tf.placeholder(np.float32, shape=(32, 100))  # 32 is the batch_size
h0 = lstm_cell.zero_state(32, np.float32)  # zero_state yields an all-zero initial state
output, h1 = lstm_cell.__call__(inputs, h0)
print(h1.h)  # shape=(32, 128)
print(h1.c)  # shape=(32, 128)
# +
# Reference: https://zhuanlan.zhihu.com/p/28196873
|
words_segmentation/test_rnn.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Syilun/face_recognition/blob/master/23_vggface_mlp512_128_8_v2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="MMWSB22LC-Ba" colab_type="code" colab={}
# 用單個模型同時執行兩個分類任務:
# age 分成8個classes
# gender 分成2個classes
# mlp 每個全連接層的unit個數: 512 - 128 -- 8
# \_ 2
# trainning:
# 改用generator產生資料給fit_generator
# class_weight
# random_state
# callback: EarlyStop, model.save
#用少量資料
FULL_DATA = 0
per_cls_trn = 500
per_cls_eval = 20 #100
#用全部資料
#FULL_DATA = 1
IMG_SIZE = 224
BATCH_SIZE = 32
EPOCHS = 1
model_folder_path = '/content/drive/My Drive/AIoT_Project/face'
#img_folder_path = '/content/drive/My Drive/AIoT_Project/Datasets/資料集_IMDB-Wiki'
img_folder_path = '/content/drive/My Drive/AIoT_Project/Datasets/cleandataset'
# + id="-bv3RgQfbZ_r" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="c2099adc-bc72-4c23-db9d-b0adb75c5b11"
from google.colab import drive
drive.mount('/content/drive')
# + id="Ri1ni0ZmaYPP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="b0ac48f5-3dc3-449e-92cd-c48a2f76ceaa"
# to measure execution time
# !pip install ipython-autotime
# %load_ext autotime
# + id="WIxoBByJgDOB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 374} outputId="c8e7cd75-dd3d-4de8-ab5a-2fb299576d4b"
# ! nvidia-smi
# + id="Enr3u7SZ0rHX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 238} outputId="2573ec57-b4dd-4ac0-997c-99f1c637a410"
# !pip install mtcnn
# + id="1HiBB7Hk1Cr-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b81f5d4f-defb-41ca-d4ff-9d7b06864789"
import scipy.io
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
# import tensorflow as tf
# from tensorflow import keras
import keras
from keras.preprocessing.image import load_img
from keras.callbacks import ModelCheckpoint,EarlyStopping
from keras.layers import Dense, Activation, Dropout, Flatten, Input, Convolution2D, ZeroPadding2D, MaxPooling2D, Activation
from keras.layers import Conv2D, AveragePooling2D, BatchNormalization
from keras.models import Model, Sequential
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.utils import to_categorical
from keras import metrics
import matplotlib.pyplot as plt
# %matplotlib inline
from keras.models import load_model
import cv2
from glob import glob
import os
from mtcnn import MTCNN
import numpy as np
# + id="KvutOpBGdpVr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="421e37e7-8268-4f1c-dae5-2b127613154f"
print(keras.__version__)
# + id="Hfiio5_KHufK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d13f4512-e34f-4f57-af14-8a4d3b93a437"
# 資料集由csv檔案讀入, 也可新增其他的csv檔案來擴充資料
# df = pd.read_csv('drive/My Drive/Tibame_AIoT_Project/Datasets/資料集_IMDB-Wiki/age_gender_wiki.csv')
# df_under10 = pd.read_csv('drive/My Drive/Tibame_AIoT_Project/Datasets/資料集_IMDB-Wiki/age_gender_imdb_under10.csv')
# df_over70 = pd.read_csv('drive/My Drive/Tibame_AIoT_Project/Datasets/資料集_IMDB-Wiki/age_gender_imdb_over70.csv')
# df = pd.concat([df, df_under10, df_over70])
# + id="hPi60EWkDL6K" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="49e13279-f378-4e4d-ad7e-9e5f0ff5785d"
# cleandata: 清除wiki資料集原本的一些年齡標註錯誤
df = pd.read_csv(os.path.join(img_folder_path, 'cleandata.csv'))
# + id="6vj29J-vpoyz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d5f8cb15-ade1-4033-fa26-d1167a423b05"
#some guys seem to be greater than 100. some of these are paintings. remove these old guys
df = df[df['age'] <= 100]
#some guys seem to be unborn in the data set
df = df[df['age'] > 0]
# + id="ZI0vTSSFtF3o" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 436} outputId="5f381867-541d-4003-8a25-af95fd2c10f9"
# 每10歲分一類,70歲以上歸為同一類,共8類
df['age_grp'] = pd.cut(df['age'], bins=[0,10,20,30,40,50,60,70,110], right=False)
le = LabelEncoder()
le.fit(df['age_grp'].astype('str'))
df['age_cls'] = le.transform(df['age_grp'].astype('str'))
df
# + id="wBOpqcMoS90h" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="707cd52f-ba70-4716-fbd7-973f477a83d4"
df['age_cls'].value_counts().sort_index()
# + id="ynsfUWLDp4r7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="d29d025e-2594-47cb-8459-665a8d3aa275"
histogram_age = df['age_cls'].hist(bins=df['age_cls'].nunique())
# + id="SC37vJjo_AB_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="cefa0d17-6a1c-4608-8df0-f45c06f00440"
histogram_gender = df['gender'].hist(bins=df['gender'].nunique())
# + id="YSCJlWc444MN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="d932b65f-428e-4d02-ecc8-b61ce5ca1421"
#先用少量資料比較不同模型:
#每個類別各取部分資料,用train_test_split來切train and test
df_f0 = df[(df['age_cls'] == 0) & (df['gender'] == 0)]
df_f1 = df[(df['age_cls'] == 1) & (df['gender'] == 0)]
df_f2 = df[(df['age_cls'] == 2) & (df['gender'] == 0)]
df_f3 = df[(df['age_cls'] == 3) & (df['gender'] == 0)]
df_f4 = df[(df['age_cls'] == 4) & (df['gender'] == 0)]
df_f5 = df[(df['age_cls'] == 5) & (df['gender'] == 0)]
df_f6 = df[(df['age_cls'] == 6) & (df['gender'] == 0)]
df_f7 = df[(df['age_cls'] == 7) & (df['gender'] == 0)]
df_m0 = df[(df['age_cls'] == 0) & (df['gender'] == 1)]
df_m1 = df[(df['age_cls'] == 1) & (df['gender'] == 1)]
df_m2 = df[(df['age_cls'] == 2) & (df['gender'] == 1)]
df_m3 = df[(df['age_cls'] == 3) & (df['gender'] == 1)]
df_m4 = df[(df['age_cls'] == 4) & (df['gender'] == 1)]
df_m5 = df[(df['age_cls'] == 5) & (df['gender'] == 1)]
df_m6 = df[(df['age_cls'] == 6) & (df['gender'] == 1)]
df_m7 = df[(df['age_cls'] == 7) & (df['gender'] == 1)]
# train and val data
if FULL_DATA == 1:
#每個類別保留最後per_cls_eval筆資料作為evaluate用
train_df = pd.concat([
df_f0[:-per_cls_eval], df_f1[:-per_cls_eval], df_f2[:-per_cls_eval], df_f3[:-per_cls_eval],
df_f4[:-per_cls_eval], df_f5[:-per_cls_eval], df_f6[:-per_cls_eval], df_f7[:-per_cls_eval],
df_m0[:-per_cls_eval], df_m1[:-per_cls_eval], df_m2[:-per_cls_eval], df_m3[:-per_cls_eval],
df_m4[:-per_cls_eval], df_m5[:-per_cls_eval], df_m6[:-per_cls_eval], df_m7[:-per_cls_eval]
])
else:
#先用少量資料比較不同模型
train_df = pd.concat([
df_f0[:per_cls_trn], df_f1[:per_cls_trn], df_f2[:per_cls_trn], df_f3[:per_cls_trn],
df_f4[:per_cls_trn], df_f5[:per_cls_trn], df_f6[:per_cls_trn], df_f7[:per_cls_trn],
df_m0[:per_cls_trn], df_m1[:per_cls_trn], df_m2[:per_cls_trn], df_m3[:per_cls_trn],
df_m4[:per_cls_trn], df_m5[:per_cls_trn], df_m6[:per_cls_trn], df_m7[:per_cls_trn]
])
# evaluate data: 每個類別保留最後per_cls_eval筆資料作為evaluate用
eval_df = pd.concat([
df_f0[-per_cls_eval:], df_f1[-per_cls_eval:], df_f2[-per_cls_eval:], df_f3[-per_cls_eval:],
df_f4[-per_cls_eval:], df_f5[-per_cls_eval:], df_f6[-per_cls_eval:], df_f7[-per_cls_eval:],
df_m0[-per_cls_eval:], df_m1[-per_cls_eval:], df_m2[-per_cls_eval:], df_m3[-per_cls_eval:],
df_m4[-per_cls_eval:], df_m5[-per_cls_eval:], df_m6[-per_cls_eval:], df_m7[-per_cls_eval:]
])
x_eval = np.array(eval_df['full_path'])
# 先把模型的兩個輸出的答案合併
y_eval = np.array(pd.concat([eval_df['age_cls'], eval_df['gender']], axis=1))
print("train:", len(train_df), "predict:", len(eval_df))
# + colab_type="code" id="PQi3zwjxagcW" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="2c23fcc1-bccf-4fd1-9569-3c624f188cb9"
# 處理答案 把它轉成one-hot (後面再做)
# y_train_category = to_categorical(df['age_cls'], num_classes=8)
# 2個輸出: age, gender
y_df = pd.concat([pd.DataFrame(train_df['age_cls']), pd.DataFrame(train_df['gender'])], axis=1)
# 切分訓練data
x_train, x_test, y_train, y_test = train_test_split(np.array(train_df['full_path']), np.array(y_df), test_size=0.2, random_state=0)
print(x_train[0], x_test[0], y_train[0], y_test[0])
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
# + id="FZJoKwSbtpb8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="73f16740-ab8e-49bf-d57b-564e5fa97541"
detector = MTCNN()
#feature_extractor = load_model(os.path.join(model_folder_path, 'facenet_keras.h5'))
# + id="DypYAJ7cJlrC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 391} outputId="35dab32d-6dd7-46c6-d4f8-241d9202a15f"
# VGGFace: https://github.com/rcmalli/keras-vggface
# !pip install keras_vggface
# !pip install keras_applications
from keras_vggface.vggface import VGGFace
from keras_vggface.utils import preprocess_input
feature_extractor = VGGFace(model='resnet50', include_top=False,
input_shape=(224, 224, 3), pooling='avg')
# + id="fOgsPn1i3E21" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="51c3af9f-5951-44cc-de8e-c210303482f3"
feature_extractor.summary()
# + id="7iJ6QxgZ_8iI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="8e249adc-e153-4da7-8eab-0efdf36c0328"
# 固定pre-train model的參數
for lyr in feature_extractor.layers:
lyr.trainable = False
# BN
x = BatchNormalization()(feature_extractor.output)
# MLP
# x = Flatten()(x)
#x = Dense(units=2048, activation='relu')(x)
x = Dense(units=512, activation='relu')(x)
x = Dense(units=128, activation='relu')(x)
age = Dense(units=8, activation='softmax', name='age')(x)
gender = Dense(units=2, activation='softmax', name='gender')(x)
# 2個輸出: age, gender
age_gender_model = Model(inputs=feature_extractor.input, outputs=[age, gender])
age_gender_model.summary()
# + id="Bs6tioz-AvmI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="af75c336-b8f3-4025-b534-44797165d9bf"
age_gender_model.compile(loss=["categorical_crossentropy","categorical_crossentropy"],
optimizer='adam', metrics=[{'age':'accuracy'},{'gender':'accuracy'}]) # 2個輸出: age, gender
#age_gender_model.load_weights(os.path.join(model_folder_path,'23_vggface_weight_mlp512-128-8_2_epoch.h5'))
# + id="D-Mn0wTy5Bze" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ffaef15b-5b64-4234-91dd-55046e800106"
# 資料預處理 for facenet?
# Standardization
def preprocess(imgs):
    """Standardize each image in the batch to zero mean and unit variance.

    Bug fix: the original wrote the standardized floats back into the
    input array in place, so an integer batch (e.g. uint8 images) was
    silently truncated toward zero. Cast to float64 first; the float
    result is returned (the caller's array is no longer mutated).
    """
    imgs = np.asarray(imgs, dtype=float)
    for i in range(imgs.shape[0]):
        # Per-image standardization.
        img = imgs[i]
        mean, std = img.mean(), img.std()
        imgs[i] = (img - mean) / std
    return imgs
# Normalization
def normalize(img):
    """Map pixel intensities from the [0, 255] range into [0, 1]."""
    scale = 255.0
    return img / scale
# -1 <= x <= 1
def preprocess_1(imgs):
    """Rescale pixel values from [0, 255] into [-1, 1]."""
    arr = np.array(imgs, dtype=float)
    return arr / 127.5 - 1.0
# + id="evAgdxQ20ICI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="155adf43-a902-477e-8044-1e7cc199cace"
# detect face
def detect_faces(img):
    """Return a list of cropped face patches for every face the
    module-level MTCNN `detector` finds in *img*."""
    crops = []
    for detection in detector.detect_faces(img):
        x1, y1, w, h = detection['box']
        # Slice the detected bounding box out of the source image.
        crops.append(img[y1:y1 + h, x1:x1 + w])
    return crops
# + id="GBpYGPqZDTkT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ef75ce86-2245-4ee6-9894-37179f76bec6"
def data_generator(data_paths, y_data, batch_size=BATCH_SIZE, eval=0):
    '''Yield (inputs, targets) batches for fit / fit_generator.

    For each path: read the image, crop the first detected face, resize
    to IMG_SIZE and apply the VGGFace preprocessing. Targets are one-hot
    age (8 classes) and gender (2 classes).

    eval=0: index wraps around, producing an endless train/val stream.
    eval=1: single finite pass over the data (evaluation).
    '''
    n = len(data_paths)
    i = 0
    data_paths = data_paths
    # eval=0: produce an infinite train-and-validate stream
    # eval=1: produce a finite evaluation stream
    while i < n:
        x_ori, x_norm, y_age, y_gender = [], [], [], []
        i_batch = i
        for b in range(batch_size):
            path = data_paths[i]
            print("n:", n, "idx:", i, "cls:", y_data[i], path)
            # Read the image, crop the face region, and preprocess it with
            # the borrowed (pretrained) model's own preprocessing routine.
            try:
                img = cv2.imread(os.path.join(img_folder_path,path))[:,:,::-1]
            except:
                # Unreadable file: skip it (advance the index and retry).
                print('imread failed')
                if eval == 0:
                    i = (i+1) % n
                else:
                    i = (i+1)
                continue
            # plt.imshow(img)
            # plt.show()
            faces = detect_faces(img)
            # Skip images where MTCNN finds no usable face region.
            if len(faces) == 0 or faces[0].shape[0] == 0 or faces[0].shape[1] == 0:
                print('No face')
                if eval == 0:
                    i = (i+1) % n
                else:
                    i = (i+1)
                continue
            # print(faces[0].shape)
            img_crop = cv2.resize(faces[0], (IMG_SIZE, IMG_SIZE))
            # plt.imshow(faces[0])
            # plt.show()
            # Preprocess with the borrowed model's routine (keras_vggface).
            img_pre = preprocess_input(np.array(img_crop,dtype=float))
            # Keep the original image around (for debugging / plotting).
            x_ori.append(img)
            x_norm.append(img_pre)
            y_age.append(y_data[i][0])
            y_gender.append(y_data[i][1])
            if eval == 0:
                i = (i+1) % n
            else:
                i = (i+1)
        # print("len(image_data)",len(x_ori))
        # plt.figure(figsize=(10, 40))
        # for j,m in enumerate(x_ori):
        #   plt.subplot(1, BATCH_SIZE, (j%BATCH_SIZE)+1)
        #   plt.title("idx:{} y_data:{}".format(i_batch+j, y_data[i_batch+j]))
        #   plt.axis("off")
        #   plt.imshow(m)
        # plt.show()
        # Two outputs: age, gender — one-hot encode each.
        # print(type(y_age), len(y_age), y_age[:8])
        # print(type(y_gender), len(y_gender), y_gender[:8])
        y_age_category = to_categorical(y_age, num_classes=8)
        y_gender_category = to_categorical(y_gender, num_classes=2)
        # print(y_age_category)
        # print(y_gender_category)
        # NOTE(review): 'input_4' must match the Keras input layer's
        # auto-generated name — confirm against model.input.name.
        x_input = {'input_4':np.array(x_norm)}
        y_category = {'age':np.array(y_age_category), 'gender':np.array(y_gender_category)}
        # print(type(np.array(x_norm)), np.array(x_norm).shape)
        # print(type(y_category), np.array(y_age_category), np.array(y_gender_category))
        yield x_input, y_category
# + id="3DidZIPzKHzS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e96e1d02-8cae-4a82-e1fb-6cc442916201"
# 用generator產生資料
generator_train = data_generator(x_train, y_train, batch_size=BATCH_SIZE)
generator_test = data_generator(x_test, y_test, batch_size=BATCH_SIZE)
type(generator_train)
# + colab_type="code" id="ZEWln4Tua3dg" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="dc5cce9b-acc9-4ca1-96d5-93b1d21af338"
if FULL_DATA == 1:
age_weights = {0:12., 1:5., 2:1., 3:2., 4:3., 5:4., 6:6., 7:3.}
else:
# for temp
age_weights = {0:1., 1:1., 2:1., 3:1., 4:1., 5:1., 6:1., 7:1.}
# + id="jc_L3HTVMio7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="b1ca20de-bef9-4663-d686-48e45f79c492"
# fit_generator
checkpoint = ModelCheckpoint(os.path.join(model_folder_path,"23_vggface_weight_mlp512-128-8_BS32_epoch.h5"),
save_best_only=False, save_weights_only=True) #Defaults: save_freq='epoch', save_weights_only=False
earlystop = EarlyStopping(patience=5, restore_best_weights=True)
#logs = age_gender_model.fit_generator(
logs = age_gender_model.fit(
generator_train,
epochs=EPOCHS,
use_multiprocessing=False, #for deadlock issue while 2nd epoch running
steps_per_epoch=len(x_train)//BATCH_SIZE,
validation_data=generator_test,
validation_steps=len(x_test)//BATCH_SIZE,
#class_weight=age_weights, #class_weight` is only supported for Models with a single output.
#validation_split=0.1, #fit_generator() got an unexpected keyword argument 'validation_split'
callbacks=[checkpoint, earlystop]
)
# + id="HXsF6L2e-vzG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2f51af38-ed96-4c07-e444-4ff2d529b523"
age_gender_model.save_weights(os.path.join(model_folder_path,'23_vggface_weight_mlp512-128-8_2_BS32.h5'))
# + id="8_zCJfVyisiK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="999f015f-f09c-4476-c403-2174ca3e43fa"
history = logs.history
history
# + id="ETe6gpnqjZzx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 298} outputId="6aba7451-9421-4aa3-f669-48429006b84e"
plt.plot(history['age_accuracy'])
plt.plot(history['val_age_accuracy'])
plt.legend(['age_accuracy', 'val_age_accuracy'])
plt.title('age_accuracy')
plt.show()
# + id="PYa3D33Gi_Yx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 315} outputId="6047b537-1efe-4366-b486-795dbe3cc47a"
plt.plot(history['gender_accuracy'])
plt.plot(history['val_gender_accuracy'])
plt.legend(['gender_accuracy', 'val_gender_accuracy'])
plt.title('gender_accuracy')
# + id="eibGTK-gjaTU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 315} outputId="492ebbe8-3a26-4bea-faa9-595444d0da71"
plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.legend(['loss', 'val_loss'])
plt.title('loss')
# + id="veQ_S9mOWx0B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d9d262e3-17d5-4b41-b01e-55fe7bd0c113"
# cur_train_idx = 0
# cur_test_idx = 0
# def get_data(x, y, batch=20, IMG_SIZE=160, test=1):
# # 要注意 numpy 中的 randint 的上限是不包含的 和一般的randint不同
# # numpy array 的索引可以是個 list, 即可同時取出不只一個元素
# global cur_train_idx, cur_test_idx
# print("cur train/test idx:", cur_train_idx, cur_test_idx)
# if test == 0:
# #idx = np.random.randint(0, len(x), batch)
# idx = list(range(cur_train_idx, cur_train_idx+batch, 1))
# cur_train_idx = (cur_train_idx + batch) % len(x)
# else:
# #idx = np.random.randint(0, len(x), batch)
# idx = list(range(cur_test_idx, cur_test_idx+batch, 1))
# cur_test_idx += batch
# #print("idx:", idx, x[idx], y[idx])
# x_idx = x[idx]
# y_idx = y[idx]
# x_ori, x_norm, y_ori = [], [], y_idx
# for i,p in enumerate(x_idx):
# print(p)
# # 讀取圖片,切下臉的部分,並使用借來的模型的預處理方式來作預處理
# img = np.array(cv2.imread(os.path.join(img_folder_path,p))[:,:,::-1])
# # plt.imshow(img)
# # plt.show()
# faces = detect_faces(img)
# if len(faces) == 0 or faces[0].shape[0] == 0:
# print('No face')
# continue
# img = cv2.resize(faces[0], (IMG_SIZE, IMG_SIZE))
# # plt.imshow(faces[0])
# # plt.show()
# # 使用借來的模型的預處理方式來作預處理
# img_pre = preprocess_input(np.array(img,dtype=float))
# #img_pre = preprocess_1(img)
# #img_pre = normalize(img)
# # 把原圖留下來
# x_ori.append(img)
# x_norm.append(img_pre)
# return np.array(x_ori), np.array(x_norm), np.array(y_ori)
# # 取出要用來預測的資料
# x_ori_batch, x_batch, y_batch = get_data(x_eval, y_eval, batch=100, IMG_SIZE=224)
# print(y_batch)
# + id="fShhRRl2qaFZ" colab_type="code" colab={}
# + id="ngKRFAYgS5Zd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f6c5070e-3c9f-4d1e-8e68-3a8e62705ef1"
# evaluate
#
# 從保留作為evaluate用的資料,用generator產生資料 to predict
generator_predict = data_generator(x_eval, y_eval, batch_size=BATCH_SIZE, eval=1)
# 把generator_predict返回的物件轉成list
eval_data = list(generator_predict)
# elements of the list are tuples, elements of the tuples are dicts
# [(dict of x, dict of y), (dict of x, dict of y), .....]
# 取出圖片資料與正確答案
x_eval_data, y_true_age, y_true_gender = [], [], []
for x_dict,y_dict in eval_data:
# print("x_eval_data:", len(list(x_dict['input_4'])))
x_eval_data = x_eval_data + list(x_dict['input_4'])
# print("y_true_age:", y_dict['age'].argmax(axis=-1))
# print("y_true_gender:", y_dict['gender'].argmax(axis=-1))
y_true_age = y_true_age + (list(y_dict['age'].argmax(axis=-1)))
y_true_gender = y_true_gender + (list(y_dict['gender'].argmax(axis=-1)))
# print("-------------------------")
print("x_eval_data:", type(x_eval_data), "np.array:", np.array(x_eval_data).shape)
print("y_true_age:", y_true_age)
print("y_true_gender:", y_true_gender)
# + id="f0VkMu89Pn36" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 340} outputId="0f1c40fc-be3d-41db-ef82-b2d6853ed1a3"
# predict
pre = age_gender_model.predict(np.array(x_eval_data), steps=len(x_eval)//BATCH_SIZE)
#pre[0] is predicted probabilities for age
#pre[1] is predicted probabilities for gender
pred_age = pre[0].argmax(axis=-1)
pred_gender = pre[1].argmax(axis=-1)
print("predict age:",pred_age)
print("predict gender:",pred_gender)
# + id="mkfU4iT97pGb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="97a23134-1f42-455a-e1aa-521629151fb6"
len(pred_age), len(pred_gender)
# + id="6P7GHnZJ58DJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 459} outputId="cdfe7859-afe2-4858-cfdd-013a5dbf7d50"
from sklearn.metrics import classification_report
print(np.array(y_true_age).shape, np.array(pred_age).shape, np.array(y_true_gender).shape, np.array(pred_gender).shape)
age_target_names = [str(i) for i in range(8)]
gender_target_names = [str(i) for i in range(2)]
print(classification_report(np.array(y_true_age), np.array(pred_age), target_names=age_target_names))
print(classification_report(np.array(y_true_gender), np.array(pred_gender), target_names=gender_target_names))
# + id="3pFTzAkoWIiT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 314} outputId="e3fe5d98-ed73-4f2d-cdce-bd7af482058d"
from sklearn.metrics import confusion_matrix
pd.DataFrame(confusion_matrix(y_true_age, pred_age),
index=["{}(真實)".format(i) for i in range(8)],
columns=["{}(預測)".format(i) for i in range(8)]
)
# + id="BVTIZJsXZyjn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 128} outputId="e436bf08-2800-4218-d486-a6887a0e5e20"
pd.DataFrame(confusion_matrix(y_true_gender, pred_gender),
index=["{}(真實)".format(i) for i in range(2)],
columns=["{}(預測)".format(i) for i in range(2)]
)
# + id="LdyuPoYL6Q7K" colab_type="code" colab={}
# + id="AOfcIMb86Q_t" colab_type="code" colab={}
# + id="veqfvA2J2UB3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9f36e860-8f8a-4fc8-f648-a2b7ccffe32b"
def euclidean_distance(x, y):
    """Return the Euclidean (L2) distance between vectors *x* and *y*.

    Because the summed dimension is kept, the result is a length-1
    array rather than a bare scalar.
    """
    diff = x - y
    return np.sqrt(np.sum(diff * diff, keepdims=True))
# + id="9ic1ekdt2Xli" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2ccfbad5-5be7-41ee-abc6-5bfdbfa982b7"
def predict_age(img):
    """Predict the age class of a face crop and print the argmax index.

    The crop is normalised, resized to 100x100 and fed through the
    global ``age_model``; nothing is returned.
    NOTE(review): relies on module-level ``normalize`` and ``age_model``
    being defined elsewhere in the notebook.
    """
    img_size = 100
    img = normalize(img)
    img = cv2.resize(img, (img_size, img_size))
    # Single-image batch of shape (1, 100, 100, 3).
    model_input = np.zeros((1, img_size, img_size, 3))
    model_input[0] = img
    ages = age_model.predict(model_input)
    print('age: ', ages.argmax(axis=-1))
    return
# def predict_gender(img):
# img_size = 100
# img = normalize(img)
# img = cv2.resize(img, (img_size, img_size))
# model_input = np.zeros((1, img_size, img_size, 3))
# model_input[0] = img
# genders = model_gender.predict(model_input)
# gender = genders[0]
# if gender > 0.5:
# print('Male')
# else:
# print('Female')
# return
# + id="huWs2jy-2jT8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8705fa58-a5f7-41dc-814d-7847c8618057"
folder_path = '/content/drive/My Drive/勞動部/week10/face_detection'
def face_id(filename, IMG_SIZE=160):
    """Detect the first face in an image file and run predictions on it.

    *filename* is resolved relative to ``folder_path``; the image is read
    with OpenCV and the channel order flipped BGR -> RGB before detection.
    Only age prediction is currently active; emotion/gender prediction and
    the embedding-based identification are kept below as disabled code.
    """
    raw_img = cv2.imread(os.path.join(folder_path, filename))[:,:,::-1]
    faces = detect_faces(raw_img)
    if len(faces) == 0:
        print('No face')
        return
    else:
        # Use only the first detected face.
        face = faces[0]
        # More predictions
        predict_age(face)
        # predict_emotion(face)
        # predict_gender(face)
        # # ID (disabled): embed the face and find the nearest known embedding
        # face = cv2.resize(face, (IMG_SIZE, IMG_SIZE))
        # model_input = np.zeros((1, IMG_SIZE, IMG_SIZE, 3))
        # model_input[0] = face
        # model_input = preprocess(model_input)
        # query_embeddings = feature_extractor.predict(model_input)
        # query_embedding = query_embeddings[0]
        # # compute distance
        # distances = np.zeros((len(embeddings)))
        # for i, embed in enumerate(embeddings):
        #     distance = euclidean_distance(embed, query_embedding)
        #     distances[i] = distance
        # # find min distance
        # idx_min = np.argmin(distances)
        # distance, name = distances[idx_min], names[idx_min]
        # print('name: ', name, ' distance: ',distance)
# + id="AGRrHjOu2tPf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="25c5774d-8813-487a-fca0-7702658a5cba"
# path = 'face3.jpg'
# face_id(path)
# plt.imshow(cv2.imread(os.path.join(folder_path, path))[:,:,::-1])
# + id="pmkvTAM3F2m9" colab_type="code" colab={}
|
23_vggface_mlp512_128_8_v2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Задача 1
# * Удаление из списка дубликатов
names = ["Michele", "Alex", "Alex", "Michele", "Robin", "Sara", "Alex", "Michele"]
# пишите здесь свой код
#print(help(set))
my_set={''}
my_set.remove('')
for number in range(0, len(names)):
my_set.add(names[number])
print(my_set)
# # Задача 2
# Анализ данных. Dataset - https://www.kaggle.com/ronitf/heart-disease-uci/version/1. Из приложенного файла(heart.csv) загружается dataset о наличии сердечных заболеваний.
# Найдите следующие ответы:
# * Какой средний возраст женщин у которых присутсвуют сердечные заболевания
# * Какой самый распространненый вид боли в груди при наличии сердечных заболеваний
# * Подумать. Попробуйте определить влияет ли ЭКГ(restecg) на наличие сердечных заболеваний, сделайте выводы
#
# Load heart.csv into a list of rows; each field is parsed as int unless it
# contains a '.', in which case it becomes a float.
import csv
dataset = []
with open('../datasets/heart.csv', newline='') as csvfile:
    spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
    next(spamreader, None) # skip header
    for row in spamreader:
        dataset.append(list(map(lambda x: int(x) if x.find('.') == -1 else float(x), row)))
dataset
# +
# Task 2 analysis of the heart-disease dataset.
# Column indices used: 0=age, 1=sex (0=female), 2=chest-pain type,
# 6=restecg result, 13=target (0=no disease, otherwise disease).
sum_of_ages = 0
sick_female_count = 0
sick_male_count = 0
sick = [0, 0, 0]
healthy = [0, 0, 0]
pain_of_chest = [0, 0, 0, 0]
for row in dataset:
    if row[13] == 0:  # no heart disease
        healthy[row[6]] += 1
    else:  # heart disease present
        sick[row[6]] += 1
        pain_of_chest[row[2]] += 1
        if row[1] == 0:  # female
            sum_of_ages += row[0]
            sick_female_count += 1
        else:
            sick_male_count += 1
average_age = sum_of_ages / sick_female_count
# Bug fix: the original assigned into `sort_pain` element-by-element without
# ever initialising it, raising NameError. sorted() gives the same
# descending copy in one step (also dropped a leftover debug print).
sort_pain = sorted(pain_of_chest, reverse=True)
print('Средний возраст женщин, имеющих сердечное заболевание, согласно приведенным данным, составляет',round(average_age,1), 'лет.')
print('Самый распространенный вид боли в груди имеет номер',pain_of_chest.index(sort_pain[0]),".")
for number in range(len(sick)):
    print(f"Из {sick[number]+healthy[number]}\t людей, получивших {number} результат кардиограммы, {round(sick[number]/(sick[number]+healthy[number])*100, 1)}% больных людей.")
# -
|
module_001_python/lesson_003_list_tuple_set_dict/student_tasks/HomeWork.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="53j9aIWx8POY" colab_type="code" colab={}
# !pip install tensorflow-gpu
# !pip install konlpy
# + [markdown] id="W_drZ3BQ_1Is" colab_type="text"
# 실행에 필요한 tensorflow와 konlpy를 설치합니다
# + id="QVES9c787Idx" colab_type="code" colab={}
import pandas as pd
import numpy as np
from konlpy.tag import Okt
from string import punctuation
from collections import Counter
from tensorflow.keras import models, layers
from tensorflow.keras.callbacks import TensorBoard
import tensorflow as tf
import matplotlib.pyplot as plt
# + [markdown] id="7Pq6hb7LACQ6" colab_type="text"
# 실행에 필요한 모듈들을 import합니다
# + id="gaj5l43u7QqW" colab_type="code" colab={}
# Korean morphological analyser and a global word-frequency counter.
okt = Okt()
counts = Counter()
# + id="hUfoJlQe7R_3" colab_type="code" colab={}
# Load positive (good.csv) and negative (bad.csv) reviews, label them 1/0,
# and concatenate into a single data/label pair of numpy arrays.
df = pd.read_csv('good.csv',sep='\n')
df1 = pd.read_csv('bad.csv',sep='\n')
arr = np.array(df['review'])
arr1 = np.array(df1['review'])
label1 = np.ones(len(arr))
label2 = np.zeros(len(arr1))
data = np.concatenate([arr, arr1])
label = np.concatenate([label1, label2])
# + [markdown] id="XyBBRv_1ATUb" colab_type="text"
# Pandas를 이용하여 Enter로 구분된 csv를 불러옵니다.
# 불러들인 두개의 DataFrame을 합쳐서 numpy배열의 데이터셋을 만듭니다.
# 0과1로 데이터 라벨링을 진행한 후 numpy배열의 label셋을 만듭니다.
# + id="kBPEx7Cz7U4U" colab_type="code" colab={}
# Shuffle data and labels with the same random permutation so each review
# keeps its label.
shuffle_data = np.arange(data.shape[0])
np.random.shuffle(shuffle_data)
# Bug fix: the original indexed with an undefined name `s` (NameError);
# the permutation array is called `shuffle_data`.
data = data[shuffle_data]
label = label[shuffle_data]
# + [markdown] id="D_PF0g_4BW4z" colab_type="text"
# shuffle_data 배열에 data의 인덱스를 넣고 data와 label 배열을 동일한 순서로 섞어 데이터셋을 셔플합니다.
# + id="l1wnWAo29HRF" colab_type="code" colab={}
# Tokenise each review into morphemes with KoNLPy's Okt and re-join with spaces.
revised_reviews = [' '.join(okt.morphs(review)) for review in data]
# + [markdown] id="vTYKnbJaC_G1" colab_type="text"
# 한국어 자연어 처리 모듈인 konlpy의 Okt를 이용하여 형태소를 분리하여 데이터를 토큰화합니다.
# + id="6CR5MJyN7kuX" colab_type="code" colab={}
# Surround punctuation with spaces, lower-case the text, and update the
# global word-frequency counter with the resulting tokens.
for i,review in enumerate(revised_reviews):
    text = ''.join([c if c not in punctuation else ' '+c+' ' \
                    for c in review]).lower()
    revised_reviews[i] = text
    counts.update(text.split())
# + [markdown] id="-TK4IJJCDwtW" colab_type="text"
# 단어를 분리한 후 각 단어의 빈도를 카운트합니다.
# + id="3rS-ptcj7lPI" colab_type="code" colab={}
# Vocabulary: words ranked by frequency; the most frequent word maps to 1
# (index 0 is reserved for zero padding).
word_counts = sorted(counts, key=counts.get, reverse=True)
word_to_int = {}
for rank, word in enumerate(word_counts, 1):
    word_to_int[word] = rank
# + [markdown] id="ISjx3t7mERTE" colab_type="text"
# 각 단어를 딕셔너리를 통해 정수로 변환합니다.
# + id="-FRg8pSp7oBy" colab_type="code" colab={}
# Map each review to its word indices, then pack into fixed-length (300)
# sequences, left-padded with zeros; longer reviews keep only their last
# 300 tokens.
mapped_reviews = []
for review in revised_reviews:
    mapped_reviews.append([word_to_int[word] for word in review.split()])
sequence_length = 300
sequences = np.zeros((len(mapped_reviews), sequence_length), dtype=int)
# NOTE(review): `df` is reused here as a plain label list, shadowing the
# earlier pandas DataFrame of the same name.
df = []
for i, row in enumerate(mapped_reviews):
    review_arr = np.array(row)
    df.append(label[i])
    sequences[i, -len(row):] = review_arr[-sequence_length:]
# + [markdown] id="zmcJs0_7EsKe" colab_type="text"
# 총 길이가300인 시퀀스에 단어 인덱스를 대입합니다.
# 단어 인덱스의 길이가 300보다 작을 경우 남은 공간을 제로 패딩으로 채웁니다.
#
# + id="zNl04B9y8E4j" colab_type="code" colab={}
# Train/test split: the first 20000 rows for training, the rest for testing.
X_train = sequences[:20000, :]
y_train = np.array(df[:20000])
X_test = sequences[20000:, :]
y_test = np.array(df[20000:])
# + [markdown] id="kELTisntHb_t" colab_type="text"
# Split the dataset into a training set and a test set.
# + id="gAzNOuU_HsSL" colab_type="code" colab={}
n_words = len(word_to_int) + 1  # +1 for the reserved padding index 0
# + id="5zRW9tc58HKS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 353} outputId="31363094-0ca1-46f7-f565-93ddd1f7ec75"
# Binary sentiment classifier: embedding -> LSTM(16) -> dropout ->
# dense sigmoid output.
model = models.Sequential()
model.add(layers.Embedding(n_words, 300,
                           embeddings_regularizer='l2'))
model.add(layers.LSTM(16))
model.add(layers.Dropout(0.2))
model.add(layers.Flatten())
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
# + [markdown] id="kaLTbjlbIBk1" colab_type="text"
# 학습을 진행할 LSTM모델을 구축합니다.
# + id="U4kYrhuZ8HG6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 504} outputId="f060af97-db00-4342-a0fc-3ce8ad992365"
# Compile and train for up to 10 epochs; keep the checkpoint with the
# lowest validation loss and stop early after 5 epochs without improvement.
model.compile(loss='binary_crossentropy',
              optimizer='adam', metrics=['acc'])
callback_list = [TensorBoard(),tf.keras.callbacks.ModelCheckpoint(filepath='Sentiment_Classification.h5',
                 monitor='val_loss', save_best_only=True),
                 tf.keras.callbacks.EarlyStopping(patience=5)]
history = model.fit(X_train, y_train,
                    batch_size=4096, epochs=10,
                    validation_split=0.2, callbacks=callback_list)
# + [markdown] id="UOHxf6kdIGjJ" colab_type="text"
# 학습할 데이터 셋을 할당한 후 손실 함수데이터 셋을 할당한 후 최적화를 위해 손실 함수와 하이퍼파라미터를 지정하고 10회 가량 반복학습을 진행합니다.
#
# 반복학습 중 가장 결과가 좋은 모델을 자동으로 저장합니다.
# + id="jV5nCnxh8HEx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="8181ae4b-f1ae-49f9-bace-cb0badca09bb"
# Plot training vs validation loss per epoch.
epochs = np.arange(1, 11)
plt.plot(epochs, history.history['loss'])
plt.plot(epochs, history.history['val_loss'])
plt.xlabel('epochs')
plt.ylabel('loss')
plt.show()
# + [markdown] id="7AdDW5e7DUcE" colab_type="text"
# 학습 중 기록된 손실률을 시각화 합니다.
# + id="YCBoL5Ku8Lwb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="fe550236-f069-499f-f439-a6846e35fa1e"
# Plot training vs validation accuracy per epoch.
epochs = np.arange(1, 11)
plt.plot(epochs, history.history['acc'])
plt.plot(epochs, history.history['val_acc'])
plt.xlabel('epochs')
plt.ylabel('acc')
plt.show()
# + [markdown] id="FNdcZstQDqEa" colab_type="text"
# 학습 중 기록된 정확도를 시각화 합니다.
# + id="wgKGmnZo_qKl" colab_type="code" colab={}
|
DIgital_Contents.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from Bio import Seq
# +
# Copyright 2000, 2004 by <NAME>.
# Revisions copyright 2010-2013, 2015-2018 by <NAME>.
# All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Code for dealing with sequence alignments.
One of the most important things in this module is the MultipleSeqAlignment
class, used in the Bio.AlignIO module.
"""
from __future__ import print_function
import sys # Only needed to check if we are using Python 2 or 3
from Bio._py3k import raise_from
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord, _RestrictedDict
from Bio import Alphabet
from Bio.Align import _aligners
# Import errors may occur here if a compiled aligners.c file
# (_aligners.pyd or _aligners.so) is missing or if the user is
# importing from within the Biopython source tree, see PR #2007:
# https://github.com/biopython/biopython/pull/2007
class MultipleSeqAlignment(object):
"""Represents a classical multiple sequence alignment (MSA).
By this we mean a collection of sequences (usually shown as rows) which
are all the same length (usually with gap characters for insertions or
padding). The data can then be regarded as a matrix of letters, with well
defined columns.
You would typically create an MSA by loading an alignment file with the
AlignIO module:
>>> from Bio import AlignIO
>>> align = AlignIO.read("Clustalw/opuntia.aln", "clustal")
>>> print(align)
SingleLetterAlphabet() alignment with 7 rows and 156 columns
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273285|gb|AF191659.1|AF191
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273284|gb|AF191658.1|AF191
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273287|gb|AF191661.1|AF191
TATACATAAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273286|gb|AF191660.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273290|gb|AF191664.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273289|gb|AF191663.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273291|gb|AF191665.1|AF191
In some respects you can treat these objects as lists of SeqRecord objects,
each representing a row of the alignment. Iterating over an alignment gives
the SeqRecord object for each row:
>>> len(align)
7
>>> for record in align:
... print("%s %i" % (record.id, len(record)))
...
gi|6273285|gb|AF191659.1|AF191 156
gi|6273284|gb|AF191658.1|AF191 156
gi|6273287|gb|AF191661.1|AF191 156
gi|6273286|gb|AF191660.1|AF191 156
gi|6273290|gb|AF191664.1|AF191 156
gi|6273289|gb|AF191663.1|AF191 156
gi|6273291|gb|AF191665.1|AF191 156
You can also access individual rows as SeqRecord objects via their index:
>>> print(align[0].id)
gi|6273285|gb|AF191659.1|AF191
>>> print(align[-1].id)
gi|6273291|gb|AF191665.1|AF191
And extract columns as strings:
>>> print(align[:, 1])
AAAAAAA
Or, take just the first ten columns as a sub-alignment:
>>> print(align[:, :10])
SingleLetterAlphabet() alignment with 7 rows and 10 columns
TATACATTAA gi|6273285|gb|AF191659.1|AF191
TATACATTAA gi|6273284|gb|AF191658.1|AF191
TATACATTAA gi|6273287|gb|AF191661.1|AF191
TATACATAAA gi|6273286|gb|AF191660.1|AF191
TATACATTAA gi|6273290|gb|AF191664.1|AF191
TATACATTAA gi|6273289|gb|AF191663.1|AF191
TATACATTAA gi|6273291|gb|AF191665.1|AF191
Combining this alignment slicing with alignment addition allows you to
remove a section of the alignment. For example, taking just the first
and last ten columns:
>>> print(align[:, :10] + align[:, -10:])
SingleLetterAlphabet() alignment with 7 rows and 20 columns
TATACATTAAGTGTACCAGA gi|6273285|gb|AF191659.1|AF191
TATACATTAAGTGTACCAGA gi|6273284|gb|AF191658.1|AF191
TATACATTAAGTGTACCAGA gi|6273287|gb|AF191661.1|AF191
TATACATAAAGTGTACCAGA gi|6273286|gb|AF191660.1|AF191
TATACATTAAGTGTACCAGA gi|6273290|gb|AF191664.1|AF191
TATACATTAAGTATACCAGA gi|6273289|gb|AF191663.1|AF191
TATACATTAAGTGTACCAGA gi|6273291|gb|AF191665.1|AF191
Note - This object replaced the older Alignment object defined in module
Bio.Align.Generic but is not fully backwards compatible with it.
Note - This object does NOT attempt to model the kind of alignments used
in next generation sequencing with multiple sequencing reads which are
much shorter than the alignment, and where there is usually a consensus or
reference sequence with special status.
"""
    def __init__(self, records, alphabet=None,
                 annotations=None, column_annotations=None):
        """Initialize a new MultipleSeqAlignment object.

        Arguments:
         - records - A list (or iterator) of SeqRecord objects, whose
                     sequences are all the same length. This may be an
                     empty list.
         - alphabet - The alphabet for the whole alignment, typically a gapped
                      alphabet, which should be a super-set of the individual
                      record alphabets. If omitted, a consensus alphabet is
                      used.
         - annotations - Information about the whole alignment (dictionary).
         - column_annotations - Per column annotation (restricted dictionary).
                      This holds Python sequences (lists, strings, tuples)
                      whose length matches the number of columns. A typical
                      use would be a secondary structure consensus string.

        You would normally load a MSA from a file using Bio.AlignIO, but you
        can do this from a list of SeqRecord objects too:

        >>> from Bio.Alphabet import generic_dna
        >>> from Bio.Seq import Seq
        >>> from Bio.SeqRecord import SeqRecord
        >>> from Bio.Align import MultipleSeqAlignment
        >>> a = SeqRecord(Seq("AAAACGT", generic_dna), id="Alpha")
        >>> b = SeqRecord(Seq("AAA-CGT", generic_dna), id="Beta")
        >>> c = SeqRecord(Seq("AAAAGGT", generic_dna), id="Gamma")
        >>> align = MultipleSeqAlignment([a, b, c],
        ...                              annotations={"tool": "demo"},
        ...                              column_annotations={"stats": "CCCXCCC"})
        >>> print(align)
        DNAAlphabet() alignment with 3 rows and 7 columns
        AAAACGT Alpha
        AAA-CGT Beta
        AAAAGGT Gamma
        >>> align.annotations
        {'tool': 'demo'}
        >>> align.column_annotations
        {'stats': 'CCCXCCC'}
        """
        if alphabet is not None:
            if not isinstance(alphabet, (Alphabet.Alphabet, Alphabet.AlphabetEncoder)):
                raise ValueError("Invalid alphabet argument")
            self._alphabet = alphabet
        else:
            # Default while we add sequences, will take a consensus later
            self._alphabet = Alphabet.single_letter_alphabet
        self._records = []
        if records:
            # extend() validates each record's length against the first one.
            self.extend(records)
            if alphabet is None:
                # No alphabet was given, take a consensus alphabet
                self._alphabet = Alphabet._consensus_alphabet(rec.seq.alphabet for
                                                              rec in self._records
                                                              if rec.seq is not None)
        # Annotations about the whole alignment
        if annotations is None:
            annotations = {}
        elif not isinstance(annotations, dict):
            raise TypeError("annotations argument should be a dict")
        self.annotations = annotations
        # Annotations about each column of the alignment.
        if column_annotations is None:
            column_annotations = {}
        # Handle this via the property set function which will validate it
        self.column_annotations = column_annotations
def _set_per_column_annotations(self, value):
if not isinstance(value, dict):
raise TypeError("The per-column-annotations should be a "
"(restricted) dictionary.")
# Turn this into a restricted-dictionary (and check the entries)
if len(self):
# Use the standard method to get the length
expected_length = self.get_alignment_length()
self._per_col_annotations = _RestrictedDict(length=expected_length)
self._per_col_annotations.update(value)
else:
# Bit of a problem case... number of columns is undefined
self._per_col_annotations = None
if value:
raise ValueError("Can't set per-column-annotations without an alignment")
def _get_per_column_annotations(self):
if self._per_col_annotations is None:
# This happens if empty at initialisation
if len(self):
# Use the standard method to get the length
expected_length = self.get_alignment_length()
else:
# Should this raise an exception? Compare SeqRecord behaviour...
expected_length = 0
self._per_col_annotations = _RestrictedDict(length=expected_length)
return self._per_col_annotations
column_annotations = property(
fget=_get_per_column_annotations,
fset=_set_per_column_annotations,
doc="""Dictionary of per-letter-annotation for the sequence.""")
def _str_line(self, record, length=50):
"""Return a truncated string representation of a SeqRecord (PRIVATE).
This is a PRIVATE function used by the __str__ method.
"""
if record.seq.__class__.__name__ == "CodonSeq":
if len(record.seq) <= length:
return "%s %s" % (record.seq, record.id)
else:
return "%s...%s %s" \
% (record.seq[:length - 3], record.seq[-3:], record.id)
else:
if len(record.seq) <= length:
return "%s %s" % (record.seq, record.id)
else:
return "%s...%s %s" \
% (record.seq[:length - 6], record.seq[-3:], record.id)
def __str__(self):
"""Return a multi-line string summary of the alignment.
This output is intended to be readable, but large alignments are
shown truncated. A maximum of 20 rows (sequences) and 50 columns
are shown, with the record identifiers. This should fit nicely on a
single screen. e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align import MultipleSeqAlignment
>>> align = MultipleSeqAlignment([], Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> print(align)
Gapped(IUPACUnambiguousDNA(), '-') alignment with 3 rows and 12 columns
ACTGCTAGCTAG Alpha
ACT-CTAGCTAG Beta
ACTGCTAGATAG Gamma
See also the alignment's format method.
"""
rows = len(self._records)
lines = ["%s alignment with %i rows and %i columns"
% (str(self._alphabet), rows, self.get_alignment_length())]
if rows <= 20:
lines.extend(self._str_line(rec) for rec in self._records)
else:
lines.extend(self._str_line(rec) for rec in self._records[:18])
lines.append("...")
lines.append(self._str_line(self._records[-1]))
return "\n".join(lines)
def __repr__(self):
"""Return a representation of the object for debugging.
The representation cannot be used with eval() to recreate the object,
which is usually possible with simple python ojects. For example:
<Bio.Align.MultipleSeqAlignment instance (2 records of length 14,
SingleLetterAlphabet()) at a3c184c>
The hex string is the memory address of the object, see help(id).
This provides a simple way to visually distinguish alignments of
the same size.
"""
# A doctest for __repr__ would be nice, but __class__ comes out differently
# if run via the __main__ trick.
return "<%s instance (%i records of length %i, %s) at %x>" % \
(self.__class__, len(self._records),
self.get_alignment_length(), repr(self._alphabet), id(self))
# This version is useful for doing eval(repr(alignment)),
# but it can be VERY long:
# return "%s(%s, %s)" \
# % (self.__class__, repr(self._records), repr(self._alphabet))
def format(self, format):
"""Return the alignment as a string in the specified file format.
The format should be a lower case string supported as an output
format by Bio.AlignIO (such as "fasta", "clustal", "phylip",
"stockholm", etc), which is used to turn the alignment into a
string.
e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align import MultipleSeqAlignment
>>> align = MultipleSeqAlignment([], Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> print(align.format("fasta"))
>Alpha
ACTGCTAGCTAG
>Beta
ACT-CTAGCTAG
>Gamma
ACTGCTAGATAG
<BLANKLINE>
>>> print(align.format("phylip"))
3 12
Alpha ACTGCTAGCT AG
Beta ACT-CTAGCT AG
Gamma ACTGCTAGAT AG
<BLANKLINE>
For Python 2.6, 3.0 or later see also the built in format() function.
"""
# See also the __format__ added for Python 2.6 / 3.0, PEP 3101
# See also the SeqRecord class and its format() method using Bio.SeqIO
return self.__format__(format)
def __format__(self, format_spec):
"""Return the alignment as a string in the specified file format.
This method supports the python format() function added in
Python 2.6/3.0. The format_spec should be a lower case
string supported by Bio.AlignIO as an output file format.
See also the alignment's format() method.
"""
if format_spec:
from Bio._py3k import StringIO
from Bio import AlignIO
handle = StringIO()
AlignIO.write([self], handle, format_spec)
return handle.getvalue()
else:
# Follow python convention and default to using __str__
return str(self)
def __iter__(self):
"""Iterate over alignment rows as SeqRecord objects.
e.g.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align import MultipleSeqAlignment
>>> align = MultipleSeqAlignment([], Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> for record in align:
... print(record.id)
... print(record.seq)
...
Alpha
ACTGCTAGCTAG
Beta
ACT-CTAGCTAG
Gamma
ACTGCTAGATAG
"""
return iter(self._records)
def __len__(self):
"""Return the number of sequences in the alignment.
Use len(alignment) to get the number of sequences (i.e. the number of
rows), and alignment.get_alignment_length() to get the length of the
longest sequence (i.e. the number of columns).
This is easy to remember if you think of the alignment as being like a
list of SeqRecord objects.
"""
return len(self._records)
def get_alignment_length(self):
"""Return the maximum length of the alignment.
All objects in the alignment should (hopefully) have the same
length. This function will go through and find this length
by finding the maximum length of sequences in the alignment.
>>> from Bio.Alphabet import IUPAC, Gapped
>>> from Bio.Align import MultipleSeqAlignment
>>> align = MultipleSeqAlignment([], Gapped(IUPAC.unambiguous_dna, "-"))
>>> align.add_sequence("Alpha", "ACTGCTAGCTAG")
>>> align.add_sequence("Beta", "ACT-CTAGCTAG")
>>> align.add_sequence("Gamma", "ACTGCTAGATAG")
>>> align.get_alignment_length()
12
If you want to know the number of sequences in the alignment,
use len(align) instead:
>>> len(align)
3
"""
max_length = 0
for record in self._records:
if len(record.seq) > max_length:
max_length = len(record.seq)
return max_length
def add_sequence(self, descriptor, sequence, start=None, end=None,
weight=1.0):
"""Add a sequence to the alignment.
This doesn't do any kind of alignment, it just adds in the sequence
object, which is assumed to be prealigned with the existing
sequences.
Arguments:
- descriptor - The descriptive id of the sequence being added.
This will be used as the resulting SeqRecord's
.id property (and, for historical compatibility,
also the .description property)
- sequence - A string with sequence info.
- start - You can explicitly set the start point of the sequence.
This is useful (at least) for BLAST alignments, which can
just be partial alignments of sequences.
- end - Specify the end of the sequence, which is important
for the same reason as the start.
- weight - The weight to place on the sequence in the alignment.
By default, all sequences have the same weight. (0.0 =>
no weight, 1.0 => highest weight)
In general providing a SeqRecord and calling .append is preferred.
"""
new_seq = Seq(sequence, self._alphabet)
# We are now effectively using the SeqRecord's .id as
# the primary identifier (e.g. in Bio.SeqIO) so we should
# populate it with the descriptor.
# For backwards compatibility, also store this in the
# SeqRecord's description property.
new_record = SeqRecord(new_seq,
id=descriptor,
description=descriptor)
# hack! We really need to work out how to deal with annotations
# and features in biopython. Right now, I'll just use the
# generic annotations dictionary we've got to store the start
# and end, but we should think up something better. I don't know
# if I'm really a big fan of the LocatableSeq thing they've got
# in BioPerl, but I'm not positive what the best thing to do on
# this is...
if start:
new_record.annotations["start"] = start
if end:
new_record.annotations["end"] = end
# another hack to add weight information to the sequence
new_record.annotations["weight"] = weight
self._records.append(new_record)
def extend(self, records):
"""Add more SeqRecord objects to the alignment as rows.
They must all have the same length as the original alignment, and have
alphabets compatible with the alignment's alphabet. For example,
>>> from Bio.Alphabet import generic_dna
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Align import MultipleSeqAlignment
>>> a = SeqRecord(Seq("AAAACGT", generic_dna), id="Alpha")
>>> b = SeqRecord(Seq("AAA-CGT", generic_dna), id="Beta")
>>> c = SeqRecord(Seq("AAAAGGT", generic_dna), id="Gamma")
>>> d = SeqRecord(Seq("AAAACGT", generic_dna), id="Delta")
>>> e = SeqRecord(Seq("AAA-GGT", generic_dna), id="Epsilon")
First we create a small alignment (three rows):
>>> align = MultipleSeqAlignment([a, b, c])
>>> print(align)
DNAAlphabet() alignment with 3 rows and 7 columns
AAAACGT Alpha
AAA-CGT Beta
AAAAGGT Gamma
Now we can extend this alignment with another two rows:
>>> align.extend([d, e])
>>> print(align)
DNAAlphabet() alignment with 5 rows and 7 columns
AAAACGT Alpha
AAA-CGT Beta
AAAAGGT Gamma
AAAACGT Delta
AAA-GGT Epsilon
Because the alignment object allows iteration over the rows as
SeqRecords, you can use the extend method with a second alignment
(provided its sequences have the same length as the original alignment).
"""
if len(self):
# Use the standard method to get the length
expected_length = self.get_alignment_length()
else:
# Take the first record's length
records = iter(records) # records arg could be list or iterator
try:
rec = next(records)
except StopIteration:
# Special case, no records
return
expected_length = len(rec)
self._append(rec, expected_length)
# Can now setup the per-column-annotations as well, set to None
# while missing the length:
self.column_annotations = {}
# Now continue to the rest of the records as usual
for rec in records:
self._append(rec, expected_length)
def append(self, record):
"""Add one more SeqRecord object to the alignment as a new row.
This must have the same length as the original alignment (unless this is
the first record), and have an alphabet compatible with the alignment's
alphabet.
>>> from Bio import AlignIO
>>> align = AlignIO.read("Clustalw/opuntia.aln", "clustal")
>>> print(align)
SingleLetterAlphabet() alignment with 7 rows and 156 columns
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273285|gb|AF191659.1|AF191
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273284|gb|AF191658.1|AF191
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273287|gb|AF191661.1|AF191
TATACATAAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273286|gb|AF191660.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273290|gb|AF191664.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273289|gb|AF191663.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273291|gb|AF191665.1|AF191
>>> len(align)
7
We'll now construct a dummy record to append as an example:
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> dummy = SeqRecord(Seq("N"*156), id="dummy")
Now append this to the alignment,
>>> align.append(dummy)
>>> print(align)
SingleLetterAlphabet() alignment with 8 rows and 156 columns
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273285|gb|AF191659.1|AF191
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273284|gb|AF191658.1|AF191
TATACATTAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273287|gb|AF191661.1|AF191
TATACATAAAAGAAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273286|gb|AF191660.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273290|gb|AF191664.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273289|gb|AF191663.1|AF191
TATACATTAAAGGAGGGGGATGCGGATAAATGGAAAGGCGAAAG...AGA gi|6273291|gb|AF191665.1|AF191
NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN...NNN dummy
>>> len(align)
8
"""
if self._records:
self._append(record, self.get_alignment_length())
else:
self._append(record)
    def _append(self, record, expected_length=None):
        """Validate and append a record (PRIVATE).

        Checks, in order: the record is a SeqRecord, its length matches
        ``expected_length`` (if given), and its alphabet is compatible with
        the alignment's alphabet. Raises TypeError/ValueError otherwise.
        """
        if not isinstance(record, SeqRecord):
            raise TypeError("New sequence is not a SeqRecord object")
        # Currently the get_alignment_length() call is expensive, so we need
        # to avoid calling it repeatedly for __init__ and extend, hence this
        # private _append method taking the precomputed expected length.
        # expected_length is None when appending to an empty alignment,
        # in which case any length is accepted.
        if expected_length is not None and len(record) != expected_length:
            # TODO - Use the following more helpful error, but update unit tests
            # raise ValueError("New sequence is not of length %i" \
            #                  % self.get_alignment_length())
            raise ValueError("Sequences must all be the same length")
        # Using not self.alphabet.contains(record.seq.alphabet) needs fixing
        # for AlphabetEncoders (e.g. gapped versus ungapped).
        if not Alphabet._check_type_compatible([self._alphabet, record.seq.alphabet]):
            raise ValueError("New sequence's alphabet is incompatible")
        self._records.append(record)
def __add__(self, other):
"""Combine two alignments with the same number of rows by adding them.
If you have two multiple sequence alignments (MSAs), there are two ways to think
about adding them - by row or by column. Using the extend method adds by row.
Using the addition operator adds by column. For example,
>>> from Bio.Alphabet import generic_dna
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Align import MultipleSeqAlignment
>>> a1 = SeqRecord(Seq("AAAAC", generic_dna), id="Alpha")
>>> b1 = SeqRecord(Seq("AAA-C", generic_dna), id="Beta")
>>> c1 = SeqRecord(Seq("AAAAG", generic_dna), id="Gamma")
>>> a2 = SeqRecord(Seq("GT", generic_dna), id="Alpha")
>>> b2 = SeqRecord(Seq("GT", generic_dna), id="Beta")
>>> c2 = SeqRecord(Seq("GT", generic_dna), id="Gamma")
>>> left = MultipleSeqAlignment([a1, b1, c1],
... annotations={"tool": "demo", "name": "start"},
... column_annotations={"stats": "CCCXC"})
>>> right = MultipleSeqAlignment([a2, b2, c2],
... annotations={"tool": "demo", "name": "end"},
... column_annotations={"stats": "CC"})
Now, let's look at these two alignments:
>>> print(left)
DNAAlphabet() alignment with 3 rows and 5 columns
AAAAC Alpha
AAA-C Beta
AAAAG Gamma
>>> print(right)
DNAAlphabet() alignment with 3 rows and 2 columns
GT Alpha
GT Beta
GT Gamma
And add them:
>>> combined = left + right
>>> print(combined)
DNAAlphabet() alignment with 3 rows and 7 columns
AAAACGT Alpha
AAA-CGT Beta
AAAAGGT Gamma
For this to work, both alignments must have the same number of records (here
they both have 3 rows):
>>> len(left)
3
>>> len(right)
3
>>> len(combined)
3
The individual rows are SeqRecord objects, and these can be added together. Refer
to the SeqRecord documentation for details of how the annotation is handled. This
example is a special case in that both original alignments shared the same names,
meaning when the rows are added they also get the same name.
Any common annotations are preserved, but differing annotation is lost. This is
the same behaviour used in the SeqRecord annotations and is designed to prevent
accidental propagation of inappropriate values:
>>> combined.annotations
{'tool': 'demo'}
Similarly any common per-column-annotations are combined:
>>> combined.column_annotations
{'stats': 'CCCXCCC'}
"""
if not isinstance(other, MultipleSeqAlignment):
raise NotImplementedError
if len(self) != len(other):
raise ValueError("When adding two alignments they must have the same length"
" (i.e. same number or rows)")
alpha = Alphabet._consensus_alphabet([self._alphabet, other._alphabet])
merged = (left + right for left, right in zip(self, other))
# Take any common annotation:
annotations = {}
for k, v in self.annotations.items():
if k in other.annotations and other.annotations[k] == v:
annotations[k] = v
column_annotations = {}
for k, v in self.column_annotations.items():
if k in other.column_annotations:
column_annotations[k] = v + other.column_annotations[k]
return MultipleSeqAlignment(merged, alpha, annotations, column_annotations)
    def __getitem__(self, index):
        """Access part of the alignment.
        Depending on the indices, you can get a SeqRecord object
        (representing a single row), a Seq object (for a single columns),
        a string (for a single characters) or another alignment
        (representing some part or all of the alignment).
        align[r,c] gives a single character as a string
        align[r] gives a row as a SeqRecord
        align[r,:] gives a row as a SeqRecord
        align[:,c] gives a column as a Seq (using the alignment's alphabet)
        align[:] and align[:,:] give a copy of the alignment
        Anything else gives a sub alignment, e.g.
        align[0:2] or align[0:2,:] uses only row 0 and 1
        align[:,1:3] uses only columns 1 and 2
        align[0:2,1:3] uses only rows 0 & 1 and only cols 1 & 2
        We'll use the following example alignment here for illustration:
        >>> from Bio.Alphabet import generic_dna
        >>> from Bio.Seq import Seq
        >>> from Bio.SeqRecord import SeqRecord
        >>> from Bio.Align import MultipleSeqAlignment
        >>> a = SeqRecord(Seq("AAAACGT", generic_dna), id="Alpha")
        >>> b = SeqRecord(Seq("AAA-CGT", generic_dna), id="Beta")
        >>> c = SeqRecord(Seq("AAAAGGT", generic_dna), id="Gamma")
        >>> d = SeqRecord(Seq("AAAACGT", generic_dna), id="Delta")
        >>> e = SeqRecord(Seq("AAA-GGT", generic_dna), id="Epsilon")
        >>> align = MultipleSeqAlignment([a, b, c, d, e], generic_dna)
        You can access a row of the alignment as a SeqRecord using an integer
        index (think of the alignment as a list of SeqRecord objects here):
        >>> first_record = align[0]
        >>> print("%s %s" % (first_record.id, first_record.seq))
        Alpha AAAACGT
        >>> last_record = align[-1]
        >>> print("%s %s" % (last_record.id, last_record.seq))
        Epsilon AAA-GGT
        You can also access use python's slice notation to create a sub-alignment
        containing only some of the SeqRecord objects:
        >>> sub_alignment = align[2:5]
        >>> print(sub_alignment)
        DNAAlphabet() alignment with 3 rows and 7 columns
        AAAAGGT Gamma
        AAAACGT Delta
        AAA-GGT Epsilon
        This includes support for a step, i.e. align[start:end:step], which
        can be used to select every second sequence:
        >>> sub_alignment = align[::2]
        >>> print(sub_alignment)
        DNAAlphabet() alignment with 3 rows and 7 columns
        AAAACGT Alpha
        AAAAGGT Gamma
        AAA-GGT Epsilon
        Or to get a copy of the alignment with the rows in reverse order:
        >>> rev_alignment = align[::-1]
        >>> print(rev_alignment)
        DNAAlphabet() alignment with 5 rows and 7 columns
        AAA-GGT Epsilon
        AAAACGT Delta
        AAAAGGT Gamma
        AAA-CGT Beta
        AAAACGT Alpha
        You can also use two indices to specify both rows and columns. Using simple
        integers gives you the entry as a single character string. e.g.
        >>> align[3, 4]
        'C'
        This is equivalent to:
        >>> align[3][4]
        'C'
        or:
        >>> align[3].seq[4]
        'C'
        To get a single column (as a string) use this syntax:
        >>> align[:, 4]
        'CCGCG'
        Or, to get part of a column,
        >>> align[1:3, 4]
        'CG'
        However, in general you get a sub-alignment,
        >>> print(align[1:5, 3:6])
        DNAAlphabet() alignment with 4 rows and 3 columns
        -CG Beta
        AGG Gamma
        ACG Delta
        -GG Epsilon
        This should all seem familiar to anyone who has used the NumPy
        array or matrix objects.
        """
        if isinstance(index, int):
            # e.g. result = align[x]
            # Return a SeqRecord (a single row)
            return self._records[index]
        elif isinstance(index, slice):
            # e.g. sub_align = align[i:j:k]
            # Row slice: build a new alignment from the selected records.
            new = MultipleSeqAlignment(self._records[index], self._alphabet)
            if self.column_annotations and len(new) == len(self):
                # All rows kept (although could have been reversed)
                # Preserve the column annotations too (all columns present),
                for k, v in self.column_annotations.items():
                    new.column_annotations[k] = v
            return new
        elif len(index) != 2:
            # Anything other than int/slice must be a (row, col) pair.
            raise TypeError("Invalid index type.")
        # Handle double indexing
        row_index, col_index = index
        if isinstance(row_index, int):
            # e.g. row_or_part_row = align[6, 1:4], gives a SeqRecord
            # (or a single character string if col_index is also an int)
            return self._records[row_index][col_index]
        elif isinstance(col_index, int):
            # e.g. col_or_part_col = align[1:5, 6], gives a string
            # built from one character per selected row.
            return "".join(rec[col_index] for rec in self._records[row_index])
        else:
            # e.g. sub_align = align[1:4, 5:7], gives another alignment
            # (both indices are slices)
            new = MultipleSeqAlignment((rec[col_index] for rec in self._records[row_index]),
                                       self._alphabet)
            if self.column_annotations and len(new) == len(self):
                # All rows kept (although could have been reversed)
                # Preserve the column annotations, sliced to the kept columns,
                for k, v in self.column_annotations.items():
                    new.column_annotations[k] = v[col_index]
            return new
def sort(self, key=None, reverse=False):
"""Sort the rows (SeqRecord objects) of the alignment in place.
This sorts the rows alphabetically using the SeqRecord object id by
default. The sorting can be controlled by supplying a key function
which must map each SeqRecord to a sort value.
This is useful if you want to add two alignments which use the same
record identifiers, but in a different order. For example,
>>> from Bio.Alphabet import generic_dna
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Align import MultipleSeqAlignment
>>> align1 = MultipleSeqAlignment([
... SeqRecord(Seq("ACGT", generic_dna), id="Human"),
... SeqRecord(Seq("ACGG", generic_dna), id="Mouse"),
... SeqRecord(Seq("ACGC", generic_dna), id="Chicken"),
... ])
>>> align2 = MultipleSeqAlignment([
... SeqRecord(Seq("CGGT", generic_dna), id="Mouse"),
... SeqRecord(Seq("CGTT", generic_dna), id="Human"),
... SeqRecord(Seq("CGCT", generic_dna), id="Chicken"),
... ])
If you simple try and add these without sorting, you get this:
>>> print(align1 + align2)
DNAAlphabet() alignment with 3 rows and 8 columns
ACGTCGGT <unknown id>
ACGGCGTT <unknown id>
ACGCCGCT Chicken
Consult the SeqRecord documentation which explains why you get a
default value when annotation like the identifier doesn't match up.
However, if we sort the alignments first, then add them we get the
desired result:
>>> align1.sort()
>>> align2.sort()
>>> print(align1 + align2)
DNAAlphabet() alignment with 3 rows and 8 columns
ACGCCGCT Chicken
ACGTCGTT Human
ACGGCGGT Mouse
As an example using a different sort order, you could sort on the
GC content of each sequence.
>>> from Bio.SeqUtils import GC
>>> print(align1)
DNAAlphabet() alignment with 3 rows and 4 columns
ACGC Chicken
ACGT Human
ACGG Mouse
>>> align1.sort(key = lambda record: GC(record.seq))
>>> print(align1)
DNAAlphabet() alignment with 3 rows and 4 columns
ACGT Human
ACGC Chicken
ACGG Mouse
There is also a reverse argument, so if you wanted to sort by ID
but backwards:
>>> align1.sort(reverse=True)
>>> print(align1)
DNAAlphabet() alignment with 3 rows and 4 columns
ACGG Mouse
ACGT Human
ACGC Chicken
"""
if key is None:
self._records.sort(key=lambda r: r.id, reverse=reverse)
else:
self._records.sort(key=key, reverse=reverse)
class PairwiseAlignment(object):
    """Represents a pairwise sequence alignment.
    Internally, the pairwise alignment is stored as the path through
    the traceback matrix, i.e. a tuple of pairs of indices corresponding
    to the vertices of the path in the traceback matrix.
    """
    def __init__(self, target, query, path, score):
        """Initialize a new PairwiseAlignment object.
        Arguments:
         - target - The first sequence, as a plain string, without gaps.
         - query  - The second sequence, as a plain string, without gaps.
         - path   - The path through the traceback matrix, defining an
                    alignment.
         - score  - The alignment score.
        You would normally obtain a PairwiseAlignment object by iterating
        over a PairwiseAlignments object.
        """
        self.target = target
        self.query = query
        self.score = score
        self.path = path
    # For Python2 only; Python 3 ignores __cmp__ and uses the rich
    # comparison methods below. All comparisons are by path only.
    def __cmp__(self, other):
        if self.path < other.path:
            return -1
        if self.path > other.path:
            return +1
        return 0
    # Rich comparisons: alignments compare by their traceback path, not by
    # score or sequence content.
    def __eq__(self, other):
        return self.path == other.path
    def __ne__(self, other):
        return self.path != other.path
    def __lt__(self, other):
        return self.path < other.path
    def __le__(self, other):
        return self.path <= other.path
    def __gt__(self, other):
        return self.path > other.path
    def __ge__(self, other):
        return self.path >= other.path
    def __format__(self, format_spec):
        # Only "psl" is recognized; any other spec falls back to str().
        if format_spec == "psl":
            return self._format_psl()
        return str(self)
    def __str__(self):
        # Plain-string sequences get the compact three-line layout;
        # anything else (e.g. sequences of arbitrary objects) uses the
        # generalized space-separated layout.
        if isinstance(self.query, str) and isinstance(self.target, str):
            return self.format()
        else:
            return self._format_generalized()
    def format(self):
        """Create a human-readable representation of the alignment.

        Returns three newline-terminated lines: the aligned target, a
        pattern line ("|" match, "." mismatch, "-" gap, " " unaligned
        flanks), and the aligned query.
        """
        query = self.query
        target = self.target
        try:
            # check if query is a SeqRecord
            query = query.seq
        except AttributeError:
            # query is a Seq object or a plain string
            pass
        try:
            # check if target is a SeqRecord
            target = target.seq
        except AttributeError:
            # target is a Seq object or a plain string
            pass
        seq1 = str(target)
        seq2 = str(query)
        n1 = len(seq1)
        n2 = len(seq2)
        aligned_seq1 = ""
        aligned_seq2 = ""
        pattern = ""
        path = self.path
        # Leading unaligned region (local alignments may not start at 0):
        # left-pad the shorter prefix with spaces so columns line up.
        end1, end2 = path[0]
        if end1 > 0 or end2 > 0:
            end = max(end1, end2)
            aligned_seq1 += " " * (end - end1) + seq1[:end1]
            aligned_seq2 += " " * (end - end2) + seq2[:end2]
            pattern += " " * end
        start1 = end1
        start2 = end2
        # Walk the path segment by segment; each step is either a gap in
        # one of the two sequences or an aligned (match/mismatch) block.
        for end1, end2 in path[1:]:
            gap = 0
            if end1 == start1:
                # Gap in the target: insertion from the query.
                gap = end2 - start2
                aligned_seq1 += "-" * gap
                aligned_seq2 += seq2[start2:end2]
                pattern += "-" * gap
            elif end2 == start2:
                # Gap in the query: insertion from the target.
                gap = end1 - start1
                aligned_seq1 += seq1[start1:end1]
                aligned_seq2 += "-" * gap
                pattern += "-" * gap
            else:
                # Aligned block: mark matches "|" and mismatches ".".
                s1 = seq1[start1:end1]
                s2 = seq2[start2:end2]
                aligned_seq1 += s1
                aligned_seq2 += s2
                for c1, c2 in zip(s1, s2):
                    if c1 == c2:
                        pattern += "|"
                    else:
                        pattern += "."
            start1 = end1
            start2 = end2
        # Trailing unaligned region: append remaining characters and pad.
        n1 -= end1
        n2 -= end2
        n = max(n1, n2)
        aligned_seq1 += seq1[end1:] + " " * (n - n1)
        aligned_seq2 += seq2[end2:] + " " * (n - n2)
        pattern += " " * n
        return "%s\n%s\n%s\n" % (aligned_seq1, pattern, aligned_seq2)
    def _format_generalized(self):
        # Like format(), but for sequences whose elements may be arbitrary
        # objects of varying str() width; columns are space-separated and
        # each column is padded to the wider of the two elements.
        seq1 = self.target
        seq2 = self.query
        n1 = len(seq1)
        n2 = len(seq2)
        aligned_seq1 = []
        aligned_seq2 = []
        pattern = []
        path = self.path
        # Leading unaligned region: pad the side with the shorter prefix.
        end1, end2 = path[0]
        if end1 > 0 or end2 > 0:
            if end1 <= end2:
                for c2 in seq2[:end2 - end1]:
                    s2 = str(c2)
                    s1 = " " * len(s2)
                    aligned_seq1.append(s1)
                    aligned_seq2.append(s2)
                    pattern.append(s1)
            else:  # end1 > end2
                for c1 in seq1[:end1 - end2]:
                    s1 = str(c1)
                    s2 = " " * len(s1)
                    aligned_seq1.append(s1)
                    aligned_seq2.append(s2)
                    pattern.append(s2)
        start1 = end1
        start2 = end2
        # Walk the path: gap in target, gap in query, or aligned block.
        for end1, end2 in path[1:]:
            if end1 == start1:
                # Gap in the target.
                for c2 in seq2[start2:end2]:
                    s2 = str(c2)
                    s1 = "-" * len(s2)
                    aligned_seq1.append(s1)
                    aligned_seq2.append(s2)
                    pattern.append(s1)
                start2 = end2
            elif end2 == start2:
                # Gap in the query.
                for c1 in seq1[start1:end1]:
                    s1 = str(c1)
                    s2 = "-" * len(s1)
                    aligned_seq1.append(s1)
                    aligned_seq2.append(s2)
                    pattern.append(s2)
                start1 = end1
            else:
                # Aligned block: pad the narrower rendering and build a
                # pattern column of "|" (match) or "." (mismatch).
                for c1, c2 in zip(seq1[start1:end1], seq2[start2:end2]):
                    s1 = str(c1)
                    s2 = str(c2)
                    m1 = len(s1)
                    m2 = len(s2)
                    if c1 == c2:
                        p = "|"
                    else:
                        p = "."
                    if m1 < m2:
                        space = (m2 - m1) * " "
                        s1 += space
                        pattern.append(p * m1 + space)
                    elif m1 > m2:
                        space = (m1 - m2) * " "
                        s2 += space
                        pattern.append(p * m2 + space)
                    else:
                        pattern.append(p * m1)
                    aligned_seq1.append(s1)
                    aligned_seq2.append(s2)
                start1 = end1
                start2 = end2
        aligned_seq1 = " ".join(aligned_seq1)
        aligned_seq2 = " ".join(aligned_seq2)
        pattern = " ".join(pattern)
        return "%s\n%s\n%s\n" % (aligned_seq1, pattern, aligned_seq2)
    def _format_psl(self):
        # Render the alignment as one tab-separated line in PSL format
        # (the 21-column format used by BLAT / the UCSC genome browser).
        query = self.query
        target = self.target
        # Use SeqRecord ids for the name columns when available; plain
        # strings/Seqs fall back to generic names.
        try:
            Qname = query.id
        except AttributeError:
            Qname = "query"
        else:
            query = query.seq
        try:
            Tname = target.id
        except AttributeError:
            Tname = "target"
        else:
            target = target.seq
        seq1 = str(target)
        seq2 = str(query)
        n1 = len(seq1)
        n2 = len(seq2)
        # PSL statistics accumulated while walking the path below.
        match = 0
        mismatch = 0
        repmatch = 0
        Ns = 0
        Qgapcount = 0
        Qgapbases = 0
        Tgapcount = 0
        Tgapbases = 0
        Qsize = n2
        Qstart = 0
        Qend = Qsize
        Tsize = n1
        Tstart = 0
        Tend = Tsize
        blockSizes = []
        qStarts = []
        tStarts = []
        # NOTE(review): strand is hard-coded "+"; reverse-strand alignments
        # are not represented here.
        strand = "+"
        start1 = 0
        start2 = 0
        # (the two assignments above are immediately overwritten)
        start1, start2 = self.path[0]
        for end1, end2 in self.path[1:]:
            count1 = end1 - start1
            count2 = end2 - start2
            if count1 == 0:
                # Gap in the target; at either sequence end this trims the
                # query start/end rather than counting as an internal gap.
                if start2 == 0:
                    Qstart += count2
                elif end2 == n2:
                    Qend -= count2
                else:
                    Qgapcount += 1
                    Qgapbases += count2
                start2 = end2
            elif count2 == 0:
                # Gap in the query; same end-trimming logic for the target.
                if start1 == 0:
                    Tstart += count1
                elif end1 == n1:
                    Tend -= count1
                else:
                    Tgapcount += 1
                    Tgapbases += count1
                start1 = end1
            else:
                # Aligned block: record its coordinates and tally
                # match/mismatch/N statistics character by character.
                assert count1 == count2
                tStarts.append(start1)
                qStarts.append(start2)
                blockSizes.append(count1)
                for c1, c2 in zip(seq1[start1:end1], seq2[start2:end2]):
                    if c1 == "N" or c2 == "N":
                        Ns += 1
                    elif c1 == c2:
                        match += 1
                    else:
                        mismatch += 1
                start1 = end1
                start2 = end2
        blockcount = len(blockSizes)
        # PSL list columns are comma-separated with a trailing comma.
        blockSizes = ",".join(map(str, blockSizes)) + ","
        qStarts = ",".join(map(str, qStarts)) + ","
        tStarts = ",".join(map(str, tStarts)) + ","
        words = [str(match),
                 str(mismatch),
                 str(repmatch),
                 str(Ns),
                 str(Qgapcount),
                 str(Qgapbases),
                 str(Tgapcount),
                 str(Tgapbases),
                 strand,
                 Qname,
                 str(Qsize),
                 str(Qstart),
                 str(Qend),
                 Tname,
                 str(Tsize),
                 str(Tstart),
                 str(Tend),
                 str(blockcount),
                 blockSizes,
                 qStarts,
                 tStarts,
                 ]
        line = "\t".join(words) + "\n"
        return line
    @property
    def aligned(self):
        """Return the indices of subsequences aligned to each other.
        This property returns the start and end indices of subsequences
        in the target and query sequence that were aligned to each other.
        If the alignment between target (t) and query (q) consists of N
        chunks, you get two tuples of length N:
        (((t_start1, t_end1), (t_start2, t_end2), ..., (t_startN, t_endN)),
         ((q_start1, q_end1), (q_start2, q_end2), ..., (q_startN, q_endN)))
        For example,
        >>> from Bio import Align
        >>> aligner = Align.PairwiseAligner()
        >>> alignments = aligner.align("GAACT", "GAT")
        >>> alignment = alignments[0]
        >>> print(alignment)
        GAACT
        ||--|
        GA--T
        <BLANKLINE>
        >>> alignment.aligned
        (((0, 2), (4, 5)), ((0, 2), (2, 3)))
        >>> alignment = alignments[1]
        >>> print(alignment)
        GAACT
        |-|-|
        G-A-T
        <BLANKLINE>
        >>> alignment.aligned
        (((0, 1), (2, 3), (4, 5)), ((0, 1), (1, 2), (2, 3)))
        Note that different alignments may have the same subsequences
        aligned to each other. In particular, this may occur if alignments
        differ from each other in terms of their gap placement only:
        >>> aligner.mismatch_score = -10
        >>> alignments = aligner.align("AAACAAA", "AAAGAAA")
        >>> len(alignments)
        2
        >>> print(alignments[0])
        AAAC-AAA
        |||--|||
        AAA-GAAA
        <BLANKLINE>
        >>> alignments[0].aligned
        (((0, 3), (4, 7)), ((0, 3), (4, 7)))
        >>> print(alignments[1])
        AAA-CAAA
        |||--|||
        AAAG-AAA
        <BLANKLINE>
        >>> alignments[1].aligned
        (((0, 3), (4, 7)), ((0, 3), (4, 7)))
        The property can be used to identify alignments that are identical
        to each other in terms of their aligned sequences.
        """
        segments1 = []
        segments2 = []
        if sys.version_info[0] > 2:
            # A path step that advances in BOTH sequences is an aligned
            # chunk; steps advancing in only one sequence are gaps and
            # are skipped.
            i1, i2 = self.path[0]
            for node in self.path[1:]:
                j1, j2 = node
                if j1 > i1 and j2 > i2:
                    segment1 = (i1, j1)
                    segment2 = (i2, j2)
                    segments1.append(segment1)
                    segments2.append(segment2)
                i1, i2 = j1, j2
        else:
            # Python 2: convert all long ints to ints to be consistent
            # with the doctests
            i1, i2 = self.path[0]
            i1 = int(i1)
            i2 = int(i2)
            for node in self.path[1:]:
                j1, j2 = node
                j1 = int(j1)
                j2 = int(j2)
                if j1 > i1 and j2 > i2:
                    segment1 = (i1, j1)
                    segment2 = (i2, j2)
                    segments1.append(segment1)
                    segments2.append(segment2)
                i1, i2 = j1, j2
        return tuple(segments1), tuple(segments2)
class PairwiseAlignments(object):
    """Implements an iterator over pairwise alignments returned by the aligner.
    This class also supports indexing, which is fast for increasing indices,
    but may be slow for random access of a large number of alignments.
    Note that pairwise aligners can return an astronomical number of alignments,
    even for relatively short sequences, if they align poorly to each other. We
    therefore recommend to first check the number of alignments, accessible as
    len(alignments), which can be calculated quickly even if the number of
    alignments is very large.
    """
    def __init__(self, seqA, seqB, score, paths):
        """Initialize a new PairwiseAlignments object.
        Arguments:
         - seqA  - The first sequence, as a plain string, without gaps.
         - seqB  - The second sequence, as a plain string, without gaps.
         - score - The alignment score.
         - paths - An iterator over the paths in the traceback matrix;
                   each path defines one alignment.
        You would normally obtain a PairwiseAlignments object by calling
        aligner.align(seqA, seqB), where aligner is a PairwiseAligner object.
        """
        self.seqA = seqA
        self.seqB = seqB
        self.score = score
        self.paths = paths
        # Index of the most recently yielded alignment; -1 means "before
        # the first alignment".
        self.index = -1
    def __len__(self):
        return len(self.paths)
    def __getitem__(self, index):
        # Cached fast path: the alignment at the current position.
        if index == self.index:
            return self.alignment
        if index < self.index:
            # Going backwards requires rewinding the path iterator.
            self.paths.reset()
            self.index = -1
        # Step forward until the requested index is reached.
        # NOTE(review): negative indices are not supported and would fall
        # through to the rewind-and-search below.
        while self.index < index:
            try:
                alignment = next(self)
            except StopIteration:
                raise_from(IndexError("index out of range"), None)
        return alignment
    def __iter__(self):
        # Iterating restarts from the first path.
        self.paths.reset()
        self.index = -1
        return self
    def __next__(self):
        path = next(self.paths)
        self.index += 1
        # Keep the last-built alignment cached for __getitem__.
        alignment = PairwiseAlignment(self.seqA, self.seqB, path, self.score)
        self.alignment = alignment
        return alignment
    if sys.version_info[0] < 3:  # Python 2
        next = __next__
class PairwiseAligner(_aligners.PairwiseAligner):
    """Performs pairwise sequence alignment using dynamic programming.
    This provides functions to get global and local alignments between two
    sequences. A global alignment finds the best concordance between all
    characters in two sequences. A local alignment finds just the
    subsequences that align the best.
    To perform a pairwise sequence alignment, first create a PairwiseAligner
    object. This object stores the match and mismatch scores, as well as the
    gap scores. Typically, match scores are positive, while mismatch scores
    and gap scores are negative or zero. By default, the match score is 1,
    and the mismatch and gap scores are zero. Based on the values of the gap
    scores, a PairwiseAligner object automatically chooses the appropriate
    alignment algorithm (the Needleman-Wunsch, Smith-Waterman, Gotoh, or
    Waterman-Smith-Beyer global or local alignment algorithm).
    Calling the "score" method on the aligner with two sequences as arguments
    will calculate the alignment score between the two sequences.
    Calling the "align" method on the aligner with two sequences as arguments
    will return a generator yielding the alignments between the two
    sequences.
    Some examples:
    >>> from Bio import Align
    >>> aligner = Align.PairwiseAligner()
    >>> alignments = aligner.align("TACCG", "ACG")
    >>> for alignment in sorted(alignments):
    ...     print("Score = %.1f:" % alignment.score)
    ...     print(alignment)
    ...
    Score = 3.0:
    TACCG
    -|-||
    -A-CG
    <BLANKLINE>
    Score = 3.0:
    TACCG
    -||-|
    -AC-G
    <BLANKLINE>
    Specify the aligner mode as local to generate local alignments:
    >>> aligner.mode = 'local'
    >>> alignments = aligner.align("TACCG", "ACG")
    >>> for alignment in sorted(alignments):
    ...     print("Score = %.1f:" % alignment.score)
    ...     print(alignment)
    ...
    Score = 3.0:
    TACCG
     |-||
     A-CG
    <BLANKLINE>
    Score = 3.0:
    TACCG
     ||-|
     AC-G
    <BLANKLINE>
    Do a global alignment.  Identical characters are given 2 points,
    1 point is deducted for each non-identical character.
    >>> aligner.mode = 'global'
    >>> aligner.match_score = 2
    >>> aligner.mismatch_score = -1
    >>> for alignment in aligner.align("TACCG", "ACG"):
    ...     print("Score = %.1f:" % alignment.score)
    ...     print(alignment)
    ...
    Score = 6.0:
    TACCG
    -||-|
    -AC-G
    <BLANKLINE>
    Score = 6.0:
    TACCG
    -|-||
    -A-CG
    <BLANKLINE>
    Same as above, except now 0.5 points are deducted when opening a
    gap, and 0.1 points are deducted when extending it.
    >>> aligner.open_gap_score = -0.5
    >>> aligner.extend_gap_score = -0.1
    >>> aligner.target_end_gap_score = 0.0
    >>> aligner.query_end_gap_score = 0.0
    >>> for alignment in aligner.align("TACCG", "ACG"):
    ...     print("Score = %.1f:" % alignment.score)
    ...     print(alignment)
    ...
    Score = 5.5:
    TACCG
    -|-||
    -A-CG
    <BLANKLINE>
    Score = 5.5:
    TACCG
    -||-|
    -AC-G
    <BLANKLINE>
    The alignment function can also use known matrices already included in
    Biopython:
    >>> from Bio.Align import substitution_matrices
    >>> aligner = Align.PairwiseAligner()
    >>> aligner.substitution_matrix = substitution_matrices.load("BLOSUM62")
    >>> alignments = aligner.align("KEVLA", "EVL")
    >>> alignments = list(alignments)
    >>> print("Number of alignments: %d" % len(alignments))
    Number of alignments: 1
    >>> alignment = alignments[0]
    >>> print("Score = %.1f" % alignment.score)
    Score = 13.0
    >>> print(alignment)
    KEVLA
    -|||-
    -EVL-
    <BLANKLINE>
    """
    def __setattr__(self, key, value):
        # Only attributes already defined on the C-extension base class
        # (scores, mode, etc.) may be set; anything else is almost
        # certainly a typo, so fail loudly instead of silently creating
        # a new attribute.
        if key not in dir(_aligners.PairwiseAligner):
            # To prevent confusion, don't allow users to create new attributes
            message = "'PairwiseAligner' object has no attribute '%s'" % key
            raise AttributeError(message)
        _aligners.PairwiseAligner.__setattr__(self, key, value)
    def align(self, seqA, seqB):
        """Return the alignments of two sequences using PairwiseAligner."""
        # The C extension works on plain strings; unwrap Seq objects.
        if isinstance(seqA, Seq):
            seqA = str(seqA)
        if isinstance(seqB, Seq):
            seqB = str(seqB)
        score, paths = _aligners.PairwiseAligner.align(self, seqA, seqB)
        # Wrap the raw (score, paths) result in a lazy iterator object.
        alignments = PairwiseAlignments(seqA, seqB, score, paths)
        return alignments
    def score(self, seqA, seqB):
        """Return the alignments score of two sequences using PairwiseAligner."""
        # The C extension works on plain strings; unwrap Seq objects.
        if isinstance(seqA, Seq):
            seqA = str(seqA)
        if isinstance(seqB, Seq):
            seqB = str(seqB)
        return _aligners.PairwiseAligner.score(self, seqA, seqB)
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings when the
    # module is executed directly.
    from Bio._utils import run_doctest
    run_doctest()
|
Bio.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Pediatric obesity study: load precomputed model results from pickle files
# and produce ROC-curve figures plus a feature-importance table.
import pickle
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Bug fix: the bare line "% matplotlib inline" is IPython magic syntax and a
# SyntaxError when this jupytext script runs as plain Python; jupytext's
# convention is to keep magics commented out:
# %matplotlib inline
# Load precomputed metrics and models.
# NOTE(review): 'precion.pkl' looks like a typo for 'precision.pkl', but the
# name must match the artifact on disk, so it is left unchanged.
prec_total = pickle.load(open('precion.pkl', 'rb'))
recall_total = pickle.load(open('recall.pkl', 'rb'))
spec_total = pickle.load(open('specificity.pkl', 'rb'))
titles_total = pickle.load(open('titles.pkl', 'rb'))
model_list = pickle.load(open('models.pkl', 'rb'))
auc_list = pickle.load(open('aucs.pkl', 'rb'))  # tuples: (name, auc, lo, hi)
feature_headers = pickle.load(open('feature_headers.pkl', 'rb'))
girls_df = pickle.load(open('girls_df.pkl', 'rb'))
girls_lutheran_df = pickle.load(open('girls_lutheran_df.pkl', 'rb'))
boys_df = pickle.load(open('boys_df.pkl', 'rb'))
boys_lutheran_df = pickle.load(open('boys_lutheran_df.pkl', 'rb'))
# +
# Figure 1: ROC curves for all models; Lutheran-cohort models dashed.
# NOTE(review): the title says "Girls" but no filtering is applied here —
# confirm whether this figure should include boys' models too.
print('ROC curve:')
plt.figure(figsize=(10, 10))
for ix in range(len(prec_total)):
    # Legend label: "<name> AUC=<value> [<ci_low>, <ci_high>]"
    lab = auc_list[ix][0] + ' AUC=' + str(auc_list[ix][1]) + ' [' + str(auc_list[ix][2]) + ', ' + str(auc_list[ix][3]) + ']'
    if 'w/ Lutheran' in auc_list[ix][0]:
        plt.plot(1 - np.array(spec_total[ix]), np.array(recall_total[ix]), linestyle='--', label=lab)
    else:
        plt.plot(1 - np.array(spec_total[ix]), np.array(recall_total[ix]), linestyle='-', label=lab)
plt.legend(fontsize=12, )
plt.xlabel('1 - Specificity')
plt.ylabel('Sensitivity')
plt.axis('equal')
plt.title('ROC Curve: Obesity in Girls Predicted at 5 years from 24 Months')
plt.grid(True)
plt.tight_layout()
plt.savefig('Pediatric_ROC.png', dpi=300)
plt.show()
# +
# Figure 2: girls-only models.
print('ROC curve:')
plt.figure(figsize=(10, 10))
for ix in range(len(prec_total)):
    lab = auc_list[ix][0] + ' AUC=' + str(auc_list[ix][1]) + ' [' + str(auc_list[ix][2]) + ', ' + str(auc_list[ix][3]) + ']'
    if 'girls' in auc_list[ix][0]:
        plt.plot(1 - np.array(spec_total[ix]), np.array(recall_total[ix]), label=lab)
plt.legend(fontsize=12, )
plt.xlabel('1 - Specificity')
plt.ylabel('Sensitivity')
plt.axis('equal')
plt.title('ROC Curve: Obesity in Girls Predicted at 5 years from 24 Months')
plt.grid(True)
plt.savefig('Pediatric_Girls_ROC.png', dpi=300)
plt.show()
# +
# Figure 3: boys-only models (everything not labelled 'girls').
print('ROC curve:')
plt.figure(figsize=(10, 10))
for ix in range(len(prec_total)):
    lab = auc_list[ix][0] + ' AUC=' + str(auc_list[ix][1]) + ' [' + str(auc_list[ix][2]) + ', ' + str(auc_list[ix][3]) + ']'
    if 'girls' in auc_list[ix][0]:
        continue
    plt.plot(1 - np.array(spec_total[ix]), np.array(recall_total[ix]), label=lab)
plt.legend(fontsize=12, )
plt.xlabel('1 - Specificity')
plt.ylabel('Sensitivity')
plt.axis('equal')
plt.title('ROC Curve: Obesity in Boys Predicted at 5 years from 24 Months')
plt.grid(True)
plt.savefig('Pediatric_Boys_ROC.png', dpi=300)
plt.show()
# -
# Print AUC with the width of its confidence interval for each model.
for name, auc, lower, upper in auc_list:
    print(name, ' = ', auc, ' - ', upper - lower)
# +
# Build a (n_features x n_models) table of model weights.
features = np.zeros((model_list[0].coef_.shape[0], len(model_list)))
for ix, model in enumerate(model_list):
    # Linear models expose coef_; tree ensembles expose feature_importances_.
    # Bug fix: catch only AttributeError (the original bare "except:" would
    # also swallow KeyboardInterrupt and hide unrelated errors).
    try:
        weights = model.coef_
    except AttributeError:
        weights = model.feature_importances_
    features[:, ix] = np.array(weights.ravel())
df = pd.DataFrame(features, columns=[x[0] for x in auc_list])
df.insert(loc=0, column='features', value=feature_headers)
df.head()
# -
# NOTE(review): 'feaure_importances.csv' looks like a typo for
# 'feature_importances.csv'; left unchanged in case downstream tooling
# expects the existing name.
df.to_csv('feaure_importances.csv', sep=',', index=False)
|
src/Pediatric Study - Plotting.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Entropy
# language: python
# name: entropy
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Tutorial: QPU Database
#
# **This tutorial requires version >=0.0.5 of the QPU DB**
#
# ## Using the QPU DB
#
# The QPU database is a permanent store built for storing calibration data for Quantum Processing Units (QPU).
#
# It provides the following features and benefits:
#
# * Persistent storage of any python object related to QPU calibration info
# * Metadata on parameter calibration state and last modified time
# * Convenient addressing of quantum elements
# * Easy revert to previously stored parameters
#
# In this short tutorial we will learn how to use the QPU DB by looking at a simplified example of a QPU with two superconducting
# qubits, two readout resonators and a parametric coupling element.
#
# ### Creating the database
#
# Below we can see a simple usage example. The DB is created by calling the `create_new_database` method.
# This method is similar to initializing a git repo in the sense that we only do it once. Here we initialize it
# with an initial dictionary which contains some basic attributes of our QPU. We'll be able to add more attributes,
# and also elements, later on. Once we call `create_new_qpu_database`, a set of database files will be created for us at
# the working directory of the python script.
#
# These files are the persistent storage of our DB. They can be saved to a different location by specifying
# the `path` argument to the function.
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# # %load_ext autoreload
# # %autoreload 2
from entropylab_qpudb import create_new_qpu_database, CalState, QpuDatabaseConnection
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Initial QPU layout: two qubits (q1, q2), their readout resonators
# (res1, res2) and one parametric coupler (c1_2). Keys are "elements",
# values are dictionaries of calibration "attributes".
initial_dict = {
    'q1': {
        'f01': 5.65e9  # an initial guess for our transition frequency
    },
    'q2': {
        'f01': 5.25e9
    },
    'res1': {
        'f_r': 7.1e9
    },
    'res2': {
        'f_r': 7.3e9
    },
    'c1_2': {
        'f_r': 0.4e9
    }
}
# Creates the persistent DB files in the working directory; done once per DB.
create_new_qpu_database('db1', initial_dict, force_create=True)
# + [markdown] pycharm={"name": "#%% md\n"}
# Notes:
#
# 1. here we allow for the possibility of overwriting an existing database
# by passing the `force_create=True` flag. This option is useful when experimenting with the database creation, however in
# common usage it is recommended to remove this flag, since when it's false (by default), it will prevent overwriting an existing
# database and deleting all the data stored in it.
#
# 2. (For experts): if you need to create a DB server, rather than create a filesystem storage, please let us know.
# The DB backend is currently
# the [ZODB](https://zodb.org/en/latest/) database, with plans to be replaced by
# [gitdb](https://github.com/gitpython-developers/gitdb).
#
# The keys of `initial_dict` are called the *elements* (and are similar in nature to QUA's quantum elements), and the
# values of these elements are subdictionaries of *attributes*. The values of the attributes can be anything you like,
# or more accurately, any python object that can be pickled. The different elements need not have the same attributes.
#
# ### Connecting to the database and basic usage
#
# Now create a connection to our DB. The connection to the DB is our main "workhorse" - we create the DB once, and
# whenever we want to connect to it in order to retrieve or store data, we open a connection object. Note that currently
# only a single connection object per DB is allowed.
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Open a connection to the DB created above (one connection per DB).
db1 = QpuDatabaseConnection('db1')
# + [markdown] pycharm={"name": "#%% md\n"}
# and let's view the contents of our DB by calling `print`:
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
db1.print()
# + [markdown] pycharm={"name": "#%% md\n"}
# Congratulations! You've just created your first QPU DB. As you can see when calling `print` the values we entered
# in `initial_dict` are now objects of type `QpuParameter`. These objects have 3 attributes:
#
# * `value`: the value you created initially and can be any python object
# * `last_updated`: the time when this parameter was last updated (see *committing* section to understand how to
# update). This parameter is handled by the DB itself.
# * `cal_state`: an enumerated metadata that can take the values `UNCAL`, `COARSE`, `MED` and `FINE`. This
# can be used by the user to communicate what is the calibration level of these parameters. They can be set and queried
# during the script execution, but are not used by the DB itself.
#
# ### Modifying and using QPU parameters
#
# We can use and modify values and calibration states of QPU parameters in two different ways:
#
# #### Using `get` and `set`
#
# let's modify the value of `f01` and then get the actual value:
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
db1.set('q1', 'f01', 5.33e9)
db1.get('q1', 'f01').value
# + [markdown] pycharm={"name": "#%% md\n"}
# We can also modify the calibration state when setting:
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
db1.set('q1', 'f01', 5.36e9, CalState.COARSE)
# + [markdown] pycharm={"name": "#%% md\n"}
# To get the full `QpuParameter` object we can omit `.value`. We can see that the cal state and modification date were updated.
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
db1.get('q1', 'f01')
#db1.get('q1', 'f01').cal_state
# + [markdown] pycharm={"name": "#%% md\n"}
# Note that we can't modify the value by assigning to value directly - this will raise an exception.
# -
# #### Using resolved names
#
# The names we chose for the elements, namely `'q1'`, `'res1'` and `'c1_2'` have a special significance. If we follow this
# convention of naming qubit elements with the format 'q'+number, resonators with the format 'res'+number
# and couplers with the format 'c'+number1+'_'+number2, as shown above, this allows us to get and set values in a more
# convenient way:
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
print(db1.q(1).f01.value)
print(db1.res(1).f_r.value)
print(db1.coupler(1, 2).f_r.value)
print(db1.coupler(2, 1).f_r.value)
# + [markdown] pycharm={"name": "#%% md\n"}
# While this method is basically syntactic sugar, it allows us to conveniently address elements by indices, which is useful when
# working with multiple qubit systems, and especially with couplers. We can also set values using this resolved addressing method:
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
db1.update_q(1, 'f01', 5.4e9)
db1.q(1).f01
# + [markdown] pycharm={"name": "#%% md\n"}
# Note: This default mapping between integer indices and strings can be modified by subclassing the
# `Resolver` class found under `entropylab_qpudb._resolver.py`.
#
# ### Committing (saving to persistent storage) and viewing history
#
# Everything we've done so far did not modify the persistent storage. In order to do this, we need to *commit* the changes we made.
# This allows us to control at which stages we want to make aggregated changes to the database.
#
# Let's see how this is done. We need to call `commit`, and specify an optional commit message:
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
db1.update_q(1, 'f01', 6.e9)
db1.commit('a test commit')
# + [markdown] pycharm={"name": "#%% md\n"}
# Now the actual file was changed. To see this, we need to close the db. We can then delete db1,
# and when re-opening the DB we'll see f01 of q1 has the modified value.
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
db1.close()
del db1
db1 = QpuDatabaseConnection('db1')
db1.q(1).f01
# + [markdown] pycharm={"name": "#%% md\n"}
# Note that the commit was saved with an index. This index can be later used to revert to a [previous state](#reverting-to-a-previous-state).
#
# To view a history of all the commits, we call `get_history`.
#
# Note that the timestamps of the commits are in UTC time.
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
db1.get_history()
# -
# ### Adding attributes and elements
#
# In many cases you realize while calibrating your system that you want to add attributes that did not exist in the initial
# dictionary, or even new elements. This is easy using the `add_element` and `add_attribute` methods.
# Let's see an example for `add_attribute`:
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
db1.add_attribute('q1', 'anharmonicity')
print(db1.q(1).anharmonicity)
db1.update_q(1, 'anharmonicity', -300e6, new_cal_state=CalState.COARSE)
print(db1.q(1).anharmonicity)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Reverting to a previous state
#
# Many times when we work on bringing up a QPU, we reach a point where everything is calibrated properly and our measurements
# and calibrations give good results. We want to be able to make additional changes, but to possibly revert to the good state
# if things go wrong. We can do this using `restore_from_history`. We simply need to provide it with the history
# index to which we want to return:
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
db1.restore_from_history(0)
print(db1.q(1).f01)
assert db1.q(1).f01.value == initial_dict['q1']['f01']
# + [markdown] pycharm={"name": "#%% md\n"}
# Calling this method will replace the current working DB with the DB that was stored in the commit with the index
# supplied to `restore_from_history`. The new values will not be committed. It is possible to modify the values and
# commit them as usual.
#
# ## Next steps
#
# While the QPU DB is a standalone tool, it is designed with QUA calibration node framework in mind.
# In the notebook called `2_qubit_graph_calibration.ipynb` we explore how the QUA calibration nodes framework can be used
# to generate calibration graphs.
#
# ## Remove DB files
#
# To remove the DB files created in your workspace for the purpose of this demonstration, first close the db connection:
# -
db1.close()
# then run this cell:
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
from glob import glob
import os
for fl in glob("db1*"):
os.remove(fl)
|
docs/1_qpu_db.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbgrader={"grade": false, "grade_id": "intro_1", "locked": false, "solution": false}
# # Assignment 2: Markov Decision Processes
#
#
# ## Homework Instructions
# All your answers should be written in this notebook. You shouldn't need to write or modify any other files.
# Look for four instances of "YOUR CODE HERE"--those are the only parts of the code you need to write. To grade your homework, we will check whether the printouts immediately following your code match up with the results we got. The portions used for grading are highlighted in yellow. (However, note that the yellow highlighting does not show up when github renders this file.)
#
# To submit your homework, send an email to <<EMAIL>> with the subject line "Deep RL Assignment 2" and two attachments:
# 1. This `ipynb` file
# 2. A pdf version of this file (To make the pdf, do `File - Print Preview`)
#
# The homework is due February 22nd, 11:59 pm.
#
# --------------------------
# + [markdown] nbgrader={"grade": false, "grade_id": "intro_2", "locked": false, "solution": false}
# ## Introduction
#
# This assignment will review the two classic methods for solving Markov Decision Processes (MDPs) with finite state and action spaces.
# We will implement value iteration (VI) and policy iteration (PI) for a finite MDP, both of which find the optimal policy in a finite number of iterations.
#
# The experiments here will use the Frozen Lake environment, a simple gridworld MDP that is taken from `gym` and slightly modified for this assignment. In this MDP, the agent must navigate from the start state to the goal state on a 4x4 grid, with stochastic transitions.
# -
from frozen_lake import FrozenLakeEnv
env = FrozenLakeEnv()
print(env.__doc__)
# Let's look at what a random episode looks like.
# +
# Some basic imports and setup
import numpy as np, numpy.random as nr, gym
np.set_printoptions(precision=3)
def begin_grading(): print("\x1b[43m")
def end_grading(): print("\x1b[0m")
# Seed RNGs so you get the same printouts as me
env.seed(0); from gym.spaces import prng; prng.seed(10)
# Generate the episode
env.reset()
for t in range(100):
env.render()
a = env.action_space.sample()
ob, rew, done, _ = env.step(a)
if done:
break
assert done
env.render();
# -
# In the episode above, the agent falls into a hole after two timesteps. Also note the stochasticity--on the first step, the DOWN action is selected, but the agent moves to the right.
#
# We extract the relevant information from the gym Env into the MDP class below.
# The `env` object won't be used any further, we'll just use the `mdp` object.
# +
class MDP(object):
    """Plain container for a finite Markov Decision Process.

    Holds the transition/reward model and the sizes of the state and
    action spaces, extracted once from a gym environment.
    """

    def __init__(self, P, nS, nA, desc=None):
        # P[state][action] -> list of (probability, nextstate, reward) tuples.
        self.P = P
        # Number of states and number of actions.
        self.nS = nS
        self.nA = nA
        # Optional 2D grid description of what each cell means (plotting only).
        self.desc = desc
mdp = MDP( {s : {a : [tup[:3] for tup in tups] for (a, tups) in a2d.items()} for (s, a2d) in env.P.items()}, env.nS, env.nA, env.desc)
print("mdp.P is a two-level dict where the first key is the state and the second key is the action.")
print("The 2D grid cells are associated with indices [0, 1, 2, ..., 15] from left to right and top to down, as in")
print(np.arange(16).reshape(4,4))
print("mdp.P[state][action] is a list of tuples (probability, nextstate, reward).\n")
print("For example, state 0 is the initial state, and the transition information for s=0, a=0 is \nP[0][0] =", mdp.P[0][0], "\n")
print("As another example, state 5 corresponds to a hole in the ice, which transitions to itself with probability 1 and reward 0.")
print("P[5][0] =", mdp.P[5][0], '\n')
# + [markdown] nbgrader={"grade": false, "grade_id": "4", "locked": false, "solution": false}
# ## Part 1: Value Iteration
# + [markdown] nbgrader={"grade": false, "locked": false, "solution": false}
# ### Problem 1: implement value iteration
# In this problem, you'll implement value iteration, which has the following pseudocode:
#
# ---
# Initialize $V^{(0)}(s)=0$, for all $s$
#
# For $i=0, 1, 2, \dots$
# - $V^{(i+1)}(s) = \max_a \sum_{s'} P(s,a,s') [ R(s,a,s') + \gamma V^{(i)}(s')]$, for all $s$
#
# ---
#
# We additionally define the sequence of greedy policies $\pi^{(0)}, \pi^{(1)}, \dots, \pi^{(n-1)}$, where
# $$\pi^{(i)}(s) = \arg \max_a \sum_{s'} P(s,a,s') [ R(s,a,s') + \gamma V^{(i)}(s')]$$
#
# Your code will return two lists: $[V^{(0)}, V^{(1)}, \dots, V^{(n)}]$ and $[\pi^{(0)}, \pi^{(1)}, \dots, \pi^{(n-1)}]$
#
# To ensure that you get the same policies as the reference solution, choose the lower-index action to break ties in $\arg \max_a$. This is done automatically by np.argmax. This will only affect the "# chg actions" printout below--it won't affect the values computed.
#
# <div class="alert alert-warning">
# Warning: make a copy of your value function each iteration and use that copy for the update--don't update your value function in place.
# Updating in-place is also a valid algorithm, sometimes called Gauss-Seidel value iteration or asynchronous value iteration, but it will cause you to get different results than me.
# </div>
# + nbgrader={"grade": false, "grade_id": "vstar_backup", "locked": false, "solution": true}
def value_iteration(mdp, gamma, nIt):
    """
    Run synchronous value iteration for nIt iterations.

    Inputs:
        mdp: MDP
        gamma: discount factor
        nIt: number of iterations, corresponding to n above
    Outputs:
        (value_functions, policies)
        len(value_functions) == nIt+1 and len(policies) == n
    """
    print("Iteration | max|V-Vprev| | # chg actions | V[0]")
    print("----------+--------------+---------------+---------")
    Vs = [np.zeros(mdp.nS)] # list of value functions contains the initial value function V^{(0)}, which is zero
    pis = []
    for it in range(nIt):
        oldpi = pis[-1] if len(pis) > 0 else None # \pi^{(it)} = Greedy[V^{(it-1)}]. Just used for printout
        Vprev = Vs[-1] # V^{(it)}
        # Bellman backup: Q[s, a] = sum_{s'} P(s,a,s') * (R(s,a,s') + gamma * Vprev[s']).
        Q = np.zeros((mdp.nS, mdp.nA))
        for s in range(mdp.nS):
            for a in range(mdp.nA):
                for prob, nextstate, reward in mdp.P[s][a]:
                    Q[s, a] += prob * (reward + gamma * Vprev[nextstate])
        # Greedy policy w.r.t. Vprev; np.argmax breaks ties toward the lower-index action,
        # as required by the grading printouts.
        pi = Q.argmax(axis=1)
        # V^{(it+1)} = T[V^{(it)}] as a fresh array -- NOT an in-place (Gauss-Seidel) update.
        V = Q.max(axis=1)
        max_diff = np.abs(V - Vprev).max()
        nChgActions="N/A" if oldpi is None else (pi != oldpi).sum()
        print("%4i      | %6.5f      | %4s          | %5.3f"%(it, max_diff, nChgActions, V[0]))
        Vs.append(V)
        pis.append(pi)
    return Vs, pis
GAMMA=0.95 # we'll be using this same value in subsequent problems
begin_grading()
Vs_VI, pis_VI = value_iteration(mdp, gamma=GAMMA, nIt=20)
end_grading()
# -
# Below, we've illustrated the progress of value iteration. Your optimal actions are shown by arrows.
# At the bottom, the value of the different states are plotted.
import matplotlib.pyplot as plt
# %matplotlib inline
# Visualize the first 10 value-iteration steps: grayscale value heatmap with the
# greedy action drawn as an arrow in each cell, plus the cell letter from env.desc.
for (V, pi) in zip(Vs_VI[:10], pis_VI[:10]):
    plt.figure(figsize=(3,3))
    plt.imshow(V.reshape(4,4), cmap='gray', interpolation='none', clim=(0,1))
    ax = plt.gca()
    ax.set_xticks(np.arange(4)-.5)
    ax.set_yticks(np.arange(4)-.5)
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    Y, X = np.mgrid[0:4, 0:4]
    # FrozenLake action encoding: 0=LEFT, 1=DOWN, 2=RIGHT, 3=UP.
    # Fixed: action 3 previously duplicated action 0's (-1, 0); UP is (0, 1).
    a2uv = {0: (-1, 0), 1:(0, -1), 2:(1,0), 3:(0, 1)}
    Pi = pi.reshape(4,4)
    for y in range(4):
        for x in range(4):
            a = Pi[y, x]
            u, v = a2uv[a]
            # imshow puts y=0 at the top, so the vertical component is negated.
            plt.arrow(x, y,u*.3, -v*.3, color='m', head_width=0.1, head_length=0.1)
            plt.text(x, y, str(env.desc[y,x].item().decode()),
                     color='g', size=12,  verticalalignment='center',
                     horizontalalignment='center', fontweight='bold')
    plt.grid(color='b', lw=2, ls='-')
plt.figure()
plt.plot(Vs_VI)
plt.title("Values of different states");
# ## Problem 2: construct an MDP where value iteration takes a long time to converge
#
# When we ran value iteration on the frozen lake problem, the last iteration where an action changed was iteration 6--i.e., value iteration computed the optimal policy at iteration 6.
# Are there any guarantees regarding how many iterations it'll take value iteration to compute the optimal policy?
# There are no such guarantees without additional assumptions--we can construct the MDP in such a way that the greedy policy will change after arbitrarily many iterations.
#
# Your task: define an MDP with at most 3 states and 2 actions, such that when you run value iteration, the optimal action changes at iteration >= 50. Use discount=0.95. (However, note that the discount doesn't matter here--you can construct an appropriate MDP with any discount.)
chg_iter = 50
# MDP where the greedy action at state 0 changes only at iteration >= 50.
# State 0 has two actions:
#   a0 -> state 1, reward 0; state 1 self-loops with reward 1 per step, so
#         Q(0, a0) at iteration i is gamma * (1 - gamma^i) / (1 - gamma),
#         growing slowly toward gamma / (1 - gamma) = 19.
#   a1 -> state 2 (absorbing, reward 0 thereafter) with immediate reward 17.5.
# With gamma = 0.95, gamma * V^{(i)}(s1) first exceeds 17.5 at i = 50, so the
# greedy action flips from a1 to a0 exactly at iteration 50.
mymdp = MDP(
    {
        0: {0: [(1.0, 1, 0.0)], 1: [(1.0, 2, 17.5)]},
        1: {0: [(1.0, 1, 1.0)], 1: [(1.0, 1, 1.0)]},
        2: {0: [(1.0, 2, 0.0)], 1: [(1.0, 2, 0.0)]},
    },
    3, 2)
begin_grading()
Vs, pis = value_iteration(mymdp, gamma=GAMMA, nIt=chg_iter+1)
end_grading()
# ## Problem 3: Policy Iteration
#
# The next task is to implement exact policy iteration (PI), which has the following pseudocode:
#
# ---
# Initialize $\pi_0$
#
# For $n=0, 1, 2, \dots$
# - Compute the state-value function $V^{\pi_{n}}$
# - Using $V^{\pi_{n}}$, compute the state-action-value function $Q^{\pi_{n}}$
# - Compute new policy $\pi_{n+1}(s) = \operatorname*{argmax}_a Q^{\pi_{n}}(s,a)$
# ---
#
# Below, you'll implement the first and second steps of the loop.
#
# ### Problem 3a: state value function
#
# You'll write a function called `compute_vpi` that computes the state-value function $V^{\pi}$ for an arbitrary policy $\pi$.
# Recall that $V^{\pi}$ satisfies the following linear equation:
# $$V^{\pi}(s) = \sum_{s'} P(s,\pi(s),s')[ R(s,\pi(s),s') + \gamma V^{\pi}(s')]$$
# You'll have to solve a linear system in your code. (Find an exact solution, e.g., with `np.linalg.solve`.)
def compute_vpi(pi, mdp, gamma):
    """Return the exact state-value function V^pi for policy `pi`.

    Solves the linear system V = R_pi + gamma * P_pi V, i.e.
    (I - gamma * P_pi) V = R_pi, with np.linalg.solve.

    Inputs:
        pi: array of length mdp.nS giving the action taken in each state
        mdp: MDP (provides P, nS, nA)
        gamma: discount factor
    Outputs:
        V: numpy array of floats, length mdp.nS
    """
    a = np.eye(mdp.nS)          # builds up I - gamma * P_pi
    b = np.zeros(mdp.nS)        # expected immediate reward R_pi
    for s in range(mdp.nS):
        for prob, nextstate, reward in mdp.P[s][pi[s]]:
            a[s, nextstate] -= gamma * prob
            b[s] += prob * reward
    V = np.linalg.solve(a, b)
    return V
# Now let's compute the value of an arbitrarily-chosen policy.
#
begin_grading()
print(compute_vpi(np.ones(16), mdp, gamma=GAMMA))
end_grading()
# As a sanity check, if we run `compute_vpi` on the solution from our previous value iteration run, we should get approximately (but not exactly) the same values produced by value iteration.
Vpi=compute_vpi(pis_VI[15], mdp, gamma=GAMMA)
V_vi = Vs_VI[15]
print("From compute_vpi", Vpi)
print("From value iteration", V_vi)
print("Difference", Vpi - V_vi)
# ### Problem 3b: state-action value function
#
# Next, you'll write a function to compute the state-action value function $Q^{\pi}$, defined as follows
#
# $$Q^{\pi}(s, a) = \sum_{s'} P(s,a,s')[ R(s,a,s') + \gamma V^{\pi}(s')]$$
#
# + nbgrader={"grade": false, "grade_id": "compute_qpi", "locked": false, "solution": true}
def compute_qpi(vpi, mdp, gamma):
    """Return the state-action value function Q^pi given state values `vpi`.

    Q[s, a] = sum_{s'} P(s,a,s') * (R(s,a,s') + gamma * vpi[s'])

    Inputs:
        vpi: array of length mdp.nS with the state values V^pi
        mdp: MDP (provides P, nS, nA)
        gamma: discount factor
    Outputs:
        Qpi: numpy array of shape (mdp.nS, mdp.nA)
    """
    Qpi = np.zeros((mdp.nS, mdp.nA))
    for s in range(mdp.nS):
        for a in range(mdp.nA):
            for prob, nextstate, reward in mdp.P[s][a]:
                Qpi[s, a] += prob * (reward + gamma * vpi[nextstate])
    return Qpi
begin_grading()
Qpi = compute_qpi(np.arange(mdp.nS), mdp, gamma=0.95)
print("Qpi:\n", Qpi)
end_grading()
# -
# Now we're ready to run policy iteration!
# + nbgrader={"grade": false, "locked": false, "solution": false}
def policy_iteration(mdp, gamma, nIt):
    """Run exact policy iteration for nIt sweeps.

    Each sweep evaluates the current policy exactly (compute_vpi), computes the
    state-action values (compute_qpi), and takes the greedy improvement.
    Returns (value functions, policies); the policy list includes the initial
    all-zeros policy, so len(pis) == nIt + 1 while len(Vs) == nIt.
    """
    value_fns = []
    policies = [np.zeros(mdp.nS, dtype='int')]  # start from the all-zeros policy
    print("Iteration | # chg actions | V[0]")
    print("----------+---------------+---------")
    for step in range(nIt):
        current = policies[-1]
        # Policy evaluation, then greedy policy improvement.
        state_values = compute_vpi(current, mdp, gamma)
        action_values = compute_qpi(state_values, mdp, gamma)
        improved = action_values.argmax(axis=1)
        print("%4i      | %6i        | %6.5f"%(step, (improved != current).sum(), state_values[0]))
        value_fns.append(state_values)
        policies.append(improved)
    return value_fns, policies
Vs_PI, pis_PI = policy_iteration(mdp, gamma=0.95, nIt=20)
plt.plot(Vs_PI);
# -
# Now we can compare the convergence of value iteration and policy iteration on several states.
# For fun, you can try adding modified policy iteration.
for s in range(5):
plt.figure()
plt.plot(np.array(Vs_VI)[:,s])
plt.plot(np.array(Vs_PI)[:,s])
plt.ylabel("value of state %i"%s)
plt.xlabel("iteration")
plt.legend(["value iteration", "policy iteration"], loc='best')
|
hw2/HW2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial: FICO Explainable Machine Learning Challenge - updating binning
# In this tutorial, we extend the previous tutorial using the FICO dataset by replacing the usual binning with a piecewise continuous binning. The piecewise continuous binning uses a Gradient Boosting Tree (GBT) as an estimator.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# +
from lightgbm import LGBMClassifier
from optbinning import BinningProcess
from optbinning import OptimalPWBinning
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import auc, roc_auc_score, roc_curve
from sklearn.model_selection import train_test_split
# -
# Download the dataset from the link above and load it: https://community.fico.com/s/explainable-machine-learning-challenge.
# +
df = pd.read_csv("data/FICO_challenge/heloc_dataset_v1.csv", sep=",")
variable_names = list(df.columns[1:])
X = df[variable_names]
# -
# Transform the categorical dichotomic target variable into numerical.
# Encode the dichotomic target: "Bad" -> 1 (event), anything else ("Good") -> 0.
y = df.RiskPerformance.values
mask = y == "Bad"
y[mask] = 1
y[~mask] = 0
# Cast from the original object dtype to int for the classifiers below.
y = y.astype(int)
# #### Modeling
# The data dictionary of this challenge includes three special values/codes:
#
# * -9 No Bureau Record or No Investigation
# * -8 No Usable/Valid Trades or Inquiries
# * -7 Condition not Met (e.g. No Inquiries, No Delinquencies)
special_codes = [-9, -8, -7]
# This challenge imposes monotonicity constraints with respect to the probability of a bad target for many of the variables. We apply these rules by passing the following dictionary of parameters for these variables involved.
binning_fit_params = {
"ExternalRiskEstimate": {"monotonic_trend": "descending"},
"MSinceOldestTradeOpen": {"monotonic_trend": "descending"},
"MSinceMostRecentTradeOpen": {"monotonic_trend": "descending"},
"AverageMInFile": {"monotonic_trend": "descending"},
"NumSatisfactoryTrades": {"monotonic_trend": "descending"},
"NumTrades60Ever2DerogPubRec": {"monotonic_trend": "ascending"},
"NumTrades90Ever2DerogPubRec": {"monotonic_trend": "ascending"},
"PercentTradesNeverDelq": {"monotonic_trend": "descending"},
"MSinceMostRecentDelq": {"monotonic_trend": "descending"},
"NumTradesOpeninLast12M": {"monotonic_trend": "ascending"},
"MSinceMostRecentInqexcl7days": {"monotonic_trend": "descending"},
"NumInqLast6M": {"monotonic_trend": "ascending"},
"NumInqLast6Mexcl7days": {"monotonic_trend": "ascending"},
"NetFractionRevolvingBurden": {"monotonic_trend": "ascending"},
"NetFractionInstallBurden": {"monotonic_trend": "ascending"},
"NumBank2NatlTradesWHighUtilization": {"monotonic_trend": "ascending"}
}
# Instantiate a ``BinningProcess`` object class with variable names, special codes and dictionary of binning parameters. Choose a logistic regression as a classifier.
binning_process = BinningProcess(variable_names, special_codes=special_codes,
binning_fit_params=binning_fit_params)
clf = LogisticRegression(solver="lbfgs")
# Split dataset into train and test. Fit pipelines with training data, then generate classification reports to show the main classification metrics.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
binning_process.fit(X_train, y_train)
# Now, we replace the usual binning of a few numerical variables with a piecewise continuous binning. Since version 0.9.2, the binning process includes the method ``update_binned_variable`` which allows updating an optimal binning without the need of re-processing the rest of the variables.
# +
update_variables = ["ExternalRiskEstimate", "MSinceOldestTradeOpen", "PercentTradesWBalance"]
for variable in update_variables:
optb = OptimalPWBinning(estimator=LGBMClassifier(),
name=variable, objective="l1")
optb.fit(X_train[variable], y_train, lb=0.001, ub=0.999)
binning_process.update_binned_variable(name=variable, optb=optb)
# -
# #### Performance
clf.fit(binning_process.transform(X_train), y_train)
y_pred = clf.predict(binning_process.transform(X_test))
print(classification_report(y_test, y_pred))
# If we compare with the results from the previous tutorial, we observe a slight improvement in all three metrics.
probs = clf.predict_proba(binning_process.transform(X_test))
preds = probs[:,1]
fpr1, tpr1, threshold = roc_curve(y_test, preds)
roc_auc1 = auc(fpr1, tpr1)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr1, tpr1, 'b', label='Binning+LR: AUC = {0:.4f}'.format(roc_auc1))
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1],'k--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
# Finally, let's check the piecewise continuous binning for one of the variables with more importance.
optb = binning_process.get_binned_variable("ExternalRiskEstimate")
optb.binning_table.build()
optb.binning_table.analysis()
optb.binning_table.plot(metric="event_rate")
|
doc/source/tutorials/tutorial_binning_process_FICO_update_binning.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="zkufh760uvF3"
# 
#
# [](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_label_text_classification/NLU_training_multi_token_label_text_classifier_stackoverflow_tags.ipynb)
#
#
#
# # Training a Deep Learning Classifier for sentences with multiple classes at the same time
# MultiClassifierDL is a Multi-label Text Classification. MultiClassifierDL uses a Bidirectional GRU with Convolution model that we have built inside TensorFlow and supports up to 100 classes. The input to MultiClassifierDL is Sentence Embeddings such as state-of-the-art UniversalSentenceEncoder, BertSentenceEmbeddings, or SentenceEmbeddings
#
#
#
# ### Multi ClassifierDL (Multi-class Text Classification with multiple classes per sentence)
# With the [ClassifierDL model](https://nlp.johnsnowlabs.com/docs/en/annotators#multiclassifierdl-multi-label-text-classification) from Spark NLP you can achieve State Of the Art results on any multi class text classification problem
#
# This notebook showcases the following features :
#
# - How to train the deep learning classifier
# - How to store a pipeline to disk
# - How to load the pipeline from disk (Enables NLU offline mode)
#
#
# + [markdown] id="dur2drhW5Rvi"
# # 1. Install Java 8 and NLU
# + id="hFGnBCHavltY"
import os
# ! apt-get update -qq > /dev/null
# Install java
# ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
# ! pip install nlu > /dev/null pyspark==2.4.7
import nlu
# + [markdown] id="f4KkTfnR5Ugg"
# # 2 Download sample dataset 60k Stack Overflow Questions with Quality Rating
#
#
# https://www.kaggle.com/imoore/60k-stack-overflow-questions-with-quality-rate
# + colab={"base_uri": "https://localhost:8080/"} id="y4xSRWIhwT28" outputId="f7ac934c-b18f-4ffd-d773-842c81b2a80a"
import pandas as pd
# ! wget -N https://ckl-it.de/wp-content/uploads/2020/11/60kstackoverflow.csv -P /tmp
test_path = '/tmp/60kstackoverflow.csv'
train_df = pd.read_csv(test_path)
# + id="gBxgVIB787wd"
# Split labels and clean them.
import pandas as pd
train_df = pd.read_csv(test_path)
f = lambda x : x.replace('<','').replace('>','')
g = lambda l : list(map(f,l))
train_df['y'] = train_df.Tags.str.split('><').map(g).str.join(',')
train_df['text'] = train_df['Title']
# train_df = train_df.iloc[:50]
# + colab={"base_uri": "https://localhost:8080/", "height": 430} id="OfMCrNk-L_pq" outputId="6ce7798d-ff2f-4b02-a066-67497ba0bdfa"
counts = train_df.explode('y').y.value_counts()
counts.iloc[0:100].plot.bar(figsize=(40,8), title='Distribution of Label Tags in Dataset')
# + colab={"base_uri": "https://localhost:8080/", "height": 573} id="73UChGrePhr1" outputId="af8b97e5-cec0-469e-c55d-433364ee31a5"
exp = train_df.y.str.split(',').explode().value_counts()
top_100_tags = list(exp[0:25].index)
# z = lambda r : True if r.split(',') in top_100_tags else False
z = lambda r : True if all(x in top_100_tags for x in r.split(',') ) else False
top_100_idx = train_df.y.map(z)
train_df = train_df[top_100_idx]
train_df
# + colab={"base_uri": "https://localhost:8080/", "height": 653} id="e_z1IU-XT0a0" outputId="dc80c79e-11a0-4e63-bd40-8d933dbbb6aa"
import nlu
# load a trainable pipeline by specifying the train prefix
unfitted_pipe = nlu.load('train.multi_classifier')
#configure epochs
unfitted_pipe['multi_classifier'].setMaxEpochs(100)
unfitted_pipe['multi_classifier'].setLr(0.005)
# fit it on a dataset with label='y' and text columns. Labels separated by ','
fitted_pipe = unfitted_pipe.fit(train_df[['y','text']], label_seperator=',')
# predict with the trained pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df[['y','text']])
preds
# + [markdown] id="DL_5aY9b3jSd"
# # 4. Evaluate the model
# + colab={"base_uri": "https://localhost:8080/"} id="0YDA2KunCeqQ" outputId="8f72b51d-8e4c-49e8-884e-af5b0fdfa1ac"
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
mlb = MultiLabelBinarizer()
mlb = mlb.fit(preds.y.str.split(','))
y_true = mlb.transform(preds['y'].str.split(','))
y_pred = mlb.transform(preds.multi_classifier_classes.str.join(',').str.split(','))
print("Classification report: \n", (classification_report(y_true, y_pred)))
print("F1 micro averaging:",(f1_score(y_true, y_pred, average='micro')))
print("ROC: ",(roc_auc_score(y_true, y_pred, average="micro")))
# + [markdown] id="mhFKVN93o1ZO"
# # 5. Lets try different Sentence Emebddings
# + colab={"base_uri": "https://localhost:8080/"} id="CzJd8omao0gt" outputId="c3903ffc-ee61-47c1-87cf-bb1876436e25"
# We can use nlu.print_components(action='embed_sentence') to see every possibler sentence embedding we could use. Lets use bert!
nlu.print_components(action='embed_sentence')
# + colab={"base_uri": "https://localhost:8080/"} id="0ofYHpu7sloS" outputId="ea715585-daa2-433d-d281-02b9e61222a4"
pipe = nlu.load('en.embed_sentence.small_bert_L12_768 train.multi_classifier')
pipe.print_info()
# + colab={"base_uri": "https://localhost:8080/", "height": 570} id="ABHLgirmG1n9" outputId="60e9995e-080c-4213-cf03-c7baba89bd6a"
# Load pipe with bert embeds
# using large embeddings can take a few hours..
pipe['multi_classifier'].setMaxEpochs(120)
pipe['multi_classifier'].setLr(0.0005)
fitted_pipe = pipe.fit(train_df[['y','text']],label_seperator=',')
preds = fitted_pipe.predict(train_df[['y','text']])
preds
# + colab={"base_uri": "https://localhost:8080/"} id="E7ah2LM6tIhG" outputId="edaa6235-c8d2-474a-9cc1-331e0967086c"
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
# Same evaluation as section 4, repeated for the BERT-embedding pipeline:
# one-hot encode the comma-separated label strings, then score.
mlb = MultiLabelBinarizer()
mlb = mlb.fit(preds.y.str.split(','))
y_true = mlb.transform(preds['y'].str.split(','))
y_pred = mlb.transform(preds.multi_classifier_classes.str.join(',').str.split(','))
print("Classification report: \n", (classification_report(y_true, y_pred)))
print("F1 micro averaging:",(f1_score(y_true, y_pred, average='micro')))
print("ROC: ",(roc_auc_score(y_true, y_pred, average="micro")))
# + [markdown] id="2BB-NwZUoHSe"
# # 6. Let's save the model
# + colab={"base_uri": "https://localhost:8080/"} id="eLex095goHwm" outputId="bbf99f56-d4b1-4440-ecb7-fe9d61935c62"
# Persist the fitted pipeline to disk so it can be reloaded without retraining.
stored_model_path = './models/multi_classifier_dl_trained'
fitted_pipe.save(stored_model_path)
# + [markdown] id="e_b2DPd4rCiU"
# # 7. Let's load the model from HDD.
# This makes Offline NLU usage possible!
# You need to call nlu.load(path=path_to_the_pipe) to load a model/pipeline from disk.
# + id="SO4uz45MoRgp"
hdd_pipe = nlu.load(path=stored_model_path)
# Run a quick inference to verify the reloaded pipeline works end to end.
preds = hdd_pipe.predict('Tesla plans to invest 10M into the ML sector')
preds
# + id="e0CVlkk9v6Qi"
hdd_pipe.print_info()
# + id="M1LjAwJVJxun"
|
nlu/colab/Training/multi_label_text_classification/NLU_training_multi_token_label_text_classifier_stackoverflow_tags.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>Classes and Objects in Python</h1>
# <p>
# <strong>Welcome!</strong>
# Objects in programming are like objects in real life. Like life, there are different classes of objects. In this notebook, we will create two classes called Circle and Rectangle. By the end of this notebook, you will have a better idea about :
# <ul>
# <li>what a class is</li>
# <li>what an attribute is</li>
# <li>what a method is</li>
# </ul>
#
# Don’t worry if you don’t get it the first time, as much of the terminology is confusing. Don’t forget to do the practice tests in the notebook.
# </p>
# <h2>Table of Contents</h2>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ul>
# <li>
# <a href="#intro">Introduction to Classes and Objects</a>
# <ul>
# <li><a href="create">Creating a class</a></li>
# <li><a href="instance">Instances of a Class: Objects and Attributes</a></li>
# <li><a href="method">Methods</a></li>
# </ul>
# </li>
# <li><a href="creating">Creating a class</a></li>
# <li><a href="circle">Creating an instance of a class Circle</a></li>
# <li><a href="rect">The Rectangle Class</a></li>
# </ul>
# <p>
# Estimated time needed: <strong>40 min</strong>
# </p>
# </div>
#
# <hr>
# <h2 id="intro">Introduction to Classes and Objects</h2>
# <h3>Creating a Class</h3>
# The first part of creating a class is giving it a name: In this notebook, we will create two classes, Circle and Rectangle. We need to determine all the data that make up that class, and we call that an attribute. Think about this step as creating a blue print that we will use to create objects. In figure 1 we see two classes, circle and rectangle. Each has their attributes, they are variables. The class circle has the attribute radius and color, while the rectangle has the attribute height and width. Let’s use the visual examples of these shapes before we get to the code, as this will help you get accustomed to the vocabulary.
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/ClassesClass.png" width="500" />
# <i>Figure 1: Classes circle and rectangle, and each has their own attributes. The class circle has the attribute radius and colour, the rectangle has the attribute height and width.</i>
#
# <h3 id="instance">Instances of a Class: Objects and Attributes</h3>
# An instance of an object is the realisation of a class, and in Figure 2 we see three instances of the class circle. We give each object a name: red circle, yellow circle and green circle. Each object has different attributes, so let's focus on the attribute of colour for each object.
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/ClassesObj.png" width="500" />
# <i>Figure 2: Three instances of the class circle or three objects of type circle.</i>
# The colour attribute for the red circle is the colour red, for the green circle object the colour attribute is green, and for the yellow circle the colour attribute is yellow.
#
# <h3 id="method">Methods</h3>
# Methods give you a way to change or interact with the object; they are functions that interact with objects. For example, let’s say we would like to increase the radius by a specified amount of a circle. We can create a method called **add_radius(r)** that increases the radius by **r**. This is shown in figure 3, where after applying the method to the "orange circle object", the radius of the object increases accordingly. The “dot” notation means to apply the method to the object, which is essentially applying a function to the information in the object.
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/ClassesMethod.png" width="500" />
# <i>Figure 3: Applying the method “add_radius” to the object orange circle object.</i>
# <hr>
# <h2 id="creating">Creating a Class</h2>
# Now we are going to create a class circle, but first, we are going to import a library to draw the objects:
# +
# Import the library
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# The first step in creating your own class is to use the <code>class</code> keyword, then the name of the class as shown in Figure 4. In this course the class parent will always be object:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/ClassesDefine.png" width="400" />
# <i>Figure 4: Three instances of the class circle or three objects of type circle.</i>
# The next step is a special method called a constructor <code>__init__</code>, which is used to initialize the object. The input are data attributes. The term <code>self</code> contains all the attributes in the set. For example the <code>self.color</code> gives the value of the attribute color and <code>self.radius</code> will give you the radius of the object. We also have the method <code>add_radius()</code> with the parameter <code>r</code>, the method adds the value of <code>r</code> to the attribute radius. To access the radius we use the syntax <code>self.radius</code>. The labeled syntax is summarized in Figure 5:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%203/Images/ClassesCircle.png" width="600" />
# <i>Figure 5: Labeled syntax of the object circle.</i>
# The actual object is shown below. We include the method <code>drawCircle</code> to display the image of a circle. We set the default radius to 3 and the default colour to blue:
# +
# Create a class Circle
class Circle(object):
    """A circle defined by a radius and a fill colour.

    Attributes:
        radius: length of the circle's radius (default 3).
        color: matplotlib colour used when drawing (default 'blue').
    """

    def __init__(self, radius=3, color='blue'):
        # Data attributes that fully describe this circle.
        self.radius = radius
        self.color = color

    def add_radius(self, r):
        """Grow the radius by *r* and return the updated radius."""
        self.radius += r
        return self.radius

    def drawCircle(self):
        """Render the circle with matplotlib, axes scaled to its size."""
        patch = plt.Circle((0, 0), radius=self.radius, fc=self.color)
        plt.gca().add_patch(patch)
        plt.axis('scaled')
        plt.show()
# -
# <hr>
# <h2 id="circle">Creating an instance of a class Circle</h2>
# Let’s create the object <code>RedCircle</code> of type Circle to do the following:
# +
# Create an object RedCircle
RedCircle = Circle(10, 'red')
# -
# We can use the <code>dir</code> command to get a list of the object's methods. Many of them are default Python methods.
# +
# Find out the methods can be used on the object RedCircle
dir(RedCircle)
# -
# We can look at the data attributes of the object:
# +
# Print the object attribute radius
RedCircle.radius
# +
# Print the object attribute color
RedCircle.color
# -
# We can change the object's data attributes:
# +
# Set the object attribute radius
RedCircle.radius = 1
RedCircle.radius
# -
# We can draw the object by using the method <code>drawCircle()</code>:
# +
# Call the method drawCircle
RedCircle.drawCircle()
# -
# We can increase the radius of the circle by applying the method <code>add_radius()</code>. Let's increase the radius by 2 and then by 5:
# +
# Use method to change the object attribute radius
print('Radius of object:',RedCircle.radius)
RedCircle.add_radius(2)
print('Radius of object of after applying the method add_radius(2):',RedCircle.radius)
RedCircle.add_radius(5)
print('Radius of object of after applying the method add_radius(5):',RedCircle.radius)
# -
# Let’s create a blue circle. As the default colour is blue, all we have to do is specify what the radius is:
# +
# Create a blue circle with a given radius
BlueCircle = Circle(radius=100)
# -
# As before we can access the attributes of the instance of the class by using the dot notation:
# +
# Print the object attribute radius
BlueCircle.radius
# +
# Print the object attribute color
BlueCircle.color
# -
# We can draw the object by using the method <code>drawCircle()</code>:
# +
# Call the method drawCircle
BlueCircle.drawCircle()
# -
# Compare the x and y axis of the figure to the figure for <code>RedCircle</code>; they are different.
# <hr>
# <h2 id="rect">The Rectangle Class</h2>
# Let's create a class rectangle with the attributes of height, width and color. We will only add the method to draw the rectangle object:
# +
# Create a new Rectangle class for creating a rectangle object
class Rectangle(object):
    """A rectangle defined by width, height and a fill colour.

    Attributes:
        width: horizontal extent (default 2).
        height: vertical extent (default 3).
        color: matplotlib colour used when drawing (default 'r').
    """

    def __init__(self, width=2, height=3, color='r'):
        # Data attributes that fully describe this rectangle.
        self.height = height
        self.width = width
        self.color = color

    def drawRectangle(self):
        """Render the rectangle with matplotlib, axes scaled to its size."""
        shape = plt.Rectangle((0, 0), self.width, self.height, fc=self.color)
        plt.gca().add_patch(shape)
        plt.axis('scaled')
        plt.show()
# -
# Let’s create the object <code>SkinnyBlueRectangle</code> of type Rectangle. Its width will be 2 and height will be 3, and the color will be blue:
# +
# Create a new object rectangle
SkinnyBlueRectangle = Rectangle(2, 10, 'blue')
# -
# As before we can access the attributes of the instance of the class by using the dot notation:
# +
# Print the object attribute height
SkinnyBlueRectangle.height
# +
# Print the object attribute width
SkinnyBlueRectangle.width
# +
# Print the object attribute color
SkinnyBlueRectangle.color
# -
# We can draw the object:
# +
# Use the drawRectangle method to draw the shape
SkinnyBlueRectangle.drawRectangle()
# -
# Let’s create the object <code>FatYellowRectangle</code> of type Rectangle :
# +
# Create a new object rectangle
FatYellowRectangle = Rectangle(20, 5, 'yellow')
# -
# We can access the attributes of the instance of the class by using the dot notation:
# +
# Print the object attribute height
FatYellowRectangle.height
# +
# Print the object attribute width
FatYellowRectangle.width
# +
# Print the object attribute color
FatYellowRectangle.color
# -
# We can draw the object:
# +
# Use the drawRectangle method to draw the shape
FatYellowRectangle.drawRectangle()
|
Python-Classes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.5 64-bit (''ai_tutorial'': venv)'
# name: python37564bitaitutorialvenvbfa9976514ab457184b1b6f4ee41b3e6
# ---
# データのダウンロード
# !wget https://raw.githubusercontent.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv
# +
import pandas as pd
# Display the file contents
csv = pd.read_csv('iris.csv', encoding='utf-8')
csv
# +
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# Separate labels (y) from input features (x)
y = csv.loc[:, 'Name']
x = csv.loc[:, ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth']]
print(y)
print(x)
# -
# Split into training and test sets (80/20, shuffled)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, train_size=0.8, shuffle=True)
print(f'train = {len(x_train)}, {len(y_train)}')
print(f'test = {len(x_test)}, {len(y_test)}')
# Train a support-vector classifier
clf = SVC()
clf.fit(x_train, y_train)
# Evaluate on the held-out test set ("正解率" = accuracy)
y_pred = clf.predict(x_test)
print(f'正解率 = {accuracy_score(y_test, y_pred)}')
|
iris.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.4 64-bit
# name: python37464bitb45f476b377543668ef99e7df0953184
# ---
# # Commonwealth Bank Statement Analyser
from PyPDF2 import PdfFileReader
from money import Money
# Statement PDF to parse and the markers that precede the balance figures.
file_path = 'Statement20200111.pdf'
opening_balance_indicator = 'OPENING BALANCE'
closing_balance_indicator = 'CLOSING BALANCE'
# ISO currency code used when constructing Money amounts.
currency = 'AUD'
def get_pdf_text(file_path: str) -> [str]:
    """Return the text of the first page of the PDF at *file_path* as lines.

    The statement PDFs are encrypted with an empty password, hence decrypt('').
    """
    # Use a context manager so the file handle is closed after extraction
    # (the original opened the file and never closed it).
    with open(file_path, 'rb') as pdf_file:
        pdf = PdfFileReader(pdf_file)
        pdf.decrypt('')
        return pdf.getPage(0).extractText().splitlines()
def get_balance(text: [str], indicator: str, currency: str) -> Money:
    """Return the Money amount that follows *indicator* in the statement text.

    The balance figure is expected on the line immediately after the line
    containing *indicator* (e.g. 'OPENING BALANCE' then '$1,234.56').
    Returns None when the indicator is not found.
    """
    balance = None
    for index, line in enumerate(text):
        # Original used `line.find(indicator) is -1`: identity comparison with
        # an int literal only works via CPython's small-int cache. Use a plain
        # membership test instead.
        if indicator not in line:
            continue
        # Strip the leading '$' and thousands separators before parsing.
        balance = Money(text[index + 1].lstrip('$').replace(',', ''), currency)
        break
    return balance
# Extract the statement text once, then pull both balances out of it.
text = get_pdf_text(file_path)
opening_balance = get_balance(text, opening_balance_indicator, currency)
closing_balance = get_balance(text, closing_balance_indicator, currency)
print(f'Your opening balance is {opening_balance} and your closing balance is {closing_balance}')
|
statement_analyser.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="V13V1hY28z9P"
# # ICP Lecture 7
#
# This ICP demonstrates converting a MATLAB file into acceptable Python code.
#
# Start with the MATLAB code provided and edit until it runs successfully in Python--either in Spyder or in a Jupyter Notebook.
#
# The MATLAB solution is
#
# x =
#
# 3.0000
#
# -2.5000
#
# 7.0000
#
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="Cn4bXGzT8vnV" outputId="a5b9b83e-4e72-4e59-df8a-1552700c04cd"
# -*- coding: utf-8 -*-
"""
ICPL5python
Used for comparison to MATLAB function executing the same method.
Example: ax = b
Equation 1: $3x_1-0.1x_2-0.2x_3=7.85$.
Equation 2: $0.1x_1+7x_2-0.3x_3=-19.3$.
Equation 3: $0.3x_1-0.2x_2+10x_3=71.4$.
Soln x = [3; -2.5; 7]
Created on Tue Aug 30 12:30:54 2016
@author: <NAME>
"""
import numpy as np
# Initialize a matrix and b vector as numpy arrays.
# These encode the 3x3 system from the docstring above; expected x = [3, -2.5, 7].
a = np.array( [ [3.0, -.1, -.2], [0.1, 7.0, -0.3],[ 0.3, -.2, 10.0] ] )
b = np.array( [ [7.85], [-19.3], [71.4] ] )
# Define Gaussian elimination function
def gaussElimin(a,b):
    """Solve a . x = b by Gaussian elimination with back substitution.

    Note: both *a* and *b* are overwritten in place; on return *b* holds
    the solution vector (which is also returned). No pivoting is performed,
    so a zero on the diagonal will fail.
    """
    n = len(b)
    # Forward elimination: zero out every entry below the diagonal.
    for pivot in range(0, n - 1):
        for row in range(pivot + 1, n):
            if a[row, pivot] != 0.0:
                factor = a[row, pivot] / a[pivot, pivot]
                a[row, pivot + 1:n] -= factor * a[pivot, pivot + 1:n]
                b[row] -= factor * b[pivot]
    # Back substitution, from the last row upward.
    for row in range(n - 1, -1, -1):
        b[row] = (b[row] - np.dot(a[row, row + 1:n], b[row + 1:n])) / a[row, row]
    return b
# Solve for x. Note gaussElimin mutates `a` and `b` in place; after this
# call `b` also holds the solution.
x = gaussElimin(a,b)
# Print x (expected [3, -2.5, 7] per the MATLAB reference solution).
print("x =",x)
# + colab={} colab_type="code" id="cWBZmX8u_J_Q"
|
CHEclassFa20/In Class Problem Solutions/Python/ConvertFromMATLABtoPythonSoln.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Basketball Player Analysis: <NAME>
#
# The Round Mound of Rebound, Sir Charles, and Chuck are some of the few nicknames this basketball great has acquired over the years. One of my favorite basketball personalities as he is very much unfiltered in Inside the NBA with his wild insults to anyone he wants and also having some of the funniest moments on television(His story about getting a bracelet from a guy in steam room always makes my laugh out loud!). So I wanted to do an indepth analysis of his career and how he stacked up with some of the greats that were playing the power forward position. I will also be looking at how he stacks up at the end of his career and also his time on the 1992 Dream Team.
#
# Sections will be split as followed
# Rookie year
# 76ers years and playoffs
# Sun's Years
# Rockets Years
# How he stacks up statistically at the end of his career
# Dream Team
# Closing remarks
from bba_functions.b_scrape import getBarkleyData
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# NOTE(review): duplicate import — getBarkleyData was already imported above.
from bba_functions.b_scrape import getBarkleyData
# Scrape Charles Barkley's per-season stats into a DataFrame.
ch_barkl = getBarkleyData('Charles', 'Barkley')
ch_barkl.head()
ch_barkl.info()
# League-average reference stats loaded from a local CSV.
avg = pd.read_csv('avg_basketball.csv')
avg.info()
avg['Ht'] = avg['Ht'].astype('str')
# Keep only the per-game statistical columns for comparison.
avg_sub = avg[['FGA', 'FG%','3PA', '3P%','FT%', 'ORB', 'DRB', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS']]
# First row = rookie season.
ch_barkl.iloc[[0]]
# ## Rookie Year
# Drafted by the Philadelphia 76ers 5th overall, <NAME> was in a draft class that included the number 1 overall pick in Hakeem Olajuwon, 3rd overall <NAME> and 16th overall <NAME>, in one of the best draft classes of all time.
# <NAME> started 60 out of the 82 games he played in his first year with the 76ers. In his rookie season he ended up averaging 14.0 PTS, 1.9 AST, 8.6 TRB, 1.0 BLK, and 1.9 STL.
#
# The 76ers ended with the 3rd best record in the East with a record of 58-24, while Barkley made the All-Rookie team along with <NAME> and <NAME>. These other two rookies had monstrous rookie seasons and I will analyze their careers another time. Barkley ended up losing to the Bird-led Boston Celtics 4-1 while averaging 14.0 PTS, 11.2 TRB, 1.6 AST, 2.0 STL, and 1.2 BLK.
#
# Barkley was able to do a lot in his rookie year and showed he was a very promising rookie, but in his next season he made a giant leap towards his legendary career, and his game kept evolving until his last season in Philadelphia.
# Columns of interest when comparing seasons across teams.
stats = ['G', 'GS', 'MP', 'Tm', 'FGA', 'FG%','3PA', '3P%','FT%', 'ORB', 'DRB', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS']
# Second season row.
ch_barkl.iloc[[1]]
# NOTE(review): chaining a positional slice `[1:]` before a boolean mask can
# misalign rows vs. the mask's index — confirm this filters as intended.
ch_barkl[stats][1:][ch_barkl['Tm'] == 'PHI']
ch_barkl[stats][ch_barkl['Tm'] == 'PHO']
ch_barkl[stats][ch_barkl['Tm'] == 'HOU']
# ## The Philedelphia 76er's years
#
# Summary statistics over selected per-game columns.
ch_barkl[['MP', '2P%', 'FT%', 'ORB', 'DRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS']].describe()
# Bug fix: `sns.dist` does not exist (AttributeError); `distplot` is seaborn's
# distribution plot for a single series (superseded by `histplot` in newer
# seaborn releases).
sns.distplot(ch_barkl['MP'])
|
ch_barkley.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Functions (Methods)
# ---
#
# * Functions are known by many names in other languages. Most commonly methods and subroutines.
# * A function has a contract that guarantees certain output based on certain input(s)
# * Variables get passed into the function
# * The function then preforms actions based on the variables that are passed
# * A new value is returned from the function
#
#
# Like a recipe!
#
#
# In python we are able to define a function with `def`. Here we define a function that we will call "add_two_numbers"
#
#
# `def add_two_numbers():`
#
def add_two_numbers():
    """Return the sum of the fixed operands 50 and 15 (i.e. 65)."""
    return 50 + 15

# Call the function to execute it.
add_two_numbers()
# The parenthesis after the name allow us to 'pass' values, or arguments, to the function. Just like if/else statements we begin the definition with a colon (`:`).
#
# Let's pass in a value! Here we will define a function (add_number) that takes 1 arguments (num1)
def add_number(num1):
    """Return *num1* plus the constant 50."""
    return num1 + 50
# The `return` gives a value back. A function that doesn’t explicitly return a value automatically returns None.
#
# **NOTE: Defining a function does not run it.**
#
# You must 'call' the function to execute the code it contains.
add_number(90)
# You can pass multiple arguments to a function by separating them with a comma. The arguments are passed to the function in the order in which they are defined
# +
# A function called divide_two_numbers that take 2 arguments: num1 and num2
def divide_two_numbers(num1, num2):
    """Return the quotient num1 / num2 (true division)."""
    return num1 / num2

# Argument order matters: 100/50 vs 50/100.
first_result = divide_two_numbers(100, 50)
print(first_result)
second_result = divide_two_numbers(50, 100)
print(second_result)
# -
# ## Why Use Functions?
# Functions let us break down our programs into smaller bits that can be reused and tested
# * Human beings can only keep a few items in working memory at a time.
# * Understand larger/more complicated ideas by understanding and combining pieces.
# * Functions serve the same purpose in programs.
# * Encapsulate complexity so that we can treat it as a single “thing”.
# * **Enables reusablility**.
# * Write one time, use many times.
# ### 1.Testability
# Imagine a really big program with lots of lines of code. There is a problem somewhere in the code because you are not getting the results you expect
#
# * How do you find the problem in your code?
# * If your program is composed of lots of small functions that only do one thing then you can test each function individually.
#
# ### 2. Reusability
# Imagine a really big program with lots of lines of code. There is a section of code you want to use in a different part of the program.
#
# * How do you reuse that part of the code?
# * If you just have one big program then you have to copy and paste that bit of code where you want it to go, but if that bit was a function, you could just use that function
#
#
# #### Always keep both of these concepts in mind when writing programs.
#
#
# * Write small functions that do one thing
# * Never have one giant function that does a million things.
# * A well written script is composed of lots of functions that do one thing
# ---
# ## EtherPad
# What does the following program print? (Don't actually code, just think about it.)
#
# def report(pressure):
# print('The pressure is: ', pressure)
#
# report(22.5)
#
# Post your answer to EtherPad or vote for the correct answer if you see it
#
# ---
# ---
# ## EXERCISE:
# “Adding” two strings produces their concatenation: 'a' + 'b' is 'ab'.
# 1. Write a function called quote that takes two parameters called `original` and `wrapper` and returns a new string that has the wrapper value at the beginning and end of the original.
#
# 1. Call your function with the inputs `"name"` and `'"'`
#
# ---
# +
def quote(original, wrapper):
    """Return *original* with *wrapper* at both the beginning and the end.

    Fixes the previous solution, which misspelled the parameter as `rapper`
    and only appended the wrapper (the exercise asks for the wrapper value at
    BOTH ends, e.g. quote("name", '"') -> '"name"').
    """
    return wrapper + original + wrapper

ans = quote("name", '"')
print(ans)
# -
# ---
# ## EXERCISE:
# If the variable 's' refers to a string, then s[0] is the string’s first character and s[-1] is its last.
# 1. Write a function called outer that returns a string made up of just the first and last characters of its input.
# 1. Call you function with the input `"helium"`
# +
def outer(s):
    """Return a two-character string: the first and last characters of *s*."""
    return f"{s[0]}{s[-1]}"

outer('helium')
# -
# ---
# ## EXERCISE:
# 1. Explain why the two lines of output below appeared in the order they did.
# ```
# def print_date(year, month, day):
# joined = str(year) + '/' + str(month) + '/' + str(day)
# print(joined)
#
# result = print_date(1871, 3, 19)
# print('result of call is:', result)
# ```
# OUTPUT:
# ~~~
# 1871/3/19
# result of call is: None
# ~~~
#
# ---
# ---
# ## EXERCISE:
# ## The Problem
# Last month we ran an experiment in the lab, but one of the windows was left opened.
# If the temperature in the lab fell below 285 degrees Kelvin all of the data are ruined.
#
# Luckily a data logger was running, but unfortunately it only collects the temperature in fahrenheit.
# 
# **Example log data:**
#
# ```
# beginTime,endTime,Temp
# 1/1/2017 0:00,1/1/2017 1:00,54.0
# 1/1/2017 1:00,1/1/2017 2:00,11.7
# 1/1/2017 2:00,1/1/2017 3:00,11.7
# ```
# 1\. Write a function that converts temperatures from Fahrenheit to Kelvin. ((temp_f - 32) * (5/9) + 273.15)
# +
def fahr_to_kelvin(temp_f):
    """Convert a temperature from degrees Fahrenheit to Kelvin."""
    return (temp_f - 32) * (5 / 9) + 273.15

# Sanity check: 32 F is the freezing point of water, 273.15 K.
print(fahr_to_kelvin(32))
# -
# 2\. We read the packaging on the materials wrong! If the temperature in the lab fell below -5 degrees Celsius all of the data is ruined.
#
# Write a function that converts temperatures from Kelvin into Celsius. (temp_k - 273.15)
# +
# write your function here
def kelvin_to_cel(temp_k):
    """Convert a temperature from Kelvin to degrees Celsius."""
    return temp_k - 273.15

print(kelvin_to_cel(332))
type(kelvin_to_cel(332))
# -
# Because we know issues like this happen all of the time, let's prepare for the inevitability.
#
# 3\. Write a function to convert fahrenheit to celsius, without a formula.
# * We could write out the formula, but we don’t need to. Instead, we can compose the two functions we have already created
# Compose the two converters: Fahrenheit -> Kelvin -> Celsius (32 F == 0 C).
print(kelvin_to_cel(fahr_to_kelvin(32)))
# This is our first taste of how larger programs are built: we define basic operations, then combine them in ever-large chunks to get the effect we want. Real-life functions will usually be larger than the ones shown here — typically half a dozen to a few dozen lines — but they shouldn’t ever be much longer than that, or the next person who reads it won’t be able to understand what’s going on.
# ### Allowing for Default Values in a Function
# If we usually want a function to work one way, but occasionally need it to do something else, we can allow people to pass a parameter when they need to but provide a default to make the normal case easier.
# +
def display(a=1, b=2, c=3):
    """Print the three parameter values on one line, using defaults for any omitted."""
    print(f'a: {a} b: {b} c: {c}')

# Parameters are matched left to right; missing ones take their defaults.
print('no parameters:')
display()
print('one parameter:')
display(55)
print('two parameters:')
display(55, 66)
# -
# As this example shows, parameters are matched up from left to right, and any that haven’t been given a value explicitly get their default value. We can override this behavior by naming the value as we pass it in:
print('only setting the value of c')
display(c=77)
# ---
# ## EXERCISE:
# It looks like the logger actually can collect celsius after all! Unfortunately it forgets what temperature type to log and has been intermittently logging both.
#
# **Example log data:**
# ```
# beginTime,endTime,Temp,TempType
# 1/1/2017 0:00,1/1/2017 1:00,54.0,F
# 1/1/2017 1:00,1/1/2017 2:00,11.7,C
# 1/1/2017 2:00,1/1/2017 3:00,11.7,C
# ```
#
# 1. Write a function that either converts to fahrenheit or celsius based on a parameter, with a default assuming fahrenheit ('F')
# * Remember if/else:
# +
def temp_conversion(temp, temptype='F'):
    """Convert a logged temperature reading to Celsius.

    temptype 'F' (the default) means the reading is Fahrenheit and is
    converted via Kelvin; any other value is returned unchanged ('C'
    readings are already Celsius).
    """
    if temptype == 'F':
        temp = kelvin_to_cel(fahr_to_kelvin(temp))
    return temp

print(temp_conversion(54, 'F'))
print(temp_conversion(54, 'C'))
# -
# # -- COMMIT YOUR WORK TO GITHUB --
# # Key Points
#
# * Functions let us break down our programs into smaller bits that can be reused and tested
# * Write small functions that do one thing; never have one giant function that does a million things
# * A well written script is composed of lots of functions that do one thing
# * Functions can have default values set within the parentheses:
|
python-lessons/02 - Functions-Python.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pandas
# ### Overview
# To be able to use pandas you need to import it into the current python scope. It is common to import pandas using the alias pd becaue it makes calling it more succinct using the `import <module> as <alias>`.
#
# ```Python
# import pandas as pd
# ```
# Pandas has a custom data structure called a `DataFrame` which is a very powerful multidmensional, hetrogeneous data type storer. The data are stored using an index and a column name. For example, in the example below the indexes are r0-r4 and columns are c0-c3.
#
# | | c0 | c1 | c2 | c3 |
# |:---|----------:|---------:|---------:|---------:|
# | r0 | 0.169489 | 0.226653 | 0.35706 | 0.199845 |
# | r1 | 0.181289 | 0.289734 | 0.392262 | 0.228899 |
# | r2 | 0.0492157 | 0.429355 | 0.299516 | 0.566478 |
# | r3 | 0.304988 | 0.149374 | 0.801296 | 0.699043 |
# | r4 | 0.236938 | 0.652044 | 0.132635 | 0.519792 |
#
#
# #### Creating a DataFrame
# ```Python
# import pandas as pd
# import numpy as np
# df = pd.DataFrame(np.random.random((5,4)),columns=['c0','c1','c2','c3'],index=['r0','r1','r2','r3','r4'])
# ```
#
# #### Importing from files
# Pandas can import data from many different regularly formatted datasets
# * When importing from a csv file you can specify:
# * the delimiter - e.g. comma, semicolon, tab, space
# * the header names for the column names
# * Column index for the row index names, default is None and these are just assigned to be indexed 0-Nrows
# ```Python
# import pandas as pd
# df = pd.read_csv(filename,sep=',',header,index_col=None)
# ```
# * Importing from excel requires
# * sheet_name
# * header row index
# * index_col
# ```Python
# import pandas as pd
# df = pd.read_excel(filename,sheet_name,header,index_col=None)
# ```
# * Python datasets and objects can also be saved in a binary format using Pickel. It is possible to save the files using this format as well.
# ```Python
# import pandas as pd
# df = pd.read_pickle(filepath)
# ```
#
# **Exercise**
# 1. Create a pandas DataFrame
# 2. Load a csv file into pandas
# 3. Load an excel spreadsheet
# ### Selecting data by column and row
# Using the same DataFrame as before:
#
# | | c0 | c1 | c2 | c3 |
# |:---|----------:|---------:|---------:|---------:|
# | r0 | 0.169489 | 0.226653 | 0.35706 | 0.199845 |
# | r1 | 0.181289 | 0.289734 | 0.392262 | 0.228899 |
# | r2 | 0.0492157 | 0.429355 | 0.299516 | 0.566478 |
# | r3 | 0.304988 | 0.149374 | 0.801296 | 0.699043 |
# | r4 | 0.236938 | 0.652044 | 0.132635 | 0.519792 |
#
# We can access the elements inside the DataFrame by either:
# * using the column name and the index: `df.loc[index,column_name]`
# ```Python
# df.loc['r1','c3']
# 0.2288986004071356
# ```
# * Or we can access the same value using the column index and row index `df.iloc[rowindex,colindex]`
# ```Python
# df.iloc[1,3]
# 0.2288986004071356
# ```
# We can also use this syntax to update the values of the DataFrame and combine it with slicing
# ```Python
# df.iloc[1,:] = 0
# df.loc['r3',:] = 10
# ```
#
# This would update the previous dataframe to be
#
# | | c0 | c1 | c2 | c3 |
# |:---|----------:|---------:|---------:|---------:|
# | r0 | 0.169489 | 0.226653 | 0.35706 | 0.199845 |
# | r1 | 0.000000 | 0.000000 | 0.000000 | 0.000000 |
# | r2 | 0.0492157 | 0.429355 | 0.299516 | 0.566478 |
# | r3 | 10.000000 | 10.000000 | 10.000000 | 10.000000 |
# | r4 | 0.236938 | 0.652044 | 0.132635 | 0.519792 |
#
# **Exercise**
#
# Using the following DataFrame
# ```Python
# import pandas as pd
# df = pd.read_csv("example_data_frame.csv")
# ```
# Create a dataframe with a row index that has a keyword and a number. Use a boolean mask to update only the values of different experiments to be `**2` and others to be normalised and some can be standardised
# ```Python
# def normalise(array):
# '''
# Make a dataset range from 0 to 1
# '''
# minval = np.min(array)
# maxval = np.max(array)
# array-=minval
# array/=(maxval-minval)
# ```
#
# 1. Update every second row of the DataFrame to be squared
# 2.
# df.loc['r1','c3']
# NOTE(review): `df` is not defined anywhere in this notebook — this cell only
# runs if the DataFrame from the markdown examples above is created first.
# Zero out the second row (positional), then set every value in row 'r3' to 10.
df.iloc[1,:] = 0
df.loc['r3',:] = 10
df
# ### Slicing data frame
# We can extract only the data for one column by slicing the dataframe with the column name e.g. `dataframe['col1']` would return a pandas series for just a single column.
#
# If you want to extract multiple columns we can do this by passing a list of column headers e.g. `dataframe[['col1','col2']]` which returns a copy of the original dataframe with only `col1` and `col2`.
#
# **Note: Slicing DataFrames creates a copy of the DataFrame, and if the DataFrame is updated the using the copy the original DataFrame won't be changed**
# ### Adding columns and rows to a DataFrame
# Adding a new column to a DataFrame is very easy, it is similar to adding a new item to a dictionary where the column index is the key and the column data is the item.
#
# ```Python
# dataframe['newcolumn'] = values
# ```
#
# Adding a new row to the DataFrame can be done by inserting a new index into the DataFrame.
# ```Python
# dataframe.loc['newrowindex'] = rowvalues
# ```
# ### Deleting columns and rows
# We can delete a column from the DataFrame using the `drop()` method:
# ```Python
# df.drop('c2',axis=1,inplace=False) # return a new df object without column 'c2'
# df.drop('c2',axis=1,inplace=True) # update the current df object to remove 'c2'
# ```
#
# In the same way rows can be deleted setting the axis argument to 0
# ```Python
# df.drop('r2',axis=0,inplace=False) # return a new df object without row 'r2'
# df.drop('r2',axis=0,inplace=True) # update the current df object to remove row 'r2'
# ```
#
#
# ### Rename columns or indexes in the DataFrame
# Column names in the DataFrame can be updated using the rename method
# ```Python
# column_names = {'c0' : 'new_c0',
# 'c1' : 'new_c1',
# 'c2' : 'new_c2',
# 'c3' : 'new_c3'
# }
# df.rename(columns=column_names)
# ```
# ### Combining DataFrames
# Multiple DataFrames can be combined using the concat function. Concat will joint similarly named columns together but will not overwrite indexes. This means you can have multiple rows with the same index inside a pandas dataframe.
#
# ```Python
# combined = pd.concat([df1,df2])
# ```
#
# ### Updating the index
# The index of a dataframe can be reset using the `reset_index` method
# ```Python
# combined_new_index = combined.reset_index()
# ```
# ### Exporting data frames
# DataFrames can be exported to many different formats using Pandas in built functions.
# * The contents of a numerical DataFrame can be exported into a numpy array using `to_numpy()`
# * The DataFrame can be exported into Python's binary format using `to_pickle(filename)`
# * The DataFrame can be exported into a csv using `to_csv(filename)` many of the arguments for importing csv's such as the delimiter and header can be specified as additional arguments.
# * The DataFrame can be exported into an excel worksheet using `to_excel(filename)`. If you want to export multiple DataFrames to the same spreadsheet a unique `worksheet` needs to be defined in as an argument in the function.
#
#
|
4b. Data input and output with pandas.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The key is that we have a connected space that undergoes a continuous transformation. (is this true?!?)
#
#
#
# \begin{align*}
# \frac{d f(x)}{dx} &= \mathop{lim}_{\delta \rightarrow 0} \frac{f(x+\delta) - f(x)}{\delta} \\
# \end{align*}
#
#
# $\forall x, \epsilon > 0\ \exists \delta > 0: \epsilon > \left| \frac{d f(x)}{dx} - \frac{f(x + \delta) - f(x)}{\delta} \right|$. We can make the approximation error ($\epsilon$) arbitrarily small by reducing $\delta$ (the local/linear approximation `approaches' the true gradient). This seems weird as in practice we use learning rates ($\alpha$) that are much greater than $\delta$. So we would expect that the local estimates of the gradient are not accurate unless ???.
#
# This seems interesting?!? Related to linear algebra? But we really care about locally linear, not necessarily linear algebra itself?
#
# \subsection{Incremental changes}
#
# How can we make incremental changes efficient? And how does this relation to incremental lambda calculus and memoization?
#
# Want: ???
#
# Is this really just a more efficient version of forward AD?
#
# The reason we use reverse mode AD is because we have lost of inputs. But, by only considering changes, this greatly reduces the number of inputs. We could also parallelise the computation to give a feasible $\mathcal O(t)$ algorithm for updating weights?
|
gradient-estimation/locally-linear.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Flowtron Style Transfer Demo
# #### Import libraries and setup matplotlib
# +
# %matplotlib inline
import matplotlib
import matplotlib.pylab as plt
import IPython.display as ipd
import json
import sys
import torch
from torch.distributions import Normal
from flowtron import Flowtron
from data import Data
from train import update_params
sys.path.insert(0, "tacotron2")
sys.path.insert(0, "tacotron2/waveglow")
from denoiser import Denoiser
# -
# #### Load Flowtron
# +
# Load the Flowtron JSON config and apply two overrides:
# disable the dummy speaker embedding and force p_arpabet to 1.0.
config_path = "config.json"
params = ["model_config.dummy_speaker_embedding=0",
          "data_config.p_arpabet=1.0"]
with open(config_path) as f:
    data = f.read()
config = json.loads(data)
# update_params mutates `config` in place using the "key=value" strings.
update_params(config, params)
data_config = config["data_config"]
model_config = config["model_config"]
# -
# Load the pretrained Flowtron checkpoint (weights only) onto CPU first,
# then move the model to GPU in eval mode.
model_path = "models/flowtron_ljs.pt"
state_dict = torch.load(model_path, map_location='cpu')['state_dict']
model = Flowtron(**model_config)
model.load_state_dict(state_dict)
_ = model.eval().cuda()
# #### Load WaveGlow
# WaveGlow vocoder plus a denoiser that removes vocoder bias noise.
waveglow_path = 'models/waveglow_256channels_universal_v5.pt'
waveglow = torch.load(waveglow_path)['model']
_ = waveglow.eval().cuda()
denoiser = Denoiser(waveglow).cuda().eval()
# #### Download samples with surprised style and unzip them in the 'data' folder
# [Surprised samples](https://drive.google.com/file/d/100YJu80Y-k5katrwzzE6rFoEHJ2rLmkc/view?usp=sharing) https://drive.google.com/file/d/100YJu80Y-k5katrwzzE6rFoEHJ2rLmkc/view?usp=sharing
# #### Prepare the dataloader
# Build the Data loader for the "surprised" style samples, reusing the
# model's data_config minus the train/validation file lists.
dataset_path = 'data/surprised_samples/surprised_audiofilelist_text.txt'
dataset = Data(
    dataset_path,
    **dict((k, v) for k, v in data_config.items() if k not in ['training_files', 'validation_files']))
# #### Collect z values
# Run every style sample through Flowtron to collect its latent z tensors.
z_values = []
force_speaker_id = 0
for i in range(len(dataset)):
    mel, sid, text, attn_prior = dataset[i]
    mel, sid, text = mel[None].cuda(), sid.cuda(), text[None].cuda()
    # Force all samples onto one speaker id (0) regardless of file metadata.
    if force_speaker_id > -1:
        sid = sid * 0 + force_speaker_id
    in_lens = torch.LongTensor([text.shape[1]]).cuda()
    out_lens = torch.LongTensor([mel.shape[2]]).cuda()
    with torch.no_grad():
        z = model(mel, sid, text, in_lens, out_lens)[0]
    # Permute so frames become the last dimension before storing.
    z_values.append(z.permute(1, 2, 0))
z.size()
len(z_values)
# #### Compute the posterior distribution
# +
# Compute the posterior mean over the collected z values.
# lambd acts as a prior-strength weight; sigma is the posterior std-dev.
lambd = 0.0001
sigma = 1.
n_frames = 300
aggregation_type = 'batch'
if aggregation_type == 'time_and_batch':
    # Average over both time (dim=2) and samples.
    z_mean = torch.cat([z.mean(dim=2) for z in z_values])
    z_mean = torch.mean(z_mean, dim=0)[:, None]
    ratio = len(z_values) / lambd
    mu_posterior = (ratio * z_mean / (ratio + 1))
elif aggregation_type == 'batch':
    # Tile each sample along time until it spans n_frames, then truncate,
    # so all samples can be averaged frame-by-frame across the batch.
    for k in range(len(z_values)):
        expand = z_values[k]
        while expand.size(2) < n_frames:
            expand = torch.cat((expand, z_values[k]), 2)
        z_values[k] = expand[:, :, :n_frames]
    z_mean = torch.mean(torch.cat(z_values, dim=0), dim=0)[None]
    z_mean_size = z_mean.size()
    z_mean = z_mean.flatten()
    ratio = len(z_values) / float(lambd)
    mu_posterior = (ratio * z_mean / (ratio + 1)).flatten()
    # Reshape back to (80 mel channels, frames).
    mu_posterior = mu_posterior.view(80, -1)
print(ratio)
dist = Normal(mu_posterior.cpu(), sigma)
# -
# Baseline latent: a sample from the standard Gaussian prior.
z_baseline = torch.FloatTensor(1, 80, n_frames).cuda().normal_() * sigma
if aggregation_type == 'time_and_batch':
    z_posterior = dist.sample([n_frames]).permute(2,1,0).cuda()
elif aggregation_type == 'batch':
    z_posterior = dist.sample().view(1, 80, -1)[..., :n_frames].cuda()
text = "Humans are walking on the streets?"
text_encoded = dataset.get_text(text).cuda()[None]
# !pwd
z_baseline.size()
# #### Perform inference sampling the posterior and a standard gaussian baseline
speaker = 0
speaker_id = torch.LongTensor([speaker]).cuda()
with torch.no_grad():
    mel_posterior = model.infer(z_posterior, speaker_id, text_encoded)[0]
    mel_baseline = model.infer(z_baseline, speaker_id, text_encoded)[0]
# +
# %matplotlib inline
# Visualize: left column mel spectrograms, right column latent z tensors;
# top row posterior, bottom row baseline.
fig, axes = plt.subplots(2, 2, figsize=(16, 6))
axes[0, 0].imshow(mel_posterior[0].cpu(), aspect='auto', origin='lower', interpolation='none')
im = axes[0, 1].imshow(z_posterior[0].cpu(), aspect='auto', origin='lower', interpolation='none')
plt.colorbar(im, ax=axes[0, 1])
axes[1, 0].imshow(mel_baseline[0].cpu(), aspect='auto', origin='lower', interpolation='none')
im = axes[1, 1].imshow(z_baseline[0].cpu(), aspect='auto', origin='lower', interpolation='none')
plt.colorbar(im, ax=axes[1, 1])
# -
mel_posterior.size()
# #### Posterior sample
# Vocode the posterior mel with WaveGlow, then denoise and play it.
with torch.no_grad():
    audio = denoiser(waveglow.infer(mel_posterior, sigma=0.75), 0.01)
ipd.Audio(audio[0].data.cpu().numpy(), rate=data_config['sampling_rate'])
# #### Baseline sample
with torch.no_grad():
    audio = denoiser(waveglow.infer(mel_baseline, sigma=0.75), 0.01)
ipd.Audio(audio[0].data.cpu().numpy(), rate=data_config['sampling_rate'])
audio[0].data.cpu().numpy().shape
|
inference_style_transfer.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Day 2 - Intro to Python : Intermediate Functions I
# Sample nested data structures used by the mutation exercises below.
x = [ [5,2,3], [10,8,9] ]
students = [
    {'first_name': 'Michael', 'last_name' : 'Jordan'},
    {'first_name' : 'John', 'last_name' : 'Rosales'}
]
sports_directory = {
    'basketball' : ['Kobe', 'Jordan', 'James', 'Curry'],
    'soccer' : ['Messi', 'Ronaldo', 'Rooney']
}
z = [ {'x': 10, 'y': 20} ]
# #### Change the value 10 in x to 15. Once you're done, x should now be [ [5,2,3], [15,8,9] ].
x[1][0] = 15
x
# #### Change the last_name of the first student from 'Jordan' to 'Bryant'
students[0]['last_name'] = 'Bryant'
students
# #### In the sports_directory, change 'Messi' to 'Andres'
sports_directory['soccer'][0] = 'Andres'
sports_directory
# #### Change the value 20 in z to 30
z[0]['y'] = 30
z
# ## Iterate Through a List of Dictionaries
# #### Create a function iterateDictionary(some_list) that, given a list of dictionaries, the function loops through each dictionary in the list and prints each key and the associated value. For example, given the following list:
# +
students = [
{'first_name': 'Michael', 'last_name' : 'Jordan'},
{'first_name' : 'John', 'last_name' : 'Rosales'},
{'first_name' : 'Mark', 'last_name' : 'Guillen'},
{'first_name' : 'KB', 'last_name' : 'Tonel'}
]
def iterateDictionary(mylist):
    """Print the first two key/value pairs of every dictionary in *mylist*.

    Each line is formatted as: ``key0 - value0 , key1 - value1``.
    Assumes every dictionary has at least two entries.
    """
    for record in mylist:
        keys = list(record)
        vals = list(record.values())
        print(keys[0], "-", vals[0], ",", keys[1], "-", vals[1])
iterateDictionary(students)
# -
# ## Get Values From a List of Dictionaries
# #### Create a function iterateDictionary2(key_name, some_list) that, given a list of dictionaries and a key name, the function prints the value stored in that key for each dictionary. For example, iterateDictionary2('first_name', students) should output:
# +
def iterateDictionary2(key_name, mylist):
    """Print the value stored under *key_name* for every dict in *mylist*."""
    for record in mylist:
        print(record[key_name])
iterateDictionary2('first_name', students)
# -
iterateDictionary2('last_name', students)
# ## Iterate Through a Dictionary with List Values
# #### Create a function printInfo(some_dict) that given a dictionary whose values are all lists, prints the name of each key along with the size of its list, and then prints the associated values within each key's list. For example:
# +
dojo = {
'locations': ['San Jose', 'Seattle', 'Dallas', 'Chicago', 'Tulsa', 'DC', 'Burbank'],
'instructors': ['Michael', 'Amy', 'Eduardo', 'Josh', 'Graham', 'Patrick', 'Minh', 'Devon']
}
def printInfo(mydict):
    """Print each key (upper-cased) followed by every item in its list value.

    An explicit newline is printed after each section, which together with
    print's own newline leaves a blank line between sections.
    """
    for key, items in mydict.items():
        print(key.upper())
        for item in items:
            print(item)
        print("\n")
printInfo(dojo)
# +
##
|
W1_D2_IntermediateFunctions2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import glob
import pandas
# De-duplicate every CSV under dir_path, rewriting each file in place.
dir_path = "./all_images"
csv_paths = glob.glob(os.path.join(dir_path, "*.csv"))
for path in csv_paths:
    dataframe = pandas.read_csv(filepath_or_buffer=path,encoding="utf-8")
    dataframe = dataframe.drop_duplicates()
    # BUG FIX: index=False keeps the rewrite idempotent — without it each
    # pass prepends the row index as a new "Unnamed: 0" column, so the
    # file grows a column every time the script runs.
    dataframe.to_csv(path, index=False)
|
delete_duplicate_csv.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
from convokit import Corpus, User, Utterance
import json
from collections import Counter
#you may need this:#
# !pip install spacy
#And this#
# !python -m spacy download en
#One more#
# !pip install nltk
# Season-level transcript files from the emorynlp character-mining release.
input_files = ['friends_season_01.json',
               'friends_season_02.json',
               'friends_season_03.json',
               'friends_season_04.json',
               'friends_season_05.json',
               'friends_season_06.json',
               'friends_season_07.json',
               'friends_season_08.json',
               'friends_season_09.json',
               'friends_season_10.json']
# +
#for the purposes of the notebook, here's a means of downloading the corpus virtually.#
import requests
import json
# read the JSON file from the web
for file in input_files:
    link = 'https://raw.githubusercontent.com/emorynlp/character-mining/master/json/' + file
    r = requests.get(link)
    #loading as seasons#
    # NOTE(review): `season` is overwritten each iteration, so after the
    # loop only the last season's JSON remains bound — later cells that
    # need all seasons re-fetch inside their own loops.
    season = json.loads(r.text)
# -
# **USER CORPUS**
# Developing a robust user corpus is a priority of my project given its focus on characterization. Knowing this, I spent the bulk of my working generating a few additional types of metadata to include with each user. Another priority of the code is to retain season-level information within the grander context of the series as a whole.
#
# In future versions of this dataset, I would be interested in including episode-level and scene-level information about users. In terms of sourcing outside metadata, I think that Gender metadata could be sourced from crosslisting character names with IMDB.
#
# Below, I offer an example of the current pipeline to give an overview of the conversion process. Since two of its functions have not been defined yet, this pipeline is non-functional as is.
# Example pipeline: build per-season tallies, then merge them series-wide.
# NOTE(review): this variant reads the JSON files from the local disk
# (open(name)), unlike the download cell above — it assumes the files
# have been saved locally.
character_distribution = []
for name in input_files:
    with open(name) as data:
        season = json.load(data)
    season_id = season['season_id']
    episodes = season['episodes']
    character_distribution.append([season_id,season_speaking_users(episodes)])
character_matrix = series_speaking_users(character_distribution)
# The pipeline above stresses the production of a season-level _character distribution_ and a series-level _character matrix_. I divide the dataset as such in order to better account for the distribution of user dialogue and reference across different scales of narrative. The function **season_speaking_users** works to divide and count users into two main roles, speakers and figures of reference, and assign the quality of being a nonspeaker, a nonspeaking user who is referenced. Nonspeaking users fascinatingly are typically either famous guest stars (like <NAME> Jr.) or entirely generic onscreen figures, like a silent airplane steward.
#
# While seasons one through four feature an additional category called "character_entities" that refers to the characters mentioned in or around the conversation, this convention is dropped from season five on. That is, it's not possible using the current dataset to track nonspeaking users throughout the duration of the season. I do believe, however, it would be worthwhile to see if: 1.)nonspeaking characters reccur or become a type of trope (Are there silent characters who show up on screen and shrug for laugh? Is it usually poorly-disguised celebrities who fill these roles to produce a sight gag?) 2.)If the characaters who are referenced by main characters in earlier seasons end up getting more speaking roles as the series progresses.
def season_speaking_users(episodes):
    """Tally speaking and referenced users for one season.

    Walks every scene/utterance in *episodes* and returns a 3-item list:
      [0] Counter mapping speaker name -> number of utterances spoken,
      [1] Counter mapping referenced name -> number of references
          (only utterances carrying 'character_entities' contribute),
      [2] list of names referenced but never speaking, in first-referenced
          order.

    Fixes over the original: local names no longer shadow the function
    name, and the non-speaker scan uses Counter membership instead of
    rebuilding key lists inside the loop (was O(n^2)).
    """
    spoken = []      # one entry per speaker occurrence
    referenced = []  # one entry per referenced-name occurrence
    for episode in episodes:
        for scene in episode['scenes']:
            for utterance in scene['utterances']:
                # An utterance may have several speakers (characters
                # talking in unison); count each of them.
                spoken.extend(utterance['speakers'])
                # Each element of 'character_entities' is a list of
                # entity spans; each span looks like [start, end, name].
                for entities in utterance.get('character_entities', []):
                    if len(entities) > 1:
                        referenced.extend(entity[2] for entity in entities)
    speaker_counts = Counter(spoken)
    referenced_counts = Counter(referenced)
    # Referenced-but-silent users, preserving first-referenced order
    # (Counter keeps insertion order).
    non_speaking = [name for name in referenced_counts
                    if name not in speaker_counts]
    return [speaker_counts, referenced_counts, non_speaking]
# The code above uses counters to measure the number of utterance and references each character makes. It also features a small amount of code to discover nonspeaking users. Ultimately, the code produces two dictionaries that take the names of characters for keys and return the number of utterances and references that respectively occur within the season. It also returns a list of nonspeaking users.
#
# 1.) One of the larger peculiarities of the dataset was instances where characters would talk in unison. I'm not sure if there's a good way to parse this - should the collective be treated as a single user? - though it would be interesting to see if there are certain combinations that happen frequently throughout the seasons.
def series_speaking_users(character_distribution):
    """Merge per-season user tallies into one series-level dictionary.

    *character_distribution* is a list of
    ``[season_id, [speaker_counter, referenced_counter, nonspeaker_list]]``
    entries. Returns a dict mapping every character name to
    ``[total_spoken, per_season_spoken, total_referenced,
    per_season_referenced]``, where each per-season list holds
    ``[season_id, count]`` pairs.
    """
    # 1.) Gather every character name across speakers, referenced users
    # and nonspeakers, keeping first-seen order without duplicates.
    all_characters = []
    seen = set()
    for _, (speakers, referees, nonspeakers) in character_distribution:
        for name in (*speakers, *referees, *nonspeakers):
            if name not in seen:
                seen.add(name)
                all_characters.append(name)
    # 2.) For each character, accumulate series totals alongside the
    # per-season breakdown. Counter lookups default to 0 for characters
    # absent from a season, so missing seasons contribute zero counts.
    characters_tagged = {}
    for character in all_characters:
        total_spoken = 0
        total_referenced = 0
        season_spoken = []
        season_referenced = []
        for season_id, (speakers, referees, _) in character_distribution:
            spoke = speakers[character]
            cited = referees[character]
            total_spoken += spoke
            total_referenced += cited
            season_spoken.append([season_id, spoke])
            season_referenced.append([season_id, cited])
        characters_tagged[character] = [total_spoken, season_spoken,
                                        total_referenced, season_referenced]
    return characters_tagged
# 1.) In order to collect series-level statistics from the season-level this function's first half collects the names of all users across the two qualities and nonspeaking condition.
#
# 2.) That list is then fed into and combined with the season-level dictionaries created by the previous function. Two **total** qualities track all of the utterances and references that occur for a user within the series, while season_spoken and season_reference return each season and its counts. Part of the intuition for this approach is to see how much a character's share and quantity of dialogue changes through the course of several seasons.
character_distribution = []
for file in input_files:
link = 'https://raw.githubusercontent.com/emorynlp/character-mining/master/json/' + file
r = requests.get(link)
season = json.loads(r.text)
season_id = season['season_id']
episodes = season['episodes']
character_distribution.append([season_id,season_speaking_users(episodes)])
character_matrix = series_speaking_users(character_distribution)
# Let's see an example of how the character_matrix works!
character_matrix['<NAME>']
# +
# Wrap the series-level tallies as ConvoKit User metadata.
# character_matrix[user] holds
# [total_spoken, per_season_spoken, total_referenced, per_season_referenced].
user_meta = {}
for user in character_matrix.keys():
    # BUG FIX: "character_name" previously stored the entire stats list
    # (character_matrix[user]) instead of the character's name.
    user_meta[user] = {"character_name": user,
                       "total_utterances": character_matrix[user][0],
                       "utterances_per_season": character_matrix[user][1],
                       "total_references": character_matrix[user][2],
                       "references_per_season": character_matrix[user][3]}
##making Corpus##
corpus_users = {k: User(name = k, meta = v) for k,v in user_meta.items()}
# -
# In this version of the User Corpus, each user ends up with five characteristics. I intend to use the proportions of these characteristics to determine what role utterance and reference quantity play in separating major and minor characters across seasons.
#
# In terms of adding future metadata, I would be interested in seeing the average-sized converation each character participate in per season and the average size of the groups those conversations take place in. When thinking about major and minor characters, it would be curious to see if speaking-but-not-major characters tend to be relegated to certain group sizes or shorter conversational instances.
#
# What I'd love most in terms of metadata, however, was a way to parse the "transcript with note" subcategory in each utterance for character information/set directions. It would be fascinating to see which physical behaviors and details get assigned to which character.
# **UTTERANCE CORPUS**
# Given my project's interest in users, I've made significantly fewer modifications in generating metadata for the utterance corpus. Nevertheless, I believe that in future iterations of this code, the utterance corpus will make some of the user-processing accomodations I made above redundant. So it goes with version 1!
#
#
# Build a ConvoKit Utterance for every line of dialogue in the series.
utterance_corpus = {}
for file in input_files:
    link = 'https://raw.githubusercontent.com/emorynlp/character-mining/master/json/' + file
    r = requests.get(link)
    season = json.loads(r.text)
    season_id = season['season_id']
    episodes = season['episodes']
    #############################
    for id in range(len(episodes)):
        episode = episodes[id]
        scenes = episode['scenes']
        for scene in scenes:
            for place in range(len((scene['utterances']))):
                utterance = scene['utterances'][place]
                # NOTE: `id` is rebound here from the episode index to the
                # utterance id string (e.g. "s01_e01_c01_u001").
                id = utterance['utterance_id']
                ##1.)Some utterances involve multiple speakers stored as a list. This process treats them individually, though it is worth##
                ##contesting whether two characters speaking in unison is a singular utterance##
                if len(utterance["speakers"]) >= 1:
                    for p in range(len(utterance["speakers"])):
                        user = User(utterance["speakers"][p])
                        ##2.)Getting the root is relatively easy given how organized the dataset is already. This code replaces the ending of scenes##
                        ##with the first utterance of the scene##
                        # Root = first utterance of the same scene ("u001").
                        part = id.split('_')[:3]
                        part.append('u001')
                        root = '_'.join(part)
                        ##3.)There is a 'character_entities' subsection of each utterance that features all characters involved and or referenced.##
                        ##Designating a reply from this data is theoretically more accurate than going to the previous utterance, but not all seasons##
                        ##contain this metadata##
                        if id.split('_')[3] == "u001":
                            reply_to = None
                        else:
                            # NOTE(review): reply_to is set to a speaker
                            # *name* from the previous utterance, not an
                            # utterance id — verify this matches what the
                            # ConvoKit Utterance constructor expects.
                            prior = scene['utterances'][place - 1]['speakers']
                            if len(prior) >= 1:
                                for p in range(len(prior)):
                                    reply_to = prior[p]
                            else:
                                reply_to = None
                        timestamp = None
                        ##there's a tokenized version available in the data set too##
                        text = utterance["transcript"]
                        utterance_corpus[id] = Utterance(id, user, root, reply_to, timestamp, text)
# To expand on the comments above:
#
# 2.) In thinking more about the qualities of syndicated televison, many scenes and conversations begin in medias res or correspond to a cliffhanger from before the commercial break. I can't think of a better to determine where a conversation starts, but I do think it's important to measure the composition and similarity of conversations across scenes.
#
# 3.) In measuring replies, the code-as-is assumes that the current utterance is responding to the one immediately before it. Honestly, I don't feel super comfortable with this assumption. For one, _Friends_ is known for having at least one two characters who introduce nonsequitirs into conversation. **A distinction should be made between if being a part of a scene and being a part of a conversation are the same thing, especially in a comedy**. I would be interested to see how well the character_entities data corresponds to assuming conversationality in a linear representation of a scene.
utterance_list = [utterance for k,utterance in utterance_corpus.items()]
series_corpus = Corpus(utterances=utterance_list, version=1)
convo_ids = series_corpus.get_conversation_ids()
# Above are the three bits of code from the tutorial. Since not much worthwhile additional metadata exists to add to this corpus, I have forgone it. If I were able to link IMDB to characters, however, I would also be able to get average ratings for each episode. This could be interesting for seeing if there's a generally positive response to certain cohorts of users.
print("number of conversations in the dataset = {}".format(len(series_corpus.get_conversation_ids())))
series_corpus.dump("friends_corpus", base_path= "./")
series_corpus.print_summary_stats()
# **Transformations and Parsing**
#
# This section involves parsing conversations in the series corpus. It takes quite a long time.
from convokit import Parser
annotator = Parser()
series_corpus = annotator.fit_transform(series_corpus)
# **Dalliances with Politeness**
#
# In this section, I explore a few ways that politeness could be correlated with a character's longevity in the show. Another way of framing this is do characters who have similar compositions of impolite|polite conversation to major characters have a higher likelihood of remaining on the show?
#this might be necessary#
import nltk
nltk.download('punkt')
#this certainly is necessary#
from convokit import PolitenessStrategies
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
ps = PolitenessStrategies(verbose=100)
politeness_corpus = ps.transform(series_corpus)
utterance_ids = politeness_corpus.get_utterance_ids()
rows = []
for uid in utterance_ids:
rows.append(politeness_corpus.get_utterance(uid).meta["politeness_strategies"])
politeness_strategies = pd.DataFrame(rows, index=utterance_ids)
politeness_strategies.head(10)
# **Politeness Vector**
#
# As you can see above, there are many utterances and many different types of politeness. It's worth theorizing which of these qualities is most important for increasingly the likelihood of of a returning character. First, however, it's important to look at whether there are noticable differences between all character speech patterns by treating politeness as a vector.
# +
# Build one politeness "vector" per character: the mean frequency of each
# politeness strategy across all of that character's utterances.
characters = politeness_corpus.get_usernames()
#treating politeness as vector#
politeness_speech_patterns = []
for character in characters:
    user1 = series_corpus.get_user(character)
    utterances = user1.get_utterance_ids()
    rows = []
    for uid in utterances:
        # BUG FIX: this previously referenced the undefined name `corpus`
        # (NameError); the annotated corpus is `politeness_corpus`, as in
        # the earlier cells.
        rows.append(list(politeness_corpus.get_utterance(uid).meta["politeness_strategies"].values()))
    res = np.sum(rows, 0)
    res = [i/len(rows) for i in res]
    politeness_speech_patterns.append([character, res])
# +
#maths#
def norm(vector):
    """Return the Euclidean (L2) norm of *vector*."""
    squared_total = sum(component * component for component in vector)
    return squared_total ** 0.5
def dot_prod(vector1, vector2):
    """Return the dot product of *vector1* and *vector2*.

    Pairs components by position over the length of *vector1*, so a
    shorter *vector2* raises IndexError just like direct indexing.
    """
    paired = ((value, vector2[index]) for index, value in enumerate(vector1))
    return sum(a * b for a, b in paired)
def cos_sim(matrix):
    """Return the pairwise cosine-similarity matrix of the row vectors in
    *matrix*, with every entry rounded to three decimals (via "%.3f",
    exactly as before)."""
    def _norm(vec):
        return sum(x * x for x in vec) ** 0.5

    def _dot(u, w):
        return sum(a * b for a, b in zip(u, w))

    similarity_rows = []
    for row_vec in matrix:
        row = []
        for col_vec in matrix:
            value = _dot(row_vec, col_vec) / (_norm(row_vec) * _norm(col_vec))
            row.append(float("%.3f" % value))
        similarity_rows.append(row)
    return similarity_rows
# -
politeness_speech_matrix = [i[1] for i in politeness_speech_patterns]
print(politeness_speech_patterns[:10])
cos_politeness_speech_matrix = cos_sim(politeness_speech_matrix)
# +
#plotting#
psm_array = np.array(cos_politeness_speech_matrix)
fig, ax = plt.subplots()
im = ax.imshow(psm_array)
cbarlabel = "Cosine Similarity"
cbar = ax.figure.colorbar(im, ax=ax)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
ax.set_title("All Characters")
fig=plt.figure(figsize=(18, 16), dpi= 300, facecolor='w', edgecolor='k')
plt.show()
# -
# This graph isn't very helpful. Let's try reordering characters in order of number of utterances.
# +
characters = politeness_corpus.get_usernames()
char_uter = []
for character in characters:
user1 = series_corpus.get_user(character)
utterances = user1.get_utterance_ids()
series_spoken = len(utterances)
char_uter.append([character, series_spoken])
char_uter[0]
char_uter_most = sorted(char_uter, key=lambda x: x[1])
max_char = [i[0] for i in reversed(char_uter_most)]
# -
# Rebuild the politeness vectors in most-talkative-first order so the
# similarity heatmap groups the busiest characters together.
max_politeness_speech_patterns = []
for character in max_char:
    user1 = series_corpus.get_user(character)
    utterances = user1.get_utterance_ids()
    rows = []
    for uid in utterances:
        # BUG FIX: `corpus` was an undefined name (NameError); use
        # `politeness_corpus` like the earlier cells.
        rows.append(list(politeness_corpus.get_utterance(uid).meta["politeness_strategies"].values()))
    res = np.sum(rows, 0)
    res = [i/len(rows) for i in res]
    max_politeness_speech_patterns.append([character, res])
max_politeness_speech_matrix = [i[1] for i in max_politeness_speech_patterns]
max_cos_politeness_speech_matrix = cos_sim(max_politeness_speech_matrix)
# BUG FIX: this cell previously echoed `max_cos_politeness_speech_patterns`,
# a name that was never defined (NameError).
max_cos_politeness_speech_matrix
# +
labs = max_char
psm_array = np.array(max_cos_politeness_speech_matrix)
fig, ax = plt.subplots()
im = ax.imshow(psm_array)
cbarlabel = "Cosine Similarity"
cbar = ax.figure.colorbar(im, ax=ax)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
ax.set_title("All Characters")
fig=plt.figure(figsize=(18, 16), dpi= 300, facecolor='w', edgecolor='k')
plt.show()
# -
ax.set_xticks(np.arange(len(labs)))
ax.set_yticks(np.arange(len(labs)))
# +
# Classify characters by how much they speak at several narrative scales.
# Thresholds: >100 utterances in the series, >75 in any single season,
# >25 in any single episode, >7 in any single scene.
recurring_characters = []
major_series_speaking_characters = []
major_season_speaking_characters = []
major_episode_speaking_characters = []
major_scene_speaking_characters = []
for character in characters:
    user1 = series_corpus.get_user(character)
    utterances = user1.get_utterance_ids()
    series_spoken = len(utterances)
    if series_spoken > 100:
        major_series_speaking_characters.append(character)
    season_spoken = []
    episode_spoken = []
    scene_spoken = []
    for utterance in utterances:
        # Utterance ids look like "s01_e01_c01_u001":
        # season / episode / scene / utterance.
        u_split = utterance.split("_")
        season_spoken.append(u_split[0])
        # BUG FIX: the episode and scene prefixes were each sliced one
        # part short ([:1] and [:2]), which made the "episode" key equal
        # to the season and the "scene" key equal to the episode.
        episode_spoken.append("_".join(u_split[:2]))
        scene_spoken.append("_".join(u_split[:3]))
    season_sc = list(Counter(season_spoken).items())
    episode_sc = list(Counter(episode_spoken).items())
    scene_sc = list(Counter(scene_spoken).items())
    if len(season_sc) > 1:
        recurring_characters.append(character)
    for item in season_sc:
        if item[1] > 75:
            major_season_speaking_characters.append(character)
    for item in episode_sc:
        if item[1] > 25:
            major_episode_speaking_characters.append(character)
    for item in scene_sc:
        if item[1] > 7:
            major_scene_speaking_characters.append(character)
print("Total Characters:")
print(len(characters))
print("Characters who appear in more than 1 season")
print(len(recurring_characters))
print("Characters who speak more than 100 times in the series:")
print(len(major_series_speaking_characters))
print("Characters who speak more than 75 times in any given season:")
# BUG FIX: this previously printed the series-level set again instead of
# the season-level one.
print(len(set(major_season_speaking_characters)))
print("Characters who speak more than 25 times per episode:")
print(len(set(major_episode_speaking_characters)))
print("Characters who speak more than 7 times per scene:")
print(len(set(major_scene_speaking_characters)))
# De-duplicate the classification lists.
major_series_speaking_characters = list(set(major_series_speaking_characters))
major_season_speaking_characters = list(set(major_season_speaking_characters))
major_episode_speaking_characters = list(set(major_episode_speaking_characters))
major_scene_speaking_characters = list(set(major_scene_speaking_characters ))
# -
|
datasets/friends-corpus/friends-corpus_convert.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # make dictionary from terms
#
#
# ### variables
#
#
# +
# Path configuration for the dictionary-building workflow.
# NOTE(review): HOME is hard-coded to one user's machine — adjust before
# running elsewhere.
# specific problem
PROJECT = "dictionary"
# where is ami3? (assume you have checked out or copied `ami3` distrib)
HOME = "/Users/pm286/"
WORKSPACE = HOME + "workspace/"
AMI3 = WORKSPACE + "cmdev/ami3/"
# local workspace
WORK = WORKSPACE + "work/"
PROJECT_WORK = WORK + PROJECT + "/"
### ami uses a CProject
# PROJECT = THERMO_WORK
# general data resource within the `ami3` distrib
TEST_RESOURCES = AMI3 + "src/test/resources/"
AMI_DATA = TEST_RESOURCES + "org/contentmine/ami/"
# specific
PROJECT_DATA = AMI_DATA + PROJECT + "/"
# +
# ! cd $PROJECT_WORK
# ! amidict -v --dictionary=bird --directory=birds create --input=birds.txt
# -
# ! cd birds
pwd
# ls
# ! more bird.xml
# ls
# cd birds
# ls
# ! more bird.xml
# ! getpapers -h
# ! getpapers -q "aardvark" -o aardvarkx -x -k 200
# ! ami -p aardvarkx search --dictionary country
# ! ami search --help
# ls
# ls aardvarkx/__cooccurrence/*.svg
|
src/ipynb/work/dictionary/search.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Using OpenDR for Robotic Perception
# OpenDR aims to provide a homogeneous, easy to use interface for various perception tools.
#
# The main inference pipeline is the following:
#
# ```python
# from opendr.perception.X import yLeaner
#
# yLeaner(device='your inference device')
# yLeaner.download() # get a pretrained model
# # or
# yLeaner.load('x') # load an existing model
#
# ````
#
# After initializing the model, we can readily use it just by calling `infer()`:
#
# ```python
# results = yLearner.infer(my_data)
# ```
#
# The `results` are encapsulated OpenDR datatypes that provide a uniform interface among different Learners used for similar tasks.
# Before doing anything, we will load some helper functions:
# %matplotlib inline
from matplotlib import pyplot as plt
from utils import VideoReader, FPSCounter
import cv2, numpy as np
# *matplotlib* is used for plotting, while *utils* provides an easy to use camera interface to grab frames, as well as FPS counter. *cv2* and *numpy* can be also useful for various tasks.
# So let's start by trying a Pose Estimation algorithm. First, we need to import the corresponding learner class
from opendr.perception.pose_estimation import LightweightOpenPoseLearner
# Also note that some tools also provide various utilities, such as visualization tools. Pose estimation provides the `draw()` function that can be used for drawing the detected keypoints.
from opendr.perception.pose_estimation import draw
# Now we are ready to initialize the Learner, download and load a pretrained model.
# Instantiate the lightweight OpenPose learner for GPU inference.
pose_estimator = LightweightOpenPoseLearner(device='cuda')
# Download pretrained weights into the current directory, then load them.
pose_estimator.download(path=".", verbose=True)
pose_estimator.load("openpose_default")
# We also need an image provider and a way to measure the number of FPS:
image_provider = VideoReader(0)  # device index 0: the default webcam
fps_counter = FPSCounter()
# So, we can start getting the frames from the web cam and then use the pose estimation algorithm to predict the keypoints and draw them onto the frame:
# +
# Live demo loop: grab webcam frames, run pose estimation, overlay the
# detected keypoints, and display each frame until 'q' is pressed.
for counter, img in enumerate(image_provider):
    fps_counter.tic()
    # This is where we perform pose estimation
    poses = pose_estimator.infer(img)
    fps_counter.toc(img)
    # We also annotate the poses on the acquired frames
    for pose in poses:
        draw(img, pose)
    cv2.imshow('Result', img)
    if cv2.waitKey(1) == ord('q'):  # press 'q' to stop the demo
        break
# Release the display window and the camera.
cv2.destroyAllWindows()
image_provider.close()
# -
# Note that we can easily run the algorithm on `cpu`
# Re-create the learner for CPU inference and load the same pretrained weights
# (already downloaded above).
pose_estimator = LightweightOpenPoseLearner(device='cpu')
pose_estimator.load("openpose_default")
# The rest of the code does not change:
# +
# Same webcam demo loop as before, now running inference on the CPU.
fps_counter = FPSCounter()
image_provider = VideoReader(0)
for counter, img in enumerate(image_provider):
    fps_counter.tic()
    poses = pose_estimator.infer(img)
    fps_counter.toc(img)
    # Overlay the detected keypoints on the current frame.
    for pose in poses:
        draw(img, pose)
    cv2.imshow('Result', img)
    if cv2.waitKey(1) == ord('q'):  # press 'q' to stop the demo
        break
cv2.destroyAllWindows()
image_provider.close()
# -
# We can then further optimize the model just by calling `optimize()` and setting other optional arguments (documented in OpenDR's documentation):
# Re-create the learner with lighter settings, then call optimize().
# NOTE(review): fewer refinement stages, a strided MobileNet backbone and
# half precision presumably trade some accuracy for speed — confirm the
# exact semantics against the OpenDR documentation.
pose_estimator = LightweightOpenPoseLearner(device='cpu', num_refinement_stages=1, mobilenet_use_stride=True, half_precision=True)
pose_estimator.load("openpose_default")
pose_estimator.optimize()
# Again, the main inference loop does not change
#
# +
fps_counter = FPSCounter()
image_provider = VideoReader(0)
for counter, img in enumerate(image_provider):
    fps_counter.tic()
    poses = pose_estimator.infer(img)
    fps_counter.toc(img)
    # Overlay the detected keypoints on the current frame.
    for pose in poses:
        draw(img, pose)
    cv2.imshow('Result', img)
    if cv2.waitKey(1) == ord('q'):  # press 'q' to stop the demo
        break
cv2.destroyAllWindows()
image_provider.close()
# -
# We can also grab an isolated frame and perform inference on this:
# Grab a single frame from the webcam, then release the camera immediately.
image_provider = iter(VideoReader(0))
img = next(image_provider)
image_provider.close()
# OpenCV frames are BGR; reorder the channels to RGB for matplotlib.
plt.imshow(img[:, :, [2, 1, 0]])
poses = pose_estimator.infer(img)
# Let's examine the output:
print(poses[0])
# Note that OpenDR datatypes are cast into a human-readable format, allowing for easily examining what happens under the hood.
# We can of course plot these poses using the `draw()` function as before:
# Draw every detected pose on the frame. The previous revision called
# draw(img, pose), which only drew the stale `pose` variable left over
# from the earlier webcam loop rather than the poses inferred above.
for pose in poses:
    draw(img, pose)
plt.imshow(img[:, :, [2, 1, 0]])  # BGR -> RGB for matplotlib display
|
demos/.ipynb_checkpoints/1. Pose Estimation-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The 1-D Linear Convection equation is the simplest, most basic model that can be used to learn something about CFD. It is surprising that this little equation can teach us so much! Here it is:
#
# $$\frac{\partial u}{\partial t} + c \frac{\partial u}{\partial x} = 0$$
#
# With given initial conditions (understood as a *wave*), the equation represents the propagation of that initial *wave* with speed $c$, without change of shape. Let the initial condition be $u(x,0)=u_0(x)$. Then the exact solution of the equation is $u(x,t)=u_0(x-ct)$.
#
# We discretize this equation in both space and time, using the Forward Difference scheme for the time derivative and the Backward Difference scheme for the space derivative. Consider discretizing the spatial coordinate $x$ into points that we index from $i=0$ to $N$, and stepping in discrete time intervals of size $\Delta t$.
#
# From the definition of a derivative (and simply removing the limit), we know that:
#
# $$\frac{\partial u}{\partial x}\approx \frac{u(x+\Delta x)-u(x)}{\Delta x}$$
#
# Our discrete equation, then, is:
#
# $$\frac{u_i^{n+1}-u_i^n}{\Delta t} + c \frac{u_i^n - u_{i-1}^n}{\Delta x} = 0 $$
#
# Where $n$ and $n+1$ are two consecutive steps in time, while $i-1$ and $i$ are two neighboring points of the discretized $x$ coordinate. If there are given initial conditions, then the only unknown in this discretization is $u_i^{n+1}$. We can solve for our unknown to get an equation that allows us to advance in time, as follows:
#
# $$u_i^{n+1} = u_i^n - c \frac{\Delta t}{\Delta x}(u_i^n-u_{i-1}^n)$$
# Set up the spatial grid, time step and initial condition for the
# 1-D linear convection equation on the domain x in [0, 2].
import numpy as np
import matplotlib.pyplot as plt
nx = 61 # try changing this number from 41 to 81 and Run All ... what happens?
dx = 2 / (nx-1)  # grid spacing: domain length 2 split into nx-1 intervals
nt = 25 #nt is the number of timesteps we want to calculate
dt = .025 #dt is the amount of time each timestep covers (delta t)
c = 1 #assume wavespeed of c = 1
uinit = np.ones(nx) #numpy function ones()
uinit[int(.5 / dx):int(1 / dx + 1)] = 2 #setting u = 2 between 0.5 and 1 as per our I.C.s
# Additionally zero out the region just past x = 1 up to x = 1.5.
uinit[int(1/dx+1)+1:int(1.5/dx+1)] = 0
u=uinit.copy()
print(u)
plt.plot(np.linspace(0,2,nx),u);plt.show()
# Now it's time to implement the discretization of the convection equation using a finite-difference scheme.
#
# For every element of our array `u`, we need to perform the operation $u_i^{n+1} = u_i^n - c \frac{\Delta t}{\Delta x}(u_i^n-u_{i-1}^n)$
#
# We'll store the result in a new (temporary) array `un`, which will be the solution $u$ for the next time-step. We will repeat this operation for as many time-steps as we specify and then we can see how far the wave has convected.
#
# We first initialize our placeholder array `un` to hold the values we calculate for the $n+1$ timestep, using once again the NumPy function `ones()`.
#
# Then, we may think we have two iterative operations: one in space and one in time (we'll learn differently later), so we'll start by nesting one loop inside the other. Note the use of the nifty `range()` function. When we write: `for i in range(1,nx)` we will iterate through the `u` array, but we'll be skipping the first element (the zero-th element). *Why?* Including the zeroth element leads to an index of -1, creating a periodic boundary condition in which the wave re-emerges from the left side.
# Time-march the 1-D linear convection equation with a forward-time,
# backward-space (upwind) finite-difference scheme, plotting every step.
# (The previous `un = np.ones(nx)` pre-initialization was dead code: `un`
# is overwritten by `ucomp.copy()` on every iteration, so it is removed.)
ucomp = u.copy()
for _ in range(nt):
    un = ucomp.copy()  # snapshot of the solution at time level n
    plt.plot(np.linspace(0, 2, nx), ucomp)
    # At index 0, un[index - 1] is un[-1]: a periodic boundary condition,
    # so the wave re-enters from the left edge.
    for index in range(nx):
        ucomp[index] = un[index] - c * (dt / dx) * (un[index] - un[index - 1])
plt.plot(np.linspace(0, 2, nx), ucomp)
plt.show()
#
|
CFDPython/lessons-own/01_Step_1_own.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Термодинамические параметры. Газовые законы
#
# * [Микро- и макропараметры состояния газа](#Микро--и-макропараметры-состояния-газа)
# * [Основное уравнение МКТ](#Основное-уравнение-МКТ)
# * [Температура. Абсолютная температура](#Температура.-Абсолютная-температура)
# * [Модель идеального газа](#Модель-идеального-газа)
# * [Уравнение Менделеева-Клапейрона](#Уравнение-Менделеева-–-Клапейрона-(уравнение-состояния-идеального-газа))
# * [Связь температуры со средней кинетической энергией молекул вещества](#Связь-температуры-со-средней-кинетической-энергией-молекул-вещества)
# * [Определение первого закона термодинамики](#Определение-первого-закона-термодинамики)
# * [Первый закон термодинамики в процессах](#Первый-закон-термодинамики-в-процессах)
# * [Применение](#Применение)
# * [Функции распределения](#Функции-распределения)
# * [Распределение Максвелла](#Распределение-Максвелла)
# * [Распределение Больцмана](#Распределение-Больцмана)
# * [Распределение Максвелла-Больцмана](#Распределение-Максвелла-Больцмана)
#
# **Термодинамика** — раздел физики, в котором изучаются процессы изменения и превращения внутренней энергии тел, а также способы использования внутренней энергии тел в двигателях. Собственно, именно с анализа принципов первых тепловых машин, паровых двигателей и их эффективности и зародилась термодинамика. Можно сказать, что этот раздел физики начинается с небольшой, но очень важной работы молодого французского физика Николя Сади Карно.
#
# ### Микро- и макропараметры состояния газа
# Система, состоящая из большого числа молекул, называется макросистемой. Макросистема, отделенная от внешних тел стенками с постоянными свойствами, после длительного промежутка времени приходит в равновесное состояние. Это состояние можно описать рядом параметров, называемых *параметрами состояния*. Различают *микропараметры* и *макропараметры* состояния.
#
# К микропараметрам состояния можно отнести следующие физические величины: массу $m_0$ молекул, их скорость, среднюю квадратичную скорость молекул, среднюю кинетическую энергию молекул, среднее время между соударениями молекул, длину их свободного пробега и др. Это такие параметры, которые можно отнести и к одной молекуле макросистемы.
#
# Макропараметры состояния характеризуют только равновесную систему в целом. К ним относятся объем $V$, давление $P$, температура $T$, плотность $\rho$, концентрация $n$, внутренняя энергия $U$, электрические, магнитные и оптические параметры. Значения этих параметров могут быть установлены с помощью измерительных приборов.
#
# Молекулярно-кинетическая теория идеального газа устанавливает соответствие между микропараметрами и макропараметрами газа.
#
# **Таблица. Mикропараметры состояния**
#
# |Параметр | Обозначение | Единицы в СИ |
# |:----------------------------------------------------|:----------------:|:--------------:|
# |Масса молекулы | $m_0$ | $кг$ |
# |Скорость молекулы | $v$ | $м/c$ |
# |Cредняя квадратичная скорость движения молекул |$\overline v_{кв}$| $м/c$ |
# |Средняя кинетическая энергия поступательного движения|$\overline E_{к}$ | $Дж$ |
#
# **Таблица. Макропараметры состояния**
#
# |Параметр |Обозначение| Единицы в СИ |Способ измерения (косвенный способ)|
# |:-----------|:-------------:|:---------------:|:-------------------------------:|
# |Масса газа |$M$ |$кг$|Весы|
# |Объем сосуда| $V$ |$м^3$|Мерный цилиндр с водой\\измерение размеров и расчет по формулам геометрии|
# |Давление |$P$ |$Па$|Манометр|
# |Температура| $T$ |$К$|Термометр|
# |Плотность | $\rho$|$кг/м^3$|Измерение массы, объема и расчет|
# |Концентрация| $n$ |$1/м^3 = м^{-3}$ |Измерение плотности и расчет с учетом молярной массы|
# |Cостав (молярная масса и соотношение количеств)|$М_1$, $М_2$, $\frac{n_1}{n_2}$ |$\frac{кг}{моль}$, $безразмерная$|Приготовление газа смешением заданных масс или объемов|
#
# ### Основное уравнение молекулярно-кинетической теории идеального газа
#
# Это уравнение связывает макропараметры системы – давление $P$ и концентрацию молекул $n=\frac{N}{V}$ с ее микропараметрами – массой молекул, их средним квадратом скорости или средней кинетической энергией:
#
# $$p=\frac{1}{3}nm_0\overline{v^2} = \frac{2}{3}n\overline{E_k}$$
#
# Вывод этого уравнения основан на представлениях о том, что молекулы идеального газа подчиняются законам классической механики, а давление – это отношение усредненной по времени силы, с которой молекулы бьют по стенке, к площади стенки.
#
# Пропорциональность силы, с которой молекулы воздействуют на стенку, их концентрации, массе и скорости каждой молекулы качественно понятны. Квадратичный рост давления со скоростью связан с тем, что от скорости зависит не только сила отдельного удара, но и частота соударений молекул со стенкой.
#
# Учитывая связь между концентрацией молекул в газе и его плотностью $(\rho = nm_0)$, можно получить еще одну форму основного уравнения МКТ идеального газа:
#
# $$p=\frac{1}{3}\rho\overline{v^2}$$
#
# ### Температура. Абсолютная температура
#
# <img src="images/thermometer.jpg"/>
#
# **Рис. 2. Жидкостные термометры**
#
# При контакте двух макросистем, каждая из которых находится в равновесии, например, при открывании крана между двумя теплоизолированными сосудами с газом или контакте их через теплопроводящую стенку, равновесие нарушается. Через большой промежуток времени в частях объединенной системы устанавливаются новые значения параметров системы. Если говорить только о макропараметрах, то выравниваются температуры тел.
#
# Понятие «температура» было введено в физику в качестве физической величины, характеризующей степень нагретости тела не по субъективным ощущениям экспериментатора, а на основании объективных показаний физических приборов.
#
# *Термометр* – прибор для измерения температуры, действие которого основано на взаимно-однозначной связи наблюдаемого параметра системы (давления, объема, электропроводности, яркости свечения и т. д.) с температурой (рис. 2).
#
# Считается, что если этот вторичный параметр (например, объем ртути в ртутном термометре) при длительном контакте с одним телом и при длительном контакте с другим телом одинаков, то это значит, что равны температуры этих двух тел. В экспериментах по установлению распределения молекул по скоростям было показано, что это распределение зависит только от степени нагретости тела, измеряемой термометром. В современной статистической физике характер распределения частиц системы по энергиям характеризует ее температуру.
#
# Для калибровки термометра необходимы тела, температура которых считается неизменной и воспроизводимой. Обычно это температура равновесной системы лед – вода при нормальном давлении $(0 °С)$ и температура кипения воды при нормальном давлении $(100 °С)$.
#
# В СИ температура выражается в кельвинах $(К)$. По этой шкале $0 °С = 273,15 К$ и $100 °С = 373,15 К$. В обиходе используются и другие температурные шкалы.
#
# <img src="images/temp_scales.jpg"/>
#
# ### Модель идеального газа
# Идеальный газ – это модель разреженного газа, в которой пренебрегается взаимодействием между молекулами. Силы взаимодействия между молекулами довольно сложны. На очень малых расстояниях, когда молекулы вплотную подлетают друг к другу, между ними действуют большие по величине силы отталкивания. На больших или промежуточных расстояниях между молекулами действуют сравнительно слабые силы притяжения. Если расстояния между молекулами в среднем велики, что наблюдается в достаточно разреженном газе, то взаимодействие проявляется в виде относительно редких соударений молекул друг с другом, когда они подлетают вплотную. В идеальном газе взаимодействием молекул вообще пренебрегают.
#
# Теория создана немецким физиком Р. Клаузиусом в 1857 году для модели реального газа, которая называется идеальный газ. Основные признаки модели:
#
# * расстояния между молекулами велики по сравнению с их размерами;
# * взаимодействие между молекулами на расстоянии отсутствует;
# * при столкновениях молекул действуют большие силы отталкивания;
# * время столкновения много меньше времени свободного движения между столкновениями;
# * движения подчиняются законам Ньютона;
# * молекулы - упругие шары;
# * силы взаимодействия возникают при столкновении.
#
# Границы применимости модели идеального газа зависят от рассматриваемой задачи. Если необходимо установить связь между давлением, объемом и температурой, то газ с хорошей точностью можно считать идеальным до давлений в несколько десятков атмосфер. Если изучается фазовый переход типа испарения или конденсации или рассматривается процесс установления равновесия в газе, то модель идеального газа нельзя применять даже при давлениях в несколько миллиметров ртутного столба.
#
# Давление газа на стенку сосуда является следствием хаотических ударов молекул о стенку, вследствие их большой частоты действие этих ударов воспринимается нашими органами чувств или приборами как непрерывная сила, действующая на стенку сосуда и создающая давление.
#
# <img src="images/ideal_gas.gif"/>
#
# Пусть одна молекула находится в сосуде, имеющем форму прямоугольного параллелепипеда (см. рис. 1). Рассмотрим, например, удары этой молекулы о правую стенку сосуда, перпендикулярную оси $x$. Считаем удары молекулы о стенки абсолютно упругими, тогда угол отражения молекулы от стенки равен углу падения, а величина скорости в результате удара не изменяется. В нашем случае при ударе проекция скорости молекулы на ось $y$ не изменяется, а проекция скорости на ось $x$ меняет знак. Таким образом, проекция импульса изменяется при ударе на величину, равную $-2mv_x$, знак «-» означает, что проекция конечной скорости отрицательна, а проекция начальной – положительна.
#
# Определим число ударов молекулы о данную стенку за 1 секунду. Величина проекции скорости не изменяется при ударе о любую стенку, т.е. можно сказать, что движение молекулы вдоль оси $x$ равномерное. За 1 секунду она пролетает расстояние, равное проекции скорости $v_x$. От удара до следующего удара об эту же стенку молекула пролетает вдоль оси $x$ расстояние, равное удвоенной длине сосуда $2L$. Поэтому число ударов молекулы о выбранную стенку равно $\frac{v_x}{2L}$. Согласно 2-му закону Ньютона средняя сила равна изменению импульса тела за единицу времени. Если при каждом ударе о стенку частица изменяет импульс на величину $2mv_x$, а число ударов за единицу времени равно $\frac{v_x}{2L}$, то средняя сила, действующая со стороны стенки на молекулу (равная по величине силе, действующей на стенку со стороны молекулы), равна $f=\frac{2mv_x^2}{L}$, а среднее давление молекулы на стенку равно $p=\frac{f}{S}=\frac{mv_x^2}{LS}=\frac{mv_x^2}{V}$, где $V$ – объем сосуда.
#
# Если бы все молекулы имели одинаковую скорость, то общее давление получалось бы просто умножением этой величины на число частиц $N$, т.е. $p=\frac{Nmv_x^2}{V}$. Но поскольку молекулы газа имеют разные скорости, то в этой формуле будет стоять среднее значение квадрата скорости, тогда формула примет вид: $p=\frac{Nm<v_x^2>}{V}$.
#
# Квадрат модуля скорости равен сумме квадратов ее проекций, это имеет место и для их средних значений: $<v^2>=<v_x^2>+<v_y^2>+<v_z^2>$. Вследствие хаотичности теплового движения средние значения всех квадратов проекций скорости одинаковы, т.к. нет преимущественного движения молекул в каком-либо направлении. Поэтому $<v^2>=3<v_x^2>$, и тогда формула для давления газа примет вид: $p=\frac{Nmv^2}{3V}$. Если ввести кинетическую энергию молекулы $E_k=\frac{mv^2}{2}$, то получим $p=\frac{2N<E_k>}{3V}$, где $<E_k>$ - средняя кинетическая энергия молекулы.
#
# ### Уравнение Менделеева – Клапейрона (уравнение состояния идеального газа)
# В результате экспериментальных исследований многих ученых было установлено, что макропараметры реальных газов не могут изменяться независимо. Они связаны уравнением состояния:
#
# $$PV = \nu RT$$
#
# Где $R = 8,31 Дж/(K·моль)$ – универсальная газовая постоянная, $\nu = \frac{m}{M}$, где $m$ – масса газа и $M$ – молярная масса газа. Уравнение Менделеева – Клапейрона называют *уравнением состояния*, поскольку оно связывает функциональной зависимостью *параметры состояния*. Его записывают и в других видах:
#
# $$pV = \frac{m}{M}RT$$
#
# $$p=\frac{\rho}{M}RT$$
#
# Пользуясь уравнением состояния, можно выразить один параметр через другой и построить график первого из них, как функции второго.
#
# Графики зависимости одного параметра от другого, построенные при фиксированных температуре, объеме и давлении, называют соответственно *изотермой*, *изохорой* и *изобарой*.
#
# Например, зависимость давления $P$ от температуры $T$ при постоянном объеме $V$ и постоянной массе $m$ газа – это функция $p(T)=\frac{mR}{MV}T = kT$, где $K$ – постоянный числовой множитель. Графиком такой функции в координатах $P$, $Т$ будет прямая, идущая от начала координат, как и графиком функции $y(x)=kx$ в координатах $y, x$ (рис. 3).
#
# Зависимость давления $P$ от объема $V$ при постоянной массе $m$ газа и температуре $T$ выражается так:
#
# $$p(V)=\frac{mRT}{M}\cdot{\frac{1}{V}}=\frac{k_1}{V},$$
#
# Где $k_1$ – постоянный числовой множитель. График функции $y(x)=\frac{k_1}{x}$ в координатах $y$, $x$ представляет собой гиперболу, так же как и график функции $p(V)=\frac{k_1}{V}$ в координатах $P$, $V$.
#
# <img src="images/iso_lines.jpg"/>
#
# Рассмотрим частные газовые законы. При постоянной температуре и массе следует, что $pV=const$, т.е. при постоянной температуре и массе газа его давление обратно пропорционально объему. Этот закон называется *законом Бойля-Мариотта*, а процесс, при котором температура постоянна, называется изотермическим.
#
# Для изобарного процесса, происходящего при постоянном давлении, следует, что $V=(\frac{m}{pM}R)T$, т.е. объем пропорционален абсолютной температуре. Этот закон называют *законом Гей-Люссака*.
#
# Для изохорного процесса, происходящего при постоянном объеме, следует, что $p=(\frac{m}{VM}R)T$, т.е. давление пропорционально абсолютной температуре. Этот закон называют *законом Шарля*.
#
# Эти три газовых закона, таким образом, являются частными случаями уравнения состояния идеального газа. Исторически они сначала были открыты экспериментально, и лишь значительно позднее получены теоретически, исходя из молекулярных представлений.
#
# ### Связь температуры со средней кинетической энергией молекул вещества
# Количественное соотношение между температурой $T$ (макропараметром) системы и средней кинетической энергией описание: $\overline{E_k}$ (микропараметром) молекулы идеального газа может быть выведено из сопоставления основного уравнения МКТ идеального газа описание: $p=\frac{2}{3}n\overline{E_k}$ и уравнения состояния $p=\frac{\nu RT}{V} = nkT$, где описание: $k=\frac{R}{N_A}=1.38*10^{-23}\ Дж/К$ – постоянная Больцмана. Сопоставляя два выражения для давления, получим
#
# $$\overline{E_k}=\frac{3}{2}kT$$
#
# Средняя кинетическая энергия молекул идеального газа пропорциональна температуре газа. Если молекулы газа образованы двумя, тремя и т. д. атомами, то доказывается, что это выражение связывает только энергию поступательного движения молекулы в целом и температуру.
#
# С учетом этого соотношения на уровне микро — и макропараметров макросистемы можно утверждать, что в *cостоянии теплового равновесия* двух систем выравниваются температуры и в случае идеального газа средние кинетические энергии молекул
#
#
# ### Определение первого закона термодинамики
#
# Самым важным законом, лежащим в основе термодинамики является первый закон или первое начало термодинамики. Чтобы понять суть этого закона, для начала, вспомним что называется внутренней энергией. **Внутренняя энергия тела** — это энергия движения и взаимодействия частиц, из которых оно состоит. Нам хорошо известно, что внутреннюю энергию тела можно изменить, изменив температуру тела. А изменять температуру тела можно двумя способами:
#
# 1. совершая работу (либо само тело совершает работу, либо над телом совершают работу внешние силы);
# 2. осуществляя теплообмен — передачу внутренней энергии от одного тела к другому без совершения работы.
#
# Нам, также известно, что работа, совершаемая газом, обозначается $А_r$, а количество переданной или полученной внутренней энергии при теплообмене называется количеством теплоты и обозначается $Q$. Внутреннюю энергию газа или любого тела принято обозначать буквой $U$, а её изменение, как и изменение любой физической величины, обозначается с дополнительным знаком $Δ$, то есть $ΔU$.
#
# Сформулируем **первый закон термодинамики** для газа. Но, прежде всего, отметим, что когда газ получает некоторое количество теплоты от какого-либо тела, то его внутренняя энергия увеличивается, а когда газ совершает некоторую работу, то его внутренняя энергия уменьшается. Именно поэтому первый закон термодинамики имеет вид:
#
# $$ΔU = Q - A_r$$
#
# Так как работа газа и работа внешних сил над газом равны по модулю и противоположны по знаку, то первый закон термодинамики можно записать в виде:
#
# $$ΔU = Q + A_{внеш}.$$
#
# Понять суть этого закона довольно просто, ведь изменить внутреннюю энергию газа можно двумя способами: либо заставить его совершить работу или совершить над ним работу, либо передать ему некоторое количество теплоты или отвести от него некоторое количество теплоты.
#
# ### Первый закон термодинамики в процессах
#
# Применительно к изопроцессам первый закон термодинамики может быть записан несколько иначе, учитывая особенности этих процессов. Рассмотрим три основных изопроцесса и покажем, как будет выглядеть формула первого закона термодинамики в каждом из них.
#
# 1. Изотермический процесс — это процесс, происходящий при постоянной температуре. С учётом того, что количество газа также неизменно, становится ясно, что так как внутренняя энергия зависит от температуры и количества газа, то в этом процессе она не изменяется, то есть $U = const$, а значит $ΔU = 0$, тогда первый закон термодинамики будет иметь вид: $Q = A_r$.
# 2. Изохорный процесс — это процесс, происходящий при постоянном объёме. То есть в этом процессе газ не расширяется и не сжимается, а значит не совершается работа ни газом, ни над газом, тогда $А_r = 0$ и первый закон термодинамики приобретает вид: $ΔU = Q$.
# 3. Изобарный процесс — это процесс, при котором давление газа неизменно, но и температура, и объём изменяются, поэтому первый закон термодинамики имеет самый общий вид: $ΔU = Q - А_r$.
# 4. Адиабатический процесс — это процесс, при котором теплообмен газа с окружающей средой отсутствует (либо газ находится в теплоизолированном сосуде, либо процесс его расширения или сжатия происходит очень быстро). То есть в таком процессе газ не получает и не отдаёт количества теплоты и $Q = 0$. Тогда первый закон термодинамики будет иметь вид: $ΔU = -А_r$.
#
# ### Применение
# Первое начало термодинамики (первый закон) имеет огромное значение в этой науке. Вообще понятие внутренней энергии вывело теоретическую физику 19 века на принципиально новый уровень. Появились такие понятия как термодинамическая система, термодинамическое равновесие, энтропия, энтальпия. Кроме того, появилась возможность количественного определения внутренней энергии и её изменения, что в итоге привело учёных к пониманию самой природы теплоты, как формы энергии.
#
# Ну, а если говорить о применении первого закона термодинамики в каких-либо задачах, то для этого необходимо знать два важных факта. Во-первых, внутренняя энергия идеального одноатомного газа равна: $U=\frac{3}{2}\nu RT$, а во-вторых, работа газа численно равна площади фигуры под графиком данного процесса, изображённого в координатах $p-V$. Учитывая это, можно вычислять изменение внутренней энергии, полученное или отданное газом количество теплоты и работу, совершённую газом или над газом в любом процессе. Можно также определять коэффициент полезного действия двигателя, зная какие процессы в нём происходят.
#
# ### Функции распределения
#
# В качестве основной функции, применяемой при статистическом методе описания, выступает функция распределения, которая определяет статистические характеристики рассматриваемой системы. Знание её изменения с течением времени позволяет описывать поведение системы со временем. Функция распределения дает возможность рассчитывать все наблюдаемые термодинамические параметры системы.
#
# Для введения понятия функции распределения сначала рассмотрим какую-либо макроскопическую систему, состояние которой описывается некоторым параметром $x$, принимающим $K$ дискретных значений: $x_1,x_2,x_3,...,x_K$. Пусть при проведении над системой $N$ измерений были получены следующие результаты: значение $x_1$ наблюдалось при $N_1$ измерениях, значение $x_2$ наблюдалось соответственно при $N_2$ измерениях и т.д. При этом, очевидно, что общее число измерений $N$ равняется сумме всех измерений $N_i$ , в которых были получены значения $x_i$:
#
# $$N=\sum_{i=1}^K N_i$$
#
# Увеличение числа проведенных экспериментов до бесконечности приводит к стремлению отношения $\frac{N_i}{N}$ к пределу
#
# $$\tag{10.1} P(x_i)=\lim_{N\to\infty}\frac{N_i}{N}$$
#
# Величина $P(x_i)$ называется вероятностью измерения значения $x_i$.
#
# Вероятность $P(x_i)$ представляет собой величину, которая может принимать значения в интервале $0\le P(x_i)\le1$. Значение $P(x_i)=0$ соответствует случаю, когда ни при одном измерении не наблюдается значение $x_i$ и, следовательно, система не может иметь состояние, характеризующееся параметром $x_i$. Соответственно вероятность $P(x_i)=1$ возможна только, если при всех измерениях наблюдалось только значение $x_i$. В этом случае, система находится в детерминированном состоянии с параметром $x_i$.
#
# Сумма вероятностей $P(x_i)$ нахождения системы во всех состояниях с параметрами $x_i$ равна единице:
#
# $$\tag{10.2} \sum_{i=1}^{K}P(x_i)=\frac{\sum_{i=1}^{K}N_i}{N} = \frac{N}{N}=1$$
#
# Условие $(10.2)$ указывает на достаточно очевидный факт, что если набор возможных дискретных значений $x_i$, $i=1,2,...K$, является полным (то есть включает все возможные значения параметра $x$ в соответствии с условиями физической задачи), то при любых измерениях параметра $x$ должны наблюдаться значения этого параметра только из указанного набора $x_i$.
#
# Рассмотренный нами случай, когда параметр, характеризующий систему, принимает набор дискретных значений не является типичным при описании макроскопических термодинамических систем. Действительно, такие параметры как температура, давление, внутренняя энергия и т.д., обычно принимают непрерывный ряд значений. Аналогично и переменные, характеризующие движение микрочастиц (координата и скорость), изменяются для систем, описываемых классической механикой, непрерывным образом.
#
# Поэтому рассмотрим статистическое описание, применимое для случая, когда измеренный параметр $x_i$ может иметь любые значения в некотором интервале $a\le x\le b$. Причем, указанный интервал может быть и не ограниченным какими либо конечными значениями $a$ и $b$. В частности параметр $x$ в принципе может изменяться от $-\infty$ до $+\infty$, как, например, координаты молекулы газа для случая неограниченной среды.
#
# Пусть в результате измерений было установлено, что величина $x$ с вероятностью $dP(x)$ попадает в интервал значений от $x$ до $x+dx$. Тогда можно ввести функцию $f(x)$, характеризующую плотность распределения вероятностей:
#
# $$\tag{10.3} f(x)=\frac{dP(x)}{dx}$$
#
# Эта функция в физике обычно называется функцией распределения.
#
# Функция распределения $f(x)$ должна удовлетворять условию: $f(x) \ge 0$, так как вероятность попадания измеренного значения в интервал от $x$ до $x+dx$ не может быть отрицательной величиной. Вероятность того, что измеренное значение попадет в интервал $x_1\le x\le x_2$ равна
#
# $$\tag{10.4} P(x_1\le x\le x_2)=\int_{x_1}^{x_2}f(x)dx$$
#
# Соответственно, вероятность попадания измеренного значения в весь интервал возможных значений $a\le x\le b$ равна единице:
#
# $$\tag{10.5} \int_{a}^{b}f(x)dx=1$$
#
# Выражение $(10.5)$ называется условием нормировки функции распределения.
#
# Функция распределения $f(x)$ позволяет определить среднее значение любой функции $\phi(x)$:
#
# $$\tag{10.6} <\phi(x)>=\int_{a}^{b}\phi(x)f(x)dx$$
#
# В частности по формуле $(10.6)$ может быть найдено среднее значение параметра $x$:
#
# $$\tag{10.7} <x>=\int_{a}^{b}xf(x)dx$$
#
# Если состояние системы характеризуется двумя параметрами $x$ и $y$, то вероятность её нахождения в состоянии со значениями этих параметров в интервалах $x_1\le x\le x_2$ и $y_1\le x\le y_2$ соответственно равна
#
# $$\tag{10.8} P(x_1\le x\le x_2, y_1\le x\le y_2)=\int_{x_1}^{x_2}\int_{y_1}^{y_2}f(x,y)dxdy$$
#
# где $f(x, y)$ - двумерная функция распределения. Примером такой функции может служить совместное распределение для координат и скоростей молекул газа.
#
# Соответственно для бесконечно малых интервалов $dx$ и $dy$ вероятность $dP(x, y)$ можно представить в виде
#
# $$\tag{10.9}dP(x, y) = f(x, y)dxdy$$
#
# В случае статистической независимости значений параметров $x$ и $y$ друг от друга двумерная функция распределений $f(x, y)$ равна произведению функций распределения $f(x)$ и $f(y)$:
#
# $$\tag{10.10} f(x, y)=f(x)f(y)$$
#
# Это свойство функций распределения будет нами использовано при рассмотрении распределения Максвелла-Больцмана.
#
# ### Распределение Максвелла
#
# #### Функция распределения Максвелла
#
# Пусть имеется n тождественных молекул, находящихся в состоянии беспорядочного теплового движения при определенной температуре. После каждого акта столкновения между молекулами их скорости меняются случайным образом. В результате невообразимо большого числа столкновений устанавливается стационарное равновесное состояние, когда число молекул в заданном интервале скоростей сохраняется постоянным.
#
# Распределение молекул идеального газа по скоростям впервые было получено знаменитым английским ученым Дж. Максвеллом в 1860 г. с помощью методов теории вероятностей.
#
# **Функция распределения Максвелла характеризует распределение молекул по скоростям** и определяется отношением кинетической энергии молекулы $\frac{mv^2}{2}$ к средней энергии её теплового движения $kT$:
#
# $$f(v)=\frac{dn}{ndv}=\frac{4}{\sqrt\pi}(\frac{m}{2kT})^{\frac{3}{2}}\exp(-\frac{mv^2}{2kT})v^2$$
#
# Эта функция обозначает долю молекул единичного объёма газа, абсолютные скорости которых заключены в интервале скоростей от $v$ до $v + Δv$, включающем данную скорость.
#
# Обозначим множитель перед экспонентой через $А$, тогда из уравнения получим окончательное выражение **функции распределения Максвелла**:
#
# $$f(v)=A\exp(-\frac{mv^2}{2kT})v^2$$
#
# График этой функции показан на рисунке 3.2.1:
#
# <img src='images/i0166.png' width="500" height="300"/>
#
# #### Средние скорости распределения Максвелла
#
# Из графика функции распределения Максвелла, приведенного на рисунке 3.2.1, видно, что **наиболее вероятная скорость** - *скорость, на которую приходится максимум зависимости*.
#
# * *Наиболее вероятная скорость молекулы*
#
# $v_{вер}=\sqrt{\frac{2kT}{m}}$, для одного моля газа $v_{вер}=\sqrt{\frac{2RT}{M}}$
#
# * *Среднеарифметическая скорость молекул*
#
# $<v>=\sqrt{\frac{8kT}{\pi m}}$, для одного моля газа $<v>=\sqrt{\frac{8RT}{\pi M}}$
#
# * *Среднеквадратичная скорость молекулы*
#
# $<v>_{кв}=\sqrt{\frac{3kT}{m}}$, для одного моля газа $<v>_{кв}=\sqrt{\frac{3RT}{M}}$
#
# #### Зависимость функции распределения Максвелла от массы молекул и температуры газа
#
# На рисунке 3.2.2 показано, что при уменьшении массы молекул $(m_1 > m_2 > m_3)$ и при увеличении температуры $(T_1 < T_2 < T_3)$ максимум функции распределения Максвелла смещается вправо, в сторону увеличения скоростей (так как $v_{вер}=\sqrt{2kT/m}$).
#
# <img src='images/i0167.png' width="500" height="300"/>
#
# *Площадь под кривой* - *величина постоянная*, равная единице, поэтому важно знать, как будет изменяться положение максимума кривой:
#
# $f(v)\approx\sqrt{\frac{m}{T}}$, кроме того, $v\approx\sqrt{\frac{T}{m}}$.
#
# Выводы:
#
# • Вид распределения молекул газа по скоростям **зависит от рода газа и от температуры**. Давление $P$ и объём газа $V$ на распределение молекул не влияют.
#
# • В показателе степени $f(v)$ стоит отношение кинетической энергии, соответствующей данной скорости, к средней энергии теплового движения молекул; значит, **распределение Максвелла характеризует распределение молекул по значениям кинетической энергии**.
#
# • **Максвелловский закон - статистический**, и выполняется тем лучше, чем больше число молекул.
#
# #### Формула Максвелла для относительных скоростей
#
# Относительную скорость обозначим через $u=\frac{v}{v_{вер}}$. Тогда получим **закон распределения Максвелла** в приведенном виде:
#
# $$f(u)=\frac{dn}{ndu}=\frac{4}{\sqrt\pi}\exp(-u^2)u^2$$
#
# Это уравнение универсальное. В таком виде *функция распределения не зависит ни от рода газа, ни от температуры*.
#
# #### Барометрическая формула
#
# Атмосферное давление на какой-либо высоте $h$ обусловлено весом слоёв газа, лежащих выше. Пусть $P$ - давление на высоте $h$, а $P + dP$ - на высоте $h + dh$ (рис. 3.2.3).
#
# Разность давления $P - (P + dP)$ равна весу газа, заключённого в объёме цилиндра с площадью основания, равной единице, и высотой $dh$.
#
# Так как вес газа в этом цилиндре равен $ρg\,dh$, где $ρ = PM/RT$ - плотность газа на высоте $h$, медленно убывающая с высотой, то можно записать: $P - (P + dP) = ρg\,dh$, то есть $dP = -ρg\,dh$.
#
# Отсюда можно получить **барометрическую формулу**, показывающую зависимость атмосферного давления от высоты:
#
# $$P=P_0\exp(-\frac{Mgh}{RT})$$
#
# Из барометрической формулы следует, что давление убывает с высотой тем быстрее, чем тяжелее газ (чем больше $M$)и чем ниже температура. Например, на больших высотах концентрация легких газов Не и Н2 гораздо больше, чем у поверхности Земли (рис. 3.2.4).
#
# <img src='images/i0168.png' width="800" height="500"/>
#
# ### Распределение Больцмана
#
# Исходя из основного уравнения молекулярно-кинетической теории $P = nkT$, заменим $P$ и $P_0$ в барометрической формуле на $n$ и $n_0$ и получим *распределение молекул во внешнем потенциальном поле* - **распределение Больцмана**:
#
# $n=n_0\exp(-\frac{Mgh}{RT})$, или $n=n_0\exp(-\frac{mgh}{kT})$,
#
# где $n_0$ и $n$ - число молекул в единичном объёме на высоте $h = 0$ и $h$.
#
# С уменьшением температуры число молекул на высотах, отличных от нуля, убывает. При $Т = 0$ тепловое движение прекращается, все молекулы расположились бы на земной поверхности. При высоких температурах, наоборот, молекулы оказываются распределёнными по высоте почти равномерно, а плотность молекул медленно убывает с высотой. Так как $mgh$ - это потенциальная энергия $Е_п$, то на разных высотах $E_п = mgh$ - различна. Следовательно, уравнение характеризует распределение частиц по значениям потенциальной энергии:
#
# $$n=n_0\exp(-\frac{E_п}{kT})$$
#
# -**это закон распределения частиц по потенциальным энергиям - распределение Больцмана**.
#
# ### Распределение Максвелла-Больцмана
#
# Итак, закон Максвелла даёт распределение частиц по значениям кинетической энергии, а закон Больцмана - распределение частиц по значениям потенциальной энергии. Учитывая, что полная энергия $E = Е_п + Е_к$, оба распределения можно объединить в единый **закон Максвелла-Больцмана**:
#
# $$dn=n_0A\exp(-\frac{E}{kT})$$
#
# ### Задание:
# #### Реализовать модель поведения идеального газа в замкнутом пространстве, при заданных температуре, массе, количестве частиц.
# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from scipy.stats import maxwell
# # %matplotlib tk
# # %matplotlib notebook
# from IPython.display import HTML
# plt.rcParams["animation.html"] = "jshtml"
# %matplotlib widget
# -
# Frozen Maxwell speed distribution used to draw initial particle speeds.
mw = maxwell()
k = 1.38e-23    # Boltzmann constant, J/K
R = 8.31        # universal gas constant, J/(mol*K) (unused below)
N = 10          # number of particles
T = 5000        # temperature, K
m = 6.645e-27   # particle mass, kg (presumably a helium atom -- confirm)
dt = 10e-5      # integration time step
# +
# Initial speeds. NOTE(review): maxwell.rvs already returns dimensionless
# Maxwell-distributed samples; scaling the sample by 2kT/m *under* the
# square root is an unusual construction -- confirm the intended scaling.
v = np.sqrt(mw.rvs(size=N) * 2 * k * T / m)
# Random direction for each particle; decompose speed into components.
alpha = np.random.uniform(0, 2 * np.pi, N)
vx = v * np.cos(alpha)
vy = v * np.sin(alpha)
# Random initial positions inside the 10 x 10 box.
x = np.random.uniform(0, 10, N)
y = np.random.uniform(0, 10, N)
# -
def ani_func(i):
    """Animation callback: advance all particles one Euler step and redraw.

    Particles bounce elastically off the walls of the [0, 10] x [0, 10] box.

    Parameters
    ----------
    i : int
        Frame index supplied by FuncAnimation (unused).
    """
    global x, y, vx, vy, dt
    eps = 0.01  # wall-proximity tolerance
    plt.clf()
    # Explicit Euler position update.
    x += vx * dt
    y += vy * dt
    # BUG FIX: plain negation (v = -v) re-flips the velocity on every frame
    # while a particle remains inside the wall zone, trapping it at the
    # boundary. Forcing the sign always points the velocity back inward.
    right = x + eps >= 10
    left = x - eps <= 0
    vx[right] = -np.abs(vx[right])
    vx[left] = np.abs(vx[left])
    top = y + eps >= 10
    bottom = y - eps <= 0
    vy[top] = -np.abs(vy[top])
    vy[bottom] = np.abs(vy[bottom])
    plt.scatter(x, y)
    plt.xlim(0, 10)
    plt.ylim(0, 10)
    plt.show()
# +
# Run the single-gas animation in a 5x5-inch figure.
fig = plt.figure(figsize=(5, 5))
skip = 1  # NOTE(review): unused -- presumably a leftover frame-skip setting
ani = animation.FuncAnimation(fig, ani_func, frames=1000, repeat=False, interval=1)
# -
# Halt the animation timer (run this cell to stop the simulation).
ani.event_source.stop()
# ### Задание:
# #### Реализовать модель смеси двух идеальных газов в замкнутом пространстве, при заданных температуре, массах, количествах частиц.
k = 1.38e-23     # Boltzmann constant, J/K
R = 8.31         # universal gas constant, J/(mol*K) (unused below)
N1 = 10          # particle count, gas 1
N2 = 10          # particle count, gas 2
T1 = 1000        # temperature of gas 1, K
T2 = 300         # temperature of gas 2, K
m1 = 6.645e-27   # particle mass of gas 1, kg
m2 = 14.325e-27  # particle mass of gas 2, kg
dt = 10e-5       # integration time step
# +
# Initial speeds and random directions for each gas (see the single-gas
# cell above for a caveat about the maxwell.rvs scaling).
v1 = np.sqrt(mw.rvs(size=N1) * 2 * k * T1 / m1)
alpha = np.random.uniform(0, 2 * np.pi, N1)
vx1 = v1 * np.cos(alpha)
vy1 = v1 * np.sin(alpha)
v2 = np.sqrt(mw.rvs(size=N2) * 2 * k * T2 / m2)
alpha = np.random.uniform(0, 2 * np.pi, N2)
vx2 = v2 * np.cos(alpha)
vy2 = v2 * np.sin(alpha)
# Gas 1 starts in the left half of the box, gas 2 in the right half.
x1 = np.random.uniform(0, 5, N1)
y1 = np.random.uniform(0, 10, N1)
x2 = np.random.uniform(5, 10, N2)
y2 = np.random.uniform(0, 10, N2)
# -
def ani_func_2(i):
    """Animation callback for the two-gas mixture: one Euler step + redraw.

    Both gases evolve independently (no inter-particle collisions) and
    bounce elastically off the walls of the [0, 10] x [0, 10] box.

    Parameters
    ----------
    i : int
        Frame index supplied by FuncAnimation (unused).
    """
    global x1, y1, x2, y2, vx1, vy1, vx2, vy2, dt
    eps = 0.01  # wall-proximity tolerance
    plt.clf()
    # Same treatment for each gas; augmented assignment mutates the
    # module-level arrays in place.
    for px, py, pvx, pvy in ((x1, y1, vx1, vy1), (x2, y2, vx2, vy2)):
        px += pvx * dt
        py += pvy * dt
        # BUG FIX: plain negation can re-flip an overshooting particle on
        # every frame, pinning it to the wall. Force the inward sign instead.
        hi_x = px + eps >= 10
        lo_x = px - eps <= 0
        pvx[hi_x] = -np.abs(pvx[hi_x])
        pvx[lo_x] = np.abs(pvx[lo_x])
        hi_y = py + eps >= 10
        lo_y = py - eps <= 0
        pvy[hi_y] = -np.abs(pvy[hi_y])
        pvy[lo_y] = np.abs(pvy[lo_y])
        plt.scatter(px, py)
    plt.xlim(0, 10)
    plt.ylim(0, 10)
    plt.show()
# +
# Run the two-gas mixture animation.
fig = plt.figure(figsize=(5, 5))
skip = 1  # NOTE(review): unused -- presumably a leftover frame-skip setting
ani = animation.FuncAnimation(fig, ani_func_2, frames=1000, repeat=False, interval=1)
# -
# Halt the animation timer; uncomment the save line to export a GIF.
ani.event_source.stop()
# ani.save("figure_2.gif")
|
Damarad_Viktor/thermodynamics_practice.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
import tensorflow as tf
# -
def my_image_filter(input_images):
    """Apply two 5x5 conv + ReLU stages, creating fresh variables per call.

    Each call to this function constructs four new tf.Variable objects;
    TensorFlow de-duplicates the requested names by appending a suffix,
    which the print below makes visible.
    """
    w1 = tf.Variable(tf.random_normal([5, 5, 32, 32]),
                     name="conv1_weights")
    b1 = tf.Variable(tf.zeros([32]), name="conv1_biases")
    stage1 = tf.nn.conv2d(input_images, w1,
                          strides=[1, 1, 1, 1], padding='SAME')
    print(w1.name)  # shows the auto-suffixed name on repeated calls
    act1 = tf.nn.relu(stage1 + b1)
    w2 = tf.Variable(tf.random_normal([5, 5, 32, 32]),
                     name="conv2_weights")
    b2 = tf.Variable(tf.zeros([32]), name="conv2_biases")
    stage2 = tf.nn.conv2d(act1, w2,
                          strides=[1, 1, 1, 1], padding='SAME')
    return tf.nn.relu(stage2 + b2)
# +
# NCHW-like placeholders for a batch of 128 feature maps of shape 32x32x32.
image1 = tf.placeholder(tf.float32, shape=(128, 32, 32 ,32))
image2 = tf.placeholder(tf.float32, shape=(128, 32, 32 ,32))
# First call creates one set of 4 variables.
result1 = my_image_filter(image1)
# Another set of 4 variables is created in the second call.
result2 = my_image_filter(image2)
# -
# As shown above, if the variable scope is not specified, the variables with the same name will be **automatically distinguished by adding extra suffix.**
def conv_relu(input, kernel_shape, bias_shape):
    """Conv2D + ReLU built with tf.get_variable so variables are scoped.

    NOTE(review): the parameter name `input` shadows the Python builtin;
    left unchanged to keep the public signature intact.

    input        -- 4-D input tensor (NHWC)
    kernel_shape -- shape of the convolution kernel variable
    bias_shape   -- shape of the bias variable
    """
    # Create variable named "weights".
    weights = tf.get_variable("weights", kernel_shape,
        initializer=tf.random_normal_initializer())
    # Create variable named "biases".
    biases = tf.get_variable("biases", bias_shape,
        initializer=tf.constant_initializer(0.0))
    conv = tf.nn.conv2d(input, weights,
        strides=[1, 1, 1, 1], padding='SAME')
    return tf.nn.relu(conv + biases)
def my_image_filter(input_images):
    """Two conv+ReLU stages whose variables live in explicit name scopes.

    Unlike the earlier version, repeated calls raise unless the enclosing
    variable scope allows reuse (see scope.reuse_variables below).
    """
    with tf.variable_scope("conv1"):
        # Variables created here will be named "conv1/weights", "conv1/biases".
        relu1 = conv_relu(input_images, [5, 5, 32, 32], [32])
    with tf.variable_scope("conv2"):
        # Variables created here will be named "conv2/weights", "conv2/biases".
        return conv_relu(relu1, [5, 5, 32, 32], [32])
# +
#result1 = my_image_filter(image1)
#result2 = my_image_filter(image2)
# -
# If we explicitly specify the variable name scope and **use get_variable to create or get exsited variables**, there will be no automatic suffix. And in this situation, if we don't allow the reuse of variables, variables with the same name will incur a conflict and raise an error.
# Should do like this: share one set of variables between the two calls
# by explicitly enabling reuse inside a common scope.
with tf.variable_scope("image_filters") as scope:
    result1 = my_image_filter(image1)
    scope.reuse_variables()  # second call reuses, not recreates, variables
    result2 = my_image_filter(image2)
|
variable/variable.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import prettyplotlib as ppl
def plot_maker():
    """Save ten line plots (num0.png ... num9.png) on fixed [0, 10] axes.

    After each file is written, the 1-based progress counter is printed.
    """
    def render(data, idx):
        # One fresh figure per output file; fixed limits keep frames comparable.
        fig, ax = plt.subplots(1)
        ppl.plot(ax, data, data)
        ax.set_xlim([0, 10])
        ax.set_ylim([0, 10])
        fig.savefig('num%d.png' % idx, format = 'png')

    for idx in range(10):
        render(range(idx), idx)
        print(idx + 1)  # original printed the counter after incrementing
plot_maker()
# +
import matplotlib.pyplot as plt
# Two slices: 90% accent colour, 10% dark grey.
sizes = [90, 10]
colors = ['#F05F40', '#252525']
# NOTE(review): `explode` is never passed to plt.pie, and its length (4)
# does not match the 2 slices -- remove or fix before using it.
explode = (0, 0, 0, 0)  # explode a slice if required
plt.pie(sizes, colors=colors)
#draw a circle at the center of pie to make it look like a donut
centre_circle = plt.Circle((0,0),0.75,color='black', fc='white',linewidth=1.25)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
plt.show()
|
Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from stargazer.stargazer import Stargazer
# # Import data
# Workbook with both the factor returns and the Berkshire return series.
file = 'PS_7_Buffetts_exercise.xlsx'
# import factors (monthly factor returns, indexed by date)
factors = pd.read_excel(file, sheet_name='factors', parse_dates=["date"], index_col='date')
factors.head()
# import returns (Berkshire and 13F portfolio total returns)
data = pd.read_excel(file, sheet_name='buffet_data', parse_dates=["date"], index_col='date')
data.head()
# plot the cum return of Berkshire
(1 + data.loc[:, "Berkshire Hathaway, total return"].dropna()).cumprod().plot();
def analyze_performance(monthly_returns):
    """Summarise a series of monthly (excess) returns.

    Returns a 2x3 DataFrame: percent-formatted mean and standard deviation
    plus the Sharpe ratio, on a monthly and an annualised basis.
    """
    def pct(value):
        # round to 2 decimals of a percent and append the % sign
        return str(round(value * 100, 2)) + "%"

    mean_m = monthly_returns.mean()
    std_m = monthly_returns.std()
    # Annualisation: the mean scales with time, volatility with sqrt(time).
    mean_a = mean_m * 12
    std_a = std_m * np.sqrt(12)
    rows = [
        [pct(mean_m), pct(std_m), round(mean_m / std_m, 2)],
        [pct(mean_a), pct(std_a), round(mean_a / std_a, 2)],
    ]
    return pd.DataFrame(rows,
                        columns=["Mean", "Std", "SR"],
                        index=["Monthly", "Annual"])
# Berkshire excess returns over the risk-free rate.
xrets_berk = data.loc[:, "Berkshire Hathaway, total return"].dropna() - factors.loc[:, "RF"]
analyze_performance(xrets_berk)
#beta
y = xrets_berk.dropna()
x = factors.loc[:, "Mkt-RF"].reindex(y.index)
# CAPM regression: excess return on the market factor plus a constant.
regression = sm.OLS(y , sm.add_constant(x)).fit()
alpha = regression.params.loc["const"] * 12  # annualised alpha
beta = regression.params.loc["Mkt-RF"]
ssr = regression.ssr  # sum of squared residuals
print("Beta:", round(beta, 2))
y  # display the regression target (notebook cell output)
# information ratio
# alpha divided by annualised residual volatility
ir = alpha / np.sqrt(ssr / len(xrets_berk.dropna()) * 12)
round(ir, 2)
# # Regression
def time_series_regressions(y, x):
    """Fit three nested factor models of y on columns of x.

    Returns the fitted OLS results in order:
    [1-factor market model, Fama-French 3-factor, FF3 + BAB + QMJ].
    """
    factor_sets = (
        "Mkt-RF",                                # 1-factor market model
        ["Mkt-RF", "SMB", "HML"],                # Fama-French 3-factor model
        ["Mkt-RF", "SMB", "HML", "BAB", "QMJ"],  # FF3 augmented with BAB, QMJ
    )
    # A string selector yields a Series, a list a DataFrame -- matching the
    # shapes the three original regressions used.
    return [
        sm.OLS(y, sm.add_constant(x.loc[:, cols]), missing="drop").fit()
        for cols in factor_sets
    ]
# compute excess returns for Berkshire stock
BRK = data.loc[:, "Berkshire Hathaway, total return"] - factors.loc[:, "RF"]
BRK = BRK.rename("Berkshire Hathaway, excess return")
# +
# run the regressions
models = time_series_regressions(BRK, factors)
# compute annualized alpha of each model using list comprehension
alphas = [round(model.params.loc["const"] * 100 * 12, 2) for model in models]
# -
# present regression results side by side
stargazer = Stargazer(models)
stargazer.add_line("Annualized Alpha (in %)", alphas)
stargazer.covariate_order(["const", "Mkt-RF", "SMB", "HML", "BAB", "QMJ"])
stargazer
# # Berkshire performance II
# compute excess returns for the public-stock (13F) portfolio
PS = data.loc[:, "Public stocks from 13F, total return"] - factors.loc[:, "RF"]
PS = PS.rename("Public stocks from 13F, excess return")
# +
# run the regressions
models = time_series_regressions(PS, factors)
# compute annualized alpha of each model using list comprehension
alphas = [round(model.params.loc["const"] * 100 * 12, 2) for model in models]
# -
# present regression results side by side
stargazer = Stargazer(models)
stargazer.add_line("Annualized Alpha (in %)", alphas)
stargazer.covariate_order(["const", "Mkt-RF", "SMB", "HML", "BAB", "QMJ"])
stargazer
# # Berkshire performance III
# put both time-series of excess returns into a single DataFrame
xrets = pd.concat([BRK, PS], axis=1)
# remove all rows which contain missing values
xrets = xrets.dropna()
xrets
# annualised mean return, in percent
avg = xrets.mean() * 12
round(avg * 100, 2)
# annualised volatility, in percent
sd = xrets.std() * np.sqrt(12)
round(sd * 100, 2)
# annualised Sharpe ratio
sr = avg / sd
round(sr, 2)
|
Berkshire.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Índice
#
# 1. Modulos `os` y `pathlib`
#
# 2. Input/Output files
#
# 3. try/except
#
# 4. Buenas prácticas en Python
#
# # `os` module
#
# El módulo `os` nos da funcionalidades para hacer tareas en el sistema operativo (Operative System = os). Algunas de esas tareas son las siguientes:
#
# * Navegar por el sistema operativo
# * Crear archivos y carpetas
# * Eliminar archivos y carpetas
# * Modificar archivos y carpetas
# +
import os
dir(os)
# -
# ¿Cual es nuestro directorio de trabajo?
os.getcwd()
# Cambiar el directorio de trabajo:
# +
os.chdir("./Sesion 05")
os.getcwd()
# -
# Devolver lista de archivos y directorios:
os.listdir(".")
os.listdir(os.getcwd())
# Crear carpetas:
os.chdir(".")
os.mkdir("prueba1")
os.listdir()
os.mkdir("prueba2/subprueba")
os.makedirs("prueba2/subpruebas")
# Print the directory tree as an indented listing.
# BUG FIX: the original test `if dir != '.git':` compared the *builtin
# function* `dir` to a string, which is always True. To actually skip the
# .git folder, prune it from `dirs` in place so os.walk never descends
# into it.
for root, dirs, files in os.walk("."):
    dirs[:] = [d for d in dirs if d != '.git']
    level = root.replace(".", '').count(os.sep)
    indent = ' ' * 4 * (level)
    print('{}{}/'.format(indent, os.path.basename(root)))
    subindent = ' ' * 4 * (level + 1)
    for f in files:
        print('{}{}'.format(subindent, f))
# Eliminar carpetas:
os.rmdir("prueba1")
os.listdir()
os.rmdir("prueba2")
os.removedirs("prueba2")
os.removedirs("prueba2/subpruebas/")
os.listdir()
# Comprobar si un archivo o directorio existe:
os.path.isdir("./prueba2")
os.path.isdir("Population_Data")
os.path.isfile("Population_Data/Alaska")
os.path.isfile("Population_Data/Alaska/Alaska_population.csv")
# Ejemplo útil de procesamiento de datos con el módulo `os`:
os.getcwd()
for root, dirs, files in os.walk("."):
level = root.replace(".", '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
os.chdir("Population_Data/")
# +
import pandas as pd
# create a list to hold the data from each state
list_states = []
# iteratively loop over all the folders and add their data to the list
for root, dirs, files in os.walk(os.getcwd()):
print(root)
print(dirs)
print(files)
if files:
list_states.append(pd.read_csv(root+'/'+files[0], index_col=0))
# merge the dataframes into a single dataframe using Pandas library
merge_data = pd.concat(list_states[1:], sort=False)
merge_data
# -
os.chdir("..")
# # `pathlib` module
#
# `pathlib` es una libreria de Python que se utiliza para trabajar con _paths_. Pero, ¿que es un _path_? _path_ (ruta en castellano) es la forma de referenciar un archivo informático o directorio en un sistema de archivos de un sistema operativo determinado.
#
# Hay dos tipos de _paths_:
# * **Absolute paths**: Señalan la ubicación de un archivo o directorio desde el directorio raíz del sistema de archivos.
# * **Relative paths**: Señalan la ubicación de un archivo o directorio a partir de la posición actual del sistema operativo en el sistema de archivos.
#
# `pathlib` proporciona una forma más legible y fácil de construir *paths* representando las rutas del sistema de archivos como objetos adecuados.
os.chdir("..")
# +
from pathlib import Path
absolute_path = Path.cwd() / "Population_Data"
relative_path = Path("Population_Data")
print(f"Absolute path: {absolute_path}")
print(f"Relative path: {relative_path}")
# -
absolute_path.is_dir()
relative_path.is_dir()
# ### ¿Qué ventajas tiene `pathlib` respecto a `os.path`?
alaska_file_os = os.path.join(os.getcwd(), 'Population_Data', "Alaska", "Alaska_population.csv")
alaska_file_os
alaska_file_os = "C:/Users/"
alaska_file = Path.cwd() / "Population_Data" / "Alaska" / "Alaska_population.csv"
alaska_file
# Como podemos observar, el ejemplo de `pathlib` es más claro que el de `os.path`. Además, con `pathlib` se crea un objeto `Path`, que tiene asociado métodos.
os.path.isfile(alaska_file_os)
alaska_file.is_file()
# +
current_dir_os = os.getcwd()
current_dir = Path.cwd()
print(current_dir_os)
print(current_dir)
# -
os.mkdir(os.path.join(current_dir_os, "pruebaos"))
(current_dir / "pruebalib").mkdir()
os.rmdir(os.path.join(current_dir_os, "pruebaos"))
(current_dir / "pruebalib").rmdir()
# La conclusióne es que si podeís usar `pathlib` lo utilizeis porque aunque se puede obtener el mismo resultado con `os.path`, el código es más fácil de leer con `pathlib`.
# # Input/Output files
#
# Si ya hemos visto que los módulos como `numpy` o `pandas` tienen funciones para abrir archivos de diferentes tipos, ¿por qué nos interesa ahora aprender otra manera de trabajar con archivos?
#
# Con esas librerias, los archivos que leiamos tenían que tener un tipo de estructura clara. En cambio, con estos métodos que vamos a proporner, no necesitamos que el archivo que vayamos a leer tenga una estructura tan clara.
#
# Además, saber leer, escribir y guardar nuestras salidas en archivos puede ser útil. Aunque con `prints` podriamos hacer lo mismo, el problema es que lo que printeamos con print se guarda en la RAM y cuando cerramos Python, todos lo que habiamos mostrado desaparece.
# Para abrir un archivo usaremos la función `open()`. Hay dos formas de usar la función:
# +
nombre = "Juan"
edad = 22
with open("texto.txt", "w", encoding="UTF-8") as f:
f.write(f"Mi nombre es {nombre} y tengo {edad} años")
# +
nombre = "Ana"
edad = 23
f = open("texto.txt", "a", encoding="UTF-8")
f.write(f"\nMi nombre es {nombre} y tengo {edad} años")
f.close()
# -
# Estamos pasandole dos argumentos a la función `open()`. El primer argumento es una cadena que contiene el nombre del fichero. El segundo argumento es otra cadena que contiene unos pocos caracteres describiendo la forma en que el fichero será usado. mode puede ser `'r'` cuando el fichero solo se leerá, `'w'` para solo escritura (un fichero existente con el mismo nombre se borrará) y `'a'` abre el fichero para agregar.; cualquier dato que se escribe en el fichero se añade automáticamente al final. `'r+'` abre el fichero tanto para lectura como para escritura. El argumento mode es opcional; se asume que se usará `'r'` si se omite.
#
# Además de esos dos argumentos, también le podemos pasar otros argumentos importantes como `encoding` por ejemplo.
#
# Al usar el primer método, no tenemos porque cerrar el archivo expicitamente porque con el `with` Python se encarga de cerrarlo. En cambio, si usamos el segundo método tenemos que cerrarlo nosotros con el método `close()`.
#
# Con el método `write` hemos añadido el texto al fichero que hemos abierto con un modo que nos permite escribir en el.
# Para leer el contenido del archivo usamos el método `read()`.
f = open("texto.txt", encoding="UTF-8")
text = f.read(10)
text
text = f.read(20)
text
f.close()
with open("texto.txt", encoding="UTF-8") as f:
text = f.read()
text
print(text)
with open("texto.txt") as f:
for i, line in enumerate(f):
print(f"{i+1}ª linea: {line}")
f = open("texto.txt")
f.readline()
dir(f)
f.readline()
f.close()
# ¡IMPORTANTE!
# No reeinventeis la rueda. Si vais a leer un tipo de archivo estructurado para el que ya existen funciones programadas en Python para leerlo, usar estas funciones y no os compliquéis la cabeza.
#
# Algunas librerías para trabajar con diferentes tipos de archivos:
# * wave (audio)
# * aifc (audio)
# * tarfile
# * zipfile
# * xml.etree.ElementTree
# * PyPDF2
# * xlwings (Excel)
# * Pillow (imágenes)
# # Módulo `pickle`
#
# Pickle se utiliza para serializar y des-serializar las estructuras de los objetos de Python.
#
# Pickle es muy útil para cuando se trabaja con algoritmos de aprendizaje automático, en los que se requiere guardar los modelos para poder hacer nuevas predicciones más adelante, sin tener que reescribir todo o entrenar el modelo de nuevo.
import pickle
def preprocesamiento(x):
    """Preprocess a raw input value by scaling it down by a factor of ten."""
    escalado = x / 10
    return escalado
def classificador(x):
    """Binary sign classifier: 0 for negative inputs, 1 for zero or positive."""
    return 0 if x < 0 else 1
# A toy "model": preprocessing function, classifier function and a metric.
modelo = { 'preprocess': preprocesamiento, 'model': classificador, 'accuracy': 0.9}
modelo['preprocess'](20)
filename = 'modelo.pickle'
# Serialise the dict (functions are pickled by reference to their module).
# NOTE(review): a `with open(...)` block would guarantee the file is closed.
outfile = open(filename,'wb')
pickle.dump(modelo, outfile)
outfile.close()
# Deserialise it back and exercise the restored classifier.
infile = open(filename,'rb')
new_dict = pickle.load(infile)
infile.close()
new_dict
new_dict['model'](-2)
# # `try\except`
#
# En Python podemos controlar los errores que sabemos de antemano que pueden ocurrir en nuestros programas. Podeís encontrar una lista de errores definidos en Python [aquí](https://docs.python.org/es/3.7/library/exceptions.html#bltin-exceptions).
2/0
2 + "a"
while True:
try:
n = int(input("Elige un número entero: "))
print(f"Tu número entero es : {n}")
break
except ValueError:
print("Vuelve a intentarlo...")
except KeyboardInterrupt:
print("Saliendo...")
break
# Podemos definir nuestros propios errores.
# +
class Error(Exception):
    """Base class for the exceptions defined in this notebook."""
    pass
class ValueTooSmallError(Error):
    """Raised when the input value is smaller than the target number."""
    pass
class ValueTooLargeError(Error):
    """Raised when the input value is larger than the target number."""
    pass
# -
# `raise` se utiliza para devolver errores
# +
# numero que quermos predecir
number = 10
# el usuario dice un numero y le decimos si el nuestro es mayor o menor para que lo intente adivinar
while True:
try:
i_num = int(input("Enter a number: "))
if i_num < number:
raise ValueTooSmallError
elif i_num > number:
raise ValueTooLargeError
break
except ValueTooSmallError:
print("This value is too small, try again!")
print()
except ValueTooLargeError:
print("This value is too large, try again!")
print()
print("Congratulations! You guessed it correctly.")
# -
# `else` y `finally`:
# +
x = 0
try:
10/x
except ZeroDivisionError:
print("Has dividido por cero")
except:
print("El error ha sido otro")
else:
print("No ha habido error de dvidir entre 0")
finally:
print("Lo has intentado")
# -
# # Buenas prácticas con Python
# El Zen de Python (PEP 20) es una colección de 19 aforismos (el vigésimo se dejó intencionadamente en blanco) que influyen en el diseño del Lenguaje de Programación Python:
from pandas import read_csv
import this
# En [este enlace](https://pybaq.co/blog/el-zen-de-python-explicado/) podeis encontrar explicado cada principio.
# El [PEP 8](https://www.python.org/dev/peps/pep-0008/) proporciona la guía de estilo para código de Python.
# ### Algunas curiosidades y funcionalidades útiles:
# * Enumerate:
# +
z = [ 'a', 'b', 'c', 'd' ]
i = 0
while i < len(z):
print(i, z[i])
i += 1
# -
for i in range(0, len(z)):
print(i, z[i])
for i, item in enumerate(z):
print(i, item)
# ?enumerate
list(enumerate(z))
# * zip
z_inv = ['z', 'y', 'x', 'w', 'v']
z_inv
for i in range(len(z_inv)):
print(z[i], z_inv[i])
for i, item in zip(z, z_inv):
print(i, item)
# ?zip
list(zip(z, z_inv))
# * itertools: Esto ya es un módulo propio con diferentes métodos.
import itertools
dir(itertools)
abc = ['a', 'b', 'c', 'd', 'e']
num = [1, 2, 3, 4]
l = []
cont = 0
for elem in num:
cont += elem
l.append(cont)
list(itertools.accumulate(num))
# NOTE(review): the original cell was an incomplete fragment
# ("for comb in itertool.com") that did not parse (SyntaxError) and also
# misspelled the module name; neutralised as a comment. The following cell
# shows the intended itertools.combinations usage.
list(itertools.combinations(abc, 5))
list(itertools.permutations(num))
list(itertools.product(num, abc))
for number, letter in itertools.product(num, abc):
print(number, letter)
# * List comprehension:
# +
# Collect the squares of the even numbers in 0..4.
z = []
for i in range(0, 5):
    if i%2 == 0:
        z.append(i**2)
        # BUG FIX: the original also called np.random.randn(i, i) here, but
        # numpy is never imported in this notebook, so the line raised
        # NameError (and its result was discarded anyway). Removed.
z
# -
z = [[] for i in range(0, 5)]
z
# BUG FIX: the original line ended with a stray "elif" which is a
# SyntaxError inside a comprehension; a comprehension only supports "if".
z = [ i**2 for i in range(0, 10) if i % 2 == 0 ]
z
# * Dict comprehension:
d = {'a': 1, 'b': 2, 'c': 3}
d
d_inv = {valor:llave for llave, valor in d.items()}
d_inv
# * La barra baja `_`: Si no vamos a utilizar una variable, se pone la barra baja para no gastar memoria
a, b = (1, 2)
print(a)
a, _ = (1, 2)
print(a)
# Y cuando no sabemos cuantas variables va a tener el objeto que nos van a devolver usamos `*`:
a, b = (1, 2, 3, 4, 5)
a, b, *c = (1, 2, 3, 4, 5)
print(a)
print(b)
print(c)
a, b, *_ = (1, 2, 3, 4, 5)
print(a)
print(b)
a, b, *c, d = (1, 2, 3, 4, 5)
print(a)
print(b)
print(c)
print(d)
# Estos conceptos son parecidos a los de `*args` y `**kwargs` de como argumentos de funciones en Python.
# ### `lambda`, `map` y `filter`
#
# `lambda` se usa para crear funciones pequeñas sin nombre, para usar en la ejecución del programa. Se suele utilizar en conjunto con `map` y `filter`.
suma = lambda x, y: x + y
suma(3, 4)
# ?map
list(map(lambda x: x**2, [1, 2, 3]))
for i in map(lambda x: x**2, [1, 2, 3]):
print(i)
for i in map(lambda x,y: x + y, [1, 2, 3], [4, 5, 6]):
print(i)
m = map(lambda x: x**2, [1,2,3])
m[1]
for i in m:
print(i)
break
list(m)
list(m)
# ?filter
for i in filter(lambda x: x%2 == 0, [1,2,3,4,5,6,7,8,9]):
print(i)
|
content/course/Sesion05/clase05.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Windows 10, py-visa
#
# Testing on more platforms.
# +
import mhs5200
signal_gen = mhs5200.MHS5200("COM4")
# +
import pyvisa
rm = pyvisa.ResourceManager()
rm.list_resources()
scope = rm.open_resource('USB0::0x1AB1::0x0588::DS1EU152500705::INSTR')
# -
for channel in [1, 2]:
for setting in ["BWLIMIT", "COUPLING", "DISPLAY", "INVERT", "OFFSET", "PROBE", "SCALE", "FILTER", "MEMORYDEPTH", "VERNIER"]:
try:
result = scope.query(f":CHANNEL{channel}:{setting}?")
print(f"{channel}:{setting}:{result}")
except:
print(f"FAILED: {channel}:{setting}")
import time
def test_frequency_amplitude(frequency, amplitude, signal_gen, scope):
    """Configure generator and scope for one (frequency, amplitude) point
    and print the scope's basic measurements for both channels.

    frequency  -- output frequency in Hz
    amplitude  -- peak-to-peak amplitude in volts
    signal_gen -- MHS5200 signal-generator handle
    scope      -- pyvisa resource for the oscilloscope
    """
    # NOTE(review): this first configuration pass is repeated below (with
    # offset added); presumably redundant -- confirm against the instrument.
    for chan in signal_gen.channels:
        chan.frequency=frequency
        chan.amplitude=amplitude
        chan.phase=0
    period = 1/float(frequency)
    # Timebase chosen so roughly five periods fit on screen.
    timescale="{:.20f}".format(float(period/5))
    # Configure scope
    scope.write(f":MEASURE:TOTAL ON")
    scope.write(f":TIMebase:SCALE {timescale}")
    for scope_channel in [1, 2]:
        scope.write(f":CHANNEL{scope_channel}:probe 1")
        scope.write(f":CHANNEL{scope_channel}:scale {amplitude/5}")
        scope.write(f":CHANNEL{scope_channel}:offset 0")
    # Configure signal generator
    for chan in signal_gen.channels:
        chan.frequency=frequency
        chan.amplitude=amplitude
        chan.offset = 0
        chan.phase=0
    # Query and print the basic measurements for each scope channel.
    for source in ["CHAN1", "CHAN2"]:
        scope.write(f":MEASURE:SOURCE {source}")
        time.sleep(1)  # allow the measurement to settle
        for param in ["FREQUENCY", "VPP", "VMIN", "VMAX", "VAMPLITUDE"]:
            measured = scope.query_ascii_values(f":MEASURE:{param}?")[0]
            print(f"{source}:{param}:{measured}")
test_frequency_amplitude(100, 10, signal_gen=signal_gen, scope=scope)
import numpy as np
np.log10(50e6)
for frequency in np.logspace(np.log10(100), np.log10(1000000), 2):
for amplitude in [20]:
test_frequency_amplitude(frequency, amplitude, signal_gen=signal_gen, scope=scope)
import pandas as pd
# BUG FIX: pandas is imported as "pd" above; "pandas.DataFrame()" raised NameError.
df = pd.DataFrame()
import uuid
def test_frequency_amplitude2(frequency, amplitude, signal_gen, scope):
    """Configure generator and scope for one (frequency, amplitude) point and
    return the scope measurements as a one-row DataFrame.

    frequency  -- output frequency in Hz
    amplitude  -- peak-to-peak amplitude in volts
    signal_gen -- MHS5200 signal-generator handle
    scope      -- pyvisa resource for the oscilloscope
    """
    # NOTE(review): this first configuration pass is repeated below (with
    # offset added); kept to preserve the exact instrument command sequence.
    for chan in signal_gen.channels:
        chan.frequency=frequency
        chan.amplitude=amplitude
        chan.phase=0
    period = 1/float(frequency)
    # Timebase chosen so roughly five periods fit on screen.
    timescale="{:.20f}".format(float(period/5))
    # Configure scope
    scope.write(f":MEASURE:TOTAL ON")
    scope.write(f":TIMebase:SCALE {timescale}")
    for scope_channel in [1, 2]:
        scope.write(f":CHANNEL{scope_channel}:probe 1")
        scope.write(f":CHANNEL{scope_channel}:scale {amplitude/5}")
        scope.write(f":CHANNEL{scope_channel}:offset 0")
    # Configure signal generator
    for chan in signal_gen.channels:
        chan.frequency=frequency
        chan.amplitude=amplitude
        chan.offset = 0
        chan.phase=0
    # One result row; renamed from "df" to avoid shadowing the module-level df.
    record = dict()
    record["uuid"] = str(uuid.uuid4())
    record["frequency"] = frequency
    record["amplitude"] = amplitude
    for source in ["CHAN1", "CHAN2"]:
        scope.write(f":MEASURE:SOURCE {source}")
        time.sleep(1)  # allow the measurement to settle
        for param in ["FREQUENCY", "VPP", "VMIN", "VMAX", "VAMPLITUDE"]:
            measured = scope.query_ascii_values(f":MEASURE:{param}?")[0]
            record[f"{source}_{param}"] = measured
    # BUG FIX: pandas is imported as "pd"; "pandas.DataFrame" raised NameError.
    return pd.DataFrame(record, index=[0])
df = df.append(test_frequency_amplitude2(100, 10, signal_gen, scope))
df = pd.DataFrame()
for frequency in np.logspace(np.log10(100), np.log10(1000000), 10):
for amplitude in [1, 5, 10, 20]:
result_df = test_frequency_amplitude2(frequency, amplitude, signal_gen=signal_gen, scope=scope)
df = df.append(result_df)
df.hist("frequency", bins=10)
def test_frequency_amplitude3(frequency, amplitude, signal_gen, scope):
    """Like test_frequency_amplitude2, but records the scope's full
    measurement set (rise/fall times, duty cycles, delays, ...).

    Returns a one-row pandas.DataFrame: uuid, frequency, amplitude, plus one
    column per (channel, measurement) pair.
    """
    for chan in signal_gen.channels:
        chan.frequency = frequency
        chan.amplitude = amplitude
        chan.phase = 0
    period = 1 / float(frequency)
    # Fixed-point format avoids scientific notation, which some scopes reject.
    timescale = "{:.20f}".format(float(period / 5))
    # Configure scope
    scope.write(f":MEASURE:TOTAL ON")
    scope.write(f":TIMebase:SCALE {timescale}")
    for scope_channel in [1, 2]:
        scope.write(f":CHANNEL{scope_channel}:probe 1")
        scope.write(f":CHANNEL{scope_channel}:scale {amplitude/5}")
        scope.write(f":CHANNEL{scope_channel}:offset 0")
    # Configure signal generator (second pass also zeroes the DC offset)
    for chan in signal_gen.channels:
        chan.frequency = frequency
        chan.amplitude = amplitude
        chan.offset = 0
        chan.phase = 0
    row = dict()
    row["uuid"] = str(uuid.uuid4())
    row["frequency"] = frequency
    row["amplitude"] = amplitude
    for source in ["CHAN1", "CHAN2"]:
        scope.write(f":MEASURE:SOURCE {source}")
        time.sleep(1)  # let the measurement-source switch settle
        for param in ['VPP',
                      'VMAX',
                      'VMIN',
                      'VAMPlitude',
                      'VTOP',
                      'VBASe',
                      'VAVerage',
                      'VRMS',
                      'OVERshoot',
                      'PREShoot',
                      'FREQuency',
                      'RISetime',
                      'FALLtime',
                      'PERiod',
                      'PWIDth',
                      'NWIDth',
                      'PDUTycycle',
                      'NDUTycycle',
                      'PDELay',
                      'NDELay',
                      'TOTal',
                      'SOURce',]:
            # BUG FIX: bare `except:` also swallows KeyboardInterrupt /
            # SystemExit; narrow to Exception.
            try:
                measured = scope.query_ascii_values(f":MEASURE:{param}?")[0]
            except Exception:
                # Non-numeric replies (e.g. SOURce) cannot be parsed as ASCII
                # values; keep the first character of the raw reply, which
                # preserves the original behavior.
                measured = scope.query(f":MEASURE:{param}?")[0]
            row[f"{source}_{param}"] = measured
    # BUG FIX: was `pandas.DataFrame(...)` — pandas is imported as `pd`.
    return pd.DataFrame(row, index=[0])
# Wider sweep: 100 Hz .. 100 MHz, 20 log-spaced points.
# NOTE(review): this still calls test_frequency_amplitude2 even though
# test_frequency_amplitude3 (extended measurement set) was just defined —
# confirm which was intended.
df = pd.DataFrame()
for frequency in np.logspace(np.log10(100), np.log10(100000000), 20):
    for amplitude in [1, 5, 10, 20]:
        result_df = test_frequency_amplitude2(frequency, amplitude, signal_gen=signal_gen, scope=scope)
        # BUG FIX: DataFrame.append was removed in pandas 2.0.
        df = pd.concat([df, result_df])
# +
# Plot styling for the measurement summaries.
import seaborn as sns
sns.set(
    rc={
        "figure.figsize": (11, 8.5),
        "figure.dpi": 300,
        "figure.facecolor": "w",
        "figure.edgecolor": "k",
    }
)
palette = (sns.color_palette("Paired"))
sns.palplot(palette)
sns.set_palette(palette)
# -
# NOTE(review): .agg() with no arguments raises a TypeError — an aggregation
# function (e.g. "mean") still needs to be chosen here.
df.groupby(["frequency", "amplitude"]).agg()
# Raw waveform readout experiments (binary transfer).
data = scope.query_binary_values(":WAVEFORM:DATA? CHAN1")
plt.plot(data)
data = scope.query_binary_values(":WAVEFORM:DATA? CHAN2")
plt.plot(data)
scope.query(":ACQ:SAMP? CHANnel2")  # sample rate of channel 2
scope.query(":ACQ:MEMD?")  # current memory-depth setting
scope.write(":ACQ:MEMD LONG")
# Verify both memory-depth settings read back as written.
for depth in ["NORMAL", "LONG"]:
    scope.write(f":ACQ:MEMD {depth}")
    time.sleep(0.5)  # allow the setting to apply before the read-back
    assert depth == scope.query(":ACQ:MEMD?")
import matplotlib.pyplot as plt
# Re-read waveforms as unsigned bytes ("B" datatype code).
data = scope.query_binary_values(":WAVEFORM:DATA? CHAN1", "B")
plt.plot(data)
data = scope.query_binary_values(":WAVEFORM:DATA? CHAN2", "B")
plt.plot(data)
# NOTE(review): `scope.q` looks like an unfinished line — on pyvisa
# resources this raises AttributeError.
scope.q
# ?scope.query_binary_values
scope.query(":WAVEFORM:POINTS:MODE?")
# Manual binary transfer: issue the query, then strip the 10-byte
# IEEE-488.2 definite-length block header from the raw response.
scope.write(":WAVEFORM:DATA? CHANNEL1")
header = scope.read_raw()[:10]
header
scope.write(":WAVEFORM:DATA? CHANNEL1")
data = scope.read_raw()[10:]
data[0]
data[0:1]
data[0:2]
import numpy as np
# Scratch experiments with numpy byte (de)serialization.
np.array(56).tobytes()
np.array(56).tobytes("C")
np.array(56).tobytes("F")
np.array(56.0).tobytes("F")
np.frombuffer(np.array(56).tobytes("F"))
dt = np.dtype(float)
dt = dt.newbyteorder(">")  # big-endian float64
# NOTE(review): frombuffer defaults to float64 and `dt` is never passed —
# presumably np.frombuffer(data, dtype=dt) was intended; confirm.
plt.plot(np.frombuffer(data))
np.frombuffer(b'\x01\x02', dtype=np.uint8)
np.frombuffer(b'\x01\x02\x03\x04\x05', dtype=np.uint8, count=3)
dt = np.dtype(float)
dt = dt.newbyteorder("<")  # little-endian float64
plt.plot(np.frombuffer(data))
|
DevelopmentNotebooks/win_pyvisa-Copy2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # One Port Tiered Calibration
# ## Intro
# A one-port network analyzer can be used to measure a two-port device, provided that the device is reciprocal. This is accomplished by performing two calibrations, which is why it's called a *tiered* calibration.
#
# First, the VNA is calibrated at the test-port like normal. This is called the *first tier*. Next, the device is connected to the test-port, and a calibration is performed at the far end of the device, the *second tier*. A diagram is shown below,
#
# 
# This notebook will demonstrate how to use [skrf](www.scikit-rf.org) to do a two-tiered one-port calibration. We'll use data that was taken to characterize a waveguide-to-CPW probe. So, for this specific example the diagram above looks like:
#
# 
# ## Some Data
# The data available is the folders `'tier1/'` and `'tier2/'`.
# !ls {"oneport_tiered_calibration/"}
# (if you don't have the git repo for these examples, the data for this notebook can be found [here](https://github.com/scikit-rf/examples/tree/master/oneport_tiered_calibration))
#
# In each folder you will find the two sub-folders, called `'ideals/' ` and `'measured/'`. These contain touchstone files of the calibration standards ideal and measured responses, respectively.
# !ls {"oneport_tiered_calibration/tier1/"}
# The first tier is at waveguide interface, and consisted of the following set of standards
#
# * short
# * delay short
# * load
# * radiating open (literally an open waveguide)
# !ls {"oneport_tiered_calibration/tier1/measured/"}
# ## Creating Calibrations
# ### Tier 1
# First defining the calibration for *Tier 1*
# +
from skrf.calibration import OnePort
import skrf as rf
# %matplotlib inline
from pylab import *
rf.stylely()
# First-tier (test-port) calibration: ideal and measured standards are paired
# up by their identical file names (sloppy_input matches them by name).
tier1_ideals = rf.read_all_networks('oneport_tiered_calibration/tier1/ideals/')
tier1_measured = rf.read_all_networks('oneport_tiered_calibration/tier1/measured/')
tier1 = OnePort(measured = tier1_measured,
                ideals = tier1_ideals,
                name = 'tier1',
                sloppy_input=True)
tier1
# -
# Because we saved corresponding *ideal* and *measured* standards with identical names, the Calibration will automatically align our standards upon initialization. (More info on creating Calibration objects this can be found in [the docs](http://scikit-rf.readthedocs.org/en/latest/tutorials/calibration.html).)
#
# Similarly for the second tier 2,
# ### Tier 2
# +
# Second-tier calibration: the same style of standards measured through the
# DUT (waveguide-to-CPW probe) attached at the test port.
tier2_ideals = rf.read_all_networks('oneport_tiered_calibration/tier2/ideals/')
tier2_measured = rf.read_all_networks('oneport_tiered_calibration/tier2/measured/')
tier2 = OnePort(measured = tier2_measured,
                ideals = tier2_ideals,
                name = 'tier2',
                sloppy_input=True)
tier2
# -
# ## Error Networks
# Each one-port Calibration contains a two-port error network, that is determined from the calculated error coefficients. The error network for *tier1* models the VNA, while the error network for *tier2* represents the VNA **and** the DUT. These can be visualized through the parameter `'error_ntwk'`.
#
#
# For tier 1,
# Tier-1 error network: a two-port model of the VNA alone.
tier1.error_ntwk.plot_s_db()
title('Tier 1 Error Network')
# Similarly for tier 2,
tier2.error_ntwk.plot_s_db()
title('Tier 2 Error Network')
# ## De-embedding the DUT
# As previously stated, the error network for *tier1* models the VNA, and the error network for *tier2* represents the VNA+DUT. So to determine the DUT's response, we cascade the inverse S-parameters of the VNA with the VNA+DUT.
#
# $$ DUT = VNA^{-1}\cdot (VNA \cdot DUT)$$
#
# In skrf, this is done as follows
dut = tier1.error_ntwk.inv ** tier2.error_ntwk  # ** cascades two-ports in skrf
dut.name = 'probe'
dut.plot_s_db()
title('Probe S-parameters')
ylim(-60,10)
# You may want to save this to disk, for future use,
#
#     dut.write_touchstone()
# !ls {"probe*"}
|
doc/source/examples/metrology/One Port Tiered Calibration.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Data Scientist Project - Seattle AirBnB Data
#
# The purpose of the below code is to perform a data analysis on the Seattle's AirBnb data and answer the below questions
#
# ##### Question 1:
# Is Seattle a seasonal visit attraction?
#
# ##### Question 2:
# Are the cancellation policy, the reviews and the neighbourhood correlated with the booking count?
#
# ##### Question 3:
# Which are the key indicators of a listing price?
#import all the needed libraries
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import matplotlib.pyplot as plt
import seaborn as sns
import DataScientistProject_Seattle_Functions as t
# %matplotlib inline
#set the display width of the columns
pd.options.display.max_colwidth = 50
# In order to process the files, I uploaded them in Jupyter notebook under folder AirBNBSeattleData
#read the CSV files from the folder and store them to pandas dataframes
df_calendar = pd.read_csv('./AirBNBSeattleData/calendar.csv')
df_listings = pd.read_csv('./AirBNBSeattleData/listings.csv')
#df_reviews = pd.read_csv('./AirBNBSeattleData/reviews.csv')
# First step is to try and understand the available data
#check the calendar df
df_calendar.head()
#check the listings df
df_listings.head()
# Check the shape of the datasets
df_calendar.shape
df_listings.shape
# ##### Question 1:
# Is Seattle a seasonal visit attraction?
#
# It seems that we are in position to answer the 1st question, as the calendar data provide a view of the bookings of all the apartments in Seattle together with the date period.
#
# We will process the dataframe further in order to understand it better
#Get the maximum and minimum dates
df_calendar[['date']].agg(['max', 'min'])
#Perform a count aggregation on the date column, to check that we have data for all the months in between
pd.DatetimeIndex(df_calendar['date']).month.value_counts().sort_index()
# Columns with no missing values at all
set(df_calendar.columns[df_calendar.isnull().mean()==0])
# From the above analysis it seems that the calendar dataframe:
#
# 1. Has data for a full year
# 2. Has similar amount of data for all the months
# 3. Has available all the values for the columns we plan to use 'available' and 'date'
#Drop the not needed columns and store in a new dataframe
bookings_per_month = df_calendar.drop(['listing_id', 'price'], axis=1)
#Count the bookings per date
# NOTE(review): 'available' == 'f' is treated as "booked" — this assumes
# unavailable implies booked (not e.g. host-blocked dates); verify.
bookings_per_month = bookings_per_month[bookings_per_month['available']=='f'].groupby('date').count()
#Name the new column as Bookings_Count
bookings_per_month = bookings_per_month.reset_index().rename(columns={"available": "Bookings_Count"})
#Check the results
bookings_per_month.head()
#Introduce the month column
bookings_per_month['month'] = pd.DatetimeIndex(bookings_per_month['date']).month
#Drop the date column
bookings_per_month = bookings_per_month.drop(['date'], axis=1)
#Sum the bookings per month
bookings_per_month = bookings_per_month.groupby('month', as_index=False).sum()
#bookings_per_month = bookings_per_month.reset_index().rename(columns={"available": "Bookings_Count"})
#Check the results
bookings_per_month.head()
#Plot a barchart with the breakdown per months
#Sum per months and use it as index. Store in a plot dataframe
plot_df = bookings_per_month.groupby('month', as_index=True).sum()
(plot_df).plot(kind="bar");
plt.title("Bookings Per Month Seattle");
plt.xlabel("Month")
plt.ylabel("Bookings Seattle")
plt.xticks(rotation=45, horizontalalignment="center")
# It seems that January is a high booking month for Seattle, maybe it is related to many visits for NYE vacations or there are some famous events at that period.
#
# Other high booking months are July and August, maybe the good weather is related.
#
# It would be interesting to check the above breakdown at season level too
# +
#Prepare the conditions for the month values to be marked as segments
conditions = [
    ((bookings_per_month['month']==12)|(bookings_per_month['month']==1)|(bookings_per_month['month']==2)),
    ((bookings_per_month['month']==3)|(bookings_per_month['month']==4)|(bookings_per_month['month']==5)),
    ((bookings_per_month['month']==6)|(bookings_per_month['month']==7)|(bookings_per_month['month']==8)),
    ((bookings_per_month['month']==9)|(bookings_per_month['month']==10)|(bookings_per_month['month']==11))
]
#The segment values are the 4 seasons, aligned positionally with `conditions`
values = ['Winter', 'Spring', 'Summer', 'Fall']
#Create the Season column
bookings_per_month['Season'] = np.select(conditions, values)
bookings_per_month.head()
# -
#Plot a barchart with the breakdown per season
#Drop the month column and store in a plot 2 dataframe
plot_df_2 = bookings_per_month.drop(['month'], axis=1)
#Sum the bookings per season and use the season as the index
plot_df_2 = plot_df_2.groupby('Season', as_index=True).sum()
(plot_df_2).plot(kind="bar");
plt.title("Bookings Per Season Seattle");
plt.xlabel("Season")
plt.ylabel("Bookings Seattle")
plt.xticks(rotation=45, horizontalalignment="center")
# It seems that the top booking seasons are Summer and Winter. It is maybe related with people's vacation periods
# It would be interesting to compare these results with another city to see the difference. In the below cells we are reading the Boston AirBnB data and perform the same steps as above for Seattle.
#
# I will put all the code together and without comments, in order to run quickly
# +
#In order to process the files, I uploaded them in Jupyter notebook under folder AirBNBBostonData
#read the CSV files from the folder
df_calendar_Boston = pd.read_csv('./AirBNBBostonData/calendar.csv')
df_listings_Boston = pd.read_csv('./AirBNBBostonData/listings.csv')
#df_reviews_Boston = pd.read_csv('./AirBNBBostonData/reviews.csv')
#Get the number of rows and columns per dataset
df_listings_Boston.shape
#3585, 95
df_calendar_Boston.shape
#1308890, 4
#Get the minimum and maximum dates of the calendar dataset
df_calendar_Boston[['date']].agg(['max', 'min'])
#2016-09-06
#2017-09-05
#How many columns have no missing values
set(df_calendar_Boston.columns[df_calendar_Boston.isnull().mean()==0])
#{'available', 'date', 'listing_id'}
# Same pipeline as for Seattle: count the booked ('f') rows per date, then per month.
bookings_per_month_Boston = df_calendar_Boston[df_calendar_Boston['available']=='f'].groupby('date').count()
bookings_per_month_Boston = bookings_per_month_Boston.reset_index().rename(columns={"available": "Bookings_Count"})
bookings_per_month_Boston['month'] = pd.DatetimeIndex(bookings_per_month_Boston['date']).month
bookings_per_month_Boston = bookings_per_month_Boston.drop(['listing_id', 'price','date'], axis=1)
bookings_per_month_Boston = bookings_per_month_Boston.groupby('month', as_index=False).sum()
plot_df_Boston = bookings_per_month_Boston.groupby('month', as_index=True).sum()
(plot_df_Boston).plot(kind="bar");
plt.title("Bookings Per Month Boston");
plt.xlabel("Month")
plt.ylabel("Bookings Boston")
plt.xticks(rotation=45, horizontalalignment="center")
# -
# It seems that in Boston the high booking months are September and October. Let's see also on season level
# +
#Prepare the conditions for the month values to be marked as segments
conditions = [
    ((bookings_per_month_Boston['month']==12)|(bookings_per_month_Boston['month']==1)|(bookings_per_month_Boston['month']==2)),
    ((bookings_per_month_Boston['month']==3)|(bookings_per_month_Boston['month']==4)|(bookings_per_month_Boston['month']==5)),
    ((bookings_per_month_Boston['month']==6)|(bookings_per_month_Boston['month']==7)|(bookings_per_month_Boston['month']==8)),
    ((bookings_per_month_Boston['month']==9)|(bookings_per_month_Boston['month']==10)|(bookings_per_month_Boston['month']==11))
]
#The segment values are the 4 seasons, aligned positionally with `conditions`
values = ['Winter', 'Spring', 'Summer', 'Fall']
#Create the Season column
bookings_per_month_Boston['Season'] = np.select(conditions, values)
#Plot a barchart with the breakdown per season
#Drop the month column and store in a plot 2 dataframe
plot_df_2_Boston = bookings_per_month_Boston.drop(['month'], axis=1)
#Sum the bookings per season and use the season as the index
plot_df_2_Boston = plot_df_2_Boston.groupby('Season', as_index=True).sum()
(plot_df_2_Boston).plot(kind="bar");
plt.title("Bookings Per Season Boston");
plt.xlabel("Season")
plt.ylabel("Bookings Boston")
plt.xticks(rotation=45, horizontalalignment="center")
# -
# You can see the difference in the distribution for the two cities. In Boston the high booking seasons are Fall and Spring — completely the opposite from Seattle. Boston has more visits during non-vacation periods.
# ##### Question 2:
# Are the cancellation policy, the reviews and the neighbourhood correlated with the booking count?
#
# In order to answer this question we will need to join the booking count data calculated above with the listings dataframe, using the ID from the listings dataframe and the Listing ID from the calendar dataframe.
#
# Then we will do the breakdown for the 3 different attributes (cancellation policy, reviews and neighbourhood)
df_calendar.head()
#Drop the not needed columns and store on a new dataframe
bookings_per_listing = df_calendar.drop(['date', 'price'], axis=1)
#Calculate bookings count for each listing ('f' rows = booked, as before)
bookings_per_listing = bookings_per_listing[df_calendar['available']=='f'].groupby('listing_id').count()
#Rename the counted column to Bookings_Count
bookings_per_listing = bookings_per_listing.reset_index().rename(columns={"available": "Bookings_Count"})
#Left join the Bookings_Count column onto df_listings, matching listing_id to id
df_listings = df_listings.join(bookings_per_listing.set_index('listing_id'), on='id', how='left')
df_listings.head()
#Verify that df_listings gained exactly one column and we did not lose any rows
df_listings.shape
# Now we are ready to check the correlation between booking counts and cancellation policy
#Check the share of null values
df_listings['cancellation_policy'].isnull().mean()
#Breakdown of the different cancellation policies
df_listings['cancellation_policy'].value_counts()
#Total bookings count per cancellation policy
df_listings.groupby('cancellation_policy')['Bookings_Count'].sum()
# In order to check the correlation of the 2 attributes, we will need to normalize the results.
# Hence we will perform the division of the total bookings counts by the amount of the listings per cancellation policy type
#Calculate the average booking count per apartment for each cancellation policy and store on a new df
bookings_per_appartment_per_cancellation = df_listings.groupby('cancellation_policy')['Bookings_Count'].sum()/df_listings['cancellation_policy'].value_counts()
bookings_per_appartment_per_cancellation.head()
#Plot the results
(bookings_per_appartment_per_cancellation).plot(kind="bar");
plt.title("Booking Rate per Cancellation Policy Seattle");
plt.xlabel("Cancellation Policy")
plt.ylabel("Avg Bookings per Listing")
plt.xticks(rotation=45, horizontalalignment="center")
# We can see that the strict cancellation policy has the lowest average while the flexible has the highest
# Now we are ready to check the correlation between booking counts and review rates
#Check the share of null values
df_listings['review_scores_value'].isnull().mean()
#Check the count of null values
df_listings['review_scores_value'].isnull().sum()
df_listings['review_scores_value'].value_counts().sort_index()
# As we have 17% not available review scores, we should either remove the rows or replace the null values.
#
# Because there is a big distribution towards high scores, I will go with the remove option.
#Create a new df from the listings df with only the two columns of interest
bookings_per_review_score = df_listings[['Bookings_Count','review_scores_value']]
bookings_per_review_score = bookings_per_review_score.dropna(subset=['review_scores_value'], axis=0)
bookings_per_review_score['review_scores_value'].value_counts().sort_index()
#Check the total bookings per review rate
bookings_per_review_score.groupby('review_scores_value')['Bookings_Count'].sum()
# We can see that the high score reviews (9,10) have too many bookings. For this reason we will use segmentations instead of the real values
# +
#Set the segments (aligned positionally with `values` below)
conditions = [
    (bookings_per_review_score['review_scores_value']<=8),
    (bookings_per_review_score['review_scores_value']==9),
    (bookings_per_review_score['review_scores_value']==10)
]
values = ['<=8', '9', '10']
#Create a new column named review_scores_segments as per the above conditions
bookings_per_review_score['review_scores_segments'] = np.select(conditions, values)
bookings_per_review_score.head()
# -
# Again the correlation will be calculated with normalized results
# (average bookings per listing within each review-score segment)
bookings_per_review_score = bookings_per_review_score.groupby('review_scores_segments')['Bookings_Count'].sum()/bookings_per_review_score['review_scores_segments'].value_counts()
bookings_per_review_score.head()
(bookings_per_review_score).plot(kind="bar");
plt.title("Booking Rate per Review Rate Seattle");
plt.xlabel("Review Rate")
plt.ylabel("Avg Bookings per Listing")
plt.xticks(rotation=45, horizontalalignment="center")
# The review rates of 10 have higher booking counts as expected.
# Though, it is strange that the review rates '<=8' have greater booking counts than 9. Maybe there are other, more important factors, or maybe the data amount for the '<=8' segment is not enough for a proper answer.
# Now we can go and check the correlation between booking count and neighbourhood
#Pick columns Bookings_Count and neighbourhood_group_cleansed and store on a new dataframe
bookings_per_neighbourhood = df_listings[['Bookings_Count','neighbourhood_group_cleansed']]
bookings_per_neighbourhood.head()
#Check the share of null values
bookings_per_neighbourhood['neighbourhood_group_cleansed'].isnull().mean()
#Check the count of listings per neighbourhood group
bookings_per_neighbourhood['neighbourhood_group_cleansed'].value_counts().sort_index()
#Check the amount of bookings per neighbourhood group
bookings_per_neighbourhood.groupby('neighbourhood_group_cleansed')['Bookings_Count'].sum()
# We will proceed with normalization of the data
# +
#Normalize the data per apartment (average bookings per listing)
norm_bookings_per_neighbourhood = bookings_per_neighbourhood.groupby('neighbourhood_group_cleansed')['Bookings_Count'].sum()/bookings_per_neighbourhood['neighbourhood_group_cleansed'].value_counts()
#Sort the results based on the booking rate
norm_bookings_per_neighbourhood = norm_bookings_per_neighbourhood.sort_values(axis=0, ascending=False)
#Plot the results
(norm_bookings_per_neighbourhood).plot(kind="bar");
plt.title("Booking Rate per NeighBourHood Group Seattle");
plt.xlabel("NeighBourHood Group")
plt.ylabel("Avg Bookings per Listing")
plt.xticks(rotation=65, horizontalalignment="center")
# -
# You can see that there are some specific neighbourhood groups much more popular than others
# ##### Question 3:
# Which are the key indicators of an apartment price?
#Store the listings dataframe to a new df
# NOTE(review): this is a reference, not a copy — but the drop() below
# returns a new frame, so df_listings itself is left untouched.
df_listings_model = df_listings
#Drop the Bookings_Count column from the new df (it is not a price feature)
df_listings_model = df_listings_model.drop(['Bookings_Count'], axis=1)
#Check the shape
df_listings_model.shape
# In order to answer the above question we will build a linear regression model and we will try to predict the price of each listing based on its characteristics
# I will create a function which will be able to exclude the fields with big or low variance
#Function to isolate columns with very big or very small variance (>=1000 or <=2)
def find_proper_columns (df,min_var,max_var):
    """Collect names of columns whose number of distinct values is
    <= min_var (near-constant) or >= max_var (quasi-unique, e.g. free text).

    Returns the list of excluded column names and, for backward
    compatibility with the rest of this notebook, also stores it on the
    function attribute `find_proper_columns.exclude_columns`.
    """
    exclude_columns = []
    for col in df.columns:
        # value_counts().count() == number of distinct non-null values
        n_unique = df[col].value_counts().count()
        if n_unique >= max_var or n_unique <= min_var:
            exclude_columns.append(col)
    # Legacy side channel: callers read find_proper_columns.exclude_columns.
    find_proper_columns.exclude_columns = exclude_columns
    # IMPROVEMENT: also return the result (the original only set the attribute).
    return exclude_columns
# I will run the function by setting the relevant minimum and maximum values
#Run the function using the listings dataframe (results land on the
#function attribute `exclude_columns`)
find_proper_columns(df_listings_model,2,1000)
find_proper_columns.exclude_columns[:5]
len(find_proper_columns.exclude_columns)
# Exclude the columns that have more than 30% null values
#Find columns with more than 30% null values
exclude_mean = list(set(df_listings_model.columns[df_listings_model.isnull().mean()>0.30]))
exclude_mean[:5]
len(exclude_mean)
# Create 2 functions that do the below:
#
# 1. Transform a list of columns from string to numeric
# 2. Transform a list of columns from string dates to numeric values with the logic now() - date
# +
#the function to remove non-numeric chars from numeric columns
def str_to_numeric(df,str_to_numeric_list):
    """Strip currency/percent formatting from the listed columns in place
    and cast them to float (e.g. '$1,000' -> 1000.0, '85%' -> 85.0)."""
    for i in str_to_numeric_list:
        # BUG FIX: pass regex=False so '$' is removed literally — with the
        # old pandas default regex=True, '$' anchors end-of-string and
        # removes nothing, so astype(float) fails on values like '$1,000'.
        df[i] = df[i].str.replace(',', '', regex=False)
        df[i] = df[i].str.replace('$', '', regex=False)
        df[i] = df[i].str.replace('%', '', regex=False)
        df[i] = df[i].astype(float)
#the function to convert date values to days until today (numeric)
def date_to_days(df,date_to_days_list):
    """Replace each listed date column, in place, with the whole number of
    days from that date up to today (midnight-normalized)."""
    today = pd.Timestamp.now().normalize()
    for col in date_to_days_list:
        parsed = pd.to_datetime(df[col])
        df[col] = (today - parsed).dt.days
# -
# Run the above functions by setting the columns of our interest
# +
#Convert the below list of currency/percent columns to numbers
str_to_numeric_list = ['price','weekly_price','monthly_price','security_deposit','cleaning_fee','extra_people','host_response_rate']
str_to_numeric(df_listings_model,str_to_numeric_list)
#Convert the below list of date columns to days until today
date_to_days_list = ['first_review','last_review','host_since']
date_to_days(df_listings_model,date_to_days_list)
# -
# Create a function that will clean the dataset and run the linear regression model
#The function that cleans the data and fits the linear regression model
def clean_fit_linear_mod(df_listings, response_col, drop_columns, dummy_na, test_size=.3, rand_state=42):
    '''
    INPUT
    df_listings - pandas DataFrame with the listings data
    response_col - name of the column to predict (e.g. 'price')
    drop_columns - list of column names to exclude from the features
    dummy_na - forwarded to pd.get_dummies as drop_first (see NOTE below)
    test_size - fraction of rows held out for testing
    rand_state - random seed for the train/test split

    OUTPUT
    test_score, train_score - r2 on the test / training data
    lm_model - the fitted LinearRegression model
    X_train, X_test, y_train, y_test - the train/test split
    X, y - the full design matrix and response vector

    This function cleans df_listings using the following steps to produce X and y:
    1. Drop all the rows with no value in the response column
    2. Create y as the response column and X as all the other columns
    3. Drop the not needed columns from X using the drop_columns list
    4. For each numeric variable in X, fill missing values with the column mean
    5. Create dummy columns for all the categorical variables in X, drop the originals
    '''
    # Drop rows with a missing response value
    df_listings_predict_price = df_listings.dropna(subset=[response_col], axis=0)
    # BUG FIX: y must come from the row-filtered frame so X and y stay
    # aligned (the original took df_listings[response_col], which keeps
    # the rows that were just dropped).
    y = df_listings_predict_price[response_col]
    #Drop the response column from the features
    df_listings_predict_price = df_listings_predict_price.drop([response_col], axis=1)
    #Drop not needed columns
    df_listings_predict_price = df_listings_predict_price.drop(drop_columns, axis=1)
    # Fill numeric columns with the mean
    num_vars = df_listings_predict_price.select_dtypes(include=['float', 'int']).columns
    for col in num_vars:
        # assignment instead of inplace= avoids chained-assignment pitfalls
        df_listings_predict_price[col] = df_listings_predict_price[col].fillna(df_listings_predict_price[col].mean())
    # Dummy the categorical variables
    # NOTE(review): `dummy_na` is forwarded as get_dummies' drop_first, not
    # its dummy_na parameter — the name suggests otherwise; confirm intent.
    cat_vars = df_listings_predict_price.select_dtypes(include=['object']).copy().columns
    for var in cat_vars:
        df_listings_predict_price = pd.concat([df_listings_predict_price.drop(var, axis=1), pd.get_dummies(df_listings_predict_price[var], prefix=var, prefix_sep='_', drop_first=dummy_na)], axis=1)
    X = df_listings_predict_price
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=rand_state)
    # BUG FIX: LinearRegression's normalize= parameter was deprecated in
    # scikit-learn 1.0 and removed in 1.2; plain OLS predictions are the
    # same, so it is dropped here.
    lm_model = LinearRegression()
    #Fit
    lm_model.fit(X_train, y_train)
    #Predict and score the model
    y_train_preds = lm_model.predict(X_train)
    y_test_preds = lm_model.predict(X_test)
    train_score = r2_score(y_train, y_train_preds)
    test_score = r2_score(y_test, y_test_preds)
    return test_score, train_score, lm_model, X_train, X_test, y_train, y_test, X, y
# Run the functions by preparing the drop columns lists
# +
# Keep only the columns that passed the variance and missing-value filters,
# minus a hand-picked set of high-cardinality location/text columns.
proper_columns = list(set(df_listings_model.columns) - set(find_proper_columns.exclude_columns) - set(exclude_mean)
                      -set(['neighbourhood_group_cleansed','zipcode','host_location','smart_location'
                            ,'host_neighbourhood','neighbourhood','host_verifications','calendar_updated'
                            ]))
drop_columns = list(set(df_listings_model.columns) - set(proper_columns))
test_score, train_score, t.lm_model, X_train, X_test, y_train, y_test, X, y = clean_fit_linear_mod(df_listings_model, 'price', drop_columns, dummy_na=True)
print("The rsquared on the training data was {}. The rsquared on the test data was {}.".format(train_score, test_score))
len(X.columns)
# -
# Run the provided cutoff function
# +
# Candidate minimum-non-zero-count cutoffs for pruning sparse dummy columns
# (see t.find_optimal_lm_mod in DataScientistProject_Seattle_Functions).
cutoffs = [5000, 3500, 2500, 1000, 100, 75, 50, 25, 10, 5, 2]
#Run this cell to pass your X and y to the model for testing
r2_scores_test, r2_scores_train, t.lm_model, X_train, X_test, y_train, y_test = t.find_optimal_lm_mod(X, y, cutoffs)
# -
# Check the best results that this model can give
print(X_train.shape[1]) #Number of columns
print(r2_scores_test[np.argmax(r2_scores_test)]) # The model we should implement test_r2
print(r2_scores_train[np.argmax(r2_scores_test)]) # The model we should implement train_r2
# +
#Run the coef function
coef_df = t.coef_weights(t.lm_model.coef_, X_train)
#A quick look at the top results
coef_df.head(10)
#coef_df.tail(10)
# -
# Remove the really high and low coefficients and rerun the model
# +
# Same feature selection as before, further excluding columns that showed
# extreme coefficients in the first fit.
proper_columns = list(set(df_listings_model.columns) - set(find_proper_columns.exclude_columns) - set(exclude_mean)
                      -set(['neighbourhood_group_cleansed','zipcode','host_location','smart_location'
                            ,'host_neighbourhood','neighbourhood','host_verifications','calendar_updated'
                            ,'host_total_listings_count','host_listings_count'
                            ,'maximum_nights','minimum_nights','city'
                            ,'availability_365','availability_60','availability_30'
                            ]))
drop_columns = list(set(df_listings_model.columns) - set(proper_columns))
test_score, train_score, t.lm_model, X_train, X_test, y_train, y_test, X, y = clean_fit_linear_mod(df_listings_model, 'price', drop_columns, dummy_na=True)
print("The rsquared on the training data was {}. The rsquared on the test data was {}.".format(train_score, test_score))
len(X.columns)
# -
# We can see from the above that by removing the really high and really low variables the model returned slightly better results
#
# Rerun the coefficients after the improved run
# +
#Run the coef function
coef_df = t.coef_weights(t.lm_model.coef_, X_train)
#Check the higher variables and the lower variables
coef_df.head(30)
#coef_df.tail(30)
# -
# NOTE(review): r2_scores_test/r2_scores_train below still come from the
# earlier cutoff run, not from this refit — confirm that is intended.
print(X_train.shape[1]) #Number of columns
print(r2_scores_test[np.argmax(r2_scores_test)]) # The model we should implement test_r2
print(r2_scores_train[np.argmax(r2_scores_test)]) # The model we should implement train_r2
# From the above code and steps, it seems that we are in position to build a linear model that predicts the price per listing well enough.
#
# There are many variables in the model (133), some of them more important than others.
# The r-squared is calculated as ~0.62, which is quite good.
#
# From the coefficient function we can see that the most important factors that affect the price of the apartments in Seattle are the neighbourhood, the type of the property and the room type.
|
DataScientistProject_Seattle.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # RC Setpoint generation
#
# We aim for acro mode control, where pilot inputs command rates. The most straight forward solution to multiply
# the normalized (-1.0 to 1.0) input by the maximum rate per axis. Additionally some math is applied to optimize
# it for better handling/feeling (expo).
#
# There are multiple variants available in open source flight controller codes. In the following the px4 variant
# is reimplemented and visualized.
import numpy as np
def px4_acro(pilot_in, e, g):
    """PX4-style acro rate-setpoint shaping (expo + superexpo).

    The stick input is clipped to [-0.999, 0.999] — this also keeps the
    superexpo denominator away from zero — e to [0, 0.99] and g to [0, 1].
    """
    stick = np.clip(pilot_in, -0.999, 0.999)
    expo_k = np.clip(e, 0.0, 0.99)
    superexpo_k = np.clip(g, 0.00, 1.0)
    # Cubic expo blend: the linear term fades out as expo_k grows.
    shaped = (1 - expo_k) * stick + expo_k * stick * stick * stick
    # Superexpo: rational sharpening around center stick.
    return shaped * (1 - superexpo_k) / (1 - abs(stick) * superexpo_k)
# ## Visualization
# +
# Dense input grid covering the full stick range for plotting.
input_range = np.linspace(-1.0, 1.0, 1000)
def plot_setpoint(pilot_in, e, g):
    """Plot the shaped curve over the full stick range and mark the current
    (pilot_in, output) operating point with a red dot.

    NOTE(review): relies on `plt`, which is imported in the next cell —
    cell execution order matters.
    """
    outputs = [px4_acro(x, e, g) for x in input_range]
    y = px4_acro(pilot_in, e, g)
    plt.figure(2)
    plt.plot(input_range, outputs)
    plt.plot(pilot_in, y, "ro")
    plt.show()
# +
# %matplotlib inline
from matplotlib import pyplot as plt
from ipywidgets import Layout, Button, Box, IntSlider, FloatSlider, Output, Label, interactive_output
# NOTE(review): input_slider_options and input_labels are unused below —
# superseded by the explicit FloatSlider definitions in `inputs`.
input_slider_options = {'value': 0.0, 'min':-1.0, 'max':1.0, 'step':0.01}
input_labels = ['input', 'e', 'g']
# One slider per px4_acro argument; dict keys match the function's parameter
# names so interactive_output can wire them up by name.
inputs = {
    "pilot_in": FloatSlider(description="input", value=0.0, min=-1.0, max=1.0, step=0.01),
    "e": FloatSlider(description="e", value=0.0, min=0.0, max=1.0, step=0.01),
    "g": FloatSlider(description="g", value=0.0, min=0.0, max=0.99, step=0.01),
}
items = [Label(value='Controls'), *inputs.values()]
# Controls stacked vertically on the left ...
vbox_layout = Layout(display='flex',
                     flex_flow='column',
                     align_items='stretch',
                     width='400px')
vbox = Box(children=items, layout=vbox_layout)
# ... next to the live plot on the right.
hbox_layout = Layout(display='flex',
                     flex_flow='row',
                     align_items='stretch',
                     width='100%')
out1 = interactive_output(plot_setpoint, inputs)
hbox = Box(children=(vbox, out1), layout=hbox_layout)
hbox
# -
# ## Test data
#
# Data generation for C/C++ implementation tests,
from itertools import product
# +
# Inputs deliberately include out-of-range values so the generated reference
# data also exercises the clipping behaviour of px4_acro.
pilot_input_values = [-1.1, -1.0, -0.3, 0.0, 0.88, 0.999, 1.0, 1.5]
e_input_values = [-0.001, 0.0, 0.4, 0.99, 1.0]
g_input_values = [-0.9, 0.0, 0.1, 0.5, 1.0, 2.9]
data = []
# Evaluate the reference implementation over the full cartesian input grid.
for p_in, e_in, g_in in product(pilot_input_values, e_input_values, g_input_values):
    res = px4_acro(p_in, e_in, g_in)
    data.append([p_in, e_in, g_in, res])
# +
data = np.array(data)
# Emit the reference values as a C array include file for the unit tests.
with open("../../tests/drivers/rc/src/test_rate_setpoint.inc", "w") as afile:
    afile.write("// This file is autogenerated by 03_SetpointGeneration, do not alter\n\n")
    # Bug fix: each row holds exactly four values (pilot_in, e, g, output);
    # the previous column comment wrongly listed a fifth "max_rate" column.
    afile.write("// Test Data columns: pilot_in, e, g, output\n")
    afile.write(f"const float test_data_setpoint[{data.shape[0]}][{data.shape[1]}] = {{\n")
    for row in data:
        afile.write(("\t{{" + len(row)*"{:2.16f}f, " +"}},\n").format(*row))
    afile.write("};\n")
# -
|
prototyping/05_Controller/nb03_setpoint_generation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ZjN_IJ8mhJ-4"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="form" id="sY3Ffd83hK3b"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="03Pw58e6mTHI"
# # NumPy API on TensorFlow
# + [markdown] id="7WpGysDJmZsg"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/guide/tf_numpy"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/tf_numpy.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/tf_numpy.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/tf_numpy.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] id="s2enCDi_FvCR"
# ## Overview
#
# TensorFlow implements a subset of the [NumPy API](https://numpy.org/doc/1.16), available as `tf.experimental.numpy`. This allows running NumPy code, accelerated by TensorFlow, while also allowing access to all of TensorFlow's APIs.
# + [markdown] id="ob1HNwUmYR5b"
# ## Setup
#
# Note: `tf.experimental.numpy` will be available in the stable branch starting from TensorFlow 2.4. For now, it is available in `nightly`.
# + id="-JyixXW8F-z0"
# !pip install --quiet --upgrade tf-nightly
# + id="AJR558zjAZQu"
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow.experimental.numpy as tnp
import timeit

# Record the TF version so the notebook output documents the environment.
print("Using TensorFlow version %s" % tf.__version__)
# + [markdown] id="yh2BwqUzH3C3"
# ## TensorFlow NumPy ND array
#
# An instance of `tf.experimental.numpy.ndarray`, which we will call **ND Array**, represents a multidimensional dense array of a given `dtype` placed on a certain device. Each one of these objects internally wraps a `tf.Tensor`. Check out the ND array class for useful methods like `ndarray.T`, `ndarray.reshape`, `ndarray.ravel` and others.
#
# First create an ND array object, and then invoke different methods.
# + id="-BHJjxigJ2H1"
# Create an ND array and check out different attributes.
ones = tnp.ones([5, 3], dtype=tnp.float32)
print("Created ND array with shape = %s, rank = %s, "
      "dtype = %s on device = %s\n" % (
          ones.shape, ones.ndim, ones.dtype, ones.data.device))

# Check out the internally wrapped `tf.Tensor` object.
print("The ND array wraps a tf.Tensor: %s\n" % ones.data)

# Try commonly used member functions.
print("ndarray.T has shape %s" % str(ones.T.shape))
# Bug fix: label previously read "narray.reshape(-1)".
print("ndarray.reshape(-1) has shape %s" % ones.reshape(-1).shape)
# + [markdown] id="Mub8-dvJMUr4"
# ### Type promotion
#
# TensorFlow NumPy APIs have well-defined semantics for converting literals to ND array, as well as for performing type promotion on ND array inputs. Please see [`np.result_type`](https://numpy.org/doc/stable/reference/generated/numpy.result_type.html) for more details. When converting literals to ND array, NumPy prefers wide types like `tnp.int64` and `tnp.float64`.
#
# In contrast, `tf.convert_to_tensor` prefers `tf.int32` and `tf.float32` types for converting constants to `tf.Tensor`. TensorFlow APIs leave `tf.Tensor` inputs unchanged and do not perform type promotion on them.
#
# In the next example, you will perform type promotion. First, run addition on ND array inputs of different types and note the output types. None of these type promotions would be allowed on straight `tf.Tensor` objects. Finally,
# convert literals to ND array using `ndarray.asarray` and note the resulting type.
# + id="uHmBi4KZI2t1"
print("Type promotion for operations")
# One scalar ND array per dtype of interest.
values = [tnp.asarray(1, dtype=d) for d in
          (tnp.int32, tnp.int64, tnp.float32, tnp.float64)]
# Add every distinct dtype pair and report the promoted result dtype.
for i, v1 in enumerate(values):
  for v2 in values[i+1:]:
    print("%s + %s => %s" % (v1.dtype, v2.dtype, (v1 + v2).dtype))

print("Type inference during array creation")
# Literal conversion prefers wide types (int64 / float64).
print("tnp.asarray(1).dtype == tnp.%s" % tnp.asarray(1).dtype)
print("tnp.asarray(1.).dtype == tnp.%s\n" % tnp.asarray(1.).dtype)
# + [markdown] id="MwCCDxSZOfA1"
# ### Broadcasting
#
# Similar to TensorFlow, NumPy defines rich semantics for "broadcasting" values.
# Checkout the [NumPy broadcasting guide](https://numpy.org/doc/stable/user/basics.broadcasting.html). Also compare this with [TensorFlow broadcasting semantics](https://www.tensorflow.org/guide/tensor#broadcasting).
#
# + id="qlyOShxIO0s2"
# Three shapes that broadcast together element-wise (NumPy-style rules).
x = tnp.ones([2, 3])
y = tnp.ones([3])
z = tnp.ones([1, 2, 1])
print("Broadcasting shapes %s, %s and %s gives shape %s" % (
    x.shape, y.shape, z.shape, (x + y + z).shape))
# + [markdown] id="LEVr4ctRPrqR"
# ### Indexing
#
# NumPy defines very sophisticated indexing rules. See the [NumPy Indexing guide](https://numpy.org/doc/stable/reference/arrays.indexing.html). Note the use of ND arrays as indices below.
# + id="lRsrtnd3YyMj"
# 2x3x4 array of the integers 0..23 for the indexing examples below.
x = tnp.arange(24).reshape(2, 3, 4)

print("Basic indexing")
# Slices, `tnp.newaxis` and ellipsis behave as in NumPy.
print(x[1, tnp.newaxis, 1:3, ...], "\n")

print("Boolean indexing")
# The boolean mask selects rows 0 and 2 along the second axis.
print(x[:, (True, False, True)], "\n")

print("Advanced indexing")
# Integer-sequence indices; ND arrays are accepted as indices too.
print(x[1, (0, 0, 1), tnp.asarray([0, 1, 1])])
# + id="yRAaiGhlaNw7"
# Mutation is currently not supported
try:
  tnp.arange(6)[1] = -1
except TypeError:
  print("Currently, TensorFlow NumPy does not support mutation.")
# + [markdown] id="5XfJ602j-GVD"
# ### Example Model
#
# Next, you can see how to create a model and run inference on it. This simple model applies a relu layer followed by a linear projection. Later sections will show how to compute gradients for this model using TensorFlow's `GradientTape`.
# + id="kR_KCh4kYEhm"
class Model(object):
  """Two-layer model: dense + ReLU followed by a linear projection."""

  def __init__(self):
    # Parameters are created lazily on the first `predict` call, once the
    # input width is known.
    self.weights = None

  def _init_weights(self, input_size):
    """Randomly initializes and returns (w1, bias, w2) for `input_size` inputs."""
    # Note that we use `tnp.float32` type for performance.
    scale = tnp.sqrt(input_size).astype(tnp.float32)
    hidden_w = tnp.random.randn(input_size, 64).astype(tnp.float32) / scale
    hidden_b = tnp.random.randn(64).astype(tnp.float32)
    out_w = tnp.random.randn(64, 2).astype(tnp.float32) / 8
    return (hidden_w, hidden_b, out_w)

  def predict(self, inputs):
    """Runs a forward pass: relu(inputs @ w1 + bias) @ w2."""
    if self.weights is None:
      self.weights = self._init_weights(inputs.shape[1])
    hidden_w, hidden_b, out_w = self.weights
    activations = tnp.maximum(tnp.matmul(inputs, hidden_w) + hidden_b, 0)  # ReLU
    return tnp.matmul(activations, out_w)  # Linear projection
model = Model()
# Create input data and compute predictions. The first call triggers the
# lazy weight initialization inside `predict`.
print(model.predict(tnp.ones([2, 32], dtype=tnp.float32)))
# + [markdown] id="kSR7Ou5YcS38"
# ## TensorFlow NumPy and NumPy
#
# TensorFlow NumPy implements a subset of the full NumPy spec. While more symbols will be added over time, there are systematic features that will not be supported in the near future. These include NumPy C API support, Swig integration, Fortran storage order, views and `stride_tricks`, and some dtypes (like `np.recarray`, `np.object`). For more details, please see the [TensorFlow NumPy API Documentation](https://www.tensorflow.org/api_docs/python/tf/experimental/numpy)
#
# + [markdown] id="Jb1KXak2YlNN"
# ### NumPy interoperability
#
# TensorFlow ND arrays can interoperate with NumPy functions. These objects implement the `__array__` interface. NumPy uses this interface to convert function arguments to `np.ndarray` values before processing them.
#
# Similarly, TensorFlow NumPy functions can accept inputs of different types including `tf.Tensor` and `np.ndarray`. These input are converted to ND array by calling `ndarray.asarray` on them.
#
# Conversion of ND array to and from `np.ndarray` may trigger actual data copies. Please see section on [buffer copies](#Buffer-copies) for more details.
# + id="cMOCgzQmeXRU"
# ND array passed into NumPy function; NumPy converts it via `__array__`.
np_sum = np.sum(tnp.ones([2, 3]))
print("sum = %s. Class: %s" % (float(np_sum), np_sum.__class__))

# `np.ndarray` passed into TensorFlow NumPy function; converted to ND array.
tnp_sum = tnp.sum(np.ones([2, 3]))
print("sum = %s. Class: %s" % (float(tnp_sum), tnp_sum.__class__))
# + id="ZaLPjzxft780"
# It is easy to plot ND arrays, given the __array__ interface.
labels = 15 + 2 * tnp.random.randn(1000)  # normal samples, shifted and scaled
_ = plt.hist(labels)
# + [markdown] id="kF-Xyw3XWKqJ"
# ### Buffer copies
#
# Intermixing TensorFlow NumPy with NumPy code may trigger data copies. This is because TensorFlow NumPy has stricter requirements on memory alignment than those of NumPy.
#
# When a `np.ndarray` is passed to TensorFlow Numpy, it will check for alignment requirements and trigger a copy if needed. When passing an ND array CPU buffer to NumPy, generally the buffer will satisfy alignment requirements and NumPy will not need to create a copy.
#
# ND arrays can refer to buffers placed on devices other than the local CPU memory. In such cases, invoking a NumPy function will trigger copies across the network or device as needed.
#
# Given this, intermixing with NumPy API calls should generally be done with caution and the user should watch out for overheads of copying data. Interleaving TensorFlow NumPy calls with TensorFlow calls is generally safe and avoids copying data. See the section on [tensorflow interoperability](#Tensorflow-interoperability) for more details.
# + [markdown] id="RwljbqkBc7Ro"
# ### Operator precedence
#
# TensorFlow NumPy defines an `__array_priority__` higher than NumPy's. This means that for operators involving both ND array and `np.ndarray`, the former will take precedence, i.e., `np.ndarray` input will get converted to an ND array and the TensorFlow NumPy implementation of the operator will get invoked.
# + id="Cbw8a3G_WUO7"
# ND array defines the higher __array_priority__, so the TF NumPy `+`
# implementation runs and the result is an ND array.
x = tnp.ones([2]) + np.ones([2])
print("x = %s\nclass = %s" % (x, x.__class__))
# + [markdown] id="DNEab_Ctky83"
# ## TF NumPy and TensorFlow
#
# TensorFlow NumPy is built on top of TensorFlow and hence interoperates seamlessly with TensorFlow.
# + [markdown] id="fCcfgrlOnAhQ"
# ### `tf.Tensor` and ND array
#
# ND array is a thin wrapper on `tf.Tensor`. These types can be converted cheaply to one another without triggering actual data copies.
# + id="BkHVauKwnky_"
x = tf.constant([1, 2])

# Convert `tf.Tensor` to `ndarray`.
tnp_x = tnp.asarray(x)
print(tnp_x)

# Convert `ndarray` to `tf.Tensor` can be done in following ways.
print(tnp_x.data)  # the wrapped tf.Tensor, accessed directly
print(tf.convert_to_tensor(tnp_x))

# Note that tf.Tensor.numpy() will continue to return `np.ndarray`.
print(x.numpy(), x.numpy().__class__)
# + [markdown] id="_151HQVBooxG"
# ### TensorFlow interoperability
#
# ND array can be passed to TensorFlow APIs. These calls internally convert ND array inputs to `tf.Tensor`. As mentioned earlier, such conversion does not actually do data copies, even for data placed on accelerators or remote devices.
#
# Conversely, `tf.Tensor` objects can be passed to `tf.experimental.numpy` APIs. These inputs will internally be converted to ND array without performing data copies.
# + id="-QvxNhrFoz09"
# Conversions in both directions happen implicitly and, per the guide above,
# do not copy data.
# ND array passed into TensorFlow function.
# This returns a `tf.Tensor`.
tf_sum = tf.reduce_sum(tnp.ones([2, 3], tnp.float32))
print("Output = %s" % tf_sum)

# `tf.Tensor` passed into TensorFlow NumPy function.
# This returns an ND array.
tnp_sum = tnp.sum(tf.ones([2, 3]))
print("Output = %s" % tnp_sum)
# + [markdown] id="PyPgpvbppGW2"
# #### Operator precedence
#
# When ND array and `tf.Tensor` objects are combined using operators, a precedence rule is used to determine which object executes the operator. This is controlled by the `__array_priority__` value defined by these classes.
#
# `tf.Tensor` defines an `__array_priority__` higher than that of ND array. This means that the ND array input will be converted to `tf.Tensor` and the `tf.Tensor` version of the operator will be called.
#
# The code below demonstrates how that affects the output type.
#
# + id="VRpWzx3FpQlY"
# `tf.Tensor.__array_priority__` outranks ND array's, so the tf.Tensor
# operator implementation runs and a `tf.Tensor` is returned.
x = tnp.ones([2, 2]) + tf.ones([2, 1])
print("x = %s\nClass = %s" % (x, x.__class__))
# + [markdown] id="1b4HeAkhprF_"
# ### Gradients and Jacobians: tf.GradientTape
#
# TensorFlow's GradientTape can be used for backpropagation through TensorFlow and TensorFlow NumPy code. GradientTape APIs can also return ND array outputs.
#
# Use the model created in [Example Model](#example-model) section, and compute gradients and jacobians.
# + id="T47C9KS8pbsP"
def create_batch(batch_size=32):
  """Creates a batch of input and labels.

  Returns:
    A (inputs, labels) pair of float32 ND arrays with shapes
    (batch_size, 32) and (batch_size, 2).
  """
  inputs = tnp.random.randn(batch_size, 32).astype(tnp.float32)
  labels = tnp.random.randn(batch_size, 2).astype(tnp.float32)
  return inputs, labels
def compute_gradients(model, inputs, labels):
  """Computes gradients of squared loss between model prediction and labels.

  Returns:
    A tuple of gradients, one per entry of `model.weights`.
  """
  with tf.GradientTape() as tape:
    # Weights must already exist; `predict` would otherwise create them
    # inside the tape.
    assert model.weights is not None
    # Note that `model.weights` need to be explicitly watched since they
    # are not tf.Variables.
    tape.watch(model.weights)
    # Compute prediction and (sum-of-squares) loss inside the tape.
    prediction = model.predict(inputs)
    loss = tnp.sum(tnp.square(prediction - labels))
  # This call computes the gradient through the computation above.
  return tape.gradient(loss, model.weights)
inputs, labels = create_batch()

gradients = compute_gradients(model, inputs, labels)

# Inspect the shapes of returned gradients to verify they match the
# parameter shapes.
print("Parameter shapes:", [w.shape for w in model.weights])
print("Gradient shapes:", [g.shape for g in gradients])
# Verify that gradients are of type ND array.
assert isinstance(gradients[0], tnp.ndarray)
# + id="TujVPDFwrdqp"
# Computes a batch of jacobians. Each row is the jacobian of an element in the
# batch of outputs w.r.t the corresponding input batch element.
def prediction_batch_jacobian(inputs):
  """Returns model predictions and their per-example jacobians w.r.t. inputs."""
  with tf.GradientTape() as tape:
    # `inputs` is not a tf.Variable, so it must be watched explicitly.
    tape.watch(inputs)
    prediction = model.predict(inputs)
  return prediction, tape.batch_jacobian(prediction, inputs)

inp_batch = tnp.ones([16, 32], tnp.float32)
output, batch_jacobian = prediction_batch_jacobian(inp_batch)

# Note how the batch jacobian shape relates to the input and output shapes.
print("Output shape: %s, input shape: %s" % (output.shape, inp_batch.shape))
print("Batch jacobian shape:", batch_jacobian.shape)
# + [markdown] id="MYq9wxfc1Dv_"
# ### Trace compilation: tf.function
#
# Tensorflow's `tf.function` works by "trace compiling" the code and then optimizing these traces for much faster performance. See the [Introduction to Graphs and Functions](./guide/intro_to_graphs).
#
# `tf.function` can be used to optimize TensorFlow NumPy code as well. Here is a simple example to demonstrate the speedups. Note that the body of `tf.function` code includes calls to TensorFlow NumPy APIs, and the inputs and output are ND arrays.
#
# + id="05SrUulm1OlL"
inputs, labels = create_batch(512)

print("Eager performance")
compute_gradients(model, inputs, labels)  # warmup (also ensures weights exist)
# timeit returns total seconds for 10 runs; * 100 converts to ms per run.
print(timeit.timeit(lambda: compute_gradients(model, inputs, labels),
                    number=10)* 100, "ms")

print("\ntf.function compiled performance")
compiled_compute_gradients = tf.function(compute_gradients)
compiled_compute_gradients(model, inputs, labels)  # warmup
print(timeit.timeit(lambda: compiled_compute_gradients(model, inputs, labels),
                    number=10) * 100, "ms")
# + [markdown] id="5w8YxR6ELmo1"
# ### Vectorization: tf.vectorized_map
#
# TensorFlow has inbuilt support for vectorizing parallel loops, which allows speedups of one to two orders of magnitude. These speedups are accessible via `tf.vectorized_map` API and apply to TensorFlow NumPy code as well.
#
# It is sometimes useful to compute the gradient of each output in a batch w.r.t. the corresponding input batch element. Such computation can be done efficiently using `tf.vectorized_map` as shown below.
# + id="PemSIrs5L-VJ"
@tf.function
def vectorized_per_example_gradients(inputs, labels):
  """Computes per-example gradients, vectorized via `tf.vectorized_map`."""
  def single_example_gradient(arg):
    # Each example is processed as a batch of size 1.
    inp, label = arg
    return compute_gradients(model,
                             tnp.expand_dims(inp, 0),
                             tnp.expand_dims(label, 0))
  # Note that a call to `tf.vectorized_map` semantically maps
  # `single_example_gradient` over each row of `inputs` and `labels`.
  # The interface is similar to `tf.map_fn`.
  # The underlying machinery vectorizes away this map loop which gives
  # nice speedups.
  return tf.vectorized_map(single_example_gradient, (inputs, labels))

batch_size = 128
inputs, labels = create_batch(batch_size)

per_example_gradients = vectorized_per_example_gradients(inputs, labels)
# One gradient stack per weight tensor; leading dim is the batch size.
for w, p in zip(model.weights, per_example_gradients):
  print("Weight shape: %s, batch size: %s, per example gradient shape: %s " % (
      w.shape, batch_size, p.shape))
# + id="_QZ5BjJmRAlG"
# Here we benchmark the vectorized computation above and compare with
# unvectorized sequential computation using `tf.map_fn`.
@tf.function
def unvectorized_per_example_gradients(inputs, labels):
  """Sequentially computes per-example gradients using `tf.map_fn`."""
  def single_example_gradient(arg):
    # Each example is processed as a batch of size 1.
    inp, label = arg
    output = compute_gradients(model,
                               tnp.expand_dims(inp, 0),
                               tnp.expand_dims(label, 0))
    return output
  # `fn_output_signature` describes the three float32 weight gradients
  # returned by `compute_gradients`.
  return tf.map_fn(single_example_gradient, (inputs, labels),
                   fn_output_signature=(tf.float32, tf.float32, tf.float32))

# Bug fix: printed label previously read "computaton".
print("Running vectorized computation")
print(timeit.timeit(lambda: vectorized_per_example_gradients(inputs, labels),
                    number=10) * 100, "ms")

print("\nRunning unvectorized computation")
per_example_gradients = unvectorized_per_example_gradients(inputs, labels)
print(timeit.timeit(lambda: unvectorized_per_example_gradients(inputs, labels),
                    number=5) * 200, "ms")
# + [markdown] id="UOTh-nkzaJd9"
# ### Device placement
#
# TensorFlow NumPy can place operations on CPUs, GPUs, TPUs and remote devices. It uses standard TensorFlow mechanisms for device placement. Below we show a simple example to list all devices and then place some computation on a particular device.
#
# TensorFlow also has APIs for replicating computation across devices and performing collective reductions which will not be covered here.
# + [markdown] id="-0gHrwYYaTCE"
# #### List devices
#
# `tf.config.list_logical_devices` and `tf.config.list_physical_devices` can be used to find what devices to use.
# + id="NDEAd9m9aemS"
print("All logical devices:", tf.config.list_logical_devices())
print("All physical devices:", tf.config.list_physical_devices())

# Try to get the GPU device. If unavailable, fallback to CPU.
try:
  device = tf.config.list_logical_devices(device_type="GPU")[0]
except IndexError:
  # No GPU: indexing the empty device list raises IndexError.
  device = "/device:CPU:0"
# + [markdown] id="fihgfF_tahVx"
# #### Placing operations: **`tf.device`**
#
# Operations can be placed on a device by calling it in a `tf.device` scope.
#
# + id="c7ELvLmnazfV"
print("Using device: %s" % str(device))

# Run operations in the `tf.device` scope.
# If a GPU is available, these operations execute on the GPU and outputs are
# placed on the GPU memory.
with tf.device(device):
  prediction = model.predict(create_batch(5)[0])

# `.data` is the wrapped tf.Tensor; its `.device` reports the placement.
print("prediction is placed on %s" % prediction.data.device)
# + [markdown] id="e-LK6wsHbBiM"
# #### Copying ND arrays across devices: **`tnp.copy`**
#
# A call to `tnp.copy`, placed in a certain device scope, will copy the data to that device, unless the data is already on that device.
# + id="CCesyidaa-UT"
with tf.device("/device:CPU:0"):
  # Copies `prediction` to host memory (a no-op if it is already on CPU).
  prediction_cpu = tnp.copy(prediction)
print(prediction.data.device)
print(prediction_cpu.data.device)
# + [markdown] id="AiYzRDOtKzAH"
# ## Performance comparisons
#
# TensorFlow NumPy uses highly optimized TensorFlow kernels that can be dispatched on CPUs, GPUs and TPUs. TensorFlow also performs many compiler optimizations, like operation fusion, which translate to performance and memory improvements. See [TensorFlow graph optimization with Grappler](./guide/graph_optimization) to learn more.
#
# However TensorFlow has higher overheads for dispatching operations compared to NumPy. For workloads composed of small operations (less than about 10 microseconds), these overheads can dominate the runtime and NumPy could provide better performance. For other case, TensorFlow should generally provide better performance.
#
# Run the benchmark below to compare NumPy and TensorFlow Numpy performance for different input sizes.
# + cellView="code" id="RExwjI9_pJG0"
def benchmark(f, inputs, number=30, force_gpu_sync=False):
  """Utility to benchmark `f` on each value in `inputs`.

  Args:
    f: callable taking a single argument.
    inputs: iterable of inputs; `f` is timed separately on each one.
    number: timeit iterations per input.
    force_gpu_sync: if True, copy a scalar to CPU after each call so pending
      asynchronous GPU work is included in the measured time.

  Returns:
    A list of average milliseconds per call, one entry per input.
  """
  times = []
  for inp in inputs:
    def _g():
      if force_gpu_sync:
        one = tnp.asarray(1)
      f(inp)
      if force_gpu_sync:
        with tf.device("CPU:0"):
          tnp.copy(one)  # Force a sync for GPU case
    _g()  # warmup
    t = timeit.timeit(_g, number=number)
    times.append(t * 1000. / number)  # total seconds -> ms per call
  return times
def plot(np_times, tnp_times, compiled_tnp_times, has_gpu, tnp_times_gpu):
  """Plot the different runtimes.

  NOTE(review): reads the module-level `sizes` for the x axis;
  `tnp_times_gpu` is only used when `has_gpu` is truthy.
  """
  plt.xlabel("size")
  plt.ylabel("time (ms)")
  plt.title("Sigmoid benchmark: TF NumPy vs NumPy")
  plt.plot(sizes, np_times, label="NumPy")
  plt.plot(sizes, tnp_times, label="TF NumPy (CPU)")
  plt.plot(sizes, compiled_tnp_times, label="Compiled TF NumPy (CPU)")
  if has_gpu:
    plt.plot(sizes, tnp_times_gpu, label="TF NumPy (GPU)")
  plt.legend()
# + id="p-fs_H1lkLfV"
# Here we define a simple implementation of `sigmoid`, and benchmark it using
# NumPy and TensorFlow NumPy for different input sizes.
def np_sigmoid(y):
  """Element-wise logistic sigmoid, 1 / (1 + e^-y), in plain NumPy."""
  denom = 1. + np.exp(-y)
  return 1. / denom
def tnp_sigmoid(y):
  """Same sigmoid, but via TF NumPy ops (dispatched to TF kernels)."""
  return 1. / (1. + tnp.exp(-y))

@tf.function
def compiled_tnp_sigmoid(y):
  """Trace-compiled wrapper around `tnp_sigmoid`."""
  return tnp_sigmoid(y)

# Input sizes from a single element up to ~1M elements.
sizes = (2**0, 2 ** 5, 2 ** 10, 2 ** 15, 2 ** 20)
np_inputs = [np.random.randn(size).astype(np.float32) for size in sizes]
np_times = benchmark(np_sigmoid, np_inputs)

with tf.device("/device:CPU:0"):
  tnp_inputs = [tnp.random.randn(size).astype(np.float32) for size in sizes]
  tnp_times = benchmark(tnp_sigmoid, tnp_inputs)
  compiled_tnp_times = benchmark(compiled_tnp_sigmoid, tnp_inputs)

has_gpu = len(tf.config.list_logical_devices("GPU"))
if has_gpu:
  with tf.device("/device:GPU:0"):
    tnp_inputs = [tnp.random.randn(size).astype(np.float32) for size in sizes]
    # More iterations plus a forced sync for stable GPU timings.
    tnp_times_gpu = benchmark(compiled_tnp_sigmoid, tnp_inputs, 100, True)
else:
  tnp_times_gpu = None

plot(np_times, tnp_times, compiled_tnp_times, has_gpu, tnp_times_gpu)
|
site/en/guide/tf_numpy.ipynb
|