| text_prompt (string, length 168–30.3k) | code_prompt (string, length 67–124k) |
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We note that there is a stationary solution, $x(t)=a/\mu$. From the exponential in the solution, we can see that this is an attracting fixed point.
Step2: Chemical Langevin Equation
Step3: Similarly to the previous case, here is a run with multiple initial conditions.
Step4: We notice that the asymptotic behavior of the CLE is the same as that of the RRE. The only notable difference is the initial random kicks in the paths, due to the stochasticity.
Step5: Now that we have the SSA set up, we can run multiple paths and compare the results to the previous cases.
Step6: We can see three chains above, all starting from $X_0=1$, and simulated with the SSA.
|
<ASSISTANT_TASK:>
Python Code:
# Solution of the RRE
def x(t,x0=3,a=10.0,mu=1.0):
return (x0-a/mu)*np.exp(-t*mu)+a/mu
t = np.linspace(0,3)
x0list = np.array([0.5,1,15])
sns.set_palette("Reds",n_colors=3)
for x0 in x0list:
pl.plot(t,x(t,x0),linewidth=4)
pl.title('Population numbers for different initial conditions.', fontsize=20)
pl.xlabel('Time',fontsize=20)
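# Quick sanity check (added): for a=10, mu=1 the closed-form solution should
# approach the attracting fixed point a/mu = 10 from any initial condition.
for x0 in x0list:
    assert abs(x(100.0, x0) - 10.0) < 1e-6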
def EM(xinit,T,Dt=0.1,a=1,mu=2):
'''
Returns the solution of the CLE with parameters a, mu
Arguments
=========
xinit : real, initial condition.
Dt : real, stepsize of the Euler-Maruyama.
T : real, final time to reach.
a : real, parameter of the RHS.
mu : real, parameter of the RHS.
'''
n = int(T/Dt) # number of steps to reach T
X = np.zeros(n)
z = randn(n)
X[0] = xinit # Initial condition
# EM method
for i in range(1,n):
X[i] = X[i-1] + Dt* (a-mu*X[i-1])+(np.sqrt(a)-np.sqrt(mu*X[i-1]))*np.sqrt(Dt)*z[i]
return X
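# Modeling note (added, hedged): the CLE for this birth/death process is often
# written with two independent noise terms, sqrt(a)*dW1 - sqrt(mu*X)*dW2, or a
# single combined diffusion sqrt(a + mu*X)*dW; the factor sqrt(a) - sqrt(mu*X)
# above uses a single noise source and therefore has a smaller variance.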
T = 10 # final time to reach
Dt = 0.01 # time-step for EM
# Set the palette to reds with ten colors
sns.set_palette('Reds',10)
def plotPaths(T,Dt):
n = int(T/Dt)
t = np.linspace(0,T,n)
xinitlist = np.linspace(10,15,10)
for x0 in xinitlist :
path = EM(xinit=x0,T=T,Dt=Dt,a=10.0,mu=1.0)
pl.plot(t, path,linewidth=5)
pl.xlabel('time', fontsize=20)
pl.title('Paths for initial conditions between 10 and 15.', fontsize=20)
return path
path = plotPaths(T,Dt)
print('Paths decay towards', path[np.size(path)-1])
print('The stationary point is', 10.0)
def SSA(xinit, nsteps, a=10.0, mu=1.0):
'''
Using SSA to exactly simulate the death/birth process starting
from xinit and for nsteps.
a and mu are parameters of the propensities.
Returns
=======
path : array-like, the path generated.
tpath: stochastic time steps
'''
path = np.zeros(nsteps)
tpath= np.zeros(nsteps)
path[0] = xinit # initial population
u = rand(2,nsteps) # pre-pick all the uniform variates we need
for i in range(1,nsteps):
# The propensities will be normalized
tot_prop = path[i-1]*mu+a
prob = path[i-1]*mu/tot_prop # probability of death
if(u[0,i]<prob):
# Death
path[i] = path[i-1]-1
else:
# Birth
path[i] = path[i-1]+1
# Time stayed at current state
tpath[i] = -np.log(u[1,i])*1/tot_prop
tpath = np.cumsum(tpath)
return path, tpath
# Since the paths below are not really related
# let's use a more interesting palette
# for the plot.
sns.set_palette('hls',1)
for _ in range(1):
path, tpath = SSA(xinit=1,nsteps=100)
# Since this is the path of a jump process
# I'm switching from "plot" to "step"
# to get the figure right. :)
pl.step(tpath,path,linewidth=5,alpha=0.9)
pl.title(r'One path simulated with SSA, $a>\mu$.', fontsize=20);
pl.xlabel('Time', fontsize=20)
# Since the paths below are not really related
# let's use a more interesting palette
# for the plot.
sns.set_palette('hls',3)
for _ in range(3):
path, tpath = SSA(xinit=1,nsteps=100)
# Since this is the path of a jump process
# I'm switching from "plot" to "step"
# to get the figure right. :)
pl.step(tpath,path,linewidth=5,alpha=0.9)
pl.title(r'Three paths simulated with SSA, $a>\mu$.', fontsize=20);
pl.xlabel('Time', fontsize=20)
npaths = 1
nsteps = 30000
path = np.zeros([npaths,nsteps])
for i in range(npaths):
path[i,:], tpath = SSA(xinit=1,nsteps=nsteps)
skip = 20000
sum(path[0,skip:nsteps-1]*tpath[skip:nsteps-1])/sum(tpath[skip:nsteps-1])
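# Hedged correction (added): `tpath` holds cumulative jump times, so a time
# average should weight each state by the time spent in it, i.e. the increments
# np.diff(tpath), rather than the cumulative values used above.
holding = np.diff(tpath[skip:])
print(np.sum(path[0, skip:-1] * holding) / np.sum(holding))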
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a function which sums the numbers between its first (inclusive) and second argument (exclusive). For example, if you pass in 3 and 6, it should return 12 (3+4+5). Your function should return None if the given arguments are not integers. Your function must be called int_sum to receive credit.
Step2: Create a function which takes in two arguments
|
<ASSISTANT_TASK:>
Python Code:
#The points awarded this cell correspond to partial credit and/or documentation
### BEGIN SOLUTION
def power(x, p=2):
'''Computes x^p
Args:
x: input number
p: input power, defaults to 2
returns: x^p as a floating point
'''
return x**p
### END SOLUTION
'''Check if your function returns the correct values'''
from numpy import testing as t
t.assert_almost_equal( power(3,2), 9 )
### BEGIN HIDDEN TESTS
import numpy as np
test_x = np.array([-2, -1.5, 0, 4])
test_p = np.array([-3, 2, 4, 1])
t.assert_almost_equal( power(test_x, test_p), test_x ** test_p)
t.assert_almost_equal( power(3), 3**2)
### END HIDDEN TESTS
#The points awarded this cell correspond to partial credit and/or documentation
### BEGIN SOLUTION
def int_sum(x, y):
'''Computes sum from x to y (excluding y)
Args:
x: start of sum
y: end of sum (exclusive)
returns: the sum as an integer
'''
if type(x) != type(1) or type(y) != type(1):
return None
s = 0
for i in range(x, y):
s += i
return s
### END SOLUTION
'''check that it returns correct answer'''
from numpy import testing as t
t.assert_equal( int_sum(3,6), 12)
### BEGIN HIDDEN TESTS
t.assert_equal( int_sum(-2, 7), sum(range(-2, 7)))
t.assert_equal( int_sum(0, 5), sum(range(0, 5)))
### END HIDDEN TESTS
'''check that it deals with invalid input correctly'''
from numpy import testing as t
t.assert_equal( int_sum(4.4, 4.6), None)
### BEGIN HIDDEN TESTS
t.assert_equal( int_sum('test', 4), None)
t.assert_equal( int_sum(3, 'test'), None)
t.assert_array_equal( int_sum(5,4), 0)
### END HIDDEN TESTS
#The points awarded this cell correspond to partial credit and/or documentation
### BEGIN SOLUTION
def pprint(x, i):
'''Prints x to the given precision indicated by i
Args:
x: the number to print
i: the integer precision
returns: a string
'''
if( not (type(x) == float or type(x) == int)):
return None
if(type(i) != int or i <= 0):
return None
return '{:.{}}'.format(x, i)
### END SOLUTION
'''check answer is correct'''
from numpy import testing as t
t.assert_equal( pprint(4.3212, 2), '4.3')
### BEGIN HIDDEN TESTS
t.assert_equal( pprint(-4.3212, 2), '-4.3')
t.assert_equal( pprint(5.45676, 3), '5.46')
t.assert_equal( pprint(11.2, 1), '1e+01')
### END HIDDEN TESTS
'''check that your function correctly deals with invalid input'''
from numpy import testing as t
t.assert_equal( pprint('not a number', 4), None)
### BEGIN HIDDEN TESTS
t.assert_equal( pprint(-4.3212, -2), None)
t.assert_equal( pprint(5.45676, 'b'), None)
t.assert_equal( pprint(55, 4.12), None)
t.assert_equal( pprint(55, 0), None)
### END HIDDEN TESTS
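# Note (added): the nested format spec '{:.{}}' takes its precision from the
# second argument and uses general-style formatting, e.g.:
print('{:.{}}'.format(3.14159, 3))   # -> 3.14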
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The two wells below are missing all of their PE values
Step2: The PE values show no strong variance across wells; for now, fill in the missing values with the median
Step3: Build up an initial test loop for the model and feature engineering
Step4: This is a bad indicator of model performance: it means no accurate predictions were found for one class
|
<ASSISTANT_TASK:>
Python Code:
well_PE_Miss = train.loc[train["PE"].isnull(),"Well Name"].unique()
well_PE_Miss
train.loc[train["Well Name"] == well_PE_Miss[0]].count()
train.loc[train["Well Name"] == well_PE_Miss[1]].count()
(train.groupby("Well Name"))["PE"].mean()
(train.groupby("Well Name"))["PE"].median()
train["PE"] = train["PE"].fillna(train["PE"].median())
print(train.loc[train["Well Name"] == "CHURCHMAN BIBLE","PE"].mean())
print(train.loc[train["Well Name"] == "CHURCHMAN BIBLE","PE"].median())
print((train.groupby("Well Name"))["PE"].median()) ## QC for the fill in
plt.show()
features = ['GR', 'ILD_log10', 'DeltaPHI',
'PHIND','PE','NM_M', 'RELPOS']
feature_vectors = train[features]
facies_labels = train['Facies']
## 1=sandstone 2=c_siltstone 3=f_siltstone
## 4=marine_silt_shale 5=mudstone 6=wackestone 7=dolomite
## 8=packstone 9=bafflestone
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00',
'#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS',
'WS', 'D','PS', 'BS']
#facies_color_map is a dictionary that maps facies labels
#to their respective colors
facies_color_map = {}
for ind, label in enumerate(facies_labels):
facies_color_map[label] = facies_colors[ind]
def label_facies(row, labels):
return labels[ row['Facies'] -1]
train.loc[:,'FaciesLabels'] = train.apply(lambda row: label_facies(row, facies_labels), axis=1)
#
def make_facies_log_plot(logs, facies_colors):
#make sure logs are sorted by depth
logs = logs.sort_values(by='Depth')
cmap_facies = colors.ListedColormap(
facies_colors[0:len(facies_colors)], 'indexed')
ztop=logs.Depth.min(); zbot=logs.Depth.max()
cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
f, ax = plt.subplots(nrows=1, ncols=6, figsize=(8, 12))
ax[0].plot(logs.GR, logs.Depth, '-g')
ax[1].plot(logs.ILD_log10, logs.Depth, '-')
ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
ax[4].plot(logs.PE, logs.Depth, '-', color='black')
im=ax[5].imshow(cluster, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
divider = make_axes_locatable(ax[5])
cax = divider.append_axes("right", size="20%", pad=0.05)
cbar=plt.colorbar(im, cax=cax)
cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
'SiSh', ' MS ', ' WS ', ' D ',
' PS ', ' BS ']))
cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')
for i in range(len(ax)-1):
ax[i].set_ylim(ztop,zbot)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=3)
ax[0].set_xlabel("GR")
ax[0].set_xlim(logs.GR.min(),logs.GR.max())
ax[1].set_xlabel("ILD_log10")
ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
ax[2].set_xlabel("DeltaPHI")
ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
ax[3].set_xlabel("PHIND")
ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
ax[4].set_xlabel("PE")
ax[4].set_xlim(logs.PE.min(),logs.PE.max())
ax[5].set_xlabel('Facies')
ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
ax[5].set_xticklabels([])
f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94)
make_facies_log_plot(
train[train['Well Name'] == 'SHRIMPLIN'],
facies_colors)
plt.show()
## Investigate the dependencies of the depth feature and Facies
wells = train["Well Name"].unique()
#train.plot(x = "Depth", y = "Facies")
#plt.show()
pi = 0
for well in wells:
pi = pi + 1 # Plot index
ax = plt.subplot(3, 4, pi)
depthi = train.loc[train["Well Name"] == well, "Depth"].values
faci = train.loc[train["Well Name"] == well, "Facies"].values
plt.plot(faci,depthi)
ax.set_title(well)
## Create dummy variables for Well Name, Formation, which may have geologic or geospatial information
train_dummy = pd.get_dummies(train[["Formation"]])
train_dummy.describe()
cols_dummy = train_dummy.columns.values
train[cols_dummy] = train_dummy[cols_dummy]
print(len(cols_dummy))
## For trainning drop Formation, FaciesLabels Leave Well Name for Later group splitting
wellgroups = train["Well Name"].values
train_inp = train.drop(["Formation","Well Name",'FaciesLabels'],axis =1)
train_inp.info()
from sklearn.model_selection import LeavePGroupsOut
X = train_inp.drop(["Facies","Depth"],axis = 1).values
y = train_inp["Facies"].values
lpgo = LeavePGroupsOut(n_groups=2)
split_no = lpgo.get_n_splits(X,y,wellgroups)
svc_b1 = SVC(C =1, gamma = 0.001, kernel = 'rbf')
svc_b1.fit(X,y)
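# Hedged sketch (added): `lpgo` above is constructed but never used; a
# leave-two-wells-out cross-validation of the same classifier could look like:
from sklearn.metrics import f1_score
cv_scores = []
for train_idx, test_idx in lpgo.split(X, y, wellgroups):
    clf = SVC(C=1, gamma=0.001, kernel='rbf').fit(X[train_idx], y[train_idx])
    cv_scores.append(f1_score(y[test_idx], clf.predict(X[test_idx]), average='micro'))
print(np.mean(cv_scores))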
test = pd.read_csv('01_raw_data/validation_data_nofacies.csv')
test.count()
test["Formation"].unique()
test_dummy = pd.get_dummies(test[["Formation"]])
test_cols_dummy = test_dummy.columns.values
test[test_cols_dummy] = test_dummy[cols_dummy]
test_inp = test.drop(["Formation","Well Name"],axis =1)
X_test = test_inp.drop(["Depth"],axis = 1).values
svc_b1.predict(X_test)
test = test.drop(test_cols_dummy,axis = 1)
test["Facies"] = svc_b1.predict(X_test)
test.to_csv("Houston_J_sub_1.csv")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Update time series for the symbols below.
Step2: Remove the time series for TSLA
Step3: Update time series for all symbols in the cache directory
Step4: Remove time series for all symbols in the cache directory
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import pinkfish as pf
pf.update_cache_symbols(symbols=['msft', 'orcl', 'tsla'])
pf.remove_cache_symbols(symbols=['tsla'])
pf.update_cache_symbols()
# WARNING!!! - if you uncomment the line below, you'll wipe out
# all the symbols in your cache directory
#pf.remove_cache_symbols()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data
Step2: Label mapping
Step3: Building and training the classifier
Step4: Save the checkpoint
Step5: testing
Step6: Loading the checkpoint
Step7: Inference for classification
Step9: To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your process_image function works, running the output through this function should return the original image (except for the cropped out portions).
Step10: Class Prediction
Step11: Sanity Checking
|
<ASSISTANT_TASK:>
Python Code:
# Imports here
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch
from torchvision import datasets,transforms,utils,models
import matplotlib.pyplot as plt
import os
import time
import copy
!ls -r flower_data/
data_dir = 'flower_data'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
datadict={
'train':train_dir,
'valid':valid_dir
}
datadict['train']
# TODO: Define your transforms for the training and validation sets
batch_size=16
data_transforms = {
'train': transforms.Compose([
transforms.RandomRotation(45),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
]),
'valid':transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
}
# TODO: Load the datasets with ImageFolder
image_datasets = {i:datasets.ImageFolder(datadict[i],data_transforms[i]) for i in ['train','valid']}
# TODO: Using the image datasets and the trainforms, define the dataloaders
dataloaders = {i:torch.utils.data.DataLoader(image_datasets[i],batch_size=batch_size,shuffle=True) for i in ['train','valid']}
dataset_sizes={i:len(image_datasets[i]) for i in ['train','valid']}
class_names=image_datasets['train'].classes
dataloaders
import json
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
cat_to_name
fig,ax=plt.subplots(figsize=(10,10))
def imshow(inp,title=None):
inp=inp.numpy().transpose((1,2,0))
mean=np.array([0.485, 0.456, 0.406])
std=np.array([0.229, 0.224, 0.225])
inp=std*inp+mean
inp=np.clip(inp,0,1)
ax.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.1)
inputs,classes=next(iter(dataloaders['train']))
out=utils.make_grid(inputs)
imshow(out,title=[cat_to_name[str(i.item())] for i in classes])
for x in classes:
print(x.item(), cat_to_name[str(x.item())])
# TODO: Build and train your network
#model=models.vgg19(pretrained=True)
#for param in model.parameters():
# param.requires_grad=False
MY_model=models.resnet152(pretrained=True)
for param in MY_model.parameters():
param.requires_grad=False
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
print(MY_model)
# TODO: Save the checkpoint
classifier2=nn.Linear(2048,102)
MY_model.fc=classifier2
loss_fn=nn.CrossEntropyLoss()
#loss_fn=nn.NLLLoss()
optimizer=optim.Adam(MY_model.fc.parameters(),lr=0.001)
scheduler=optim.lr_scheduler.StepLR(optimizer,step_size=5,gamma=0.1)
def train_model(model,criterion,optimizer,scheduler,epochs=25,device='cuda'):
start=time.time()
best_model=copy.deepcopy(model.state_dict())
best_acc=0.0
for e in range(epochs):
print('epoch {}/{}'.format(e,epochs-1))
print('-'*10)
for mode in ['train','valid']:
if mode=='train':
scheduler.step()
model.train()
else:
model.eval()
running_loss=0.0
running_corrects=0
for inputs,labels in dataloaders[mode]:
#inputs.to(device)
#labels.to(device)
inputs = inputs.cuda()  # Tensor.cuda() is not in-place; the result must be assigned
labels = labels.cuda()
#print(device,type(inputs),type(labels),type(model),type(best_model))
#
optimizer.zero_grad()
with torch.set_grad_enabled(mode=='train'):
outputs=model(inputs.cuda())
#outputs.cuda()
pred_values,preds=torch.max(outputs,1)#retunrs values, indexes
loss=criterion(outputs.cuda(),labels.cuda())
if mode=='train':
loss.backward()
optimizer.step()
running_loss+=loss.item()*inputs.size(0)
running_corrects+=torch.sum(preds.cuda()==labels.data.cuda())
epoch_loss=running_loss/dataset_sizes[mode]
epoch_acc=running_corrects.double()/dataset_sizes[mode]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(mode,epoch_loss,epoch_acc))
if (mode=='valid' and epoch_acc>best_acc):
best_acc=epoch_acc
best_model=copy.deepcopy(model.state_dict())
print('----saving the model----')
model.cpu()
save_checkpoint('checkpoint.pth',model,optimizer,best_acc,e)
model.cuda()
#best_model.cuda()
print()
time_taken=time.time()-start
print('Training complete in {:.0f}m {:.0f}s'.format(time_taken//60,time_taken%60))
print('Best Validation Acc: {:.4f}'.format(best_acc))
model.load_state_dict(best_model)
return model
MY_model.to(device)
model_final=train_model(MY_model,loss_fn,optimizer,scheduler,15,device)
device
!ls
def save_checkpoint(path,model,optimizer,best_acc,epochs):
model.class_to_idx=image_datasets['train'].class_to_idx
torch.save({
'arch':'resnet152',
'num_epochs':epochs,
'best_acc':best_acc,
'model_state_dict':model.state_dict(),
'optim_state_dict':optimizer.state_dict(),
'class_to_idx':model.class_to_idx
},path)
print('epochs {} \t best accuracy {:.4f}'.format(epochs,best_acc))
print('!-!-checkpoint saved-!-!')
save_checkpoint('checkpoint.pth',MY_model,optimizer,92,10)
!ls
testdir='flower_data_test'
test_data_transform=transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
])
image_test_datasets=datasets.ImageFolder(testdir,test_data_transform)
batch_size=16
test_dataloaders=torch.utils.data.DataLoader(image_test_datasets,batch_size=batch_size,shuffle=True)
test_dataset_size=len(image_test_datasets)
def calc_test_acc(model):
model.eval()
model.cuda()
running_corrects=0
with torch.no_grad():
for inputs,labels in test_dataloaders:
#inputs,labels = inputs.cuda(),labels=labels.cuda()
inputs=inputs.cuda()
labels=labels.cuda()
outputs=model.forward(inputs)
predvalues,preds=torch.max(outputs,1)
running_corrects+=torch.sum(labels.data==preds)
acc=running_corrects.double()/test_dataset_size
print('test accuracy',acc)
calc_test_acc(MY_model)
# TODO: Write a function that loads a checkpoint and rebuilds the model
def load_checkpoint(path,model,optimizer):
print(path)
checkpoint=torch.load(path)
print('epochs {}\n best accuracy {}'.format(checkpoint['num_epochs'],checkpoint['best_acc']))
if checkpoint['arch']=='resnet152':
model=models.resnet152(pretrained=True)
for param in model.parameters():
param.requires_grad=False
else:
print('arch error')
return None
model.class_to_idx=checkpoint['class_to_idx']
classifier=nn.Linear(2048,102)
model.fc=classifier
model.load_state_dict(checkpoint['model_state_dict'])
return model
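# Note (added): the `optimizer` argument above is accepted but its state is not
# restored; to resume training (not just inference) you could also reload it:
# optimizer.load_state_dict(checkpoint['optim_state_dict'])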
MY_model_2=load_checkpoint('checkpoint.pth',MY_model,optimizer)
print(MY_model_2)
ll=os.listdir()
ll
# Assumes an authenticated PyDrive `drive` client is available (e.g. in Colab).
uploadId = '1w26x8ZtouNfb2X9piy6cEqsgi83BNpCN'
uploaded = drive.CreateFile({'parents':[{'id': uploadId}] , 'title' : 'checkpoint.pth'})
uploaded.SetContentFile('checkpoint.pth')
uploaded.Upload()
from PIL import Image
def process_image(image_path):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
# TODO: Process a PIL image for use in a PyTorch model
pil_img=Image.open(image_path)
loader=transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor()
])
pil_img=loader(pil_img).float()
np_img=np.array(pil_img)  # ToTensor already scales pixel values to [0, 1]
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_img=(np.transpose(np_img,(1,2,0))-mean)/std
#np_img=(np_img-mean)/std
np_img=np.transpose(np_img,(2,0,1))
return np_img
def imshow(image, ax=None, title='Flower'):
'''Imshow for Tensor.'''
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
# but matplotlib assumes is the third dimension
image = np.transpose(image,(1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
image=(image*255.0).astype(np.uint8)  # uint8 so matplotlib renders the 0-255 range correctly
ax.imshow(image)
return ax
image_path='flower_data/valid/28/image_05258.jpg'
ims=imshow(process_image(image_path))
plt.show()
#plt.imshow(ims, clim=(0.064, 0.068))
#mod_img = ndimage.median_filter(ims, 20)
#plt.imshow(mod_img)
def process_image(image_path):
'''
Scales, crops, and normalizes a PIL image for a PyTorch
model, returns an Numpy array
'''
# Open the image
#from PIL import Image
img = Image.open(image_path)
# Resize
if img.size[0] > img.size[1]:
img.thumbnail((10000, 256))
else:
img.thumbnail((256, 10000))
# Crop
left_margin = (img.width-224)/2
bottom_margin = (img.height-224)/2
right_margin = left_margin + 224
top_margin = bottom_margin + 224
img = img.crop((left_margin, bottom_margin, right_margin,top_margin))
# Normalize
img = np.array(img)/255
mean = np.array([0.485, 0.456, 0.406]) #provided mean
std = np.array([0.229, 0.224, 0.225]) #provided std
img = (img - mean)/std
# Move color channels to first dimension as expected by PyTorch
img = img.transpose((2, 0, 1))
return img
def imshow(image, ax=None, title=None):
if ax is None:
fig, ax = plt.subplots()
if title:
plt.title(title)
# PyTorch tensors assume the color channel is first
# but matplotlib assumes is the third dimension
image = image.transpose((1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
image_path = 'flower_data/valid/28/image_05258.jpg'
img = process_image(image_path)
imshow(img)
def predict(image_path, model, num_k=5):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
# TODO: Implement the code to predict the class from an image file
model.eval()
model.cpu()
np_img=process_image(image_path)
img_tensor=torch.from_numpy(np_img).type(torch.FloatTensor)
img_tensor=torch.unsqueeze(img_tensor,0)
probs=torch.exp(model.forward(img_tensor))
top_probs,top_labs=probs.topk(num_k)
top_probs=top_probs.detach().numpy().tolist()[0]
top_labs=top_labs.detach().numpy().tolist()[0]
idx_to_class={val:key for key,val in model.class_to_idx.items()}
top_labels=[idx_to_class[lab] for lab in top_labs]
top_flowers=[cat_to_name[idx_to_class[lab]] for lab in top_labs]
return top_probs,top_labels,top_flowers
probs,classes,flowers=predict(image_path,MY_model_2)
print(probs)
print(classes)
print(flowers)
# TODO: Display an image along with the top 5 classes
def plot_solution(img_path):
fig,(ax1,ax2)=plt.subplots(1,2)
fig.figsize=(15,15)
ims=imshow(process_image(img_path),ax2)
probs,classes,flowers=predict(img_path,MY_model_2)
ax1.barh(flowers,probs)
plt.show()
plot_solution(image_path)
plot_solution('flower_data_test/16/image_06670.jpg')
plot_solution('flower_data_test/16/image_06657.jpg')
plot_solution('flower_data_test/2/image_05100.jpg')
!ls
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we set some parameters.
Step2: Load the MEG data
Step3: Estimate the background noise covariance from the baseline period
Step4: Generate sinusoids in two spatially distant labels
Step5: Find the center vertices in source space of each label
Step6: Create source-space data with known signals
Step7: Plot original signals
Step8: Simulate sensor-space signals
Step9: Plot the point-spread of corrupted signal
|
<ASSISTANT_TASK:>
Python Code:
import os.path as op
import numpy as np
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.simulation import simulate_stc, simulate_evoked
seed = 42
# parameters for inverse method
method = 'sLORETA'
snr = 3.
lambda2 = 1.0 / snr ** 2
# signal simulation parameters
# do not add extra noise to the known signals
nave = np.inf
T = 100
times = np.linspace(0, 1, T)
dt = times[1] - times[0]
# Paths to MEG data
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-fwd.fif')
fname_inv = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-meg-fixed-inv.fif')
fname_evoked = op.join(data_path, 'MEG', 'sample',
'sample_audvis-ave.fif')
fwd = mne.read_forward_solution(fname_fwd)
fwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True,
use_cps=False)
fwd['info']['bads'] = []
inv_op = read_inverse_operator(fname_inv)
raw = mne.io.read_raw_fif(op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw.fif'))
raw.set_eeg_reference(projection=True)
events = mne.find_events(raw)
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2}
epochs = mne.Epochs(raw, events, event_id, baseline=(None, 0), preload=True)
epochs.info['bads'] = []
evoked = epochs.average()
labels = mne.read_labels_from_annot('sample', subjects_dir=subjects_dir)
label_names = [label.name for label in labels]
n_labels = len(labels)
cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)
# The known signal is all zero-s off of the two labels of interest
signal = np.zeros((n_labels, T))
idx = label_names.index('inferiorparietal-lh')
signal[idx, :] = 1e-7 * np.sin(5 * 2 * np.pi * times)
idx = label_names.index('rostralmiddlefrontal-rh')
signal[idx, :] = 1e-7 * np.sin(7 * 2 * np.pi * times)
hemi_to_ind = {'lh': 0, 'rh': 1}
for i, label in enumerate(labels):
# The `center_of_mass` function needs labels to have values.
labels[i].values.fill(1.)
# Restrict the eligible vertices to be those on the surface under
# consideration and within the label.
surf_vertices = fwd['src'][hemi_to_ind[label.hemi]]['vertno']
restrict_verts = np.intersect1d(surf_vertices, label.vertices)
com = labels[i].center_of_mass(subjects_dir=subjects_dir,
restrict_vertices=restrict_verts,
surf='white')
# Convert the center of vertex index from surface vertex list to Label's
# vertex list.
cent_idx = np.where(label.vertices == com)[0][0]
# Create a mask with 1 at center vertex and zeros elsewhere.
labels[i].values.fill(0.)
labels[i].values[cent_idx] = 1.
# Print some useful information about this vertex and label
if 'transversetemporal' in label.name:
dist, _ = label.distances_to_outside(
subjects_dir=subjects_dir)
dist = dist[cent_idx]
area = label.compute_area(subjects_dir=subjects_dir)
# convert to equivalent circular radius
r = np.sqrt(area / np.pi)
print(f'{label.name} COM vertex is {dist * 1e3:0.1f} mm from edge '
f'(label area equivalent to a circle with r={r * 1e3:0.1f} mm)')
stc_gen = simulate_stc(fwd['src'], labels, signal, times[0], dt,
value_fun=lambda x: x)
kwargs = dict(subjects_dir=subjects_dir, hemi='split', smoothing_steps=4,
time_unit='s', initial_time=0.05, size=1200,
views=['lat', 'med'])
clim = dict(kind='value', pos_lims=[1e-9, 1e-8, 1e-7])
brain_gen = stc_gen.plot(clim=clim, **kwargs)
evoked_gen = simulate_evoked(fwd, stc_gen, evoked.info, cov, nave,
random_state=seed)
# Map the simulated sensor-space data to source-space using the inverse
# operator.
stc_inv = apply_inverse(evoked_gen, inv_op, lambda2, method=method)
brain_inv = stc_inv.plot(**kwargs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: OEIS
Step2: Recurrence
Step3: Unfolding
Step4: Involution
Step5: Subsuming
Step6: We can abstract the following conjecture
Step7: Instantiation
Step8: Based
Step9: Computing
Step10: Collecting
Step11: Spec as based matrix-vector product
Step12: Identities and conjectures
Step13: which is known but left as an exercise by Benjamin and Quinn, in the following form
Step14: instantiating $m=n$ we get a closed representation of the matrix-vector product seen before
Step15: A little application of the previous identity is in the following cells
Step16: symbolic expansion of the Sum object
Step17: instantiate to let $f_{0}$ appears in the identity (our concept of based instantiation)
Step18: finally, check the consistency using the sequence of Fibonacci numbers
Step19: Looking at bottom left to top right diagonal sums
Step20: we build the sum for each diagonal, manually
Step21: looking at the sequence composed of the coefficients in the rhs of each equation, we recognize A059633. According to the reference, the sequence is built by the following recurrence
Step23: therefore we can conjecture the following identity
|
<ASSISTANT_TASK:>
Python Code:
%run "../src/start_session.py"
%run "../src/recurrences.py"
%run "../src/sums.py"
from sympy.abc import i
from oeis import oeis_search, ListData
import knowledge
sys.setrecursionlimit(10000000)
s = oeis_search(id=45)
s(data_only=True)#, data_representation=ListData(upper_limit=20))
with bind(IndexedBase('y'), single=True) as y,\
bind(IndexedBase('b'), single=True) as b:
stirling_second_kind_rec_spec = recurrence_spec(recurrence_eq=Eq(y[i],x*y[i-1]+b[i]),
recurrence_symbol=y,
variables=[i])
stirling_second_kind_rec_spec
unfolded = stirling_second_kind_rec_spec.unfold(depth=4)
unfolded
unfolded.involute(depth=1)
unfolded.involute()
subsumed=unfolded.subsume()
subsumed
im=knowledge.fibonacci_numbers_inverted_mapping(start=2, limit=20)
im
subsumed.subs(im)
unfolded.instantiate(strategy=raw(substitutions={n:20}))
instantiated = unfolded.instantiate(strategy=based(arity=unary_indexed()))
instantiated
almost_valued = instantiated.subsume(additional_terms={f[0]:Integer(0), f[1]:Integer(1)})
almost_valued
almost_valued.involute()
m, v, r, eqs = stirling_second_kind_rec_spec.matrix_vector_product(depth=10, arity=unary_indexed(),
segment=[n-k for k in range(-2, 19)])
latex_array_src(eqs)
m, v, r
sols=solve(Eq(m*v, r), v)
eq_system = [Eq(k, v) for k, v in sols.items()]
latex_array_src(eq_system)
to_matrix_notation(eq_system, f, [n-k for k in range(8,19)])
example={n:30}
to_check = Eq(m*v.subs(example), r.subs(example))
to_check
to_check.subs(knowledge.fibonacci_numbers(), simultaneous=True)
m, v, r, eqs = fibonacci_rec_spec.matrix_vector_product(depth=10, arity=unary_indexed(),
segment=[Integer(k) for k in range(0, 11)],
based_instantiation=True)
latex_array_src(eqs)
m,v,r
m = symbols('m')
thm = Eq(f[n], Sum(binomial(m,k)*f[n-2*m+k], (k,0,m)))
thm
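# Numeric spot-check (added, hedged): verify the identity with plain integer
# Fibonacci numbers (f0=0, f1=1); `binomial` is assumed available from the
# sympy session setup, as in the cell above.
def fib_int(j):
    a, b = 0, 1
    for _ in range(j):
        a, b = b, a + b
    return a

for nn in range(8, 14):
    for mm in range(1, 5):
        assert fib_int(nn) == sum(int(binomial(mm, kk)) * fib_int(nn - 2*mm + kk)
                                  for kk in range(mm + 1))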
benjamin_quinn_thm = thm.subs({n:n+m})
benjamin_quinn_thm
benjamin_quinn_thm.subs({m:n})
def expand_sum_in_eq(eq_term, in_lhs=False, in_rhs=True):
lhs, rhs = eq_term.lhs, eq_term.rhs
return Eq(expand_Sum(lhs) if in_lhs else lhs, expand_Sum(rhs) if in_rhs else rhs)
expanded_eq = expand_sum_in_eq(thm.subs({m:20}))
expanded_eq
subsed = expanded_eq.subs(n, 40)
subsed
subsed.subs(knowledge.fibonacci_numbers())
fibs = knowledge.fibonacci_numbers()
f2_diag = f[0]
f4_diag = f[0]+f[1]
f6_diag = f[0]+2*f[1]
f8_diag = f[0]+3*f[1]+f[2]
f10_diag = f[0]+4*f[1]+3*f[2]
f12_diag = f[0]+5*f[1]+6*f[2]+f[3]
f14_diag = f[0]+6*f[1]+10*f[2]+4*f[3]
f16_diag = f[0]+7*f[1]+15*f[2]+10*f[3]+f[4]
f18_diag = f[0]+8*f[1]+21*f[2]+20*f[3]+5*f[4]
f20_diag = f[0]+9*f[1]+28*f[2]+35*f[3]+15*f[4]+f[5]
f22_diag = f[0]+10*f[1]+36*f[2]+56*f[3]+35*f[4]+6*f[5]
f24_diag = f[0]+11*f[1]+45*f[2]+84*f[3]+70*f[4]+21*f[5]+f[6]
f26_diag = f[0]+12*f[1]+55*f[2]+120*f[3]+126*f[4]+56*f[5]+7*f[6]
diagonals = [Eq(f2_diag, f2_diag.subs(fibs)),
Eq(f4_diag, f4_diag.subs(fibs)),
Eq(f6_diag, f6_diag.subs(fibs)),
Eq(f8_diag, f8_diag.subs(fibs)),
Eq(f10_diag, f10_diag.subs(fibs)),
Eq(f12_diag, f12_diag.subs(fibs)),
Eq(f14_diag, f14_diag.subs(fibs)),
Eq(f16_diag, f16_diag.subs(fibs)),
Eq(f18_diag, f18_diag.subs(fibs)),
Eq(f20_diag, f20_diag.subs(fibs)),
Eq(f22_diag, f22_diag.subs(fibs)),
Eq(f24_diag, f24_diag.subs(fibs)),
Eq(f26_diag, f26_diag.subs(fibs))]
latex_array_src(diagonals)
def A059633_gf(x):
return 1/(1-2*x+x**3-x**4)
A059633_gf(t).series(t,n=14)
def do_memberwise_on_eqs(an_eq, another_eq, operator=lambda x, y: Add(x,y,evaluate=True)):
return Eq(operator(an_eq.lhs, another_eq.lhs), operator(an_eq.rhs, another_eq.rhs))
def swap_eq(eq_term): return Eq(eq_term.rhs, eq_term.lhs)
reduce(do_memberwise_on_eqs, eqs)
even_subscript_fibonacci_eq = Eq(f[2*n+1]-1, Sum(f[2*k],(k,0,n)))
even_subscript_fibonacci_eq
example = even_subscript_fibonacci_eq.subs(n,21)
example_expanded = expand_sum_in_eq(example)
example_expanded, example_expanded.subs(fibs)
from itertools import accumulate
enum_range = range(0,50,2)
def worker(accumulated_pair, current_pair):
index, current = current_pair
_, accumulated = accumulated_pair
summed_eq = do_memberwise_on_eqs(accumulated, current)
return index, summed_eq
def subs_fib_thm(pair):
index, current_eq = pair
expanded_thm = expand_sum_in_eq(even_subscript_fibonacci_eq.subs({n:Integer(index)/2}))
return index, current_eq, expanded_thm
#def eq_to_subs_dict(eq_term): return {eq_term.lhs:eq_term.rhs}
def apply_subs_on_lhs(triple):
index, current_eq, thm = triple
return current_eq.subs({thm.rhs:thm.lhs})
def latex_array_env_of_eqs(mapped):
from string import Template
template = Template(r'\begin{array}{c}$content\end{array}')
return template.substitute(content="\n".join(mapped))
triangle = accumulate(zip(enum_range, eqs), worker)
triangle = map(subs_fib_thm, triangle)
triangle = list(map(apply_subs_on_lhs, triangle))
triangle[0] = Eq(f[1]-1,f[0])
latex_array_src(triangle)
all(map(lambda eq: eq.subs(fibs), triangle))
to_matrix_notation(triangle, f , range(0, 11))
triangle = [eq.subs({f[0]:fibs[f[0]]}, simultaneous=True) for eq in triangle[1:]]
latex_array_src(triangle)
to_matrix_notation(triangle, f, range(1,11))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Verify tables exist
Step2: Model 4
Step3: Get training information and evaluate
Step4: Now let's evaluate our trained model on our eval dataset.
Step5: Let's use our evaluation's mean_squared_error to calculate our model's RMSE.
Step6: Final Model
Step7: Let's first look at our training statistics.
Step8: Now let's evaluate our trained model on our eval dataset.
Step9: Let's use our evaluation's mean_squared_error to calculate our model's RMSE.
Step10: Predict with final model
Step11: Modify above prediction query using example from simulated dataset
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
%%bash
pip freeze | grep google-cloud-bigquery==1.6.1 || \
pip install google-cloud-bigquery==1.6.1
%%bigquery
-- LIMIT 0 is a free query; this allows us to check that the table exists.
SELECT * FROM babyweight.babyweight_data_train
LIMIT 0
%%bigquery
-- LIMIT 0 is a free query; this allows us to check that the table exists.
SELECT * FROM babyweight.babyweight_data_eval
LIMIT 0
%%bigquery
CREATE OR REPLACE MODEL
babyweight.model_4
OPTIONS (
MODEL_TYPE="DNN_REGRESSOR",
HIDDEN_UNITS=[64, 32],
BATCH_SIZE=32,
INPUT_LABEL_COLS=["weight_pounds"],
DATA_SPLIT_METHOD="NO_SPLIT") AS
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks
FROM
babyweight.babyweight_data_train
%%bigquery
SELECT * FROM ML.TRAINING_INFO(MODEL babyweight.model_4)
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL babyweight.model_4,
(
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks
FROM
babyweight.babyweight_data_eval
))
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL babyweight.model_4,
(
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks
FROM
babyweight.babyweight_data_eval
))
%%bigquery
CREATE OR REPLACE MODEL
babyweight.final_model
TRANSFORM(
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
ML.FEATURE_CROSS(
STRUCT(
is_male,
ML.BUCKETIZE(
mother_age,
GENERATE_ARRAY(15, 45, 1)
) AS bucketed_mothers_age,
plurality,
ML.BUCKETIZE(
gestation_weeks,
GENERATE_ARRAY(17, 47, 1)
) AS bucketed_gestation_weeks)
) AS crossed)
OPTIONS (
MODEL_TYPE="DNN_REGRESSOR",
HIDDEN_UNITS=[64, 32],
BATCH_SIZE=32,
INPUT_LABEL_COLS=["weight_pounds"],
DATA_SPLIT_METHOD="NO_SPLIT") AS
SELECT
*
FROM
babyweight.babyweight_data_train
%%bigquery
SELECT * FROM ML.TRAINING_INFO(MODEL babyweight.final_model)
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL babyweight.final_model,
(
SELECT
*
FROM
babyweight.babyweight_data_eval
))
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL babyweight.final_model,
(
SELECT
*
FROM
babyweight.babyweight_data_eval
))
%%bigquery
SELECT
*
FROM
ML.PREDICT(MODEL babyweight.final_model,
(
SELECT
"true" AS is_male,
32 AS mother_age,
"Twins(2)" AS plurality,
30 AS gestation_weeks
))
%%bigquery
SELECT
*
FROM
ML.PREDICT(MODEL babyweight.final_model,
(
SELECT
"Unknown" AS is_male,
32 AS mother_age,
"Multiple(2+)" AS plurality,
30 AS gestation_weeks
))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can then load the data from the text file and store each of the three columns as a vector
Step2: This is not very enlightening. Of course, we can do better and plot the two curves.
Step3: This looks like a pretty arterial input function (left) and a tissue curve (right).
Step4: Experience tells that this curve needs to be fitted with a two-compartment exchange model
Step5: Let us examine the model fit
Step6: This fit produced the following parameters
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
# plotting and graphics settings
import matplotlib.pyplot as plt
%pylab inline
try:
import seaborn as sns # pretty graphics. not strictly necessary.
sns.set_context("notebook")
sns.set_style("whitegrid")
except:
pass # with less pretty graphics
time, curve, aif = np.loadtxt('tests/tumor.csv', delimiter=',', unpack=True, comments='#')
print(time[0:5])
print(curve[0:5])
print(aif[0:5])
f, (a1,a2)=plt.subplots(1,2)
a1.plot(time, aif, label='AIF')
a2.plot(time,curve, label='Tissue curve')
import compartmentmodels.compartmentmodels as CM
twocx = CM.TwoCXModel(time, curve, aif,
startdict={'Fp':20.0, 'vp': 5.5, 'PS':0.1, 've':9.1})
twocx.fit_model()
f, a = plt.subplots(1,1)
a.plot(time, curve, label='Tissue curve')
a.plot(time, twocx.fit, label='Model fit')
plt.legend(loc=4)
print(twocx.phys_parameters)
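# Hedged goodness-of-fit check (added; assumes `twocx.fit` is the fitted curve
# evaluated on the same time grid as `curve`, as in the plot above):
rmse = np.sqrt(np.mean((curve - twocx.fit) ** 2))
print('Fit RMSE:', rmse)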
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's filter a list of numbers. Note that we pass the function object itself to filter, without calling it
Step2: filter() is more commonly used with lambda functions, this because we usually use filter for a quick job where we don't want to write an entire function. Lets repeat the example above using a lambda expression
|
<ASSISTANT_TASK:>
Python Code:
#First let's make a function
def even_check(num):
if num%2 ==0:
return True
lst = list(range(20))
list(filter(even_check, lst))  # filter returns an iterator in Python 3
list(filter(lambda x: x % 2 == 0, lst))
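# filter() works with any predicate; for example, keeping only positive numbers:
list(filter(lambda x: x > 0, [-2, -1, 0, 1, 2]))   # -> [1, 2]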
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Node and edge label properties can also be changed individually after an
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
from netgraph import Graph
fig, (ax1, ax2) = plt.subplots(1, 2)
triangle = [(0, 1), (0, 2), (1, 1), (1, 2), (2, 0)]
node_positions = {
0 : np.array([0.2, 0.2]),
1 : np.array([0.5, 0.8]),
2 : np.array([0.8, 0.2]),
}
g = Graph(
triangle,
node_layout=node_positions, edge_layout='curved', edge_layout_kwargs=dict(k=0.025),
node_labels={0 : 'a', 1 : 'b', 2 : 'c'},
edge_labels=True, edge_label_fontdict=dict(fontweight='bold'),
ax=ax1,
)
h = Graph(nx.complete_graph(7), edge_width=0.5, node_labels=True,
node_label_fontdict=dict(size=14), node_label_offset=0.075, ax=ax2)
# make changes
g.edge_label_artists[(0, 1)].set_style('italic')
g.node_label_artists[1].set_color('hotpink')
# force redraw to display changes
fig.canvas.draw()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To use it in fastai, we first convert the Catalyst dict into a DataLoaders object
Step2: Using callbacks
Step3: The Catalyst example also modifies the training loop to add metrics, but you can pass these directly to your Learner in fastai
Step4: You can now fit your model. fastai supports many schedulers. We recommend using 1cycle
Step5: As you can see, migrating from Catalyst allowed us to replace 17 lines of code (in CustomRunner) with just 3 lines, and doesn't require you to change any of your existing data pipelines, optimizers, loss functions, models, etc. Once you've made this change, you can then benefit from fastai's rich set of callbacks, transforms, visualizations, and so forth.
Step6: We can now create a Learner and train without using any callbacks
|
<ASSISTANT_TASK:>
Python Code:
from migrating_catalyst import *
data = DataLoaders(loaders['train'], loaders['valid']).cuda()
@before_batch_cb
def cb(self, xb, yb): return (xb[0].view(xb[0].size(0), -1),),yb
metrics=[accuracy,top_k_accuracy]
learn = Learner(data, model, loss_func=F.cross_entropy, opt_func=Adam,
metrics=metrics, cbs=cb)
learn.fit_one_cycle(1, 0.02)
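# You can re-check the metrics on the validation set at any point using the
# standard fastai Learner API:
learn.validate()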
model = nn.Sequential(
Flatten(),
torch.nn.Linear(28 * 28, 10))
learn = Learner(data, model, loss_func=F.cross_entropy, opt_func=Adam, metrics=metrics)
learn.fit_one_cycle(1, 0.02)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Least squares parameter estimation
Step2: More generally, the task is to estimate the model weights $\mathbf{w}$ from the linear model
Step3: Bayesian Linear model
Step4: 10 observations
Step5: 100 observations
Step6: 1000 observations
Step7: Note that as more observations are added, the posterior distribution collapses on the correct estimates of the model parameters. Since an uniformative prior was used, the maximum a posteriori estimates of the model weights are identical to the maximum likelihood solution. This can be confirmed by comparing the Bayesian solution to the maximum likelihood solution
Step8: Model Selection
Step9: If the model is known, the parameters can be estimated using least-squares or Bayesian updates. The previous section showed that under a (semi-conjugate) uninformative prior, these two methods produce the same estimates. As the complexity of the data increases, specifying a model may be difficult.
Step10: Again, note that if an uninformative prior is used, the Bayesian solution is identical to the maximum likelihood solution (least-squares). The plots above show that as the model complexity increases, the model provides a closer fit to the data.
Step11: Empirical Bayes
Step12: Maximising the marginal likelihood is seldom a convex problem. However, using convex optimisers to locate locally optimal hyper-parameters is common. Although these parameters may not be globally optimal, this practice is a computationally cheap solution for locating useful parameters. If it is clear local optima are affecting the solution, multi-start optimisation or more complicated methods of setting the hyper-parameters will be required.
|
<ASSISTANT_TASK:>
Python Code:
# Create hidden linear model.
w_true = [-0.3, 0.5]
polybasis = lambda x, p: PolynomialFeatures(p).fit_transform(x)
linear_model = lambda x, w=w_true: polybasis(x, len(w) - 1).dot(w).reshape(len(x), 1)
utils.plot(({'x': np.linspace(-1., 1.)[:, None], 'model': linear_model},))
# Make noisy observations of model.
N = 1000
noise = 0.2
X = np.random.uniform(-1.0, 1.0, size=(N, 1)).reshape((N, 1))
y = linear_model(X) + np.random.normal(scale=noise, size=(N, 1))
utils.plot({'x': X, 'y': y, 'linestyle': '', 'marker': '.', 'markersize': 2, 'color': 'k'})
# Note that numpy provides a function for implementing the above equation.
# 'lstsq' returns the least-squares solution to a linear matrix equation.
w_ml = np.linalg.lstsq(polybasis(X, len(w_true) - 1), y)[0]
model = lambda x: linear_model(x, w=w_ml)
utils.plot(({'x': X, 'y': y, 'linestyle': '', 'marker': '.', 'markersize': 2, 'color': 'k'},
{'x': X, 'model': model, 'color': 'r', 'linewidth': 2}))
print "'True' coefficients: {}".format(w_true)
print "Least-squares coefficients: {}".format(w_ml.squeeze())
# Create Bayesian linear model.
basis = lambda x: polybasis(x, 1)
blm = BayesianLinearModel(basis=basis)
# Perform update.
blm.update(X[:4], y[:4])
utils.plot_update(X[:4], y[:4], linear_model, blm)
blm.update(X[4:10], y[4:10])
utils.plot_update(X[:10], y[:10], linear_model, blm)
blm.update(X[10:100], y[10:100])
utils.plot_update(X[:100], y[:100], linear_model, blm)
blm.update(X[100:], y[100:])
utils.plot_update(X, y, linear_model, blm)
print "'True' coefficients: {}".format(w_true)
print "Least-squares coefficients: {}".format(w_ml.squeeze())
print "Bayesian coefficients: {}".format(blm.location.squeeze())
N = 50
noise = 0.25
X = np.sort(np.random.uniform(0, 2*np.pi, N)).reshape((N, 1))
y = np.sin(X) + np.random.normal(scale=noise, size=(N, 1))
# Approximate Sin function with a 6-degree polynomial.
w_ml = np.linalg.lstsq(polybasis(X, 6), y)[0]
model = lambda x: linear_model(x, w=w_ml)
utils.plot(({'x': X, 'y': y, 'linestyle': '', 'marker': '.', 'markersize': 2, 'color': 'k'},
{'x': X, 'model': model, 'color': 'r', 'linewidth': 2}))
num_plots = 15
num_cols = 3
num_rows = int(np.ceil(float(num_plots) / num_cols))
# Interate through polynomial degrees and plot linear models.
MSE = np.zeros(num_plots)
ML = np.zeros(num_plots)
fig = plt.figure(figsize=(5 * num_cols, 5 * num_rows))
fig.subplots_adjust(hspace=0.6)
for p in range(0, num_plots):
# Determine the maximum likelihood weights and evaluate the model.
theta = polybasis(X, p)
w_ml = np.linalg.lstsq(theta, y)[0]
fQuery = theta.dot(w_ml)
MSE[p] = np.mean((fQuery - y)**2)
# Create Bayesian linear model.
blm = BayesianLinearModel(basis=lambda x: polybasis(x, p))
blm.update(X, y)
ML[p] = blm.evidence()
# Plot ML/Bayesian linear models.
plt.subplot(num_rows, num_cols, p + 1)
utils.plot(({'x': X, 'y': y, 'linestyle': '', 'marker': '.', 'markersize': 2, 'color': 'k'},
{'x': X, 'y': fQuery, 'color': 'b', 'linestyle': '--', 'linewidth': 2},
{'x': X, 'y': blm.predict(X), 'color': 'r', 'linewidth': 2}))
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('Polynomial degrees: ' + str(p))
utils.plot_model_fit(MSE, ML)
def sigmoid(x, params=(1., 1., 0., 0.)):
scale, rate, xoffset, yoffset = params
return np.abs(scale) / (1. + np.exp(np.abs(rate)*(-x + xoffset))) - yoffset
N = 50
noise = 0.1
X = np.sort(np.random.uniform(-6, 6, N)).reshape((N, 1))
y = sigmoid(X, (2, 2, 1, 0.5)) + np.random.normal(scale=noise, size=(N, 1))
utils.plot(({'x': X, 'y': y, 'linestyle': '', 'marker': '.', 'markersize': 5, 'color': 'k'},))
# Create Bayesian linear model.
blm = BayesianLinearModel(basis=sigmoid)
# Fit parameters using empirical Bayes.
x0 = np.array([1., 1., 0., 0.])
blm.empirical_bayes(x0, X, y)
M = 100
xs = np.linspace(-6, 6, M).reshape((M, 1))
mu, S = blm.predict(xs, variance=True)
utils.plot(({'x': xs, 'y': mu, 'color': 'r', 'linewidth': 2},
{'x': xs, 'y': mu - S, 'color': 'r', 'linestyle': '--'},
{'x': xs, 'y': mu + S, 'color': 'r', 'linestyle': '--'},
{'x': X, 'y': y, 'linestyle': '', 'marker': '.', 'markersize': 5, 'color': 'k'},))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Session 4
Step2: <a name="part-1---pretrained-networks"></a>
Step3: Now we can load a pre-trained network's graph and any labels. Explore the different networks in your own time.
Step4: Each network returns a dictionary with the following keys defined. Every network has a key for "labels" except for "i2v", since this is a feature only network, e.g. an unsupervised network, and does not have labels.
Step5: <a name="preprocessdeprocessing"></a>
Step6: Let's now try preprocessing this image. The function for preprocessing is inside the module we used to load it. For instance, for vgg16, we can find the preprocess function as vgg16.preprocess, or for inception, inception.preprocess, or for i2v, i2v.preprocess. Or, we can just use the key preprocess in our dictionary net, as this is just convenience for us to access the corresponding preprocess function.
Step7: Let's undo the preprocessing. Recall that the net dictionary has the key deprocess which is the function we need to use on our processed image, img.
Step8: <a name="tensorboard"></a>
Step9: If you open up the "mixed3a" node above (double click on it), you'll see the first "inception" module. This network encompasses a few advanced concepts that we did not have time to discuss during the lecture, including residual connections, feature concatenation, parallel convolution streams, 1x1 convolutions, and including negative labels in the softmax layer. I'll expand on the 1x1 convolutions here, but please feel free to skip ahead if this isn't of interest to you.
Step10: <a name="using-context-managers"></a>
Step11: <a name="part-2---visualizing-gradients"></a>
Step12: Let's also grab the input layer
Step14: We'll now try to find the gradient activation that maximizes a layer with respect to the input layer x.
Step15: Let's try this w/ an image now. We're going to use the plot_gradient function to help us. This is going to take our input image, run it through the network up to a layer, find the gradient of the mean of that layer's activation with respect to the input image, then backprop that gradient back to the input layer. We'll then visualize the gradient by normalizing it's values using the utils.normalize function.
Step16: <a name="part-3---basic-deep-dream"></a>
Step17: Let's now try running Deep Dream for every feature, each of our 5 pooling layers. We'll need to get the layer corresponding to our feature. Then find the gradient of this layer's mean activation with respect to our input, x. Then pass these to our dream function. This can take awhile (about 10 minutes using the CPU on my Macbook Pro).
Step18: Instead of using an image, we can use an image of noise and see how it "hallucinates" the representations that the layer most responds to
Step19: We'll do the same thing as before, now w/ our noise image
Step20: <a name="part-4---deep-dream-extensions"></a>
Step21: <h3><font color='red'>TODO! COMPLETE THIS SECTION!</font></h3>
Step22: Let's decide on some parameters of our deep dream. We'll need to decide how many iterations to run for. And we'll plot the result every few iterations, also saving it so that we can produce a GIF. And at every iteration, we need to decide how much to ascend our gradient.
Step23: Now let's dream. We're going to define a context manager to create a session and use our existing graph, and make sure we use the CPU device, as there is no gain in using GPU, and we have much more CPU memory than GPU memory.
Step24: <a name="fractal"></a>
Step25: <a name="guided-hallucinations"></a>
Step26: Preprocess both images
Step27: Like w/ Style Net, we are going to measure how similar the features in the guide image are to the dream images. In order to do that, we'll calculate the dot product. Experiment with other measures such as l1 or l2 loss to see how this impacts the resulting Dream!
Step28: We'll now use another measure that we saw when developing Style Net during the lecture. This measure the pixel to pixel difference of neighboring pixels. What we're doing when we try to optimize a gradient that makes the mean differences small is saying, we want the difference to be low. This allows us to smooth our image in the same way that we did using the Gaussian to blur the image.
Step29: Now we train just like before, except we'll need to combine our two loss terms, feature_loss and tv_loss by simply adding them! The one thing we have to keep in mind is that we want to minimize the tv_loss while maximizing the feature_loss. That means we'll need to use the negative tv_loss and the positive feature_loss. As an experiment, try just optimizing the tv_loss and removing the feature_loss from the tf.gradients call. What happens?
Step30: <a name="further-explorations"></a>
Step31: Let's now import the graph definition into our newly created Graph using a context manager and specifying that we want to use the CPU.
Step32: Let's then grab the names of every operation in our network
Step33: Now we need an image for our content image and another one for our style image.
Step34: Let's see what the network classifies these images as just for fun
Step35: <a name="content-features"></a>
Step36: Pick a layer for using for the content features. If you aren't using VGG remember to get rid of the dropout stuff!
Step37: <a name="style-features"></a>
Step38: Now we find the gram matrix which we'll use to optimize our features.
Step39: <a name="remapping-the-input"></a>
Step40: <a name="content-loss"></a>
Step41: <a name="style-loss"></a>
Step42: <a name="total-variation-loss"></a>
Step43: <a name="training"></a>
Step44: And now iterate! Feel free to play with the number of iterations or how often you save an image. If you use a different network to VGG, then you will not need to feed in the dropout parameters like I've done here.
Step45: <a name="assignment-submission"></a>
|
<ASSISTANT_TASK:>
Python Code:
# First check the Python version
import sys
if sys.version_info < (3,4):
print('You are running an older version of Python!\n\n',
'You should consider updating to Python 3.4.0 or',
'higher as the libraries built for this course',
'have only been tested in Python 3.4 and higher.\n')
print('Try installing the Python 3.5 version of anaconda'
'and then restart `jupyter notebook`:\n',
'https://www.continuum.io/downloads\n\n')
# Now get necessary libraries
try:
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import data
from scipy.misc import imresize
from scipy.ndimage.filters import gaussian_filter
import IPython.display as ipyd
import tensorflow as tf
from libs import utils, gif, datasets, dataset_utils, vae, dft, vgg16, nb_utils
except ImportError:
print("Make sure you have started notebook in the same directory",
"as the provided zip file which includes the 'libs' folder",
"and the file 'utils.py' inside of it. You will NOT be able",
"to complete this assignment unless you restart jupyter",
"notebook inside the directory created by extracting",
"the zip file or cloning the github repo. If you are still")
# We'll tell matplotlib to inline any drawn figures like so:
%matplotlib inline
plt.style.use('ggplot')
# Bit of formatting because I don't like the default inline code style:
from IPython.core.display import HTML
HTML("""<style> .rendered_html code {
    padding: 2px 4px;
    color: #c7254e;
    background-color: #f9f2f4;
    border-radius: 4px;
} </style>""")
from libs import vgg16, inception, i2v
# Stick w/ Inception for now, and then after you see how
# the next few sections work w/ this network, come back
# and explore the other networks.
net = inception.get_inception_model(version='v5')
# net = inception.get_inception_model(version='v3')
# net = vgg16.get_vgg_model()
# net = vgg16.get_vgg_face_model()
# net = i2v.get_i2v_model()
# net = i2v.get_i2v_tag_model()
print(net.keys())
# First, let's get an image:
og = plt.imread('clinton.png')[..., :3]
plt.imshow(og)
print(og.min(), og.max())
# Now call the preprocess function. This will preprocess our
# image ready for being input to the network, except for changes
# to the dimensions. I.e., we will still need to convert this
# to a 4-dimensional Tensor once we input it to the network.
# We'll see how that works later.
img = net['preprocess'](og)
print(img.min(), img.max())
# Undo the preprocessing so the values map back to a displayable image
deprocessed = net['deprocess'](img)
plt.imshow(deprocessed)
plt.show()
nb_utils.show_graph(net['graph_def'])
net['labels']
label_i = 851
print(net['labels'][label_i])
# Load the VGG network. Scroll back up to where we loaded the inception
# network if you are unsure. It is inside the "vgg16" module...
net = vgg16.get_vgg_model()
assert(net['labels'][0] == (0, 'n01440764 tench, Tinca tinca'))
# Let's explicity use the CPU, since we don't gain anything using the GPU
# when doing Deep Dream (it's only a single image, benefits come w/ many images).
device = '/cpu:0'
# We'll now explicitly create a graph
g = tf.Graph()
# And here is a context manager. We use the python "with" notation to create a context
# and create a session that only exists within this indent, as soon as we leave it,
# the session is automatically closed! We also tell the session which graph to use.
# We can pass a second context after the comma,
# which we'll use to be explicit about using the CPU instead of a GPU.
with tf.Session(graph=g) as sess, g.device(device):
# Now load the graph_def, which defines operations and their values into `g`
tf.import_graph_def(net['graph_def'], name='net')
# Now we can get all the operations that belong to the graph `g`:
names = [op.name for op in g.get_operations()]
print(names)
# First find all the pooling layers in the network. You can
# use list comprehension to iterate over all the "names" we just
# created, finding whichever ones have the name "pool" in them.
# Then be sure to append a ":0" to the names
features = ...
# Let's print them
print(features)
# This is what we want to have at the end. You could just copy this list
# if you are stuck!
assert(features == ['net/pool1:0', 'net/pool2:0', 'net/pool3:0', 'net/pool4:0', 'net/pool5:0'])
# Use the function 'get_tensor_by_name' and the 'names' array to help you
# get the first tensor in the network. Remember you have to add ":0" to the
# name to get the output of an operation which is the tensor.
x = g.get_tensor_by_name(names[0] + ':0')
assert(x.name == 'net/images:0')
def plot_gradient(img, x, feature, g, device='/cpu:0'):
    """Let's visualize the network's gradient activation
    when backpropagated to the original input image. This
    is effectively telling us which pixels contribute to the
    predicted layer, class, or given neuron within the layer.
    """
# We'll be explicit about the graph and the device
# by using a context manager:
with tf.Session(graph=g) as sess, g.device(device):
saliency = tf.gradients(tf.reduce_mean(feature), x)
this_res = sess.run(saliency[0], feed_dict={x: img})
grad = this_res[0] / np.max(np.abs(this_res))
return grad
og = plt.imread('clinton.png')[..., :3]
img = net['preprocess'](og)[np.newaxis]
fig, axs = plt.subplots(1, len(features), figsize=(20, 10))
for i in range(len(features)):
axs[i].set_title(features[i])
grad = plot_gradient(img, x, g.get_tensor_by_name(features[i]), g)
axs[i].imshow(utils.normalize(grad))
def dream(img, gradient, step, net, x, n_iterations=50, plot_step=10):
# Copy the input image as we'll add the gradient to it in a loop
img_copy = img.copy()
fig, axs = plt.subplots(1, n_iterations // plot_step, figsize=(20, 10))
with tf.Session(graph=g) as sess, g.device(device):
for it_i in range(n_iterations):
# This will calculate the gradient of the layer we chose with respect to the input image.
this_res = sess.run(gradient[0], feed_dict={x: img_copy})[0]
# Let's normalize it by the maximum activation
this_res /= (np.max(np.abs(this_res) + 1e-8))
# Or alternatively, we can normalize by standard deviation
# this_res /= (np.std(this_res) + 1e-8)
# Or we could use the `utils.normalize function:
# this_res = utils.normalize(this_res)
# Experiment with all of the above options. They will drastically
# effect the resulting dream, and really depend on the network
# you use, and the way the network handles normalization of the
# input image, and the step size you choose! Lots to explore!
# Then add the gradient back to the input image
# Think about what this gradient represents?
# It says what direction we should move our input
# in order to meet our objective stored in "gradient"
img_copy += this_res * step
# Plot the image
if (it_i + 1) % plot_step == 0:
m = net['deprocess'](img_copy[0])
axs[it_i // plot_step].imshow(m)
# We'll run it for 3 iterations
n_iterations = 3
# Think of this as our learning rate. This is how much of
# the gradient we'll add back to the input image
step = 1.0
# Every 1 iterations, we'll plot the current deep dream
plot_step = 1
for feature_i in range(len(features)):
with tf.Session(graph=g) as sess, g.device(device):
# Get a feature layer
layer = g.get_tensor_by_name(features[feature_i])
# Find the gradient of this layer's mean activation
# with respect to the input image
gradient = tf.gradients(tf.reduce_mean(layer), x)
# Dream w/ our image
dream(img, gradient, step, net, x, n_iterations=n_iterations, plot_step=plot_step)
noise = net['preprocess'](
np.random.rand(256, 256, 3) * 0.1 + 0.45)[np.newaxis]
for feature_i in range(len(features)):
with tf.Session(graph=g) as sess, g.device(device):
# Get a feature layer
        layer = g.get_tensor_by_name(features[feature_i])
        # Find the gradient of this layer's mean activation
        # with respect to the input image
        gradient = tf.gradients(tf.reduce_mean(layer), x)
        # Dream w/ the noise image
        dream(noise, gradient, step, net, x, n_iterations=n_iterations, plot_step=plot_step)
# Load your own image here
og = ...
plt.imshow(og)
# Preprocess the image and make sure it is 4-dimensional by adding a new axis to the 0th dimension:
img = net['preprocess'](og)[np.newaxis]
assert(img.ndim == 4)
# Let's get the softmax layer
print(names[-2])
layer = g.get_tensor_by_name(names[-2] + ":0")
# And find its shape
with tf.Session(graph=g) as sess, g.device(device):
layer_shape = tf.shape(layer).eval(feed_dict={x:img})
# We can find out how many neurons it has by feeding it an image and
# calculating the shape. The number of output channels is the last dimension.
n_els = layer_shape[-1]
# Let's pick a label. First let's print out every label and then find one we like:
print(net['labels'])
# Pick a neuron. Or pick a random one. This should be 0-n_els
neuron_i = np.random.randint(0, n_els)  # a random neuron; any value in 0..n_els-1 works
print(net['labels'][neuron_i])
assert(neuron_i >= 0 and neuron_i < n_els)
# And we'll create an activation of this layer which is very close to 0
layer_vec = np.ones(layer_shape) / 100.0
# Except for the randomly chosen neuron which will be very close to 1
layer_vec[..., neuron_i] = 0.99
# Explore different parameters for this section.
n_iterations = 51
plot_step = 5
# If you use a different network, you will definitely need to experiment
# with the step size, as each network normalizes the input image differently.
step = 0.2
imgs = []
with tf.Session(graph=g) as sess, g.device(device):
gradient = tf.gradients(tf.reduce_max(layer), x)
# Copy the input image as we'll add the gradient to it in a loop
img_copy = img.copy()
with tf.Session(graph=g) as sess, g.device(device):
for it_i in range(n_iterations):
# This will calculate the gradient of the layer we chose with respect to the input image.
this_res = sess.run(gradient[0], feed_dict={
x: img_copy, layer: layer_vec})[0]
# Let's normalize it by the maximum activation
this_res /= (np.max(np.abs(this_res) + 1e-8))
# Or alternatively, we can normalize by standard deviation
# this_res /= (np.std(this_res) + 1e-8)
# Then add the gradient back to the input image
# Think about what this gradient represents?
# It says what direction we should move our input
# in order to meet our objective stored in "gradient"
img_copy += this_res * step
# Plot the image
if (it_i + 1) % plot_step == 0:
m = net['deprocess'](img_copy[0])
plt.figure(figsize=(5, 5))
plt.grid('off')
plt.imshow(m)
plt.show()
imgs.append(m)
# Save the gif
gif.build_gif(imgs, saveto='softmax.gif')
ipyd.Image(url='softmax.gif?i={}'.format(
np.random.rand()), height=300, width=300)
n_iterations = 101
plot_step = 10
step = 0.1
crop = 1
imgs = []
n_imgs, height, width, *ch = img.shape
with tf.Session(graph=g) as sess, g.device(device):
# Explore changing the gradient here from max to mean
# or even try using different concepts we learned about
# when creating style net, such as using a total variational
# loss on `x`.
gradient = tf.gradients(tf.reduce_max(layer), x)
# Copy the input image as we'll add the gradient to it in a loop
img_copy = img.copy()
with tf.Session(graph=g) as sess, g.device(device):
for it_i in range(n_iterations):
# This will calculate the gradient of the layer
# we chose with respect to the input image.
this_res = sess.run(gradient[0], feed_dict={
x: img_copy, layer: layer_vec})[0]
# This is just one way we could normalize the
# gradient. It helps to look at the range of your image's
# values, e.g. if it is 0 - 1, or -115 to +115,
# and then consider the best way to normalize the gradient.
# For some networks, it might not even be necessary
# to perform this normalization, especially if you
# leave the dream to run for enough iterations.
# this_res = this_res / (np.std(this_res) + 1e-10)
this_res = this_res / (np.max(np.abs(this_res)) + 1e-10)
# Then add the gradient back to the input image
# Think about what this gradient represents?
# It says what direction we should move our input
# in order to meet our objective stored in "gradient"
img_copy += this_res * step
# Optionally, we could apply any number of regularization
# techniques... Try exploring different ways of regularizing
# gradient. ascent process. If you are adventurous, you can
# also explore changing the gradient above using a
# total variational loss, as we used in the style net
# implementation during the lecture. I leave that to you
# as an exercise!
# Crop a 1 pixel border from height and width
img_copy = img_copy[:, crop:-crop, crop:-crop, :]
# Resize (Note: in the lecture, we used scipy's resize which
# could not resize images outside of 0-1 range, and so we had
# to store the image ranges. This is a much simpler resize
# method that allows us to `preserve_range`.)
img_copy = resize(img_copy[0], (height, width), order=3,
clip=False, preserve_range=True
)[np.newaxis].astype(np.float32)
# Plot the image
if (it_i + 1) % plot_step == 0:
m = net['deprocess'](img_copy[0])
plt.grid('off')
plt.imshow(m)
plt.show()
imgs.append(m)
# Create a GIF
gif.build_gif(imgs, saveto='fractal.gif')
ipyd.Image(url='fractal.gif?i=2', height=300, width=300)
# Replace these with your own images!
guide_og = plt.imread(...)[..., :3]
dream_og = plt.imread(...)[..., :3]
assert(guide_og.ndim == 3 and guide_og.shape[-1] == 3)
assert(dream_og.ndim == 3 and dream_og.shape[-1] == 3)
guide_img = net['preprocess'](guide_og)[np.newaxis]
dream_img = net['preprocess'](dream_og)[np.newaxis]
fig, axs = plt.subplots(1, 2, figsize=(7, 4))
axs[0].imshow(guide_og)
axs[1].imshow(dream_og)
x = g.get_tensor_by_name(names[0] + ":0")
# Experiment with the weighting
feature_loss_weight = 1.0
with tf.Session(graph=g) as sess, g.device(device):
feature_loss = tf.Variable(0.0)
# Explore different layers/subsets of layers. This is just an example.
for feature_i in features[3:5]:
# Get the activation of the feature
layer = g.get_tensor_by_name(feature_i)
# Do the same for our guide image
guide_layer = sess.run(layer, feed_dict={x: guide_img})
# Now we need to measure how similar they are!
# We'll use the dot product, which requires us to first reshape both
# features to a 2D vector. But you should experiment with other ways
# of measuring similarity such as l1 or l2 loss.
# Reshape each layer to 2D vector
layer = tf.reshape(layer, [-1, 1])
guide_layer = guide_layer.reshape(-1, 1)
# Now calculate their dot product
correlation = tf.matmul(guide_layer.T, layer)
# And weight the loss by a factor so we can control its influence
feature_loss += feature_loss_weight * correlation
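        # Illustrative alternative (an assumption, not part of the original notebook):
        # an l2-style similarity between guide and dream features could replace the
        # dot product above, e.g.
        #   feature_loss -= feature_loss_weight * tf.nn.l2_loss(layer - guide_layer)
        # (negated because the training step later maximizes feature_loss).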
n_img, height, width, ch = dream_img.shape
# We'll weight the overall contribution of the total variational loss
# Experiment with this weighting
tv_loss_weight = 1.0
with tf.Session(graph=g) as sess, g.device(device):
# Penalize variations in neighboring pixels, enforcing smoothness
dx = tf.square(x[:, :height - 1, :width - 1, :] - x[:, :height - 1, 1:, :])
dy = tf.square(x[:, :height - 1, :width - 1, :] - x[:, 1:, :width - 1, :])
# We will calculate their difference raised to a power to push smaller
# differences closer to 0 and larger differences higher.
# Experiment w/ the power you raise this to to see how it effects the result
tv_loss = tv_loss_weight * tf.reduce_mean(tf.pow(dx + dy, 1.2))
# Experiment with the step size!
step = 0.1
imgs = []
with tf.Session(graph=g) as sess, g.device(device):
# Experiment with just optimizing the tv_loss or negative tv_loss to understand what it is doing!
gradient = tf.gradients(-tv_loss + feature_loss, x)
# Copy the input image as we'll add the gradient to it in a loop
img_copy = dream_img.copy()
with tf.Session(graph=g) as sess, g.device(device):
sess.run(tf.initialize_all_variables())
for it_i in range(n_iterations):
# This will calculate the gradient of the layer we chose with respect to the input image.
this_res = sess.run(gradient[0], feed_dict={x: img_copy})[0]
# Let's normalize it by the maximum activation
this_res /= (np.max(np.abs(this_res) + 1e-8))
# Or alternatively, we can normalize by standard deviation
# this_res /= (np.std(this_res) + 1e-8)
# Then add the gradient back to the input image
# Think about what this gradient represents?
# It says what direction we should move our input
# in order to meet our objective stored in "gradient"
img_copy += this_res * step
# Plot the image
if (it_i + 1) % plot_step == 0:
m = net['deprocess'](img_copy[0])
plt.figure(figsize=(5, 5))
plt.grid('off')
plt.imshow(m)
plt.show()
imgs.append(m)
gif.build_gif(imgs, saveto='guided.gif')
ipyd.Image(url='guided.gif?i=0', height=300, width=300)
sess.close()
tf.reset_default_graph()
# Stick w/ VGG for now, and then after you see how
# the next few sections work w/ this network, come back
# and explore the other networks.
net = vgg16.get_vgg_model()
# net = vgg16.get_vgg_face_model()
# net = inception.get_inception_model(version='v5')
# net = inception.get_inception_model(version='v3')
# net = i2v.get_i2v_model()
# net = i2v.get_i2v_tag_model()
# Let's explicity use the CPU, since we don't gain anything using the GPU
# when doing Deep Dream (it's only a single image, benefits come w/ many images).
device = '/cpu:0'
# We'll now explicitly create a graph
g = tf.Graph()
# And here is a context manager. We use the python "with" notation to create a context
# and create a session that only exists within this indent, as soon as we leave it,
# the session is automatically closed! We also tell the session which graph to use.
# We can pass a second context after the comma,
# which we'll use to be explicit about using the CPU instead of a GPU.
with tf.Session(graph=g) as sess, g.device(device):
# Now load the graph_def, which defines operations and their values into `g`
tf.import_graph_def(net['graph_def'], name='net')
names = [op.name for op in g.get_operations()]
content_og = plt.imread('arles.png')[..., :3]
style_og = plt.imread('clinton.png')[..., :3]
fig, axs = plt.subplots(1, 2)
axs[0].imshow(content_og)
axs[0].set_title('Content Image')
axs[0].grid('off')
axs[1].imshow(style_og)
axs[1].set_title('Style Image')
axs[1].grid('off')
# We'll save these with a specific name to include in your submission
plt.imsave(arr=content_og, fname='content.png')
plt.imsave(arr=style_og, fname='style.png')
content_img = net['preprocess'](content_og)[np.newaxis]
style_img = net['preprocess'](style_og)[np.newaxis]
# Grab the tensor defining the input to the network
x = g.get_tensor_by_name(names[0] + ':0')
# And grab the tensor defining the softmax layer of the network
softmax = g.get_tensor_by_name(names[-2] + ':0')  # same names[-2] pattern used earlier for the softmax op
for img in [content_img, style_img]:
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
# Remember from the lecture that we have to set the dropout
# "keep probability" to 1.0.
res = softmax.eval(feed_dict={x: img,
'net/dropout_1/random_uniform:0': [[1.0]],
'net/dropout/random_uniform:0': [[1.0]]})[0]
print([(res[idx], net['labels'][idx])
for idx in res.argsort()[-5:][::-1]])
print(names)
# Experiment w/ different layers here. You'll need to change this if you
# use another network!
content_layer = 'net/conv3_2/conv3_2:0'
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
content_features = g.get_tensor_by_name(content_layer).eval(
session=sess,
feed_dict={x: content_img,
'net/dropout_1/random_uniform:0': [[1.0]],
'net/dropout/random_uniform:0': [[1.0]]})
# Experiment with different layers and layer subsets. You'll need to change these
# if you use a different network!
style_layers = ['net/conv1_1/conv1_1:0',
'net/conv2_1/conv2_1:0',
'net/conv3_1/conv3_1:0',
'net/conv4_1/conv4_1:0',
'net/conv5_1/conv5_1:0']
style_activations = []
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
for style_i in style_layers:
style_activation_i = g.get_tensor_by_name(style_i).eval(
feed_dict={x: style_img,
'net/dropout_1/random_uniform:0': [[1.0]],
'net/dropout/random_uniform:0': [[1.0]]})
style_activations.append(style_activation_i)
style_features = []
for style_activation_i in style_activations:
s_i = np.reshape(style_activation_i, [-1, style_activation_i.shape[-1]])
gram_matrix = np.matmul(s_i.T, s_i) / s_i.size
style_features.append(gram_matrix.astype(np.float32))
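# Optional sanity check (added, not part of the original notebook): each Gram
# matrix should be square, with one row/column per channel of its style layer.
for layer_name, gram_i in zip(style_layers, style_features):
    print(layer_name, gram_i.shape)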
tf.reset_default_graph()
g = tf.Graph()
# Get the network again
net = vgg16.get_vgg_model()
# Load up a session which we'll use to import the graph into.
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
# We can set the `net_input` to our content image
# or perhaps another image
# or an image of noise
# net_input = tf.Variable(content_img / 255.0)
net_input = tf.get_variable(
name='input',
shape=content_img.shape,
dtype=tf.float32,
initializer=tf.random_normal_initializer(
mean=np.mean(content_img), stddev=np.std(content_img)))
# Now we load the network again, but this time replacing our placeholder
# with the trainable tf.Variable
tf.import_graph_def(
net['graph_def'],
name='net',
input_map={'images:0': net_input})
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
content_loss = tf.nn.l2_loss((g.get_tensor_by_name(content_layer) -
content_features) /
content_features.size)
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
style_loss = np.float32(0.0)
for style_layer_i, style_gram_i in zip(style_layers, style_features):
layer_i = g.get_tensor_by_name(style_layer_i)
layer_shape = layer_i.get_shape().as_list()
layer_size = layer_shape[1] * layer_shape[2] * layer_shape[3]
layer_flat = tf.reshape(layer_i, [-1, layer_shape[3]])
gram_matrix = tf.matmul(tf.transpose(layer_flat), layer_flat) / layer_size
style_loss = tf.add(style_loss, tf.nn.l2_loss((gram_matrix - style_gram_i) / np.float32(style_gram_i.size)))
def total_variation_loss(x):
    h, w = x.get_shape().as_list()[1], x.get_shape().as_list()[2]
dx = tf.square(x[:, :h-1, :w-1, :] - x[:, :h-1, 1:, :])
dy = tf.square(x[:, :h-1, :w-1, :] - x[:, 1:, :w-1, :])
return tf.reduce_sum(tf.pow(dx + dy, 1.25))
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
tv_loss = total_variation_loss(net_input)
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
# Experiment w/ the weighting of these! They produce WILDLY different
# results.
loss = 5.0 * content_loss + 1.0 * style_loss + 0.001 * tv_loss
optimizer = tf.train.AdamOptimizer(0.05).minimize(loss)
imgs = []
n_iterations = 100
with tf.Session(graph=g) as sess, g.device('/cpu:0'):
sess.run(tf.initialize_all_variables())
# map input to noise
og_img = net_input.eval()
for it_i in range(n_iterations):
_, this_loss, synth = sess.run([optimizer, loss, net_input], feed_dict={
'net/dropout_1/random_uniform:0': np.ones(
g.get_tensor_by_name(
'net/dropout_1/random_uniform:0'
).get_shape().as_list()),
'net/dropout/random_uniform:0': np.ones(
g.get_tensor_by_name(
'net/dropout/random_uniform:0'
).get_shape().as_list())
})
print("%d: %f, (%f - %f)" %
(it_i, this_loss, np.min(synth), np.max(synth)))
if it_i % 5 == 0:
m = vgg16.deprocess(synth[0])
imgs.append(m)
plt.imshow(m)
plt.show()
gif.build_gif(imgs, saveto='stylenet.gif')
ipyd.Image(url='stylenet.gif?i=0', height=300, width=300)
utils.build_submission('session-4.zip',
('softmax.gif',
'fractal.gif',
'guided.gif',
'content.png',
'style.png',
'stylenet.gif',
'session-4.ipynb'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: f1 is not visible since defined via "cdef"
Step2: filling a numpy array
Step3: basic
Step4: distance function (pure python and cython)
|
<ASSISTANT_TASK:>
Python Code:
%%cython
cdef f1(int x):
return x*x
cpdef f2(int x):
return x*x
cpdef f3(int x):
return f1(x)
#dir()
f2(3)
f1(3)
f3(3)
%%cython
cpdef fibseq(float[:] x):
cdef int n
cdef int i
n = len(x)
x[0] = 1.
x[1] = 1.
for i in range(2,n):
x[i] = x[i-1]+x[i-2]
x = np.zeros((43,), np.float32)
fibseq(x)
x
ratio = x[0:-1]/x[1:]
plt.plot(ratio,'.');
(np.sqrt(5)-1)/2.
plt.semilogy(ratio-(np.sqrt(5)-1)/2,'.');
ratio
import seaborn as sbs
sbs.plt.plot(ratio)
def pyfib(n):
a,b = 1,1
for i in range(n):
a,b = a+b, a
return a
pyfib(5)
%%cython
def fib(int n):
cdef int i,a,b
a,b = 1,1
for i in range(n):
a,b = a+b, a
return a
fib(10)
[fib(i) for i in range(10)]
pyfib(10)
%timeit fib(10)
%timeit pyfib(10)
%%cython
cpdef float distance(float[:] x, float[:] y):
cdef int i
cdef int n = x.shape[0]
cdef float d = 0.0
for i in range(n):
d += (x[i]-y[i])**2
return d
import numpy as np
x = np.array([1,2,3], np.float32)
y = np.array([7,6,5], np.float32)
dist = distance(x,y)
print dist
def pydistance(x, y, n):
d = 0.0
for i in range(n):
d += (x[i]-y[i])**2
return d
%timeit distance(x,y)
%timeit pydistance(x,y,3)
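# For comparison (added example, not in the original notebook): a fully vectorized
# NumPy version, usually the fastest option for large arrays.
def npdistance(x, y):
    return float(np.sum((x - y)**2))
print(npdistance(x, y))
%timeit npdistance(x, y)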
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Two-Level
Step2: We'll just check that the pulse area is what we want.
Step3: Solve the Problem
Step4: Plot Output
Step5: Analysis
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
SECH_FWHM_CONV = 1./2.6339157938
t_width = 1.0*SECH_FWHM_CONV # [τ]
print('t_width', t_width)
mb_solve_json = """
{
"atom": {
"fields": [
{
"coupled_levels": [[0, 1]],
"rabi_freq_t_args": {
"n_pi": 2.0,
"centre": 0.0,
"width": %f
},
"rabi_freq_t_func": "sech"
}
],
"num_states": 2
},
"t_min": -2.0,
"t_max": 10.0,
"t_steps": 120,
"z_min": -0.5,
"z_max": 1.5,
"z_steps": 100,
"interaction_strengths": [
10.0
],
"savefile": "mbs-two-sech-2pi"
}
""" % (t_width)
from maxwellbloch import mb_solve
mb_solve_00 = mb_solve.MBSolve().from_json_str(mb_solve_json)
print('The input pulse area is {0}'.format(np.trapz(mb_solve_00.Omegas_zt[0,0,:].real,
mb_solve_00.tlist)/np.pi))
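# Added note: for a sech pulse defined with n_pi=2.0, the analytic pulse area is
# n_pi * pi, so the numerically integrated value printed above should be close to 2.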
%time Omegas_zt, states_zt = mb_solve_00.mbsolve(recalc=True)
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import numpy as np
sns.set_style("darkgrid")
fig = plt.figure(1, figsize=(16, 6))
ax = fig.add_subplot(111)
cmap_range = np.linspace(0.0, 1.0, 11)
cf = ax.contourf(mb_solve_00.tlist, mb_solve_00.zlist,
np.abs(mb_solve_00.Omegas_zt[0]/(2*np.pi)),
cmap_range, cmap=plt.cm.Blues)
ax.set_title('Rabi Frequency ($\Gamma / 2\pi $)')
ax.set_xlabel('Time ($1/\Gamma$)')
ax.set_ylabel('Distance ($L$)')
for y in [0.0, 1.0]:
ax.axhline(y, c='grey', lw=1.0, ls='dotted')
plt.colorbar(cf);
fig, ax = plt.subplots(figsize=(16, 4))
ax.plot(mb_solve_00.zlist, mb_solve_00.fields_area()[0]/np.pi, clip_on=False)
ax.set_ylim([0.0, 8.0])
ax.set_xlabel('Distance ($L$)')
ax.set_ylabel('Pulse Area ($\pi$)');
# C = 0.1 # speed of light
# Y_MIN = 0.0 # Y-axis min
# Y_MAX = 4.0 # y-axis max
# ZOOM = 2 # level of linear interpolation
# FPS = 30 # frames per second
# ATOMS_ALPHA = 0.2 # Atom indicator transparency
# FNAME = "images/mb-solve-two-sech-2pi"
# FNAME_JSON = FNAME + '.json'
# with open(FNAME_JSON, "w") as f:
# f.write(mb_solve_json)
# !make-mp4-fixed-frame.py -f $FNAME_JSON -c $C --fps $FPS --y-min $Y_MIN --y-max $Y_MAX \
# --zoom $ZOOM --atoms-alpha $ATOMS_ALPHA #--peak-line --c-line
# FNAME_MP4 = FNAME + '.mp4'
# !make-gif-ffmpeg.sh -f $FNAME_MP4 --in-fps $FPS
# from IPython.display import Image
# Image(url=FNAME_MP4 +'.gif', format='gif')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 1
Step2: Example 2
Step3: Example 3
|
<ASSISTANT_TASK:>
Python Code:
import synimagegen
import matplotlib.pyplot as plt
import numpy as np
import os
%matplotlib inline
ground_truth,cv,x_1,y_1,U_par,V_par,par_diam1,par_int1,x_2,y_2,par_diam2,par_int2 = synimagegen.create_synimage_parameters(None,[0,1],[0,1],[256,256],dt=0.0025)
frame_a = synimagegen.generate_particle_image(256, 256, x_1, y_1, par_diam1, par_int1,16)
frame_b = synimagegen.generate_particle_image(256, 256, x_2, y_2, par_diam2, par_int2,16)
fig = plt.figure(figsize=(20,10))
a = fig.add_subplot(1, 2, 1,)
imgplot = plt.imshow(frame_a, cmap='gray')
a.set_title('frame_a')
a = fig.add_subplot(1, 2, 2)
imgplot = plt.imshow(frame_b, cmap='gray')
a.set_title('frame_b')
data = np.load('PIV_experiment_data.npz')
data = np.stack([data['X'], data['Y'],data['U'] ,data['V']], axis=2)
ground_truth,cv,x_1,y_1,U_par,V_par,par_diam1,par_int1,x_2,y_2,par_diam2,par_int2 = synimagegen.create_synimage_parameters(data,[0,1],[0,1],[256,256],inter=True,dt=0.0025)
frame_a = synimagegen.generate_particle_image(256, 256, x_1, y_1, par_diam1, par_int1,16)
frame_b = synimagegen.generate_particle_image(256, 256, x_2, y_2, par_diam2, par_int2,16)
fig = plt.figure(figsize=(20,10))
a = fig.add_subplot(1, 2, 1)
imgplot = plt.imshow(frame_a, cmap='gray')
a.set_title('frame_a')
a = fig.add_subplot(1, 2, 2)
imgplot = plt.imshow(frame_b, cmap='gray')
a.set_title('frame_b')
path_to_file = os.getcwd() + '/velocity_report.txt'
ground_truth,cv,x_1,y_1,U_par,V_par,par_diam1,par_int1,x_2,y_2,par_diam2,par_int2 = synimagegen.create_synimage_parameters(None,[0,1],[0,1],[256,256],path=path_to_file,inter=True,dt=0.0025)
frame_a = synimagegen.generate_particle_image(256, 256, x_1, y_1, par_diam1, par_int1,16)
frame_b = synimagegen.generate_particle_image(256, 256, x_2, y_2, par_diam2, par_int2,16)
fig = plt.figure(figsize=(20,10))
a = fig.add_subplot(1, 2, 1)
imgplot = plt.imshow(frame_a, cmap='gray')
a.set_title('frame_a')
a = fig.add_subplot(1, 2, 2)
imgplot = plt.imshow(frame_b, cmap='gray')
a.set_title('frame_b')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Specify variables
Step2: Fashion MNIST dataset
Step3: Define the Bayesian deep-learning model
Step4: Using the TPU
Step5: Train
Step6: Training the fashion MNIST Bayesian deep-learning model
Step7: Bayesian inference
Step8: Visualize predictions
|
<ASSISTANT_TASK:>
Python Code:
%tensorflow_version 2.x
import os
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from matplotlib import pyplot
%matplotlib inline
print("Tensorflow version " + tf.__version__)
WEIGHTS_FILE='./bayesian_fashionMNIST.h5'
GITHUB_REPO='https://github.com/rahulremanan/python_tutorial/'
WEIGHTS_URL='{}raw/master/Machine_Vision/07_Bayesian_deep_learning/weights/bayesian_fashionMNIST.h5'.format(GITHUB_REPO)
LABEL_NAMES = ['t_shirt','trouser','pullover','dress','coat','sandal','shirt','sneaker','bag','ankle_boots']
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# add empty color dimension
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
def fashionMNIST_model(input_data,dropout_rate=0.5,model_name="Bayesian_fashionMNIST",enable_bayesian_inference=True):
inputs = tf.keras.Input(shape=(input_data.shape[1:]))
x = tf.keras.layers.Conv2D(128,(3,3))(inputs)
x = tf.keras.layers.MaxPooling2D(pool_size=(2,2),strides=(2,2))(x)
x = tf.keras.layers.Activation('elu')(x)
x = tf.keras.layers.Dropout(dropout_rate)(x,training=enable_bayesian_inference)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(10)(x)
outputs = tf.keras.layers.Activation('softmax')(x)
model = tf.keras.Model(inputs=inputs,outputs=outputs,name=model_name)
return model
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.cluster_spec().as_dict()['worker'])
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print("REPLICAS: ", strategy.num_replicas_in_sync)
with strategy.scope():
bayesian_model = fashionMNIST_model(x_train,enable_bayesian_inference=True)
bayesian_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
loss=tf.keras.losses.sparse_categorical_crossentropy,
metrics=[tf.keras.metrics.sparse_categorical_accuracy])
if not os.path.exists(WEIGHTS_FILE):
!wget {WEIGHTS_URL} -O {WEIGHTS_FILE}
if os.path.exists(WEIGHTS_FILE):
bayesian_model.load_weights(WEIGHTS_FILE)
print('Loaded pre-trained weights: {} ...'.format(WEIGHTS_FILE))
bayesian_model.fit(x_train.astype(np.float32),y_train.astype(np.float32),
epochs=5,
steps_per_epoch=60,
validation_data=(x_test.astype(np.float32),y_test.astype(np.float32)),
validation_freq=1)
bayesian_model.save_weights(WEIGHTS_FILE,overwrite=True)
with strategy.scope():
bayesian_model = fashionMNIST_model(x_train,enable_bayesian_inference=True)
bayesian_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
loss=tf.keras.losses.sparse_categorical_crossentropy,
metrics=[tf.keras.metrics.sparse_categorical_accuracy])
bayesian_model.load_weights(WEIGHTS_FILE)
preds=[]
num_bayesian_inference_steps=10
for i in tqdm(range(num_bayesian_inference_steps)):
preds.append(bayesian_model.predict(x_test[:16].astype(np.float32)))
mean_preds=np.mean(np.asarray(preds),axis=0)
stdev_preds=np.std(np.asarray(preds),axis=0)
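# An additional uncertainty summary (added example, not in the original notebook):
# the predictive entropy of the mean softmax output; higher values mean less certainty.
predictive_entropy = -np.sum(mean_preds * np.log(mean_preds + 1e-12), axis=-1)
print('Predictive entropy of the first 5 predictions:', np.round(predictive_entropy[:5], 3))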
def plot_predictions(images,ground_truths,
preds_acc,preds_stdev=None,
label_names=None,
enable_bayesian_inference=True):
n = images.shape[0]
nc = int(np.ceil(n / 4))
f, axes = pyplot.subplots(nc, 4)
for i in range(nc * 4):
y = i // 4
x = i % 4
axes[x, y].axis('off')
label = label_names[np.argmax(preds_acc[i])]
ground_truth=label_names[ground_truths[i]]
accuracy = np.max(preds_acc[i])
if enable_bayesian_inference and preds_stdev is not None:
confidence = preds_stdev[i][np.argmax(preds_acc[i])]
if i > n:
continue
axes[x, y].imshow(images[i])
if enable_bayesian_inference and preds_stdev is not None:
axes[x, y].text(0.5,0.5, '\nLabel (Actual): {} ({})'.format(label,ground_truth) +
'\nAccuracy: {}, \nUncertainty: {}\n'.format(str(round(accuracy,2)),
str(round(confidence,2))),
fontsize=10)
else:
axes[x, y].text(0.5,0.5, '\nLabel: {}'.format(label) +
'\nAccuracy: {} \n'.format(str(round(accuracy,2))),
fontsize=10)
pyplot.gcf().set_size_inches(16,16)
plot_predictions(np.squeeze(x_test[:16]), y_test[:16],
mean_preds,stdev_preds,
label_names=LABEL_NAMES,
enable_bayesian_inference=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Training the network
Step9: Check out your predictions
Step10: Thinking about your results
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save the last 21 days
test_data = data[-21*24:]
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
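# Quick check of the split sizes (added for clarity):
print('train:', train_features.shape, ' validation:', val_features.shape, ' test:', test_features.shape)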
class NeuralNetwork(object):
def sigmoid(x):
return 1/(1 + np.exp(-x))
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weidghts
self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.input_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5,
(self.output_nodes, self.hidden_nodes))
self.lr = learning_rate
#### Set this to your implemented sigmoid function ####
# Activation function is the sigmoid function
self.activation_function = lambda x : 1/(1 + np.exp(-x))
        # Question: writing "self.activation_function = sigmoid" (see below) makes the notebook report that sigmoid is not defined
#self.activation_function= sigmoid
#def sigmoid(x):
#return 1 / (1 + np.exp(-x))
def train(self, inputs_list, targets_list):
# Convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer
final_inputs =np.dot(self.weights_hidden_to_output, hidden_outputs)
final_outputs = final_inputs
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
output_errors = targets - final_outputs
# Output layer error is the difference between desired target and actual
# TODO: Backpropagated error
hidden_errors = np.dot(self.weights_hidden_to_output.T, output_errors) # errors propagated to the hidden layer
hidden_grad = hidden_outputs * (1 - hidden_outputs) # hidden layer gradients
# TODO: Update the weights
self.weights_hidden_to_output += self.lr * np.dot(output_errors , hidden_outputs.T) # update hidden-to-output weights with gradient descent step
#self.weights_input_to_hidden +=self.lr * np.dot(hidden_errors.T, inputs.T) *hidden_grad# update input-to-hidden weights with gradient descent step
self.weights_input_to_hidden += self.lr * np.dot(hidden_errors, inputs.T) * hidden_grad
#Question:
        # I tried many times to figure out when to use * versus np.dot; in the end I turned to the forum for help and used np.dot instead.
        # Also, whether or not .T is used determines whether the final unit test passes, which can be confusing.
def run(self, inputs_list):
# Run a forward pass through the network
inputs = np.array(inputs_list, ndmin=2).T
#### Implement the forward pass here ####
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)# signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs)# signals from hidden layer
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)# signals into final output layer
final_outputs = final_inputs# signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
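# Tiny sanity check of MSE (added example): errors are 0, 0 and 2, so MSE = 4/3.
print(MSE(np.array([1., 2., 3.]), np.array([1., 2., 5.])))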
import sys
### Set the hyperparameters here ###
epochs = 1500
learning_rate = 0.2
hidden_nodes = 10
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for e in range(epochs):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
for record, target in zip(train_features.ix[batch].values,
train_targets.ix[batch]['cnt']):
network.train(record, target)
# Printing out the training progress
train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
plt.ylim(ymax=0.5)
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
import unittest
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
[-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328, -0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, 0.39775194, -0.29887597],
[-0.20185996, 0.50074398, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Template matching
Step2: Both the image used for processing and the template are converted to grayscale images to boost efficiency.
Step3: Change the code above to plot grayscale images.
Step4: Change the code above and try other methods, TM_CCORR_NORMED, TM_SQDIFF_NORMED, for instance.
Step5: Let's try to find the template on the rotated image.
Step7: Let's transform the image back to the perpendicular plane.
Step8: Recognition of ArUco markers
Step9: Calibration
Step10: The first 6 images for calibration
Step11: Using the ArUco calibration, let's find the camera matrix and the associated radial and tangential distortion parameters.
Step12: Plot the distorted image and the one corrected using the calibration parameters.
Step13: Complex example
|
<ASSISTANT_TASK:>
Python Code:
import glob # to extend file name pattern to list
import cv2 # OpenCV for image processing
from cv2 import aruco # to find ArUco markers
import numpy as np # for matrices
import matplotlib.pyplot as plt # to show images
!wget -q -O sample_data/monalisa.jpg https://raw.githubusercontent.com/OSGeoLabBp/tutorials/master/english/img_processing/code/monalisa.jpg
!wget -q -O sample_data/mona_temp4.png https://raw.githubusercontent.com/OSGeoLabBp/tutorials/master/english/img_processing/code/mona_temp4.png
img = cv2.imread('sample_data/monalisa.jpg') # load image
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert image to grayscale
templ = cv2.imread('sample_data/mona_temp4.png') # load template
templ_gray = cv2.cvtColor(templ, cv2.COLOR_BGR2GRAY) # convert template to grayscale
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5)) # show image and template
ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
ax1.set_title('image to scan')
ax2.imshow(cv2.cvtColor(templ, cv2.COLOR_BGR2RGB)) # BGR vs. RGB
ax2.set_title('template to find')
ax2.set_xlim(ax1.get_xlim()) # set same scale
ax2.set_ylim(ax1.get_ylim())
print(f'image sizes: {img_gray.shape} template sizes: {templ_gray.shape}')
result = cv2.matchTemplate(img_gray, templ_gray, cv2.TM_CCOEFF_NORMED)
val, _, max = cv2.minMaxLoc(result)[1:4] # get position of best match
fr = np.array([max,
(max[0]+templ.shape[1], max[1]),
(max[0]+templ.shape[1], max[1]+templ.shape[0]),
(max[0], max[1]+templ.shape[0]),
max])
result_uint = ((result - np.min(result)) / (np.max(result) - np.min(result)) * 256).astype('uint8')
result_img = cv2.cvtColor(result_uint, cv2.COLOR_GRAY2BGR)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
ax1.set_title('Match on original image')
ax1.plot(fr[:,0], fr[:,1], 'r')
ax1.plot([max[0]],[max[1]], 'r*')
ax2.imshow(result_img)
ax2.plot(fr[:,0], fr[:,1], 'r')
ax2.plot([max[0]],[max[1]], 'r*')
ax2.set_title('Normalized coefficients')
ax2.set_xlim(ax1.get_xlim()) # set same scale
ax2.set_ylim(ax1.get_ylim())
print(f'best match at {max} value {val:.6f}')
!wget -q -O sample_data/monalisa_tilt.jpg https://raw.githubusercontent.com/OSGeoLabBp/tutorials/master/english/img_processing/code/monalisa_tilt.jpg
img = cv2.imread('sample_data/monalisa_tilt.jpg', cv2.IMREAD_GRAYSCALE)
result = cv2.matchTemplate(img, templ_gray, cv2.TM_CCOEFF_NORMED)
val, _, max = cv2.minMaxLoc(result)[1:4]
fr = np.array([max,
(max[0]+templ.shape[1], max[1]),
(max[0]+templ.shape[1], max[1]+templ.shape[0]),
(max[0], max[1]+templ.shape[0]),
max])
plt.imshow(img, cmap="gray")
plt.plot(fr[:,0], fr[:,1], 'r')
plt.plot([max[0]],[max[1]], 'r*')
print(f'best match at {max} value {val:.6f} BUT FALSE!')
def project_img(image, a_src, a_dst):
    """calculate transformation matrix"""
new_image = image.copy() # make a copy of input image
# get parameters of transformation
projective_matrix = cv2.getPerspectiveTransform(a_src, a_dst)
# transform image
    transformed = cv2.warpPerspective(image, projective_matrix, image.shape)
# cut destination area
transformed = transformed[0:int(np.max(a_dst[:,1])),0:int(np.max(a_dst[:,0]))]
return transformed
# frame on warped image
src = [(240, 44), (700, 116), (703, 815), (243, 903)]
# frame on original
s = img_gray.shape
dst = [(0, 0), (s[1], 0), (s[1], s[0]), (0,s[0])]
a_src = np.float32(src)
a_dst = np.float32(dst)
# image transformation
img_dst = project_img(img, a_src, a_dst)
# template match
result = cv2.matchTemplate(img_dst, templ_gray, cv2.TM_CCOEFF_NORMED)
val, _, max = cv2.minMaxLoc(result)[1:4]
# frame around template on transformed image
fr = np.array([max,
(max[0]+templ.shape[1], max[1]),
(max[0]+templ.shape[1], max[1]+templ.shape[0]),
(max[0], max[1]+templ.shape[0]),
max])
fig, ax = plt.subplots(1,2, figsize=(13,8))
ax[0].imshow(img, cmap="gray");
ax[0].plot(a_src[:,0], a_src[:,1], 'r--')
ax[0].set_title('Original Image')
ax[1].imshow(img_dst, cmap="gray")
ax[1].plot(a_dst[:,0], a_dst[:,1], 'r--')
ax[1].set_title('Warped Image')
ax[1].plot(fr[:,0], fr[:,1], 'r')
ax[1].plot([max[0]],[max[1]], 'r*')
print(f'best match at {max} value {val:.2f}')
!wget -q -O sample_data/markers.png https://raw.githubusercontent.com/OSGeoLabBp/tutorials/master/english/img_processing/code/markers.png
img = cv2.imread('sample_data/markers.png')
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(cv2.aruco.DICT_4X4_100)
params = aruco.DetectorParameters_create()
corners, ids, _ = aruco.detectMarkers(img_gray, aruco_dict, parameters=params)
x = np.zeros(ids.size)
y = np.zeros(ids.size)
img1 = img.copy()
for j in range(ids.size):
x[j] = int(round(np.average(corners[j][0][:, 0])))
y[j] = int(round(np.average(corners[j][0][:, 1])))
cv2.putText(img1, str(ids[j][0]), (int(x[j]+2), int(y[j])), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 0, 255), 3)
fig, ax = plt.subplots(1,2, figsize=(10,5))
ax[0].imshow(img)
ax[1].imshow(img1)
ax[1].plot(x, y, "ro")
print(list(zip(list(x), list(y))))
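# Optional alternative (added example): OpenCV can also draw the detected
# markers and their ids directly onto a copy of the image.
img_drawn = aruco.drawDetectedMarkers(img.copy(), corners, ids)
plt.figure()
plt.imshow(cv2.cvtColor(img_drawn, cv2.COLOR_BGR2RGB))
_ = plt.title('aruco.drawDetectedMarkers')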
!wget -q -O sample_data/cal.zip https://raw.githubusercontent.com/OSGeoLabBp/tutorials/master/english/img_processing/code/cal.zip
!unzip -q -o sample_data/cal.zip -d sample_data
width = 5 # Charuco board size
height = 7
board = cv2.aruco.CharucoBoard_create(width, height, .025, .0125, aruco_dict) # generate board in memory
img = board.draw((500, 700))
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
_ = plt.title('Charuco board')
fig, ax = plt.subplots(1, 6, figsize=(15, 2))
for i in range(6):
im = cv2.imread('sample_data/cal{:d}.jpg'.format(i+1))
ax[i].imshow(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))
ax[i].set_title('cal{:d}.jpg'.format(i+1))
allCorners = []
allIds = []
decimator = 0
for name in glob.glob("sample_data/cal*.jpg"):
frame = cv2.imread(name)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners, ids, _ = cv2.aruco.detectMarkers(gray, aruco_dict)
ret, corners1, ids1 = cv2.aruco.interpolateCornersCharuco(corners, ids, gray, board)
allCorners.append(corners1)
allIds.append(ids1)
decimator += 1
ret, mtx, dist, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco(allCorners, allIds, board, gray.shape, None, None)
print("Camera matrix [pixels]")
for i in range(mtx.shape[0]):
print(f'{mtx[i][0]:8.1f} {mtx[i][1]:8.1f} {mtx[i][2]:8.1f}')
print('Radial components')
print(30 * '-')
print(f'{dist[0][0]:10.5f} {dist[0][1]:10.5f} {dist[0][2]:10.5f}')
print(30 * '-')
print('Tangential components')
print(f'{dist[0][3]:10.5f} {dist[0][4]:10.5f}')
gray = cv2.imread('sample_data/cal1.jpg', cv2.IMREAD_GRAYSCALE)
fig, ax = plt.subplots(1, 2, figsize=(10,5))
ax[0].imshow(gray, cmap='gray')
ax[0].set_title('distorted image')
ax[1].imshow(cv2.undistort(gray, mtx, dist, None), cmap='gray')
_ = ax[1].set_title('undistorted image')
!wget -q -O sample_data/demo.mp4 https://raw.githubusercontent.com/OSGeoLabBp/tutorials/master/english/img_processing/code/demo.mp4
cap = cv2.VideoCapture('sample_data/demo.mp4')
frame = 0 # frame counter
xc = [] # for pixel coordinates of marker
yc = []
frames = []
while True:
ret, img = cap.read() # get next frame from video
if ret:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert image to grayscale
img_gray = cv2.undistort(gray, mtx, dist, None) # remove camera distortion using calibration
corners, ids, _ = aruco.detectMarkers(img_gray, aruco_dict, parameters=params) # find ArUco markers
if ids: # marker found?
yc.append(img_gray.shape[1] - int(round(np.average(corners[0][0][:, 1])))) # change y direction
frames.append(frame)
frame += 1 # frame count
else:
break # no more images
plt.plot(frames, yc)
plt.title('Vertical positions of ArUco marker from video frames')
plt.xlabel('frame count')
plt.grid()
_ = plt.ylabel('vertical position [pixel]')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Importing it takes some time
Step2: 05 - NumPy Arrays
Step3: Let's look at the element type of few array which numpy calls dtype
Step4: |S11 means String with maximum length 11.
Step5: 07 - Vectorized Operations
Step6: 09 - Multiplying by a Scalar
Step7: 11 - Calculate Overall Completion Rate
Step8: See this article for more information about bitwise operations.
Step9: 13 - Standardizing Data
Step10: 15 - NumPy Index Arrays
Step11: 17 - + vs +=
Step12:
Step13: 19 - In-Place vs Not In-Place
Step14: A slice refers to a view of the original array
Step15: 23 - Series Indexes
Step16: Some people would call countries[0] indexing into the array, but the instructor uses the term position to avoid confusion, because in Pandas the index and the position are not the same thing
Step17: NumPy arrays are a souped-up version of Python lists
Step18: 25 - Vectorized Operations and Series Indexes
Step19: Matching indexes were used to add the 2 series
Step20: 28 - Filling Missing Values - Solution
Step21: 29 - Pandas Series apply
Step22: 31 - Plotting in Pandas - Solution
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
employments = pd.read_csv('employment_above_15.csv')
employments[0:5]
#Selecting a column and displaying its first 5 elements
employments.get('1991')[0:5]
employments.get('Country')[0:5]
def max_employment(countries, employment):
i = employment.argmax()
return (countries[i], employment[i])
max_employment(employments.get('Country'), employments.get('2007'))
countries = np.array(['Afghanistan','Albania','Algeria','Angola','Argentina','Armenia'])
employment = np.array([56.700001, 52.700001, 39.400002, 75.800003, 53.599998])
print countries.dtype
print employment.dtype
print np.array([0, 1, 2, 3]).dtype
print np.array([True, False, True]).dtype
print np.array(['AL', 'AK']).dtype
print employment.mean()
print employment.std()
print employment.max()
print employment.sum()
np.array([1, 2, 3]) + np.array([4, 5, 6])
np.array([1, 2, 3]) * 3
np.array([1, 2, 3]) + np.array([4, 5, 6])
np.array([1, 2, 3]) + 1
np.array([1, 2, 3]) - np.array([7, 10, 15])
np.array([1, 2, 3]) - 1
np.array([1, 2, 3]) * np.array([4, 5, 6])
np.array([1, 2, 3]) * np.array([2])
#Throws error
#np.array([1, 2, 3]) * np.array([2, 3])
np.array([2, 3]) ** np.array([2, 3])
np.array([5, 6]) ** 2
female_completion = pd.read_csv('female_completion_rate.csv')
male_completion = pd.read_csv('male_completion_rate.csv')
female_completion[0:5]
male_completion[0:5]
female = np.array([56.0, 23.0, 65.0])
male = np.array([23.0, 45.0, 22.0])
def overall_completion_rate(female_completion, male_completion):
return (female_completion + male_completion) / 2
overall_completion_rate(female, male)
def standardize_data(values):
return (values - values.mean()) / values.std()
def mean_time_for_paid_students(time_spent, days_to_cancel):
return time_spent[days_to_cancel >= 7].mean()
a = np.array([1, 2, 3, 4])
b = a
a += np.array([1, 1, 1, 1]) #Difference here
print b
a = np.array([1, 2, 3, 4])
b = a
a = a + np.array([1, 1, 1, 1]) #Difference here
print b
a = np.array([1, 2, 3, 4, 5])
slice = a[:3]
slice[0] = 100
a
def variable_correlation(variable1, variable2):
both_above = (variable1 > variable1.mean()) & \
(variable2 > variable2.mean())
both_below = (variable1 < variable1.mean()) & \
(variable2 < variable2.mean())
is_same_direction = both_above | both_below
num_same_direction = is_same_direction.sum()
num_different_direction = len(variable1) - num_same_direction
return (num_same_direction, num_different_direction)
s = pd.Series([1, 2, 3, 4])
s.describe()
countries = np.array(['Albania', 'Algeria', 'Andorra', 'Angola'])
life_expectancy = np.array([74.7, 75., 83.4, 57.6])
life_expectancy
life_expectancy = pd.Series([74.7, 75., 83.4, 57.6],
index = ['Albania',
'Algeria',
'Andorra',
'Angola'])
life_expectancy
#Access by index
life_expectancy.loc['Angola']
#If we don't specify index then automatically adds index 0, 1, 2, ...
pd.Series([74.7, 75., 83.4, 57.6])
#Access element by position
print life_expectancy.iloc[0]
#same as
print life_expectancy[0]
def max_employment(employment):
max_country = employment.argmax()
max_value = employment.loc[max_country]
return (max_country, max_value)
s1 = pd.Series([1, 2, 3, 4], index = ['a', 'b', 'c', 'd'])
s2 = pd.Series([10, 20, 30, 40], index = ['a', 'b', 'c', 'd'])
s1
s2
s1 + s2
# Index are in different order
s3 = pd.Series([10, 20, 30, 40], index = ['b', 'd', 'a', 'c'])
s3
s1 + s3
s4 = pd.Series([10, 20, 30, 40], index = ['c', 'd', 'e', 'f'])
s4
s1 + s4
#If we don't want to show NaN in our solution
(s1 + s4).dropna()
#If we want to give a default value
s1.add(s4, fill_value=0)
names = pd.Series([
'Andre Agassi',
'Barry Bonds',
'Christopher Columbus',
'Daniel Defoe'
])
def reverse_name(name):
split_name = name.split(" ")
return "{}, {}".format(split_name[1], split_name[0])
reverse_name(names.iloc[0])
def reverse_names(names):
return names.apply(reverse_name)
reverse_names(names)
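# An equivalent vectorised alternative (added example) using pandas string methods:
split_names = names.str.split(' ')
print(split_names.str[1] + ', ' + split_names.str[0])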
employment = pd.read_csv('employment_above_15.csv', index_col = 'Country')
female_completion = pd.read_csv('female_completion_rate.csv', index_col = 'Country')
male_completion = pd.read_csv('male_completion_rate.csv', index_col = 'Country')
life_expectancy = pd.read_csv('life_expectancy.csv', index_col = 'Country')
gdp_per_capita = pd.read_csv('gdp_per_capita.csv', index_col = 'Country')
_country = 'United States'
employment_country = employment.loc[_country]
female_completion_country = female_completion.loc[_country]
male_completion_country = male_completion.loc[_country]
life_expectancy_country = life_expectancy.loc[_country]
gdp_per_capita_country = gdp_per_capita.loc[_country]
%pylab inline
employment_country.plot()
female_completion_country.plot()
male_completion_country.plot()
life_expectancy_country.plot()
gdp_per_capita_country.plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: User Defined Module
Step2: Import with renaming
Step3: from...import statement
Step4: To import all definitions from the module, just specify '*' as below. Please note that this is not a good practice, as it can lead to duplicate definitions for an identifier.
Step5: Module Search Path
Step6: Reloading a Module
Step7: Module Functions
Step8: Packages
|
<ASSISTANT_TASK:>
Python Code:
# import os module
import os
os.getcwd()
# The following command provides the details of the imported package definition
# help(os.listdir())
# save the following code as example.py
def add(a,b):
return a+b
# now you can import example.py
# import example
# example.add(5,4)
import math as m
print(m.pi)
from math import pi
print(pi) # please note the dot operator is not required
from math import *
print(pi)
import sys
sys.path
# my_module.py
# print('This code got executed')
# import imp
# import my_module
# This code got executed
# import my_module
# import my_module
# imp.reload(my_module)
print(dir(os))
import math
print(math.__doc__)
math.__name__
# examples
import math
from math import pi
print(pi)
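# A minimal package layout for reference (illustrative; names are hypothetical):
#
#   my_package/
#       __init__.py
#       module_a.py
#
# which could then be used as:
#   from my_package import module_a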
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Iterating over a collection
Step3: Iterating over a list of strings
Step4: Default Parameters
Step6: Bonus
|
<ASSISTANT_TASK:>
Python Code:
def add_together(one, two):
one = one + two
return one
def multiply_and_add(one, two):
    one = add_together(one, two)
    return one * one
temporary_value = multiply_and_add(2, 3)
print(temporary_value)
print(multiply_and_add(2, 3))
number_1 = 10
number_2 = 30
print(len(str(number_1 * number_2)))
print(len(number_1 * number_2))  # raises TypeError: an int has no len()
some_string = 'cow'
print(some_string)
some_string = some_string + ' town'
print(some_string)
print('\n\n\n\n')
print('meow cats'.ljust(10))
words = ['one', 'two']
words_two = words
words[0] = 'dinasour'
print(words)
print(words_two)
print(ord('z'))
print(ord('a'))
print(ord('z') - ord('a'))
import math
# Cannot range over infinity
for i in range(math.inf):
print(i)
def get_rows():
return [1,2,3,4,5,6,7,8,9,10,11,12]
def get_columns():
return [1,2,3,4,5,6,7,8,9,10,11,12]
def get_max_width(first_number, second_number):
    """Return the widest a given multiplication-table cell should be."""
    highest_value_str = str(first_number * second_number)
    return len(highest_value_str) + 2  # Add two so each cell appears one wider on each side
rows = get_rows()
columns = get_columns()
max_width = get_max_width(max(rows), max(columns))
output = ''
# Go over the numbers to produce each row in the times table
for row_value in rows:
# Create each column in the times table with this for loop
for col_value in columns:
product_str = str(row_value * col_value)
output = output + product_str.rjust(max_width)
# Add a new line after each set of numbers to ensure the next
# number in the times table gets its own row.
output += '\n'
print(output)
# For each word in our list of words I want to "translate" it
from random import shuffle
new_alphabet = ['l', 'i', 'g', 'a', 'f', 'x', 'o', 'e', 'v',
'y', 'r', 'b', 'd', 'h', 'm', 'p', 'k', 'u',
'w', 'j', 's', 'q', 'c', 'z', 't', 'n']
words = ['The', 'quick', 'brown', 'fox',
'jumps', 'over', 'the', 'lazy', 'dog']
# Normalize the words
position = 0
for word in words:
words[position] = word.lower()
position += 1
# Go over each word and replace it with the 'modified' word
# Create a new list so that we do not modify the original
word_index = 0
new_words = list(words)
for word in words:
# Initialize the new word
new_words[word_index] = ''
# Go over and setup each new word replacing the old letters
for letter in word:
new_alphabet_position = ord(letter) - ord('a')
new_words[word_index] += new_alphabet[new_alphabet_position]
# Increase the word index so we populate the word in the list correctly
word_index += 1
print('Existing words: ', words)
print('New words: ', new_words)
import math
def times_x(value, x=10):
return value * x
def divide_x(value, x=None):
    if x is None:
return 0
elif x == 0:
return math.inf # This is infinity
return value / x
print('one', times_x(10))
print('two', times_x(10,5))
print('two', divide_x(20, 10))
print('two', divide_x(20, 0))
print('one', divide_x(20))
def broken_times(x, y):
    if y is None:
y = 10
return x * y
# print(broken_times(10))
def x_times_y(x, y):
if type(x) == int and type(y) == int:
return x * y
return None
product = x_times_y(2, 4)
print(product)
product = x_times_y(None, 2)
if product is None:
print('I have no product')
else:
print(product)
def test_values(first_value, second_value):
    """Assert that the first and second parameters have the same value.

    Raises an AssertionError with a descriptive message if they differ.
    """
    failed_message = "{0} is not equal to {1}".format(first_value, second_value)
    assert first_value == second_value, failed_message
def is_prime(x):
    # 1 is not prime, and the list now covers every prime checked below
    if x in [2, 3, 5, 7, 11, 13, 17, 19, 23]:
        return True
    return False
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23]
for prime in primes:
print(prime)
test_values(is_prime(prime), True)
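# Illustrative failure case (hypothetical input): 4 is not prime, so the
# assertion inside test_values raises, and we catch it to show its message.
try:
    test_values(is_prime(4), True)
except AssertionError as error:
    print(error)  # prints "False is not equal to True"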
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The ieml lexicons are stored on github, they have to be downloaded first
Step2: The ieml.ieml_database.IEMLDatabase is responsible of reading and writing association between ieml.usl.USL and ieml.dictionary.Script with their translation to disk. The values as stored in rows in one file per USL. The format of the rows are space separated values (ssv like csv or tsv).
Step4: ieml.dictionary.Script
|
<ASSISTANT_TASK:>
Python Code:
from ieml.usl.usl import usl
u = usl("[E:.b.E:B:.- E:S:. (E:.-wa.-t.o.-' E:.-'wu.-S:.-'t.o.-',)(a.T:.-) > ! E:.l.- (E:.wo.- E:S:.-d.u.-')]")
u.check()
print(u)
u1 = usl("[E:.b.E:B:.- E:S:. (E:.-'wu.-S:.-'t.o.-', E:.-wa.-t.o.-' )(a.T:.-) > ! E:.l.- (E:.wo.- E:S:.-d.u.-')]")
u1.check()
print(u1)
assert u1 == u
from ieml.ieml_database import GitInterface, IEMLDatabase
gitdb = GitInterface()
gitdb.pull() # download database in ~/.cache/ieml/ folder
print(gitdb)
# instanciate a ieml.ieml_database.IEMLDatabase from the downloaded git repository
db = IEMLDatabase(folder=gitdb.folder)
print(db)
morphs = db.list(type='morpheme')[:100]
desc = db.get_descriptors()
desc.get_values_partial(morphs[0])
w = db.list(type='word', parse=True)[1]
list(w.iter_structure())
# where the exclamation mark is located
str(u.role)
print('\n'.join(str(r) + ' ' + str(a.actor) for r, a in u.syntagmatic_fun.actors.items()))
from ieml.usl.constants import ADDRESS_SCRIPTS,NAMES_TO_ADDRESS
# list of syntagmatic roles
print('\n'.join(str(r) + ' ' + NAMES_TO_ADDRESS[r] for r in ADDRESS_SCRIPTS))
from requests import get
from ieml.usl.word import Word
from ieml.usl.usl import usl
def get_word_structure(w: Word):
return get("https://dev.intlekt.io/api/words/{}/?repository=IEMLdev".format(str(w))).json()
SCHEMA_NOTES = """
The structure for any Ieml is :
IemlEntry = {
'ieml': string,
'cardinality': 'singular_sequence' | 'paradigm' | 'root_paradigm',
'class': 'Noun'|'Verb'|'Auxialiary',
'type': 'word' | 'morpheme' | 'polymorpheme' | 'lexeme',
'comments': {'en': [], 'fr': []},
'tags': {'en': [], 'fr': []},
'translations': {'en': [], 'fr': []},
'created': True|False, # exists in db, i.e. at least one of 'comments', 'tags' or 'translations' has a value
'editable': True|False, # if exists in db and not in the main db or doesn't exists in db
'domains': [],
'index': string, # string value to order the usls from each other
'main_table': None, # main table for morpheme
'paradigm': True|False, # is a paradigm ?
'singular_sequences': None|IemlEntry[], # if not a paradigm, None, otherwise the list of the singular sequences (the cells of the table)
}
For Words, we had the following entries :
WordsEntry = IemlEntry + {
'role': string[], # the tree address where to put the '!'
'syntagmatic_function': SyntagmaticFunctionEntry # the tree
}
LexemeEntry = IemlEntry + {
'pm_content': PolyMorphemeEntry,
'pm_flexion': PolyMorphemeEntry
}
PolyMorphemeEntry = IemlEntry + {
'constant': MorphemeEntry[], # the constant of the polymorphemes
'groups': (MorphemeEntry, 0|1|2)[], # the variables with theirs multiplicities.
}
MorphemeEntry = IemlEntry
The tree structure : a tree of subtype of SyntagmaticFunctionEntry. The nodes are accessed with the actor property.
SyntagmaticFunctionEntry = {
'actor': LexemeEntry, # the lexeme at this node in the tree
'role_full': string[], # the address of thisnode in the tree
'role': string, # the last value of the address (role_full[-1])
'type': 'ProcessSyntagmaticFunction'| 'DependantQualitySyntagmaticFunction'|'IndependantQualitySyntagmaticFunction',
# There is 3 types of syntagmatic functions, process for verbal frames, dependant for actants (nouns) and independant for adjectives.
}
ProcessSyntagmaticFunctionEntry = SyntagmaticFunctionEntry + {
'valence': 1|2|3,
'initiator': DependantQualitySyntagmaticFunctionEntry,
'recipient': DependantQualitySyntagmaticFunctionEntry, # always None is valence < 2
'interactant': DependantQualitySyntagmaticFunctionEntry, # always None is valence < 3
'cause': DependantQualitySyntagmaticFunctionEntry,
'intention': DependantQualitySyntagmaticFunctionEntry,
'manner': DependantQualitySyntagmaticFunctionEntry,
'time': DependantQualitySyntagmaticFunctionEntry,
'location': DependantQualitySyntagmaticFunctionEntry,
}
DependantQualitySyntagmaticFunctionEntry = SyntagmaticFunctionEntry + {
'independant': IndependantQualitySyntagmaticFunction,
'dependant': DependantQualitySyntagmaticFunctionEntry
}
IndependantQualitySyntagmaticFunction = SyntagmaticFunctionEntry
"""
get_word_structure(usl("[! E:A:. (wa.)]"))
from itertools import chain
def list_polymorpheme_of_word(w):
w = usl(w)
assert isinstance(w, Word)
    # w.syntagmatic_fun is the syntagmatic tree of the word w
    # w.syntagmatic_fun.actors is a dictionary mapping every role to every
    # syntagmatic function found among the descendants of the current node
    # so sfun iterates over each node of the tree in turn
    # sfun.actor is the lexeme of the node sfun
    # sfun.actor.pm_flexion is the flexion polymorpheme and
    # sfun.actor.pm_content is the content polymorpheme
return list(chain.from_iterable((sfun.actor.pm_content, sfun.actor.pm_flexion)
for sfun in w.syntagmatic_fun.actors.values()))
pl = list_polymorpheme_of_word("[! E:A:. (E:.wo.- E:S:.-d.u.-')(b.-S:.A:.-'S:.-'S:.-',) > E:A:. E:A:. (E:.wo.- E:S:.-d.u.-')(k.a.-k.a.-')]")
for pm in pl:
print(pm)
# parse all words
usls = db.list(parse=False, type='word')
usls
from ieml.usl import USL
from ieml.dictionary.script import Script
# the database contains the morphemes and the usls made from morphemes
all(isinstance(u, (USL, Script)) for u in usls)
descriptorsDB = db.get_descriptors()
def display_usls(u):
descriptor = descriptorsDB.get_values_partial(u)
# descriptor is a dict :
# { (ieml, descriptor_type, lang) : string[]}
# descriptor_type in ieml.constants.DESCRIPTORS_CLASS = ['translations', 'comments', 'tags']
# lang in ieml.constants.LANGUAGES = [ 'fr', 'en' ]
return str(u) +\
"".join("\n\t{}: {}".format(k, str(v)) for k, v in {
**{'{}_{}'.format(descriptor_type, lang): ', '.join(t)
for (_, descriptor_type, lang), t in descriptor.items()},
'type': u.__class__.__name__,
'is_paradigm': not u.is_singular,
}.items())
from ieml.usl.usl import usl
# usl() parse the string and return an Usl or a Script (if a morpheme)
oo_script = usl('O:O:.')
assert isinstance(oo_script, Script)
print(display_usls(usl('O:O:.')))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Predict with Model (CLI)
Step2: Predict with Model under Mini-Load (CLI)
Step3: Predict with Model (REST)
|
<ASSISTANT_TASK:>
Python Code:
%%bash
pio init-model \
--model-server-url http://prediction-python3.community.pipeline.io \
--model-type python3 \
--model-namespace default \
--model-name python3_zscore \
--model-version v1 \
--model-path .
%%bash
pio predict \
--model-test-request-path ./data/test_request.json
%%bash
pio predict_many \
--model-test-request-path ./data/test_request.json \
--num-iterations 5
import requests
model_type = 'python3'
model_namespace = 'default'
model_name = 'python3_zscore'
model_version = 'v1'
deploy_url = 'http://prediction-%s.community.pipeline.io/api/v1/model/predict/%s/%s/%s/%s' % (model_type, model_type, model_namespace, model_name, model_version)
print(deploy_url)
with open('./data/test_request.json', 'rb') as fh:
model_input_binary = fh.read()
response = requests.post(url=deploy_url,
data=model_input_binary,
timeout=30)
print("Success!\n\n%s" % response.text)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Interact with SVG display
Step4: Write a function named draw_circle that draws a circle using SVG. Your function should take the parameters of the circle as function arguments and have defaults as shown. You will have to write the raw SVG code as a Python string and then use the IPython.display.SVG object and IPython.display.display function.
Step5: Use interactive to build a user interface for exploring the draw_circle function
Step6: Use the display function to show the widgets created by interactive
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.html import widgets
from IPython.display import display, SVG
s = """
<svg width="100" height="100">
  <circle cx="50" cy="50" r="20" fill="aquamarine" />
</svg>
"""
SVG(s)
def draw_circle(width=100, height=100, cx=25, cy=25, r=5, fill='red'):
    """Draw an SVG circle.

    Parameters
    ----------
    width : int
        The width of the svg drawing area in px.
    height : int
        The height of the svg drawing area in px.
    cx : int
        The x position of the center of the circle in px.
    cy : int
        The y position of the center of the circle in px.
    r : int
        The radius of the circle in px.
    fill : str
        The fill color of the circle.
    """
    svg = ('<svg width="{w}" height="{h}">'
           '<circle cx="{cx}" cy="{cy}" r="{r}" fill="{f}" />'
           '</svg>').format(w=width, h=height, cx=cx, cy=cy, r=r, f=fill)
    display(SVG(svg))
draw_circle(cx=10, cy=10, r=10, fill='blue')
assert True # leave this to grade the draw_circle function
w = interactive(draw_circle, width=fixed(300), height=fixed(300),
                cx=(0, 300), cy=(0, 300), r=(0, 50), fill='red')
c = w.children
assert c[0].min==0 and c[0].max==300
assert c[1].min==0 and c[1].max==300
assert c[2].min==0 and c[2].max==50
assert c[3].value=='red'
display(w)
assert True # leave this to grade the display of the widget
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Conversion utility functions
Step2: Multiplication utility functions
Step3: Karatsuba's algorithm
Step4: Multiplication and testing
Step5: Generate big integers
Step6: Run(s)
Step7: Karatsuba multiplication using Baruchel's implementation
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np # used for generating random numbers
def int_to_big(x):
if x == 0:
return [0]
z = []
while x > 0:
t = x % 10
z.append(t)
x //= 10
trim(z)
return z
def big_to_int(x):
z, p = 0, 1
for d in x:
z += p * d
p *= 10
return z
from itertools import zip_longest
def trim(z):
while len(z) > 1 and z[-1] == 0:
z.pop(-1)
def add(x, y):
z, carry = [], 0
for r, s in zip_longest(x, y, fillvalue=0):
carry += r + s
z.append(carry % 10)
carry //= 10
if carry:
z.append(carry)
return z
def subtract(x, y):
z, carry = [], 0
for r, s in zip_longest(x, y, fillvalue=0):
carry += r - s
z.append(carry % 10)
carry //= 10
trim(z)
return z
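# Quick sanity check of the helpers; digits are stored least-significant
# first, so int_to_big(58) == [8, 5]. (Illustrative values only.)
assert big_to_int(add(int_to_big(58), int_to_big(67))) == 125
assert big_to_int(subtract(int_to_big(125), int_to_big(67))) == 58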
def karatsuba(x, y):
# ensure same length
while len(x) < len(y):
x.append(0)
while len(x) > len(y):
y.append(0)
# length
n = len(x)
half = n // 2
if n == 1:
return add([x[0] * y[0]], [])
# cut-off for improved efficiency
if n <= 50:
a = big_to_int(x)
b = big_to_int(y)
z = a * b
return int_to_big(z)
x0, x1 = x[:half], x[half:]
y0, y1 = y[:half], y[half:]
# x = x0x1
# y = y0y1
# z0 = x0 * y0
# z1 = x1 * y1
# z2 = (x0 + x1) * (y0 + y1)
# z2 = z2 - (z0 + z1)
z0 = karatsuba(x0, y0)
z1 = karatsuba(x1, y1)
z2 = karatsuba(add(x0, x1), add(y0, y1))
z2 = subtract(z2, add(z0, z1))
z = add(z0, [0] * (half << 1) + z1)
z = add(z, [0] * half + z2)
return z
def multiply(x, y):
xb = int_to_big(x)
yb = int_to_big(y)
zb = karatsuba(xb, yb)
return big_to_int(zb)
def test(x, y):
z = multiply(x, y)
assert x * y == z
print("{} x {} = {}".format(x, y, z))
def gen_long(n):
x = ''.join(map(str, np.random.randint(0, 10, n)))
return int(x)
test(1432423423420, 12321312332131233)
test(8931283129323420, 1233123602345430533)
tests = 30
for _ in range(tests):
n = np.random.randint(1, 15)
x, y = gen_long(n), gen_long(n)
test(int(x), int(y))
%%time
a, b = gen_long(1000), gen_long(1000)
z = multiply(a, b)
assert z == a * b
%%time
a, b = gen_long(20000), gen_long(20000)
z = multiply(a, b)
assert z == a * b
from karatsuba import *
def power_of_two(x):
p = 1
while p < x:
p <<= 1
return p
def reverse(num):
return int(str(num)[::-1])
def kat_multiply(x, y):
if x == 0 or y == 0:
return 0
xs = list(map(int, str(x)))
ys = list(map(int, str(y)))
n = power_of_two(max(len(xs), len(ys)))
plan = make_plan(range(n), range(n))
xs = [0] * (n - len(xs)) + xs
ys = [0] * (n - len(ys)) + ys
zs = plan(xs, ys)
zs.pop(-1)
zs = list(reversed(zs))
while zs[-1] == 0:
zs.pop(-1)
ans = 0
for p, d in enumerate(zs):
ans += d * 10 ** p
return ans
tests = 30
for _ in range(tests):
n = np.random.randint(1, 15)
x, y = gen_long(n), gen_long(n)
z = kat_multiply(x, y)
assert z == x * y
print("{} x {} = {}".format(x, y, z))
%%time
a, b = gen_long(100), gen_long(100)
z = kat_multiply(a, b)
assert z == a * b
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preparing the data
Step2: Counting word frequency
Step3: Let's keep the first 10000 most frequent words. As Andrew noted, most of the words in the vocabulary are rarely used so they will have little effect on our predictions. Below, we'll sort vocab by the count value and keep the 10000 most frequent words.
Step4: What's the last word in our vocabulary? We can use this to judge if 10000 is too few. If the last word is pretty common, we probably need to keep more words.
Step5: The last word in our vocabulary shows up in 30 reviews out of 25000. I think it's fair to say this is a tiny proportion of reviews. We are probably fine with this number of words.
Step6: Text to vector function
Step7: If you do this right, the following code should return the word-count vector for the sentence below
Step8: Now, run through our entire review data set and convert each review to a word vector.
Step9: Train, Validation, Test sets
Step10: Building the network
Step11: Intializing the model
Step12: Training the network
Step13: Testing
Step14: Try out your own text!
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical
reviews = pd.read_csv('reviews.txt', header=None)
labels = pd.read_csv('labels.txt', header=None)
from collections import Counter
total_counts = Counter()
for idx, row in reviews.iterrows():
total_counts.update(row[0].split(' '))
print("Total words in data set: ", len(total_counts))
vocab = sorted(total_counts, key=total_counts.get, reverse=True)[:10000]
print(vocab[:60])
print(vocab[-1], ': ', total_counts[vocab[-1]])
word2idx = {word: i for i, word in enumerate(vocab)}
def text_to_vector(text):
    word_vector = np.zeros(len(vocab), dtype=np.int_)
    for word in text.split(' '):
        idx = word2idx.get(word, None)
        if idx is not None:
            word_vector[idx] += 1
    return word_vector
text_to_vector('The tea is for a party to celebrate '
'the movie so she has no time for a cake')[:65]
word_vectors = np.zeros((len(reviews), len(vocab)), dtype=np.int_)
for ii, (_, text) in enumerate(reviews.iterrows()):
word_vectors[ii] = text_to_vector(text[0])
# Printing out the first 5 word vectors
word_vectors[:5, :23]
Y = (labels=='positive').astype(np.int_)
records = len(labels)
shuffle = np.arange(records)
np.random.shuffle(shuffle)
test_fraction = 0.9
train_split, test_split = shuffle[:int(records*test_fraction)], shuffle[int(records*test_fraction):]
trainX, trainY = word_vectors[train_split,:], to_categorical(Y.values[train_split], 2)
testX, testY = word_vectors[test_split,:], to_categorical(Y.values[test_split], 2)
trainY
# Network building
def build_model():
# This resets all parameters and variables, leave this here
tf.reset_default_graph()
    # Input layer sized to the 10,000-word vectors, two ReLU hidden layers,
    # a 2-unit softmax output, and SGD training (standard TFLearn layers):
    net = tflearn.input_data([None, 10000])
    net = tflearn.fully_connected(net, 200, activation='ReLU')
    net = tflearn.fully_connected(net, 25, activation='ReLU')
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1,
                             loss='categorical_crossentropy')
model = tflearn.DNN(net)
return model
model = build_model()
# Training
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128, n_epoch=10)
predictions = (np.array(model.predict(testX))[:,0] >= 0.5).astype(np.int_)
test_accuracy = np.mean(predictions == testY[:,0], axis=0)
print("Test accuracy: ", test_accuracy)
text = "This movie is so bad. It was awful and the worst"
positive_prob = model.predict([text_to_vector(text.lower())])[0][1]
print('P(positive) = {:.3f} :'.format(positive_prob),
'Positive' if positive_prob > 0.5 else 'Negative')
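# Try the trained network on a second, made-up review as well:
text = "What a wonderful movie. I loved it and will watch it again"
positive_prob = model.predict([text_to_vector(text.lower())])[0][1]
print('P(positive) = {:.3f} :'.format(positive_prob),
      'Positive' if positive_prob > 0.5 else 'Negative')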
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Numpy Arrays
Step2: There is much, much more to know, but these few operations are fundamental to what we'll do in this tutorial
Step3: Matplotlib
Step4: There are many, many more plot types available. One useful way to explore these is by browsing the matplotlib example gallery
|
<ASSISTANT_TASK:>
Python Code:
# Start pylab inline mode, so figures will appear in the notebook
%matplotlib inline
import numpy as np
# Generating a random array
X = np.random.random((3, 5)) # a 3 x 5 array
print(X)
# Accessing elements
# get a single element
print(X[0, 0])
# get a row
print(X[1])
# get a column
print(X[:, 1])
# Transposing an array
print(X.T)
# Turning a row vector into a column vector
y = np.linspace(0, 12, 5)
print(y)
# make into a column vector
print(y[:, np.newaxis])
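# Broadcasting aside: a (5, 1) column times a (3,) row expands to a (5, 3)
# grid -- the same trick the image example below relies on.
print((y[:, np.newaxis] * np.linspace(0, 1, 3)).shape)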
from scipy import sparse
# Create a random array with a lot of zeros
X = np.random.random((10, 5))
print(X)
# set the majority of elements to zero
X[X < 0.7] = 0
print(X)
# turn X into a csr (Compressed-Sparse-Row) matrix
X_csr = sparse.csr_matrix(X)
print(X_csr)
# convert the sparse matrix to a dense array
print(X_csr.toarray())
%matplotlib inline
# Here we import the plotting functions
import matplotlib.pyplot as plt
# plotting a line
x = np.linspace(0, 10, 100)
plt.plot(x, np.sin(x));
# scatter-plot points
x = np.random.normal(size=500)
y = np.random.normal(size=500)
plt.scatter(x, y);
# showing images
x = np.linspace(1, 12, 100)
y = x[:, np.newaxis]
im = y * np.sin(x) * np.cos(y)
print(im.shape)
# imshow - note that origin is at the top-left by default!
plt.imshow(im);
# Contour plot - note that origin here is at the bottom-left by default!
plt.contour(im);
# %load http://matplotlib.org/mpl_examples/pylab_examples/ellipse_collection.py
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import EllipseCollection
x = np.arange(10)
y = np.arange(15)
X, Y = np.meshgrid(x, y)
XY = np.hstack((X.ravel()[:, np.newaxis], Y.ravel()[:, np.newaxis]))
ww = X/10.0
hh = Y/15.0
aa = X*9
fig, ax = plt.subplots()
ec = EllipseCollection(ww, hh, aa, units='x', offsets=XY,
transOffset=ax.transData)
ec.set_array((X + Y).ravel())
ax.add_collection(ec)
ax.autoscale_view()
ax.set_xlabel('X')
ax.set_ylabel('y')
cbar = plt.colorbar(ec)
cbar.set_label('X+Y');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 2
Step2: Exercise 3
Step3: Exercise 4
Step4: With Monte Carlo simulation you can approximate the integral fairly easily. You can choose a rectangle that encloses the integral and then determine, for random points in the rectangle, whether they fall below or above the graph of the function.
|
<ASSISTANT_TASK:>
Python Code:
n = 10000
steps_to_exit = []
for i in range(n):
x = 0
steps = 0
while -7 < x < 7:
x += np.random.choice([-1, 1]) # step left or right
steps += 1
steps_to_exit.append(steps)
print("Gemiddeld aantal stappen tot suiker: {:.3f}".format(mean(steps_to_exit)))
n = 100
successes = 0
steps_used = []
for i in range(n):
x, y = 0, 0
steps = 0
for j in range(10000):
direction = np.random.choice(list("NESW"))
if direction == "N":
y += 1
elif direction == "E":
x += 1
elif direction == "S":
y -= 1
else:
x -= 1
steps += 1
if (x, y) == (0, 0):
            # we are back at the cafe (0, 0)
successes += 1
steps_used.append(steps)
break
else:
# steps_used.append(10000)
pass
print("Kans op terugkeren in café in maximaal 10000 stappen: {:.3f}".format(successes / n))
print(np.mean(steps_used))
print(len(steps_used))
n = 10000
successes = 0
for i in range(n):
sides = list(np.random.rand(3))
sides.sort()
if sides[2] < sum(sides[:2]):
successes += 1
print("Kans op mogelijkheid driehoek bij benadering {:.3f}".format(successes / n))
%pylab
%matplotlib inline
x = linspace(0, 1, 400)
y = sqrt(1 - x**4)
plt.fill_between(x, y)
plt.axis('equal')
plt.ylim([0, 1.1])
plt.show()
n = 500000
under_graph = 0
for i in range(n):
x = np.random.rand()
y = np.random.rand()
fx = sqrt(1 - x**4)
if y < fx:
under_graph += 1
print("Schatting kans willekeurig punt onder grafiek = {:.3f}".format(under_graph / n))
print("Schatting integraal = {:.5f}".format((1 * 1) * (under_graph / n)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we assume that the RDF Streams from Part 1 are up and running. If you did not follow Part 1, please follow the link below and complete the part about TripleWave and RDF Streams.
Step2: Registering Streams
Step3: Adding domain knowledge
Step4: If we have a look at our streams again
Step5: Load some rules from our file, e.g. RDFS entailment
Step6: Register RuleSet
Step7: Query RDF Streams Under Graph Level Entailment
Step8: Registering the Query
Step9: Registering the Observer
Step10: Are you curious what the output stream looks like?
Step11: Clean Up
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
from IPython.core.display import HTML
from rsplib.processing import RSPSource, StreamReasoner
jasper = StreamReasoner("http://jasper", 8183);
jasper.status()
jasper.register_stream("AarhusTrafficData158505", "http://aarhustrafficdata158505:4001/sgraph")
jasper.register_stream("AarhusTrafficData182955", "http://aarhustrafficdata182955:4000/sgraph")
Image(url= "https://raw.githubusercontent.com/streamreasoning/rsplab/tutorial/collector/lab/streamapp/images/tbox.png")
tbox = "https://raw.githubusercontent.com/streamreasoning/rsplab/tutorial/collector/lab/streamapp/tbox.rdf"
with open('rdfs.rules.txt', 'r') as rule_file:
rdfs=rule_file.read()
rdfs
jasper.register_rules("rdfs", rdfs)
jasper.rules()
with open('q4.rspql.txt', 'r') as rspql_query:
body = rspql_query.read()
print(body)
jasper.register_query("ct", "STREAM", body, "rdfs", tbox)
jasper.register_observer("ct", "default", {"host":"jasper","type":"ws","port":8283,"name":"default"})
Image(url= "https://raw.githubusercontent.com/streamreasoning/rsplab/tutorial/collector/lab/streamapp/images/csparq_grafana.png")
jasper.unregister_observer("ct", "default")
jasper.unregister_query("ct")
jasper.unregister_stream("AarhusTrafficData182955")
jasper.unregister_stream("AarhusTrafficData158505")
jasper.status()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id="ref3"></a>
Step2: After that, let's make an operation over these variables. The function tf.add() adds two elements (you could also use c = a + b).
Step3: Then TensorFlow needs to initialize a session to run our code. Sessions are, in a way, a context for creating a graph inside TensorFlow. Let's define our session
Step4: Let's run the session to get the result from the previous defined 'c' operation
Step5: Close the session to release resources
Step6: To avoid having to close sessions every time, we can define them in a with block, so after running the with block the session will close automatically
Step7: Even this silly example of adding 2 constants to reach a simple result defines the basis of TensorFlow. Define your edge (In this case our constants), include nodes (operations, like tf.add), and start a session to build a graph.
Step8: <a id="ref5"></a>
Step9: Let's first create a simple counter, a variable that increases one unit at a time
Step10: Variables must be initialized by running an initialization operation after having launched the graph. We first have to add the initialization operation to the graph
Step11: We then start a session to run the graph, first initialize the variables, then print the initial value of the state variable, and then run the operation of updating the state variable and printing the result after each update
Step12: <a id="ref7"></a>
Step13: And define a simple multiplication operation
Step14: Now we need to define and run the session, but since we created a "hole" in the model to pass the data, when we initialize the session we are obligated to pass an argument with the data, otherwise we would get an error.
Step15: Since data in TensorFlow is passed in form of multidimensional arrays we can pass any kind of tensor through the placeholders to get the answer to the simple multiplication operation
Step16: <a id="ref8"></a>
Step17: <a id="ref8"></a>
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
a = tf.constant([2])
b = tf.constant([3])
c = tf.add(a,b)
session = tf.Session()
result = session.run(c)
print(result)
session.close()
with tf.Session() as session:
result = session.run(c)
print(result)
Scalar = tf.constant([2])
Vector = tf.constant([5,6,2])
Matrix = tf.constant([[1,2,3],[2,3,4],[3,4,5]])
Tensor = tf.constant( [ [[1,2,3],[2,3,4],[3,4,5]] , [[4,5,6],[5,6,7],[6,7,8]] , [[7,8,9],[8,9,10],[9,10,11]] ] )
with tf.Session() as session:
result = session.run(Scalar)
print ("Scalar (1 entry):\n %s \n" % result)
result = session.run(Vector)
print ("Vector (3 entries) :\n %s \n" % result)
result = session.run(Matrix)
print ("Matrix (3x3 entries):\n %s \n" % result)
result = session.run(Tensor)
print ("Tensor (3x3x3 entries) :\n %s \n" % result)
state = tf.Variable(0)
one = tf.constant(1)
new_value = tf.add(state, one)
update = tf.assign(state, new_value)
init = tf.global_variables_initializer()
with tf.Session() as session:
session.run(init)
print(session.run(state))
for i in range(3):
session.run(update)
print(session.run(state))
a=tf.placeholder(tf.float32)
b=a*2
with tf.Session() as sess:
result = sess.run(b,feed_dict={a:3.5})
print (result)
dictionary={a: [ [ [1,2,3],[4,5,6],[7,8,9],[10,11,12] ] , [ [13,14,15],[16,17,18],[19,20,21],[22,23,24] ] ] }
with tf.Session() as sess:
result = sess.run(b,feed_dict=dictionary)
print (result)
a = tf.constant([5])
b = tf.constant([2])
c = tf.add(a,b)
d = tf.subtract(a,b)
with tf.Session() as session:
result = session.run(c)
print ('c =: %s' % result)
result = session.run(d)
print ('d =: %s' % result)
import tensorflow as tf
with tf.name_scope("Operations"):
with tf.name_scope("Scope_a"):
a = tf.add(1, 2, name="a")
b = tf.multiply(a, 3, name="b")
with tf.name_scope("Scope_b"):
c = tf.add(4, 5, name="c")
d = tf.multiply(c, 6, name="d")
with tf.name_scope("Scope_c"):
e = tf.multiply(4, 5, name="e")
f = tf.div(c, 6, name="f")
g = tf.add(b, d, name="g")
h = tf.multiply(g, f, name="h")
with tf.Session() as sess:
print(sess.run(h))
with tf.Session() as sess:
writer = tf.summary.FileWriter("/home/raghav/TECH/output4", sess.graph)
print(sess.run(h))
writer.close()
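# To inspect the saved graph, launch TensorBoard from a shell and point it
# at the log directory written above (path is machine-specific):
#   tensorboard --logdir /home/raghav/TECH/output4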
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read the data and drop the unused parts
Step2: Word segmentation experiment
Step3: Segmentation was not very successful, so we switch to using individual Chinese characters instead of word segments, keeping the punctuation
|
<ASSISTANT_TASK:>
Python Code:
# -*- coding: utf-8 -*-
import os
import re
import time
import codecs
import argparse
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
BASE_FOLDER = "C:/Users/sethf/source/repos/chinesepoem/" # os.path.abspath(os.path.dirname(__file__))
DATA_FOLDER = os.path.join(BASE_FOLDER, 'data')
DEFAULT_FIN = os.path.join(DATA_FOLDER, '唐诗语料库.txt')
DEFAULT_FOUT = os.path.join(DATA_FOLDER, 'poem.txt')
reg_noisy = re.compile('[^\u3000-\uffee]')
reg_note = re.compile('((.*))')  # strip full-width parenthesized notes; cannot handle () spanning separate lines
# Chinese and full-width punctuation characters fall in \u3000-\u301e\ufe10-\ufe19\ufe30-\ufe44\ufe50-\ufe6b\uff01-\uffee
if __name__ == '__main__':
# parser = set_arguments()
# cmd_args = parser.parse_args()
print('{} START'.format(time.strftime(TIME_FORMAT)))
fd = codecs.open(DEFAULT_FIN, 'r', 'utf-8')
fw = codecs.open( DEFAULT_FOUT, 'w', 'utf-8')
reg = re.compile('〖(.*)〗')
start_flag = False
for line in fd:
line = line.strip()
if not line or '《全唐诗》' in line or '<http' in line or '□' in line:
continue
elif '〖' in line and '〗' in line:
if start_flag:
fw.write('\n')
start_flag = True
g = reg.search(line)
if g:
fw.write(g.group(1))
fw.write('\n')
            else:
# noisy data
print(line)
else:
line = reg_noisy.sub('', line)
line = reg_note.sub('', line)
line = line.replace(' .', '')
fw.write(line)
fd.close()
fw.close()
print('{} STOP'.format(time.strftime(TIME_FORMAT)))
print('{} START'.format(time.strftime(TIME_FORMAT)))
import thulac
DEFAULT_Segment = os.path.join(DATA_FOLDER, 'wordsegment.txt')
fd = codecs.open(DEFAULT_FOUT, 'r', 'utf-8')
fw = codecs.open(DEFAULT_Segment, 'w', 'utf-8')
thu1 = thulac.thulac(seg_only=True)  # segmentation only, no part-of-speech tagging
for line in fd:
#print(line)
fw.write(thu1.cut(line, text=True))
fw.write('\n')
fd.close()
fw.close()
print('{} STOP'.format(time.strftime(TIME_FORMAT)))
print('{} START'.format(time.strftime(TIME_FORMAT)))
from gensim.models import word2vec
#DEFAULT_Segment = os.path.join(DATA_FOLDER, 'wordsegment.txt')
DEFAULT_Word2Vec = os.path.join(DATA_FOLDER, 'Word2Vec150.bin')
sentences = word2vec.Text8Corpus(DEFAULT_Segment)
model = word2vec.Word2Vec(sentences, size=150)
#DEFAULT_Segment = os.path.join(DATA_FOLDER, 'wordsegment.txt')
model.save(DEFAULT_Word2Vec)
print('{} STOP'.format(time.strftime(TIME_FORMAT)))
model[u'男']
DEFAULT_FIN = os.path.join(DATA_FOLDER, '唐诗语料库.txt')
DEFAULT_FOUT = os.path.join(DATA_FOLDER, 'poem.txt')
DEFAULT_Segment = os.path.join(DATA_FOLDER, 'wordsegment.txt')
def GetFirstNline(filePath, linesNumber):
fd = codecs.open(filePath, 'r', 'utf-8')
for i in range(1,linesNumber):
print(fd.readline())
fd.close()
GetFirstNline(DEFAULT_Segment, 3)
GetFirstNline(DEFAULT_FOUT, 3)
print('{} START'.format(time.strftime(TIME_FORMAT)))
DEFAULT_FOUT = os.path.join(DATA_FOLDER, 'poem.txt')
DEFAULT_charSegment = os.path.join(DATA_FOLDER, 'Charactersegment.txt')
fd = codecs.open(DEFAULT_FOUT, 'r', 'utf-8')
fw = codecs.open(DEFAULT_charSegment, 'w', 'utf-8')
start_flag = False
for line in fd:
if len(line) > 0:
for c in line:
if c != '\n':
fw.write(c)
fw.write(' ')
fw.write('\n')
fd.close()
fw.close()
print('{} STOP'.format(time.strftime(TIME_FORMAT)))
GetFirstNline(DEFAULT_charSegment, 3)
print('{} START'.format(time.strftime(TIME_FORMAT)))
from gensim.models import word2vec
#DEFAULT_Segment = os.path.join(DATA_FOLDER, 'wordsegment.txt')
DEFAULT_Char2Vec = os.path.join(DATA_FOLDER, 'Char2Vec100.bin')
fd = codecs.open(DEFAULT_charSegment, 'r', 'utf-8')
sentences = fd.readlines()
fd.close()
model = word2vec.Word2Vec(sentences, size=100)
#DEFAULT_Segment = os.path.join(DATA_FOLDER, 'wordsegment.txt')
model.save(DEFAULT_Char2Vec)
print('{} STOP'.format(time.strftime(TIME_FORMAT)))
model[u'男']
print('{} START'.format(time.strftime(TIME_FORMAT)))
from gensim.models import word2vec
DEFAULT_charSegment = os.path.join(DATA_FOLDER, 'Charactersegment.txt')
DEFAULT_Char2Vec50 = os.path.join(DATA_FOLDER, 'Char2Vec50.bin')
fd = codecs.open(DEFAULT_charSegment, 'r', 'utf-8')
sentences = fd.readlines()
fd.close()
model = word2vec.Word2Vec(sentences, size=50)
#DEFAULT_Segment = os.path.join(DATA_FOLDER, 'wordsegment.txt')
model.save(DEFAULT_Char2Vec50)
print('{} STOP'.format(time.strftime(TIME_FORMAT)))
model.wv.most_similar([u'好'])
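# Analogy-style query over the trained character vectors
# (characters picked purely for illustration):
print(model.wv.most_similar(positive=[u'日', u'月'], topn=5))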
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The # option of print_dot() can be used to display the internal number of each transition
Step2: Using these numbers you can selectively highlight some transitions. The second argument is a color number (from a list of predefined colors).
Step3: Note that these highlight_ functions work for edges and states, and come with both singular (changing the color of single state or edge) and plural versions.
Step4: Saving to HOA 1.1
Step5: Highlighting a run
Step6: The call of highlight(5) on the accepting run r modified the original automaton b
Step7: Highlighting from a product
Step8: The projection also works for products generated on-the-fly, but the on-the-fly product itself cannot be highlighted (it does not store states or transitions).
Step9: Highlighting nondeterminism
Step10: Disappearing highlights
Step11: Highlighting with partial output
Step12: Highlighting languages
|
<ASSISTANT_TASK:>
Python Code:
a = spot.translate('a U b U c')
a.show('.#')
a.highlight_edges([2, 4, 5], 1)
a.highlight_edge(6, 2).highlight_states((0, 1), 0)
print(a.to_str('HOA', '1'))
print()
print(a.to_str('HOA', '1.1'))
b = spot.translate('X (F(Ga <-> b) & GF!b)'); b
r = b.accepting_run(); r
r.highlight(5) # the parameter is a color number
b
left = spot.translate('a U b')
right = spot.translate('GFa')
display(left, right)
prod = spot.product(left, right); prod
run = prod.accepting_run(); run
run.highlight(5)
# Note that by default project() needs to know on which side you project, but it cannot
# guess it. The left-side is assumed unless you pass True as a second argument.
run.project(left).highlight(5)
run.project(right, True).highlight(5)
display(prod, left, right)
left2 = spot.translate('!b & FG a')
right2 = spot.translate('XXXb')
prod2 = spot.otf_product(left2, right2) # Note "otf_product()"
run2 = prod2.accepting_run()
run2.project(left2).highlight(5)
run2.project(right2, True).highlight(5)
display(run2, prod2, left2, right2)
b = spot.translate('X (F(Ga <-> b) & GF!b)')
spot.highlight_nondet_states(b, 5)
spot.highlight_nondet_edges(b, 4)
b
spot.randomize(b); b
spot.highlight_nondet_edges(b, 4) # let's get those highlighted edges back
display(b, b.show('.<4'), b.show('.<2'))
aut = spot.translate('(b W Xa) & GF(c <-> Xb) | a', 'generic', 'det')
spot.highlight_languages(aut)
aut.show('.bas')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Save as json
Step2: Loop through all files
Step3: 2. Example of loading and plotting impedance data
Step4: Plot
|
<ASSISTANT_TASK:>
Python Code:
def build_dictionaries(mess):
discharge, charge, impedance = {}, {}, {}
for i, element in enumerate(mess):
step = element[0][0]
if step == 'discharge':
discharge[str(i)] = {}
discharge[str(i)]["amb_temp"] = str(element[1][0][0])
year = int(element[2][0][0])
month = int(element[2][0][1])
day = int(element[2][0][2])
hour = int(element[2][0][3])
minute = int(element[2][0][4])
            raw_second = float(element[2][0][5])
            second = int(raw_second)
            microsecond = int((raw_second % 1) * 1e6)  # datetime's 7th argument is microseconds
            date_time = datetime.datetime(year, month, day, hour, minute, second, microsecond)
discharge[str(i)]["date_time"] = date_time.strftime("%d %b %Y, %H:%M:%S")
data = element[3]
discharge[str(i)]["voltage_battery"] = data[0][0][0][0].tolist()
discharge[str(i)]["current_battery"] = data[0][0][1][0].tolist()
discharge[str(i)]["temp_battery"] = data[0][0][2][0].tolist()
discharge[str(i)]["current_load"] = data[0][0][3][0].tolist()
discharge[str(i)]["voltage_load"] = data[0][0][4][0].tolist()
discharge[str(i)]["time"] = data[0][0][5][0].tolist()
if step == 'charge':
charge[str(i)] = {}
charge[str(i)]["amb_temp"] = str(element[1][0][0])
year = int(element[2][0][0])
month = int(element[2][0][1])
day = int(element[2][0][2])
hour = int(element[2][0][3])
minute = int(element[2][0][4])
            raw_second = float(element[2][0][5])
            second = int(raw_second)
            microsecond = int((raw_second % 1) * 1e6)  # datetime's 7th argument is microseconds
            date_time = datetime.datetime(year, month, day, hour, minute, second, microsecond)
charge[str(i)]["date_time"] = date_time.strftime("%d %b %Y, %H:%M:%S")
data = element[3]
charge[str(i)]["voltage_battery"] = data[0][0][0][0].tolist()
charge[str(i)]["current_battery"] = data[0][0][1][0].tolist()
charge[str(i)]["temp_battery"] = data[0][0][2][0].tolist()
charge[str(i)]["current_load"] = data[0][0][3][0].tolist()
charge[str(i)]["voltage_load"] = data[0][0][4][0].tolist()
charge[str(i)]["time"] = data[0][0][5][0].tolist()
if step == 'impedance':
impedance[str(i)] = {}
impedance[str(i)]["amb_temp"] = str(element[1][0][0])
year = int(element[2][0][0])
month = int(element[2][0][1])
day = int(element[2][0][2])
hour = int(element[2][0][3])
minute = int(element[2][0][4])
            raw_second = float(element[2][0][5])
            second = int(raw_second)
            microsecond = int((raw_second % 1) * 1e6)  # datetime's 7th argument is microseconds
            date_time = datetime.datetime(year, month, day, hour, minute, second, microsecond)
impedance[str(i)]["date_time"] = date_time.strftime("%d %b %Y, %H:%M:%S")
data = element[3]
impedance[str(i)]["sense_current"] = {}
impedance[str(i)]["battery_current"] = {}
impedance[str(i)]["current_ratio"] = {}
impedance[str(i)]["battery_impedance"] = {}
impedance[str(i)]["rectified_impedance"] = {}
impedance[str(i)]["sense_current"]["real"] = np.real(data[0][0][0][0]).tolist()
impedance[str(i)]["sense_current"]["imag"] = np.imag(data[0][0][0][0]).tolist()
impedance[str(i)]["battery_current"]["real"] = np.real(data[0][0][1][0]).tolist()
impedance[str(i)]["battery_current"]["imag"] = np.imag(data[0][0][1][0]).tolist()
impedance[str(i)]["current_ratio"]["real"] = np.real(data[0][0][2][0]).tolist()
impedance[str(i)]["current_ratio"]["imag"] = np.imag(data[0][0][2][0]).tolist()
impedance[str(i)]["battery_impedance"]["real"] = np.real(data[0][0][3]).tolist()
impedance[str(i)]["battery_impedance"]["imag"] = np.imag(data[0][0][3]).tolist()
impedance[str(i)]["rectified_impedance"]["real"] = np.real(data[0][0][4]).tolist()
impedance[str(i)]["rectified_impedance"]["imag"] = np.imag(data[0][0][4]).tolist()
impedance[str(i)]["re"] = float(data[0][0][5][0][0])
impedance[str(i)]["rct"] = float(data[0][0][6][0][0])
return discharge, charge, impedance
def save_json(dictionary, name):
with open(name + '.json', 'w') as f:
json.dump(dictionary, f, indent=4)
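# Hedged convenience counterpart to save_json, matching the naming scheme
# above (the same files are read back with json.load in part 2 below):
def load_json(name):
    with open(name + '.json', 'r') as f:
        return json.load(f)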
folder = './nasa-battery-data/BatteryAgingARC-FY08Q4'
filenames = [f for f in os.listdir(folder) if f.endswith('.mat')]
for filename in filenames:
name = filename.split('.mat')[0]
print(name)
struct = loadmat(folder + '/' + filename)
mess = struct[name][0][0][0][0]
discharge, charge, impedance = build_dictionaries(mess)
save_json(discharge, name + '_discharge')
save_json(charge, name + '_charge')
save_json(impedance, name + '_impedance')
with open('./B0005_impedance.json') as f:
impedance_data = json.load(f)
with open('./B0005_discharge.json') as f:
discharge_data = json.load(f)
for cycle in impedance_data.keys():
plt.plot(cycle, impedance_data[cycle]["rct"], 'o')
plt.ylabel('Charge transfer resistance')
plt.xlabel('Cycle number')
plt.title('Battery B0005')
plt.show()
for cycle in discharge_data.keys():
plt.plot(discharge_data[cycle]["time"], discharge_data[cycle]["voltage_battery"], '-')
plt.ylabel('Voltage')
plt.xlabel('Time (s)')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Text classification of movie reviews with Keras and TensorFlow Hub
Step2: Download the IMDB dataset
Step3: Explore the data
Step4: Let's also print the first ten labels.
Step5: Build the model
Step6: Now let's build the full model:
Step7: The layers are stacked in order to build the classifier:
Step8: Train the model
Step9: Evaluate the model
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
!pip install tensorflow-hub
!pip install tensorflow-datasets
import numpy as np
import tensorflow as tf
!pip install tensorflow-hub
!pip install tfds-nightly
import tensorflow_hub as hub
import tensorflow_datasets as tfds
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
print("GPU is", "available" if tf.config.experimental.list_physical_devices("GPU") else "NOT AVAILABLE")
# Split the training set into 60% and 40% to end up with 15,000 examples
# for training, 10,000 examples for validation and 25,000 examples for testing.
train_data, validation_data, test_data = tfds.load(
name="imdb_reviews",
split=('train[:60%]', 'train[60%:]', 'test'),
as_supervised=True)
train_examples_batch, train_labels_batch = next(iter(train_data.batch(10)))
train_examples_batch
train_labels_batch
embedding = "https://tfhub.dev/google/nnlm-en-dim50/2"
hub_layer = hub.KerasLayer(embedding, input_shape=[],
dtype=tf.string, trainable=True)
hub_layer(train_examples_batch[:3])
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(1))
model.summary()
model.compile(optimizer='adam',
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
history = model.fit(train_data.shuffle(10000).batch(512),
epochs=10,
validation_data=validation_data.batch(512),
verbose=1)
results = model.evaluate(test_data.batch(512), verbose=2)
for name, value in zip(model.metrics_names, results):
print("%s: %.3f" % (name, value))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Utility function
Step2: Experiments on the Auto-MPG dataset
Step3: Causal Discovery with Causal Discovery Tool (CDT)
Step4: As you can see, no two methods agree on the graphs. PC and GES effectively produce an undirected graph whereas LiNGAM produces a directed graph. We use only the LiNGAM method in the next section.
Step5: As mentioned earlier, due to the absence of directed edges, no backdoor, instrmental or frontdoor variables can be found out for PC and GES. Thus, causal effect estimation is not possible for these methods. However, LiNGAM does discover a DAG and hence, its possible to output a causal estimate for LiNGAM. The estimate is still pretty far from the original estimate of -70.466 (which can be calculated from the graph).
Step6: Ground truth of the causal graph
Step7: Causal Discovery with Causal Discovery Tool (CDT)
Step8: As you can see, no two methods agree on the graphs. Next we study the causal effects of these different graphs
Step9: From the causal estimates obtained, it can be seen that the three estimates differ in different aspects. The graph obtained using LiNGAM contains a backdoor path and instrumental variables. On the other hand, the graph obtained using PC contains a backdoor path and a frontdoor path. However, despite these differences, both obtain the same mean causal estimate.
Step10: The graph similarity metrics show that the scores are the lowest for the LiNGAM method of graph extraction. Hence, of the three methods used, LiNGAM provides the graph that is most similar to the original graph.
|
<ASSISTANT_TASK:>
Python Code:
import dowhy
from dowhy import CausalModel
from rpy2.robjects import r as R
%load_ext rpy2.ipython
import numpy as np
import pandas as pd
import graphviz
import networkx as nx
np.set_printoptions(precision=3, suppress=True)
np.random.seed(0)
def make_graph(adjacency_matrix, labels=None):
idx = np.abs(adjacency_matrix) > 0.01
dirs = np.where(idx)
d = graphviz.Digraph(engine='dot')
names = labels if labels else [f'x{i}' for i in range(len(adjacency_matrix))]
for name in names:
d.node(name)
for to, from_, coef in zip(dirs[0], dirs[1], adjacency_matrix[idx]):
d.edge(names[from_], names[to], label=str(coef))
return d
def str_to_dot(string):
'''
Converts input string from graphviz library to valid DOT graph format.
'''
graph = string.replace('\n', ';').replace('\t','')
graph = graph[:9] + graph[10:-2] + graph[-1] # Removing unnecessary characters from string
return graph
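# Tiny illustrative check of the two helpers on a hand-made 2x2 adjacency
# matrix (arbitrary values; an entry at [row, col] draws an edge col -> row):
toy = make_graph(np.array([[0.0, 0.0], [0.7, 0.0]]), labels=['a', 'b'])
print(str_to_dot(toy.source))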
data_mpg = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data-original',
delim_whitespace=True, header=None,
names = ['mpg', 'cylinders', 'displacement',
'horsepower', 'weight', 'acceleration',
'model year', 'origin', 'car name'])
data_mpg.dropna(inplace=True)
data_mpg.drop(['model year', 'origin', 'car name'], axis=1, inplace=True)
print(data_mpg.shape)
data_mpg.head()
from cdt.causality.graph import LiNGAM, PC, GES
graphs = {}
labels = [f'{col}' for i, col in enumerate(data_mpg.columns)]
functions = {
'LiNGAM' : LiNGAM,
'PC' : PC,
'GES' : GES,
}
for method, lib in functions.items():
obj = lib()
output = obj.predict(data_mpg)
adj_matrix = nx.to_numpy_matrix(output)
adj_matrix = np.asarray(adj_matrix)
graph_dot = make_graph(adj_matrix, labels)
graphs[method] = graph_dot
# Visualize graphs
for method, graph in graphs.items():
print("Method : %s"%(method))
display(graph)
for method, graph in graphs.items():
if method != "LiNGAM":
continue
print('\n*****************************************************************************\n')
print("Causal Discovery Method : %s"%(method))
# Obtain valid dot format
graph_dot = str_to_dot(graph.source)
# Define Causal Model
model=CausalModel(
data = data_mpg,
treatment='mpg',
outcome='weight',
graph=graph_dot)
# Identification
identified_estimand = model.identify_effect(proceed_when_unidentifiable=True)
print(identified_estimand)
# Estimation
estimate = model.estimate_effect(identified_estimand,
method_name="backdoor.linear_regression",
control_value=0,
treatment_value=1,
confidence_intervals=True,
test_significance=True)
print("Causal Estimate is " + str(estimate.value))
from cdt.data import load_dataset
data_sachs, graph_sachs = load_dataset("sachs")
data_sachs.dropna(inplace=True)
print(data_sachs.shape)
data_sachs.head()
labels = [f'{col}' for i, col in enumerate(data_sachs.columns)]
adj_matrix = nx.to_numpy_matrix(graph_sachs)
adj_matrix = np.asarray(adj_matrix)
graph_dot = make_graph(adj_matrix, labels)
display(graph_dot)
from cdt.causality.graph import LiNGAM, PC, GES
graphs = {}
graphs_nx = {}
labels = [f'{col}' for i, col in enumerate(data_sachs.columns)]
functions = {
'LiNGAM' : LiNGAM,
'PC' : PC,
'GES' : GES,
}
for method, lib in functions.items():
obj = lib()
output = obj.predict(data_sachs)
graphs_nx[method] = output
adj_matrix = nx.to_numpy_matrix(output)
adj_matrix = np.asarray(adj_matrix)
graph_dot = make_graph(adj_matrix, labels)
graphs[method] = graph_dot
# Visualize graphs
for method, graph in graphs.items():
print("Method : %s"%(method))
display(graph)
for method, graph in graphs.items():
if method != "LiNGAM":
continue
print('\n*****************************************************************************\n')
print("Causal Discovery Method : %s"%(method))
# Obtain valid dot format
graph_dot = str_to_dot(graph.source)
# Define Causal Model
model=CausalModel(
data = data_sachs,
treatment='PIP2',
outcome='PKC',
graph=graph_dot)
# Identification
identified_estimand = model.identify_effect(proceed_when_unidentifiable=True)
print(identified_estimand)
# Estimation
estimate = model.estimate_effect(identified_estimand,
method_name="backdoor.linear_regression",
control_value=0,
treatment_value=1,
confidence_intervals=True,
test_significance=True)
print("Causal Estimate is " + str(estimate.value))
from cdt.metrics import SHD, SHD_CPDAG, SID, SID_CPDAG
from numpy.random import randint
for method, graph in graphs_nx.items():
print("***********************************************************")
print("Method: %s"%(method))
tar, pred = graph_sachs, graph
print("SHD_CPDAG = %f"%(SHD_CPDAG(tar, pred)))
print("SHD = %f"%(SHD(tar, pred, double_for_anticausal=False)))
print("SID_CPDAG = [%f, %f]"%(SID_CPDAG(tar, pred)))
print("SID = %f"%(SID(tar, pred)))
import itertools
from numpy.random import randint
from cdt.metrics import SHD, SHD_CPDAG, SID, SID_CPDAG
# Find combinations of pair of methods to compare
combinations = list(itertools.combinations(graphs_nx, 2))
for pair in combinations:
print("***********************************************************")
graph1 = graphs_nx[pair[0]]
graph2 = graphs_nx[pair[1]]
print("Methods: %s and %s"%(pair[0], pair[1]))
print("SHD_CPDAG = %f"%(SHD_CPDAG(graph1, graph2)))
print("SHD = %f"%(SHD(graph1, graph2, double_for_anticausal=False)))
print("SID_CPDAG = [%f, %f]"%(SID_CPDAG(graph1, graph2)))
print("SID = %f"%(SID(graph1, graph2)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: We're going to be building a model that recognizes these digits as 5, 0, and 4.
Step3: Working with the images
Step4: The first 10 pixels are all 0 values. Not very interesting, but also unsurprising. We'd expect most of the pixel values to be the background color, 0.
Step5: The large number of 0 values correspond to the background of the image, another large mass of value 255 is black, and a mix of grayscale transition values in between.
Step6: Great -- we've retained the correct image data while properly rescaling to the range [-0.5, 0.5].
Step8: Indeed, the first label of the test set is 7.
Step9: A crucial difference here is how we reshape the array of pixel values. Instead of one image that's 28x28, we now have a set of 60,000 images, each one being 28x28. We also include a number of channels, which for grayscale images as we have here is 1.
Step11: Looks good. Now we know how to index our full set of training and test images.
Step12: As with our image data, we'll double-check that our 1-hot encoding of the first few values matches our expectations.
Step13: The 1-hot encoding looks reasonable.
Step14: Defining the model
Step16: Now that we've defined the variables to be trained, we're ready to wire them together into a TensorFlow graph.
Step17: Having defined the basic structure of the graph, we're ready to stamp out multiple copies for training, testing, and validation.
Step18: Training and visualizing results
Step19: Now we're ready to perform operations on the graph. Let's start with one round of training. We're going to organize our training steps into batches for efficiency; i.e., training using a small set of examples at each step rather than a single example.
Step20: Let's take a look at the predictions. How did we do? Recall that the output will be probabilities over the possible classes, so let's look at those probabilities.
Step21: As expected without training, the predictions are all noise. Let's write a scoring function that picks the class with the maximum probability and compares with the example's label. We'll start by converting the probability vectors returned by the softmax into predictions we can match against the labels.
Step22: Next, we can do the same thing for our labels -- using argmax to convert our 1-hot encoding into a digit class.
Step23: Now we can compare the predicted and label classes to compute the error rate and confusion matrix for this batch.
Step25: Now let's wrap this up into our scoring function.
Step26: We'll need to train for some time to actually see useful predicted values. Let's define a loop that will go through our data. We'll print the loss and error periodically.
Step27: The error seems to have gone down. Let's evaluate the results using the test set.
Step28: We can see here that we're mostly accurate, with some errors you might expect, e.g., '9' is often confused as '4'.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
from IPython.display import Image
import base64
Image(data=base64.decodestring("iVBORw0KGgoAAAANSUhEUgAAAMYAAABFCAYAAAARv5krAAAYl0lEQVR4Ae3dV4wc1bYG4D3YYJucc8455yCSSIYrBAi4EjriAZHECyAk3rAID1gCIXGRgIvASIQr8UTmgDA5imByPpicTcYGY+yrbx+tOUWpu2e6u7qnZ7qXVFPVVbv2Xutfce+q7hlasmTJktSAXrnn8vR/3/xXmnnadg1aTfxL3/7rwfSPmT+kf/7vf098YRtK+FnaZaf/SS++OjNNathufF9caiT2v/xxqbTGki/SXyM1nODXv/r8+7Tb+r+lnxZNcEFHEG/e3LnpoINXSh/PWzxCy/F9eWjOnDlLrr/++jR16tQakgylqdOWTZOGFqX5C/5IjXNLjdt7/NTvv/+eTjnllLT//vunr776Kl100UVpueWWq8n10lOmpSmTU5o/f0Fa3DDH1ry9p0/++eefaZ999slYYPS0005LK664Yk2eJ02ekqZNnZx+XzA/LfprYgGxePHitOqqq6YZM2akyfPmzUvXXXddHceoic2EOckxDj300CzPggUL0g033NC3OKy00krDer3pppv6FgcBIjvGUkv9u5paZZVVhoHpl4Mvv/wyhfxDQ0NZ7H7EQbacPHny39Tejzj88ccfacqUKRmHEecYf0Nr8GGAQJ8gMHCMPlH0QMzmEBg4RnN4DVr3CQIDx+gTRQ/EbA6BgWM0h9egdZ8g8PeliD4RutfF/Ouvfz9OtZy8aNGiNH/+/GGWl1122XzseYuVNKtqsaI23Ghw0DYCA8doG8JqO+AUG2+8cVq4cGHaY4890vLLL5/WXXfdfI6jvPDCC3lJ8amnnkoezP3000/pl19+GThHtWpIPekYomTxFS7HnkqKjMsss0yGgFE4r62tSBFVJ02aNPyconi9V4/JwzHwT9ZNNtkkeZ6w5ZZbph133DH99ttv6ccff8zXX3nllcRRnHNfv2cNGMQWGRaOrWbUrjsGBRLAA6U4Lhoqw9h2223ztRBq6aWXzsbgvueffz4Lu9NOO2UnYTgrr7xy7tO9nOH111/Pbb744ov0ww8/jAvngAdFMvQDDjggG/0GG2yQX1GZNm1aziCCwzrrrJPl3muvvXKwePnll9M333wzHDCKWPbLMbuAkfISjnvvvXcW/emnn85lqCBqa4a65hiYR/Gk2RNGRlwm3n7ggQfmdrKD9sqJtdZaKxvCnDlz8n3Tp09PXmPYeuutc0SVNQjvnmuvvTa3efzxx9N33303PGZ5rF75DBvvqq233nrp22+/TWeddVbyikpgxCE4vQDhlQUBRfDw2esbs2fPTquvvnqviNN1PuIdJ4GErVx44YUZowsuuCB9+umn6eeff84BspmsWqljhPFDxjGGYx/lDkN33udajCoVlAjRzl4U8LjefRwnPjsXG8OJqKBd8NB1LTU5IHyCd7LJGOYXNoGjFqaGIKtrERDIDKtukfGMH/zRZa1A101+YBF44KfMYzO8VOYYjDWiukiGqc022yyXOUqdzTffPJ/z1ialeqNVxA9gi0wzlOJ5juJlR8JeddVV+ZrIKTq4ZvJp/8EHH+SU+txzz+W2SqmxVFZRplrH5DTRXmGFFdKuu+6azjjjjOzosl5g6D54CQCI4mGjhNQO5occckh2LvLTA6fqJOEnyhU6kNlkZmUuvrtNcFx77bUzhsZWXgoSsm6t4Dsa/tp2DErCmA04HAI4FLjaaqtlBhmnSKiNY4rDtHZFB6jFMMH0RVDH+nCPYxtDCFJnKkniRbDitWjTK3sykQUuMLPn3DZGX8SFnCG/fVyz5zCCBtIHTLshdzif8fERn8cKXxjCNOwCTu3Qf6yqhV4AQokiP489//zzM0DxnQYKwqAtIkko1kQzFFxvaNcJ6u3Pe+65J/cRRvDee+9lA2BInIyRff/997nNO++8k7t0vl2A6vHWynmyiPJ43WKLLbIijz/++LTddtvlTCdzwIWSg9yjxBJ0GN/DDz+c7zv77LOzbEceeWSekwVGgsOsWbNyNo0+qt7DfPvtt8/dmtvIGnPnzk3PPPPMsJ6rHrNef/BBeJA90RprrJEDcNhctMkXR/mnbccwuCjNGTbaaKMc8TBZprITxOdgOvbuKxqGz6LSJ598kseJ9Gi1CYmSv/76a3YyJZWMZJ6Ceskp8EMusihFEAyUmVaa8G2rxTNHIrd733///eH7YeaLNe5xrEzlWNF/HqQDf0Tm+GIbvYdD43MsKAIo/JDgE0G5aFfN8NaWYxiUshikqGYTTUSt0TCkjXsYNqJQQso+rgGa0vX58ccf56hQTtk+48F92rmvlnE1A0on2uKP0Yrw+Nxzzz0zn+ZhjKwRXq6vueaa2TmUiRQfS7SyNeMks9IV9vrvJOl/q622yo4Mfw5Pvm6TMclLdit6shh+YAMnq1E29tEsteUYBgMSgxa5MOAzJZcVXQs4bUR8XxhCHIwzMALCBuCcx5q0tF3u133l8XrRMchFiRYNyMxBKM/5IjZlWVzjULKwACISytIWFsi56aab5mvOKyEikmdAO/iHY+BDCRUZuoPD1e1akECyLseA7d13352DhdKak8Cmlt3U7TSl9p58FwejYK8ncAwKpDTnGDcARbWiAUjHiNEHsITSPlagpEZChcfrZzwSOfBOiQwXLuR3PjAhtwAD08iAMCO/a+5xPTIm3ALjwERf0V+c69QeT7ZujVdLDhgKBrANXAMreMESRkU7rdVPrXNtZ4xIpSLH1VdfnR3j4IMPzkbw2Wefpa+//jovo5188slZsZjArAcvFP3YY4+lSy+9NEdTdTTy0I5xHHfccfm1CH2LtuORKEqmkwVlVU+sBY+IdJRmE0zeeOONnEXuu+++7AhnnnlmWn/99XMJ5brtzTffzHMJx/o555xzkgdb0U8rRtAKrnTYqtG1Ml6teyxInHDCCdlGYByBmG2Z97ChVvFo2zEwbHCRTbqP7EDxPjN2pUBEe86AXAcsg+f10TYMSTvnRM1ulQe1wG/nHEXZZEJZUIYQ5cgWMsEgMgqclFdkdh+MbFFyuddnWMLNfTYkcuuXHlBkpFYNI3dS+mMMfCHHsZWadfUjmQVn8iLywscG21apMscQwR555JEM3KuvvpoZ5LHOmzgjAvBwzFt2/Oijj3Lm4Ayin/MU/eGHH+b2N998c/5MGSaZ44nw7OEd5Rx77LE5+1EehYXxkpes5li2K6+8Mhv8Lrvsko381ltvzcEBfvHQKh5auk9GPvHEE3NJAx+/eKL/HXbYIQcbK3nwN067xAk4s5VHdbvsx0nxrYQeKxJMZAfBA7GlRx99NC9EtCN7JY4RoPBeAHIAyrB3jpHYwqu1d02d7HpZcfqINo5dL7eJMXtxTzk2sgWFM/gcsnCakI2cFOk+523O+Qw7WaeYHYpYRp9xn4BkbPdWSfgJXYYM+ne+2xRj2sdx8EDu8rm4Ntp9pY4RSmb0CIPOAVNGoLA47yU4S2xen37ppZdy9CkLE/3lm8bJHzJbbiavt2Q9p7AkK7o
yXAZOLk7gs9c4PJC0AOE8DDyrgJkaWgYQkSPYuAdpWySfteU8HhqKouYq+io6ZfGeZo7xpbT1+jt+jGULfprpq922ePHMBibwjWVq523KVrzBsIzTaMeu1DFi0HI0YyyYtAekY5MltbRyihFJiROBKIYTwMCTWJNubwdQFCXFapK9z96mtbjgs3thFKWnUgjBzNZIya5FOyUcPG36q4LwRgZ6Ix8HtBk3tirGGU0feAkslHfk5PzBh2cXSkvtWqWOOEaRGcoSHdXDMoYn1tK8yaON0ahbCWgFS/vxSnjn5F4ItLeiFAGAzCKc7MDA1OlIjc4pLFKE7FEyxb5ZPNTbtuiv2fvrtddfOFsYXcwj8d8qv/XGq3femLvvvnvOvrIYPPEjG+PDseDbDnXcMXiyiGiyyACOPvrovN95552zV3/++ef5zVveznlEo6CICvG5l/d4JSvHP+qoo7JjKDs4PkVSGPm9HSz9W5rlPEoCQYHjVFXyRGnBOcKA28VOP/qTBWX6YnS2IKB8qYL/enyGHPbKziOOOCLj6sGeslGW8L6Y4ANr2MY99fpsdL7jjmFwkSTSr6gDVCk+tmDQedcJ5LgdwaLPbu7xjJRRNlErSsiQhVHJlOEQoh182o1wRTnharwYs3itnWP9Rd/RD5mLW5yveh/YRhYMjItyBh/wjPat8tEVx6B00RKo5513XpIl7rzzzuwEourMmTOz95uIcyBfTSXYiy++mCOrSFS1klsFrNZ9eGPoJtmeyRx00EE5cpGbIi21XnbZZbkMee2117KMHIKMIVcotVb/vXoOz6I0+URoMlVFcBFE7L1+IjNYIo6v/fo+D3tC+FCR+FHuwNUCgfOtUlccI5hnJMoIBhN1sBICqMoNNaLP3pkiFGciIIBC4HaEbRWk0dyHb3Mp/EY0I6+NsytvyKxsKhpQr8ozGpm1IZ8IbV+PyllGuyh1YBXXOQEcy6R8M5eAHzuxxX3GRvbaCKJ4aRfXrjkG5jEbk00Prxi8SZTJKmc5/PDDc5v99tsvC+hBjWtqStmD0F4Ma1foMvDtfqZMUc3/lYjMSFFW3NS7JtyyoKzSiTocHoFJHMc+MlK7Mta7n9NbATJerbEYvQWIWCVitIyaXrV3nsG7H2Y2GVcbxyj6NX+waKEPmOvbfShwtjhQDDz5Ygt/uuoY+OPtnICDEMBTWsAQUu0NBBsDEgFEWOADAiDaVRERWsCq5i34IRN+TbTJgn8KwzOFuR4KDUXW7Kyik53Ep8w/+RkxWeO5S1EM5wVABguXMGp69dk1x87D0ObdL32GHI5tsDQGHtwbm/Hw4TpnKvNY5Ge0x113DEwT3tIsIdSnDIfxcxJAevCHfE9cXcmotHXfAw88kIFUdgFjLMn4HuZRuh9FExmjRCCnZxRqcPxz8ioUVk9eRhJkPAYHV8ZVFRkjjFSfAtw222yTy2OZ0iv15fHcQ4dKaMcwsBdEEL26RzaIh5+yK7LSBGPno8yOZX+vzRhfXzZ8cRrtyzzkzpr803XHwB8wTJYIRol+VY8zqMMBbP0f+cExE1qTdbU7x3jwwQdzVBYdesExKNiEWx2MfwoOAyCbJ9uRHZvUTcPmsENhGNE4HBKOHKNqZzQu3KNfX9H1nRABQZlbNkpt4SNo4DWIIesDj9qYnwki2giWqol3330348kZLPm7xvi1Pffcc7MzhA3gy/0oeIuxWtmPiWNgNCIFYwcCAa2FA1ikJZz1aeUVsBmge9TyoqGoIqKUFdEKCFXcU0/pHJizVMUnXBiBh6IicdTTzsEOnuZkDE/2rcJI4KMf/TF+0TucwDhkZ+DGL4/nGkPGV/AIC+2RvfP6ZPTI4gu5XNM/Um7RPzuIFyn1zW7wpQ9UHj+fbOHPmDlGCOGBGIeQQfwuq0jnISBQfOHft7JEHN94Q5xF6XLFFVfkyKIEGyuiGAo3r6BIx0imcM6k+6GHHspOEQbcDq+UTl4BwRu7PstUiPEJFsa9/PLL83nXg6d2xnUvoxS5L7744uGyh/wyRpRF9YwSHsHjE088kWWADQeRFThZkTgBstensZG5h4m56oEdcAp9CwTOVUlj6hgECcGBpA6XDazeiLKhVABQAhKB3cNxbEAL4KoEppm+gjf3OMafDf+UW7zeTL/ltqIiAxBMOIIxnLOHgbFsMGQ4InhE0nJfrXw2hnIRD3SFBKmYWDfqE49woFvOzZno3NxM0HDciMjBDsjEBgLTsJHYN+qjmWtj7hjBLKFFQgL7qRz14jHHHJPBcC2M3wRPVDT5ohzZRv0Z16O/sdozAKmdopUH5kftTrzJpl+lk29CcgpLw3BgpMbwwqF/S80pGJ6xO0WM+8Ybbxw2TuOEoTYakwyovB/JKdzDMVQOHvCRzXju890fL11aGhcMqqIxdwwCRkYQDZAaE7lWBhyosQEmQM439MgffDHm0Si8EcuBC0ezcQSZVKYktzFEW+3sfQ4natRvu9eMTS9F7IvHo+m/2fb6LNuCc0WsW+mzHq9j6hgE9YCHp5tkez2EAVjlMOmyUlU2Lis8ygVR0rykyoltPZCaOY9fr32Qp50X6xi7pWCGbsHBvwLgGIcddljGxvcsjOU1GseyiKjJQWydpiqNsBlei85BfhNxeJunVCl31x0jBOMAjJ9jRC3OEERDS7QMI0qQohIYgLSq7FJuMZbi9WZA7kRbvFAWx5Dyy449mjEDG/dyDPW4VSiy2iNvBcCSUdxyyy35OYHrqJUx843j8I/qQpA074BVVdR1x+AIHCIiIGewsqIuds41tSSlOxeOFHuOQ/E+2zPEuFYVKM32U3RMvGy44YbZMTg2B2+GOIXXJcjpR9lkUy/QyZ7GUU8zAD9RCiuR0oQYVv1IMAk7qFL+rjkGg7GZQPLufffdN69QKJtkCAKKjNGu1p7gMgWDYEDRpkpAmu0rnMLehie/RavcI49Sr1ZW0w6V91ac/IsxmdHPB0U5pQ+4+TExDudNUhPufnaKIn7N6m2k9h11jKLRqP+UQJb2eHh4uYjK0LW1D0MpCq0NR4g24RTR/0hCdvM6/m14FtljeTL4D/liedFeO7LYcyh7eMGDY8X16IM8Vp9kWjj2GwWG5IZb2FKVOHTMMTCvDKBgD2Z22223bNynnnpqVrZXBFxjQDZUFJiwIqKHN8qHO+64IxvN/fffn9vG/VWC0UpfeC5uZMEbg/ctM/8SzYOxZ599Nhs4ebSx0ECpcDFvMCdRggkesoQ+zaHU0N4EgAEnue2227JTON+LgaEVDFu5h+w2Wdl33GFkEUIQqYIqdYwwbJGO8q2xOydqUiTFWpJVPzsuUwhlzzFETxlGdFSCqaMB4XwvUzgKWU3AyW4uwFns4QMbilUyxbq8p/4cw3UEB8FDGQUDx/acqB8zRS2dw5qthe3VatPKucocg6JiYu3lP2nfawvekKVITzgJQLH24QTBtPZeE2D89957b27jwZ1IwIm8R2OMWHmJ+3pxTzaK8l+HyMrgTzrppMxqOIEsGoZvz0nsyWiliRMUl2G9aOk6POyLZVUvYtBpniL4wA1m9l
VSW46BOQqKpTLK9FnUsxftvW4swssa4dkhCGFCMNfcp08lhM9KKc4h0obgsa8ShHb6Cv5DJnu8IwHB9TB852DkOlzIRV6kXbSVMfQj48BWdhE0TLr1Fe3zQR/+gRMK5yjuq4KjZccQ2SlYjexHmCnSkiLjtsesmlnpQ5naFo1A5GMAHoJxBI709ttv54ygntZWmWEcQMS9VQleRT9kNmfAG0P3HRPGbHnVudg4gEyJOAYiE0wikHAAcxHyxndO4KI/WHEK/Qzo7wjAXfaFNdurikaNtIERRTqmYIYdE2tGEs8hfJ8iFB/3xV67MCjG8NZbb6Unn3wyC+XfDxfnDxFp496qhK6qn5CDA5twK/fIRH5Gb0MMOhxCFgkKjOBoHqKEkmWvueaanG04iTHcP3CKQO0/e3ZhgceP2smqcKyKRuUYlEKhPDL+d5z1c4qVFTDnmBIZMwZ9DiKAzTmvCetPNFR7W7fXXt/KLddqTcyjr17bRybkEF5XiQhPHnMuDlF07MCB3I49l4EDxTrnfsFBJBxQbQSKeGoROqjdurWzIzoGJqRxS2KUf/rpp2flcRDRjRKVCdpFhCwz7rOVKE5z++235/7uuuuuXDq5P5yKEY0np8B3TKb9K1/vLTF0/7MiJtyRPYrq4fx+7R2e7vFDDzDyfx1goPwcUGMEYG/rFI3oGAYW0UUyimQIcRwGzbgpVsZAUTYE065xCtc5GUeSHTyg4kzKs/FKoSBljyhvTz6y2gseZAwlwgI+cNBGtpV9ZRj4BobjFY9O8g0bQcXWaRpxBE5hHuFnJ0XB6dOn56ge2QGDlK2dFSSG4b8kxVzEdSWGVxgYQLzrxJkIGgbTaUE73b9MZ/KNfIMOJpdcckndYZWmFAwv+wgydW/o8wsCK3xnz56dFzx8oxPGtk7QiI5h0FBaeGzRKYIpjDN2ig6lB9OiprmI60qNieIMIXvsQy7yotjH9eI+2hbPDY4bI8D+2JdnWTYY+iwDs78qaUTHEM0sI1pClAVMnqX9ImGQszB6DHoNOLzZNZlGRlEq9JNB9JOsRXvoxDGnsDTudwFUHTNmzMjDqEaU9xYvGgWiZnka0TEo16CeNyCM1SLtwmt5cNEoCOUa5xjQAIFWEGBP5rbKdTRr1qwcfGUMthXVTCt917pnRMdwE6ZiQm0JckADBMYCgWLwtXjTSeq/d5Y7ieag7wmDwMAxJowqB4JUicDAMapEc9DXhEFgcjxcM7vvR4on7bHS1q84WNkpUr/iEL+aOLRw4cIlQCmuIhUBmsjHlpQ9c7EmzjEsN1vd6DeCg8UVT+qRd7b6EQey8wMT+6El8RSu36xhIO8AgQYI9F94bADG4NIAgUDg/wHX+3lgThDIegAAAABJRU5ErkJggg==".encode('utf-8')), embed=True)
import os
from six.moves.urllib.request import urlretrieve
SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
#SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
# for those who have no access to google storage, use lecun's repo please
WORK_DIRECTORY = "/tmp/mnist-data"
def maybe_download(filename):
"""A helper to download the data files if not present."""
if not os.path.exists(WORK_DIRECTORY):
os.mkdir(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not os.path.exists(filepath):
filepath, _ = urlretrieve(SOURCE_URL + filename, filepath)
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
else:
print('Already downloaded', filename)
return filepath
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
import gzip, binascii, struct, numpy
import matplotlib.pyplot as plt
with gzip.open(test_data_filename) as f:
# Print the header fields.
for field in ['magic number', 'image count', 'rows', 'columns']:
# struct.unpack reads the binary data provided by f.read.
# The format string '>i' decodes a big-endian integer, which
# is the encoding of the data.
print(field, struct.unpack('>i', f.read(4))[0])
# Read the first 28x28 set of pixel values.
# Each pixel is one byte, [0, 255], a uint8.
buf = f.read(28 * 28)
image = numpy.frombuffer(buf, dtype=numpy.uint8)
# Print the first few values of image.
print('First 10 pixels:', image[:10])
%matplotlib inline
# We'll show the image and its pixel value histogram side-by-side.
_, (ax1, ax2) = plt.subplots(1, 2)
# To interpret the values as a 28x28 image, we need to reshape
# the numpy array, which is one dimensional.
ax1.imshow(image.reshape(28, 28), cmap=plt.cm.Greys);
ax2.hist(image, bins=20, range=[0,255]);
# Let's convert the uint8 image to 32 bit floats and rescale
# the values to be centered around 0, between [-0.5, 0.5].
#
# We again plot the image and histogram to check that we
# haven't mangled the data.
scaled = image.astype(numpy.float32)
scaled = (scaled - (255 / 2.0)) / 255
_, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(scaled.reshape(28, 28), cmap=plt.cm.Greys);
ax2.hist(scaled, bins=20, range=[-0.5, 0.5]);
with gzip.open(test_labels_filename) as f:
# Print the header fields.
for field in ['magic number', 'label count']:
print(field, struct.unpack('>i', f.read(4))[0])
print('First label:', struct.unpack('B', f.read(1))[0])
IMAGE_SIZE = 28
PIXEL_DEPTH = 255
def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels].
For MNIST data, the number of channels is always 1.
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
# Skip the magic number and dimensions; we know these values.
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)
data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
return data
train_data = extract_data(train_data_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
print('Training data shape', train_data.shape)
_, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(train_data[0].reshape(28, 28), cmap=plt.cm.Greys);
ax2.imshow(train_data[1].reshape(28, 28), cmap=plt.cm.Greys);
NUM_LABELS = 10
def extract_labels(filename, num_images):
"""Extract the labels into a 1-hot matrix [image index, label index]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
# Skip the magic number and count; we know these values.
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
# Convert to dense 1-hot representation.
return (numpy.arange(NUM_LABELS) == labels[:, None]).astype(numpy.float32)
train_labels = extract_labels(train_labels_filename, 60000)
test_labels = extract_labels(test_labels_filename, 10000)
print('Training labels shape', train_labels.shape)
print('First label vector', train_labels[0])
print('Second label vector', train_labels[1])
VALIDATION_SIZE = 5000
validation_data = train_data[:VALIDATION_SIZE, :, :, :]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, :, :, :]
train_labels = train_labels[VALIDATION_SIZE:]
train_size = train_labels.shape[0]
print('Validation shape', validation_data.shape)
print('Train size', train_size)
import tensorflow as tf
# We'll bundle groups of examples during training for efficiency.
# This defines the size of the batch.
BATCH_SIZE = 60
# We have only one channel in our grayscale images.
NUM_CHANNELS = 1
# The random seed that defines initialization.
SEED = 42
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step, which we'll write once we define the graph structure.
train_data_node = tf.placeholder(
tf.float32,
shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
train_labels_node = tf.placeholder(tf.float32,
shape=(BATCH_SIZE, NUM_LABELS))
# For the validation and test data, we'll just hold the entire dataset in
# one constant node.
validation_data_node = tf.constant(validation_data)
test_data_node = tf.constant(test_data)
# The variables below hold all the trainable weights. For each, the
# parameter defines how the variables will be initialized.
conv1_weights = tf.Variable(
tf.truncated_normal([5, 5, NUM_CHANNELS, 32], # 5x5 filter, depth 32.
stddev=0.1,
seed=SEED))
conv1_biases = tf.Variable(tf.zeros([32]))
conv2_weights = tf.Variable(
tf.truncated_normal([5, 5, 32, 64],
stddev=0.1,
seed=SEED))
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]))
fc1_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
stddev=0.1,
seed=SEED))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[512]))
fc2_weights = tf.Variable(
tf.truncated_normal([512, NUM_LABELS],
stddev=0.1,
seed=SEED))
fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))
print('Done')
def model(data, train=False):
"""The Model definition."""
# 2D convolution, with 'SAME' padding (i.e. the output feature map has
# the same size as the input). Note that {strides} is a 4D array whose
# shape matches the data layout: [image index, y, x, depth].
conv = tf.nn.conv2d(data,
conv1_weights,
strides=[1, 1, 1, 1],
padding='SAME')
# Bias and rectified linear non-linearity.
relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
# Max pooling. The kernel size spec ksize also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
conv = tf.nn.conv2d(pool,
conv2_weights,
strides=[1, 1, 1, 1],
padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool.get_shape().as_list()
reshape = tf.reshape(
pool,
[pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
return tf.matmul(hidden, fc2_weights) + fc2_biases
print('Done')
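# (Added note, not part of the original cell.) Shape trace for a 28x28 input with
# BATCH_SIZE=60: conv1+pool1 -> (60, 14, 14, 32), conv2+pool2 -> (60, 7, 7, 64),
# which flattens to (60, 7*7*64) = (60, 3136) before the two fully connected
# layers, giving logits of shape (60, 10).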
# Training computation: logits + cross-entropy loss.
logits = model(train_data_node, True)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
labels=train_labels_node, logits=logits))
# L2 regularization for the fully connected parameters.
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# Optimizer: set up a variable that's incremented once per batch and
# controls the learning rate decay.
batch = tf.Variable(0)
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
train_size, # Decay step.
0.95, # Decay rate.
staircase=True)
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate,
0.9).minimize(loss,
global_step=batch)
# Predictions for the minibatch, validation set and test set.
train_prediction = tf.nn.softmax(logits)
# We'll compute them only once in a while by calling their {eval()} method.
validation_prediction = tf.nn.softmax(model(validation_data_node))
test_prediction = tf.nn.softmax(model(test_data_node))
print('Done')
# Create a new interactive session that we'll use in
# subsequent code cells.
s = tf.InteractiveSession()
# Use our newly created session as the default for
# subsequent operations.
s.as_default()
# Initialize all the variables we defined above.
tf.global_variables_initializer().run()
BATCH_SIZE = 60
# Grab the first BATCH_SIZE examples and labels.
batch_data = train_data[:BATCH_SIZE, :, :, :]
batch_labels = train_labels[:BATCH_SIZE]
# This dictionary maps the batch data (as a numpy array) to the
# node in the graph it should be fed to.
feed_dict = {train_data_node: batch_data,
train_labels_node: batch_labels}
# Run the graph and fetch some of the nodes.
_, l, lr, predictions = s.run(
[optimizer, loss, learning_rate, train_prediction],
feed_dict=feed_dict)
print('Done')
print(predictions[0])
# The highest probability in the first entry.
print('First prediction', numpy.argmax(predictions[0]))
# But, predictions is actually a list of BATCH_SIZE probability vectors.
print(predictions.shape)
# So, we'll take the highest probability for each vector.
print('All predictions', numpy.argmax(predictions, 1))
print('Batch labels', numpy.argmax(batch_labels, 1))
correct = numpy.sum(numpy.argmax(predictions, 1) == numpy.argmax(batch_labels, 1))
total = predictions.shape[0]
print(float(correct) / float(total))
confusions = numpy.zeros([10, 10], numpy.float32)
bundled = zip(numpy.argmax(predictions, 1), numpy.argmax(batch_labels, 1))
for predicted, actual in bundled:
confusions[predicted, actual] += 1
plt.grid(False)
plt.xticks(numpy.arange(NUM_LABELS))
plt.yticks(numpy.arange(NUM_LABELS))
plt.imshow(confusions, cmap=plt.cm.jet, interpolation='nearest');
def error_rate(predictions, labels):
"""Return the error rate and confusions."""
correct = numpy.sum(numpy.argmax(predictions, 1) == numpy.argmax(labels, 1))
total = predictions.shape[0]
error = 100.0 - (100 * float(correct) / float(total))
confusions = numpy.zeros([10, 10], numpy.float32)
bundled = zip(numpy.argmax(predictions, 1), numpy.argmax(labels, 1))
for predicted, actual in bundled:
confusions[predicted, actual] += 1
return error, confusions
print('Done')
# Train for one full pass (epoch) over the training set.
steps = train_size // BATCH_SIZE
for step in range(steps):
# Compute the offset of the current minibatch in the data.
# Note that we could use better randomization across epochs.
offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
batch_data = train_data[offset:(offset + BATCH_SIZE), :, :, :]
batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
# This dictionary maps the batch data (as a numpy array) to the
# node in the graph it should be fed to.
feed_dict = {train_data_node: batch_data,
train_labels_node: batch_labels}
# Run the graph and fetch some of the nodes.
_, l, lr, predictions = s.run(
[optimizer, loss, learning_rate, train_prediction],
feed_dict=feed_dict)
# Print out the loss periodically.
if step % 100 == 0:
error, _ = error_rate(predictions, batch_labels)
print('Step %d of %d' % (step, steps))
print('Mini-batch loss: %.5f Error: %.5f Learning rate: %.5f' % (l, error, lr))
print('Validation error: %.1f%%' % error_rate(
validation_prediction.eval(), validation_labels)[0])
test_error, confusions = error_rate(test_prediction.eval(), test_labels)
print('Test error: %.1f%%' % test_error)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.grid(False)
plt.xticks(numpy.arange(NUM_LABELS))
plt.yticks(numpy.arange(NUM_LABELS))
plt.imshow(confusions, cmap=plt.cm.jet, interpolation='nearest');
for i, cas in enumerate(confusions):
for j, count in enumerate(cas):
if count > 0:
xoff = .07 * len(str(count))
plt.text(j-xoff, i+.2, int(count), fontsize=9, color='white')
plt.xticks(numpy.arange(NUM_LABELS))
plt.hist(numpy.argmax(test_labels, 1));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Interact with SVG display
Step3: Write a function named draw_circle that draws a circle using SVG. Your function should take the parameters of the circle as function arguments and have defaults as shown. You will have to write the raw SVG code as a Python string and then use the IPython.display.SVG object and IPython.display.display function.
Step4: Use interactive to build a user interface for exploring the draw_circle function
Step5: Use the display function to show the widgets created by interactive
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.html import widgets
from IPython.display import display, SVG
s = ' <svg width="100" height="100"> <circle cx="50" cy="50" r="20" fill="aquamarine" /> </svg>'
SVG(s)
def draw_circle(width=100, height=100, cx=25, cy=25, r=5, fill='red'):
"""Draw an SVG circle.
Parameters
----------
width : int
The width of the svg drawing area in px.
height : int
The height of the svg drawing area in px.
cx : int
The x position of the center of the circle in px.
cy : int
The y position of the center of the circle in px.
r : int
The radius of the circle in px.
fill : str
The fill color of the circle.
"""
a = '<svg width="'+str(width)+'" height="'+str(height)+'"> <circle cx="'+str(cx)+'" cy="'+str(cy)+'" r="'+str(r)+'" fill="'+fill+'"/></svg>'
display(SVG(a))
draw_circle(cx=10, cy=10, r=10, fill='blue')
assert True # leave this to grade the draw_circle function
w=interactive(draw_circle, width=fixed(300), height=fixed(300), cx=(0,300,1), cy=(0,300,1), r=(0,50,1), fill='red');
c = w.children
assert c[0].min==0 and c[0].max==300
assert c[1].min==0 and c[1].max==300
assert c[2].min==0 and c[2].max==50
assert c[3].value=='red'
display(w)
assert True # leave this to grade the display of the widget
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Connect girder client and set parameters
Step2: 2. Polygon merger
Step3: Required arguments for initialization
Step4: maskpaths
Step5: Note that the pattern _left-123_ and _top-123_ is assumed to encode the x and y offset
Step6: 3. Initialize and run the merger
Step7: This is the result
Step8: 4. Visualize results on HistomicsTK
|
<ASSISTANT_TASK:>
Python Code:
import os
CWD = os.getcwd()
import os
import girder_client
from pandas import read_csv
from histomicstk.annotations_and_masks.polygon_merger import Polygon_merger
from histomicstk.annotations_and_masks.masks_to_annotations_handler import (
get_annotation_documents_from_contours, )
APIURL = 'http://candygram.neurology.emory.edu:8080/api/v1/'
SAMPLE_SLIDE_ID = '5d586d76bd4404c6b1f286ae'
gc = girder_client.GirderClient(apiUrl=APIURL)
gc.authenticate(interactive=True)
# gc.authenticate(apiKey='kri19nTIGOkWH01TbzRqfohaaDWb6kPecRqGmemb')
# read GTCodes dataframe
PTESTS_PATH = os.path.join(CWD, '..', '..', 'tests')
GTCODE_PATH = os.path.join(PTESTS_PATH, 'test_files', 'sample_GTcodes.csv')
GTCodes_df = read_csv(GTCODE_PATH)
GTCodes_df.index = GTCodes_df.loc[:, 'group']
# This is where masks for adjacent rois are saved
MASK_LOADPATH = os.path.join(
PTESTS_PATH,'test_files', 'annotations_and_masks', 'polygon_merger_roi_masks')
maskpaths = [
os.path.join(MASK_LOADPATH, j) for j in os.listdir(MASK_LOADPATH)
if j.endswith('.png')]
print(Polygon_merger.__doc__)
print(Polygon_merger.__init__.__doc__)
print(Polygon_merger.run.__doc__)
GTCodes_df.head()
[os.path.split(j)[1] for j in maskpaths[:5]]
print(Polygon_merger.set_roi_bboxes.__doc__)
pm = Polygon_merger(
maskpaths=maskpaths, GTCodes_df=GTCodes_df,
discard_nonenclosed_background=True, verbose=1,
monitorPrefix='test')
contours_df = pm.run()
contours_df.head()
# deleting existing annotations in target slide (if any)
existing_annotations = gc.get('/annotation/item/' + SAMPLE_SLIDE_ID)
for ann in existing_annotations:
gc.delete('/annotation/%s' % ann['_id'])
# get list of annotation documents
annotation_docs = get_annotation_documents_from_contours(
contours_df.copy(), separate_docs_by_group=True,
docnamePrefix='test',
verbose=False, monitorPrefix=SAMPLE_SLIDE_ID + ": annotation docs")
# post annotations to slide -- make sure it posts without errors
for annotation_doc in annotation_docs:
resp = gc.post(
"/annotation?itemId=" + SAMPLE_SLIDE_ID, json=annotation_doc)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: JS with IPython?
Step2: Python data | D3 Viz
Step3: Passing data from IPython to JS
Step6: Passing data from JS to IPython
Step7: Click "Set Value" then run the cell below.
Step8: Custom D3 module.
|
<ASSISTANT_TASK:>
Python Code:
# import requirments
from IPython.display import Image
from IPython.display import display
from IPython.display import HTML
from datetime import *
import json
from copy import *
from pprint import *
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json
from ggplot import *
import networkx as nx
from networkx.readwrite import json_graph
#from __future__ import http_server
from BaseHTTPServer import BaseHTTPRequestHandler
from IPython.display import IFrame
import rpy2
%load_ext rpy2.ipython
%R require("ggplot2")
% matplotlib inline
randn = np.random.randn
%%javascript
require.config({
paths: {
//d3: "http://d3js.org/d3.v3.min" //<-- url
d3: 'd3/d3.min.js' //<-- local path
}
});
import json
import networkx as nx
from networkx.readwrite import json_graph
from IPython.display import IFrame
G = nx.barbell_graph(6,3)
# this d3 example uses the name attribute for the mouse-hover value,
# so add a name to each node
for n in G:
G.node[n]['name'] = n
# write json formatted data
d = json_graph.node_link_data(G) # node-link format to serialize
# write json
json.dump(d, open('force/force.json','w'))
# render html inline
IFrame('force/force.html', width=700, height=350)
#print('Or copy all files in force/ to webserver and load force/force.html')
from IPython.display import Javascript
import numpy as np
mu, sig = 0.05, 0.2
rnd = np.random.normal(loc=mu, scale=sig, size=4)
## Use the variable rnd above in Javascript:
javascript = 'element.append("{}");'.format(str(rnd))
Javascript(javascript)
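# Added note: for anything richer than a short string, json.dumps() (json is
# already imported above) is a more robust way to hand Python data to the
# Javascript side, e.g.
#   Javascript('element.append({});'.format(json.dumps(rnd.tolist())))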
from IPython.display import HTML
input_form = """
<div style="background-color:gainsboro; border:solid black; width:300px; padding:20px;">
Name: <input type="text" id="var_name" value="foo"><br>
Value: <input type="text" id="var_value" value="bar"><br>
<button onclick="set_value()">Set Value</button>
</div>
"""
javascript = """
<script type="text/Javascript">
function set_value(){
var var_name = document.getElementById('var_name').value;
var var_value = document.getElementById('var_value').value;
var command = var_name + " = '" + var_value + "'";
console.log("Executing Command: " + command);
var kernel = IPython.notebook.kernel;
kernel.execute(command);
}
</script>
"""
HTML(input_form + javascript)
print foo
from pythonD3 import visualize
data = [{'x': 10, 'y': 20, 'r': 15, 'name': 'circle one'},
{'x': 40, 'y': 40, 'r': 5, 'name': 'circle two'},
{'x': 20, 'y': 30, 'r': 8, 'name': 'circle three'},
{'x': 25, 'y': 10, 'r': 10, 'name': 'circle four'}]
visualize.plot_circle(data, id=2)
visualize.plot_chords(id=5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the above examples, it becomes clear that there is much repetition, and we can make the code more compact. Let us abstract common functionality into an abstract class.
Step3: Iterators
Step4: Generators
Step5: Summing series
Step6: The key thing to note is that this is much more efficient than generating a list of terms in memory and summing it. That is, more efficient than the list-based version shown below.
Step7: Decorators
|
<ASSISTANT_TASK:>
Python Code:
class Dog:
def __init__(self, name):
self.age = 0
self.name = name
self.noise = "Woof!"
self.food = "dog biscuits"
def make_sound(self):
print(self.noise)
def eat_food(self):
print("Eating " + self.food + ".")
def increase_age(self, n = 1):
self.age = self.age + n
d1 = Dog('Buster')
d1.make_sound()
d2 = Dog('Tiger')
d2.noise = 'Bark'
d2.make_sound()
d1.make_sound()
d1.eat_food()
d1.increase_age(3)
print(d1.age)
class Cat:
def __init__(self, name):
self.age = 0
self.name = name
self.noise = "Meow!"
self.food = "cat food"
def make_sound(self):
print(self.noise)
def eat_food(self):
print("Eating " + self.food + ".")
def increase_age(self, n = 1):
self.age = self.age + n
c1 = Cat('Harvey')
c1.make_sound()
c1.eat_food()
from abc import ABCMeta, abstractmethod
class Mammal(metaclass=ABCMeta):
@abstractmethod
def __init__(self, name):
self.age = 0
self.name = name
self.noise = "None!"
self.food = "none"
def make_sound(self):
print(self.name + " says " + self.noise)
def eat_food(self):
print(self.name + " is eating " + self.food + ".")
def increase_age(self, n = 1):
self.age = self.age + n
class Dog(Mammal):
def __init__(self, name):
super(Dog, self).__init__(name)
self.noise = "Bark!"
self.food = "dog biscuits"
class Cat(Mammal):
def __init__(self, name):
super(Cat, self).__init__(name)
self.noise = "Meow!"
self.food = "cat food"
d = Dog("Buster")
c = Cat("Harvey")
d.make_sound()
c.make_sound()
c.eat_food()
m = Mammal("Name")
m.make_sound()
m.eat_food()
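# Note (added): Mammal declares __init__ as an @abstractmethod, so the three
# lines above are expected to fail with a TypeError -- an abstract class cannot
# be instantiated directly; only concrete subclasses such as Dog and Cat can.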
import sys
print(sys.version)
animal_house = [Dog("MyDog" + str(i))
for i in range(1, 5)]
animal_house.extend([Cat("MyCat" + str(i))
for i in range(1, 5)])
for i in animal_house:
i.make_sound()
class Reverse:
"""Iterator for looping over a sequence backwards."""
def __init__(self, data):
self.data = data
self.index = len(data)
def __iter__(self):
return self
def __next__(self): # in Python 2, this method is named next(self)
if self.index == 0:
raise StopIteration
self.index = self.index - 1
return self.data[self.index]
rev = iter(Reverse([10, 30, 200, 0.0, 'ABC']))
for i in rev:
print(i)
def reverse(data):
for index in range(len(data)-1, -1, -1):
yield data[index]
for char in reverse("Madam, I'm Adam"):
print(char)
import math
def series_sum(max_terms=1000):
n = 0
while n < max_terms:
n = n + 1
yield 1.0 / n**2
print(sum(series_sum(100000)) - math.pi**2 / 6)
print(sum([1.0 / i**2 for i in range(1, 10000)]))
def add_numbers(a, b):
return a + b
def arg_wrapper(f, *args, **kwargs):
print("The function arguments are:")
print(args)
print(kwargs)
print("Now running the function!")
return f(*args, **kwargs)
#print(add_numbers(1, 2))
#print(arg_wrapper(add_numbers, 1, 2))
def myfunction(name='Test', age=30):
print("Name: %s, Age: %d" % (name, age))
arg_wrapper(myfunction, name='Harvey', age=3)
import time
def timing_function(some_function):
def wrapper():
t1 = time.time()
some_function()
t2 = time.time()
return "Time it took to run the function: " + str((t2 - t1)) + "\n"
return wrapper
@timing_function
def my_function():
num_list = []
for num in (range(0, 10000)):
num_list.append(num)
print("\nSum of all the numbers: " + str((sum(num_list))))
my_function()
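# Note (added): the decorated call above returns the timing string rather than
# printing it; a notebook displays the returned value automatically, but in a
# plain script you would wrap it in print(), e.g. print(my_function()).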
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Importing and preprocessing data
Step2: Sorting out data (for plotting purposes)
Step3: Artificial Neural Network (Gridsearch, DO NOT RUN)
Step4: Plotting
Step5: Saving ANN to file through pickle (and using it later)
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from sklearn.metrics import r2_score # in order to test the results
from sklearn.grid_search import GridSearchCV # looking for parameters
import pickle #saving to file
#this function reads the file
def read_data(archive, rows, columns):
data = open(archive, 'r')
mylist = data.read().split()
data.close()
myarray = np.array(mylist).reshape(( rows, columns)).astype(float)
return myarray
data = read_data('../get_data_example/set.txt',72, 12)
X = data[:, [0, 2, 4, 6, 7, 8, 9, 10, 11]]
#print pre_X.shape, data.shape
y = data[:,1]
#print y.shape
#getting the time vector for plotting purposes
time_stamp = np.zeros(data.shape[0])
for i in xrange(data.shape[0]):
time_stamp[i] = i*(1.0/60.0)
#print X.shape, time_stamp.shape
X = np.hstack((X, time_stamp.reshape((X.shape[0], 1))))
print X.shape
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
t_test = X_test[:,-1]
t_train = X_train[:, -1]
X_train_std = preprocessing.scale(X_train[:,0:-1])
X_test_std = preprocessing.scale(X_test[:, 0:-1])
#Here comes the way to sort out the data according to one the elements of it
test_sorted = np.hstack(
(t_test.reshape(X_test_std.shape[0], 1), X_test_std, y_test.reshape(X_test_std.shape[0], 1)))
test_sorted = test_sorted[np.argsort(test_sorted[:,0])] #modified
train_sorted = np.hstack((t_train.reshape(t_train.shape[0], 1), y_train.reshape(y_train.shape[0], 1) ))
train_sorted = train_sorted[np.argsort(train_sorted[:,0])]
#Grid search, random state =0: same beginning for all
alpha1 = np.linspace(0.001,0.9, 9).tolist()
momentum1 = np.linspace(0.3,0.9, 9).tolist()
params_dist = {"hidden_layer_sizes":[(20, 40), (15, 40), (10,15), (15, 15, 10), (15, 10), (15, 5)],
"activation":['tanh','logistic'],"algorithm":['sgd', 'l-bfgs'], "alpha":alpha1,
"learning_rate":['constant'],"max_iter":[500], "random_state":[0],
"verbose": [False], "warm_start":[False], "momentum":momentum1}
grid = GridSearchCV(MLPRegressor(), param_grid=params_dist)
grid.fit(X_train_std, y_train)
print "Best score:", grid.best_score_
print "Best parameter's set found:\n"
print grid.best_params_
reg = MLPRegressor(warm_start = grid.best_params_['warm_start'], verbose= grid.best_params_['verbose'],
algorithm= grid.best_params_['algorithm'],hidden_layer_sizes=grid.best_params_['hidden_layer_sizes'],
activation= grid.best_params_['activation'], max_iter= grid.best_params_['max_iter'],
random_state= None,alpha= grid.best_params_['alpha'], learning_rate= grid.best_params_['learning_rate'],
momentum= grid.best_params_['momentum'])
reg.fit(X_train_std, y_train)
%matplotlib inline
results = reg.predict(test_sorted[:, 1:-1])
plt.plot(test_sorted[:, 0], results, c='r') # ( sorted time, results)
plt.plot(train_sorted[:, 0], train_sorted[:,1], c='b' ) #expected
plt.scatter(time_stamp, y, c='k')
plt.xlabel("Time(s)")
plt.ylabel("Angular velocities(rad/s)")
red_patch = mpatches.Patch(color='red', label='Predicted')
blue_patch = mpatches.Patch(color='blue', label ='Expected')
black_patch = mpatches.Patch(color='black', label ='Original')
plt.legend(handles=[red_patch, blue_patch, black_patch])
plt.title("MLP results vs Expected values")
plt.show()
print "Accuracy:", reg.score(X_test_std, y_test)
#print "Accuracy test 2", r2_score(test_sorted[:,-1], results)
#This prevents the user from losing a previous important result
def save_it(ans):
if ans == "yes":
f = open('data.ann', 'w')
mem = pickle.dumps(grid)
f.write(mem)
f.close()
else:
print "Nothing to save"
save_it("no")
#Loading a successful ANN
f = open('data.ann', 'r')
nw = f.read()
saved_ann = pickle.loads(nw)
print "Just the accuracy:", saved_ann.score(X_test_std, y_test), "\n"
print "Parameters:"
print saved_ann.get_params(), "\n"
print "Loss:", saved_ann.loss_
print "Total of layers:", saved_ann.n_layers_
print "Total of iterations:", saved_ann.n_iter_
#print from previously saved data
%matplotlib inline
results = saved_ann.predict(test_sorted[:, 1:-1])
plt.plot(test_sorted[:, 0], results, c='r') # ( sorted time, results)
plt.plot(train_sorted[:, 0], train_sorted[:,1], c='b' ) #expected
plt.scatter(time_stamp, y, c='k')
plt.xlabel("Time(s)")
plt.ylabel("Angular velocities(rad/s)")
red_patch = mpatches.Patch(color='red', label='Predicted')
blue_patch = mpatches.Patch(color='blue', label ='Expected')
black_patch = mpatches.Patch(color='black', label ='Original')
plt.legend(handles=[red_patch, blue_patch, black_patch])
plt.title("MLP results vs Expected values (Loaded from file)")
plt.show()
print " Accuracy:", saved_ann.score(X_test_std, y_test)
plt.plot(time_stamp, y,'--.', c='r')
plt.xlabel("Time(s)")
plt.ylabel("Angular velocities(rad/s)")
plt.title("Resuts from patient:\n"
" Angular velocities for the right knee")
plt.show()
#print "Accuracy test 2", r2_score(test_sorted[:,-1], results)
print max(y), saved_ann.predict(X_train_std[y_train.tolist().index(max(y_train)),:].reshape((1,9)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Introduction
Step2: We import all classes/methods from the geometry sub-module
Step3: 2. Basic spatial data types
Step4: When considering two points, we can calculate their 3D distance as well as their horizontal, 2D distance
Step5: Among other methods, we can create a Cartesian plane from three points
Step6: The four coefficients returned (a, b, c and d) define the Cartesian plane as in the equation
Step7: In this example the normal versor is vertical.
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 1
%matplotlib inline
from pygsf.geometries.shapes.space3d import *
p1 = Point3D(1.0, 2.4, 0.2) # definition of a Point3D instance
p2 = Point3D(0.9, 4.2, 10.5)
p1.distance(p2) # 3D distance between two points
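# Added illustration: the horizontal (2D) distance mentioned in the text can be
# checked by hand from the coordinates. This assumes Point3D exposes .x and .y
# attributes (an assumption here, not verified against the pygsf API):
# ((p2.x - p1.x)**2 + (p2.y - p1.y)**2)**0.5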
pl1 = CPlane3D.fromPoints(Point3D(0, 0, 0), Point3D(1, 0, 0), Point3D(0, 1, 0))
print(pl1)
normal_versor = pl1.normVersor() # versor (unit vector) normal to the provided Cartesian plane
print(normal_versor)
pl1, pl2 = CPlane3D(1, 0, 0, 0), CPlane3D(0, 0, 1, 0)
inters_v = pl1.intersVersor(pl2) # intersection versor between two Cartesian planes
print(inters_v)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Exercise 1
Step3: b. Mean of returns
Step4: Exercise 2
Step5: b. Median of the returns.
Step6: Exercise 3
Step7: b. Mode of the returns.
Step8: Exercise 4
Step9: b. Geometric Mean of returns.
Step10: Exercise 5
Step11: b. Harmonic Mean of stock returns.
Step12: Exercise 6
Step13: We can clearly see positive skewing from the histogram of the returns. We see fewer higher values and a longer right tail.
|
<ASSISTANT_TASK:>
Python Code:
# Useful Functions
def mode(l):
# Count the number of times each element appears in the list
counts = {}
for e in l:
if e in counts:
counts[e] += 1
else:
counts[e] = 1
# Return the elements that appear the most times
maxcount = 0
modes = {}
for (key, value) in counts.items():
if value > maxcount:
maxcount = value
modes = {key}
elif value == maxcount:
modes.add(key)
if maxcount > 1 or len(l) == 1:
return list(modes)
return 'No mode'
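# Quick illustration of the helper above (added, not part of the exercise):
# mode([1, 2, 2, 3]) -> [2]; mode([1, 2, 3]) -> 'No mode'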
# Useful Libraries
import scipy.stats as stats
import numpy as np
l=[]
for x in range(1,100):
x=np.random.randint(1,100)
l.append(x)
## Your code goes here
print 'mean of l:', np.mean(l)
price = get_pricing('ITI', fields='price', start_date='2005-01-01', end_date='2010-01-01')
returns = price.pct_change()[1:]
## Your code goes here.
print 'mean of ITI returns:', np.mean(returns)
## Your code goes here.
print 'median of l:', np.median(l)
price = get_pricing('BAC', fields='open_price', start_date='2005-01-01', end_date='2010-01-01')
returns = price.pct_change()[1:]
## Your code goes here
print 'Median of BAC returns:', np.median(returns)
## Your code goes here.
print 'mode of l:', mode(l)
start = '2014-01-01'
end = '2015-01-01'
pricing = get_pricing('GS', fields='price', start_date=start, end_date=end)
returns = pricing.pct_change()[1:]
hist, bins = np.histogram(returns, 20)
maxfreq = max(hist)
print 'Mode of bins:', [(bins[i], bins[i+1]) for i, j in enumerate(hist) if j == maxfreq]
## Your code goes here.
print 'Geometric mean of l:', stats.gmean(l)
price = get_pricing('C', fields='open_price', start_date='2005-01-01', end_date='2010-01-01')
print 'Geometric mean of Citi:', stats.gmean(price) ## Your code goes here
## Your code goes here.
print 'Harmonic mean of l:', stats.hmean(l)
## Your code goes here.
print 'Harmonic mean of Citi:', stats.hmean(price)
import matplotlib.pyplot as plt
# Collect Data.
price = get_pricing('SPY', fields='volume', start_date='2016-01-01', end_date='2017-01-01')
returns = price.pct_change()[1:]
# Calculate Mean, Median and Mode.
mean = np.mean(returns)
median = np.median(returns)
mode = stats.mode(returns)[0][0]
print 'mean:', mean
print 'median:', median
print 'mode:', mode
print len(returns)
# Setting parameters and print skewness outcome.
if mode < median < mean:
print 'The returns are positivly skewed.'
if mean<median<mode:
print 'The returns are negativly skewed.'
if mean == median == mode:
print 'There is no Skewness: the returns are symetricaly distributed'
plt.hist(returns, bins = 50);
plt.xlabel('Returns');
plt.ylabel('Frequency');
plt.title('Histogram of Returns');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare Data
Step2: Prepare Hyperparameters
Step3: Run Validation
Step4: Pick the best hyperparameters and train on the full data
Step5: Calculate Accuracy on Full Training Set
|
<ASSISTANT_TASK:>
Python Code:
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings("ignore", category=ConvergenceWarning)
import itertools
import time
import numpy as np
import pandas as pd
from sklearn import model_selection
from sklearn import linear_model
from sklearn import metrics
try:
import wget
except ImportError:
!pip install wget # you may need pip3
import wget
train_data_url = "http://s3.amazonaws.com/verta-starter/census-train.csv"
train_data_filename = wget.download(train_data_url)
test_data_url = "http://s3.amazonaws.com/verta-starter/census-test.csv"
test_data_filename = wget.download(test_data_url)
df_train = pd.read_csv("census-train.csv")
X_train = df_train.iloc[:,:-1].values
y_train = df_train.iloc[:, -1]
df_train.head()
hyperparam_candidates = {
'C': [1e-4, 1e-1, 1, 10, 1e3],
'solver': ['liblinear', 'lbfgs'],
'max_iter': [15, 28],
}
# total models 20
# create hyperparam combinations
hyperparam_sets = [dict(zip(hyperparam_candidates.keys(), values))
for values
in itertools.product(*hyperparam_candidates.values())]
# create validation split
(X_val_train, X_val_test,
y_val_train, y_val_test) = model_selection.train_test_split(X_train, y_train,
test_size=0.2,
shuffle=True)
def run_experiment(hyperparams):
# create and train model
model = linear_model.LogisticRegression(**hyperparams)
model.fit(X_val_train, y_val_train)  # train on the validation split created above, not the full set
# calculate and log validation accuracy
val_acc = model.score(X_val_test, y_val_test)
print(hyperparams, end=' ')
print("Validation accuracy: {:.4f}".format(val_acc))
# NOTE: run_experiment() could also be defined in a module, and executed in parallel
for hyperparams in hyperparam_sets:
run_experiment(hyperparams)
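# Added suggestion (not in the original walkthrough): if run_experiment() is
# changed to also return val_acc, the scores can be collected so the best
# combination is picked programmatically instead of read off the printout, e.g.
#   scored = [(run_experiment(hp), hp) for hp in hyperparam_sets]
#   best_hyperparams = max(scored, key=lambda t: t[0])[1]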
best_hyperparams = {}
model = linear_model.LogisticRegression(multi_class='auto', **best_hyperparams)
model.fit(X_train, y_train)
train_acc = model.score(X_train, y_train)
print("Training accuracy: {:.4f}".format(train_acc))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: Create validation set and sample
Step3: This was original output
Step4: Training & 10% for Validation numbers
Step5: Finetuning and Training
Step6: If you are training, stay here. If you are loading weights and creating a submission, skip down from here.
Step7: Validate Predictions
Step8: Generate Predictions
Step9: Submit Predictions to Kaggle!
|
<ASSISTANT_TASK:>
Python Code:
#Verify we are in the lesson1 directory
%pwd
%matplotlib inline
import os, sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
from utils import *
from vgg16 import Vgg16
from PIL import Image
from keras.preprocessing import image
from sklearn.metrics import confusion_matrix
current_dir = os.getcwd()
LESSON_HOME_DIR = current_dir
DATA_HOME_DIR = current_dir+'/data'
categories = sorted([os.path.basename(x) for x in glob(DATA_HOME_DIR+'/train/*')])
from shutil import copyfile
#Create directories
%cd $DATA_HOME_DIR
# did this once
%mkdir valid
%mkdir results
%mkdir -p sample/train
%mkdir -p sample/test
%mkdir -p sample/valid
%mkdir -p sample/results
%mkdir -p test/unknown
# Create subdirectories
for c in categories:
%mkdir -p valid/{c}
%mkdir -p sample/train/{c}
%mkdir -p sample/valid/{c}
%cd $DATA_HOME_DIR/train
# how many images we talking about?
for c in categories:
g = glob(c+"/*.jpg")
print c, len(g)
validation_ratio = 0.1
for c in categories:
g = glob(c+"/*.jpg")
shuf = np.random.permutation(g)
num_valid = int(validation_ratio*len(g))
for i in range(num_valid):
#print shuf[i], DATA_HOME_DIR+'/valid/' + shuf[i]
os.rename(shuf[i], DATA_HOME_DIR+'/valid/' + shuf[i])
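# Added note: os.rename moves the files, so re-running this cell moves another
# 10% of what remains in train/; to redo the split, restore the original train/
# directory first.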
# Now, how many images we talking about?
for c in categories:
g = glob(c+"/*.jpg")
print c, len(g),
g = glob("../valid/"+c+"/*.jpg")
print len(g)
# now create the sample train subset of 10 per category
for c in categories:
g = glob(c+"/*.jpg")
shuf = np.random.permutation(g)
for i in range(10):
#print shuf[i], DATA_HOME_DIR+'/sample/train/' + shuf[i]
copyfile(shuf[i], DATA_HOME_DIR+'/sample/train/' + shuf[i])
%cd $DATA_HOME_DIR/valid
# now create the sample valid subset of 2 per category
for c in categories:
g = glob(c+"/*.jpg")
shuf = np.random.permutation(g)
for i in range(2):
#print shuf[i], DATA_HOME_DIR+'/sample/valid/' + shuf[i]
copyfile(shuf[i], DATA_HOME_DIR+'/sample/valid/' + shuf[i])
!ls {DATA_HOME_DIR}/train/*/* |wc -l
!ls {DATA_HOME_DIR}/valid/*/* |wc -l
!ls {DATA_HOME_DIR}/sample/train/*/* |wc -l
!ls {DATA_HOME_DIR}/sample/valid/*/* |wc -l
# Create single 'unknown' class for test set
%cd $DATA_HOME_DIR/test_stg1
%mv *.jpg ../test/unknown/
!ls {DATA_HOME_DIR}/test
%cd $DATA_HOME_DIR
#Set path to sample/ path if desired
path = DATA_HOME_DIR + '/'
#path = DATA_HOME_DIR + '/sample/'
test_path = DATA_HOME_DIR + '/test/' #We use all the test data
results_path=DATA_HOME_DIR + '/results/'
train_path=path + '/train/'
valid_path=path + '/valid/'
vgg = Vgg16()
#Set constants. You can experiment with no_of_epochs to improve the model
batch_size=64
no_of_epochs=1
#Finetune the model
batches = vgg.get_batches(train_path, batch_size=batch_size)
val_batches = vgg.get_batches(valid_path, batch_size=batch_size*2)
vgg.finetune(batches)
#Not sure if we set this for all fits
vgg.model.optimizer.lr = 0.01
#Notice we are passing in the validation dataset to the fit() method
#For each epoch we test our model against the validation set
latest_weights_filename = None
#latest_weights_filename='ft24.h5'
#vgg.model.load_weights(results_path+latest_weights_filename)
# if you have run some epochs already...
epoch_offset=1 # trying again from ft1
for epoch in range(no_of_epochs):
print "Running epoch: %d" % (epoch + epoch_offset)
vgg.fit(batches, val_batches, nb_epoch=1)
latest_weights_filename = 'ft%d.h5' % (epoch + epoch_offset)
vgg.model.save_weights(results_path+latest_weights_filename)
print "Completed %s fit operations" % no_of_epochs
# only if you have to
latest_weights_filename='ft1.h5'
vgg.model.load_weights(results_path+latest_weights_filename)
val_batches, probs = vgg.test(valid_path, batch_size = batch_size)
filenames = val_batches.filenames
expected_labels = val_batches.classes # 0 - 7
#Take the most probable class as the predicted label, and keep its probability
#(the visualisation cells below, adapted from the 2-class notebook, use it).
our_predictions = np.max(probs, axis=1)
our_labels = np.argmax(probs, axis=1)
cm = confusion_matrix(expected_labels, our_labels)
plot_confusion_matrix(cm, val_batches.class_indices)
#Helper function to plot images by index in the validation set
#Plots is a helper function in utils.py
def plots_idx(idx, titles=None):
plots([image.load_img(valid_path + filenames[i]) for i in idx], titles=titles)
#Number of images to view for each visualization task
n_view = 4
#1. A few correct labels at random
correct = np.where(our_labels==expected_labels)[0]
print "Found %d correct labels" % len(correct)
idx = permutation(correct)[:n_view]
plots_idx(idx, our_predictions[idx])
#2. A few incorrect labels at random
incorrect = np.where(our_labels!=expected_labels)[0]
print "Found %d incorrect labels" % len(incorrect)
idx = permutation(incorrect)[:n_view]
plots_idx(idx, our_predictions[idx])
val_batches.class_indices
#3a. The images we most confident were X, and are actually X
X='YFT'
Xi=val_batches.class_indices[X]
correct_cats = np.where((our_labels==Xi) & (our_labels==expected_labels))[0]
print "Found %d confident correct %s labels" % (len(correct_cats),X)
most_correct_cats = np.argsort(our_predictions[correct_cats])[::-1][:n_view]
plots_idx(correct_cats[most_correct_cats], our_predictions[correct_cats][most_correct_cats])
#4a. The images we were most confident were cats, but are actually dogs
incorrect_cats = np.where((our_labels==0) & (our_labels!=expected_labels))[0]
print "Found %d incorrect cats" % len(incorrect_cats)
if len(incorrect_cats):
most_incorrect_cats = np.argsort(our_predictions[incorrect_cats])[::-1][:n_view]
plots_idx(incorrect_cats[most_incorrect_cats], our_predictions[incorrect_cats][most_incorrect_cats])
#4b. The images we were most confident were dogs, but are actually cats
incorrect_dogs = np.where((our_labels==1) & (our_labels!=expected_labels))[0]
print "Found %d incorrect dogs" % len(incorrect_dogs)
if len(incorrect_dogs):
most_incorrect_dogs = np.argsort(our_predictions[incorrect_dogs])[:n_view]
plots_idx(incorrect_dogs[most_incorrect_dogs], our_predictions[incorrect_dogs][most_incorrect_dogs])
#5. The most uncertain labels (ie those with probability closest to 0.5).
most_uncertain = np.argsort(np.abs(our_predictions-0.5))
plots_idx(most_uncertain[:n_view], our_predictions[most_uncertain])
batches, preds = vgg.test(test_path, batch_size = batch_size*2)
# Error allocating 3347316736 bytes of device memory (out of memory).
# got this error when batch-size = 128
# I see this pop up to 6GB memory with batch_size = 64 & this takes some time...
#For every image, vgg.test() generates two probabilities
#based on how we've ordered the cats/dogs directories.
#It looks like column one is cats and column two is dogs
print preds[:5]
filenames = batches.filenames
print filenames[:5]
#You can verify the column ordering by viewing some images
Image.open(test_path + filenames[1])
#Save our test results arrays so we can use them again later
save_array(results_path + 'test_preds.dat', preds)
save_array(results_path + 'filenames.dat', filenames)
#Load our test predictions from file
preds = load_array(results_path + 'test_preds.dat')
filenames = load_array(results_path + 'filenames.dat')
#Grab the dog prediction column
isdog = preds[:,1]
print "Raw Predictions: " + str(isdog[:5])
print "Mid Predictions: " + str(isdog[(isdog < .6) & (isdog > .4)])
print "Edge Predictions: " + str(isdog[(isdog == 1) | (isdog == 0)])
#play it safe, round down our edge predictions
#isdog = isdog.clip(min=0.05, max=0.95)
#isdog = isdog.clip(min=0.02, max=0.98)
isdog = isdog.clip(min=0.01, max=0.99)
#Extract imageIds from the filenames in our test/unknown directory
filenames = batches.filenames
ids = np.array([int(f[8:f.find('.')]) for f in filenames])
subm = np.stack([ids,isdog], axis=1)
subm[:5]
%cd $DATA_HOME_DIR
submission_file_name = 'submission4.csv'
np.savetxt(submission_file_name, subm, fmt='%d,%.5f', header='id,label', comments='')
from IPython.display import FileLink
%cd $LESSON_HOME_DIR
FileLink('data/'+submission_file_name)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we import the data for the watershed we want to route flow on. You will want to change this code block for the different scenarios. Initially you do not need to change anything.
Step2: Now we set the boundary conditions, initialize the process components, and set the appropriate storm parameters.
Step3: Before we go further, let's pause to look at the landscape that we will be routing flow over.
Step4: Initialize a few more parameters and get ready to run the time loop, saving data for plotting.
Step5: Now the time loop that generates overland flow.
Step6: Let's look at the data.
|
<ASSISTANT_TASK:>
Python Code:
## only needed for plotting in a jupyter notebook.
%matplotlib inline
## Code Block 1
import copy
import numpy as np
from matplotlib import pyplot as plt
from landlab import imshow_grid
from landlab.components import OverlandFlow, FlowAccumulator
from landlab.io import read_esri_ascii
## Code Block 2
basin_flag = "Square" # 'Square' or Long'
storm_flag = "Base" # 'Base' or'HigherIntensity' or 'LongerDuration'
## If the basin flag matches one of the two select basins,
## below will set the filename which to read the DEM from and
## the outlet link and upstream link to sample discharge values
## from for plotting.
if basin_flag == "Square":
watershed_dem = "Square_TestBasin.asc"
## Reading in the DEM given the filename from above
(rmg, z) = read_esri_ascii(watershed_dem, name="topographic__elevation")
outlet_node_to_sample = 300
outlet_link_to_sample = rmg.links_at_node[outlet_node_to_sample][3]
upstream_node_to_sample = 28689
upstream_link_to_sample = rmg.links_at_node[upstream_node_to_sample][3]
midstream_node_to_sample = 9102
midstream_link_to_sample = rmg.links_at_node[midstream_node_to_sample][3]
else:
watershed_dem = "Long_TestBasin.asc"
## Reading in the DEM given the filename from above
(rmg, z) = read_esri_ascii(watershed_dem, name="topographic__elevation")
outlet_node_to_sample = 150
outlet_link_to_sample = rmg.links_at_node[outlet_node_to_sample][3]
upstream_node_to_sample = 33859
upstream_link_to_sample = rmg.links_at_node[upstream_node_to_sample][3]
midstream_node_to_sample = 14658
midstream_link_to_sample = rmg.links_at_node[midstream_node_to_sample][2]
## The Flow Router calculates drainage area, which is helpful for
## calculating equilibrium discharge, which we illustrate later.
fr = FlowAccumulator(rmg) # Instantiate flow router
fr.run_one_step() # Drainage area calculated
## Code Block 3
## Set boundary coditions on the grid
rmg.set_watershed_boundary_condition(z)
## instantiate OverlandFlow object
rmg.add_zeros("surface_water__depth", at="node")
of = OverlandFlow(rmg, alpha=0.45, steep_slopes=True)
## Assign storm conditions based on flag in Code Block 2
if storm_flag == "Base":
starting_precip_mmhr = 5.0
starting_precip_ms = starting_precip_mmhr * (2.77778 * 10 ** -7)
storm_duration = 7200.0
elif storm_flag == "HigherIntensity":
starting_precip_mmhr = 10.0
starting_precip_ms = starting_precip_mmhr * (2.77778 * 10 ** -7)
storm_duration = 3600.0
elif storm_flag == "LongerDuration":
starting_precip_mmhr = 2.5
starting_precip_ms = starting_precip_mmhr * (2.77778 * 10 ** -7)
storm_duration = 14400.0
## Code Block 4
plt.figure(1)
imshow_grid(rmg, z) # plot the DEM
plt.plot(rmg.node_x[outlet_node_to_sample], rmg.node_y[outlet_node_to_sample], "yo")
plt.plot(rmg.node_x[upstream_node_to_sample], rmg.node_y[upstream_node_to_sample], "bo")
plt.plot(
rmg.node_x[midstream_node_to_sample], rmg.node_y[midstream_node_to_sample], "go"
)
## Code Block 5
elapsed_time = 1.0 # s
model_run_time = 43200.0 # s
## Lists for saving data
discharge_at_outlet = []
discharge_upstream = []
discharge_midstream = []
hydrograph_time = []
## Setting initial fields...
rmg["node"]["surface_water__discharge"] = np.zeros(rmg.number_of_nodes)
## Code Block 6
while elapsed_time < model_run_time:
# Setting the adaptive time step
of.dt = of.calc_time_step()
## The storm starts when the model starts. While the elapsed time is less
## than the storm duration, we add water to the system as rainfall.
if elapsed_time < (storm_duration):
of.rainfall_intensity = starting_precip_ms
else: # elapsed time exceeds the storm duration, rainfall ceases.
of.rainfall_intensity = 0.0
of.run_one_step() # Generating overland flow based on the deAlmeida solution.
## Append time and discharge to their lists to save data and for plotting.
hydrograph_time.append(elapsed_time)
q = rmg.at_link["surface_water__discharge"]
discharge_at_outlet.append(np.abs(q[outlet_link_to_sample]) * rmg.dx)
discharge_upstream.append(np.abs(q[upstream_link_to_sample]) * rmg.dx)
discharge_midstream.append(np.abs(q[midstream_link_to_sample]) * rmg.dx)
## output time every now and then so that you know the code
## is actually running
if (elapsed_time % 100) < 2:
print("elapsed time = ", elapsed_time)
## Updating elapsed_time
elapsed_time += of.dt
## Code Block 7
## Calculate equilibrium discharge at each point for reference
outlet_eq_q = starting_precip_ms * rmg.at_node["drainage_area"][outlet_node_to_sample]
midstream_eq_q = (
starting_precip_ms * rmg.at_node["drainage_area"][midstream_node_to_sample]
)
upstream_eq_q = (
starting_precip_ms * rmg.at_node["drainage_area"][upstream_node_to_sample]
)
## Plotting hydrographs and equilibrium discharge
plt.figure(2)
plt.plot(hydrograph_time, discharge_at_outlet, "y-", label="outlet")
plt.plot(
[np.min(hydrograph_time), np.max(hydrograph_time)],
[outlet_eq_q, outlet_eq_q],
"y--",
label="outlet eq Q",
)
plt.plot(hydrograph_time, discharge_midstream, "g-", label="midstream")
plt.plot(
[np.min(hydrograph_time), np.max(hydrograph_time)],
[midstream_eq_q, midstream_eq_q],
"g--",
label="midstream eq Q",
)
plt.plot(hydrograph_time, discharge_upstream, "b-", label="upstream")
plt.plot(
[np.min(hydrograph_time), np.max(hydrograph_time)],
[upstream_eq_q, upstream_eq_q],
"b--",
label="upstream eq Q",
)
## Plot storm end and center of storm for reference
plt.plot(
[storm_duration, storm_duration], [0, 100], "k-", linewidth=2, label="storm end"
)
plt.plot(
[storm_duration / 2, storm_duration / 2], [0, 100], "k:", label="storm mid point"
)
plt.ylabel("Discharge (cms)")
plt.xlabel("Time (seconds)")
plt.legend(loc="upper right")
title_text = "Hydrographs, Storm is " + storm_flag + ", Watershed is " + basin_flag
plt.title(title_text)
plt.axis([0, np.max(hydrograph_time), 0, 100])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Link the O$_2$scl library
Step2: Get the value of $\hbar c$ from an O$_2$scl find_constants object
Step3: Get a copy (a pointer to) the O$_2$scl unit conversion object
Step4: Use the cloud_file object to download the EOS
Step5: Read the tensor which stores the average mass number
Step6: In order to make a plot at fixed Ye, we first need to construct an index vector which selects the Ye grid point to slice at
Step7: Create a table3d object
Step8: Now plot the results. Raw matplotlib works, but o2sclpy has a plotter class which provides some convenient defaults
Step9: For testing purposes
|
<ASSISTANT_TASK:>
Python Code:
import o2sclpy
import matplotlib.pyplot as plot
import numpy
import sys
plots=True
if 'pytest' in sys.modules:
plots=False
link=o2sclpy.linker()
link.link_o2scl()
fc=o2sclpy.find_constants(link)
ħc=fc.find_unique('ħc','MeV*fm')
print('ħc = %7.6e\n' % (ħc))
cu=link.o2scl_settings.get_convert_units()
cf=o2sclpy.cloud_file(link)
cf.verbose=1
cf.get_file('dsh.o2','https://isospin.roam.utk.edu/public_data'+
'/eos_tables/du21/fid_3_5_22.o2')
hf=o2sclpy.hdf_file(link)
tg_A=o2sclpy.tensor_grid(link)
hf.open('dsh.o2')
o2sclpy.hdf_input_tensor_grid(link,hf,tg_A,'A')
hf.close()
ix=o2sclpy.std_vector_size_t(link)
ix.resize(3)
ix[1]=tg_A.lookup_grid(1,0.4)
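# Note (assumed grid ordering): index 0 of the tensor grid runs over n_B, index 1
# over Y_e and index 2 over T, so lookup_grid(1,0.4) picks the grid point closest
# to Y_e = 0.4 and the table3d slice built below is at (approximately) fixed electron fraction.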
t3d=o2sclpy.table3d(link)
tg_A.copy_table3d_align_setxy(0,2,ix,t3d,'nB','T','A')
if plots:
pl=o2sclpy.plotter()
pl.colbar=True
pl.xtitle(r'$ n_B~(\mathrm{fm}^{-3}) $')
pl.ytitle(r'$ T~(\mathrm{MeV}) $')
pl.ttext(1.25,0.5,u'$ A $',rotation=90)
pl.den_plot_direct(t3d,'A')
plot.show()
def test_fun():
assert numpy.allclose(t3d.get(0,0,'A'),81,rtol=0.1)
return
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import Policy, RL agent, ...
Step3: Define a Callback Function
Step4: Create and wrap the environment
Step5: Define and train the PPO agent
Step8: Plotting helpers
Step9: Make a GIF of a Trained Agent
Step10: Evaluate the trained agent
|
<ASSISTANT_TASK:>
Python Code:
import stable_baselines
stable_baselines.__version__
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import gym
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2
from stable_baselines.bench import Monitor
from stable_baselines.common.callbacks import BaseCallback
from stable_baselines.results_plotter import load_results, ts2xy
from stable_baselines import results_plotter
sns.set_context("talk")
class SaveOnBestTrainingRewardCallback(BaseCallback):
    """
    Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
:param check_freq: (int)
:param log_dir: (str) Path to the folder where the model will be saved.
It must contains the file created by the ``Monitor`` wrapper.
    :param verbose: (int)
    """
def __init__(self, check_freq: int, log_dir: str, verbose=1):
super(SaveOnBestTrainingRewardCallback, self).__init__(verbose)
self.check_freq = check_freq
self.log_dir = log_dir
self.save_path = os.path.join(log_dir, 'best_model')
self.best_mean_reward = -np.inf
def _init_callback(self) -> None:
# Create folder if needed
if self.save_path is not None:
os.makedirs(self.save_path, exist_ok=True)
def _on_step(self) -> bool:
if self.n_calls % self.check_freq == 0:
# Retrieve training reward
x, y = ts2xy(load_results(self.log_dir), 'timesteps')
if len(x) > 0:
# Mean training reward over the last 100 episodes
mean_reward = np.mean(y[-100:])
if self.verbose > 0:
print("Num timesteps: {}".format(self.num_timesteps))
print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(self.best_mean_reward, mean_reward))
# New best model, you could save the agent here
if mean_reward > self.best_mean_reward:
self.best_mean_reward = mean_reward
# Example for saving best model
if self.verbose > 0:
print("Saving new best model to {}".format(self.save_path))
self.model.save(self.save_path)
return True
# Create log dir
log_dir = "/tmp/gym/"
os.makedirs(log_dir, exist_ok=True)
# Create and wrap the environment
env = gym.make('CartPole-v1')
# Logs will be saved in log_dir/monitor.csv
env = Monitor(env, log_dir)
env = DummyVecEnv([lambda: env]) # PPO2 requires a vectorized environment to run
# Create the callback: check every 1000 steps
callback = SaveOnBestTrainingRewardCallback(check_freq=1000, log_dir=log_dir)
model = PPO2(MlpPolicy, env, verbose=1)
model.learn(total_timesteps=100000, callback=callback)
# Helper from the library
results_plotter.plot_results([log_dir], 1e5, results_plotter.X_TIMESTEPS, "PPO Cartpole")
def moving_average(values, window):
    """
    Smooth values by doing a moving average
:param values: (numpy array)
:param window: (int)
    :return: (numpy array)
    """
weights = np.repeat(1.0, window) / window
return np.convolve(values, weights, 'valid')
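# Quick illustration with made-up numbers: a window-3 moving average shortens the
# series by window-1 points, e.g. moving_average(np.array([1., 2., 3., 4.]), 3)
# returns array([2., 3.]).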
def plot_results(log_folder, title='Learning Curve'):
    """
    plot the results
:param log_folder: (str) the save location of the results to plot
    :param title: (str) the title of the task to plot
    """
x, y = ts2xy(load_results(log_folder), 'timesteps')
y = moving_average(y, window=50)
# Truncate x
x = x[len(x) - len(y):]
fig = plt.figure(title, figsize=(16,6))
plt.plot(x, y)
plt.xlabel('Number of Timesteps')
plt.ylabel('Rewards')
plt.title(title + " Smoothed")
plt.show()
plot_results(log_dir)
import imageio
images = []
obs = model.env.reset()
img = model.env.render(mode='rgb_array')
for i in range(350):
images.append(img)
action, _ = model.predict(obs)
obs, _, _ ,_ = model.env.step(action)
img = model.env.render(mode='rgb_array')
imageio.mimsave('ppo_cartpole.gif', [np.array(img) for i, img in enumerate(images) if i%2 == 0], fps=29)
import IPython
from IPython.display import Image
Image(open('ppo_cartpole.gif','rb').read()) # https://stackoverflow.com/questions/61110188/how-to-display-a-gif-in-jupyter-notebook-using-google-colab
env._max_episode_steps = 1000
reward_list = []
NUM_EPISODES = 100
for episode_index in range(NUM_EPISODES):
reward_sum = 0
obs = env.reset()
done = False
while not done:
action, _states = model.predict(obs)
obs, reward, done, info = env.step(action)
reward_sum += reward
#env.render() # Cannot render on Google Colab
reward_list.append(reward_sum)
print("Mean reward:", sum(reward_list) / NUM_EPISODES)
env.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Language Translation
Step3: Explore the Data
Step6: Implement Preprocessing Function
Step8: Preprocess all the data and save it
Step10: Check Point
Step12: Check the Version of TensorFlow and Access to GPU
Step15: Build the Neural Network
Step18: Process Decoder Input
Step21: Encoding
Step24: Decoding - Training
Step27: Decoding - Inference
Step30: Build the Decoding Layer
Step33: Build the Neural Network
Step34: Neural Network Training
Step36: Build the Graph
Step40: Batch and pad the source and target sequences
Step43: Train
Step45: Save Parameters
Step47: Checkpoint
Step50: Sentence to Sequence
Step52: Translate
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
    """
    Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: A tuple of lists (source_id_text, target_id_text)
    """
source_ids = []
target_ids = []
for sentence in source_text.split("\n"):
source_ids.append([source_vocab_to_int[word] for word in sentence.split(' ') if word != ''])
for sentence in target_text.split("\n"):
target_ids.append([target_vocab_to_int[word] for word in sentence.split(' ') if word != ''] + [target_vocab_to_int['<EOS>']])
return source_ids, target_ids
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_text_to_ids(text_to_ids)
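# Illustrative example with a made-up vocabulary: if source_vocab_to_int maps
# {'new': 28, 'jersey': 29} and target_vocab_to_int maps {'<EOS>': 1, 'new': 30, 'jersey': 31},
# then text_to_ids('new jersey', 'new jersey', ...) returns ([[28, 29]], [[30, 31, 1]]),
# i.e. only the target sentences get the trailing <EOS> id appended.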
DON'T MODIFY ANYTHING IN THIS CELL
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
import helper
import problem_unittests as tests
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
from tensorflow.python.layers.core import Dense
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def model_inputs():
    """
    Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.
:return: Tuple (input, targets, learning rate, keep probability, target sequence length,
    max target sequence length, source sequence length)
    """
input_text = tf.placeholder(tf.int32, [None, None], name="input")
target_text = tf.placeholder(tf.int32, [None, None], name="targets")
lr = tf.placeholder(tf.float32, name="learning_rate")
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
target_sequence_length = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
max_target_sequence_length = tf.reduce_max(target_sequence_length, name='max_target_len')
source_sequence_length = tf.placeholder(tf.int32, (None,), name='source_sequence_length')
return input_text, target_text, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_inputs(model_inputs)
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
    """
    Preprocess target data for encoding
:param target_data: Target Placehoder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
    :return: Preprocessed target data
    """
ending = tf.strided_slice(target_data, [0,0], [batch_size, -1], [1,1])
dec_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)
return dec_input
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_process_encoding_input(process_decoder_input)
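# Worked example with a made-up batch: for target_data [[10, 11, 12], [20, 21, 22]],
# batch_size 2 and a '<GO>' id of 1, the strided slice drops the last column and the
# concat prepends the GO token, producing [[1, 10, 11], [1, 20, 21]].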
from imp import reload
reload(tests)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
source_sequence_length, source_vocab_size,
encoding_embedding_size):
    """
    Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:param source_sequence_length: a list of the lengths of each sequence in the batch
:param source_vocab_size: vocabulary size of source data
:param encoding_embedding_size: embedding size of source data
    :return: tuple (RNN output, RNN state)
    """
def make_cell(rnn_size, keep_prob):
cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1))
drop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
return drop
embed_input = tf.contrib.layers.embed_sequence(rnn_inputs, source_vocab_size, encoding_embedding_size)
cells = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size, keep_prob) for i in range(num_layers)])
outputs, state = tf.nn.dynamic_rnn(cells, embed_input,
sequence_length=source_sequence_length,
dtype=tf.float32)
return outputs, state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_encoding_layer(encoding_layer)
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
target_sequence_length, max_summary_length,
output_layer, keep_prob):
    """
    Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_summary_length: The length of the longest sequence in the batch
:param output_layer: Function to apply the output layer
:param keep_prob: Dropout keep probability
    :return: BasicDecoderOutput containing training logits and sample_id
    """
with tf.variable_scope("decode"):
training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input,
sequence_length=target_sequence_length,
time_major=False)
training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, training_helper, encoder_state, output_layer)
output = tf.contrib.seq2seq.dynamic_decode(training_decoder,
impute_finished=True,
maximum_iterations=max_summary_length)[0]
return output
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_train(decoding_layer_train)
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
end_of_sequence_id, max_target_sequence_length,
vocab_size, output_layer, batch_size, keep_prob):
    """
    Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param max_target_sequence_length: Maximum length of target sequences
:param vocab_size: Size of decoder/target vocabulary
:param decoding_scope: TenorFlow Variable Scope for decoding
:param output_layer: Function to apply the output layer
:param batch_size: Batch size
:param keep_prob: Dropout keep probability
    :return: BasicDecoderOutput containing inference logits and sample_id
    """
start_tokens = tf.tile(tf.constant([start_of_sequence_id], dtype=tf.int32), [batch_size])
inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings,
start_tokens,
end_of_sequence_id)
inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
inference_helper,
encoder_state,
output_layer)
output = tf.contrib.seq2seq.dynamic_decode(inference_decoder,
impute_finished=True,
maximum_iterations=max_target_sequence_length)[0]
return output
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_infer(decoding_layer_infer)
def decoding_layer(dec_input, encoder_state,
target_sequence_length, max_target_sequence_length,
rnn_size,
num_layers, target_vocab_to_int, target_vocab_size,
batch_size, keep_prob, decoding_embedding_size):
    """
    Create decoding layer
:param dec_input: Decoder input
:param encoder_state: Encoder state
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_target_sequence_length: Maximum length of target sequences
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param target_vocab_size: Size of target vocabulary
:param batch_size: The size of the batch
:param keep_prob: Dropout keep probability
:param decoding_embedding_size: Decoding embedding size
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
def make_cell(rnn_size, keep_prob):
cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
drop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
return drop
embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
embed_input = tf.nn.embedding_lookup(embeddings, dec_input)
dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size, keep_prob) for i in range(num_layers)])
output_layer = Dense(target_vocab_size,
kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
with tf.variable_scope("decode"):
training_decoder_output = decoding_layer_train(encoder_state,
dec_cell,
embed_input,
target_sequence_length,
max_target_sequence_length,
output_layer,
keep_prob)
with tf.variable_scope("decode", reuse=True):
inferer_decoder_output = decoding_layer_infer(encoder_state,
dec_cell,
embeddings,
target_vocab_to_int['<GO>'],
target_vocab_to_int['<EOS>'],
max_target_sequence_length,
target_vocab_size,
output_layer,
batch_size,
keep_prob)
return training_decoder_output, inferer_decoder_output
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer(decoding_layer)
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
source_sequence_length, target_sequence_length,
max_target_sentence_length,
source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size,
rnn_size, num_layers, target_vocab_to_int):
    """
    Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param source_sequence_length: Sequence Lengths of source sequences in the batch
:param target_sequence_length: Sequence Lengths of target sequences in the batch
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
:param enc_embedding_size: Decoder embedding size
:param dec_embedding_size: Encoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
_, enc_state = encoding_layer(input_data, rnn_size, num_layers, keep_prob,
source_sequence_length, source_vocab_size,
enc_embedding_size)
dec_input = process_decoder_input(target_data, target_vocab_to_int, batch_size)
train_dec, infer_dec = decoding_layer(dec_input, enc_state,
target_sequence_length, max_target_sentence_length,
rnn_size, num_layers,
target_vocab_to_int, target_vocab_size,
batch_size, keep_prob, dec_embedding_size)
return train_dec, infer_dec
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_seq2seq_model(seq2seq_model)
# Number of Epochs
epochs = 5
# Batch Size
batch_size = 64
# RNN Size
rnn_size = 128
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 128
decoding_embedding_size = 128
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.5
display_step = 100
DON'T MODIFY ANYTHING IN THIS CELL
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
#sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]),
targets,
keep_prob,
batch_size,
source_sequence_length,
target_sequence_length,
max_target_sequence_length,
len(source_vocab_to_int),
len(target_vocab_to_int),
encoding_embedding_size,
decoding_embedding_size,
rnn_size,
num_layers,
target_vocab_to_int)
training_logits = tf.identity(train_logits.rnn_output, name='logits')
inference_logits = tf.identity(inference_logits.sample_id, name='predictions')
masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
training_logits,
targets,
masks)
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
def pad_sentence_batch(sentence_batch, pad_int):
    """Pad sentences with <PAD> so that each sentence of a batch has the same length"""
max_sentence = max([len(sentence) for sentence in sentence_batch])
return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]
def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):
    """Batch targets, sources, and the lengths of their sentences together"""
for batch_i in range(0, len(sources)//batch_size):
start_i = batch_i * batch_size
# Slice the right amount for the batch
sources_batch = sources[start_i:start_i + batch_size]
targets_batch = targets[start_i:start_i + batch_size]
# Pad
pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))
# Need the lengths for the _lengths parameters
pad_targets_lengths = []
for target in pad_targets_batch:
pad_targets_lengths.append(len(target))
pad_source_lengths = []
for source in pad_sources_batch:
pad_source_lengths.append(len(source))
yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths
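# Worked example with a made-up batch: pad_sentence_batch([[1, 2, 3], [4]], pad_int=0)
# returns [[1, 2, 3], [4, 0, 0]], so every batch fed to the graph is rectangular.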
DON'T MODIFY ANYTHING IN THIS CELL
def get_accuracy(target, logits):
    """Calculate accuracy"""
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1])],
'constant')
return np.mean(np.equal(target, logits))
# Split data to training and validation sets
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = source_int_text[:batch_size]
valid_target = target_int_text[:batch_size]
(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source,
valid_target,
batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>']))
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate(
get_batches(train_source, train_target, batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>'])):
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
target_sequence_length: targets_lengths,
source_sequence_length: sources_lengths,
keep_prob: keep_probability})
if batch_i % display_step == 0 and batch_i > 0:
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch,
source_sequence_length: sources_lengths,
target_sequence_length: targets_lengths,
keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_sources_batch,
source_sequence_length: valid_sources_lengths,
target_sequence_length: valid_targets_lengths,
keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits)
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params(save_path)
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
def sentence_to_seq(sentence, vocab_to_int):
    """
    Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
    :return: List of word ids
    """
unk_id = vocab_to_int["<UNK>"]
return [vocab_to_int.get(word, unk_id) for word in sentence.split(" ")]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_sentence_to_seq(sentence_to_seq)
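# Illustrative behaviour: any word missing from vocab_to_int falls back to the '<UNK>'
# id, e.g. with a vocabulary {'<UNK>': 2, 'he': 7} the sentence 'he sings' maps to [7, 2].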
translate_sentence = 'he saw a old yellow truck .'
DON'T MODIFY ANYTHING IN THIS CELL
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('predictions:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size,
target_sequence_length: [len(translate_sentence)*2]*batch_size,
source_sequence_length: [len(translate_sentence)]*batch_size,
keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in translate_logits]))
print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits])))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Ragged Tensors
Step2: Overview
Step3: There are also a number of methods and operations that are specific to ragged tensors.
Step4: And just like normal tensors, you can use Python arithmetic and comparison operators to perform elementwise operations.
Step5: If you need to perform an elementwise transformation to the values of a RaggedTensor, you can use tf.ragged.map_flat_values, which takes a function plus one or more arguments, and applies the function to transform the RaggedTensor's values.
Step6: Constructing a ragged tensor
Step7: Ragged tensors can also be constructed by pairing flat values tensors with row-partitioning tensors that indicate how those values should be divided into rows.
Step8: tf.RaggedTensor.from_row_lengths
Step9: tf.RaggedTensor.from_row_splits
Step10: See the tf.RaggedTensor class documentation for a full list of factory methods.
Step11: Example use case
Step12: Ragged tensors
Step13: The method tf.RaggedTensor.bounding_shape can be used to find a tight bounding shape for a given RaggedTensor.
Step14: Ragged vs sparse tensors
Step15: But concatenating sparse tensors is equivalent to concatenating the corresponding dense tensors, as illustrated by the following example.
Step16: For another example of why this distinction is important, consider the definition of the mean value of each row for ragged versus sparse tensors.
Step17: Since the overloaded operators perform elementwise computations, the inputs to these operations must have compatible shapes.
Step18: For a discussion of more advanced cases, see the section on broadcasting.
Step19: Indexing a 3-D ragged tensor with 2 ragged dimensions
Step20: RaggedTensors support multidimensional indexing and slicing, with one restriction: indexing into a ragged dimension is not allowed.
Step21: Evaluating ragged tensors
Step22: Use Python indexing. If the tensor piece you select contains no ragged dimensions, then it will be returned as a standard Tensor value.
Step23: Decompose the ragged tensor into its components, using the tf.RaggedTensor.values and tf.RaggedTensor.row_splits properties.
Step24: Graph execution
Step25: The resulting value will be a tf.ragged.RaggedTensorValue instance.
Step26: Decompose the ragged tensor into its components, using the tf.RaggedTensor.values and tf.RaggedTensor.row_splits properties.
Step27: Broadcasting
Step28: Here are some examples of shapes that do not broadcast
Step29: RaggedTensor encoding
Step30: Multiple ragged dimensions
Step31: The factory function tf.RaggedTensor.from_nested_row_splits may be used to construct a RaggedTensor with multiple ragged dimensions directly, by providing a list of row_splits tensors.
Step32: Uniform Inner Dimensions
Step33: Alternative row-partitioning schemes
Step34: The RaggedTensor class defines methods which can be used to construct ragged tensors using each of these row-partitioning schemes.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import tensorflow.compat.v1 as tf
digits = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
words = tf.ragged.constant([["So", "long"], ["thanks", "for", "all", "the", "fish"]])
print(tf.add(digits, 3))
print(tf.reduce_mean(digits, axis=1))
print(tf.concat([digits, [[5, 3]]], axis=0))
print(tf.tile(digits, [1, 2]))
print(tf.strings.substr(words, 0, 2))
print(digits[0]) # First row
print(digits[:, :2]) # First two values in each row.
print(digits[:, -2:]) # Last two values in each row.
print(digits + 3)
print(digits + tf.ragged.constant([[1, 2, 3, 4], [], [5, 6, 7], [8], []]))
times_two_plus_one = lambda x: x * 2 + 1
print(tf.ragged.map_flat_values(times_two_plus_one, digits))
sentences = tf.ragged.constant([
["Let's", "build", "some", "ragged", "tensors", "!"],
["We", "can", "use", "tf.ragged.constant", "."]])
print(sentences)
paragraphs = tf.ragged.constant([
[['I', 'have', 'a', 'cat'], ['His', 'name', 'is', 'Mat']],
[['Do', 'you', 'want', 'to', 'come', 'visit'], ["I'm", 'free', 'tomorrow']],
])
print(paragraphs)
print(tf.RaggedTensor.from_value_rowids(
values=[3, 1, 4, 1, 5, 9, 2, 6],
value_rowids=[0, 0, 0, 0, 2, 2, 2, 3]))
print(tf.RaggedTensor.from_row_lengths(
values=[3, 1, 4, 1, 5, 9, 2, 6],
row_lengths=[4, 0, 3, 1]))
print(tf.RaggedTensor.from_row_splits(
values=[3, 1, 4, 1, 5, 9, 2, 6],
row_splits=[0, 4, 4, 7, 8]))
print(tf.ragged.constant([["Hi"], ["How", "are", "you"]])) # ok: type=string, rank=2
print(tf.ragged.constant([[[1, 2], [3]], [[4, 5]]])) # ok: type=int32, rank=3
try:
tf.ragged.constant([["one", "two"], [3, 4]]) # bad: multiple types
except ValueError as exception:
print(exception)
try:
tf.ragged.constant(["A", ["B", "C"]]) # bad: multiple nesting depths
except ValueError as exception:
print(exception)
queries = tf.ragged.constant([['Who', 'is', 'Dan', 'Smith'],
['Pause'],
['Will', 'it', 'rain', 'later', 'today']])
# Create an embedding table.
num_buckets = 1024
embedding_size = 4
embedding_table = tf.Variable(
tf.truncated_normal([num_buckets, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
# Look up the embedding for each word.
word_buckets = tf.strings.to_hash_bucket_fast(queries, num_buckets)
word_embeddings = tf.ragged.map_flat_values(
tf.nn.embedding_lookup, embedding_table, word_buckets) # ①
# Add markers to the beginning and end of each sentence.
marker = tf.fill([queries.nrows(), 1], '#')
padded = tf.concat([marker, queries, marker], axis=1) # ②
# Build word bigrams & look up embeddings.
bigrams = tf.string_join([padded[:, :-1], padded[:, 1:]], separator='+') # ③
bigram_buckets = tf.strings.to_hash_bucket_fast(bigrams, num_buckets)
bigram_embeddings = tf.ragged.map_flat_values(
tf.nn.embedding_lookup, embedding_table, bigram_buckets) # ④
# Find the average embedding for each sentence
all_embeddings = tf.concat([word_embeddings, bigram_embeddings], axis=1) # ⑤
avg_embedding = tf.reduce_mean(all_embeddings, axis=1) # ⑥
print(avg_embedding)
tf.ragged.constant([["Hi"], ["How", "are", "you"]]).shape
print(tf.ragged.constant([["Hi"], ["How", "are", "you"]]).bounding_shape())
ragged_x = tf.ragged.constant([["John"], ["a", "big", "dog"], ["my", "cat"]])
ragged_y = tf.ragged.constant([["fell", "asleep"], ["barked"], ["is", "fuzzy"]])
print(tf.concat([ragged_x, ragged_y], axis=1))
sparse_x = ragged_x.to_sparse()
sparse_y = ragged_y.to_sparse()
sparse_result = tf.sparse.concat(sp_inputs=[sparse_x, sparse_y], axis=1)
print(tf.sparse.to_dense(sparse_result, ''))
x = tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
y = tf.ragged.constant([[1, 1], [2], [3, 3, 3]])
print(x + y)
x = tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
print(x + 3)
queries = tf.ragged.constant(
[['Who', 'is', 'George', 'Washington'],
['What', 'is', 'the', 'weather', 'tomorrow'],
['Goodnight']])
print(queries[1])
print(queries[1, 2]) # A single word
print(queries[1:]) # Everything but the first row
print(queries[:, :3]) # The first 3 words of each query
print(queries[:, -2:]) # The last 2 words of each query
rt = tf.ragged.constant([[[1, 2, 3], [4]],
[[5], [], [6]],
[[7]],
[[8, 9], [10]]])
print(rt[1]) # Second row (2-D RaggedTensor)
print(rt[3, 0]) # First element of fourth row (1-D Tensor)
print(rt[:, 1:3]) # Items 1-3 of each row (3-D RaggedTensor)
print(rt[:, -1:]) # Last item of each row (3-D RaggedTensor)
ragged_sentences = tf.ragged.constant([
['Hi'], ['Welcome', 'to', 'the', 'fair'], ['Have', 'fun']])
print(ragged_sentences.to_tensor(default_value=''))
print(ragged_sentences.to_sparse())
x = [[1, 3, -1, -1], [2, -1, -1, -1], [4, 5, 8, 9]]
print(tf.RaggedTensor.from_tensor(x, padding=-1))
st = tf.SparseTensor(indices=[[0, 0], [2, 0], [2, 1]],
values=['a', 'b', 'c'],
dense_shape=[3, 3])
print(tf.RaggedTensor.from_sparse(st))
rt = tf.ragged.constant([[1, 2], [3, 4, 5], [6], [], [7]])
print(rt.to_list())
print(rt[1].numpy())
print(rt.values)
print(rt.row_splits)
with tf.Session() as session:
rt = tf.ragged.constant([[1, 2], [3, 4, 5], [6], [], [7]])
rt_value = session.run(rt)
print(rt_value.to_list())
print(rt_value.values)
print(rt_value.row_splits)
tf.enable_eager_execution() # Resume eager execution mode.
# x (2D ragged): 2 x (num_rows)
# y (scalar)
# result (2D ragged): 2 x (num_rows)
x = tf.ragged.constant([[1, 2], [3]])
y = 3
print(x + y)
# x (2d ragged): 3 x (num_rows)
# y (2d tensor): 3 x 1
# Result (2d ragged): 3 x (num_rows)
x = tf.ragged.constant(
[[10, 87, 12],
[19, 53],
[12, 32]])
y = [[1000], [2000], [3000]]
print(x + y)
# x (3d ragged): 2 x (r1) x 2
# y (2d ragged): 1 x 1
# Result (3d ragged): 2 x (r1) x 2
x = tf.ragged.constant(
[[[1, 2], [3, 4], [5, 6]],
[[7, 8]]],
ragged_rank=1)
y = tf.constant([[10]])
print(x + y)
# x (3d ragged): 2 x (r1) x (r2) x 1
# y (1d tensor): 3
# Result (3d ragged): 2 x (r1) x (r2) x 3
x = tf.ragged.constant(
[
[
[[1], [2]],
[],
[[3]],
[[4]],
],
[
[[5], [6]],
[[7]]
]
],
ragged_rank=2)
y = tf.constant([10, 20, 30])
print(x + y)
# x (2d ragged): 3 x (r1)
# y (2d tensor): 3 x 4 # trailing dimensions do not match
x = tf.ragged.constant([[1, 2], [3, 4, 5, 6], [7]])
y = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
try:
x + y
except tf.errors.InvalidArgumentError as exception:
print(exception)
# x (2d ragged): 3 x (r1)
# y (2d ragged): 3 x (r2) # ragged dimensions do not match.
x = tf.ragged.constant([[1, 2, 3], [4], [5, 6]])
y = tf.ragged.constant([[10, 20], [30, 40], [50]])
try:
x + y
except tf.errors.InvalidArgumentError as exception:
print(exception)
# x (3d ragged): 3 x (r1) x 2
# y (3d ragged): 3 x (r1) x 3 # trailing dimensions do not match
x = tf.ragged.constant([[[1, 2], [3, 4], [5, 6]],
[[7, 8], [9, 10]]])
y = tf.ragged.constant([[[1, 2, 0], [3, 4, 0], [5, 6, 0]],
[[7, 8, 0], [9, 10, 0]]])
try:
x + y
except tf.errors.InvalidArgumentError as exception:
print(exception)
rt = tf.RaggedTensor.from_row_splits(
values=[3, 1, 4, 1, 5, 9, 2],
row_splits=[0, 4, 4, 6, 7])
print(rt)
rt = tf.RaggedTensor.from_row_splits(
values=tf.RaggedTensor.from_row_splits(
values=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
row_splits=[0, 3, 3, 5, 9, 10]),
row_splits=[0, 1, 1, 5])
print(rt)
print("Shape: {}".format(rt.shape))
print("Number of ragged dimensions: {}".format(rt.ragged_rank))
rt = tf.RaggedTensor.from_nested_row_splits(
flat_values=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
nested_row_splits=([0, 1, 1, 5], [0, 3, 3, 5, 9, 10]))
print(rt)
rt = tf.RaggedTensor.from_row_splits(
values=[[1, 3], [0, 0], [1, 3], [5, 3], [3, 3], [1, 2]],
row_splits=[0, 3, 4, 6])
print(rt)
print("Shape: {}".format(rt.shape))
print("Number of ragged dimensions: {}".format(rt.ragged_rank))
values = [3, 1, 4, 1, 5, 9, 2, 6]
print(tf.RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8]))
print(tf.RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0]))
print(tf.RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8]))
print(tf.RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8]))
print(tf.RaggedTensor.from_value_rowids(
values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5))
rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
print(" values: {}".format(rt.values))
print(" row_splits: {}".format(rt.row_splits))
print(" row_lengths: {}".format(rt.row_lengths()))
print(" row_starts: {}".format(rt.row_starts()))
print(" row_limits: {}".format(rt.row_limits()))
print("value_rowids: {}".format(rt.value_rowids()))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The previous import code requires that you have pandas, numpy and matplotlib installed. If you are using conda, they can be installed with conda install pandas numpy matplotlib.
Step2: Import data file with pandas
Step3: df is an instance of the pandas object (data structure) pandas.DataFrame. A DataFrame instance has several methods (functions) to operate over the object. For example, it is easy to display the data for a first exploration of what it contains using .head()
Step4: A DataFrame can be converted into a numpy array by using the method .values
Step5: For numpy experts, there are also methods to access the data using the numpy standards. If you want to extract the data at the coordinate (0,1) you can do
Step6: But you can also use the column names and index keys to extract, for example, the name of the first antenna in a baseline pair from row 3
Step7: DataFrames are objects containing tabular data that can be grouped by columns and then used to aggregate data. Let's say you want to obtain the mean frequency for the baselines and the number of channels used
Step8: Plot.ly
Step9: Matplotlib
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
import plotly.tools as tls
import plotly.plotly as py
import cufflinks as cf
import plotly
plotly.offline.init_notebook_mode()
cf.offline.go_offline()
df = pd.read_csv('data_files/baseline_channels_phase.txt', sep=' ')
df.head()
df.values
df.iloc[0,1]
df.ix[3, 'ant1name']
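# Note: DataFrame.ix is deprecated (and removed in recent pandas releases); the same
# label-based lookup can be written as df.loc[3, 'ant1name'].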
data_group = df.groupby(['ant1name', 'ant2name'])
df2 = data_group.agg({'freq': np.mean, 'chan': np.count_nonzero}).reset_index()
df2.head()
data_raw = df.groupby(['ant1name', 'ant2name', 'chan']).y.mean()
data_raw.head(30)
data_raw.unstack().head(20)
pd.options.display.max_columns = 200
data_raw.unstack().head(20)
data_raw = data_raw.unstack().reset_index()
data_raw.head()
data_raw.to_excel('test.xls', index=False)
todegclean = np.degrees(np.arcsin(np.sin(np.radians(data_raw.iloc[:,2:]))))
todegclean.head()
todegclean['mean'] = todegclean.mean(axis=1)
todegclean.head()
data_clean = todegclean.iloc[:,:-1].apply(lambda x: x - todegclean.iloc[:,-1])
data_clean.head(20)
data_ready = pd.merge(data_raw[['ant1name', 'ant2name']], todegclean, left_index=True, right_index=True)
data_ready.head()
data_clean2 = data_clean.unstack().reset_index().copy()
data_clean2.query('100 < level_1 < 200')
data_clean2.query('100 < level_1 < 200').iplot(kind='scatter3d', x='chan', y='level_1', mode='markers', z=0, size=6,
title='Phase BL', filename='phase_test', width=1, opacity=0.8, colors='blue', symbol='circle',
layout={'scene': {'aspectratio': {'x': 1, 'y': 3, 'z': 0.7}}})
ploting = data_clean2.query('100 < level_1 < 200').figure(kind='scatter3d', x='chan', y='level_1', mode='markers', z=0, size=6,
title='Phase BL', filename='phase_test', width=1, opacity=0.8, colors='blue', symbol='circle',
layout={'scene': {'aspectratio': {'x': 1, 'y': 3, 'z': 0.7}}})
# ploting
ploting.data[0]['marker']['color'] = 'blue'
ploting.data[0]['marker']['line'] = {'color': 'blue', 'width': 0.5}
ploting.data[0]['marker']['opacity'] = 0.5
plotly.offline.iplot(ploting)
fig=plt.figure()
ax=fig.gca(projection='3d')
X = np.arange(0, data_clean.shape[1],1)
Y = np.arange(0, data_clean.shape[0],1)
X, Y = np.meshgrid(X,Y)
surf = ax.scatter(X, Y, data_clean, '.', c=data_clean,s=2,lw=0,cmap='winter')
%matplotlib notebook
fig=plt.figure()
ax=fig.gca(projection='3d')
X = np.arange(0, data_clean.shape[1],1)
Y = np.arange(0, data_clean.shape[0],1)
X, Y = np.meshgrid(X,Y)
surf = ax.scatter(X, Y, data_clean, '.', c=data_clean,s=2,lw=0,cmap='winter')
data_clean2.plot(kind='scatter', x='chan', y=0)
import seaborn as sns
data_clean2.plot(kind='scatter', x='level_1', y=0)
data_ready['noise'] = todegclean.iloc[:,2:].std(axis=1)
data_ready[['ant1name', 'ant2name', 'noise']].head(10)
corr = data_ready[['ant1name', 'ant2name', 'noise']].pivot_table(index=['ant1name'], columns=['ant2name'])
corr.columns.levels[1]
corr2 = pd.DataFrame(corr.values, index=corr.index.values, columns=corr.columns.levels[1].values)
corr2.head(10)
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr2, cmap=cmap,
square=True, xticklabels=5, yticklabels=5,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
?sns.heatmap
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example
Step2: Extended sampling
|
<ASSISTANT_TASK:>
Python Code:
#--- Libraries
import pandas as pd # stats packages
import numpy as np # linear algebra packages
import matplotlib.pyplot as plt # plotting packages
import seaborn as sns # more plotting routines
from scipy.stats import beta # function defining beta distribution
from scipy.stats import binom # function defining binomial distribution
#--- Configure plotting environments/defaults
# use 'cartoon-style'
plt.xkcd()
# use white background
sns.set_style('white')
# set color choices
c = sns.color_palette('deep')
# show plots in notebook
% matplotlib inline
#--- Set up example
# create a figure
fig, chart = plt.subplots(3,3,figsize=(12,9),sharex=True,sharey=True)
# set number of examples to show
n_trials = 8
# possible 'probabilities of heads' to evaluate
p = np.arange(0.01,1.0,0.01)
# set bias of weighted coin
w = 0.75
# set initial parameter values
a = 0.5 ; b = 0.5
# set initial number of counts
N = 0
# set initial number of hits
H = 0
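#--- Conjugacy note (why the update in the loop below is simple bookkeeping): with a
# Beta(a, b) prior on the heads probability and h heads observed in n flips, the
# posterior is Beta(a + h, b + n - h); after all flips this is Beta(a0 + H, b0 + N - H).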
#--- Show initial (prior) distribution
# calculate the prob. density for the test points
dens = beta.pdf(p,a,b) / beta.pdf(p,a,b).sum()
# show density
chart[int(N/3),N%3].fill_between(p,dens,0,alpha=0.5)
# write out how many times coin has been flipped
chart[int(N/3),N%3].text(0.2,0.04,'number of flips = '+str(N), style='italic',
bbox={'facecolor':c[4], 'alpha':0.4, 'pad':10})
# write out how many times coin has come up heads
chart[int(N/3),N%3].text(0.2,0.03,'number of heads = '+str(H), style='italic',
bbox={'facecolor':c[4], 'alpha':0.4, 'pad':10})
# set y-axis limit
chart[int(N/3),N%3].set_ylim(0,0.05)
# draw line showing 'true' probability of heads
chart[int(N/3),N%3].axvline(x=0.75,ymin=0,ymax=0.85,color=c[2],linestyle='--')
# label axis
chart[int(N/3),N%3].set_ylabel('density')
#--- Run simulation
# perform individual coin flips and update stats each time
for i in range(n_trials) :
# flip coin (coin is flipped once per sample)
n = 1
h = binom.rvs(n,w,random_state=N)
# update parameters
a = h + a
b = n - h + b
N = N + n
H = H + h
# calculate the prob. density for the test points
dens = beta.pdf(p,a,b) / beta.pdf(p,a,b).sum()
# show density
chart[int(N/3),N%3].fill_between(p,dens,0,alpha=0.5)
# write out how many times coin has been flipped
chart[int(N/3),N%3].text(0.2,0.04,'number of flips = '+str(N), style='italic',
bbox={'facecolor':c[4], 'alpha':0.4, 'pad':10})
# write out how many times coin has come up heads
chart[int(N/3),N%3].text(0.2,0.03,'number of heads = '+str(H), style='italic',
bbox={'facecolor':c[4], 'alpha':0.4, 'pad':10})
# set y-axis limit
chart[int(N/3),N%3].set_ylim(0,0.05)
# draw line showing 'true' probability of heads
chart[int(N/3),N%3].axvline(x=0.75,ymin=0,ymax=0.85,color=c[2],linestyle='--')
# label axis
if N>5 :
chart[int(N/3),N%3].set_xlabel('probability of heads')
if N%3 == 0 :
chart[int(N/3),N%3].set_ylabel('density')
# save figure
plt.savefig('../figures/initial_coin_flips.png')
#--- Set up example
# create a figure
fig, chart = plt.subplots(1,figsize=(8,4))
# set number of examples to show
n_trials = 40
# possible 'probabilities of heads' to evaluate
p = np.arange(0.01,1.0,0.01)
# set bias of weighted coin
w = 0.75
# set initial parameter values
a = 0.5 ; b = 0.5
# set initial number of counts
N = 0
# set initial number of hits
H = 0
#--- Show initial (prior) distribution
# calculate the prob. density for the test points
dens = beta.pdf(p,a,b) / beta.pdf(p,a,b).sum()
# show density
chart.fill_between(p,dens,0,alpha=0.3)
# set y-axis limit
chart.set_ylim(0,0.06)
# label axis
chart.set_ylabel('density')
chart.set_xlabel('probability of heads')
#--- Simulation
for i in range(n_trials) :
# flip coin (coin is flipped once per sample)
n = 1
h = binom.rvs(n,w,random_state=N)
# update parameters
a = h + a
b = n - h + b
N = N + n
H = H + h
# calculate the prob. density for the test points
dens = beta.pdf(p,a,b) / beta.pdf(p,a,b).sum()
    # show evolving densities, with emphasis on final estimate
if N < n_trials :
chart.fill_between(p,dens,0,alpha=0.3)
else :
chart.fill_between(p,dens,0,alpha=0.6,color=c[5])
chart.plot(p,dens,alpha=0.5,color='black')
chart.axvline(x=0.75,ymin=0,ymax=0.95,color=c[2],linestyle='--')
chart.text(0.2,0.05,'number of flips = '+str(N), style='italic',
bbox={'facecolor':c[4], 'alpha':0.4, 'pad':10})
chart.text(0.2,0.04,'number of heads = '+str(H), style='italic',
bbox={'facecolor':c[4], 'alpha':0.4, 'pad':10})
plt.savefig('../figures/extended_coin_flips.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ✅ DO THIS
Step2: 2. Modify code to be more flexible
Step3: 3. Use our ANN on the "Digits" dataset.
Step5: The following is copied and pasted from Section 1 and rewritten it to use the training and testing sets above.
Step6: ✅ DO THIS
Step7: ✅ DO THIS - Create an example to demonstrate that the Neural Network is working. Preferably using an example that comes with the provided NN Package.
Step8: ✅ DO THIS - Reproduce the results from the "Grade" example above using X and y
Step9: ✅ DO THIS - Reproduce the results from the "Digits" example above
Step11: Question
|
<ASSISTANT_TASK:>
Python Code:
# %load partSix.py
# Neural Networks Demystified
# Part 6: Training
#
# Supporting code for short YouTube series on artificial neural networks.
#
# Stephen Welch
# @stephencwelch
## ----------------------- Part 1 ---------------------------- ##
import numpy as np
# X = (hours sleeping, hours studying), y = Score on test
X = np.array(([3,5], [5,1], [10,2]), dtype=float)
y = np.array(([75], [82], [93]), dtype=float)
# Normalize
X = X/np.amax(X, axis=0)
y = y/100 #Max test score is 100
## ----------------------- Part 5 ---------------------------- ##
class Neural_Network(object):
def __init__(self):
#Define Hyperparameters
self.inputLayerSize = 2
self.outputLayerSize = 1
self.hiddenLayerSize = 3
#Weights (parameters)
self.W1 = np.random.randn(self.inputLayerSize,self.hiddenLayerSize)
self.W2 = np.random.randn(self.hiddenLayerSize,self.outputLayerSize)
def forward(self, X):
#Propogate inputs though network
self.z2 = np.dot(X, self.W1)
self.a2 = self.sigmoid(self.z2)
self.z3 = np.dot(self.a2, self.W2)
yHat = self.sigmoid(self.z3)
return yHat
def sigmoid(self, z):
#Apply sigmoid activation function to scalar, vector, or matrix
return 1/(1+np.exp(-z))
def sigmoidPrime(self,z):
#Gradient of sigmoid
return np.exp(-z)/((1+np.exp(-z))**2)
def costFunction(self, X, y):
#Compute cost for given X,y, use weights already stored in class.
self.yHat = self.forward(X)
J = 0.5*sum((y-self.yHat)**2)
return J
def costFunctionPrime(self, X, y):
#Compute derivative with respect to W and W2 for a given X and y:
self.yHat = self.forward(X)
delta3 = np.multiply(-(y-self.yHat), self.sigmoidPrime(self.z3))
dJdW2 = np.dot(self.a2.T, delta3)
delta2 = np.dot(delta3, self.W2.T)*self.sigmoidPrime(self.z2)
dJdW1 = np.dot(X.T, delta2)
return dJdW1, dJdW2
#Helper Functions for interacting with other classes:
def getParams(self):
#Get W1 and W2 unrolled into vector:
params = np.concatenate((self.W1.ravel(), self.W2.ravel()))
return params
def setParams(self, params):
#Set W1 and W2 using single paramater vector.
W1_start = 0
W1_end = self.hiddenLayerSize * self.inputLayerSize
self.W1 = np.reshape(params[W1_start:W1_end], (self.inputLayerSize , self.hiddenLayerSize))
W2_end = W1_end + self.hiddenLayerSize*self.outputLayerSize
self.W2 = np.reshape(params[W1_end:W2_end], (self.hiddenLayerSize, self.outputLayerSize))
def computeGradients(self, X, y):
dJdW1, dJdW2 = self.costFunctionPrime(X, y)
return np.concatenate((dJdW1.ravel(), dJdW2.ravel()))
def computeNumericalGradient(N, X, y):
paramsInitial = N.getParams()
numgrad = np.zeros(paramsInitial.shape)
perturb = np.zeros(paramsInitial.shape)
e = 1e-4
for p in range(len(paramsInitial)):
#Set perturbation vector
perturb[p] = e
N.setParams(paramsInitial + perturb)
loss2 = N.costFunction(X, y)
N.setParams(paramsInitial - perturb)
loss1 = N.costFunction(X, y)
#Compute Numerical Gradient
numgrad[p] = (loss2 - loss1) / (2*e)
#Return the value we changed to zero:
perturb[p] = 0
#Return Params to original value:
N.setParams(paramsInitial)
return numgrad
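# Descriptive note: the loop above implements the central-difference approximation
# dJ/dtheta_p ~ (J(theta + e*u_p) - J(theta - e*u_p)) / (2e), perturbing one
# parameter at a time with e = 1e-4, which the analytic gradients can be checked against.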
## ----------------------- Part 6 ---------------------------- ##
from scipy import optimize
class trainer(object):
def __init__(self, N):
#Make Local reference to network:
self.N = N
def callbackF(self, params):
self.N.setParams(params)
self.J.append(self.N.costFunction(self.X, self.y))
def costFunctionWrapper(self, params, X, y):
self.N.setParams(params)
cost = self.N.costFunction(X, y)
grad = self.N.computeGradients(X,y)
return cost, grad
def train(self, X, y):
#Make an internal variable for the callback function:
self.X = X
self.y = y
#Make empty list to store costs:
self.J = []
params0 = self.N.getParams()
options = {'maxiter': 200, 'disp' : True}
_res = optimize.minimize(self.costFunctionWrapper, params0, jac=True, method='BFGS', \
args=(X, y), options=options, callback=self.callbackF)
self.N.setParams(_res.x)
self.optimizationResults = _res
print("Input Data", X)
print("Output Data", y)
#Untrained Random Network
NN = Neural_Network()
y1 = NN.forward(X)
print("Untrained Output", y1)
#Training step
T = trainer(NN)
T.train(X,y)
#Trained Network
y2 = NN.forward(X)
print("Trained Output",y2)
#Put your code here
def MSE(y, yhat):
    return (1/len(y))*sum((y-yhat)**2)
print(MSE(y, y1))
print(MSE(y, y2))
class Neural_Network(Neural_Network):
def __init__(self,insize, outsize, hiddensize):
#Define Hyperparameters
self.inputLayerSize = insize
self.outputLayerSize = outsize
self.hiddenLayerSize = hiddensize
#Weights (parameters)
self.W1 = np.random.randn(self.inputLayerSize,self.hiddenLayerSize)
self.W2 = np.random.randn(self.hiddenLayerSize,self.outputLayerSize)
#Untrained Random Network
NN = Neural_Network(2,1,5)
y1 = NN.forward(X)
print("Untrained Output", y1)
T = trainer(NN)
T.train(X,y)
#Trained Network
y2 = NN.forward(X)
print("Trained Output",y2)
print(MSE(y, y1))
print(MSE(y, y2))
%matplotlib inline
import matplotlib.pylab as plt
import numpy as np
from sklearn.datasets import fetch_lfw_people, load_digits
from sklearn.cross_validation import train_test_split
sk_data = load_digits();
#Cool slider to browse all of the images.
from ipywidgets import interact
def browse_images(images, labels, categories):
n = len(images)
def view_image(i):
plt.imshow(images[i], cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('%s' % categories[labels[i]])
plt.axis('off')
plt.show()
interact(view_image, i=(0,n-1))
browse_images(sk_data.images, sk_data.target, sk_data.target_names)
feature_vectors = sk_data.data
class_labels = sk_data.target
categories = sk_data.target_names
N, h, w = sk_data.images.shape
train_vectors, test_vectors, train_labels, test_labels = train_test_split(feature_vectors, class_labels, test_size=0.25, random_state=1)
train_vectors = train_vectors/train_vectors.max()
train_vectors = train_vectors
train_labels = train_labels.reshape(1347,1)
train_labels = train_labels/train_labels.max()
print(train_vectors.shape)
print(train_labels.shape)
print(train_labels)
#Run the training.
# X = np.array(([3,5], [5,1], [10,2]), dtype=float) 2,1,3
# y = np.array(([75], [82], [93]), dtype=float)
NN = Neural_Network(64,1,10) #len(train_vectors)
NN.forward(train_vectors)
T = trainer(NN)
T.train(train_vectors, train_labels)
pred_labels = NN.forward(train_vectors)
print("Training Data error", np.sum(np.sqrt((train_labels - pred_labels)*(train_labels-pred_labels)))/len(train_vectors))
pred_labels = NN.forward(test_vectors)
print("Testing Data error", np.sum(np.sqrt((test_labels - pred_labels)*(test_labels-pred_labels)))/len(test_vectors))
# Pay attention to how the plotting code rescales the data labels,
# if you scaled them differently, you may need to change this code.
def plot_gallery(images, true_titles, pred_titles, h, w, n_row=5, n_col=5):
    """Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray_r)
plt.title(np.round(pred_titles[i]*10, 2))
plt.xlabel('Actual='+str(true_titles[i]), size=9)
plt.xticks(())
plt.yticks(())
plot_gallery(test_vectors, test_labels, pred_labels, h,w)
# Put your installation code here
# Put your example code here
# Put your Grade example code here
# Put your Digits example code here
from IPython.display import HTML
HTML(
"""
<iframe
src="https://goo.gl/forms/nRQj6A0xZHgrS4WK2"
width="80%"
height="500px"
frameborder="0"
marginheight="0"
marginwidth="0">
Loading...
</iframe>
"""
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Better performance with tf.function
Step2: Define a helper function for printing errors
Step3: Basics
Step4: You can use it inside other functions.
Step5: tf.function is faster than eager execution, especially when the graph contains many small ops. For graphs made of a few expensive ops (such as convolutions), the speedup is small.
Step6: Debugging
Step7: You can use the following techniques to control tracing behavior
Step8: You can use the get_concrete_function method to get a specific traced function.
Step9: You can also limit tracing by specifying an input_signature on tf.function.
Step10: When does retracing happen?
Step11: A simple way to fix this is to convert the argument to a Tensor so that it does not affect the generated graph.
Step12: Side effects of tf.function
Step13: If you need Python code to run on every call to the tf.function, tf.py_function is the way to do it.
Step14: Beware of Python state
Step15: But unambiguous code is fine. (Variables)
Step16: But unambiguous code is fine.
Step17: You can create variables inside a tf.function as long as they are only created the first time the function is called.
Step18: Variable initialization can depend on the function arguments and on the values of other variables.
Step19: AutoGraph transformations
Step20: If you are curious, you can inspect the code that AutoGraph generates.
Step21: Conditionals
Step22: See the reference documentation for additional constraints on if statements converted by AutoGraph.
Step23: When wrapping Python/NumPy data in a Dataset, be aware of the difference between tf.data.Dataset.from_generator and tf.data.Dataset.from_tensors.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import traceback
import contextlib
# Helper function for printing errors
@contextlib.contextmanager
def assert_raises(error_class):
try:
yield
except error_class as e:
print('기대하는 예외 발생 \n {}:'.format(error_class))
traceback.print_exc(limit=2)
except Exception as e:
raise e
else:
raise Exception('{}를 기대했지만 아무런 에러도 발생되지 않았습니다!'.format(
error_class))
@tf.function
def add(a, b):
return a + b
add(tf.ones([2, 2]), tf.ones([2, 2])) # [[2., 2.], [2., 2.]]
v = tf.Variable(1.0)
with tf.GradientTape() as tape:
result = add(v, 1.0)
tape.gradient(result, v)
@tf.function
def dense_layer(x, w, b):
return add(tf.matmul(x, w), b)
dense_layer(tf.ones([3, 2]), tf.ones([2, 2]), tf.ones([2]))
import timeit
conv_layer = tf.keras.layers.Conv2D(100, 3)
@tf.function
def conv_fn(image):
return conv_layer(image)
image = tf.zeros([1, 200, 200, 100])
# Warm-up
conv_layer(image); conv_fn(image)
print("Eager convolution:", timeit.timeit(lambda: conv_layer(image), number=10))
print("tf.function convolution:", timeit.timeit(lambda: conv_fn(image), number=10))
print("Note there is not much difference in convolution speed.")
# Functions and polymorphism
@tf.function
def double(a):
  print("Tracing:", a)
return a + a
print(double(tf.constant(1)))
print()
print(double(tf.constant(1.1)))
print()
print(double(tf.constant("a")))
print()
def f():
  print('Tracing!')
  tf.print('Executing')
tf.function(f)()
tf.function(f)()
print("Obtaining a concrete function")
double_strings = double.get_concrete_function(tf.TensorSpec(shape=None, dtype=tf.string))
print("Executing the traced function")
print(double_strings(tf.constant("a")))
print(double_strings(a=tf.constant("b")))
print("Passing an incompatible type to a concrete function raises an exception")
with assert_raises(tf.errors.InvalidArgumentError):
  double_strings(tf.constant(1))
@tf.function(input_signature=(tf.TensorSpec(shape=[None], dtype=tf.int32),))
def next_collatz(x):
  print("Tracing", x)
  return tf.where(x % 2 == 0, x // 2, 3 * x + 1)
print(next_collatz(tf.constant([1, 2])))
# The following fails because we specified a 1-D tensor in the input_signature.
with assert_raises(ValueError):
next_collatz(tf.constant([[1, 2], [3, 4]]))
def train_one_step():
pass
@tf.function
def train(num_steps):
  print("Tracing with num_steps = {}".format(num_steps))
for _ in tf.range(num_steps):
train_one_step()
train(num_steps=10)
train(num_steps=20)
train(num_steps=tf.constant(10))
train(num_steps=tf.constant(20))
@tf.function
def f(x):
  print("Tracing", x)
  tf.print("Executing", x)
f(1)
f(1)
f(2)
external_list = []
def side_effect(x):
  print('Python side effect')
external_list.append(x)
@tf.function
def f(x):
tf.py_function(side_effect, inp=[x], Tout=[])
f(1)
f(1)
f(1)
assert len(external_list) == 3
# Since py_function converts the 1 into tf.constant(1), we need to call .numpy() here.
assert external_list[0].numpy() == 1
external_var = tf.Variable(0)
@tf.function
def buggy_consume_next(iterator):
external_var.assign_add(next(iterator))
  tf.print("Value of external_var:", external_var)
iterator = iter([0, 1, 2, 3])
buggy_consume_next(iterator)
# The following reuses the first value instead of pulling the next value from the iterator.
buggy_consume_next(iterator)
buggy_consume_next(iterator)
@tf.function
def f(x):
v = tf.Variable(1.0)
v.assign_add(x)
return v
with assert_raises(ValueError):
f(1.0)
v = tf.Variable(1.0)
@tf.function
def f(x):
return v.assign_add(x)
print(f(1.0)) # 2.0
print(f(2.0)) # 4.0
class C:
pass
obj = C()
obj.v = None
@tf.function
def g(x):
if obj.v is None:
obj.v = tf.Variable(1.0)
return obj.v.assign_add(x)
print(g(1.0)) # 2.0
print(g(2.0)) # 4.0
state = []
@tf.function
def fn(x):
if not state:
state.append(tf.Variable(2.0 * x))
state.append(tf.Variable(state[0] * 3.0))
return state[0] * x * state[1]
print(fn(tf.constant(1.0)))
print(fn(tf.constant(3.0)))
# A simple loop
@tf.function
def f(x):
while tf.reduce_sum(x) > 1:
tf.print(x)
x = tf.tanh(x)
return x
f(tf.random.uniform([5]))
print(tf.autograph.to_code(f.python_function))
@tf.function
def fizzbuzz(n):
for i in tf.range(1, n + 1):
    print('Tracing the loop')
    if i % 15 == 0:
      print('Tracing the fizzbuzz branch')
      tf.print('fizzbuzz')
    elif i % 3 == 0:
      print('Tracing the fizz branch')
      tf.print('fizz')
    elif i % 5 == 0:
      print('Tracing the buzz branch')
      tf.print('buzz')
    else:
      print('Tracing the default branch')
      tf.print(i)
fizzbuzz(tf.constant(5))
fizzbuzz(tf.constant(20))
def measure_graph_size(f, *args):
g = f.get_concrete_function(*args).graph
  print("{}({}) contains {} nodes in its graph".format(
      f.__name__, ', '.join(map(str, args)), len(g.as_graph_def().node)))
@tf.function
def train(dataset):
loss = tf.constant(0)
for x, y in dataset:
    loss += tf.abs(y - x) # a meaningless operation
return loss
small_data = [(1, 1)] * 3
big_data = [(1, 1)] * 10
measure_graph_size(train, small_data)
measure_graph_size(train, big_data)
measure_graph_size(train, tf.data.Dataset.from_generator(
lambda: small_data, (tf.int32, tf.int32)))
measure_graph_size(train, tf.data.Dataset.from_generator(
lambda: big_data, (tf.int32, tf.int32)))
batch_size = 2
seq_len = 3
feature_size = 4
def rnn_step(inp, state):
return inp + state
@tf.function
def dynamic_rnn(rnn_step, input_data, initial_state):
# [batch, time, features] -> [time, batch, features]
input_data = tf.transpose(input_data, [1, 0, 2])
max_seq_len = input_data.shape[0]
states = tf.TensorArray(tf.float32, size=max_seq_len)
state = initial_state
for i in tf.range(max_seq_len):
state = rnn_step(input_data[i], state)
states = states.write(i, state)
return tf.transpose(states.stack(), [1, 0, 2])
dynamic_rnn(rnn_step,
tf.random.uniform([batch_size, seq_len, feature_size]),
tf.zeros([batch_size, feature_size]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: We need a function to parse the HTML data after extracting the result.
Step3: Now we extract the result pages for each of the ids (1 to 66000) and store the strings in a pandas DataFrame. We will tokenize the resultant strings later.
Step4: Parsing
Step5: Photo extraction
|
<ASSISTANT_TASK:>
Python Code:
#Load the necessary modules
from mechanize import Browser
import pandas as pd
from IPython.core.display import HTML
import requests
def extract_sub_string(string, start, finish):
    '''
    Extract the substring between the 'start' substring and the first occurrence of the 'finish' substring after that point.
    :param string: main string, to be parsed
    :type string: str
    :param start: starting string
    :type start: str
    :param finish: ending string
    :type finish: str
    '''
new_string_index = string.find(start)
new_string = string[new_string_index:]
end_index =new_string.find(finish)
final_string = string[new_string_index:new_string_index+end_index]
return final_string
start = 'doctor_info'
finish="</div"
extracted_strings = []
extracted_df = pd.DataFrame(columns=['extracted'])
for reg_no in xrange(1,66001):
browser = Browser()
browser.open("http://bmdc.org.bd/doctors-info/")
for form in browser.forms():
pass
# We have 2 forms in this page and we going to select the second form
browser.select_form(nr=1)
# This form has 2 input fields, first field, search_doc_id takes an number and second field type indicates if the
# id is assocated to a medical doctor or dentist
form['search_doc_id']=str(reg_no)
form['type']=['1']
# Submit the form and read the result
response = browser.submit()
content = response.read()
str_content = str(content)
#Extract only the relevant portion
extracted_str = extract_sub_string(str_content, start, finish)
extracted_strings.append(extracted_str)
    # Originally these commented-out snippets were run so that each group of 100 doctors was recorded at a time in
    # separate csv files, for testing and stability purposes. Each 100 doctors took around 6-7 minutes to record.
#if reg_no%100==0:
# file_number = reg_no/100
# extracted_df = pd.DataFrame(columns=['extracted'])
# extracted_df.extracted = extracted_strings
# extracted_df.to_csv(str(file_number)+'.csv')
# extracted_strings = []
extracted_df.extracted = extracted_strings
extracted_df.to_csv('all_bma_doctor.csv')
tokenized_df = pd.DataFrame(columns=['Registration','Name','Father','Address', 'Division'])
#Since originally we created a number of csv files each containing 100 doctors we parsed them differently.
#file_list = []
#for item in xrange(1,66):
# file_list.append(str(item)+'.csv')
#for file_ in file_list:
df = pd.read_csv('all_bma_doctor.csv')
for index in df.index:
string = df.ix[index, 'extracted']
start="Registration Number</td>\r\n"
finish='</td>\r\n </tr>\r\n\r\n <tr class="odd">\r\n'
reg_no = extract_sub_string(string , start, finish)
reg_no = reg_no.strip()
reg_no = reg_no.split(" ")[-1]
#reg_no
start = '<td>Doctor\'s Name</td>\r\n'
finish = '</td>\r\n </tr>\r\n'
dr_name = extract_sub_string(string , start, finish)
dr_name=dr_name.strip()
dr_name = dr_name.split(">")[-1]
#dr_name
start = "<td>Father's Name</td>"
finish = "</td>\r\n </tr>"
father = extract_sub_string(string , start, finish)
father = father.strip()
father = father.split(">")[-1]
#father
start = '<td> <address> '
finish = "</address>"
address = extract_sub_string(string , start, finish)
address = address.strip()
address = address.split("<address>")[-1]
address = address.replace("<br/>",' ').strip()
#address
division = 'Medical'
values = pd.Series()
values['Registration'] = reg_no
values['Name'] = dr_name
values['Father'] = father
values['Address'] = address
values['Division'] = division
tokenized_df.loc[len(tokenized_df)] = values
tokenized_df[5000:5010]
for bma_id in xrange(1,66001):
f = open(str(bma_id)+'.jpg','wb')
f.write(requests.get('http://bmdc.org.bd/dphotos/medical/'+str(bma_id)+'.JPG').content)
f.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We see that this function just takes a numeric input and returns its squared value.
Step2: What if we wanted to square a list of numbers? We could set an iterable and just split up the workflow in multiple sub-workflows. But say we were making a simple workflow that squared a list of numbers and then summed them. The sum node would expect a list, but using an iterable would make a bunch of sum nodes, and each would get one number from the list. The solution here is to use a MapNode.
Step3: Because iterfield can take a list of names, you can operate over multiple sets of data, as long as they're the same length. The values in each list will be paired; it does not compute a combinatoric product of the lists.
Step4: But not every input needs to be an iterfield.
Step5: As in the case of iterables, each underlying MapNode execution can happen in parallel. Hopefully, you see how these tools allow you to write flexible, reusable workflows that will help you process large amounts of data efficiently and reproducibly.
Step6: If we try to specify the input for the Gunzip node with a simple Node, we get the following error
Step7: bash
Step8: Now, we just have to create a workflow, connect the nodes and we can run it
|
<ASSISTANT_TASK:>
Python Code:
from nipype import Function
def square_func(x):
return x ** 2
square = Function(["x"], ["f_x"], square_func)
square.run(x=2).outputs.f_x
from nipype import MapNode
square_node = MapNode(square, name="square", iterfield=["x"])
square_node.inputs.x = [0, 1, 2, 3]
square_node.run().outputs.f_x
def power_func(x, y):
return x ** y
power = Function(["x", "y"], ["f_xy"], power_func)
power_node = MapNode(power, name="power", iterfield=["x", "y"])
power_node.inputs.x = [0, 1, 2, 3]
power_node.inputs.y = [0, 1, 2, 3]
print(power_node.run().outputs.f_xy)
power_node = MapNode(power, name="power", iterfield=["x"])
power_node.inputs.x = [0, 1, 2, 3]
power_node.inputs.y = 3
print(power_node.run().outputs.f_xy)
from nipype.algorithms.misc import Gunzip
from nipype.interfaces.spm import Realign
from nipype.pipeline.engine import Node, MapNode, Workflow
files = ['/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz',
'/data/ds000114/sub-02/ses-test/func/sub-02_ses-test_task-fingerfootlips_bold.nii.gz']
realign = Node(Realign(register_to_mean=True),
name='motion_correction')
gunzip = Node(Gunzip(), name='gunzip',)
gunzip.inputs.in_file = files
gunzip = MapNode(Gunzip(), name='gunzip',
iterfield=['in_file'])
gunzip.inputs.in_file = files
mcflow = Workflow(name='realign_with_spm')
mcflow.connect(gunzip, 'out_file', realign, 'in_files')
mcflow.base_dir = '/output'
mcflow.run('MultiProc', plugin_args={'n_procs': 4})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Flower power
Step2: ConvNet Codes
Step3: Below I'm running images through the VGG network in batches.
Step4: Building the Classifier
Step5: Data prep
Step6: Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same distribution of classes as the whole data set. The easiest way to accomplish both these goals is to use StratifiedShuffleSplit from scikit-learn.
Step7: If you did it right, you should see these sizes for the training sets
Step9: Batches!
Step10: Training
Step11: Testing
Step12: Below, feel free to choose images and see how the trained classifier predicts the flowers in them.
|
<ASSISTANT_TASK:>
Python Code:
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
vgg_dir = 'tensorflow_vgg/'
# Make sure vgg exists
if not isdir(vgg_dir):
raise Exception("VGG directory doesn't exist!")
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(vgg_dir + "vgg16.npy"):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar:
urlretrieve(
'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy',
vgg_dir + 'vgg16.npy',
pbar.hook)
else:
print("Parameter file already exists!")
import tarfile
dataset_folder_path = 'flower_photos'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('flower_photos.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar:
urlretrieve(
'http://download.tensorflow.org/example_images/flower_photos.tgz',
'flower_photos.tar.gz',
pbar.hook)
if not isdir(dataset_folder_path):
with tarfile.open('flower_photos.tar.gz') as tar:
tar.extractall()
tar.close()
import os
import numpy as np
import tensorflow as tf
from tensorflow_vgg import vgg16
from tensorflow_vgg import utils
data_dir = 'flower_photos/'
contents = os.listdir(data_dir)
classes = [each for each in contents if os.path.isdir(data_dir + each)]
# Set the batch size higher if you can fit in in your GPU memory
batch_size = 10
codes_list = []
labels = []
batch = []
codes = None
with tf.Session() as sess:
# Build the vgg network here
vgg = vgg16.Vgg16()
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
with tf.name_scope("content_vgg"):
vgg.build(input_)
for each in classes:
print("Starting {} images".format(each))
class_path = data_dir + each
files = os.listdir(class_path)
for ii, file in enumerate(files, 1):
# Add images to the current batch
# utils.load_image crops the input images for us, from the center
img = utils.load_image(os.path.join(class_path, file))
batch.append(img.reshape((1, 224, 224, 3)))
labels.append(each)
# Running the batch through the network to get the codes
if ii % batch_size == 0 or ii == len(files):
# Image batch to pass to VGG network
images = np.concatenate(batch)
# Get the values from the relu6 layer of the VGG network
feed_dict = {input_:images}
codes_batch = sess.run(vgg.relu6, feed_dict=feed_dict)
# Here I'm building an array of the codes
if codes is None:
codes = codes_batch
else:
codes = np.concatenate((codes, codes_batch))
# Reset to start building the next batch
batch = []
print('{} images processed'.format(ii))
# write codes to file
with open('codes', 'w') as f:
codes.tofile(f)
# write labels to file
import csv
with open('labels', 'w') as f:
writer = csv.writer(f, delimiter='\n')
writer.writerow(labels)
# read codes and labels from file
import csv
with open('labels') as f:
reader = csv.reader(f, delimiter='\n')
labels = np.array([each for each in reader if len(each) > 0]).squeeze()
with open('codes') as f:
codes = np.fromfile(f, dtype=np.float32)
codes = codes.reshape((len(labels), -1))
# Your one-hot encoded labels array here
from sklearn.preprocessing import LabelBinarizer
lb = LabelBinarizer()
lb.fit(labels)
labels_vecs = lb.transform(labels)
from sklearn.model_selection import StratifiedShuffleSplit
ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
train_idx, val_idx = next(ss.split(codes, labels))
half_val_len = int(len(val_idx)/2)
val_idx, test_idx = val_idx[:half_val_len], val_idx[half_val_len:]
train_x, train_y = codes[train_idx], labels_vecs[train_idx]
val_x, val_y = codes[val_idx], labels_vecs[val_idx]
test_x, test_y = codes[test_idx], labels_vecs[test_idx]
print("Train shapes (x, y):", train_x.shape, train_y.shape)
print("Validation shapes (x, y):", val_x.shape, val_y.shape)
print("Test shapes (x, y):", test_x.shape, test_y.shape)
inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]])
labels_ = tf.placeholder(tf.int64, shape=[None, labels_vecs.shape[1]])
fc = tf.contrib.layers.fully_connected(inputs_, 256)
logits = tf.contrib.layers.fully_connected(fc, labels_vecs.shape[1], activation_fn=None)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels_, logits=logits)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer().minimize(cost)
predicted = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def get_batches(x, y, n_batches=10):
    '''Return a generator that yields batches from arrays x and y.'''
batch_size = len(x)//n_batches
for ii in range(0, n_batches*batch_size, batch_size):
# If we're not on the last batch, grab data with size batch_size
if ii != (n_batches-1)*batch_size:
X, Y = x[ii: ii+batch_size], y[ii: ii+batch_size]
# On the last batch, grab the rest of the data
else:
X, Y = x[ii:], y[ii:]
# I love generators
yield X, Y
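# Quick sanity check (added illustration, not in the original): the generator
# covers the whole set, so the chunk lengths sum to the number of training examples.
assert sum(len(xb) for xb, _ in get_batches(train_x, train_y)) == len(train_x)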
epochs = 10
iteration = 0
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for x, y in get_batches(train_x, train_y):
feed = {inputs_: x,
labels_: y}
loss, _ = sess.run([cost, optimizer], feed_dict=feed)
print("Epoch: {}/{}".format(e+1, epochs),
"Iteration: {}".format(iteration),
"Training loss: {:.5f}".format(loss))
iteration += 1
if iteration % 5 == 0:
feed = {inputs_: val_x,
labels_: val_y}
val_acc = sess.run(accuracy, feed_dict=feed)
            print("Epoch: {}/{}".format(e+1, epochs),
"Iteration: {}".format(iteration),
"Validation Acc: {:.4f}".format(val_acc))
saver.save(sess, "checkpoints/flowers.ckpt")
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: test_x,
labels_: test_y}
test_acc = sess.run(accuracy, feed_dict=feed)
print("Test accuracy: {:.4f}".format(test_acc))
%matplotlib inline
import matplotlib.pyplot as plt
from scipy.ndimage import imread
test_img_path = 'flower_photos/dandelion/146023167_f905574d97_m.jpg'
test_img = imread(test_img_path)
plt.imshow(test_img)
# Run this cell if you don't have a vgg graph built
if 'vgg' in globals():
print('"vgg" object already exists. Will not create again.')
else:
#create vgg
with tf.Session() as sess:
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg = vgg16.Vgg16()
vgg.build(input_)
with tf.Session() as sess:
img = utils.load_image(test_img_path)
img = img.reshape((1, 224, 224, 3))
feed_dict = {input_: img}
code = sess.run(vgg.relu6, feed_dict=feed_dict)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: code}
prediction = sess.run(predicted, feed_dict=feed).squeeze()
plt.imshow(test_img)
plt.barh(np.arange(5), prediction)
_ = plt.yticks(np.arange(5), lb.classes_)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A simple transmission line
Step2: Assuming the source generates an input power of $P_f$ with a phase $\phi$, with such a line the voltage and current at the entrance of the line are
Step3: The voltage and current evolve along the transmission line. The voltage and current at the output of the line can be calculated using the transmission line tools provided with scikit-rf
Step4: Let's perform the same calculation using Circuit. First, one needs to define the circuit, that is to create input/output ports and to connect these ports to the transmission line Network we've already created. Then, we can build the circuit
Step5: It's always a good practice to check if the circuit's graph is as expected. In this case, the graph is pretty simple
Step6: Circuit provides two methods to determine voltages and currents at the circuit input/output ports (also known as "external ports"). These methods take as input the power and phase at each port
Step7: The results are similar to the previous ones, except that the sign of the current at port2 is reversed.
Step8: In a Circuit simulator
Step9: With Circuit
Step10: The voltages and currents at the ports for the given excitation are
Step11: These results match the ones given by the full-wave calculations well, hurrah.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# standard imports
import numpy as np
import matplotlib.pyplot as plt
import skrf as rf
rf.stylely()
P_f = 1 # forward power in Watt
Z = 50 # source internal impedance, line characteristic impedance and load impedance
L = 10 # line length in [m]
freq = rf.Frequency(2, 2, 1, unit='GHz')
line_media = rf.media.DefinedGammaZ0(freq, z0=Z) # lossless line medium
line = line_media.line(d=L, unit='m', name='line') # transmission line Network
V_in = np.sqrt(2*Z*P_f)
I_in = np.sqrt(2*P_f/Z)
print(f'Input voltage and current: {V_in} V and {I_in} A')
theta = rf.theta(line_media.gamma, freq.f, L) # electrical length
V_out, I_out = rf.tlineFunctions.voltage_current_propagation(V_in, I_in, Z, theta)
print(f'Output voltage and current: {V_out} V and {I_out} A')
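# Sanity check (added illustration): the medium above is lossless, so propagation only
# rotates the phase by the electrical length theta while the magnitudes are preserved.
assert np.allclose(np.abs(V_out), np.abs(V_in))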
port1 = rf.Circuit.Port(frequency=freq, name='port1', z0=50)
port2 = rf.Circuit.Port(frequency=freq, name='port2', z0=50)
cnx = [
[(port1, 0), (line, 0)],
[(port2, 0), (line, 1)]
]
crt = rf.Circuit(cnx)
crt.plot_graph(network_labels=True, edge_labels=True, inter_labels=True)
power = [1, 0] # 1 Watt at port1 and 0 at port2
phase = [0, 0] # 0 radians
V_at_ports = crt.voltages_external(power, phase)
print(V_at_ports)
I_at_ports = crt.currents_external(power, phase)
print(I_at_ports)
import pandas as pd # convenient to read .csv files
pd.read_csv('circuit_vi_HFSS_Voltages.csv')
pd.read_csv('circuit_vi_HFSS_Currents.csv')
pd.read_csv('circuit_vi_Designer_Voltages.csv')
pd.read_csv('circuit_vi_Designer_Currents.csv')
# Importing the 3-port .s3p file exported from full-wave simulation
coaxial_T = rf.Network('circuit_vi_Coaxial_T.s3p')
# pay attention to the port's characteristic impedance
# it should match the Network characteristic impedances otherwise this will generate mismatches
port1 = rf.Circuit.Port(coaxial_T.frequency, 'port1', coaxial_T.z0[:,0])
port2 = rf.Circuit.Port(coaxial_T.frequency, 'port2', coaxial_T.z0[:,1])
port3 = rf.Circuit.Port(coaxial_T.frequency, 'port3', coaxial_T.z0[:,2])
# connexion list
cnx = [
[(port1, 0), (coaxial_T, 0)],
[(port2, 0), (coaxial_T, 1)],
[(port3, 0), (coaxial_T, 2)]
]
# building the circuit
crt = rf.Circuit(cnx)
# let's check if our connexion list is correctly defined
crt.plot_graph(network_labels=True, edge_labels=True, inter_labels=True)
power = [1, 2, 3] # input power in watts at ports 1, 2 and 3
phase = np.deg2rad([-10, -20, +60]) # input phase in rad at ports 1, 2 and 3
voltages = crt.voltages_external(power, phase)
currents = crt.currents_external(power, phase)
# just for a better rendering in the notebook
pd.concat([
pd.DataFrame(np.abs(voltages), columns=['mag V1', 'mag V2', 'mag V3'], index=crt.frequency.f/1e6),
pd.DataFrame(np.angle(voltages, deg=True), columns=['Phase V1', 'Phase V2', 'Phase V3'], index=crt.frequency.f/1e6)
], axis=1)
# just for a better rendering in the notebook
pd.concat([
pd.DataFrame(np.abs(currents), columns=['mag I1', 'mag I2', 'mag I3'], index=crt.frequency.f/1e6),
pd.DataFrame(np.angle(currents, deg=True), columns=['Phase I1', 'Phase I2', 'Phase I3'], index=crt.frequency.f/1e6)
], axis=1)
# internals currents (currents at all connexions)
# in this example, there are 6 internal connexions (3 pairs of connexions)
crt.currents(power, phase)
# This gives the indices of the "external" ports
crt.port_indexes
# So we can keep only "external" ports
crt.currents(power, phase)[:, crt.port_indexes]
# note the sign difference due to the convention chosen for internal ports
crt.currents_external(power, phase)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: RASP Diabetes Rates
Step3: Poverty, Age and Race
Step4: Compare to CHIS
Step5: AskCHIS, By Race, 55-64
Step6: AskCHIS, By Race, 55-64, Male
Step7: AskCHIS, By Race, 55-64, In Poverty
|
<ASSISTANT_TASK:>
Python Code:
import seaborn as sns
import metapack as mp
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display
from publicdata.chis import *
%matplotlib inline
sns.set_context('notebook')
idx = pd.IndexSlice # Convenience redefinition.
#pkg = mp.jupyter.open_package()
pkg = mp.jupyter.open_source_package()
pkg
def recode(df):
    '''
    Recode to a simpler group of races. For a lot of health outcomes, the major divisions are:
    * White + Asian ( whasian )
    * Black + Latino ( afrotino )
    * Others
    '''
from pandas.api.types import CategoricalDtype
df['race_recode'] = df.racedf_p1
df.replace({'race_recode':{
'NON-LATINO WHITE':'nhwhite',
'NON-LATINO ASIAN':'asian',
'NON-LATINO AMERICAN INDIAN/ALASKAN NATIVE': 'other',
'NON-LATINO AFR. AMER.': 'black',
'LATINO': 'hisp',
'NON-LATINO, TWO+ RACES': 'other',
'NON-LATINO OTHER, ONE RACE': 'other'
}}, inplace=True)
df.race_recode = df.race_recode.astype('category')
df['minority'] = (df['race_recode'] != 'white_asian').astype(int)
df['old'] = (df.srage_p1 < '45-49 YEARS').astype(CategoricalDtype(categories=[False, True],ordered=True))
df.old.cat.rename_categories(['OLD','YOUNG'], inplace=True)
df['poor'] = (df.povll.isin(('200-299% FPL', '300% FPL AND ABOVE')) )\
.astype(CategoricalDtype(categories=[True, False],ordered=True))
df.poor.cat.rename_categories(['NPOV','POV'], inplace=True)
return df
df17 = pkg.reference('adult_2017').dataframe()
df16 = pkg.reference('adult_2016').dataframe()
df15 = pkg.reference('adult_2015').dataframe()
df14 = pkg.reference('adult_2014').dataframe()
df13 = pkg.reference('adult_2013').dataframe()
# Rename some categories. 2016 and 2015 have "ALASKA" where the others have "ALASKAN", which
# causes the concat() operation to convert categories to strings
cats_17 = df17.racedf_p1.cat.categories
cat_map = dict(zip(df16.racedf_p1.cat.categories, df17.racedf_p1.cat.categories))
for e in [df13,df14,df15,df16,df17]:
e.racedf_p1.cat.rename_categories(cat_map, inplace=True)
for df, year in zip([df13, df14, df15, df16, df17], range(2013, 2018)):
df['year'] = year
df = recode(df)
n_years, df1517 = chis_concat([df17,df16,df15], ['diabetes', 'srsex', 'srage_p1', 'racedf_p1', 'race_recode', 'povll', 'minority', 'poor', 'old'])
def age_group_parts(v):
try:
y1, y2, _ = v.replace('-',' ').split()
return y1,y2
except ValueError:
# Probably '85+ YEARS'
return 85, 120
def age_group_to_age(v):
y1, y2 = age_group_parts(v)
if y1 == 85:
return 85
else:
return (int(y1)+int(y2))/2
# Convert to census age ranges
census_age_ranges = [
pd.Interval(18, 24, closed='both'),
pd.Interval(25, 34, closed='both'),
pd.Interval(35, 44, closed='both'),
pd.Interval(45, 54, closed='both'),
pd.Interval(55, 64, closed='both'),
pd.Interval(65, 74, closed='both'),
    pd.Interval(75, 85, closed='both') # Actually the range is 75:120, but we want a lower mean for prediction
]
pov_status_map = {
"0-99% FPL": 1,
"100-199% FPL": 0,
"200-299% FPL":0,
"300% FPL AND ABOVE": 0
}
dflr = pd.get_dummies(df1517, columns=['race_recode'], prefix='', prefix_sep = '')
dflr['race_recode'] = df1517.race_recode
dflr['diabetes_bool'] = (dflr.diabetes == 'YES').astype(int)
dflr['group_age_mean'] = dflr.srage_p1.apply(lambda v:age_group_to_age(v)).astype(float)
dflr['group_age_min'] = dflr.srage_p1.apply(lambda v:age_group_parts(v)[0]).astype(int)
dflr['group_age_max'] = dflr.srage_p1.apply(lambda v:age_group_parts(v)[1]).astype(int)
dflr['census_age_group'] = pd.cut(dflr.group_age_mean, pd.IntervalIndex(census_age_ranges))
#dflr['age_group_name'] = dflr.apply(lambda r: '{:02d}-{:03d}'.format(r.group_age_min, r.group_age_max), axis=1)
dflr['age_group'] = dflr.census_age_group.apply(lambda v: '{:02d}-{:03d}'.format(v.left, v.right))
dflr['pov'] = dflr.povll.apply(lambda v: pov_status_map[v])
dflr['is_male'] = (dflr.srsex == 'MALE').astype(int)
dflr.head()
raked_col = [c for c in df.columns if c.startswith('rake')]
index_cols = ['race_recode','is_male', 'age_group', 'pov' ]
value_cols = ['diabetes_bool', 'rec_count']
dflr['rec_count'] = 1
dflr['census_age_group'] = pd.cut(dflr.group_age_mean, pd.IntervalIndex(census_age_ranges))
t = dflr[index_cols+value_cols+raked_col].copy()
# Couldn't figure out the idiomatic way to do this.
t['group_pop'] = t.rakedw0
for c in raked_col:
t[c] *= t.diabetes_bool
# Now the raked columns are the estimates for the number of people who have diabetes,
# so we just have to sum up everything
t2 = t.groupby(index_cols).sum().reset_index()
t2['estimate'] = t2.rakedw0
t2['repl_mean'] = t2[raked_col[1:]].mean(axis=1)
t2['repl_std'] = t2[raked_col[1:]].std(axis=1)
t2['rse'] = t2['repl_std']/t2.estimate
t2['rate'] = t2.estimate / t2.group_pop
#t2['rate_std'] = t2['rate'] * t2['rse']
rasp_d = t2.sort_values('rate',ascending=False)[list(c for c in t2.columns if c not in raked_col)]
rasp_d.rename(columns={'race_recode': 'raceeth','repl_std':'std'}, inplace=True)
rasp_d['sex'] = rasp_d.is_male
#rasp_d['pov'] = rasp_d.poor.apply(lambda v: 1 if v == 'POV' else 0)
rasp_d.head()
rasp_d.estimate.sum(), rasp_d.group_pop.sum(), rasp_d.estimate.sum()/rasp_d.group_pop.sum()
index_cols = ['raceeth','age_group', 'sex','pov' ]
val_cols = ['group_pop','estimate','std','rate']
rasp_diabetes = rasp_d[index_cols+val_cols].reset_index(drop=True).sort_values(index_cols)[index_cols+val_cols]
re_group = rasp_diabetes[rasp_diabetes.rate != 0].groupby(['raceeth', 'age_group']).mean()
# There are a lot of entries, particularly for young asians, where the group won't have any estimate
# for diabetes because the sample group is too small to have even one case, and all values are integers.
# So, we impute these values from others in the same raceeth+age_group
def impute_rate(r, re_group):
    '''Impute rates from other values in the similar group'''
if r.rate == 0:
return re_group.loc[(r.raceeth,r.age_group)].rate
else:
return r.rate
rasp_diabetes['imputed_rate'] = rasp_diabetes.apply(impute_rate,re_group=re_group, axis=1)
rasp_diabetes['imputed_estimate'] = (rasp_diabetes.imputed_rate * rasp_diabetes.group_pop).round(0).astype(int)
rasp_diabetes['group_pop'] = rasp_diabetes['group_pop'].round(0).astype(int)
rasp_diabetes['estimate'] = rasp_diabetes['estimate'].round(0).astype(int)
rasp_diabetes.head()
t = rasp_diabetes.reset_index().groupby(['raceeth','age_group']).sum()
t['rate'] = t.estimate/t.group_pop
t[['rate']].unstack(0).plot()
t = rasp_diabetes.reset_index().groupby(['raceeth','pov']).sum()
t['rate'] = t.estimate/t.group_pop
t[['rate']].unstack(0).plot(kind='bar')
t = rasp_diabetes.groupby('raceeth').sum().copy()
t['rate'] = (t.estimate / t.group_pop * 100).round(1)
t['rate']
t = rasp_diabetes[ (rasp_diabetes.age_group=='55-064') ].groupby('raceeth').sum().copy()
t['rate'] = (t.estimate / t.group_pop * 100).round(1)
assert round(t.loc['asian'].estimate,-3) == 96_000
t[['estimate','group_pop','rate']]
t = rasp_diabetes[ (rasp_diabetes.age_group=='55-064') & (rasp_diabetes.sex == 1) ].groupby('raceeth').sum().copy()
t['rate'] = (t.estimate / t.group_pop * 100).round(1)
assert round(t.loc['asian'].estimate,-3) == 47_000
t[['estimate','rate']]
t = rasp_diabetes[ (rasp_diabetes.age_group=='55-064') & (rasp_diabetes.pov == 1) ].groupby('raceeth').sum().copy()
t['rate'] = (t.estimate / t.group_pop * 100).round(1)
assert round(t.loc['asian'].estimate,-3) == 30_000
t[['estimate','rate', 'group_pop']]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tuples
Step2: Dictionaries
Step3: Sets
|
<ASSISTANT_TASK:>
Python Code:
# Create a list of countries, then print the results
allies = ['USA','UK','France','New Zealand',
'Australia','Canada','Poland']; allies
# Print the length of the list
len(allies)
# Add an item to the list, then print the results
allies.append('China'); allies
# Sort list, then print the results
allies.sort(); allies
# Reverse sort list, then print the results
allies.reverse(); allies
# View the first item of the list
allies[0]
# View the last item of the list
allies[-1]
# Delete the item in the list
del allies[0]; allies
# Add a numeric value to a list of strings
allies.append(3442); allies
# Create a tuple of state names
usa = ('Texas', 'California', 'Maryland'); usa
# Create a tuple of countries
# (notice the USA has a state names in the nested tuple)
countries = ('canada', 'mexico', usa); countries
# View the third item of the top tuple
countries[2]
# View the third item of the third tuple
countries[2][2]
# Create a dictionary with key:value combos
staff = {'Chris' : 'chris@stater.org',
'Jake' : 'jake@stater.org',
'Ashley' : 'ashley@stater.org',
'Shelly' : 'shelly@stater.org'
}
# Print the value using the key
staff['Chris']
# Delete a dictionary entry based on the key
del staff['Chris']; staff
# Add an item to the dictionary
staff['Guido'] = 'guido@python.org'; staff
# Create a set of BRI countries
BRI = set(['brazil', 'russia', 'india'])
# Is India in the set BRI?
'india' in BRI
# Is the US in the set BRI?
'usa' in BRI
# Create a copy of BRI called BRIC
BRIC = BRI.copy()
# Add China to BRIC
BRIC.add('china')
# Is BRIC a super-set of BRI?
BRIC.issuperset(BRI)
# Remove Russia from BRI
BRI.remove('russia')
# Which items appear in both BRI and BRIC (the intersection)?
BRI & BRIC
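# Added for completeness: the union gathers the items that appear in either set
BRI | BRIC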
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We define the elements we are interested in
Step2: We will investigate ternary M1-M2-O combinations exhaustively, where M1 and M2 are different transition metals.
Step3: Next steps
Step4: Pandas
|
<ASSISTANT_TASK:>
Python Code:
### Imports
from smact import Element, element_dictionary, ordered_elements
from smact.screening import smact_filter
from datetime import datetime
import itertools
import multiprocessing
all_el = element_dictionary() # A dictionary of all element objects
# Say we are just interested in first row transition metals
els = [all_el[symbol] for symbol in ordered_elements(21,30)]
# We can print the symbols
print([i.symbol for i in els])
# Generate all M1-M2 combinations
metal_pairs = itertools.combinations(els, 2)
# Add O to each pair
ternary_systems = [[*m, Element('O')] for m in metal_pairs]
# Prove to ourselves that we have all unique chemical systems
for i in ternary_systems:
print(i[0].symbol, i[1].symbol, i[2].symbol)
# Use multiprocessing and smact_filter to quickly generate our list of compositions
start = datetime.now()
if __name__ == '__main__': # Always use pool protected in an if statement
with multiprocessing.Pool(processes=4) as p: # start 4 worker processes
result = p.map(smact_filter, ternary_systems)
print('Time taken to generate list: {0}'.format(datetime.now()-start))
# Flatten the list of lists
flat_list = [item for sublist in result for item in sublist]
print('Number of compositions: --> {0} <--'.format(len(flat_list)))
print('Each list entry looks like this:\n elements, oxidation states, stoichiometries')
for i in flat_list[:5]:
print(i)
from pymatgen import Composition
def comp_maker(comp):
form = []
for el, ammt in zip(comp[0], comp[2]):
form.append(el)
form.append(ammt)
form = ''.join(str(e) for e in form)
pmg_form = Composition(form).reduced_formula
return pmg_form
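# Quick illustration (added, not in the original): reduce the first allowed
# composition to a pretty formula before mapping over the whole list.
print(comp_maker(flat_list[0]))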
if __name__ == '__main__':
with multiprocessing.Pool(processes=4) as p:
pretty_formulas = p.map(comp_maker, flat_list)
print('Each list entry now looks like this: ')
for i in pretty_formulas[:5]:
print(i)
import pandas as pd
new_data = pd.DataFrame({'pretty_formula': pretty_formulas})
# Drop any duplicate compositions
new_data = new_data.drop_duplicates(subset = 'pretty_formula')
new_data.describe()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We run the simulation up to a fixed number of iterations, controlled by the variable niter, storing the values of the EM fields $E_x$ and $E_z$ at every timestep so we can analyze them later
Step2: Electrostatic / Electromagnetic Waves
Step3: Electrostatic Plasma Waves
Step4: Electromagnetic Plasma Waves
|
<ASSISTANT_TASK:>
Python Code:
import em1ds as zpic
#v_the = 0.001
v_the = 0.02
#v_the = 0.20
electrons = zpic.Species( "electrons", -1.0, ppc = 64, uth=[v_the,v_the,v_the])
sim = zpic.Simulation( nx = 500, box = 50.0, dt = 0.0999/2, species = electrons )
sim.filter_set("sharp", ck = 0.99)
#sim.filter_set("gaussian", ck = 50.0)
import numpy as np
niter = 4000
Ex_t = np.zeros((niter,sim.nx))
Ez_t = np.zeros((niter,sim.nx))
tmax = niter * sim.dt
print("\nRunning simulation up to t = {:g} ...".format(tmax))
while sim.t <= tmax:
print('n = {:d}, t = {:g}'.format(sim.n,sim.t), end = '\r')
Ex_t[sim.n,:] = sim.emf.Ex
Ez_t[sim.n,:] = sim.emf.Ez
sim.iter()
print("\nDone.")
import matplotlib.pyplot as plt
iter = sim.n//2
plt.plot(np.linspace(0, sim.box, num = sim.nx),Ex_t[iter,:], label = "$E_x$")
plt.plot(np.linspace(0, sim.box, num = sim.nx),Ez_t[iter,:], label = "$E_z$")
plt.grid(True)
plt.xlabel("$x_1$ [$c/\omega_n$]")
plt.ylabel("$E$ field []")
plt.title("$E_x$, $E_z$, t = {:g}".format( iter * sim.dt))
plt.legend()
plt.show()
import matplotlib.pyplot as plt
import matplotlib.colors as colors
# (omega,k) power spectrum
win = np.hanning(niter)
for i in range(sim.nx):
Ex_t[:,i] *= win
sp = np.abs(np.fft.fft2(Ex_t))**2
sp = np.fft.fftshift( sp )
k_max = np.pi / sim.dx
omega_max = np.pi / sim.dt
plt.imshow( sp, origin = 'lower', norm=colors.LogNorm(vmin = 1.0),
extent = ( -k_max, k_max, -omega_max, omega_max ),
aspect = 'auto', cmap = 'gray')
k = np.linspace(-k_max, k_max, num = 512)
w=np.sqrt(1 + 3 * v_the**2 * k**2)
plt.plot( k, w, label = "Electron Plasma Wave", color = 'r',ls = '-.' )
plt.ylim(0,2)
plt.xlim(0,k_max)
plt.xlabel("$k$ [$\omega_n/c$]")
plt.ylabel("$\omega$ [$\omega_n$]")
plt.title("Wave dispersion relation")
plt.legend()
plt.show()
import matplotlib.pyplot as plt
import matplotlib.colors as colors
# (omega,k) power spectrum
win = np.hanning(niter)
for i in range(sim.nx):
Ez_t[:,i] *= win
sp = np.abs(np.fft.fft2(Ez_t))**2
sp = np.fft.fftshift( sp )
k_max = np.pi / sim.dx
omega_max = np.pi / sim.dt
plt.imshow( sp, origin = 'lower', norm=colors.LogNorm(vmin = 1e-5, vmax = 0.01),
extent = ( -k_max, k_max, -omega_max, omega_max ),
aspect = 'auto', cmap = 'gray')
k = np.linspace(-k_max, k_max, num = 512)
w=np.sqrt(1 + k**2)
plt.plot( k, w, label = "$\omega^2 = \omega_p^2 + k^2 c^2$", color = 'r', ls = '-.' )
plt.ylim(0,k_max)
plt.xlim(0,k_max)
plt.xlabel("$k$ [$\omega_n/c$]")
plt.ylabel("$\omega$ [$\omega_n$]")
plt.title("EM-wave dispersion relation")
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Trigonometric functions
Step2: Q2. Calculate inverse sine, inverse cosine, and inverse tangent of x, element-wise.
Step3: Q3. Convert angles from radians to degrees.
Step4: Q4. Convert angles from degrees to radians.
Step5: Hyperbolic functions
Step6: Rounding
Step7: Q7. Implement out5 in the above question using numpy.
Step8: Sums, products, differences
Step9: Q9. Calculate the difference between neighboring elements, element-wise.
Step10: Q10. Calculate the difference between neighboring elements, element-wise, and prepend [0, 0] and append [100] to the result.
Step11: Q11. Return the cross product of x and y.
Step12: Exponents and logarithms
Step13: Q13. Calculate exp(x) - 1 for all elements in x.
Step14: Q14. Calculate $2^p$ for all p in x.
Step15: Q15. Compute natural, base 10, and base 2 logarithms of x element-wise.
Step16: Q16. Compute the natural logarithm of one plus each element in x in floating-point accuracy.
Step17: Floating point routines
Step18: Q18. Change the sign of x to that of y, element-wise.
Step19: Arithmetic operations
Step20: Q20. Subtract y from x element-wise.
Step21: Q21. Multiply x by y element-wise.
Step22: Q22. Divide x by y element-wise in two different ways.
Step23: Q23. Compute numerical negative value of x, element-wise.
Step24: Q24. Compute the reciprocal of x, element-wise.
Step25: Q25. Compute $x^y$, element-wise.
Step26: Q26. Compute the remainder of x / y element-wise in two different ways.
Step27: Miscellaneous
Step28: Q28. Compute the square of x, element-wise.
Step29: Q29. Compute square root of x element-wise.
Step30: Q30. Compute the absolute value of x.
Step31: Q31. Compute an element-wise indication of the sign of x, element-wise.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.__version__
__author__ = "kyubyong. kbpark.linguist@gmail.com. https://github.com/kyubyong"
x = np.array([0., 1., 30, 90])
print "sine:", np.sin(x)
print "cosine:", np.cos(x)
print "tangent:", np.tan(x)
x = np.array([-1., 0, 1.])
print "inverse sine:", np.arcsin(x)
print "inverse cosine:", np.arccos(x)
print "inverse tangent:", np.arctan(x)
x = np.array([-np.pi, -np.pi/2, np.pi/2, np.pi])
out1 = np.degrees(x)
out2 = np.rad2deg(x)
assert np.array_equiv(out1, out2)
print out1
x = np.array([-180., -90., 90., 180.])
out1 = np.radians(x)
out2 = np.deg2rad(x)
assert np.array_equiv(out1, out2)
print out1
x = np.array([-1., 0, 1.])
print np.sinh(x)
print np.cosh(x)
print np.tanh(x)
x = np.array([2.1, 1.5, 2.5, 2.9, -2.1, -2.5, -2.9])
out1 = np.around(x)
out2 = np.floor(x)
out3 = np.ceil(x)
out4 = np.trunc(x)
out5 = [round(elem) for elem in x]
print out1
print out2
print out3
print out4
print out5
print np.floor(np.abs(x) + 0.5) * np.sign(x)
# Read http://numpy-discussion.10968.n7.nabble.com/why-numpy-round-get-a-different-result-from-python-round-function-td19098.html
x = np.array(
[[1, 2, 3, 4],
[5, 6, 7, 8]])
outs = [np.sum(x),
np.sum(x, axis=0),
np.sum(x, axis=1, keepdims=True),
"",
np.prod(x),
np.prod(x, axis=0),
np.prod(x, axis=1, keepdims=True),
"",
np.cumsum(x),
np.cumsum(x, axis=0),
np.cumsum(x, axis=1),
"",
np.cumprod(x),
np.cumprod(x, axis=0),
np.cumprod(x, axis=1),
"",
np.min(x),
np.min(x, axis=0),
np.min(x, axis=1, keepdims=True),
"",
np.max(x),
np.max(x, axis=0),
np.max(x, axis=1, keepdims=True),
"",
np.mean(x),
np.mean(x, axis=0),
np.mean(x, axis=1, keepdims=True)]
for out in outs:
    if isinstance(out, str):  # the "" entries are separators; avoids comparing arrays to a string
print
else:
print("->", out)
x = np.array([1, 2, 4, 7, 0])
print np.diff(x)
x = np.array([1, 2, 4, 7, 0])
out1 = np.ediff1d(x, to_begin=[0, 0], to_end=[100])
out2 = np.insert(np.append(np.diff(x), 100), 0, [0, 0])
assert np.array_equiv(out1, out2)
print out2
x = np.array([1, 2, 3])
y = np.array([4, 5, 6])
print np.cross(x, y)
x = np.array([1., 2., 3.], np.float32)
out = np.exp(x)
print out
x = np.array([1., 2., 3.], np.float32)
out1 = np.expm1(x)
out2 = np.exp(x) - 1.
assert np.allclose(out1, out2)
print out1
x = np.array([1., 2., 3.], np.float32)
out1 = np.exp2(x)
out2 = 2 ** x
assert np.allclose(out1, out2)
print out1
x = np.array([1, np.e, np.e**2])
print "natural log =", np.log(x)
print "common log =", np.log10(x)
print "base 2 log =", np.log2(x)
x = np.array([1e-99, 1e-100])
print np.log1p(x)
# Compare it with np.log(1 +x)
x = np.array([-3, -2, -1, 0, 1, 2, 3])
out1 = np.signbit(x)
out2 = x < 0
assert np.array_equiv(out1, out2)
print out1
x = np.array([-1, 0, 1])
y = -1.1
print np.copysign(x, y)
x = np.array([1, 2, 3])
y = np.array([-1, -2, -3])
out1 = np.add(x, y)
out2 = x + y
assert np.array_equal(out1, out2)
print out1
x = np.array([3, 4, 5])
y = np.array(3)
out1 = np.subtract(x, y)
out2 = x - y
assert np.array_equal(out1, out2)
print out1
x = np.array([3, 4, 5])
y = np.array([1, 0, -1])
out1 = np.multiply(x, y)
out2 = x * y
assert np.array_equal(out1, out2)
print out1
x = np.array([3., 4., 5.])
y = np.array([1., 2., 3.])
out1 = np.true_divide(x, y)
out2 = x / y
assert np.array_equal(out1, out2)
print out1
out3 = np.floor_divide(x, y)
out4 = x // y
assert np.array_equal(out3, out4)
print out3
# Note that in Python 2 and 3, the handling of `divide` differs.
# See https://docs.scipy.org/doc/numpy/reference/generated/numpy.divide.html#numpy.divide
x = np.array([1, -1])
out1 = np.negative(x)
out2 = -x
assert np.array_equal(out1, out2)
print out1
x = np.array([1., 2., .2])
out1 = np.reciprocal(x)
out2 = 1/x
assert np.array_equal(out1, out2)
print out1
x = np.array([[1, 2], [3, 4]])
y = np.array([[1, 2], [1, 2]])
out = np.power(x, y)
print out
x = np.array([-3, -2, -1, 1, 2, 3])
y = 2
out1 = np.mod(x, y)
out2 = x % y
assert np.array_equal(out1, out2)
print out1
out3 = np.fmod(x, y)
print out3
x = np.arange(10)
out1 = np.clip(x, 3, 7)
out2 = np.copy(x)
out2[out2 < 3] = 3
out2[out2 > 7] = 7
assert np.array_equiv(out1, out2)
print out1
x = np.array([1, 2, -1])
out1 = np.square(x)
out2 = x * x
assert np.array_equal(out1, out2)
print out1
x = np.array([1., 4., 9.])
out = np.sqrt(x)
print out
x = np.array([[1, -1], [3, -3]])
out = np.abs(x)
print out
x = np.array([1, 3, 0, -1, -3])
out1 = np.sign(x)
out2 = np.copy(x)
out2[out2 > 0] = 1
out2[out2 < 0] = -1
assert np.array_equal(out1, out2)
print out1
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The function that reads CSV files into DataFrames is called read_csv. In the simplest form, you supply it with a filename or URL. The cars data set is stored as a CSV in github. We'll reference the URL, and use the head() method to print the first several observations.
Step2: Displaying Information about DataFrames
Step3: Simple Statistics
Step4: Another way is to calculate one simple statistic across all columns in a DataFrame...
Step5: Subsetting DataFrames
Step6: Indexing DataFrames
Step7: Suppose I want a new DataFrame containing only models with an MSRP greater than 80,000.
Step8: Plotting
Step9: Exercise 2 - Connecting to CAS
Step10: Tip
Step11: Exercise 3 - Running CAS Actions
Step12: Using CAS' help Function
Step13: You can ask for help on specific actions and action sets.
Step14: Exercise 4 - Loading Data into CAS
Step15: CASTable object vs. DataFrame
Step16: Below is another example of printing the first ten observations from the CASTable object tbl
Step17: Subsetting, Indexing, Plotting, and Calculating Simple Statistics for CASTable Objects
Step18: Creating a new CASTable object containing only Models with an MSRP greater than $80,000.
Step19: Calculating simple statistics for the CASTable object.
Step20: Exercise 5 - Loading and Using Action Sets
Step21: Now that the 'simple' Action set has been loaded, we can use the 'correlation' action within the 'simple' Action set to calculate a Pearson correlation matrix.
Step22: Tip
Step23: How do you delete a CAS table?
Step24: Running DATA Step Code
Step25: Create a new CAS Table using DATA Step Functionality
Step26: Creating an object called 'new' to reference the CARS_TEMP table on the CAS server.
Step27: Now the CAS server has two tables loaded onto it. This can be verified using the tableinfo() function and the CASConnection object.
Step28: Running SQL Code
Step29: Tip
Step30: Create a new CAS Table Using SQL
Step31: Exercise 6 - Building Analytical Models
Step32: ODS-like Output Style
Step33: Random Forest from the decisiontree Action Set.
Step34: You can list all of the available actions (for decision trees, random forests, and gradient boosting tree models)
Step35: The models in the decisiontree action set support either continuous, binary or multilevel response variables.
Step36: Random forest models are commonly used in variable selection. Selection is usually determined by the variable importance of the inputs, which is computed and plotted below.
Step37: To score the training data or the holdout data using the forest model, you can use the forestscore action.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df=pd.read_csv('https://raw.githubusercontent.com/'
'sassoftware/sas-viya-programming/master/data/cars.csv')
df.head(10)
df.dtypes
df[['MSRP','Horsepower']].describe()
df.mean()
subdf=df[['Make','Model','Horsepower']]
subdf.head(15)
df=df.set_index('Model')
df.head()
exp=df['MSRP'] > 80000
exp.head(15)
df80=df[exp]
df80.head()
%matplotlib inline
df.plot(kind='scatter', x='MSRP', y='Horsepower', figsize=(12,6))
import swat
conn = swat.CAS("server", 8777, "student", "Metadata0", protocol="http")
conn
out = conn.serverstatus()
out
conn.help()
conn.help(actionset='simple')
conn.help(action='correlation')
tbl = conn.read_csv('c:/users/student/Documents/cars.csv',"cars_cas")
tbl
conn.columninfo(table=tbl)
conn.fetch(table=tbl, to=10)
expCAS=tbl['MSRP'] > 80000
expCAS.head()
tbl80=tbl[expCAS]
tbl80.head()
tbl[['MSRP','Horsepower']].describe()
tbl.mean()
conn.loadactionset('simple')
tbl.correlation(simple=False)
conn.tableinfo()
# conn.dropTable('<INSERT TABLE HERE>')
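# For example (illustration only; uncomment once a table is truly no longer needed):
# conn.dropTable('CARS_CAS', caslib='casuser')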
conn.loadactionset('dataStep')
out= conn.datastep.runcode(code='''
data cars_temp(caslib='casuser');
set cars_cas(caslib='casuser');
if MSRP > 80000 then Category='Very Expensive';
else Category='Less Expensive';
keep Model MSRP Category EngineSize;
run;
''')
out
new = out.OutputCasTables.loc[0, 'casTable']
new.head()
conn.tableinfo()
conn.loadactionset('fedsql')
conn.fedsql.execDirect(query='''
select model,
msrp,
case when msrp > 80000 then 'Very Expensive' else 'Less Expensive' end as CategorySQL
from cars_temp
where EngineSize > 5
order by msrp desc
''')
conn.fedsql.execDirect(query='''
create table cars_temp_SQL as
select *
from cars_temp
where EngineSize>5
''')
conn.tableinfo()
conn.loadactionset('regression')
conn.help(actionset='regression')
glm_out=tbl.glm(
target = 'MSRP',
nominals = ['Type', 'Origin'],
inputs = ['Type', 'Origin', 'MPG_City', 'Length', 'Weight']
)
glm_out
from swat.render import render_html
render_html(glm_out)
conn.loadactionset('decisiontree')
conn.decisiontree?
forest1 = tbl.Foresttrain()
forest1.target = 'Origin'
forest1.inputs = ['MPG_City','MPG_Highway','Type','Weight','Length','Cylinders']
forest1.nominals = ['Type','Cylinders']
forest1.casout = conn.CASTable('forestModel1', replace=True)
forest1()
forest1.varimp = True
result = forest1()
dfVarImp = result['DTreeVarImpInfo']
import matplotlib.pyplot as plt
import numpy as np
y_pos = np.arange(len(dfVarImp['Importance']))
plt.barh(y_pos, dfVarImp['Importance'], align='center')
plt.yticks(y_pos, dfVarImp['Variable'])
plt.xlabel('Variable Importance')
plt.show()
scored_data = conn.CASTable('scored_output', replace=True)
tbl.forestscore(modeltable=conn.CASTable('forestModel1'),casout=scored_data)
scored_data.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this way, Python standardizes the look of code from the definition of the language itself.
Step2: There are only 3 operators for boolean variables
Step3: The following table shows the possible operations between numeric variables
Step4: They can also be created directly with values inside
Step5: Lists are not restricted to holding elements of the same type
Step6: To access a value, you just index it using the following bracket syntax
Step7: Additionally, subsections of lists can be selected by slicing.
Step8: Note
Step9: A quick way to define a list containing a sequence of integers is to use ranges.
Step10: But if the rules are a bit more complex, it is necessary to use so-called list comprehensions. List comprehensions are expressions used to describe a set of values, similar to how sets are described by comprehension in mathematics
Step11: List comprehensions have three parts
Step12: Character strings (Sequences)
Step13: They also have many additional methods (which you can see in the official reference), such as upper()
Step14: Sets
Step15: Additionally, a set cannot contain duplicate elements. Turning a list or tuple into a set removes the duplicates, which is often very useful.
Step16: Dictionaries
Step17: Dictionary values are accessed through their keys using the syntax
Step18: It is possible to iterate over a dictionary, value by value, although it is worth mentioning that no kind of order is guaranteed, unlike with sequences
Step19: Control structures
Step20: Iterative flow
Step21: We can also exit a loop at any moment by using the reserved word break. Executing this instruction terminates the execution of the nearest enclosing loop.
Step22: Likewise, we can use the reserved word continue to end an iteration early and move on to the next one
Step23: String formatting documentation
Step24: The previous loop can be shortened by using the built-in function enumerate
Step25: Likewise, we can use for to perform an operation a fixed number of times by using the built-in function range()
Step26: It is also possible to use break and continue inside for loops.
Step28: Functions
Step30: Once the function is defined, we can call it as follows
Step31: It is important to note that all variables declared inside a function are accessible only within that same function. For example, if we try to get the value of b, we will get an error indicating that the variable is not defined
Step32: This lets us keep the declaration and call contexts of the function separate, so we do not have to worry about variable name conflicts in those situations. This is illustrated in the following example, which also declares a function with more than one variable
Step33: We can also define functions with default values. This makes those parameters optional, so that when no specific value is supplied in a call, the function runs with the indicated default instead of raising an error.
Step34: When the function has more than one parameter, it is possible to call the function naming explicitly which parameter each value goes to, for readability when there are many.
Step35: We can also define functions with a variable number of parameters, in the same way print() works, by prepending an asterisk * to the last variable name (it is important that it be the last one), which will be treated as a tuple inside the function.
Step36: The following function returns two values
Step38: We can also define functions that do not return a value (Note
Step39: One last note: if a parameter of a mutable type is modified inside a function, it will also be modified outside of it. Not so with variables of immutable types
Step40: Lambdas
Step41: Map, Filter, Reduce
Step42: Or we could use map.
Step43: We will also commonly see map used with lambdas.
Step44: Filter
Step45: Reduce
Step46: Internally, the process taking place is as the diagram shows
|
<ASSISTANT_TASK:>
Python Code:
if 2 + 3 == 5:
    x = 5 + 3
    mensaje = "True!"
else:
    x = 5 - 3
    mensaje = "False!"
print(x)
print(mensaje)
type(True)
type(5)
type(3.1416)
lista_vacia = []
print(lista_vacia)
#Or equivalently
lista_vacia = list()
print(lista_vacia)
semana = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
print(semana)
cosas_aleatorias = [1+2, "Donald Trump", None, 3.5]
print(cosas_aleatorias)
#In Python, indices start at 0
print (cosas_aleatorias[0])
print (cosas_aleatorias[1])
print (semana[0:3]) #From the first up to the third element
print (semana[0:7:2]) #The whole list, but every two elements
print (semana[:3]) #From the start of the list up to the third element
print (semana[5:]) #From the fifth element to the end of the list
print (semana[:-2]) #From the first element up to 2 positions before the end of the list
print (semana[:]) #The whole list
cosas_aleatorias[:3] = [1, 2, 3]
print(cosas_aleatorias)
dias_en_diciembre = list(range(1, 32))
print (dias_en_diciembre)
[2**x for x in range(5)]
[x for x in dias_en_diciembre if x % 2 == 0] #Días pares en diciembre
#Las tuplas se crean haciendo uso de paréntesis en lugar de corchetes
semana = ("Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado", "Domingo")
semana[0] = "Enero" #Intentar modificar un valor de una tupla genera un error
pangrama = "El veloz murciélago hindú comía feliz cardillo y kiwi. La cigüeña tocaba el saxofón detrás del palenque de paja"
print(pangrama[3:8])
pangrama[3:8] = "lento"
print(pangrama.upper())
numeros_pares = {0, 2, 4, 6, 8, 10, 12, 14, 16, 18} #La creación de conjuntos es con llaves o con la funcion set()
print ("numeros_pares:", numeros_pares)
multiplos_de_tres = set(range(0, 20, 3)) #La función set puede recibir otros iterables o secuencias
print ("multiplos_de_tres:", multiplos_de_tres, "\n")
print("Intersección:", numeros_pares & multiplos_de_tres)
print("Union:", numeros_pares | multiplos_de_tres)
print("Diferencia:", numeros_pares - multiplos_de_tres)
semana = ["Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado", "Domingo"]
semana *= 3
print("Lista semana: ", semana, "\n")
semana = set(semana)
print("Set semana:", semana)
#Todas estas maneras de crear diccionarios son equivalentes, definiendo la capital de tres países
capitales_1 = dict(Peru = 'Lima', Ecuador = 'Quito', Argentina = 'Buenos Aires')
capitales_2 = {'Peru': 'Lima', 'Ecuador': 'Quito', 'Argentina': 'Buenos Aires'}
capitales_3 = dict(zip(['Peru', 'Ecuador', 'Argentina'], ['Lima', 'Quito', 'Buenos Aires']))
capitales_4 = dict([('Ecuador', 'Quito'), ('Peru', 'Lima'), ('Argentina', 'Buenos Aires')])
capitales_5 = dict({'Argentina': 'Buenos Aires', 'Peru': 'Lima', 'Ecuador': 'Quito'})
capitales_1 == capitales_2 == capitales_3 == capitales_4 == capitales_5
print("'Peru' ->", capitales_1['Peru'])
print("'Ecuador' ->", capitales_1['Ecuador'])
print("'Argentina' ->", capitales_1['Argentina'])
for capital in capitales_1: #En breve veremos que significa 'for'
print(capital)
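# Additional illustration (not in the original notebook): if we also want the value
# associated with each key, we can iterate over .items(), which yields (key, value) pairs.
for pais, capital in capitales_1.items():
    print(pais, "->", capital)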
x = int(input("Por favor ingrese un entero: "))
if x < 0:
print("El numero ingresado es negativo")
elif x == 0:
print("El numero ingresado es cero")
else:
print("El numero ingresado es positivo")
#Fibonacci
a, b = 0, 1
while b < 1000:
print(b, end=',')
a, b = b, a + b
password = ""
while True: # <- Con True como condición, el bucle se ejecutaría permanentemente
password = input("Por favor ingrese la contraseña: ")
if password == "secret":
print("Gracias. Ha ingresado la contraseña correcta.")
break # <- Pero con break podemos salir del bucle
else:
print("Lo sentimos, la contraseña es incorrecta - inténtelo de nuevo.")
tareas = []
while True:
#Formateo de strings https://docs.python.org/3/library/string.html#format-string-syntax
tarea = input("%d tareas ingresadas. Ingrese una tarea o 'exit' para terminar: " % len(tareas))
if len(tarea) == 0:
print ("Por favor ingrese una tarea")
continue # <- Esta palabra reservada termina inmediatamente la actual iteración y continúa con la siguiente
if tarea == "exit": #
break # Todo este código es ignorado durante la actual iteración si
# anteriormente se ejecuta una instrucción continue
tareas.append(tarea) #
print("Su lista de tareas:")
print("\r\n".join(tareas))
#Añdadir número de tarea a la lista definida anteriormente
i = 1
for tarea in tareas:
print ("%d. %s" % (i, tarea))
i += 1
for (i, tarea) in enumerate(tareas):
print ("%d. %s" % (i+1, tarea))
#Diez primeros dígitos de la secuencia Fibonacci
a, b = 0, 1
for _ in range(10):
print(b, end=',')
a, b = b, a + b
#Diez primeros dígitos de la secuencia Fibonacci, o hasta encontrar un múltiplo de 7
a, b = 0, 1
for _ in range(10):
print(b, end=',')
if b % 7 == 0:
break
a, b = b, a + b
#Función que devuelve el elemento n de la serie Fibonacci. Tiene un solo parámetro, el número de elemento deseado.
def fib(n):
    """Escribe la serie Fibonacci hasta el número n."""
a, b = 0, 1
for _ in range(n):
a, b = b, a+b
return a
x = fib(20)
print("Número Fibonacci #20:", x)
print(b)
import math
def magnitud(x, y):
x = x**2
y = y**2
mag = x + y #Declarando variable interna mag
mag = math.sqrt(mag)
return mag
mag = 0 #Declarando variable externa mag para demostrar que es una variable completamente diferente a la interna
print ("Magnitud del vector [4, 3]:", magnitud(4, 3))
#Aquí podemos notar que las variables x e y son independientes de aquellas declaradas en la función
print ("Las variables declaradas y manipuladas fuera de la función siguen teniendo sus valores originales: mag =", mag)
def saludar(nombre, saludo="Hola"):
print("{0} {1}!".format(saludo, nombre))
saludar("Kenyi") #Llamando a la función sin el parámetro saludo hace que la función se ejecute con el valor por defecto "Hola"
saludar("Kenyi", "Buenos días") #Pero si especificamos el valor, se utiliza ese en su lugar
print( magnitud(x = 12, y = 9) )
#Otro beneficio de llamar funciones de esta forma es que no es necesario llamar a los argumentos en orden
saludar(saludo = "Buenos días", nombre = "Kenyi")
#Pero si se especifican parámetros con nombre, todas los parámetros subsecuentes deben ser llamados con nombre también
saludar(saludo = "Buenos días", "Kenyi")
#En este caso el parámetro sumandos contendrá todos los parámetros ingresados
def sumatoria(*sumandos):
total = 0
#sumandos es una tupla
for elemento in sumandos:
total += elemento
return total
print (sumatoria(3, 5, 6))
print (sumatoria(1, 2))
def cuadrado_y_cubo(n): # devuelve el cuadrado y el cubo del número ingresado
cuadrado = n**2
cubo = n**3
return cuadrado, cubo
x, y = cuadrado_y_cubo(4)
print("Cuadrado:", x, "- Cubo:", y)
def fib(n): # escribir los primeros n elementos de la serie Fibonacci
    """Escribe la serie Fibonacci hasta el número n."""
a, b = 0, 1
for _ in range(n):
a, b = b, a+b
print(a, end=' ')
print()
# Ahora podemos llamar la función que acabamos de definir:
fib(20)
#0 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 1597 2584 4181
def mutabilidad(entero, lista):
entero += 10
lista.append("Hola")
lista.append("Mundo")
entero = 0
lista = []
mutabilidad(entero, lista)
print(entero) #Entero sigue conteniendo el valor 0, y no 10
print(lista) #Lista ahora tiene 2 nuevos elementos, añadidos dentro de la función
#En este caso, la variable al_cuadrado contendrá una función, y podrá ser llamada posteriormente como tal
al_cuadrado = lambda x: x**2
print(al_cuadrado(4))
print(al_cuadrado(10))
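# Additional example (illustrative only, not in the original notebook): lambdas are often
# passed as the key argument of sorted() to order elements by a computed value.
numeros = [-4, 2, -7, 5]
print(sorted(numeros, key=lambda x: abs(x)))  # [2, -4, 5, -7]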
semana_mayusculas = []
for i in semana:
semana_mayusculas.append(i.upper())
print(semana_mayusculas)
semana_mayusculas = []
semana_mayusculas = map(str.upper, semana)
print (list(semana_mayusculas))
enteros = [1, 2, 3, 4, 5]
cuadrados = map(lambda x: x**2, enteros)
print(list(cuadrados))
lista = [3, -5, -6, 1, 2, -9, 7, -2]
lista_filtrada = filter(lambda x: x <= 0, lista)
#La función lambda devuelve True si es que el número en cuestión es menor o igual a 0
print(list(lista_filtrada))
from functools import reduce
lista = [47, 11, 42, 13]
sumatoria = reduce(lambda x, y: x + y, lista)
print(sumatoria)
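# Additional sketch (not in the original notebook): reduce folds the list pairwise,
# (47 + 11) -> 58, (58 + 42) -> 100, (100 + 13) -> 113, which is equivalent to the
# explicit accumulation below.
acumulado = lista[0]
for elemento in lista[1:]:
    acumulado = acumulado + elemento
print(acumulado)  # 113, the same result reduce produced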
#Styling del notebook
from IPython.core.display import HTML
def css_styling():
styles = open("./styles/custom.css", "r").read()
return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Check all montages against a sphere
Step2: Check all montages against fsaverage
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Joan Massich <mailsik@gmail.com>
#
# License: BSD Style.
import os.path as op
import mne
from mne.channels.montage import get_builtin_montages
from mne.datasets import fetch_fsaverage
from mne.viz import set_3d_title, set_3d_view
for current_montage in get_builtin_montages():
montage = mne.channels.make_standard_montage(current_montage)
info = mne.create_info(
ch_names=montage.ch_names, sfreq=100., ch_types='eeg', montage=montage)
sphere = mne.make_sphere_model(r0='auto', head_radius='auto', info=info)
fig = mne.viz.plot_alignment(
# Plot options
show_axes=True, dig='fiducials', surfaces='head',
bem=sphere, info=info)
set_3d_view(figure=fig, azimuth=135, elevation=80)
set_3d_title(figure=fig, title=current_montage)
subjects_dir = op.dirname(fetch_fsaverage())
for current_montage in get_builtin_montages():
montage = mne.channels.make_standard_montage(current_montage)
# Create dummy info
info = mne.create_info(
ch_names=montage.ch_names, sfreq=100., ch_types='eeg', montage=montage)
fig = mne.viz.plot_alignment(
# Plot options
show_axes=True, dig='fiducials', surfaces='head', mri_fiducials=True,
subject='fsaverage', subjects_dir=subjects_dir, info=info,
coord_frame='mri',
trans='fsaverage', # transform from head coords to fsaverage's MRI
)
set_3d_view(figure=fig, azimuth=135, elevation=80)
set_3d_title(figure=fig, title=current_montage)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code::
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(ds.data, ds.target, test_size = 0.20)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dataset
Step2: Plots
Step3: Model from the least-squares normal equation
Step4: Now we plot the fitted line against the training data
Step5: Exercises
Step6: Regularizers
Step7: Expected variance and expected bias
Step8: Variance and bias
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from semana2_datos import *
X_1 = np.array([[1,x] for x, y in data_1])
Y_1 = np.array([y for x, y in data_1])
X_2 = np.array([[1,x] for x, y in data_2])
Y_2 = np.array([y for x, y in data_2])
X_3 = np.array([[1,x] for x, y in data_3])
Y_3 = np.array([y for x, y in data_3])
X_4 = np.array([[1,x] for x, y in data_4])
Y_4 = np.array([y for x, y in data_4])
X_real = np.array([[1,x] for x, y in data_real])
Y_real = np.array([y for x, y in data_real])
# data_1
x_1 = np.array([x for _, x in X_1])
plt.scatter(x_1, Y_1)
# data_1
W_1 = np.matmul(np.matmul(np.linalg.inv(np.matmul(X_1.T,
X_1)), X_1.T), Y_1)
f1 = lambda X: W_1[1]*X + W_1[0]
print('Los pesos aprendidos son\nw0: {}\nw1: {}'.format(W_1[0], W_1[1]))
# data_1
plt.scatter(x_1, Y_1)
lower = min(x_1)
upper = max(x_1)
sample_x1 = np.linspace(lower, upper, num=len(x_1))
plt.plot(sample_x1, f1(sample_x1), color='r')
# Polynomial regression
x_1 = np.expand_dims(x_1, axis=1)
poly = PolynomialFeatures(3)
features = poly.fit_transform(x_1)
features.shape
lin_reg = LinearRegression()
lin_reg.fit(features, Y_1)
sample_x1 = np.expand_dims(sample_x1, axis=1)
sample_x1_poly = poly.fit_transform(sample_x1)
y1_poly = lin_reg.predict(sample_x1_poly)
plt.scatter(x_1, Y_1)
plt.plot(sample_x1, y1_poly, color='g')
# Ridge para x_1
ridge = Ridge(alpha=2.5)
ridge.fit(features, Y_1)
y1_ridge = ridge.predict(sample_x1_poly)
plt.scatter(x_1, Y_1)
plt.plot(sample_x1, y1_ridge, color='r')
# Expected prediction
def E(f_x):
return float(sum(f_x)) / float(len(f_x))
x_real = np.array([x for _, x in X_real])
exp_pred_1 = E(f1(x_real))
avg_y_real = E(Y_real)
avg_y_real - exp_pred_1
# data 1
f1 = W_1[1]*sample_x1 + W_1[0]
f1_to_2 = f1 ** 2
E11 = E(f1_to_2)
E_f1 = E(f1)
E12 = E_f1 ** 2
E11 - E12
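# Additional sanity check (not in the original): E[f1^2] - E[f1]^2 is the population
# variance of the predictions, so it should match numpy's np.var on the same array.
print(np.var(f1))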
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Interactive help
Step2: We can explore the rest of the Pandas library's functionality by looking at its methods. To do so we type pd. and use Jupyter's built-in autocompletion via the Tab key to see which methods Pandas offers. If we then move down with the arrow key to, e.g., Categorical, press Enter and then Shift+Tab, the signature of that functionality and an excerpt of its help documentation appear. Pressing Shift+Tab twice shows the help in full.
Step3: Loading data
Step4: Next, we use the info() method to look at what Pandas has now written into the cdr variable.
Step5: DataFrame
Step6: We also see information about the data types in the DataFrame. The first two columns, In and Out, are of type int64, i.e. integers stored with 64 bits (so they can hold very large numbers). The other four columns are of type object, which in our case means that, for now, they contain plain character strings / text.
Step7: Now we can compute the average of the Duration column with mean().
Step8: We see that the average call duration is about six minutes.
Step9: As a next step we want to know where John Doe makes his calls. Thanks to the TowerId we know which cell tower John Doe was logged into when a call took place. The file data/towers.csv additionally contains the information at which geographic coordinate each cell tower is located, stored per TowerId as latitude and longitude. First we load the corresponding CSV file with read_csv(). Using index_col=0 we also specify that the first column of the DataFrame should be used as its index.
Step10: To merge this data with our existing CDR DataFrame we can use the join() method this time, since we created an index on the TowerID column in the towers DataFrame. The join() method can merge the data on that basis; we only have to specify which column of the john_doe DataFrame corresponds to the index column of the other DataFrame. We store the result in the variable john_doe_towers.
Step11: Now we can hardly wait to see where the calls took place. To do so, we plot the corresponding cell-tower coordinates with the visualization library "matplotlib".
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
pd?
pd.Categorical
cdr = pd.read_excel('data/CDR_data.xlsx', dtype={0:str, 1:str})
cdr.head()
cdr["Direction"].value_counts()
cdr.loc[cdr["Direction"] == "Incoming", "Dir"] = "->"
cdr.loc[cdr["Direction"] == "Missed", "Dir"] = "-X"
cdr['Call'] = cdr['Out'] + cdr['Dir'] + cdr['In']
cdr.head()
cdr["td"] = pd.to_timedelta(cdr['Duration'])
cdr["td"].head()
cdr['Start'] = pd.to_datetime(cdr['CallTimestamp'])
cdr['End'] = cdr['Start'] + cdr["td"]
cdr.head()
cdr.loc[cdr['Direction'] == "Missed", 'End'] = cdr['Start'] + pd.to_timedelta("00:00:01.00")
cdr.head()
cdr["NewDur"] = cdr['End'] - cdr['Start']
cdr[['Direction', 'NewDur', 'Duration']]
cdr[['Call', 'Start', 'End', 'TowerID']].to_csv('cdr_raw_data.csv', index=None)
pd.read_csv("cdr_raw_data.csv")
cdr[['Caller', 'Symbol', 'Callee']] = cdr['Call'].str.split("(->|-X)", expand=True)
cdr.head()
cdr['Action'] = cdr['Symbol'].map(
{
"->" : "Incoming",
"-X" : "Missed"
})
cdr.head()
writer = pd.ExcelWriter(r'C:\dev\repos\software-analytics\courses\python_pandas_basics\session0\output2.xlsx',
engine='xlsxwriter',
options={'strings_to_numbers': True})
cdr.to_excel(writer)
pd.read_excel("output.xlsx")
cdr.info()
len(cdr)
cdr.CallTimestamp = pd.to_datetime(cdr.CallTimestamp)
cdr.Duration = pd.to_timedelta(cdr.Duration)
cdr.info()
cdr.Duration.mean()
john_doe = cdr_names[ist_john_doe]
john_doe.head()
towers = pd.read_csv("data/towers.csv", index_col=0)
towers.head()
john_doe_towers = john_doe.join(towers, on='TowerID')
john_doe_towers.head()
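# Additional note (sketch, not in the original notebook): the same result could be obtained
# with pd.merge by joining the TowerID column against the index of the towers DataFrame.
john_doe_towers_alt = pd.merge(john_doe, towers, how='left', left_on='TowerID', right_index=True)
john_doe_towers_alt.head()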
import matplotlib.pyplot as plt
import matplotlib
# sagt Jupyter, dass die Plotausgabe direkt im Notebook passieren soll
%matplotlib inline
# macht die Plots schöner
matplotlib.style.use('ggplot')
ax = john_doe_towers.plot.scatter(
x='TowerLon',
y='TowerLat',
alpha=0.1,
title='Anruforte',
s = 100)
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters = 2)
data = pd.concat([john_doe_towers.TowerLon, john_doe_towers.TowerLat], axis = 1)
labels = kmeans.fit_predict(data)
centroids = kmeans.cluster_centers_
ax.scatter(x = centroids[:, 0], y = centroids[:, 1], c = 'r', marker = 'x', s = 100)
ax.figure
#showandtell() # TODO: Comment this line out when you're ready to proceed
data['TowerLon'].value_counts()
from geojson import GeometryCollection, Point
data['Point'] = data.apply(lambda x: Point((x['TowerLon'], x['TowerLat'])), axis=1)
data.head()
data.count()
gc = GeometryCollection(data['Point'].values.tolist())
gc
GeometryCollection([Point((x[0], x[1])) for x in centroids.tolist()])
joined['CallTimestamp'] = pd.to_datetime(joined['CallTimestamp'])
weekdays = joined['CallTimestamp'].dt.dayofweek.isin(Workweek) & joined['CallTimestamp'].dt.hour.isin(range(8,18))
dfweekdays = joined[weekdays]
dfweekdays.head()
targetname = 'John Doe'
user1 = joined[joined['Name'] == targetname]
#user1 = user1[weekdays]
user1 = user1.reset_index(drop=True)
user1.head()
#
# INFO: The locations map above should be too "busy" to really wrap your head around. This is where domain expertise comes into play.
# Your intuition tells you that people are likely to behave differently on weekends:
#
# On Weekends:
# 1. People probably don't go into work
# 2. They probably sleep in late on Saturday
# 3. They probably run a bunch of random errands, since they couldn't during the week
# 4. They should be home, at least during the very late hours, e.g. 1-4 AM
#
# On Weekdays:
# 1. People probably are at work during normal working hours
# 2. They probably are at home in the early morning and during the late night
# 3. They probably spend time commuting between work and home everyday
#
# TODO: Add more filters to the user1 slice you created. Add bitwise logic so that you're only examining records that came in on
# weekends (sat/sun).
#
# .. your code here ..
user1['DOW'] = user1.CallTimestamp.dt.strftime("%a")
user1 = user1[(user1.DOW == 'Sat') | (user1.DOW == 'Sun')]
user1.head()
#
# TODO: Further filter it down for calls that are came in either before 6AM OR after 10pm (22:00:00). You can use < and > to compare
# the string times, just make sure you code them as military time strings, eg: "06:00:00", "22:00:00":
# https://en.wikipedia.org/wiki/24-hour_clock
#
# You might also want to review the Data Manipulation section for this. Once you have your filtered slice, print out its length:
#
# .. your code here ..
user1 = user1[(user1.CallTimestamp < "06:00:00") | (user1.CallTimestamp > "22:00:00")]
user1.head()
#
# INFO: Visualize the dataframe with a scatter plot as a sanity check. Since you're familiar with maps, you know well that your
# X-Coordinate should be Longitude, and your Y coordinate should be the tower Latitude. Check the dataset headers for proper column
# feature names.
# https://en.wikipedia.org/wiki/Geographic_coordinate_system#Geographic_latitude_and_longitude
#
# At this point, you don't yet know exactly where the user is located just based off the cell phone tower position data; but
# considering the below are for Calls that arrived in the twilight hours of weekends, it's likely that wherever they are bunched up
# is probably near the caller's residence:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(user1.TowerLon,user1.TowerLat, c='g', marker='o', alpha=0.2)
ax.set_title('Weekend Calls (<6am or >10p)')
#showandtell() # TODO: Comment this line out when you're ready to proceed
#
# TODO: Run K-Means with a K=1. There really should only be a single area of concentration. If you notice multiple areas that are
# "hot" (multiple areas the usr spends a lot of time at that are FAR apart from one another), then increase K=2, with the goal being
# that one of the centroids will sweep up the annoying outliers; and the other will zero in on the user's approximate home location.
# Or rather the location of the cell tower closest to their home.....
#
# Be sure to only feed in Lat and Lon coordinates to the KMeans algo, since none of the other data is suitable for your purposes.
# Since both Lat and Lon are (approximately) on the same scale, no feature scaling is required. Print out the centroid locations and
# add them onto your scatter plot. Use a distinguishable marker and color.
#
# Hint: Make sure you graph the CORRECT coordinates. This is part of your domain expertise.
#
# .. your code here ..
#coordinates = "" + centroids[0][1].to_string().split('.')[0] + "°" + centroids[0][1].split('.')[1][0] + centroids[0][1].split('.')[1][1] + centroids[0][0]
#str.split(' ', 1 )
centroids
difference1 = centroids[0][1] - centroids[1][1]
difference2 = centroids[0][0] - centroids[1][0]
difference1 = 0.5 * difference1
difference2 = 0.5 * difference2
coordinate1 = centroids[0][1] + difference1
coordinate2 = centroids[0][0] + difference2
coordinates = str(coordinate1) + " " + str(coordinate2)
coordinates
#-96°90'92.4672"N 96°56'57.3"W
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: We first create an example automaton using HOA.
Step3: The following semi-deterministic automata demonstrate three strategies to "cut", i.e., which transitions of the non-deterministic component will be duplicated to jump into the deterministic part.
Step4: It is clear that each strategy adds more cut-transitions, but sometimes the regularity of the added transitions helps automata simplifications performed afterwards. In this case, we observe it makes very little differences.
Step6: powerset-on-cut
Step7: The unoptimized behaviour of seminator creates a 7-state automaton which is barely touched by the postprocessings of Spot (some non-determinism is removed on the initial state)
Step8: The orange state below will be avoided by the proposed change. The target states of cut-edges always hold exactly 1 state and this state is the same as the source of the red cut-edge. The powerset-successor of the state is thus always computed and reached anyway, now from the breakpoint state. The proposed idea substitutes the red edge by the green edge which makes the orange state unreachable.
|
<ASSISTANT_TASK:>
Python Code:
import spot
from spot.seminator import seminator
from spot.jupyter import display_inline
import buddy
spot.setup(show_default=".n")
aut1 = spot.automaton("""HOA: v1
             States: 3 Start: 0 AP: 1 "a"
             Acceptance: 1 Inf(0) --BODY--
             State: 0 [0] 0 [!0] 1 [0] 2
             State: 1 [!0] 1 [0] 0 {0}
             State: 2 [0] 2 {0} --END--"""); display(aut1)
def example(a, **opts):
# note: the pure=True option disables all optimizations that are usually on by default
default = seminator(a, highlight=True, pure=True, **opts)
default.set_name("default")
cutentry = seminator(a, highlight=True, pure=True, cut_on_scc_entry=True, **opts)
cutentry.set_name("cut-on-scc-entry")
cutalways = seminator(a, highlight=True, pure=True, cut_always=True, **opts)
cutalways.set_name("cut-always")
display_inline(default, cutentry, cutalways, per_row=3)
example(aut1, postprocess=False)
example(aut1, postprocess=True)
aut2 = spot.automaton("""HOA: v1 States: 2 Start: 0
AP: 1 "a" Acceptance: 2 Inf(0)&Inf(1) --BODY--
State: 0 [0] 0 {0} [0] 1 [!0] 0
State: 1 [!0] 1 {1} [!0] 0 [0] 1 --END--""")
display(aut2)
basic = seminator(aut2, pure=True, postprocess=False)
display(basic)
display(seminator(aut2, pure=True, postprocess=True))
a_ap = buddy.bdd_ithvar(basic.register_ap('a'))
basic.new_edge(1,3,-a_ap)
basic.highlight_state(2,2).highlight_edge(5,5).highlight_edge(16,4)
power = seminator(aut2, pure=True, powerset_on_cut=True, postprocess=False)
power.set_name("powerset-on-cut")
psimp = seminator(aut2, pure=True, powerset_on_cut=True, postprocess=True)
psimp.set_name("powerset-on-cut simplified")
display_inline(power, psimp)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data preprocessing
Step2: Regression tree
Step3: Randomly defined train and test set
Step4: Now, we want to find the max_depth value that minimizes the out-of-sample error using cross-validation.
Step5: Plotting the tree
Step6: Random Forest
Step7: The random forest model has a lot of parameters we can optimize to get a better fit for our model.
Step8: Even though with the random forest model we lose much of the interpretability that a simple decision tree gave us, we can still get the importance of each feature
Step9: GBM
Step10: XGBOOST
Step11: Classification problem
Step12: Optimize max_depth in classification problem
Step13: Random forest for classification problems
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import graphviz
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error as mse
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestRegressor
from sklearn import tree
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier, XGBRegressor
df = pd.read_csv('co_properties.csv.gz', compression='gzip', header=0, sep=',', quotechar='"', error_bad_lines=False)
def categoricas(df,lista):
dummies= pd.get_dummies(df[lista])
df=df.drop(columns=lista)
data = pd.concat([df, dummies], axis=1)
return data
df=df[df['operation_type']=='Venta']
df=df[df['property_type']=='Casa']
df=df[df['currency']=='COP']
df=df[df['l3']=='Bogotá D.C']
df=df.drop(columns=['l6','id','ad_type','start_date','created_on','l1','l2','l3','end_date','title','price_period','title','description','property_type','operation_type','currency'])
df=df.dropna(subset=['l4','l5','price'])
df=categoricas(df,['l4','l5'])
variables=['lat', 'lon', 'rooms', 'bedrooms', 'bathrooms', 'surface_total',
'surface_covered']
for i in variables:
df.loc[df[i].isnull()==True,i+'null']=1
df.loc[df[i].isnull()==False,i+'null']=0
df.loc[df[i].isnull()==True,i]=-1
data=df
df.columns
x_train, x_test, y_train, y_test = train_test_split(data.drop(columns=['price']),data['price'], test_size=0.30,
random_state=200,
shuffle=True)
tree2=DecisionTreeRegressor().fit(x_train,y_train)
mse(y_test, tree2.predict(x_test))
model = DecisionTreeRegressor()
gs = GridSearchCV(model,
param_grid = {'max_depth': range(1, 30)},
cv=10,
n_jobs=10,
scoring='neg_mean_squared_error')
cv_tree1=gs.fit(x_train, y_train)
gs.best_estimator_
mse(y_test, cv_tree1.predict(x_test))
from sklearn import tree
model=DecisionTreeRegressor(max_depth=3).fit(x_train,y_train)
data.columns
fn=data.columns
fig, axes = plt.subplots(nrows = 1,ncols = 1,figsize = (4,4), dpi=300)
tree.plot_tree(model,
feature_names = fn,
filled = True)
fig.savefig('regression_tree.png')
rf = RandomForestRegressor(random_state = 42).fit(x_train,y_train)
mse(y_test, rf.predict(x_test))
n_estimators = [50,100,150]
max_depth = [10,20,30,40]
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4]
param_grid = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
}
rf = RandomForestRegressor()
grid_search = GridSearchCV(estimator = rf, param_grid = param_grid,
cv = 5, n_jobs = 10, verbose = 2)
rf_cv=grid_search.fit(x_train, y_train)
grid_search.best_estimator_
importances1 =rf_cv.best_estimator_.feature_importances_
importances_df1=pd.DataFrame({'importances':importances1,'feauture':data.drop(columns=['price']).columns})
importances_df1=importances_df1.sort_values(by=['importances'],ascending=False)
importances_df1
fig = plt.figure(figsize = (10, 5))
plt.bar(importances_df1.feauture[:15],importances_df1.importances[:15], color ='maroon',
width = 0.4)
plt.xticks( rotation='vertical')
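# Additional sketch (not in the original notebook): impurity-based importances can be biased
# toward high-cardinality features, so permutation importance on the held-out set is a common
# cross-check (requires scikit-learn >= 0.22).
from sklearn.inspection import permutation_importance
perm = permutation_importance(rf_cv.best_estimator_, x_test, y_test, n_repeats=5, random_state=42)
pd.Series(perm.importances_mean, index=x_test.columns).sort_values(ascending=False).head(15)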
n_estimators = [700]
max_depth = [10,20,30,40]
min_samples_split = [2, 5, 10]
learning_rate=[0.15,0.05,0.01,0.005]
param_grid = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'learning_rate':learning_rate
}
gbm= GradientBoostingRegressor()
grid_search = GridSearchCV(estimator = gbm, param_grid = param_grid,
cv = 5, n_jobs = 7, verbose = 2,scoring='neg_mean_squared_error')
gbm_cv=grid_search.fit(x_train, y_train)
grid_search.best_estimator_
mse(y_test, gbm_cv.predict(x_test))
grid = {
'learning_rate': [0.01, 0.1,0.2,0.3,0.5],
'max_depth': [10,20,30,40,50],
'objective': ['reg:squarederror']
}
xgb_model = XGBRegressor()
gsearch = GridSearchCV(estimator = xgb_model,
param_grid = grid,
scoring = 'neg_mean_squared_error',
cv = 5,
n_jobs = 7,
verbose = 1)
xg_cv=gsearch.fit(x_train,y_train)
mse(y_test, xg_cv.predict(x_test))
xg_cv.best_estimator_
df2= pd.Series(xg_cv.best_estimator_.feature_importances_, list(data.drop(columns=['price']))).sort_values(ascending=False)
df2[:15].plot(kind='bar', title='Importance of Features')
plt.ylabel('Feature Importance Score')
data2=pd.read_csv('diabetes.csv')
data2
x1_train, x1_test, y1_train, y1_test = train_test_split(data2.drop(columns=['Outcome']),data2['Outcome'], test_size=0.10,
random_state=200,
shuffle=True)
data2.isnull().sum()
clf1=DecisionTreeClassifier(max_depth=3).fit(x1_train,y1_train)
y_pred=clf1.predict(x1_test)
accuracy_score(y1_test, y_pred)
model = DecisionTreeClassifier()
gs = GridSearchCV(model,
param_grid = {'max_depth': range(1, 30)},
cv=10,
n_jobs=10,
scoring='accuracy')
clf2=gs.fit(x1_train, y1_train)
gs.best_estimator_
y_pred1=clf2.predict(x1_test)
accuracy_score(y1_test, y_pred1)
rf_clf=RandomForestClassifier().fit(x1_train,y1_train)
y_pred2=rf_clf.predict(x1_test)
accuracy_score(y1_test, y_pred2)
rfcl = RandomForestClassifier()
n_estimators = [50,100,150]
max_depth = [10,20,30,40]
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4]
param_grid = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
}
grid_search = GridSearchCV(estimator = rfcl, param_grid = param_grid,
cv = 5, n_jobs = 10, verbose = 2)
rfcl_cv=grid_search.fit(x1_train, y1_train)
y_pred3=rfcl_cv.predict(x1_test)
accuracy_score(y1_test, y_pred3)
importances =rfcl_cv.best_estimator_.feature_importances_
importances_df=pd.DataFrame({'importances':importances,'feauture':data2.drop(columns=['Outcome']).columns})
importances_df
fig = plt.figure(figsize = (10, 5))
plt.bar(importances_df.feauture,importances_df.importances, color ='maroon',
width = 0.4)
plt.xticks( rotation='vertical')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Who has Grit?
Step2: AHHH
Step3: This is the way my quantitative data looks. Most of the column headers are self explanatory, but i'll go into further detail later.
Step4: Above is the quantitative means of the data i've acquired. Grit is a weighted compilation of penalty minutes, hits, blocked shots, and fights (making it somewhat subjective).
Step5: Since i'm primarily interested in players with the most Grit, i'm going to limit my searches to a higher percentile.
Step6: Of the original 900, these are the 10 players with the most Grit.
Step7: Above is how my qualitative data is structured. I've seperated the datasets for ease of manipulation.
Step8: Above are the hardiest players in the NHL, but how do they perform?
Step9: The two graphs above represent hits given and hits received respectively.
Step10: The above graph reflects the amount of games played during the season. The x-axis is simply the index value attributed to the player.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd #PandasPandas
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
print('PandaPandaPanda ', pd.__version__)
df=pd.read_csv('NHLQUANT.csv')
plt.plot(df.index,df['Grit'])
df.head(10)
df.mean()
df = df.apply(pd.to_numeric, errors='ignore')
y=df["Age"]
z=df["Grit"]
plt.plot(y,z)
df['Grit']>130
df.loc[df['Grit']>130]
df.loc[df['Grit']>300]
df.loc[df['Grit']>400]
Best=df.loc[df['Grit']>400]
Best.sort_values("Age").plot.barh('Age',"Grit")
QL=pd.read_csv("NHLQUAL.csv")
QL.head(5)
print(QL.at[61, "First Name"]+QL.at[61, 'Last Name'],QL.at[94, 'First Name']+QL.at[94, 'Last Name'],
QL.at[712, "First Name"]+QL.at[712, "Last Name"],QL.at[209, 'First Name']+QL.at[209, 'Last Name'],QL.at[306, "First Name"]+QL.at[306, 'Last Name'],QL.at[497, 'First Name']+QL.at[497, 'Last Name'],QL.at[524, 'First Name']+QL.at[524, 'Last Name'],QL.at[565, 'First Name']+QL.at[565, 'Last Name'],QL.at[641, 'First Name']+QL.at[641, 'Last Name'],QL.at[877, 'First Name']+QL.at[877, 'Last Name'])
Best.sort_values("Age").plot.barh('Age',"HitF")
Best.sort_values("Age").plot.barh('Age',"HitA")
Best.plot(Best.index ,'GP')
plt.ylim([60,85])
fig, ax=plt.subplots(nrows=2, ncols=1, sharex=True, sharey=True)
Best['G'].plot(ax=ax[0],color='green')
Best['A'].plot(ax=ax[1],color='red')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 6.1 Cube Arithmetic
Step2: Notice that the resultant cube's name is now unknown. Also, the resultant cube's attributes and cell_methods have disappeared; this is because these were different between the two input cubes.
Step3: Although a cube's units can be freely set to any valid unit, the calculation of result units and compatibility checking is built into the arithmetic operations.
Step4: <div class="alert alert-block alert-warning">
Step5: Note that you can update the cube's data and metadata directly, for instance by assigning to cube.data, cube.standard_name or cube.units. When you do this, though, you need to be careful that the metadata is still an accurate description. By changing data explicitly you are basically stating that the result is correct.
Step6: Another function of cube arithmetic is to support 'broadcasting', in the numpy sense
Step7: An even simpler example of broadcasting is doing arithmetic between a cube and a scalar value.
Step8: If the scalar is just a value, like this one, then it is assumed to have the same units as the cube.
Step9: To take the vertical mean of this cube
Step10: NOTE
Step11: In addition to "collapse", other types of statistical reductions are also possible. These also use aggregators to define the statistic. See the following documentation areas
Step12: 2. Calculate the difference between scenarios for a given year
Step13: 3. Plot E1, A1B and difference side by side
Step14: 4. Produce time sequences of regional average air temperatures
Step15: 5. Draw comparison line plots
|
<ASSISTANT_TASK:>
Python Code:
import iris
import numpy as np
a1b = iris.load_cube(iris.sample_data_path('A1B_north_america.nc'))
e1 = iris.load_cube(iris.sample_data_path('E1_north_america.nc'))
print(e1.summary(True))
print(a1b)
scenario_difference = a1b - e1
print(scenario_difference)
#
# edit space for user code
#
# SAMPLE SOLUTION
# %load solutions/iris_exercise_6.1b
six_feet = iris.cube.Cube(6.0, units='feet')
twelve_days = iris.cube.Cube(12.0, units='days')
print(six_feet / twelve_days)
#
# edit space for user code
#
# SAMPLE SOLUTION
# %load solutions/iris_exercise_6.1c
#
# edit space for user code
#
# SAMPLE SOLUTION
# %load solutions/iris_exercise_6.1d
#
# edit space for user code
#
# SAMPLE SOLUTION
# %load solutions/iris_exercise_6.1e
#
# edit space for user code
#
# SAMPLE SOLUTION
# %load solutions/iris_exercise_6.1f
fname = iris.sample_data_path('uk_hires.pp')
cube = iris.load_cube(fname, 'air_potential_temperature')
print(cube.summary(True))
print(cube.collapsed('model_level_number', iris.analysis.MEAN))
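# Additional sketch (illustrative only, not one of the exercises): the same collapse mechanism
# works over several coordinates at once, assuming the horizontal coordinates of this sample
# file are named 'grid_latitude' and 'grid_longitude'.
print(cube.collapsed(['grid_latitude', 'grid_longitude'], iris.analysis.MEAN).summary(True))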
#
# edit space for user code
#
# SAMPLE SOLUTION
# %load solutions/iris_exercise_6.2a
# user code ...
# SAMPLE SOLUTION
# %load solutions/iris_exercise_6.3a
# user code ...
# SAMPLE SOLUTION
# %load solutions/iris_exercise_6.3b
# user code ...
# SAMPLE SOLUTION
# %load solutions/iris_exercise_6.3c
# user code ...
# SAMPLE SOLUTION
# %load solutions/iris_exercise_6.3d
# user code ...
# SAMPLE SOLUTION
# %load solutions/iris_exercise_6.3e
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Vector creation
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
# From Python lists or iterators
n1 = np.array( [0,1,2,3,4,5,6] )
n2 = np.array( range(6) )
# Using numpy iterators
n3 = np.arange( 10, 20, 0.1)
n3
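# Additional examples (not in the original): other common NumPy constructors for vectors.
np.zeros(5)            # five zeros
np.ones(3)             # three ones
np.linspace(0, 1, 5)   # five evenly spaced values from 0 to 1, endpoints included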
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: The first thing we shall need is holdings data. For this example, we assume that holdings data is provided in a CSV format, and insert that CSV data into a node in the computation called holdings using the add_node method.
Step3: Initial Processing of Holdings Data
Step4: We can easily see the structure of a computation, although at this stage it is pretty simple. There is a node holdings, in dark green because we set its value, and so it is up-to-date. And there is a second node df_holdings, which depends on holdings, as shown by the arrow. The lighter green color tells us the df_holdings is computable, but not currently up-to-date, which we expect, as we have not told Loman to calculate it yet.
Step5: So we can go ahead and calculate df_holdings and check that it looks correct. Computation objects have an attribute-based accessor, v, which allows us to see the value of any node. It also works great with the auto-complete in interactive environments, such as IPython Notebook.
Step6: A quick sidenote
Step7: As before, we can compute nodes and inspect the results to make sure they are as we expect
Step8: Gathering Market Data
Step9: As before, we can compute and inspect the node to see what it contains. Yahoo returns a large number of fields, and we capture them all. For now, we will only use a small number of those fields, but there is no extra cost to capture them, and they may be useful for other calculations that we would want to add in the future - for example, we may want to calculate sets of alerts as prices fall below certain moving averages, say.
Step10: Scraping Equity Betas
Step11: And again, we quickly compute and inspect the result.
Step12: Futures Prices
Step13: We also need some static data for futures. In this case we need two things
Step14: Calculating P&Ls
Step15: The process is very similar for futures. df_future_pnl is created by merging the holdings, price and static data in df_holdings_future, df_future_prices and df_future_static_data respectively. We again calculate each of the P&L and exposure measures that we would like to see, but note that the treatment for futures is different. For example, intraday P&L is calculated as $(\text{Last Price} - \text{Close Price}) \times \text{Quantity} \times \text{Contract Unit}$.
Step16: The calculations for the cash component are more basic, as cash has no exposure by these measures
Step17: Finally, we add a node df_pnl, where we take all the rows from the equity, future and cash P&L and exposure DataFrames in df_equity_pnl, df_future_pnl and df_cash_pnl respectively. This gives us a position-by-position P&L report.
Step18: From there, various aggregations are possible. For example, we can easily see intraday P&L, and inception-to-date P&L
Step19: As well as seeing our exposure, and beta-adjusted exposure metrics across the whole portfolio.
Step20: We could even put the aggregated data itself into new nodes.
|
<ASSISTANT_TASK:>
Python Code:
import loman
comp = loman.Computation()
holdings = """Type,Symbol,Qty,CostBasis
Equity,AVGO,126,22680
Equity,EVHC,349,22685
Equity,STT,287,22673
Equity,DAL,454,22700
Equity,DY,283,22640
Future,ESM7,-1,0
Cash,USD,2000,"""
comp.add_node('holdings', value=holdings)
from io import StringIO
import pandas as pd
@loman.node(comp)
def df_holdings(holdings):
f = StringIO(holdings)
df = pd.read_csv(f)
return df
comp
comp.compute('df_holdings')
comp.v.df_holdings
def filter_df(df_holdings, type):
return df_holdings[df_holdings.Type == type]
comp.add_node('df_holdings_equity', filter_df, kwds={'type': loman.C('Equity')})
comp.add_node('df_holdings_future', filter_df, kwds={'type': loman.C('Future')})
comp.add_node('df_holdings_cash', filter_df, kwds={'type': loman.C('Cash')})
comp.compute_all()
comp.v.df_holdings_equity
comp.v.df_holdings_future
comp.v.df_holdings_cash
import yahoo_finance
def get_yahoo_equity_data(symbol):
s = yahoo_finance.Share(symbol)
return pd.Series(s.data_set)
@loman.node(comp)
def df_equity_yql_data_yahoo(df_holdings_equity):
return df_holdings_equity.Symbol.apply(get_yahoo_equity_data)
comp.compute_all()
comp.v.df_equity_yql_data_yahoo
import requests, bs4
def scrape_yahoo_data(symbol):
resp = requests.get('https://finance.yahoo.com/quote/{}?ltr=1'.format(symbol))
soup = bs4.BeautifulSoup(resp.content, 'lxml')
el = soup.find(attrs={'data-test': 'BETA-value'})
beta = float(el.text)
return pd.Series({'Symbol': symbol, 'Beta': beta})
@loman.node(comp)
def df_equity_scraped_data_yahoo(df_holdings_equity):
return df_holdings_equity.Symbol.apply(scrape_yahoo_data)
comp.compute_all()
comp.v.df_equity_scraped_data_yahoo
import numpy as np
import re
from dateutil import relativedelta
id_regex = re.compile('quotesFuturesProductTable1_(?P<Symbol>.{4})_(?P<Field>.*)')
def get_el_data(el):
m = id_regex.match(el['id'])
d = m.groupdict()
d['Value'] = el.text
return d
def try_float(x):
try:
return float(x)
except:
return np.nan
@loman.node(comp)
def df_future_prices():
resp = requests.get('http://www.cmegroup.com/trading/equity-index/us-index/e-mini-sandp500.html')
soup = bs4.BeautifulSoup(resp.content, 'lxml')
els = soup.findAll('td', attrs={'id': id_regex})
df = pd.DataFrame([get_el_data(el) for el in els])
df = df.set_index(['Symbol', 'Field']).Value.unstack()
for col in ['change', 'high', 'last', 'low', 'open', 'volume']:
df[col] = df[col].apply(try_float)
return df
comp.compute_all()
comp.v.df_future_prices
data = [
['ESM7', 50, 'USD', 1.],
['ESU7', 50, 'USD', 1.],
['ESZ7', 50, 'USD', 1.],
['ESH8', 50, 'USD', 1.],
['ESM8', 50, 'USD', 1.],
]
df = pd.DataFrame(data, columns=['Symbol', 'UnitAmount', 'UnitCurrency', 'Beta'])
comp.add_node('df_future_static_data', value=df)
@loman.node(comp)
def df_equity_pnl(df_holdings_equity, df_equity_yql_data_yahoo, df_equity_scraped_data_yahoo):
# Merge DataFrames
df = pd.merge(df_holdings_equity, df_equity_yql_data_yahoo[['Symbol', 'PreviousClose', 'LastTradePriceOnly']],
how='left', on='Symbol')
df.rename(columns={'PreviousClose': 'Close', 'LastTradePriceOnly': 'Last'}, inplace=True)
for col in ['Close', 'Last']:
df[col] = df[col].astype(float)
df = pd.merge(df, df_equity_scraped_data_yahoo, how='left', on='Symbol')
# Calculate Measures
df['CloseValue'] = df.Qty * df.Close
df['CurrentValue'] = df.Qty * df.Last
df['PNL_Day'] = df.CurrentValue - df.CloseValue
df['PNL_ITD'] = df.CurrentValue - df.CostBasis
df['CurrentExposure'] = df.CurrentValue
df['CurrentBetaAdjExposure'] = df.Beta * df.CurrentExposure
return df
comp.compute_all()
comp.v.df_equity_pnl
@loman.node(comp)
def df_future_pnl(df_holdings_future, df_future_prices, df_future_static_data):
# Merge DataFrames
df = pd.merge(df_holdings_future, df_future_prices[['priorSettle', 'last']], how='left', left_on='Symbol', right_index=True)
df.rename(columns={'priorSettle': 'Close', 'last': 'Last'}, inplace=True)
for col in ['Close', 'Last']:
df[col] = df[col].astype(float)
df = pd.merge(df, df_future_static_data, how='left', on='Symbol')
# Calculate Measures
df['PNL_Day'] = (df.Last - df.Close) * df.Qty * df.UnitAmount
df['CloseValue'] = 0
df['CurrentValue'] = df.PNL_Day
df['PNL_ITD'] = df.CurrentValue - df.CostBasis
df['CurrentExposure'] = df.Last * df.Qty * df.UnitAmount
df['CurrentBetaAdjExposure'] = df.Beta * df.CurrentExposure
return df
comp.compute_all()
comp.v.df_future_pnl
@loman.node(comp)
def df_cash_pnl(df_holdings_cash):
df = df_holdings_cash.copy()
df['Close'] = np.nan
df['Last'] = np.nan
df['PNL_Day'] = df['PNL_ITD'] = 0.
df['CloseValue'] = df['CurrentValue'] = df['CostBasis'] = df.Qty
df['CurrentExposure'] = df['CurrentBetaAdjExposure'] = 0.
df['Beta'] = 0
return df
comp.compute_all()
comp.v.df_cash_pnl
@loman.node(comp)
def df_pnl(df_equity_pnl, df_future_pnl, df_cash_pnl):
df = pd.concat([df_equity_pnl, df_future_pnl, df_cash_pnl])
df = df[['Type', 'Symbol', 'Qty', 'Close', 'Last', 'CostBasis', 'CloseValue', 'CurrentValue', 'PNL_ITD', 'PNL_Day', 'Beta', 'CurrentExposure', 'CurrentBetaAdjExposure']]
return df
comp.compute_all()
comp.v.df_pnl
comp.v.df_pnl.PNL_Day.sum()
comp.v.df_pnl.PNL_ITD.sum()
comp.v.df_pnl[['CurrentExposure', 'CurrentBetaAdjExposure']].sum()
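# Sketch (assumption: one possible way to follow the note about putting aggregated data into
# new nodes, mirroring the @loman.node pattern used above; not taken verbatim from the original).
@loman.node(comp)
def pnl_summary(df_pnl):
    return df_pnl[['PNL_Day', 'PNL_ITD', 'CurrentExposure', 'CurrentBetaAdjExposure']].sum()
comp.compute_all()
comp.v.pnl_summary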
comp.draw(graph_attr={'size': '"8,6"'})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sum function
Step2: Let's %timeit
Step3: Factorial functions
Step4: Let's %timeit
Step7: Dbscan clustering algorithm
Step8: Dbscan with F2Py
Step9: Dbscan with Numba
Step10: Modifying the code
Step11: Let's %timeit
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from numba import jit
import math
import dbscanf2py # Import the extension module file dbscanf2py.so
# A pure python funcion
def sum_0(arr):
M, N = arr.shape
result = 0
for i in range(M):
for j in range(N):
result += arr[i,j]
return result
# A NumPy version of the previous funcion
def sum_1(arr):
return np.sum(arr)
# The numba-optimized version of the first function
@jit
def sum_2(arr):
M, N = arr.shape
result = 0
for i in range(M):
for j in range(N):
result += arr[i,j]
return result
# The numba-optimized version of the second function
@jit
def sum_3(arr):
return np.sum(arr)
# Fortran function, brought to you by F2Py
import array_sum
# an array to test our functions
a = np.arange(65536).reshape(256,256)
%timeit sum_0(a) # Python
%timeit sum_1(a) # NumPy
%timeit sum_2(a) # Numba
%timeit sum_3(a) # NumPy + Numba
%timeit array_sum.array_sum(a) # F2Py
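# Additional sanity check (not in the original): the implementations should agree on the
# result before their timings are compared.
sum_0(a) == sum_1(a) == sum_2(a) == array_sum.array_sum(a)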
x = [8, 16, 32, 64, 128, 256]
y_py = [0.0000236, 0.0000865, 0.000323, 0.00125, 0.00544, 0.0217]
y_np = [0.00000555, 0.00000543, 0.00000633, 0.00000883, 0.0000189, 0.0000541]
y_nu = [0.000000329, 0.000000422, 0.000000667, 0.00000177, 0.00000605, 0.0000245]
y_npnu = [0.000000302, 0.00000038, 0.000000681, 0.00000229, 0.0000083, 0.0000343]
y_f2py = [0.000000935, 0.00000125, 0.00000198, 0.00000448, 0.0000254, 0.000139]
y_fo_o0 = [2.00001523e-06, 3.99991404e-06, 9.00006853e-06, 3.20002437e-05, 5.90002164E-05, 5.69000840e-04]
y_fo_o3 = [1.00000761e-06, 1.00000761E-06, 1.00000761E-06, 1.00000761E-06, 1.00000761E-06, 1.00000761E-06]
fig = plt.figure(figsize=(12, 10), dpi=100)
plt.plot(x, y_py, 'cs--', label="Python")
plt.plot(x, y_np, 'bs--', label="Numpy")
plt.plot(x, y_nu, 'gs--', label="Numba")
plt.plot(x, y_npnu, "ks--", label="Numpy + Numba")
plt.plot(x, y_f2py, 'rs--', label="F2Py")
plt.plot(x, y_fo_o0, "ms--", label="Fortran -O0")
plt.plot(x, y_fo_o3, "ys--", label="Fortran -O3")
plt.ylabel('Seconds')
plt.xlabel("Square matrix dimensions")
ax = fig.add_subplot(1,1,1)
ax.set_yscale('log')
plt.legend(loc=2);
# Pure Python
def factorial_0(n):
Ans = 1
for i in range(1, n+1):
Ans = Ans * i
return Ans
# NumPy function
def factorial_1(n):
return np.math.factorial(n)
# Numba-optimized pure Python function
@jit
def factorial_2(n):
Ans = 1
for i in range(1, n+1):
Ans = Ans * i
return Ans
# Numba-optimized NumPy function
@jit
def factorial_3(n):
return np.math.factorial(n)
# Recursive Fortran function, compiled with F2Py
import factorial1f2py
# Non-recursive Fortran function, compiled with F2Py
import factorial2f2py
%timeit factorial_0(12)
%timeit factorial_1(12)
%timeit factorial_2(12)
%timeit factorial_3(12)
%timeit factorial1f2py.factorial(12)
%timeit factorial2f2py.factorial(12)
x = [1, 3, 6, 12]
y_py = [673, 814, 1030, 1460]
y_np = [342, 376, 430, 601]
y_nu = [256, 258, 274, 262]
y_npnu = [388, 409, 476, 622]
y_f2py1 = [195, 200, 234, 252]
y_f2py2 = [205, 198, 204, 218]
fig = plt.figure(figsize=(12, 10), dpi=100)
plt.plot(x, y_py, 'cs--', label="Python")
plt.plot(x, y_np, 'bs--', label="Numpy")
plt.plot(x, y_nu, 'gs--', label="Numba")
plt.plot(x, y_npnu, "ks--", label="Numpy + Numba")
plt.plot(x, y_f2py1, 'rs--', label="F2Py 1")
plt.plot(x, y_f2py2, "ms--", label="F2Py 2")
plt.ylabel('Nanoseconds')
plt.xlabel("Factorial")
ax = fig.add_subplot(1,1,1)
#ax.set_yscale('log')
plt.legend(loc=2);
UNCLASSIFIED = False
NOISE = None
def _dist(p,q):
return math.sqrt(np.power(p-q,2).sum())
def _eps_neighborhood(p,q,eps):
return _dist(p,q) < eps
def _region_query(m, point_id, eps):
n_points = m.shape[1]
seeds = []
for i in range(0, n_points):
if not i == point_id:
if _eps_neighborhood(m[:,point_id], m[:,i], eps):
seeds.append(i)
return seeds
def _expand_cluster(m, classifications, point_id, cluster_id, eps, min_points):
seeds = _region_query(m, point_id, eps)
if len(seeds) < min_points:
classifications[point_id] = NOISE
return False
else:
classifications[point_id] = cluster_id
for seed_id in seeds:
classifications[seed_id] = cluster_id
while len(seeds) > 0:
current_point = seeds[0]
results = _region_query(m, current_point, eps)
if len(results) >= min_points:
for i in range(0, len(results)):
result_point = results[i]
if classifications[result_point] == UNCLASSIFIED or \
classifications[result_point] == NOISE:
if classifications[result_point] == UNCLASSIFIED:
seeds.append(result_point)
classifications[result_point] = cluster_id
seeds = seeds[1:]
return True
def dbscan(m, eps, min_points):
    """Implementation of Density Based Spatial Clustering of Applications with Noise
    See https://en.wikipedia.org/wiki/DBSCAN
    scikit-learn probably has a better implementation
    Uses Euclidean Distance as the measure
    Inputs:
    m - A matrix whose columns are feature vectors
    eps - Maximum distance two points can be to be regionally related
    min_points - The minimum number of points to make a cluster
    Outputs:
    An array with either a cluster id number or dbscan.NOISE (None) for each
    column vector in m.
    """
cluster_id = 1
n_points = m.shape[1]
classifications = [UNCLASSIFIED] * n_points
for point_id in range(0, n_points):
#point = m[:,point_id]
if classifications[point_id] == UNCLASSIFIED:
if _expand_cluster(m, classifications, point_id, cluster_id, eps, min_points):
cluster_id = cluster_id + 1
return classifications
def test_dbscan():
m = np.matrix('1 1.2 0.8 3.7 3.9 3.6 10; 1.1 0.8 1 4 3.9 4.1 10')
eps = 0.5
min_points = 2
assert dbscan(m, eps, min_points) == [1, 1, 1, 2, 2, 2, None]
data = np.genfromtxt('easy_clust.txt')
x = data[:,0]
y = data[:,1]
fig = plt.figure(figsize=(12, 10), dpi=100)
plt.plot(x, y, 's');
m = np.array([x, y])
eps = 30
min_points = 5
clusters = dbscan(m, eps, min_points)
#results
cnone = [[], []]
cone = [[], []]
ctwo = [[], []]
cthree = [[], []]
for index in range (len(clusters)):
if clusters[index] == None:
cnone[0].append(m[0, index])
cnone[1].append(m[1, index])
if clusters[index] == 1:
cone[0].append(m[0, index])
cone[1].append(m[1, index])
if clusters[index] == 2:
ctwo[0].append(m[0, index])
ctwo[1].append(m[1, index])
if clusters[index] == 3:
cthree[0].append(m[0, index])
cthree[1].append(m[1, index])
cone = np.array([cone[0], cone[1]])
cnone = np.array([cnone[0], cnone[1]])
ctwo = np.array([ctwo[0], ctwo[1]])
#print cone[0]
#print cnone
#print ctwo
fig = plt.figure(figsize=(12, 10), dpi=100)
plt.plot(cnone[0], cnone[1], 'k^', cone[0], cone[1], 'bs', ctwo[0], ctwo[1], "gs", cthree[0], cthree[1], "rs");
def make_circles(n=500, scatter=3):
    """Draw two concentric circles made of scattered points
    Parameters
    ----------
    n: int. Number of points in each circle
    scatter. float. Control the spread of the points.
    """
r0 = 50 + np.random.normal(loc=0, scale=scatter, size=n)
r1 = 25 + np.random.normal(loc=0, scale=scatter, size=n)
r = np.concatenate((r0, r1))
theta0 = np.linspace(0, np.pi*2, n)
theta = np.concatenate((theta0, theta0))
x = r * np.cos(theta)
y = r * np.sin(theta)
return np.vstack((x, y)).T
data_circ = make_circles(200, 3)
x = data_circ[:,0]
y = data_circ[:,1]
fig = plt.figure(figsize=(12, 10), dpi=100)
plt.plot(x, y, 's');
m = np.array([x, y])
eps = 9
min_points = 2
clusters = dbscan(m, eps, min_points)
cnone = [[], []]
cone = [[], []]
ctwo = [[], []]
cthree = [[], []]
for index in range (len(clusters)):
if clusters[index] == None:
cnone[0].append(m[0, index])
cnone[1].append(m[1, index])
if clusters[index] == 1:
cone[0].append(m[0, index])
cone[1].append(m[1, index])
if clusters[index] == 2:
ctwo[0].append(m[0, index])
ctwo[1].append(m[1, index])
if clusters[index] == 3:
cthree[0].append(m[0, index])
cthree[1].append(m[1, index])
cone = np.array([cone[0], cone[1]])
cnone = np.array([cnone[0], cnone[1]])
ctwo = np.array([ctwo[0], ctwo[1]])
fig = plt.figure(figsize=(12, 10), dpi=100)
plt.plot(cnone[0], cnone[1], 'k^', cone[0], cone[1], 'bs', ctwo[0], ctwo[1], "gs", cthree[0], cthree[1], "rs");
%timeit dbscan(m, eps, min_points)
print dbscanf2py.dist.__doc__ #F2Py automatically generates a docstring for our functions
def _eps_neighborhood(p,q,eps):
return dbscanf2py.dist(p,q) < eps
%timeit dbscan(m, eps, min_points)
@jit
def _dist(p,q):
return math.sqrt(np.power(p-q,2).sum())
%timeit dbscan(m, eps, min_points)
@jit
def _dist(p,q):
return (sum((p-q)**2))**0.5
%timeit dbscan(m, eps, min_points)
x = [250, 500, 1000, 2000]
y_np = [3.35, 12.4, 52.9, 217]
y_num = [2.74, 11.5, 45.1, 180]
y_nu = [3.57, 13.8, 57.8, 232]
y_f2py = [0.629, 2.64, 9.74, 38.6]
fig = plt.figure(figsize=(12, 10), dpi=100)
plt.plot(x, y_np, 'bs--', label="Numpy")
plt.plot(x, y_num, 'gs--', label="Numba")
plt.plot(x, y_nu, 'ks--', label="Numba + Numpy")
plt.plot(x, y_f2py, 'rs--', label="F2Py")
plt.ylabel('Seconds')
plt.xlabel("Points")
ax = fig.add_subplot(1,1,1)
#ax.set_yscale('log')
plt.legend(loc=2);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fix the random number generator for reproducibility
Step2: Homework!
Step3: We will measure quality using the mean squared error metric
Step4: <div class="panel panel-info" style="margin
Step5: <div class="panel panel-info" style="margin
Step6: <div class="panel panel-info" style="margin
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_boston
bunch = load_boston()
print(bunch.DESCR)
X, y = pd.DataFrame(data=bunch.data, columns=bunch.feature_names.astype(str)), bunch.target
X.head()
SEED = 22
np.random.seed(SEED)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)
X_train.shape, y_train.shape, X_test.shape, y_test.shape
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
clf = LinearRegression()
clf.fit(X_train, y_train);
print('Вышла средняя ошибка, равная %5.4f' % \
(-np.mean(cross_val_score(clf, X_test, y_test, cv=5, scoring='neg_mean_squared_error'))))
from sklearn.linear_model import SGDRegressor
from sklearn.preprocessing import StandardScaler
ss_X, ss_y = StandardScaler(), StandardScaler()
X_scaled = ss_X.fit_transform(X_train)
y_scaled = ss_y.fit_transform(y_train.reshape(-1, 1)).ravel()
sgd = SGDRegressor()
sgd.fit(X_scaled, y_scaled);
print('Вышла средняя ошибка, равная %5.4f' % \
(-np.mean(cross_val_score(sgd, X_scaled, y_scaled, cv=5, scoring='neg_mean_squared_error'))))
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import RidgeCV
############Ridge
params = {
'alpha': [10**x for x in range(-2,3)]
}
from sklearn.linear_model import Ridge
gsR = RidgeCV(alphas=params['alpha'])  # or: GridSearchCV(Ridge(), param_grid=params)
gsR.fit(X_train, y_train);
print('The mean error is %5.4f' % \
(-np.mean(cross_val_score(gsR, X_test, y_test, cv=5, scoring='neg_mean_squared_error'))))
############Lasso
from sklearn.linear_model import Lasso
from sklearn.linear_model import LassoCV
gsL = GridSearchCV(Lasso(), param_grid=params) #LassoCV() - медленнее
gsL.fit(X_train, y_train);
print('The mean error is %5.4f' % \
(-np.mean(cross_val_score(gsL, X_test, y_test, cv=5, scoring='neg_mean_squared_error'))))
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import ElasticNetCV
gsE = GridSearchCV(ElasticNet(), param_grid=params) #ElasticNetCV() - просто заменить, не слишком точен
gsE.fit(X_train, y_train);
print('The mean error is %5.4f' % \
(-np.mean(cross_val_score(gsE, X_test, y_test, cv=5, scoring='neg_mean_squared_error'))))
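# PolynomialFeatures and make_pipeline were imported above but never used.
# A minimal sketch of a polynomial pipeline (the degree and alpha below are
# illustrative choices, not tuned values):
poly_ridge = make_pipeline(PolynomialFeatures(degree=2), StandardScaler(), Ridge(alpha=1.0))
poly_ridge.fit(X_train, y_train)
print('The mean error is %5.4f' % \
      (-np.mean(cross_val_score(poly_ridge, X_test, y_test, cv=5, scoring='neg_mean_squared_error'))))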
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting the Skywater PDK
Step2: For my purposes, I only needed a simple NFET and PFET to build some logic gates. I figured 1.8V versions of these would be found in the nfet_01v8 and pfet_01v8 subdirectories, but I wasn't expecting all these files
Step3: After some further poking about and reading, I categorized the files as follows
Step4: Knowing where the device files are and what they contain is great, but how do I actually use them? It turns out there is a master library file
Step5: The progress messages indicate a few errors in the libraries (maybe they're corrected by now), but I'm not using those particular devices so I'm not going to worry about it. From the list above, I can pick out the 1.8V general-purpose transistors I want, but I also need to specify their gate dimensions so the right model gets loaded. I picked out a small NMOS FET, and then a PMOS FET that's 3x the width. (That's because I learned in my 1983 VLSI class that PMOS transistors have 3x the resistance-per-square of NMOS ones, so make them wider to make the current-driving capability about the same in each.)
Step6: Now I can extract the NMOS and PMOS transistors from the library and use them to build logic gates
Step8: Some Infrastructure
Step10: In addition to an oscilloscope, every electronics bench has a signal generator. For my purposes, I only need a simple function that generates one or more square waves whose frequencies decrease by a factor of two. (The collection of square waves looks like the output of a binary counter, hence the name.)
Step12: All the circuits in this notebook will run from a 1.8V supply, so the following function instantiates a global power supply and a $V_{dd}$ voltage rail for them to use
Step13: Finally, here are some convenience functions that 1) generate a netlist from the SKiDL code and use that to create a PySpice simulator object, 2) use the simulator object to perform a DC-level analysis, 3) use the simulator to perform a transient analysis, and 4) count the number of transistors in a circuit.
Step14: With the infrastructure in place, I can begin building logic gates, starting from the simplest one I know.
Step15: First, I'll test the inverter's transfer function by attaching a voltage ramp to its input and see when the output transitions. (For those playing at home, you may notice the SPICE simulations take a minute or two to run. These transistor models are complicated.)
Step16: For a low-level input, the inverter's output is high and vice-versa as expected. From the shape of the transfer curve, I'd estimate the inverter's trigger point is around 0.8V.
Step17: As we learned in our textbooks so long ago, the quiescent current for CMOS logic is near zero but surges as the input voltage goes through the transition zone when both transistors are ON. For this inverter, the current maxes out at about 13 $\mu$A at the trigger point.
Step18: There is a bit of ringing on the inverter's output but no appreciable propagation delay, probably because there is no real load on the output. In order to get more delay, I'll cascade thirty inverters together and look at the output of the last one
Step19: Thirty cascaded inverters creates a total delay of around 0.65 ns, so each inverter contributes about 20 ps. This simulation doesn't include things like wiring delays, so don't get your hopes up about running at 50 GHz.
Step20: Thirty NMOS and thirty PMOS transistors. We're good to go.
Step21: Like with the inverter, I'll do a transient analysis but using two square waves to drive both NAND inputs
Step22: The NAND gate output only goes low when both inputs are high, as expected. Ho hum.
Step23: The output only goes high when the inputs have opposite values, so the XOR gate is working correctly.
Step24: I'll use a cntgen() with three outputs to apply all eight input combinations to the full-adder
Step25: The sum and carry-out bits of the full-adder match the truth-table for all the combinations of A, B and the carry input.
Step26: I'll instantiate a two-bit adder and test it with all 32 input combinations of A$_0$, A$_1$, B$_0$, B$_1$, and C$_{in}$
Step30: The outputs look like they might be correct, but I'm not going to waste my time trying to eyeball it when Python can do that. The following code subsamples the waveforms and converts them into a table of integers for the adder's inputs and outputs
Step31: That's better, but even checking all the table entries is too much work so I'll write a little code to do that
Step33: OK, at this point I'm convinced I have a working two-bit adder. And I can make any size adder I want just by changing the input and output bus widths.
Step34: The SKiDL implementation for half of this flip-flop creates a latch that allows data to enter and pass through when the write-enable is active, and then latches the data bit with a feedback gate when the write-enable is not asserted
Step35: By cascading two of these latches, I arrive at the complete flip-flop
Step36: A simple test shows the flip-flop retains data and the output only changes upon the rising edge of the write-enable (after a small propagation delay)
Step37: Once I have a basic flip-flop, it's easy to build multi-bit registers
Step38: The Simplest State Machine
Step39: Now just give it a clock and watch it go!
Step40: Looking at the counter bits shows its obviously incrementing 0, 1, 2, ..., 7, 0, ... The bottom trace shows the pulses of supply current on every clock edge. (Remember that whole current-pulse-during-input-transition thing?) But how much energy is being used? Multiplying the supply current by its output voltage and summing over time will answer that
Step41: As for the total number of transistors in the counter ...
Step43: Bonus
Step45: By setting the sum and carry opcodes appropriately, I can build a subtractor from the ALU
Step46: Now I'll test the subtractor just as I did previously with the adder
Step47: Extra Bonus
Step48: From the waveforms, it's obvious the counter is decrementing
Step49: And it uses about 2.5x the number of transistors (402 versus 162)
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd # For data frames.
import matplotlib.pyplot as plt # For plotting.
from skidl.pyspice import * # For describing circuits and interfacing to ngspice.
!ls -F ~/tmp/skywater-pdk/libraries/sky130_fd_pr/latest/cells/
!ls -F ~/tmp/skywater-pdk/libraries/sky130_fd_pr/latest/cells/nfet_01v8
import pandas as pd
nfet_sizes = pd.read_table("~/tmp/skywater-pdk/libraries/sky130_fd_pr/latest/cells/nfet_01v8/sky130_fd_pr__nfet_01v8.bins.csv", delimiter=",")
pfet_sizes = pd.read_table("~/tmp/skywater-pdk/libraries/sky130_fd_pr/latest/cells/pfet_01v8/sky130_fd_pr__pfet_01v8.bins.csv", delimiter=",")
pd.concat((nfet_sizes, pfet_sizes), axis=1)
# Select a particular corner using tt, ff, fs, sf, ss, hh, hl, lh, ll.
corner = "tt" # Use typical transistor models.
# Create a SKiDL library for the Skywater devices at that process corner.
sky_lib = SchLib(
"/home/devb/tmp/skywater-pdk/libraries/sky130_fd_pr/latest/models/sky130.lib.spice",
lib_section=corner, # Load the transistor models for this corner.
recurse=True, # The master lib includes sublibraries, so recurse thru them to load everything.
)
print(sky_lib) # Print the list of devices in the library.
nfet_wl = Parameters(W=0.42, L=0.15)
pfet_wl = Parameters(W=1.26, L=0.15) # 3x the width of the NMOS FET.
nfet = Part(sky_lib, "sky130_fd_pr__nfet_01v8", params=nfet_wl)
pfet = Part(sky_lib, "sky130_fd_pr__pfet_01v8", params=pfet_wl)
disp_vmin, disp_vmax = -0.4@u_V, 2.4@u_V
disp_imin, disp_imax = -10@u_mA, 10@u_mA
def oscope(waveforms, *nets_or_parts):
Plot selected waveforms as a stack of individual traces.
Args:
waveforms: Complete set of waveform data from ngspice simulation.
nets_or_parts: SKiDL Net or Part objects that correspond to individual waveforms.
    Voltage and current display limits are taken from the global
    disp_vmin/disp_vmax and disp_imin/disp_imax variables.
# Determine if this is a time-series plot, or something else.
try:
x = waveforms.time # Sample times are used for the data x coord.
except AttributeError:
# Use the first Net or Part data to supply the x coord.
nets_or_parts = list(nets_or_parts)
x_node = nets_or_parts.pop(0)
x = waveforms[node(x_node)]
# Create separate plot traces for each selected waveform.
num_traces = len(nets_or_parts)
trace_hgt = 1.0 / num_traces
fig, axes = plt.subplots(nrows=num_traces, sharex=True, squeeze=False,
subplot_kw=None, gridspec_kw=None)
traces = axes[:,0]
# Set the X axis label on the bottom-most trace.
if isinstance(x.unit, SiUnits.Second):
xlabel = 'Time (S)'
elif isinstance(x.unit, SiUnits.Volt):
xlabel = x_node.name + ' (V)'
elif isinstance(x.unit, SiUnits.Ampere):
xlabel = x_node.ref + ' (A)'
traces[-1].set_xlabel(xlabel)
# Set the Y axis label position for each plot trace.
trace_ylbl_position = dict(rotation=0,
horizontalalignment='right',
verticalalignment='center',
x=-0.01)
# Plot each Net/Part waveform in its own trace.
for i, (net_or_part, trace) in enumerate(zip(nets_or_parts, traces), 1):
y = waveforms[node(net_or_part)] # Extract the waveform data
# Set the Y axis label depending upon whether data is voltage or current.
if isinstance(y.unit, SiUnits.Volt):
trace.set_ylim(float(disp_vmin), float(disp_vmax))
trace.set_ylabel(net_or_part.name + ' (V)', trace_ylbl_position)
elif isinstance(y.unit, SiUnits.Ampere):
trace.set_ylim(float(disp_imin), float(disp_imax))
trace.set_ylabel(net_or_part.ref + ' (A)', trace_ylbl_position)
# Set position of trace within stacked traces.
trace.set_position([0.1, (num_traces-i) * trace_hgt, 0.8, trace_hgt])
# Place grid on X axis.
trace.grid(axis='x', color='orange', alpha=1.0)
# Plot the waveform data.
trace.plot(x, y)
default_freq = 500@u_MHz # Specify a default frequency so it doesn't need to be set every time.
def cntgen(*bits, freq=default_freq):
Generate one or more square waves varying in frequency by a factor of two.
Args:
bits: One or more Net objects, each of which will carry a square wave.
bit_period = 1.0/freq
for bit in bits:
# Create a square-wave pulse generator with the current period.
pulse = PULSEV(initial_value=vdd_voltage, pulsed_value=0.0@u_V,
pulse_width=bit_period/2, period=bit_period)
# Attach the pulse generator between ground and the net that carries the square wave.
gnd & pulse["n, p"] & bit
# Double the period (halve the frequency) for each successive bit.
bit_period = 2 * bit_period
default_voltage = 1.8@u_V # Specify a default supply voltage.
def pwr(voltage=default_voltage):
Create a global power supply and voltage rail.
# Clear any pre-existing circuitry. (Start with a clear slate.)
reset()
# Global variables for the power supply and voltage rail.
global vdd_ps, vdd, vdd_voltage
# Create a power supply and attach it between the Vdd rail and ground.
vdd_voltage = voltage
vdd_ps = V(ref="VDD_SUPPLY", dc_value=vdd_voltage)
vdd = Net("Vdd")
vdd & vdd_ps["p, n"] & gnd
get_sim = lambda : generate_netlist().simulator() # Compile netlist & create simulator.
do_dc = lambda **kwargs: get_sim().dc(**kwargs) # Run a DC-level analysis.
do_trans = lambda **kwargs: get_sim().transient(**kwargs) # Run a transient analysis.
def how_big(circuit=default_circuit):
from collections import defaultdict
parts = defaultdict(lambda: 0)
for p in circuit.parts:
parts[p.name] += 1
for part_name, num_parts in parts.items():
print(f"{part_name}: {num_parts}")
@package
def inverter(a=Net(), out=Net()):
# Create the NFET and PFET transistors.
qp, qn = pfet(), nfet()
# Attach the NFET substrate to ground and the PFET substrate to Vdd.
gnd & qn.b
vdd & qp.b
# Connect Vdd through the PFET source-to-drain on to the output node.
# From the output node, connect through the NFET drain-to-source to ground.
vdd & qp["s,d"] & out & qn["d,s"] & gnd
# Attach the input to the NFET and PFET gate terminals.
a & qn.g & qp.g
pwr() # Apply power to the circuitry.
inv = inverter() # Create an inverter.
# Attach a voltage source between ground and the inverter's input.
# Then attach the output to a net.
gnd & V(ref="VIN", dc_value=0.0@u_V)["n, p"] & Net("VIN") & inv["a, out"] & Net("VOUT")
# Do a DC-level simulation while ramping the voltage source from 0 to Vdd.
vio = do_dc(VIN=slice(0, vdd_voltage, 0.01))
# Plot the inverter's output against its input.
oscope(vio, inv.a, inv.out)
# Add a trace for the Vdd power supply current.
disp_imin, disp_imax = -15@u_uA, 1@u_uA
oscope(vio, inv.a, inv.out, vdd_ps)
pwr()
# Connect a 500 MHz square wave to net A.
a = Net("A")
cntgen(a)
# Pump the square wave through an inverter.
inv = inverter()
a & inv["a, out"] & Net("A_BAR")
# Do a transient analysis and look at the timing between input and output.
waveforms = do_trans(step_time=0.01@u_ns, end_time=3.5@u_ns)
oscope(waveforms, a, inv.out)
pwr()
a = Net("A")
cntgen(a)
# Create a list of 30 inverters.
invs = [inverter() for _ in range(30)]
# Attach the square wave to the first inverter in the list.
a & invs[0].a
# Go through the list, attaching the input of each inverter to the output of the previous one.
for i in range(1, len(invs)):
invs[i-1].out & invs[i].a
# Attach the output of the last inverter to the output net.
invs[-1].out & Net("A_DELAY")
# Do a transient analysis.
waveforms = do_trans(step_time=0.01@u_ns, end_time=3.5@u_ns)
oscope(waveforms, a, invs[-1].out)
how_big()
@package
def nand(a=Net(), b=Net(), out=Net()):
# Create the PFET and NFET transistors.
q1, q2 = pfet(2)
q3, q4 = nfet(2)
# Connect the PFET/NFET substrates to Vdd/gnd, respectively.
vdd & q1.b & q2.b
gnd & q3.b & q4.b
# Go from Vdd through a parallel-pair of PFETs to the output and then
# through a series-pair of NFETs to ground.
vdd & (q1["s,d"] | q2["s,d"]) & out & q3["d,s"] & q4["d,s"] & gnd
# Connect the pair of inputs to the gates of the transistors.
a & q1.g & q3.g
b & q2.g & q4.g
pwr()
a, b, out = Net("A"), Net("B"), Net("OUT")
# Create two square waves: a at 500 MHz and b at 250 MHz.
cntgen(a, b)
# Create a NAND gate and connect its I/O to the nets.
nand()["a, b, out"] += a, b, out
# Perform a transient analysis.
waveforms = do_trans(step_time=0.01@u_ns, end_time=10@u_ns)
oscope(waveforms, a, b, out)
@package
def xor(a=Net(), b=Net(), out=Net()):
# Create eight transistors: four NFETs and four PFETs.
qn_a, qn_ab, qn_b, qn_bb = nfet(4)
qp_a, qp_ab, qp_b, qp_bb = pfet(4)
# Connect the substrates of the transistors.
vdd & qp_a.b & qp_ab.b & qp_b.b & qp_bb.b
gnd & qn_a.b & qn_ab.b & qn_b.b & qn_bb.b
# Create the two parallel "legs" of series PFETs-NFETs with a
# common output node in the middle.
vdd & qp_ab["s,d"] & qp_b["s,d"] & out & qn_a["d,s"] & qn_b["d,s"] & gnd
vdd & qp_a["s,d"] & qp_bb["s,d"] & out & qn_ab["d,s"] & qn_bb["d,s"] & gnd
# Create two inverters to get the complements of both inputs.
ab, bb = inverter(), inverter()
ab.a += a
bb.a += b
# Attach the two inputs and their complements to the transistor gates.
a & qp_a.g & qn_a.g
ab.out & qp_ab.g & qn_ab.g
b & qp_b.g & qn_b.g
bb.out & qp_bb.g & qn_bb.g
pwr()
a, b, out = Net("A"), Net("B"), Net("OUT")
cntgen(a, b)
xor()["a, b, out"] += a, b, out
waveforms = do_trans(step_time=0.01@u_ns, end_time=10@u_ns)
oscope(waveforms, a, b, out)
@package
def full_adder(a=Net(), b=Net(), cin=Net(), s=Net(), cout=Net()):
# Use two XOR gates to compute the sum bit.
ab_sum = Net() # Net to carry the intermediate result of a+b.
xor()["a,b,out"] += a, b, ab_sum # Compute ab_sum=a+b
xor()["a,b,out"] += ab_sum, cin, s # Compute s=a+b+cin
# Through the magic of DeMorgan's Theorem, the AND-OR carry circuit
# can be done using three NAND gates.
nand1, nand2, nand3 = nand(), nand(), nand()
nand1["a,b"] += ab_sum, cin
nand2["a,b"] += a, b
nand3["a,b,out"] += nand1.out, nand2.out, cout
pwr()
# Generate nets for the inputs and outputs.
a, b, cin, s, cout = Net("A"), Net("B"), Net("CIN"), Net("S"), Net("COUT")
# Drive the A, B and CIN full-adder inputs with all eight combinations.
cntgen(a, b, cin)
# Connect the I/O nets to the full-adder.
full_adder()["a, b, cin, s, cout"] += a, b, cin, s, cout
# Do a transient analysis.
waveforms = do_trans(step_time=0.01@u_ns, end_time=8@u_ns)
oscope(waveforms, a, b, cin, s, cout)
@subcircuit
def adder(a, b, cin, s, cout):
# a, b and s are multi-bit buses. The width of the adder will
# be determined by the length of the sum output.
width = len(s)
# Create a list of full-adders equal to the width of the sum output.
fadds = [full_adder() for _ in range(width)]
# Iteratively connect the full-adders to the input and output bits.
for i in range(width):
# Connect the i'th full adder to the i'th bit of a, b and s.
fadds[i]["a, b, s"] += a[i], b[i], s[i]
if i == 0:
# Connect the carry input to the first full-adder.
fadds[i].cin += cin
else:
# Connect the carry input of the rest of the full-adders
# to the carry output from the previous one.
fadds[i].cin += fadds[i-1].cout
# Connect the carry output to the carry output from the last bit of the adder.
cout += fadds[-1].cout
pwr()
# Create the two-bit input and output buses and the carry input & output nets.
w = 2
a, b, cin, s, cout = Bus("A",w), Bus("B",w), Net("CIN"), Bus("S",w), Net("COUT")
# Drive the A0, A1, B0, B1, and CIN inputs with a five-bit counter.
cntgen(*a, *b, cin)
# Connect the I/O to an adder.
adder(a, b, cin, s, cout)
# Do a transient analysis
waveforms = do_trans(step_time=0.01@u_ns, end_time=32@u_ns)
oscope(waveforms, *a, *b, cin, *s, cout)
def integerize(waveforms, *nets, threshold=0.9@u_V):
Convert a set of N waveforms to a stream of N-bit integer values.
Args:
waveforms: Waveform data from ngspice.
nets: A set of nets comprising a digital word.
threshold: Voltage threshold for determining if a waveform value is 1 or 0.
Returns:
A list of integer values, one for each sample time in the waveform data.
def binarize():
Convert multiple waveforms into streams of ones and zeros.
binary_vals = []
for net in nets:
binary_vals.append([v > threshold for v in waveforms[node(net)]])
return binary_vals
# Convert the waveforms into streams of bits, then combine the bits into integers.
int_vals = []
for bin_vector in zip(*reversed(binarize())):
int_vals.append(int(bytes([ord('0')+b for b in bin_vector]), base=2))
return int_vals
def subsample(subsample_times, sample_times, *int_waveforms):
Take a subset of samples from a set of integerized waveforms at a set of specific times.
Args:
subsample_times: A list of times (in ascending order) at which to take subsamples.
sample_times: A list of times (in ascending order) for when each integerized sample was taken.
int_waveforms: List of integerized waveform sample lists.
Returns:
A list of subsample lists.
# Create a list of the empty lists to hold the subsamples from each integerized waveform.
subsamples = [[] for _ in int_waveforms]
# Get the first subsample time.
subsample_time = subsample_times.pop(0)
# Step through the sample times, looking for the time to take a subsample.
for sample_time, *samples in zip(sample_times, *int_waveforms):
# Take a subsample whenever the sample time is less than the current subsample time.
if sample_time > subsample_time:
# Store a subsample from each waveform.
for i, v in enumerate(samples):
subsamples[i].append(v)
# Get the next subsample time and break from loop if there isn't one.
try:
subsample_time = subsample_times.pop(0)
except IndexError:
break
return subsamples
# Convert the waveforms for A, B, Cin, S, and Cout into lists of integers.
a_ints = integerize(waveforms, *a)
b_ints = integerize(waveforms, *b)
cin_ints = integerize(waveforms, cin)
# Combine the N-bit sum and carry-out into a single N+1-bit integer.
s_ints = integerize(waveforms, *s, cout)
# Set the subsample times just before the adder's inputs change.
ts = [(i+0.9)@u_ns for i in range(32)]
# Subsample the integerized adder waveforms.
av, bv, cinv, sv = subsample(ts, waveforms.time, a_ints, b_ints, cin_ints, s_ints)
# Display a table of the adder's inputs and corresponding output.
pd.DataFrame({'A': av, 'B': bv, 'CIN': cinv, 'S': sv})
error_flag = False
for a, b, cin, s in zip(av, bv, cinv, sv):
if a+b+cin != s:
print(f"ERROR: {a}+{b}+{cin} != {s}")
error_flag = True
if not error_flag:
print("No errors found.")
@package
def tx_gate(i, g, g_b, o):
NMOS/PMOS transmission gate. When g is high and g_b is low, i and o are connected.
# NMOS and PMOS transistors for passing input to output.
qn, qp = nfet(), pfet()
# Transistor substrate connections.
gnd & qn.b
vdd & qp.b
# Parallel NMOS/PMOS transistors between the input and output.
i & (qn["s,d"] | qp["s,d"]) & o
# Connect the gate input to the NMOS and the complement of the gate input
# to the PMOS. Both transistors will conduct when the gate input is high,
# and will block the input from the output when the gate input is low.
g & qn.g
g_b & qp.g
@package
def latch_bit(wr=Net(), wr_b=Net(), d=Net(), out_b=Net()):
in_tx, fb_tx = tx_gate(), tx_gate()
in_inv, fb_inv = inverter(), inverter()
# Input data comes in through the input gate, goes through an inverter to the data output.
d & in_tx["i,o"] & in_inv["a, out"] & out_b
# The data output is fed back through another inverter and transmission gate to the input inverter.
out_b & fb_inv["a, out"] & fb_tx["i,o"] & in_inv.a # Feed output back to input.
# wr activates the input gate and deactivates the feedback gate, allowing data into the latch.
wr & in_tx.g & fb_tx.g_b
# Complement of wr deactivates the input gate and activates the feedback gate, latching the data.
wr_b & in_tx.g_b & fb_tx.g
@package
def ms_ff(wr=Net(), d=Net(), out=Net()):
# Create the master and slave latches.
master, slave = latch_bit(), latch_bit()
# Data passes from the input through the master to the slave latch and then to the output.
d & master["d, out_b"] & slave["d, out_b"] & out
# Data continually enters the master latch when the write-enable is low, but gets
    # latched when the write-enable goes high.
wr & inverter()["a, out"] & master.wr & slave.wr_b
# Data from the master passes through the slave when the write-enable goes high, and
# this data stays stable in the slave when the write-enable goes low and new data
# is entering the master.
wr & slave.wr & master.wr_b
pwr()
wr, d, out = Net('WR'), Net('D'), Net('OUT')
cntgen(wr, d)
ms_ff()["wr, d, out"] += wr, d, out
waveforms = do_trans(step_time=0.01@u_ns, end_time=8@u_ns)
oscope(waveforms, wr, d, out)
@subcircuit
def register(wr, d, out):
# Create a flip-flop for each bit in the output bus.
reg_bits = [ms_ff() for _ in out]
# Connect the inputs and outputs to the flip-flops.
for i, rb in enumerate(reg_bits):
rb["wr, d, out"] += wr, d[i], out[i]
@subcircuit
def cntr(clk, out):
# Create two buses: one for the next counter value, and one that's all zero bits.
width = len(out)
nxt, zero = Bus(width), Bus(width)
# Provide access to the global ground net.
global gnd
# Connect all the zero bus bits to ground (that's why it's zero).
gnd += zero
# The next counter value is the current counter value plus 1. Set the
# adder's carry input to 1 and the b input to zero to do this.
adder(a=out, b=zero, cin=vdd, s=nxt, cout=Net())
# Clock the next counter value into the register on the rising clock edge.
register(wr=clk, d=nxt, out=out)
pwr()
# Generate a clock signal.
clk = Net('clk')
cntgen(clk)
# Create a three-bit counter.
cnt = Bus('CNT', 3)
cntr(clk, cnt)
# Simulate the counter.
waveforms = do_trans(step_time=0.01@u_ns, end_time=30@u_ns)
# In addition to the clock and counter value, also look at the power supply current.
disp_imin, disp_imax = -3@u_mA, 3@u_mA
oscope(waveforms, clk, *cnt, vdd_ps)
time_steps = waveforms.time[1:] - waveforms.time[0:-1]
ps_current = -waveforms[node(vdd_ps)][0:-1] # Mult by -1 to get current FROM the + terminal of the supply.
ps_voltage = waveforms[node(vdd)][0:-1]
energy = sum(ps_current * ps_voltage * time_steps)@u_J
print(f"Total energy = {energy}")
how_big()
@package
def mux8(in_, i0=Net(), i1=Net(), i2=Net(), out=Net()):
# Create the complements of the selection inputs.
i0b, i1b, i2b = Net(), Net(), Net()
i0 & inverter()["a,out"] & i0b
i1 & inverter()["a,out"] & i1b
i2 & inverter()["a,out"] & i2b
out_ = Net() # Output from the eight legs of the mux.
i = 0 # Input bit index.
# Create the eight legs of the mux by nested iteration of the selection inputs
# and their complements. Each leg is turned on by a different combination of inputs.
for i2_g, i2_g_b in ((i2b, i2), (i2, i2b)):
for i1_g, i1_g_b in ((i1b, i1), (i1, i1b)):
for i0_g, i0_g_b in ((i0b, i0), (i0, i0b)):
# Place 3 transmission gates in series from input bit i to output.
i0_gate, i1_gate, i2_gate = tx_gate(), tx_gate(), tx_gate()
in_[i] & i0_gate["i,o"] & i1_gate["i,o"] & i2_gate["i,o"] & out_
# Attach the selection inputs and their complements to the transmission gates.
i0_gate["g, g_b"] += i0_g, i0_g_b
i1_gate["g, g_b"] += i1_g, i1_g_b
i2_gate["g, g_b"] += i2_g, i2_g_b
i = i+1 # Go to the next input bit.
# Run the output through two inverters to restore signal strength.
out_ & inverter()["a, out"] & inverter()["a, out"] & out
@subcircuit
def alu(a, b, cin, s, cout, s_opcode, c_opcode):
    Multi-bit ALU whose operation is selected by the eight-bit opcodes
    that drive the sum and carry muxes.
width = len(s)
s_bits = [mux8() for _ in range(width)]
c_bits = [mux8() for _ in range(width)]
# For each bit in the ALU...
for i in range(width):
# Connect truth-table bits to the sum and carry mux inputs.
s_bits[i].in_ += s_opcode
c_bits[i].in_ += c_opcode
# Connect inputs to the sum and carry mux selectors.
s_bits[i]["i0, i1"] += a[i], b[i]
c_bits[i]["i0, i1"] += a[i], b[i]
# Connect the carry input of each ALU bit to the carry output of the previous bit.
if i == 0:
s_bits[i].i2 & cin
c_bits[i].i2 & cin
else:
s_bits[i].i2 & c_bits[i-1].out
c_bits[i].i2 & c_bits[i-1].out
# Connect the output bit of each sum mux to the ALU sum output.
s[i] & s_bits[i].out
# Connect the carry output from the last ALU bit.
cout & c_bits[-1].out
@subcircuit
def subtractor(a, b, cin, s, cout):
Create a subtractor by applying the required opcodes to the ALU.
# Set the opcodes to perform subtraction (a - b - c), so in reality the carry
# is actually a borrow.
# cin b a s cout
# ====================
# 0 0 0 0 0
# 0 0 1 1 0
# 0 1 0 1 1
# 0 1 1 0 0
# 1 0 0 1 1
# 1 0 1 0 0
# 1 1 0 0 1
# 1 1 1 1 1
one = vdd
zero = gnd
s_opcode = Bus(zero, one, one, zero, one, zero, zero, one)
c_opcode = Bus(zero, zero, one, zero, one, zero, one, one)
# Connect the I/O and opcodes to the ALU.
alu(a=a, b=b, cin=cin, s=s, cout=cout, s_opcode=s_opcode, c_opcode=c_opcode)
pwr()
# Create the two-bit input and output buses and the carry input & output nets.
w = 2
a, b, cin, s, cout = Bus("A",w), Bus("B",w), Net("CIN"), Bus("S",w), Net("COUT")
# Drive the A0, A1, B0, B1, and CIN inputs with a five-bit counter.
cntgen(*a, *b, cin)
# Connect the I/O to the subtractor.
subtractor(a=a, b=b, cin=cin, s=s, cout=cout)
# Do a transient analysis
disp_vmax = 4@u_V
waveforms = do_trans(step_time=0.01@u_ns, end_time=32@u_ns)
# Display the output waveforms.
oscope(waveforms, *a, *b, cin, *s, cout)
# Convert the waveforms for A, B, Cin, S, and Cout into lists of integers.
a_ints = integerize(waveforms, *a)
b_ints = integerize(waveforms, *b)
cin_ints = integerize(waveforms, cin)
# Combine the N-bit sum and carry-out into a single N+1-bit integer.
s_ints = integerize(waveforms, *s, cout)
# Set the subsample times right before the ALU's inputs change.
ts = [(i+0.9)@u_ns for i in range(32)]
# Subsample the integerized ALU waveforms.
av, bv, cinv, sv = subsample(ts, waveforms.time, a_ints, b_ints, cin_ints, s_ints)
# Display a table of the ALU's inputs and corresponding output.
pd.DataFrame({'A': av, 'B': bv, 'CIN': cinv, 'S': sv})
@subcircuit
def down_cntr(clk, out):
# Provide access to the global ground net.
global gnd
width = len(out)
nxt, zero = Bus(width), Bus(width)
gnd += zero
# The next counter value is the current counter value minus 1. Set the
# subtractor's borrow input to 1 and the b input to zero to do this.
subtractor(a=out, b=zero, cin=vdd, s=nxt, cout=Net())
register(wr=clk, d=nxt, out=out)
pwr()
clk = Net('clk')
cntgen(clk)
# Create a three-bit down counter.
cnt = Bus('CNT', 3)
down_cntr(clk, cnt)
# Simulate it.
waveforms = do_trans(step_time=0.01@u_ns, end_time=30@u_ns)
oscope(waveforms, clk, *cnt, vdd_ps)
time_steps = waveforms.time[1:] - waveforms.time[0:-1]
ps_current = -waveforms[node(vdd_ps)][0:-1] # Mult by -1 to get current FROM the + terminal of the supply.
ps_voltage = waveforms[node(vdd)][0:-1]
energy = sum(ps_current * ps_voltage * time_steps)@u_J
print(f"Total energy = {energy}")
how_big()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup batches
Step2: Rather than using batches, we could just import all the data into an array to save some processing time. (In most examples I'm using the batches, however - just because that's how I happened to start out.)
Step3: Re-run sample experiments on full dataset
Step4: Interestingly, with no regularization or augmentation we're getting some reasonable results from our simple convolutional model. So with augmentation, we hopefully will see some very good results.
Step5: I'm shocked by how good these results are! We're regularly seeing 75-80% accuracy on the validation set, which puts us into the top third or better of the competition. With such a simple model and no dropout or semi-supervised learning, this really speaks to the power of this approach to data augmentation.
Step6: This is looking quite a bit better - the accuracy is similar, but the stability is higher. There's still some way to go however...
Step7: Batchnorm dense layers on pretrained conv layers
Step8: Looking good! Let's try pre-computing 5 epochs worth of augmented data, so we can experiment with combining dropout and augmentation on the pre-trained model.
Step9: We use those to create a dataset of convolutional features 5x bigger than the training set.
Step10: Let's include the real training data as well in its non-augmented form.
Step11: Since we've now got a dataset 6x bigger than before, we'll need to copy our labels 6 times too.
Step12: Based on some experiments the previous model works well, with bigger dense layers.
Step13: Now we can train the model as usual, with pre-computed augmented data.
Step14: Looks good - let's save those weights.
Step15: Pseudo labeling
Step16: ...concatenate them with our training labels...
Step17: ...and fine-tune our model using that data.
Step18: That's a distinct improvement - even though the validation set isn't very big. This looks encouraging for when we try this on the test set.
Step19: Submit
Step20: This gets 0.534 on the leaderboard.
Step21: Ensembling
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
%matplotlib inline
#path = "data/state/"
path = "data/state/sample/"
from importlib import reload # Python 3
import utils; reload(utils)
from utils import *
from IPython.display import FileLink
batch_size=64
batches = get_batches(path+'train', batch_size=batch_size)
val_batches = get_batches(path+'valid', batch_size=batch_size*2, shuffle=False)
steps_per_epoch = int(np.ceil(batches.samples/batch_size))
validation_steps = int(np.ceil(val_batches.samples/(batch_size*2)))
(val_classes, trn_classes, val_labels, trn_labels,
val_filenames, filenames, test_filenames) = get_classes(path)
trn = get_data(path+'train')
val = get_data(path+'valid')
save_array(path+'results/val.dat', val)
save_array(path+'results/trn.dat', trn)
val = load_array(path+'results/val.dat')
trn = load_array(path+'results/trn.dat')
def conv1(batches):
model = Sequential([
BatchNormalization(axis=1, input_shape=(3,224,224)),
Conv2D(32,(3,3), activation='relu'),
BatchNormalization(axis=1),
MaxPooling2D((3,3)),
Conv2D(64,(3,3), activation='relu'),
BatchNormalization(axis=1),
MaxPooling2D((3,3)),
Flatten(),
Dense(200, activation='relu'),
BatchNormalization(),
Dense(10, activation='softmax')
])
model.compile(Adam(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(batches, steps_per_epoch, epochs=2, validation_data=val_batches,
validation_steps=validation_steps)
model.optimizer.lr = 0.001
model.fit_generator(batches, steps_per_epoch, epochs=4, validation_data=val_batches,
validation_steps=validation_steps)
return model
model = conv1(batches)
gen_t = image.ImageDataGenerator(rotation_range=15, height_shift_range=0.05,
shear_range=0.1, channel_shift_range=20, width_shift_range=0.1)
batches = get_batches(path+'train', gen_t, batch_size=batch_size)
model = conv1(batches)
model.optimizer.lr = 0.0001
model.fit_generator(batches, steps_per_epoch, epochs=15, validation_data=val_batches,
validation_steps=validation_steps)
gen_t = image.ImageDataGenerator(rotation_range=15, height_shift_range=0.05,
shear_range=0.1, channel_shift_range=20, width_shift_range=0.1)
batches = get_batches(path+'train', gen_t, batch_size=batch_size)
model = Sequential([
BatchNormalization(axis=1, input_shape=(3,224,224)),
Conv2D(32,(3,3), activation='relu'),
BatchNormalization(axis=1),
MaxPooling2D(),
Conv2D(64,(3,3), activation='relu'),
BatchNormalization(axis=1),
MaxPooling2D(),
Conv2D(128,(3,3), activation='relu'),
BatchNormalization(axis=1),
MaxPooling2D(),
Flatten(),
Dense(200, activation='relu'),
BatchNormalization(),
Dropout(0.5),
Dense(200, activation='relu'),
BatchNormalization(),
Dropout(0.5),
Dense(10, activation='softmax')
])
model.compile(Adam(lr=10e-5), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(batches, steps_per_epoch, epochs=2, validation_data=val_batches,
validation_steps=validation_steps)
model.optimizer.lr=0.001
model.fit_generator(batches, steps_per_epoch, epochs=10, validation_data=val_batches,
validation_steps=validation_steps)
model.optimizer.lr=0.00001
model.fit_generator(batches, steps_per_epoch, epochs=10, validation_data=val_batches,
validation_steps=validation_steps)
vgg = Vgg16()
model=vgg.model
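# Locate the last convolutional layer so the pretrained convolutional "body"
# can be split off from the dense "head" that will be replaced.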
last_conv_idx = [i for i,l in enumerate(model.layers) if type(l) is Convolution2D][-1]
conv_layers = model.layers[:last_conv_idx+1]
conv_model = Sequential(conv_layers)
(val_classes, trn_classes, val_labels, trn_labels,
val_filenames, filenames, test_filenames) = get_classes(path)
test_batches = get_batches(path+'test', batch_size=batch_size*2, shuffle=False)
conv_feat = conv_model.predict_generator(batches, int(np.ceil(batches.samples/batch_size)))
conv_val_feat = conv_model.predict_generator(val_batches, int(np.ceil(val_batches.samples/(batch_size*2))))
conv_test_feat = conv_model.predict_generator(test_batches, int(np.ceil(test_batches.samples/(batch_size*2))))
save_array(path+'results/conv_val_feat.dat', conv_val_feat)
save_array(path+'results/conv_test_feat.dat', conv_test_feat)
save_array(path+'results/conv_feat.dat', conv_feat)
conv_feat = load_array(path+'results/conv_feat.dat')
conv_val_feat = load_array(path+'results/conv_val_feat.dat')
conv_val_feat.shape
def get_bn_layers(p):
return [
MaxPooling2D(input_shape=conv_layers[-1].output_shape[1:]),
Flatten(),
Dropout(p/2),
Dense(128, activation='relu'),
BatchNormalization(),
Dropout(p/2),
Dense(128, activation='relu'),
BatchNormalization(),
Dropout(p),
Dense(10, activation='softmax')
]
p=0.8
bn_model = Sequential(get_bn_layers(p))
bn_model.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
bn_model.fit(conv_feat, trn_labels, batch_size=batch_size, epochs=1,
validation_data=(conv_val_feat, val_labels))
bn_model.optimizer.lr=0.01
bn_model.fit(conv_feat, trn_labels, batch_size=batch_size, epochs=2,
validation_data=(conv_val_feat, val_labels))
bn_model.save_weights(path+'models/conv8.h5')
gen_t = image.ImageDataGenerator(rotation_range=15, height_shift_range=0.05,
shear_range=0.1, channel_shift_range=20, width_shift_range=0.1)
da_batches = get_batches(path+'train', gen_t, batch_size=batch_size, shuffle=False)
da_conv_feat = conv_model.predict_generator(da_batches, 5*int(np.ceil((da_batches.samples)/(batch_size))), workers=3)
save_array(path+'results/da_conv_feat2.dat', da_conv_feat)
da_conv_feat = load_array(path+'results/da_conv_feat2.dat')
da_conv_feat = np.concatenate([da_conv_feat, conv_feat])
da_trn_labels = np.concatenate([trn_labels]*6)
def get_bn_da_layers(p):
return [
MaxPooling2D(input_shape=conv_layers[-1].output_shape[1:]),
Flatten(),
Dropout(p),
Dense(256, activation='relu'),
BatchNormalization(),
Dropout(p),
Dense(256, activation='relu'),
BatchNormalization(),
Dropout(p),
Dense(10, activation='softmax')
]
p=0.8
bn_model = Sequential(get_bn_da_layers(p))
bn_model.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
bn_model.fit(da_conv_feat, da_trn_labels, batch_size=batch_size, epochs=1,
validation_data=(conv_val_feat, val_labels))
bn_model.optimizer.lr=0.01
bn_model.fit(da_conv_feat, da_trn_labels, batch_size=batch_size, epochs=4,
validation_data=(conv_val_feat, val_labels))
bn_model.optimizer.lr=0.0001
bn_model.fit(da_conv_feat, da_trn_labels, batch_size=batch_size, epochs=4,
validation_data=(conv_val_feat, val_labels))
bn_model.save_weights(path+'models/da_conv8_1.h5')
val_pseudo = bn_model.predict(conv_val_feat, batch_size=batch_size)
comb_pseudo = np.concatenate([da_trn_labels, val_pseudo])
comb_feat = np.concatenate([da_conv_feat, conv_val_feat])
bn_model.load_weights(path+'models/da_conv8_1.h5')
bn_model.fit(comb_feat, comb_pseudo, batch_size=batch_size, epochs=1,
validation_data=(conv_val_feat, val_labels))
bn_model.fit(comb_feat, comb_pseudo, batch_size=batch_size, epochs=4,
validation_data=(conv_val_feat, val_labels))
bn_model.optimizer.lr=0.00001
bn_model.fit(comb_feat, comb_pseudo, batch_size=batch_size, epochs=4,
validation_data=(conv_val_feat, val_labels))
bn_model.save_weights(path+'models/bn-ps8.h5')
def do_clip(arr, mx): return np.clip(arr, (1-mx)/9, mx)
val_preds = bn_model.predict(conv_val_feat, batch_size=batch_size*2)
np.mean(keras.metrics.categorical_crossentropy(val_labels, do_clip(val_preds, 0.93)).eval())
conv_test_feat = load_array(path+'results/conv_test_feat.dat')
preds = bn_model.predict(conv_test_feat, batch_size=batch_size*2)
subm = do_clip(preds,0.93)
subm_name = path+'results/subm.gz'
classes = sorted(batches.class_indices, key=batches.class_indices.get)
submission = pd.DataFrame(subm, columns=classes)
submission.insert(0, 'img', [a[4:] for a in test_filenames])
submission.head()
submission.to_csv(subm_name, index=False, compression='gzip')
FileLink(subm_name)
#for l in get_bn_layers(p): conv_model.add(l) # this choice would give a weight shape error
for l in get_bn_da_layers(p): conv_model.add(l) # ... so probably this is the right one
for l1,l2 in zip(bn_model.layers, conv_model.layers[last_conv_idx+1:]):
l2.set_weights(l1.get_weights())
for l in conv_model.layers: l.trainable =False
for l in conv_model.layers[last_conv_idx+1:]: l.trainable =True
comb = np.concatenate([trn, val])
# not knowing what the experiment was about, added this to avoid a shape match error with comb using gen_t.flow
comb_pseudo = np.concatenate([trn_labels, val_pseudo])
gen_t = image.ImageDataGenerator(rotation_range=8, height_shift_range=0.04,
shear_range=0.03, channel_shift_range=10, width_shift_range=0.08)
batches = gen_t.flow(comb, comb_pseudo, batch_size=batch_size)
val_batches = get_batches(path+'valid', batch_size=batch_size*2, shuffle=False)
conv_model.compile(Adam(lr=0.00001), loss='categorical_crossentropy', metrics=['accuracy'])
conv_model.fit_generator(batches, steps_per_epoch, epochs=1, validation_data=val_batches,
validation_steps=validation_steps)
conv_model.optimizer.lr = 0.0001
conv_model.fit_generator(batches, steps_per_epoch, epochs=3, validation_data=val_batches,
validation_steps=validation_steps)
for l in conv_model.layers[16:]: l.trainable =True
conv_model.optimizer.lr = 0.00001
conv_model.fit_generator(batches, steps_per_epoch, epochs=8, validation_data=val_batches,
validation_steps=validation_steps)
conv_model.save_weights(path+'models/conv8_ps.h5')
#conv_model.load_weights(path+'models/conv8_da.h5') # conv8_da.h5 was not saved in this notebook
val_pseudo = conv_model.predict(val, batch_size=batch_size*2)
save_array(path+'models/pseudo8_da.dat', val_pseudo)
drivers_ds = pd.read_csv(path+'driver_imgs_list.csv')
drivers_ds.head()
img2driver = drivers_ds.set_index('img')['subject'].to_dict()
driver2imgs = {k: g["img"].tolist()
for k,g in drivers_ds[['subject', 'img']].groupby("subject")}
# It seems this function is not used in this notebook
def get_idx(driver_list):
return [i for i,f in enumerate(filenames) if img2driver[f[3:]] in driver_list]
# drivers = driver2imgs.keys() # Python 2
drivers = list(driver2imgs) # Python 3
rnd_drivers = np.random.permutation(drivers)
ds1 = rnd_drivers[:len(rnd_drivers)//2]
ds2 = rnd_drivers[len(rnd_drivers)//2:]
# The following cells seem to require some preparation code not included in this notebook
models=[fit_conv([d]) for d in drivers]
models=[m for m in models if m is not None]
all_preds = np.stack([m.predict(conv_test_feat, batch_size=128) for m in models])
avg_preds = all_preds.mean(axis=0)
avg_preds = avg_preds/np.expand_dims(avg_preds.sum(axis=1), 1)
keras.metrics.categorical_crossentropy(val_labels, np.clip(avg_val_preds,0.01,0.99)).eval()
keras.metrics.categorical_accuracy(val_labels, np.clip(avg_val_preds,0.01,0.99)).eval()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Given a state and a parent dictionary Parent, the function path_to returns a path leading to the given state.
Step2: Display Code
Step3: The function $\texttt{toDot}(\texttt{source}, \texttt{Edges}, \texttt{Fringe}, \texttt{Visited})$ takes a graph that is represented by
Step4: Testing
Step5: Saving the Infidels
Step6: Solving the Sliding Puzzle
Step7: The next line is needed to enable the %memit magic command.
|
<ASSISTANT_TASK:>
Python Code:
def search(start, goal, next_states):
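    # Breadth-first search: Frontier holds the states discovered in the current
    # layer, Visited the states already expanded, and Parent maps each state to
    # its predecessor so the path can be reconstructed once the goal is found.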
Frontier = { start }
Visited = set()
Parent = { start: start }
while Frontier:
NewFrontier = set()
for s in Frontier:
for ns in next_states(s):
if ns not in Visited and ns not in Frontier:
NewFrontier.add(ns)
Parent[ns] = s
if ns == goal:
                        print("Number of states:", len(Visited) + len(Frontier) + len(NewFrontier))
return path_to(goal, Parent)
Visited |= Frontier
Frontier = NewFrontier
def path_to(state, Parent):
p = Parent.get(state)
if p == state:
return [state]
return path_to(p, Parent) + [state]
import graphviz as gv
def toDot(source, goal, Edges, Frontier, Visited, Parent=None):
V = set()
for x, L in Edges.items():
V.add(x)
for y in L:
V.add(y)
dot = gv.Digraph(node_attr={'shape': 'record', 'style': 'rounded'})
dot.attr(rankdir='LR')
for x in V:
if x == source:
dot.node(str(x), color='blue', shape='doublecircle')
elif x in Frontier and x == goal:
dot.node(str(x), label=str(x), color='magenta')
elif x in Frontier:
dot.node(str(x), label=str(x), color='red')
elif x in Visited:
dot.node(str(x), label=str(x), color='blue')
else:
dot.node(str(x), label=str(x))
if Parent:
Path = path_to(goal, Parent)
for u in V:
if Edges.get(u):
for v in Edges[u]:
if Parent and v in Path and Parent[v] == u:
dot.edge(str(u), str(v), color='brown', style='bold')
else:
dot.edge(str(u), str(v))
return dot
def next_states_test(node):
x, y = node
return { (x+1, y), (x, y+1) }
def create_edges(n):
Edges = {}
for row in range(n):
for col in range(n):
if (row, col) != (n-1, n-1):
Edges[(row, col)] = list(next_states_test((row, col)))
for k in range(n-1):
Edges[(k, n-1)] = [(k+1, n-1)]
Edges[(n-1, k)] = [(n-1, k+1)]
return Edges
def search_show(start, goal, next_states, Edges):
Visited = set()
Frontier = { start }
Parent = { start: start }
while len(Frontier) > 0:
display(toDot(start, goal, Edges, Frontier, Visited))
NewFrontier = set()
Visited |= Frontier
for s in Frontier:
for ns in next_states(s):
if not (ns in Visited):
NewFrontier.add(ns)
Parent[ns] = s
if ns == goal:
display(toDot(start, goal, Edges, NewFrontier, Visited, Parent))
return
Frontier = NewFrontier
def main(n):
Edges = create_edges(n)
search_show((0,0), (n-1, n -1), next_states_test, Edges)
main(6)
%run Missionaries.ipynb
dot_graph(createRelation(start))
%%time
Path = search(start, goal, next_states)
printPath(Path)
%run Sliding-Puzzle.ipynb
%load_ext memory_profiler
%%time
%memit Path = search(start, goal, next_states)
animation(Path)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 1
Step2: Here's a typical time series
Step3: Here's a function that simulates for n periods, starting from x0, and returns only the final value
Step4: Let's see how fast this runs
Step5: Now let's try this in FORTRAN.
Step6: Now let's do the same thing in Python using Numba's JIT compilation
Step7: After JIT compilation, function execution speed is about the same as FORTRAN.
Step8: Vectorized code
Step9: JITTed code
Step10: Numba for vectorization with automatic parallelization - even faster
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from numba import vectorize, jit, float64
from quantecon.util import tic, toc
import matplotlib.pyplot as plt
α = 4
n = 200
x = np.empty(n)
x[0] = 0.2
for t in range(n-1):
x[t+1] = α * x[t] * (1 - x[t])
plt.plot(x)
plt.show()
def quad(x0, n):
x = x0
for i in range(1, n):
x = α * x * (1 - x)
return x
n = 10_000_000
tic()
x = quad(0.2, n)
toc()
!cat fastquad.f90
!gfortran -O3 fastquad.f90
!./a.out
quad_jitted = jit(quad)
tic()
x = quad_jitted(0.2, n)
toc()
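# The first timed call above includes Numba's one-time compilation cost;
# timing the same call again below measures steady-state execution speed.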
tic()
x = quad_jitted(0.2, n)
toc()
def f(x, y):
return np.cos(x**2 + y**2) / (1 + x**2 + y**2) + 1
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
gridsize = 50
gmin, gmax = -3, 3
xgrid = np.linspace(gmin, gmax, gridsize)
ygrid = xgrid
x, y = np.meshgrid(xgrid, ygrid)
# === plot value function === #
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x,
y,
f(x, y),
rstride=2, cstride=2,
cmap=cm.jet,
alpha=0.4,
linewidth=0.05)
ax.scatter(x, y, c='k', s=0.6)
ax.scatter(x, y, f(x, y), c='k', s=0.6)
ax.view_init(25, -57)
ax.set_zlim(-0, 2.0)
ax.set_xlim(gmin, gmax)
ax.set_ylim(gmin, gmax)
plt.show()
grid = np.linspace(-3, 3, 10000)
x, y = np.meshgrid(grid, grid)
tic()
np.max(f(x, y))
toc()
@jit
def compute_max():
m = -np.inf
for x in grid:
for y in grid:
z = np.cos(x**2 + y**2) / (1 + x**2 + y**2) + 1
if z > m:
m = z
return m
compute_max()
tic()
compute_max()
toc()
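# vectorize with target='parallel' compiles f into a NumPy ufunc whose inner
# loop is spread across CPU threads, parallelizing the grid evaluation.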
@vectorize('float64(float64, float64)', target='parallel')
def f_par(x, y):
return np.cos(x**2 + y**2) / (1 + x**2 + y**2) + 1
x, y = np.meshgrid(grid, grid)
np.max(f_par(x, y))
tic()
np.max(f_par(x, y))
toc()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The state column shows the outcome of the project.
Step2: Using this data, how can we use features such as project category, currency, funding goal, and country to predict if a Kickstarter project will succeed?
Step3: Convert timestamps
Step4: Prep categorical variables
Step5: We collect all of these features in a new dataframe that we can use to train a model.
Step6: Create training, validation, and test splits
Step7: Train a model
Step8: Make predictions & evaluate the model
|
<ASSISTANT_TASK:>
Python Code:
#$HIDE_INPUT$
import pandas as pd
ks = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv',
parse_dates=['deadline', 'launched'])
ks.head(6)
print('Unique values in `state` column:', list(ks.state.unique()))
# Drop live projects
ks = ks.query('state != "live"')
# Add outcome column, "successful" == 1, others are 0
ks = ks.assign(outcome=(ks['state'] == 'successful').astype(int))
ks = ks.assign(hour=ks.launched.dt.hour,
day=ks.launched.dt.day,
month=ks.launched.dt.month,
year=ks.launched.dt.year)
from sklearn.preprocessing import LabelEncoder
cat_features = ['category', 'currency', 'country']
encoder = LabelEncoder()
# Apply the label encoder to each column
encoded = ks[cat_features].apply(encoder.fit_transform)
# Since ks and encoded have the same index, I can easily join them
data = ks[['goal', 'hour', 'day', 'month', 'year', 'outcome']].join(encoded)
data.head()
valid_fraction = 0.1
valid_size = int(len(data) * valid_fraction)
train = data[:-2 * valid_size]
valid = data[-2 * valid_size:-valid_size]
test = data[-valid_size:]
import lightgbm as lgb
feature_cols = train.columns.drop('outcome')
dtrain = lgb.Dataset(train[feature_cols], label=train['outcome'])
dvalid = lgb.Dataset(valid[feature_cols], label=valid['outcome'])
param = {'num_leaves': 64, 'objective': 'binary'}
param['metric'] = 'auc'
num_round = 1000
bst = lgb.train(param, dtrain, num_round, valid_sets=[dvalid], early_stopping_rounds=10, verbose_eval=False)
from sklearn import metrics
ypred = bst.predict(test[feature_cols])
score = metrics.roc_auc_score(test['outcome'], ypred)
print(f"Test AUC score: {score}")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Our feed forward neural network will look very similar to our softmax classifier. However, now we have multiple layers and non-linear activations over logits!
Step2: Step 1
Step3: Step 2
Step4: Step 3
Step5: Step 5
Step6: Step 6
Step7: Now let's run our graph as usual
|
<ASSISTANT_TASK:>
Python Code:
# Necessary imports
import time
from IPython import display
import numpy as np
from matplotlib.pyplot import imshow
from PIL import Image, ImageOps
import tensorflow as tf
%matplotlib inline
from tensorflow.examples.tutorials.mnist import input_data
# Read the mnist dataset
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Hyperparameters (these are similar to the ones used in the previous example)
learning_rate = 0.5
training_epochs = 5
batch_size = 100
# Additional hyperparameters for our Neural Nets - Layer sizes
layer_1_size = 256
layer_2_size = 128
# Create placeholders
x = tf.placeholder(tf.float32, shape=(None, 784))
y = tf.placeholder(tf.float32, shape=(None, 10))
# Model parameters that have to be learned
# Note that the weights & biases are now initialized to small random numbers
# Also note that the number of columns for should be the size of the first layer!
W_h1 = tf.Variable(0.01 * tf.random_normal([784, layer_1_size]))
b_h1 = tf.Variable(tf.random_normal([layer_1_size]))
# Layer 2
# The input dimensions are not 784 anymore but the size of the first layer.
# The number of columns are the size of the second layer
W_h2 = tf.Variable(0.01 * tf.random_normal([layer_1_size, layer_2_size]))
b_h2 = tf.Variable(tf.random_normal([layer_2_size]))
# Output layer - Layer 3
# This is the softmax layer that we implemented earlier
# The input dimension size is now the size of the 2nd layer and the number of columns = number of classes
W_o = tf.Variable(0.01 * tf.random_normal([layer_2_size, 10]))
b_o = tf.Variable(tf.random_normal([10]))
# Get the weighted sum for the first layer
preact_h1 = tf.matmul(x, W_h1) + b_h1
# Compute the activations which forms the output of this layer
out_h1 = tf.sigmoid(preact_h1)
# out_h1 = tf.nn.relu(preact_h1)
# Get the weighted sum for the second layer
# Note that the input is now the output from the previous layer
preact_h2 = tf.matmul(out_h1, W_h2) + b_h2
# Compute the activations which forms the output of this layer
out_h2 = tf.sigmoid(preact_h2)
# out_h2 = tf.nn.relu(preact_h2)
# Get the logits for the softmax output layer
logits_o = tf.matmul(out_h2, W_o) + b_o
# Final layer doesn't have activations. Simply compute the cross entropy loss
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits_o))
# Create an optimizer with the learning rate
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# optimizer = tf.train.AdamOptimizer(learning_rate)
# Use the optimizer to minimize the loss
train_step = optimizer.minimize(cross_entropy_loss)
# First create the correct prediction by taking the maximum value from the prediction class
# and checking it with the actual class. The result is a boolean column vector
correct_predictions = tf.equal(tf.argmax(logits_o, 1), tf.argmax(y, 1))
# Calculate the accuracy over all the images
# Cast the boolean vector into float (1s & 0s) and then compute the average.
accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
# Initializing global variables
init = tf.global_variables_initializer()
# Create a saver to save our model
saver = tf.train.Saver()
# Create a session to run the graph
with tf.Session() as sess:
# Run initialization
sess.run(init)
# For the set number of epochs
for epoch in range(training_epochs):
# Compute the total number of batches
num_batches = int(mnist.train.num_examples/batch_size)
# Iterate over all the examples (1 epoch)
for batch in range(num_batches):
# Get a batch of examples
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Now run the session
curr_loss, cur_accuracy, _ = sess.run([cross_entropy_loss, accuracy, train_step],
feed_dict={x: batch_xs, y: batch_ys})
if batch % 50 == 0:
display.clear_output(wait=True)
time.sleep(0.05)
# Print the loss
print("Epoch: %d/%d. Batch: %d/%d. Current loss: %.5f. Train Accuracy: %.2f"
%(epoch, training_epochs, batch, num_batches, curr_loss, cur_accuracy))
# Run the session to compute the value and print it
test_accuracy = sess.run(accuracy,
feed_dict={x: mnist.test.images,
y: mnist.test.labels})
print("Test Accuracy: %.2f"%test_accuracy)
# Lets save the entire session
saver.save(sess, '../models/ff_nn.model')
# Load the model back and test its accuracy
with tf.Session() as sess:
saver.restore(sess, '../models/ff_nn.model')
test_accuracy = sess.run(accuracy,
feed_dict={x: mnist.test.images,
y: mnist.test.labels})
print("Test Accuracy: %.2f"%test_accuracy)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Single instance text plot
Step2: Multiple instance text plot
Step3: Summarizing text explanations
Step4: Note that how you summarize the importance of features can make a big difference. In the plot above the "a" token was very important both because it had an impact on the model, and because it was very common. Below we instead summarize the instances using the max function to see the largest impact of a token in any instance.
Step5: You can also slice out a single token from all the instances by using that token as an input name (note that the gray values to the left of the input names are the original text that the token was generated from).
Step6: Text-To-Text Visualization
Step7: Text-To-Text Visualization contains the input text to the model on the left side and output text on the right side (in the default layout). On hovering over a token on the right (output) side the importance of each input token is overlayed on it, and is signified by the background color of the token. Red regions correspond to parts of the text that increase the output of the model when they are included, while blue regions decrease the output of the model when they are included. The explanation for a particular output token can be anchored by clickling on the output token (it can be un-anchored by clicking again).
|
<ASSISTANT_TASK:>
Python Code:
import shap
import transformers
import nlp
import torch
import numpy as np
import scipy as sp
# load a BERT sentiment analysis model
tokenizer = transformers.DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
model = transformers.DistilBertForSequenceClassification.from_pretrained(
"distilbert-base-uncased-finetuned-sst-2-english"
).cuda()
# define a prediction function
def f(x):
tv = torch.tensor([tokenizer.encode(v, padding='max_length', max_length=500, truncation=True) for v in x]).cuda()
outputs = model(tv)[0].detach().cpu().numpy()
scores = (np.exp(outputs).T / np.exp(outputs).sum(-1)).T
val = sp.special.logit(scores[:,1]) # use one vs rest logit units
return val
# build an explainer using a token masker
explainer = shap.Explainer(f, tokenizer)
# explain the model's predictions on IMDB reviews
imdb_train = nlp.load_dataset("imdb")["train"]
shap_values = explainer(imdb_train[:10], fixed_context=1)
# plot a single review's explanation (here the fourth one)
shap.plots.text(shap_values[3])
# plot the explanations for the first three reviews
shap.plots.text(shap_values[:3])
shap.plots.bar(shap_values.abs.sum(0))
shap.plots.bar(shap_values.abs.max(0))
shap.plots.bar(shap_values[:,"but"])
import numpy as np
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import shap
import torch
tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-es")
model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-es").cuda()
s=["In this picture, there are four persons: my father, my mother, my brother and my sister."]
explainer = shap.Explainer(model,tokenizer)
shap_values = explainer(s)
shap.plots.text(shap_values)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <!--Empty Space for separating topics-->
Step2: Then let us create a tensor according to the equation $ y=x^2 $.
Step3: Then let us take the derivative with respect x at x = 2
Step4: The preceding lines perform the following operation
Step5: The function is in the following form
Step6: Double-click <b>here</b> for the solution.
Step7: This is equivalent to the following
Step8: the expression is given by
Step9: The equation is given by
Step10: We can plot the function and its derivative
Step11: The orange line is the slope of the blue line at the intersection point, which is the derivative of the blue line.
Step12: <!--Empty Space for separating topics-->
|
<ASSISTANT_TASK:>
Python Code:
# These are the libraries will be useing for this lab.
import torch
import matplotlib.pylab as plt
import torch.functional as F
# Create a tensor x
x = torch.tensor(2.0, requires_grad = True)
print("The tensor x: ", x)
# Create a tensor y according to y = x^2
y = x ** 2
print("The result of y = x^2: ", y)
# Take the derivative. Try to print out the derivative at the value x = 2
y.backward()
print("The dervative at x = 2: ", x.grad)
# Calculate the y = x^2 + 2x + 1, then find the derivative
x = torch.tensor(2.0, requires_grad = True)
y = x ** 2 + 2 * x + 1
print("The result of y = x^2 + 2x + 1: ", y)
y.backward()
print("The dervative at x = 2: ", x.grad)
# Practice: Calculate the derivative of y = 2x^3 + x at x = 1
# Type your code here
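# A possible solution sketch (not part of the original notebook cell): the expected
# analytic answer is dy/dx = 6x^2 + 1 = 7 at x = 1.
x = torch.tensor(1.0, requires_grad = True)
y = 2 * x ** 3 + x
y.backward()
print("The derivative of y = 2x^3 + x at x = 1: ", x.grad)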
# Calculate f(u, v) = v * u + u^2 at u = 1, v = 2
u = torch.tensor(1.0,requires_grad=True)
v = torch.tensor(2.0,requires_grad=True)
f = u * v + u ** 2
print("The result of v * u + u^2: ", f)
# Calculate the derivative with respect to u
f.backward()
print("The partial derivative with respect to u: ", u.grad)
# Calculate the derivative with respect to v
print("The partial derivative with respect to u: ", v.grad)
# Calculate the derivative with multiple values
x = torch.linspace(-10, 10, 10, requires_grad = True)
Y = x ** 2
y = torch.sum(x ** 2)
# Take the derivative with respect to multiple value. Plot out the function and its derivative
y.backward()
plt.plot(x.detach().numpy(), Y.detach().numpy(), label = 'function')
plt.plot(x.detach().numpy(), x.grad.detach().numpy(), label = 'derivative')
plt.xlabel('x')
plt.legend()
plt.show()
import torch.nn.functional as F
# Take the derivative of Relu with respect to multiple value. Plot out the function and its derivative
x = torch.linspace(-3, 3, 100, requires_grad = True)
Y = F.relu(x)
y = Y.sum()
y.backward()
plt.plot(x.detach().numpy(), Y.detach().numpy(), label = 'function')
plt.plot(x.detach().numpy(), x.grad.detach().numpy(), label = 'derivative')
plt.xlabel('x')
plt.legend()
plt.show()
# Practice: Calculate the derivative of f = u * v + (u * v) ** 2 at u = 2, v = 1
# Type the code here
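# A possible solution sketch (not part of the original notebook cell): the expected
# analytic answers are df/du = v + 2uv^2 = 5 and df/dv = u + 2u^2v = 10.
u = torch.tensor(2.0, requires_grad = True)
v = torch.tensor(1.0, requires_grad = True)
f = u * v + (u * v) ** 2
f.backward()
print("The partial derivative with respect to u: ", u.grad)
print("The partial derivative with respect to v: ", v.grad)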
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Random sampling
Step2: Sobol
Step3: Classic latin hypercube sampling
Step4: Centered latin hypercube sampling
Step5: Maximin optimized hypercube sampling
Step6: Correlation optimized hypercube sampling
Step7: Ratio optimized hypercube sampling
Step8: Halton sampling
Step9: Hammersly sampling
Step10: Grid sampling
Step11: Pdist boxplot of all methods
|
<ASSISTANT_TASK:>
Python Code:
print(__doc__)
import numpy as np
np.random.seed(1234)
import matplotlib.pyplot as plt
from skopt.space import Space
from skopt.sampler import Sobol
from skopt.sampler import Lhs
from skopt.sampler import Halton
from skopt.sampler import Hammersly
from skopt.sampler import Grid
from scipy.spatial.distance import pdist
def plot_searchspace(x, title):
fig, ax = plt.subplots()
plt.plot(np.array(x)[:, 0], np.array(x)[:, 1], 'bo', label='samples')
plt.plot(np.array(x)[:, 0], np.array(x)[:, 1], 'bs', markersize=40, alpha=0.5)
# ax.legend(loc="best", numpoints=1)
ax.set_xlabel("X1")
ax.set_xlim([0, 5])
ax.set_ylabel("X2")
ax.set_ylim([0, 5])
plt.title(title)
ax.grid(True)
n_samples = 10
space = Space([(0, 5), (0, 5)])
x = space.rvs(n_samples)
plot_searchspace(x, "Random samples")
pdist_data = []
x_label = []
print("empty fields: %d" % (36 - np.size(np.unique(x, axis=0), 0)))
pdist_data.append(pdist(x).flatten())
x_label.append("random")
sobol = Sobol()
x = sobol.generate(space.dimensions, n_samples)
plot_searchspace(x, 'Sobol')
print("empty fields: %d" % (36 - np.size(np.unique(x, axis=0), 0)))
pdist_data.append(pdist(x).flatten())
x_label.append("sobol")
lhs = Lhs(lhs_type="classic", criterion=None)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'classic LHS')
print("empty fields: %d" % (36 - np.size(np.unique(x, axis=0), 0)))
pdist_data.append(pdist(x).flatten())
x_label.append("lhs")
lhs = Lhs(lhs_type="centered", criterion=None)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'centered LHS')
print("empty fields: %d" % (36 - np.size(np.unique(x, axis=0), 0)))
pdist_data.append(pdist(x).flatten())
x_label.append("center")
lhs = Lhs(criterion="maximin", iterations=10000)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'maximin LHS')
print("empty fields: %d" % (36 - np.size(np.unique(x, axis=0), 0)))
pdist_data.append(pdist(x).flatten())
x_label.append("maximin")
lhs = Lhs(criterion="correlation", iterations=10000)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'correlation LHS')
print("empty fields: %d" % (36 - np.size(np.unique(x, axis=0), 0)))
pdist_data.append(pdist(x).flatten())
x_label.append("corr")
lhs = Lhs(criterion="ratio", iterations=10000)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'ratio LHS')
print("empty fields: %d" % (36 - np.size(np.unique(x, axis=0), 0)))
pdist_data.append(pdist(x).flatten())
x_label.append("ratio")
halton = Halton()
x = halton.generate(space.dimensions, n_samples)
plot_searchspace(x, 'Halton')
print("empty fields: %d" % (36 - np.size(np.unique(x, axis=0), 0)))
pdist_data.append(pdist(x).flatten())
x_label.append("halton")
hammersly = Hammersly()
x = hammersly.generate(space.dimensions, n_samples)
plot_searchspace(x, 'Hammersly')
print("empty fields: %d" % (36 - np.size(np.unique(x, axis=0), 0)))
pdist_data.append(pdist(x).flatten())
x_label.append("hammersly")
grid = Grid(border="include", use_full_layout=False)
x = grid.generate(space.dimensions, n_samples)
plot_searchspace(x, 'Grid')
print("empty fields: %d" % (36 - np.size(np.unique(x, axis=0), 0)))
pdist_data.append(pdist(x).flatten())
x_label.append("grid")
fig, ax = plt.subplots()
ax.boxplot(pdist_data)
plt.grid(True)
plt.ylabel("pdist")
_ = ax.set_ylim(0, 6)
_ = ax.set_xticklabels(x_label, rotation=45, fontsize=8)
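# Hedged addition (not part of the original example): print the minimum pairwise
# distance for each sampler as a rough numeric measure of how well spread the points are.
for label, dists in zip(x_label, pdist_data):
    print("%-10s min pdist: %.3f" % (label, dists.min()))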
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: PipelineAI
Step2: All Code in GitHub Repo
Step3: Get Allocation Index
Step4: Helper Scripts
Step5: Find Script from Anywhere
Step6: Show pull_force_overwrite_local Script
|
<ASSISTANT_TASK:>
Python Code:
%%bash
pull_force_overwrite_local
%%html
<iframe width=800 height=600 src="http://pipeline.io"></iframe>
import requests
url = 'http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip'
headers = {'Metadata-Flavor': 'Google'}
r = requests.get(url, headers=headers)
ip_address = r.text
print('Your IP: %s' % ip_address)
import requests
import json
url = 'http://allocator.demo.pipeline.io/allocation/%s' % ip_address
r = requests.get(url)
allocation = r.text
allocation_json = json.loads(allocation)
print(allocation_json)
print(allocation_json['index'])
!ls -l /root/scripts/
!which pull_force_overwrite_local
!cat /root/scripts/pull_force_overwrite_local
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, simulate a toy dataset of 50 observations with a cosine relationship.
Step2: Next, define a two-layer Bayesian neural network. Here, we define the neural network manually with tanh nonlinearities.
Step3: Next, make inferences about the model from data. We will use variational inference. Specify a normal approximation over the weights and biases.
Step4: Defining tf.Variable allows the variational factors’ parameters to vary. They are initialized randomly. The standard deviation parameters are constrained to be greater than zero according to a softplus transformation.
Step5: Now, run variational inference with the Kullback-Leibler divergence in order to infer the model’s latent variables with the given data. We specify 1000 iterations.
Step6: Finally, criticize the model fit. Bayesian neural networks define a distribution over neural networks, so we can perform a graphical check. Draw neural networks from the inferred model and visualize how well it fits the data.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from edward.models import Normal
plt.style.use('ggplot')
def build_toy_dataset(N=50, noise_std=0.1):
x = np.linspace(-3, 3, num=N)
y = np.cos(x) + np.random.normal(0, noise_std, size=N)
x = x.astype(np.float32).reshape((N, 1))
y = y.astype(np.float32)
return x, y
def neural_network(x, W_0, W_1, b_0, b_1):
h = tf.tanh(tf.matmul(x, W_0) + b_0)
h = tf.matmul(h, W_1) + b_1
return tf.reshape(h, [-1])
ed.set_seed(42)
N = 50  # number of data points
D = 1 # number of features
x_train, y_train = build_toy_dataset(N)
W_0 = Normal(loc=tf.zeros([D, 2]), scale=tf.ones([D, 2]))
W_1 = Normal(loc=tf.zeros([2, 1]), scale=tf.ones([2, 1]))
b_0 = Normal(loc=tf.zeros(2), scale=tf.ones(2))
b_1 = Normal(loc=tf.zeros(1), scale=tf.ones(1))
x = x_train
y = Normal(loc=neural_network(x, W_0, W_1, b_0, b_1),
scale=0.1 * tf.ones(N))
qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 2])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([D, 2]))))
qW_1 = Normal(loc=tf.Variable(tf.random_normal([2, 1])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([2, 1]))))
qb_0 = Normal(loc=tf.Variable(tf.random_normal([2])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([2]))))
qb_1 = Normal(loc=tf.Variable(tf.random_normal([1])),
scale=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))
# Sample functions from variational model to visualize fits.
rs = np.random.RandomState(0)
inputs = np.linspace(-5, 5, num=400, dtype=np.float32)
x = tf.expand_dims(inputs, 1)
mus = tf.stack(
[neural_network(x, qW_0.sample(), qW_1.sample(),
qb_0.sample(), qb_1.sample())
for _ in range(10)])
# FIRST VISUALIZATION (prior)
sess = ed.get_session()
tf.global_variables_initializer().run()
outputs = mus.eval()
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
ax.set_title("Iteration: 0")
ax.plot(x_train, y_train, 'ks', alpha=0.5, label='(x, y)')
ax.plot(inputs, outputs[0].T, 'r', lw=2, alpha=0.5, label='prior draws')
ax.plot(inputs, outputs[1:].T, 'r', lw=2, alpha=0.5)
ax.set_xlim([-5, 5])
ax.set_ylim([-2, 2])
ax.legend()
plt.show()
inference = ed.KLqp({W_0: qW_0, b_0: qb_0,
W_1: qW_1, b_1: qb_1}, data={y: y_train})
inference.run(n_iter=1000, n_samples=5)
# SECOND VISUALIZATION (posterior)
outputs = mus.eval()
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
ax.set_title("Iteration: 1000")
ax.plot(x_train, y_train, 'ks', alpha=0.5, label='(x, y)')
ax.plot(inputs, outputs[0].T, 'r', lw=2, alpha=0.5, label='posterior draws')
ax.plot(inputs, outputs[1:].T, 'r', lw=2, alpha=0.5)
ax.set_xlim([-5, 5])
ax.set_ylim([-2, 2])
ax.legend()
plt.show()
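# Hedged addition (not part of the original tutorial): approximate the posterior mean
# prediction by averaging several evaluations of the sampled networks in `mus`.
draws = np.concatenate([mus.eval() for _ in range(5)], axis=0)
mean_prediction = draws.mean(axis=0)
plt.plot(x_train, y_train, 'ks', alpha=0.5, label='(x, y)')
plt.plot(inputs, mean_prediction, 'b', lw=2, label='approx. posterior mean')
plt.legend()
plt.show()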
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some global data
Step2: Run Strategy
Step3: View logs
Step4: Generate strategy stats - display all available stats
Step5: Equity curve
Step6: Plot Equity Curves
Step7: Plot Trades
Step8: Bar Graph
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import pinkfish as pf
import strategy
# format price data
pd.options.display.float_format = '{:0.2f}'.format
%matplotlib inline
# set size of inline plots
'''note: rcParams can't be in same cell as import matplotlib
or %matplotlib inline
%matplotlib notebook: will lead to interactive plots embedded within
the notebook, you can zoom and resize the figure
%matplotlib inline: only draw static images in the notebook
'''
plt.rcParams["figure.figsize"] = (10, 7)
#symbol = '^GSPC'
symbol = 'SPY'
#symbol = 'DIA'
#symbol = 'QQQ'
#symbol = 'IWM'
#symbol = 'TLT'
#symbol = 'GLD'
#symbol = 'AAPL'
#symbol = 'BBRY'
#symbol = 'GDX'
#symbol = 'OIH'
capital = 10000
start = datetime.datetime(1900, 1, 1)
#start = datetime.datetime(*pf.SP500_BEGIN)
end = datetime.datetime.now()
options = {
'use_adj' : True,
'use_cache' : True,
'lookback': None,
'margin': 1
}
s = strategy.Strategy(symbol, capital, start, end, options)
s.run()
s.tlog.tail(10)
s.dbal.tail()
pf.print_full(s.stats)
benchmark = pf.Benchmark(symbol, s.capital, s.start, s.end, use_adj=True)
benchmark.run()
pf.plot_equity_curve(s.dbal, benchmark=benchmark.dbal)
pf.plot_trades(s.dbal, benchmark=benchmark.dbal)
df = pf.plot_bar_graph(s.stats, benchmark.stats)
df
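# Hedged addition (not part of the original notebook): compare a few headline
# statistics between the strategy and the benchmark. The stat names below are
# assumptions about pinkfish's stats index; adjust them to whatever pf.print_full showed.
for key in ['annual_return_rate', 'sharpe_ratio', 'max_closed_out_drawdown']:
    try:
        print('%-25s strategy: %10.2f   benchmark: %10.2f'
              % (key, s.stats[key], benchmark.stats[key]))
    except KeyError:
        print('%-25s not found in stats' % key)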
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define a Neural Network
Step2: Define a Loss function and optimizer
Step3: Train the network
Step4: Test the network on the test data
Step5: Performance on the test dataset.
Step6: Plot images
|
<ASSISTANT_TASK:>
Python Code:
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.MNIST(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(28 * 28, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# flatten image
x = x[:, 0, ...].view(-1, 28*28)
# feed layer 1
out_layer1 = self.fc1(x)
out_layer1 = F.relu(out_layer1)
# feed layer 2
out_layer2 = self.fc2(out_layer1)
out_layer2 = F.relu(out_layer2)
# feed layer 3
out_layer3 = self.fc3(out_layer2)
return out_layer3
net = Net()
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
        if i % 100 == 99:    # print every 100 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 100))
            running_loss = 0.0
print('Finished Training')
testset = torchvision.datasets.MNIST(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
100 * correct / total))
import matplotlib.pyplot as plt
import numpy as np
# functions to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
# get some random training images
dataiter = iter(trainloader)
images, labels = dataiter.next()
predictions = net(images)
_, predicted = torch.max(predictions.data, 1)
# show images
imshow(torchvision.utils.make_grid(images))
# print the predicted labels for the images shown
print(' '.join('%5s' % predicted[j].item() for j in range(4)))
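# Hedged addition (not part of the original notebook): per-digit accuracy on the
# test set, reusing the testloader and trained net from above.
class_correct = [0] * 10
class_total = [0] * 10
with torch.no_grad():
    for images, labels in testloader:
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        for label, pred in zip(labels, predicted):
            class_total[label.item()] += 1
            class_correct[label.item()] += int(label.item() == pred.item())
for digit in range(10):
    if class_total[digit] > 0:
        print('Accuracy for digit %d: %.1f %%'
              % (digit, 100.0 * class_correct[digit] / class_total[digit]))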
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We create a simple fcc structure, generate its distinct slabs, and select the slab with a Miller index of (1, 1, 1).
Step2: We make an instance of the AdsorbateSiteFinder and use it to find the relevant adsorption sites.
Step3: We visualize the sites using a tool from pymatgen.
Step4: Use the AdsorbateSiteFinder.generate_adsorption_structures method to generate structures of adsorbates.
Step5: Example 2
Step6: Example 3
Step7: Import the necessary workflow-generating function from atomate
Step8: Adsorption configurations take the form of a dictionary with the miller index as a string key and a list of pymatgen Molecule instances as the values.
Step9: Workflows are generated using a slab and a list of molecules.
Step10: The workflow may be inspected as below. Note that there are 9 optimization tasks, corresponding to the slab and the 4 distinct adsorption configurations for each of the 2 adsorbates. Details on running FireWorks, including singleshot launching, queue submission, workflow management, and more can be found in the FireWorks documentation.
Step11: Note also that running FireWorks via atomate may require system specific tuning (e. g. for VASP parameters). More information is available in the atomate documentation.
Step12: For oxygen evolution, a common metric for the catalytic activity of a given catalyst is the theoretical overpotential corresponding to the mechanism that proceeds through OH*, O*, and OOH*. So we can define our adsorbates
Step13: Then we can retrieve the structures using the MP rest interface, and write a simple for loop which creates all of the workflows corresponding to every slab and every adsorption site for each material. The code below will take ~15 minutes. This could be parallelized to be more efficient, but is not for simplicity in this case.
|
<ASSISTANT_TASK:>
Python Code:
# Import statements
from pymatgen import Structure, Lattice, MPRester, Molecule
from pymatgen.analysis.adsorption import *
from pymatgen.core.surface import generate_all_slabs
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from matplotlib import pyplot as plt
%matplotlib inline
# Note that you must provide your own API Key, which can
# be accessed via the Dashboard at materialsproject.org
mpr = MPRester()
fcc_ni = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.5), ["Ni"], [[0, 0, 0]])
slabs = generate_all_slabs(fcc_ni, max_index=1, min_slab_size=8.0,
min_vacuum_size=10.0)
ni_111 = [slab for slab in slabs if slab.miller_index==(1,1,1)][0]
asf_ni_111 = AdsorbateSiteFinder(ni_111)
ads_sites = asf_ni_111.find_adsorption_sites()
print(ads_sites)
assert len(ads_sites) == 4
fig = plt.figure()
ax = fig.add_subplot(111)
plot_slab(ni_111, ax, adsorption_sites=True)
fig = plt.figure()
ax = fig.add_subplot(111)
adsorbate = Molecule("H", [[0, 0, 0]])
ads_structs = asf_ni_111.generate_adsorption_structures(adsorbate,
repeat=[1, 1, 1])
plot_slab(ads_structs[0], ax, adsorption_sites=False, decay=0.09)
fig = plt.figure()
axes = [fig.add_subplot(2, 3, i) for i in range(1, 7)]
mats = {"mp-23":(1, 0, 0), # FCC Ni
"mp-2":(1, 1, 0), # FCC Au
"mp-13":(1, 1, 0), # BCC Fe
"mp-33":(0, 0, 1), # HCP Ru
"mp-30": (2, 1, 1),
"mp-5229":(1, 0, 0),
} # Cubic SrTiO3
#"mp-2133":(0, 1, 1)} # Wurtzite ZnO
for n, (mp_id, m_index) in enumerate(mats.items()):
struct = mpr.get_structure_by_material_id(mp_id)
struct = SpacegroupAnalyzer(struct).get_conventional_standard_structure()
slabs = generate_all_slabs(struct, 1, 5.0, 2.0, center_slab=True)
slab_dict = {slab.miller_index:slab for slab in slabs}
asf = AdsorbateSiteFinder.from_bulk_and_miller(struct, m_index, undercoord_threshold=0.10)
plot_slab(asf.slab, axes[n])
ads_sites = asf.find_adsorption_sites()
sop = get_rot(asf.slab)
ads_sites = [sop.operate(ads_site)[:2].tolist()
for ads_site in ads_sites["all"]]
axes[n].plot(*zip(*ads_sites), color='k', marker='x',
markersize=10, mew=1, linestyle='', zorder=10000)
mi_string = "".join([str(i) for i in m_index])
axes[n].set_title("{}({})".format(struct.composition.reduced_formula, mi_string))
axes[n].set_xticks([])
axes[n].set_yticks([])
axes[4].set_xlim(-2, 5)
axes[4].set_ylim(-2, 5)
fig.savefig('slabs.png', dpi=200)
!open slabs.png
from fireworks import LaunchPad
lpad = LaunchPad()
lpad.reset('', require_password=False)
from atomate.vasp.workflows.base.adsorption import get_wf_surface, get_wf_surface_all_slabs
co = Molecule("CO", [[0, 0, 0], [0, 0, 1.23]])
h = Molecule("H", [[0, 0, 0]])
struct = mpr.get_structure_by_material_id("mp-23") # fcc Ni
struct = SpacegroupAnalyzer(struct).get_conventional_standard_structure()
slabs = generate_all_slabs(struct, 1, 5.0, 2.0, center_slab=True)
slab_dict = {slab.miller_index:slab for slab in slabs}
ni_slab_111 = slab_dict[(1, 1, 1)]
wf = get_wf_surface([ni_slab_111], molecules=[co, h])
lpad.add_wf(wf)
lpad.get_wf_summary_dict(1)
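# Hedged illustration (not executed in the original notebook): once the workflow is in
# the LaunchPad, it could be run locally with FireWorks' rapidfire launcher. Actually
# running it requires a configured FireWorker and a working VASP installation, so the
# calls are left commented out as a sketch.
# from fireworks.core.rocket_launcher import rapidfire
# rapidfire(lpad)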
from pymatgen.core.periodic_table import *
from pymatgen.core.surface import get_symmetrically_distinct_miller_indices
import tqdm
lpad.reset('', require_password=False)
OH = Molecule("OH", [[0, 0, 0], [-0.793, 0.384, 0.422]])
O = Molecule("O", [[0, 0, 0]])
OOH = Molecule("OOH", [[0, 0, 0], [-1.067, -0.403, 0.796],
[-0.696, -0.272, 1.706]])
adsorbates = [OH, O, OOH]
elements = [Element.from_Z(i) for i in range(1, 103)]
trans_metals = [el for el in elements if el.is_transition_metal]
# tqdm adds a progress bar so we can see the progress of the for loop
for metal in tqdm.tqdm_notebook(trans_metals):
# Get relatively stable structures with small unit cells
data = mpr.get_data("{}-O".format(metal.symbol))
data = [datum for datum in data if datum["e_above_hull"] < 0.05]
data = sorted(data, key = lambda x: x["nsites"])
struct = Structure.from_str(data[0]["cif"], fmt='cif')
# Put in conventional cell settings
struct = SpacegroupAnalyzer(struct).get_conventional_standard_structure()
# Get distinct miller indices for low-index facets
wf = get_wf_surface_all_slabs(struct, adsorbates)
lpad.add_wf(wf)
print("Processed: {}".format(struct.formula))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A new environment capsule with preset libraries installed for one or more of your projects can be created. fauenv is the name of the new python 3.x environment in this example.
Step2: Check which environments are currently installed. root is the name of the default one.
Step3: Then, activate the desired environment.
Step4: Install Packages for Machine Learning / Pattern Recognition
Step5: Install packages not in the conda repository via pip.
Step6: Clean up
Step7: Update
Step8: Now, all is set up to get started.
Step9: The console will show a URL like http
Step10: Jupyter Notebook magic functions
Step11: In standard python this would be achieved by
Step12: Some useful magic functions are
Step13: Further readings
Step14: Upgrading to a new version of python
|
<ASSISTANT_TASK:>
Python Code:
conda update --all
conda create -n fauenv python=3
conda info -e
activate fauenv
conda install -n fauenv numpy scipy matplotlib scikit-learn scikit-image ipython ipython-notebook
conda install -n fauenv nose pip anaconda-client pillow ujson flask jinja2 natsort joblib numba pyside
activate fauenv # if not in fauenv environment already
pip install visvis tinydb nibabel pydicom medpy simpleITK pycuda numpy-stl websockets
conda clean -tps # delete downloaded cached tarballs (t), orphaned packages (p), and cached sources (s)
conda update --all -n fauenv
activate fauenv # if not in fauenv environment already
pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs pip install -U
jupyter notebook # --port 8888
%pylab inline
%time sum(range(int(1e7)))
%timeit sum(range(10000000))
python -mtimeit -s"import test" "mytestFunction(42)"
%quickref
conda update --all
conda update --all -n fauenv
activate fauenv
conda clean -tps
pip freeze --local | grep -v '^\-e' | cut -d = -f 1 | xargs pip install -U
jupyter notebook
activate fauenv
conda install python=3.6
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I need a matrix like this
Step2: np array operator
Step3: Sliccing
Step4: Goto opencv/build/python/2.7 folder.
|
<ASSISTANT_TASK:>
Python Code:
print x
type(x)
y=np.ones((2,3))
print y
z=np.arange(2,8,1)
alpha=np.reshape(z,(3,2))
print alpha
beta= np.random.randn(3,4)
print beta
gamma=beta*2.0
print gamma
a=[3,4,5]
a=np.array(a)
type(a)
a=np.random.randint(0,10,(2,3))
b=np.random.randint(0,10,(2,3))
print a
print b
print "element-wise addition:\n%s"%(a + b)
print "element-wise addition:\n%s"%(a * b)
print a
print b.T
print '-----'
print np.dot(a,b.T)
a=np.random.randint(0,10,(4,5))
print a
a[0,2]
a[3,3]=9
print a
print a[:,:3]
print a[:3,:]
print a[:3,:3]
print a[-3:,-3:]
b=np.array([2,3,5,7,8])
print b
b[::-1]
a=np.random.randint(0,10,(4,5))
print a
a[::-1]
print np.fliplr(a)
a.astype(float)
print np.arange(16)[::2]
(-1,)+(1,2,3)+(4,5)
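# Hedged check (not part of the original notes, and assuming the same Python 2
# environment as above): after copying cv2.pyd from the opencv/build/python/2.7
# folder into site-packages, verify that the import works.
import cv2
print cv2.__version__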
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download kfctl
Step2: If you are using AI Platform Notebooks, your environment is already authenticated. Skip the following cell.
Step3: Set up environment variables
Step4: Configure gcloud and add kfctl to your path.
Step5: Create service account
Step6: Set GOOGLE_APPLICATION_CREDENTIALS
Step7: Setup and deploy Kubeflow
Step8: Install Kubeflow Pipelines SDK
Step9: Sanity Check
|
<ASSISTANT_TASK:>
Python Code:
work_directory_name = 'kubeflow'
! mkdir -p $work_directory_name
%cd $work_directory_name
## Download kfctl v0.7.0
! curl -LO https://github.com/kubeflow/kubeflow/releases/download/v0.7.0/kfctl_v0.7.0_linux.tar.gz
## Unpack the tar ball
! tar -xvf kfctl_v0.7.0_linux.tar.gz
## Create user credentials
! gcloud auth application-default login
# Set your GCP project ID and the zone where you want to create the Kubeflow deployment
%env PROJECT=<ADD GCP PROJECT HERE>
%env ZONE=<ADD GCP ZONE TO LAUNCH KUBEFLOW CLUSTER HERE>
# google cloud storage bucket
%env GCP_BUCKET=gs://<ADD STORAGE LOCATION HERE>
# Use the following kfctl configuration file for authentication with
# Cloud IAP (recommended):
uri = "https://raw.githubusercontent.com/kubeflow/manifests/v0.7-branch/kfdef/kfctl_gcp_iap.0.7.0.yaml"
uri = uri.strip()
%env CONFIG_URI=$uri
# For using Cloud IAP for authentication, create environment variables
# from the OAuth client ID and secret that you obtained earlier:
%env CLIENT_ID=<ADD OAuth CLIENT ID HERE>
%env CLIENT_SECRET=<ADD OAuth CLIENT SECRET HERE>
# Set KF_NAME to the name of your Kubeflow deployment. You also use this
# value as directory name when creating your configuration directory.
# For example, your deployment name can be 'my-kubeflow' or 'kf-test'.
%env KF_NAME=<ADD KUBEFLOW DEPLOYMENT NAME HERE>
# Set up name of the service account that should be created and used
# while creating the Kubeflow cluster
%env SA_NAME=<ADD SERVICE ACCOUNT NAME TO BE CREATED HERE>
! gcloud config set project ${PROJECT}
! gcloud config set compute/zone ${ZONE}
# Set the path to the base directory where you want to store one or more
# Kubeflow deployments. For example, /opt/.
# Here we use the current working directory as the base directory
# Then set the Kubeflow application directory for this deployment.
import os
base = os.getcwd()
%env BASE_DIR=$base
kf_dir = os.getenv('BASE_DIR') + "/" + os.getenv('KF_NAME')
%env KF_DIR=$kf_dir
# The following command is optional. It adds the kfctl binary to your path.
# If you don't add kfctl to your path, you must use the full path
# each time you run kfctl. In this example, the kfctl file is present in
# the current directory
new_path = os.getenv('PATH') + ":" + os.getenv('BASE_DIR')
%env PATH=$new_path
! gcloud iam service-accounts create ${SA_NAME}
! gcloud projects add-iam-policy-binding ${PROJECT} \
--member serviceAccount:${SA_NAME}@${PROJECT}.iam.gserviceaccount.com \
--role 'roles/owner'
! gcloud iam service-accounts keys create key.json \
--iam-account ${SA_NAME}@${PROJECT}.iam.gserviceaccount.com
key_path = os.getenv('BASE_DIR') + "/" + 'key.json'
%env GOOGLE_APPLICATION_CREDENTIALS=$key_path
! mkdir -p ${KF_DIR}
%cd $kf_dir
! kfctl apply -V -f ${CONFIG_URI}
%%capture
# Install the SDK (Uncomment the code if the SDK is not installed before)
! pip3 install 'kfp>=0.1.36' --quiet --user
! kubectl -n istio-system describe ingress
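# Hedged sanity check (not part of the original notebook): once the SDK is installed,
# a client could be created against the deployment's pipeline endpoint. The host URL
# below is a placeholder; use the ingress address printed by the kubectl command above.
# import kfp
# client = kfp.Client(host='https://<your-kubeflow-endpoint>/pipeline')
# print(client.list_experiments())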
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's start by creating our hmm's. Since this is a simple example, we will only have 2 states, rigged and unrigged, with discrete distributions.
Step2: Now let's create our non-cheater and dumb cheater since their hmm's consist of a single state.
Step3: We can now add our starting states to our models.
Step4: We can also add the transitions to our non-cheater and dumb cheater.
Step5: Now we bake our models to finalize the structure.
Step6: Now we can create our smart cheater.
Step7: The structure of our smart cheater hmm should look like the following.
Step8: Then we can add our transition matrix.
Step9: Finally we can bake the model to finalize its structure.
Step10: We can view the structures of all our hmm's in the images below.
Step11: Now we can finally create our Naive Bayes classifier. We'll let 0 stand for a non-cheater, 1 stand for a smart cheater, and 2 stand for a dumb cheater.
Step12: Now let's create a sequence of coin tosses to test our classifier on.
Step13: We can now check the log probability of each string of data occurring under each model.
Step14: Finally we can classify whether a non-cheater (0), smart cheater (1), or dumb cheater (2) created the string of data.
Step15: We can also train our hmm's through Naive Bayes. Let's used the following data, all marked as known games for smart cheaters.
Step16: Now let's fit our NaiveBayes classifier to our data.
|
<ASSISTANT_TASK:>
Python Code:
from pomegranate import *
import numpy as np
%pylab inline
rigged = State( DiscreteDistribution({'H': 0.8, 'T': 0.2}), name="rigged" )
unrigged = State( DiscreteDistribution({'H': 0.5, 'T': 0.5}), name="unrigged" )
non_cheater = HiddenMarkovModel( name="non-cheater" )
non_cheater.add_state( unrigged )
dumb_cheater = HiddenMarkovModel( name="dumb-cheater" )
dumb_cheater.add_state( rigged )
non_cheater.start = unrigged
dumb_cheater.start = rigged
non_cheater.add_transition( unrigged, unrigged, 1 )
dumb_cheater.add_transition( rigged, rigged, 1 )
non_cheater.bake()
dumb_cheater.bake()
smart_cheater = HiddenMarkovModel( name="smart-cheater" )
smart_cheater.add_transition( smart_cheater.start, unrigged, 0.5 )
smart_cheater.add_transition( smart_cheater.start, rigged, 0.5 )
smart_cheater.add_transition( rigged, rigged, 0.5 )
smart_cheater.add_transition( rigged, unrigged, 0.5 )
smart_cheater.add_transition( unrigged, rigged, 0.5 )
smart_cheater.add_transition( unrigged, unrigged, 0.5 )
smart_cheater.bake()
plt.title("smart cheater hmm")
smart_cheater.plot()
plt.title("dumb cheater hmm")
dumb_cheater.plot()
plt.title("non-cheater hmm")
non_cheater.plot()
players = NaiveBayes([ non_cheater, smart_cheater, dumb_cheater ])
data = np.array([list( 'HHHHHTHTHTTTTHHHTHHTTHHHHHTH' ),
list( 'HHHHHHHTHHHHTTHHHHHHHTTHHHHH' ),
list( 'THTHTHTHTHTHTTHHTHHHHTTHHHTT' )])
probs = players.predict_proba( data )
for i in range(len(probs)):
print("For sequence {}, {:.3}% non-cheater, {:.3}% smart cheater, {:.3}% dumb cheater.".format( i+1, 100*probs[i][0], 100*probs[i][1], 100*probs[i][2]))
output = players.predict( data )
for i in range(len(output)):
print("Sequence {} is a {}".format( i+1, "non-cheater" if output[i] == 0 else "smart cheater" if output[i] == 1 else "dumb cheater"))
X = np.array([list( 'HHHHHTHTHTTTTH' ),
list( 'HHTHHTTHHHHHTH' )])
y = np.array([ 1, 1 ])
players.fit( X, y )
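# Hedged addition (not part of the original tutorial): after fitting, re-classify the
# earlier sequences to see whether the updated smart-cheater model changes the calls.
output = players.predict( data )
for i in range(len(output)):
    print("Sequence {} is now classified as a {}".format(
        i+1, "non-cheater" if output[i] == 0 else "smart cheater" if output[i] == 1 else "dumb cheater"))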
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TFX — Running a simple pipeline manually in a Colab Notebook
Step2: Import packages
Step3: Check the versions
Step4: Download example data
Step5: Take a quick look at the CSV file.
Step6: Create the InteractiveContext
Step7: Run TFX Components Interactively
Step8: The component's outputs include 2 artifacts
Step9: Take a peek at the output training examples to see what they look like.
Step10: The StatisticsGen Component
Step11: Again, let's take a peek at the output training artifact. Note that this time it is a TFRecord file containing a single record with a serialized DatasetFeatureStatisticsList protobuf
Step12: The stats can be visualized using the tfdv.visualize_statistics() function (we will look at this in more detail in a subsequent lab).
Step13: The SchemaGen Component
Step14: The generated artifact is just a schema.pbtxt containing a text representation of a schema_pb2.Schema protobuf
Step15: It can be visualized using tfdv.display_schema() (we will look at this in more detail in a subsequent lab)
Step16: The ExampleValidator Component
Step17: The output artifact of the ExampleValidator is an anomalies.pbtxt file describing an anomalies_pb2.Anomalies protobuf
Step18: This can be visualized using the tfdv.display_anomalies() function (we will look at this in more details in a subsequent lab). Did it find any anomalies?
Step19: The Transform Component
Step22: Now let's define a module containing the preprocessing_fn() function that we will pass to the Transform component
Step23: Create and run the Transform component, referring to the files that were created above.
Step24: The Transform component has 2 types of outputs
Step25: Take a peek at the transform_graph artifact
Step26: The transform_fn subdirectory contains the actual preprocessing graph. The metadata subdirectory contains the schema of the original data. The transformed_metadata subdirectory contains the schema of the preprocessed data.
Step33: The Trainer Component
Step34: Create and run the Trainer component, passing it the file that we created above.
Step35: Take a peek at the trained model which was exported from Trainer.
Step36: Analyze Training with TensorBoard
Step37: The Evaluator Component
Step38: Let's load the Evaluator results and render them using the tfma.view.render_slicing_metrics() function
Step39: We can also pass feature slice specifications if we want to evaluate the quality of the model over specific subsets of the data
Step40: Let's look at the results
Step41: The metrics are also accessible programmatically
Step42: The ModelValidator Component
Step43: Examine the output of ModelValidator.
Step44: The Pusher Component
Step45: Create and run a Pusher component.
Step46: Examine the output of Pusher.
Step47: Review the model signatures and methods.
Step48: Alternatively, we can use the command line utility saved_model_cli to look at the MetaGraphDefs (the models) and SignatureDefs (the methods you can call) in our SavedModel. See this discussion of the SavedModel CLI in the TensorFlow Guide.
Step49: That tells us a few important things about our model. In this case we just trained our model, so we already know the inputs and outputs, but if we didn't this would be important information.
Step50: Install TensorFlow Serving
Step51: Start running TensorFlow Serving
Step53: Perform Inference on example data
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install -q -U \
tensorflow==2.0.0 \
tfx==0.15.0rc0
import base64
import csv
import json
import os
import requests
import tempfile
import urllib
import pprint
pp = pprint.PrettyPrinter()
import tensorflow as tf
import tfx
from tfx.components.evaluator.component import Evaluator
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
from tfx.components.example_validator.component import ExampleValidator
from tfx.components.model_validator.component import ModelValidator
from tfx.components.pusher.component import Pusher
from tfx.components.schema_gen.component import SchemaGen
from tfx.components.statistics_gen.component import StatisticsGen
from tfx.components.trainer.component import Trainer
from tfx.components.transform.component import Transform
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
from tfx.proto import evaluator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.utils.dsl_utils import external_input
from tensorflow_metadata.proto.v0 import anomalies_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
import tensorflow_transform as tft
from tensorflow_transform import coders as tft_coders
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import schema_utils
import tensorflow_model_analysis as tfma
import tensorflow_data_validation as tfdv
print('TensorFlow version: {}'.format(tf.__version__))
print('TFX version: {}'.format(tfx.__version__))
# Download the example data.
DATA_PATH = 'https://raw.githubusercontent.com/ageron/open-datasets/master/' \
'online_news_popularity_for_course/online_news_popularity_for_course.csv'
_data_root = tempfile.mkdtemp(prefix='tfx-data')
_data_filepath = os.path.join(_data_root, "data.csv")
urllib.request.urlretrieve(DATA_PATH, _data_filepath)
!head {_data_filepath}
# Here, we create an InteractiveContext using default parameters. This will
# use a temporary directory with an ephemeral ML Metadata database instance.
# To use your own pipeline root or database, the optional properties
# `pipeline_root` and `metadata_connection_config` may be passed to
# InteractiveContext.
context = InteractiveContext()
# Use the packaged CSV input data.
input_data = external_input(_data_root)
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input=input_data)
context.run(example_gen)
for artifact in example_gen.outputs['examples'].get():
print(artifact.split, artifact.uri)
train_uri = example_gen.outputs['examples'].get()[0].uri
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
decoder = tfdv.TFExampleDecoder()
for tfrecord in dataset.take(1):
serialized_example = tfrecord.numpy()
example = decoder.decode(serialized_example)
pp.pprint(example)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(
examples=example_gen.outputs['examples'])
context.run(statistics_gen)
train_uri = statistics_gen.outputs['statistics'].get()[0].uri
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
dataset = tf.data.TFRecordDataset(tfrecord_filenames)
for tfrecord in dataset.take(1):
serialized_example = tfrecord.numpy()
stats = statistics_pb2.DatasetFeatureStatisticsList()
stats.ParseFromString(serialized_example)
tfdv.visualize_statistics(stats)
# Generates schema based on statistics files.
infer_schema = SchemaGen(statistics=statistics_gen.outputs['statistics'])
context.run(infer_schema)
train_uri = infer_schema.outputs['schema'].get()[0].uri
schema_filename = os.path.join(train_uri, "schema.pbtxt")
schema = tfx.utils.io_utils.parse_pbtxt_file(file_name=schema_filename,
message=schema_pb2.Schema())
tfdv.display_schema(schema)
# Performs anomaly detection based on statistics and data schema.
validate_stats = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=infer_schema.outputs['schema'])
context.run(validate_stats)
train_uri = validate_stats.outputs['anomalies'].get()[0].uri
anomalies_filename = os.path.join(train_uri, "anomalies.pbtxt")
anomalies = tfx.utils.io_utils.parse_pbtxt_file(
file_name=anomalies_filename,
message=anomalies_pb2.Anomalies())
tfdv.display_anomalies(anomalies)
_constants_module_file = 'online_news_constants.py'
%%writefile {_constants_module_file}
DENSE_FLOAT_FEATURE_KEYS = [
"timedelta", "n_tokens_title", "n_tokens_content",
"n_unique_tokens", "n_non_stop_words", "n_non_stop_unique_tokens",
"n_hrefs", "n_self_hrefs", "n_imgs", "n_videos", "average_token_length",
"n_keywords", "kw_min_min", "kw_max_min", "kw_avg_min", "kw_min_max",
"kw_max_max", "kw_avg_max", "kw_min_avg", "kw_max_avg", "kw_avg_avg",
"self_reference_min_shares", "self_reference_max_shares",
"self_reference_avg_shares", "is_weekend", "global_subjectivity",
"global_sentiment_polarity", "global_rate_positive_words",
"global_rate_negative_words", "rate_positive_words", "rate_negative_words",
"avg_positive_polarity", "min_positive_polarity", "max_positive_polarity",
"avg_negative_polarity", "min_negative_polarity", "max_negative_polarity",
"title_subjectivity", "title_sentiment_polarity", "abs_title_subjectivity",
"abs_title_sentiment_polarity"]
VOCAB_FEATURE_KEYS = ["data_channel"]
BUCKET_FEATURE_KEYS = ["LDA_00", "LDA_01", "LDA_02", "LDA_03", "LDA_04"]
CATEGORICAL_FEATURE_KEYS = ["weekday"]
# Categorical features are assumed to each have a maximum value in the dataset.
MAX_CATEGORICAL_FEATURE_VALUES = [6]
#UNUSED: date, slug
LABEL_KEY = "n_shares_percentile"
VOCAB_SIZE = 10
OOV_SIZE = 5
FEATURE_BUCKET_COUNT = 10
def transformed_name(key):
return key + '_xf'
_transform_module_file = 'online_news_transform.py'
%%writefile {_transform_module_file}
import tensorflow as tf
import tensorflow_transform as tft
from online_news_constants import *
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
outputs = {}
for key in DENSE_FLOAT_FEATURE_KEYS:
# Preserve this feature as a dense float, setting nan's to the mean.
outputs[transformed_name(key)] = tft.scale_to_z_score(
_fill_in_missing(inputs[key]))
for key in VOCAB_FEATURE_KEYS:
# Build a vocabulary for this feature.
outputs[transformed_name(key)] = tft.compute_and_apply_vocabulary(
_fill_in_missing(inputs[key]),
top_k=VOCAB_SIZE,
num_oov_buckets=OOV_SIZE)
for key in BUCKET_FEATURE_KEYS:
outputs[transformed_name(key)] = tft.bucketize(
_fill_in_missing(inputs[key]), FEATURE_BUCKET_COUNT,
always_return_num_quantiles=False)
for key in CATEGORICAL_FEATURE_KEYS:
outputs[transformed_name(key)] = _fill_in_missing(inputs[key])
# How popular is this article?
outputs[transformed_name(LABEL_KEY)] = _fill_in_missing(inputs[LABEL_KEY])
return outputs
def _fill_in_missing(x):
  """Replace missing values in a SparseTensor.

  Fills in missing values of `x` with '' or 0, and converts to a dense tensor.

  Args:
    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
default_value = '' if x.dtype == tf.string else 0
return tf.squeeze(
tf.sparse.to_dense(
tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
default_value),
axis=1)
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=infer_schema.outputs['schema'],
module_file=_transform_module_file)
context.run(transform)
transform.outputs
train_uri = transform.outputs['transform_graph'].get()[0].uri
os.listdir(train_uri)
train_uri = transform.outputs['transformed_examples'].get()[1].uri
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
decoder = tfdv.TFExampleDecoder()
for tfrecord in dataset.take(3):
serialized_example = tfrecord.numpy()
example = decoder.decode(serialized_example)
pp.pprint(example)
# Setup paths.
_trainer_module_file = 'online_news_trainer.py'
%%writefile {_trainer_module_file}
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
from online_news_constants import *
def transformed_names(keys):
return [transformed_name(key) for key in keys]
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
return schema_utils.schema_as_feature_spec(schema).feature_spec
def _gzip_reader_fn(filenames):
  """Small utility returning a record reader that can read gzip'ed files."""
return tf.data.TFRecordDataset(
filenames,
compression_type='GZIP')
def _build_estimator(config, hidden_units=None, warm_start_from=None):
  """Build an estimator for predicting the popularity of online news articles.

  Args:
    config: tf.estimator.RunConfig defining the runtime environment for the
      estimator (including model_dir).
    hidden_units: [int], the layer sizes of the DNN (input layer first)
    warm_start_from: Optional directory to warm start from.

  Returns:
    The estimator that will be used for training and eval.
  """
real_valued_columns = [
tf.feature_column.numeric_column(key, shape=())
for key in transformed_names(DENSE_FLOAT_FEATURE_KEYS)
]
categorical_columns = [
tf.feature_column.categorical_column_with_identity(
key, num_buckets=VOCAB_SIZE + OOV_SIZE, default_value=0)
for key in transformed_names(VOCAB_FEATURE_KEYS)
]
categorical_columns += [
tf.feature_column.categorical_column_with_identity(
key, num_buckets=FEATURE_BUCKET_COUNT, default_value=0)
for key in transformed_names(BUCKET_FEATURE_KEYS)
]
categorical_columns += [
tf.feature_column.categorical_column_with_identity(
key,
num_buckets=num_buckets,
default_value=0) for key, num_buckets in zip(
transformed_names(CATEGORICAL_FEATURE_KEYS),
MAX_CATEGORICAL_FEATURE_VALUES)
]
return tf.estimator.DNNLinearCombinedRegressor(
config=config,
linear_feature_columns=categorical_columns,
dnn_feature_columns=real_valued_columns,
dnn_hidden_units=hidden_units or [100, 70, 50, 25],
warm_start_from=warm_start_from)
def _example_serving_receiver_fn(tf_transform_output, schema):
  """Build the serving inputs.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    Tensorflow graph which parses examples, applying tf-transform to them.
  """
raw_feature_spec = _get_raw_feature_spec(schema)
raw_feature_spec.pop(LABEL_KEY)
raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
raw_feature_spec, default_batch_size=None)
serving_input_receiver = raw_input_fn()
transformed_features = tf_transform_output.transform_raw_features(
serving_input_receiver.features)
return tf.estimator.export.ServingInputReceiver(
transformed_features, serving_input_receiver.receiver_tensors)
def _eval_input_receiver_fn(tf_transform_output, schema):
  """Build everything needed for the tf-model-analysis to run the model.

  Args:
    tf_transform_output: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    EvalInputReceiver function, which contains:
      - Tensorflow graph which parses raw untransformed features, applies the
        tf-transform preprocessing operators.
      - Set of raw, untransformed features.
      - Label against which predictions will be compared.
  """
# Notice that the inputs are raw features, not transformed features here.
raw_feature_spec = _get_raw_feature_spec(schema)
raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
raw_feature_spec, default_batch_size=None)
serving_input_receiver = raw_input_fn()
features = serving_input_receiver.features.copy()
transformed_features = tf_transform_output.transform_raw_features(features)
# NOTE: Model is driven by transformed features (since training works on the
# materialized output of TFT, but slicing will happen on raw features.
features.update(transformed_features)
return tfma.export.EvalInputReceiver(
features=features,
receiver_tensors=serving_input_receiver.receiver_tensors,
labels=transformed_features[transformed_name(LABEL_KEY)])
def _input_fn(filenames, tf_transform_output, batch_size=200):
  """Generates features and labels for training or evaluation.

  Args:
    filenames: [str] list of CSV files to read data from.
    tf_transform_output: A TFTransformOutput.
    batch_size: int First dimension size of the Tensors returned by input_fn

  Returns:
    A (features, indices) tuple where features is a dictionary of
    Tensors, and indices is a single Tensor of label indices.
  """
transformed_feature_spec = (
tf_transform_output.transformed_feature_spec().copy())
dataset = tf.data.experimental.make_batched_features_dataset(
filenames, batch_size, transformed_feature_spec, reader=_gzip_reader_fn)
transformed_features = dataset.make_one_shot_iterator().get_next()
# We pop the label because we do not want to use it as a feature while we're
# training.
return transformed_features, transformed_features.pop(
transformed_name(LABEL_KEY))
# TFX will call this function
def trainer_fn(hparams, schema):
  """Build the estimator using the high level API.

  Args:
    hparams: Holds hyperparameters used to train the model as name/value pairs.
    schema: Holds the schema of the training examples.

  Returns:
    A dict of the following:
      - estimator: The estimator that will be used for training and eval.
      - train_spec: Spec for training.
      - eval_spec: Spec for eval.
      - eval_input_receiver_fn: Input function for eval.
  """
# Number of nodes in the first layer of the DNN
first_dnn_layer_size = 100
num_dnn_layers = 4
dnn_decay_factor = 0.7
train_batch_size = 40
eval_batch_size = 40
tf_transform_output = tft.TFTransformOutput(hparams.transform_output)
train_input_fn = lambda: _input_fn(
hparams.train_files,
tf_transform_output,
batch_size=train_batch_size)
eval_input_fn = lambda: _input_fn(
hparams.eval_files,
tf_transform_output,
batch_size=eval_batch_size)
train_spec = tf.estimator.TrainSpec(
train_input_fn,
max_steps=hparams.train_steps)
serving_receiver_fn = lambda: _example_serving_receiver_fn(
tf_transform_output, schema)
exporter = tf.estimator.FinalExporter('online-news', serving_receiver_fn)
eval_spec = tf.estimator.EvalSpec(
eval_input_fn,
steps=hparams.eval_steps,
exporters=[exporter],
name='online-news-eval')
run_config = tf.estimator.RunConfig(
save_checkpoints_steps=999, keep_checkpoint_max=1)
run_config = run_config.replace(model_dir=hparams.serving_model_dir)
estimator = _build_estimator(
      # Construct layer sizes with exponential decay
hidden_units=[
max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
for i in range(num_dnn_layers)
],
config=run_config,
warm_start_from=hparams.warm_start_from)
# Create an input receiver for TFMA processing
receiver_fn = lambda: _eval_input_receiver_fn(
tf_transform_output, schema)
return {
'estimator': estimator,
'train_spec': train_spec,
'eval_spec': eval_spec,
'eval_input_receiver_fn': receiver_fn
}
# Uses user-provided Python function that implements a model using TensorFlow's
# Estimators API.
trainer = Trainer(
module_file=_trainer_module_file,
transformed_examples=transform.outputs['transformed_examples'],
schema=infer_schema.outputs['schema'],
transform_graph=transform.outputs['transform_graph'],
train_args=trainer_pb2.TrainArgs(num_steps=10000),
eval_args=trainer_pb2.EvalArgs(num_steps=5000))
context.run(trainer)
train_uri = trainer.outputs['model'].get()[0].uri
serving_model_path = os.path.join(train_uri, 'serving_model_dir', 'export', 'online-news')
latest_serving_model_path = os.path.join(serving_model_path, max(os.listdir(serving_model_path)))
exported_model = tf.saved_model.load(latest_serving_model_path)
exported_model.graph.get_operations()[:10] + ["..."]
%load_ext tensorboard
%tensorboard --logdir {os.path.join(train_uri, 'serving_model_dir')}
model_analyzer = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
)
context.run(model_analyzer)
evaluation_uri = model_analyzer.outputs['output'].get()[0].uri
eval_result = tfma.load_eval_result(evaluation_uri)
tfma.view.render_slicing_metrics(eval_result)
# Uses TFMA to compute a evaluation statistics over features of a model.
model_analyzer = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[
evaluator_pb2.SingleSlicingSpec(
column_for_slicing=['weekday'])
]))
context.run(model_analyzer)
evaluation_uri = model_analyzer.outputs['output'].get()[0].uri
eval_result = tfma.load_eval_result(evaluation_uri)
tfma.view.render_slicing_metrics(
eval_result,
slicing_spec=tfma.slicer.SingleSliceSpec(columns=['weekday']))
for metric in eval_result.slicing_metrics:
pp.pprint(metric)
# Performs quality validation of a candidate model (compared to a baseline).
model_validator = ModelValidator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'])
context.run(model_validator)
model_validator.outputs
blessing_uri = model_validator.outputs.blessing.get()[0].uri
!ls -l {blessing_uri}
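# Hedged addition (not part of the original notebook): ModelValidator marks the result
# with an empty BLESSED (or NOT_BLESSED) file inside the blessing artifact directory,
# so the outcome can also be checked programmatically.
blessed = os.path.exists(os.path.join(blessing_uri, 'BLESSED'))
print('Model blessed by ModelValidator:', blessed)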
# Setup serving path
_serving_model_dir = os.path.join(tempfile.mkdtemp(),
'serving_model/online_news_simple')
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=model_validator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=_serving_model_dir)))
context.run(pusher)
pusher.outputs
push_uri = pusher.outputs['pushed_model'].get()[0].uri
latest_version = max(os.listdir(push_uri))
latest_version_path = os.path.join(push_uri, latest_version)
model = tf.saved_model.load(latest_version_path)
for item in model.signatures.items():
pp.pprint(item)
latest_pushed_model = os.path.join(_serving_model_dir, max(os.listdir(_serving_model_dir)))
!saved_model_cli show --dir {latest_pushed_model} --all
# This is the same as you would do from your command line, but without the [arch=amd64], and no sudo
# You would instead do:
# echo "deb [arch=amd64] http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal" | sudo tee /etc/apt/sources.list.d/tensorflow-serving.list && \
# curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | sudo apt-key add -
!echo "deb http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal" | tee /etc/apt/sources.list.d/tensorflow-serving.list && \
curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | apt-key add -
!apt update
!apt-get install tensorflow-model-server
os.environ["MODEL_DIR"] = os.path.split(latest_pushed_model)[0]
%%bash --bg
nohup tensorflow_model_server \
--rest_api_port=8501 \
--model_name=online_news_simple \
--model_base_path="${MODEL_DIR}" >server.log 2>&1
!tail server.log
eval_uri = example_gen.outputs['examples'].get()[1].uri
eval_tfrecord_paths = [os.path.join(eval_uri, name)
for name in os.listdir(eval_uri)]
def strip_label(serialized_example):
example = tf.train.Example.FromString(serialized_example.numpy())
del example.features.feature["n_shares_percentile"]
return example.SerializeToString()
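# Note: stripping the label mirrors what a real client sends at serving time --
# the model server should never receive the target "n_shares_percentile".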
dataset = tf.data.TFRecordDataset(eval_tfrecord_paths,
compression_type="GZIP")
serialized_examples = [strip_label(serialized_example)
for serialized_example in dataset.take(3)]
def do_inference(server_addr, model_name, serialized_examples):
  """Sends requests to the model and prints the results.
  Args:
    server_addr: network address of model server in "host:port" format
    model_name: name of the model as understood by the model server
    serialized_examples: serialized examples of data to do inference on
  """
parsed_server_addr = server_addr.split(':')
host=parsed_server_addr[0]
port=parsed_server_addr[1]
json_examples = []
for serialized_example in serialized_examples:
# The encoding follows the guidelines in:
# https://www.tensorflow.org/tfx/serving/api_rest
example_bytes = base64.b64encode(serialized_example).decode('utf-8')
predict_request = '{ "b64": "%s" }' % example_bytes
json_examples.append(predict_request)
json_request = '{ "instances": [' + ','.join(map(str, json_examples)) + ']}'
server_url = 'http://' + host + ':' + port + '/v1/models/' + model_name + ':predict'
response = requests.post(
server_url, data=json_request, timeout=5.0)
response.raise_for_status()
prediction = response.json()
print(json.dumps(prediction, indent=4))
do_inference(server_addr='127.0.0.1:8501',
model_name='online_news_simple',
serialized_examples=serialized_examples)
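# If the server is up, the printed JSON typically has the form
# {"predictions": [[...], [...], [...]]}, one entry per example sent.
# The exact shape of each entry depends on the exported serving signature,
# so the values shown here are illustrative only.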
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parts of Speech
Step2: Simple Grammar
Step4: Utilities
Step5: Create a Mapping from numbers to words
Step6: To send our sentences in one big chunk to our neural network we transform each sentence into a row vector and place each of these rows into a bigger matrix that holds all these rows. Not all sentences have the same length, so we will pad those that are too short with 0s in pad_into_matrix
Step12: Build a Recurrent Neural Network
Step16: Construct model
Step17: Train Model
|
<ASSISTANT_TASK:>
Python Code:
import random
import numpy as np
import theano
import theano.tensor as T
## Fake dataset:
class Sampler:
def __init__(self, prob_table):
total_prob = 0.0
if type(prob_table) is dict:
for key, value in prob_table.items():
total_prob += value
elif type(prob_table) is list:
prob_table_gen = {}
for key in prob_table:
prob_table_gen[key] = 1.0 / (float(len(prob_table)))
total_prob = 1.0
prob_table = prob_table_gen
else:
            raise TypeError("__init__ takes either a dict or a list as its first argument")
if total_prob <= 0.0:
raise ValueError("Probability is not strictly positive.")
self._keys = []
self._probs = []
for key in prob_table:
self._keys.append(key)
self._probs.append(prob_table[key] / total_prob)
def __call__(self):
sample = random.random()
seen_prob = 0.0
for key, prob in zip(self._keys, self._probs):
if (seen_prob + prob) >= sample:
return key
else:
seen_prob += prob
return key
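# Sampler implements inverse-CDF sampling: draw u ~ Uniform(0, 1) and return
# the first key whose cumulative probability reaches u, so keys are picked in
# proportion to their (normalised) probabilities.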
samplers = {
"punctuation": Sampler({".": 0.49, ",": 0.5, ";": 0.03, "?": 0.05, "!": 0.05}),
"stop": Sampler({"the": 10, "from": 5, "a": 9, "they": 3, "he": 3, "it" : 2.5, "she": 2.7, "in": 4.5}),
"noun": Sampler(["cat", "broom", "boat", "dog", "car", "wrangler", "mexico", "lantern", "book", "paper", "joke","calendar", "ship", "event"]),
"verb": Sampler(["ran", "stole", "carried", "could", "would", "do", "can", "carry", "catapult", "jump", "duck"]),
"adverb": Sampler(["rapidly", "calmly", "cooly", "in jest", "fantastically", "angrily", "dazily"])
}
def generate_nonsense(word = ""):
if word.endswith("."):
return word
else:
if len(word) > 0:
word += " "
word += samplers["stop"]()
word += " " + samplers["noun"]()
if random.random() > 0.7:
word += " " + samplers["adverb"]()
if random.random() > 0.7:
word += " " + samplers["adverb"]()
word += " " + samplers["verb"]()
if random.random() > 0.8:
word += " " + samplers["noun"]()
if random.random() > 0.9:
word += "-" + samplers["noun"]()
if len(word) > 500:
word += "."
else:
word += " " + samplers["punctuation"]()
return generate_nonsense(word)
def generate_dataset(total_size, ):
sentences = []
for i in range(total_size):
sentences.append(generate_nonsense())
return sentences
# generate dataset
lines = generate_dataset(100)
### Utilities:
class Vocab:
__slots__ = ["word2index", "index2word", "unknown"]
def __init__(self, index2word = None):
self.word2index = {}
self.index2word = []
# add unknown word:
self.add_words(["**UNKNOWN**"])
self.unknown = 0
if index2word is not None:
self.add_words(index2word)
def add_words(self, words):
for word in words:
if word not in self.word2index:
self.word2index[word] = len(self.word2index)
self.index2word.append(word)
def __call__(self, line):
        """
        Convert from numerical representation to words
        and vice-versa.
        """
if type(line) is np.ndarray:
return " ".join([self.index2word[word] for word in line])
if type(line) is list:
if len(line) > 0:
                if type(line[0]) is int:
return " ".join([self.index2word[word] for word in line])
indices = np.zeros(len(line), dtype=np.int32)
else:
line = line.split(" ")
indices = np.zeros(len(line), dtype=np.int32)
for i, word in enumerate(line):
indices[i] = self.word2index.get(word, self.unknown)
return indices
@property
def size(self):
return len(self.index2word)
def __len__(self):
return len(self.index2word)
vocab = Vocab()
for line in lines:
vocab.add_words(line.split(" "))
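# Quick round-trip sanity check (illustrative: the exact indices depend on
# the order in which words were added above; unseen words map to **UNKNOWN**):
example_indices = vocab("the cat ran .")  # string -> np.ndarray of indices
print(vocab(example_indices))             # indices -> back to the sentence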
def pad_into_matrix(rows, padding = 0):
if len(rows) == 0:
return np.array([0, 0], dtype=np.int32)
    lengths = list(map(len, rows))  # materialize: max() below would exhaust a lazy map
width = max(lengths)
height = len(rows)
mat = np.empty([height, width], dtype=rows[0].dtype)
mat.fill(padding)
for i, row in enumerate(rows):
mat[i, 0:len(row)] = row
return mat, list(lengths)
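# Minimal illustration of the padding behaviour (toy rows, not dataset data):
# two rows of lengths 3 and 1 are padded with 0s up to the max width.
toy_mat, toy_lengths = pad_into_matrix([np.array([1, 2, 3], dtype=np.int32),
                                        np.array([4], dtype=np.int32)])
# toy_mat == [[1, 2, 3], [4, 0, 0]]; toy_lengths == [3, 1]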
# transform into big numerical matrix of sentences:
numerical_lines = []
for line in lines:
numerical_lines.append(vocab(line))
numerical_lines, numerical_lengths = pad_into_matrix(numerical_lines)
from theano_lstm import Embedding, LSTM, RNN, StackedCells, Layer, create_optimization_updates, masked_loss
def softmax(x):
    """
    Wrapper for softmax, helps with
    pickling, and removing one extra
    dimension that Theano adds during
    its exponential normalization.
    """
return T.nnet.softmax(x.T)
def has_hidden(layer):
    """
    Whether a layer has a trainable
    initial hidden state.
    """
return hasattr(layer, 'initial_hidden_state')
def matrixify(vector, n):
return T.repeat(T.shape_padleft(vector), n, axis=0)
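# matrixify broadcasts a learned (d,)-dimensional initial hidden state into an
# (n, d) matrix so that every example in a minibatch of size n starts the
# recurrence from the same trainable state.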
def initial_state(layer, dimensions = None):
    """
    Initializes the recurrence relation with an initial hidden state
    if needed, else replaces with a "None" to tell Theano that
    the network **will** return something, but it does not need
    to send it to the next step of the recurrence.
    """
if dimensions is None:
return layer.initial_hidden_state if has_hidden(layer) else None
else:
return matrixify(layer.initial_hidden_state, dimensions) if has_hidden(layer) else None
def initial_state_with_taps(layer, dimensions = None):
    """Optionally wrap tensor variable into a dict with taps=[-1]."""
state = initial_state(layer, dimensions)
if state is not None:
return dict(initial=state, taps=[-1])
else:
return None
class Model:
    """
    Simple predictive model for forecasting words from
    a sequence using LSTMs. Choose how many LSTMs to stack,
    what size their memory should be, and how many
    words can be predicted.
    """
def __init__(self, hidden_size, input_size, vocab_size, stack_size=1, celltype=LSTM):
# declare model
self.model = StackedCells(input_size, celltype=celltype, layers =[hidden_size] * stack_size)
# add an embedding
self.model.layers.insert(0, Embedding(vocab_size, input_size))
# add a classifier:
self.model.layers.append(Layer(hidden_size, vocab_size, activation = softmax))
# inputs are matrices of indices,
# each row is a sentence, each column a timestep
self._stop_word = theano.shared(np.int32(999999999), name="stop word")
self.for_how_long = T.ivector()
self.input_mat = T.imatrix()
self.priming_word = T.iscalar()
self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
# create symbolic variables for prediction:
self.predictions = self.create_prediction()
# create symbolic variable for greedy search:
self.greedy_predictions = self.create_prediction(greedy=True)
# create gradient training functions:
self.create_cost_fun()
self.create_training_function()
self.create_predict_function()
def stop_on(self, idx):
self._stop_word.set_value(idx)
@property
def params(self):
return self.model.params
def create_prediction(self, greedy=False):
def step(idx, *states):
# new hiddens are the states we need to pass to LSTMs
# from past. Because the StackedCells also include
# the embeddings, and those have no state, we pass
# a "None" instead:
new_hiddens = [None] + list(states)
new_states = self.model.forward(idx, prev_hiddens = new_hiddens)
if greedy:
new_idxes = new_states[-1]
new_idx = new_idxes.argmax()
# provide a stopping condition for greedy search:
return ([new_idx.astype(self.priming_word.dtype)] + new_states[1:-1]), theano.scan_module.until(T.eq(new_idx,self._stop_word))
else:
return new_states[1:]
        # in a sequence forecasting scenario we take everything
        # up to the next-to-last step and predict the subsequent
        # steps, i.e. inputs 0 ... n - 1, hence:
inputs = self.input_mat[:, 0:-1]
num_examples = inputs.shape[0]
# pass this to Theano's recurrence relation function:
# choose what gets outputted at each timestep:
if greedy:
outputs_info = [dict(initial=self.priming_word, taps=[-1])] + [initial_state_with_taps(layer) for layer in self.model.layers[1:-1]]
result, _ = theano.scan(fn=step,
n_steps=200,
outputs_info=outputs_info)
else:
outputs_info = [initial_state_with_taps(layer, num_examples) for layer in self.model.layers[1:]]
result, _ = theano.scan(fn=step,
sequences=[inputs.T],
outputs_info=outputs_info)
if greedy:
return result[0]
# softmaxes are the last layer of our network,
# and are at the end of our results list:
return result[-1].transpose((2,0,1))
# we reorder the predictions to be:
# 1. what row / example
# 2. what timestep
# 3. softmax dimension
def create_cost_fun (self):
# create a cost function that
# takes each prediction at every timestep
# and guesses next timestep's value:
what_to_predict = self.input_mat[:, 1:]
# because some sentences are shorter, we
# place masks where the sentences end:
            # (for_how_long is zero-indexed, e.g. an example spanning `[2,3)`
            # has this value set to 0; here we subtract 1):
for_how_long = self.for_how_long - 1
# all sentences start at T=0:
starting_when = T.zeros_like(self.for_how_long)
self.cost = masked_loss(self.predictions,
what_to_predict,
for_how_long,
starting_when).sum()
def create_predict_function(self):
self.pred_fun = theano.function(
inputs=[self.input_mat],
outputs =self.predictions,
allow_input_downcast=True
)
self.greedy_fun = theano.function(
inputs=[self.priming_word],
outputs=T.concatenate([T.shape_padleft(self.priming_word), self.greedy_predictions]),
allow_input_downcast=True
)
def create_training_function(self):
updates, _, _, _, _ = create_optimization_updates(self.cost, self.params, method="adadelta")
self.update_fun = theano.function(
inputs=[self.input_mat, self.for_how_long],
outputs=self.cost,
updates=updates,
allow_input_downcast=True)
def __call__(self, x):
return self.pred_fun(x)
# construct model & theano functions:
model = Model(
input_size=10,
hidden_size=10,
vocab_size=len(vocab),
stack_size=1, # make this bigger, but makes compilation slow
celltype=RNN # use RNN or LSTM
)
model.stop_on(vocab.word2index["."])
# train:
for i in range(10000):
    error = model.update_fun(numerical_lines, numerical_lengths)
    if i % 100 == 0:
print("epoch %(epoch)d, error=%(error).2f" % ({"epoch": i, "error": error}))
if i % 500 == 0:
print(vocab(model.greedy_fun(vocab.word2index["the"])))
a = 1
print(a)
import numpy as np
import os
from setuptools import setup, find_packages
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data.
Step2: Feature normalisation.
Step3: Naive baseline
Step6: Binary relevance baseline
Step7: Cross validation according to F1.
Step8: Cross validation according to F1.
Step9: Plain logistic regression.
Step10: Cross validation for classifier of each label.
Step11: Result analysis
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%load_ext autoreload
%autoreload 2
import os, sys, time
import pickle as pkl
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, make_scorer, f1_score, label_ranking_loss
import matplotlib.pyplot as plt
import seaborn as sns
sys.path.append('src')
from evaluate import avgPrecisionK, evaluatePrecision, evaluateF1, evaluateRankingLoss, f1_score_nowarn
from datasets import create_dataset, dataset_names, nLabels_dict
dataset_names
data_ix = 3
dataset_name = dataset_names[data_ix]
nLabels = nLabels_dict[dataset_name]
print(dataset_name, nLabels)
data_dir = 'data'
SEED = 918273645
fmodel_prec = os.path.join(data_dir, 'br-' + dataset_name + '-prec.pkl')
fmodel_f1 = os.path.join(data_dir, 'br-' + dataset_name + '-f1.pkl')
fmodel_base = os.path.join(data_dir, 'br-' + dataset_name + '-base.pkl')
fperf_prec = os.path.join(data_dir, 'perf-lr-prec.pkl')
fperf_f1 = os.path.join(data_dir, 'perf-lr-f1.pkl')
fperf_base = os.path.join(data_dir, 'perf-lr-base.pkl')
X_train, Y_train = create_dataset(dataset_name=dataset_name, train_data=True, shuffle=True, random_state=SEED)
X_test, Y_test = create_dataset(dataset_name=dataset_name, train_data=False)
X_train_mean = np.mean(X_train, axis=0).reshape((1, -1))
X_train_std = np.std(X_train, axis=0).reshape((1, -1)) + 10 ** (-6)
X_train -= X_train_mean
X_train /= X_train_std
X_test -= X_train_mean
X_test /= X_train_std
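# Note: the test split is standardised with the *training* mean/std on purpose,
# so no statistics from the test set leak into preprocessing.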
#probs = np.mean(Y_train, axis=0)
#probs
#preds = np.tile(probs, (X_test.shape[0], 1))
#evaluatePrecision(Y_test, preds, verbose=1)
#evaluateRankingLoss(Y_test, preds, n_jobs=4)
class BinaryRelevance(BaseEstimator):
    """Independent logistic regression based on OneVsRestClassifier wrapper."""
def __init__(self, C=1, n_jobs=-1):
assert C > 0
self.C = C
self.n_jobs = n_jobs
self.trained = False
def fit(self, X_train, Y_train):
assert X_train.shape[0] == Y_train.shape[0]
# don't make two changes at the same time
#self.estimator = OneVsRestClassifier(LogisticRegression(class_weight='balanced', C=self.C))
self.estimator = OneVsRestClassifier(LogisticRegression(C=self.C), n_jobs=self.n_jobs)
self.estimator.fit(X_train, Y_train)
self.trained = True
def decision_function(self, X_test):
assert self.trained is True
return self.estimator.decision_function(X_test)
def predict(self, X_test, binarise=False):
preds = self.decision_function(X_test)
return preds >= 0.5 if binarise is True else preds
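# Sketch of how the wrapper is meant to be used (the fitted models further
# below are the real runs; the names here are illustrative only):
#   br = BinaryRelevance(C=1.0, n_jobs=4)
#   br.fit(X_train, Y_train)
#   margins = br.decision_function(X_test)  # real-valued, one column per label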
def print_cv_results(clf):
if hasattr(clf, 'best_params_'):
print("\nBest parameters set found on development set:")
print(clf.best_params_)
if hasattr(clf, 'cv_results_'):
for mean, std, params in zip(clf.cv_results_['mean_test_score'], \
clf.cv_results_['std_test_score'], \
clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
def dump_results(predictor, X_train, Y_train, X_test, Y_test, fname):
    """Compute and save performance results."""
preds_train = predictor.decision_function(X_train)
preds_test = predictor.decision_function(X_test)
print('Training set:')
perf_dict_train = evaluatePrecision(Y_train, preds_train)
print()
print('Test set:')
perf_dict_test = evaluatePrecision(Y_test, preds_test)
print()
print('Training set:')
perf_dict_train.update(evaluateRankingLoss(Y_train, preds_train))
print(label_ranking_loss(Y_train, preds_train))
print()
print('Test set:')
perf_dict_test.update(evaluateRankingLoss(Y_test, preds_test))
print(label_ranking_loss(Y_test, preds_test))
F1_train = f1_score_nowarn(Y_train, preds_train >= 0.5, average='samples')
F1_test = f1_score_nowarn(Y_test, preds_test >= 0.5, average='samples')
print('\nF1 Train: %.4f, %f' % (F1_train, f1_score(Y_train, preds_train >= 0.5, average='samples')))
print('\nF1 Test : %.4f %f' % (F1_test, f1_score(Y_test, preds_test >= 0.5, average='samples')))
perf_dict_train.update({'F1': (F1_train,)})
perf_dict_test.update({'F1': (F1_test,)})
perf_dict = {'Train': perf_dict_train, 'Test': perf_dict_test}
if os.path.exists(fname):
_dict = pkl.load(open(fname, 'rb'))
if dataset_name not in _dict:
_dict[dataset_name] = perf_dict
else:
_dict = {dataset_name: perf_dict}
pkl.dump(_dict, open(fname, 'wb'))
print()
print(pkl.load(open(fname, 'rb')))
clf = BinaryRelevance(n_jobs=3)
clf.fit(X_train, Y_train)
Y_pred = clf.decision_function(X_test) >= 0
f1_score_nowarn(Y_test, Y_pred, average='samples')
f1_score_nowarn(Y_test, Y_pred, average='macro')
pkl.dump(clf, open(fmodel_base, 'wb'))
def avgF1(Y_true, Y_pred):
F1 = f1_score_nowarn(Y_true, Y_pred >= 0, average='samples')
print('\nF1: %g, #examples: %g' % (F1, Y_true.shape[0]))
return F1
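# Toy sanity check for the scorer (illustrative arrays, not dataset values):
# scores >= 0 count as positive labels, so perfect margins should give F1 = 1.
_Y_toy = np.array([[1, 0], [0, 1]])
_scores_toy = np.where(_Y_toy == 1, 1.0, -1.0)
assert avgF1(_Y_toy, _scores_toy) == 1.0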
C_set = [1e-6, 3e-6, 1e-5, 3e-5, 1e-4, 3e-4, 1e-3, 3e-3, 0.01, 0.03, 0.1, 0.3,
1, 3, 10, 30, 100, 300, 1e3, 3e3, 1e4, 3e4, 1e5, 3e5, 1e6, 3e6]
parameters = [{'C': C_set}]
scorer = {'F1': make_scorer(avgF1)}
if os.path.exists(fmodel_f1):
clf = pkl.load(open(fmodel_f1, 'rb'))
else:
clf = GridSearchCV(BinaryRelevance(), parameters, cv=5, scoring=scorer, verbose=2, n_jobs=10, refit='F1')
clf.fit(X_train, Y_train)
pkl.dump(clf, open(fmodel_f1, 'wb'))
f1_score_nowarn(Y_test, clf.decision_function(X_test) >= 0, average='samples')
clf.best_params_
print_cv_results(clf)
dump_results(clf, X_train, Y_train, X_test, Y_test, fperf_prec)
from evaluate import calcLoss
from matplotlib.ticker import NullFormatter
def plot_loss_of_clf(clf, X_train, Y_train, X_test, Y_test):
preds_train = clf.decision_function(X_train)
tploss_train = calcLoss(Y_train, preds_train, 'TopPush', njobs=4)
pak_train = calcLoss(Y_train, preds_train, 'Precision@K', njobs=4)
preds_test = clf.decision_function(X_test)
tploss_test = calcLoss(Y_test, preds_test, 'TopPush', njobs=4)
pak_test = calcLoss(Y_test, preds_test, 'Precision@K', njobs=4)
#plot_loss(tploss_train, pak_train, 'Training set (' + dataset_name + ')')
plot_loss(tploss_test, pak_test, 'Test set (' + dataset_name + ')')
def plot_loss(loss, pak, title):
# the data
x = loss
y = 1 - pak
print('away from diagonal portion:', np.mean(loss != 1-pak))
nullfmt = NullFormatter() # no labels
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
# start with a rectangular Figure
plt.figure(1, figsize=(8, 8))
axScatter = plt.axes(rect_scatter)
axHistx = plt.axes(rect_histx)
axHisty = plt.axes(rect_histy)
# no labels
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
# the scatter plot:
axScatter.scatter(x, y, color='b', alpha=0.5)
axScatter.plot([0, 1], [0, 1], ls='--', color='g')
axScatter.set_xlabel('Top push loss', fontdict={'fontsize': 12})
axScatter.set_ylabel('1 - precision@K', fontdict={'fontsize': 12})
# now determine nice limits by hand:
#binwidth = 0.25
#xymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])
#lim = (int(xymax/binwidth) + 1) * binwidth
#axScatter.set_xlim((-lim, lim))
#axScatter.set_ylim((-lim, lim))
#bins = np.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=10, color='g', alpha=0.3)
axHistx.set_yscale('log')
axHisty.hist(y, bins=10, color='g', alpha=0.3, orientation='horizontal')
axHisty.set_xscale('log')
#axHistx.set_xlim(axScatter.get_xlim())
#axHisty.set_ylim(axScatter.get_ylim())
axHistx.set_title(title, fontdict={'fontsize': 15}, loc='center')
plot_loss_of_clf(clf, X_train, Y_train, X_test, Y_test)
%%script false
# NOTE: binary predictions (by predict()) are required for this method to work
if os.path.exists(fmodel_f1):
clf = pkl.load(open(fmodel_f1, 'rb'))
else:
scorer = make_scorer(f1_score_nowarn, average='samples')
clf = GridSearchCV(BinaryRelevance(), parameters, cv=5, scoring=scorer, verbose=2, n_jobs=6)
clf.fit(X_train, Y_train)
pkl.dump(clf, open(fmodel_f1, 'wb'))
print_cv_results(clf)
#dump_results(clf, X_train, Y_train, X_test, Y_test, fperf_f1)
if os.path.exists(fmodel_base):
clf = pkl.load(open(fmodel_base, 'rb'))
else:
clf = OneVsRestClassifier(LogisticRegression(verbose=1))
clf.fit(X_train, Y_train)
pkl.dump(clf, open(fmodel_base, 'wb'))
dump_results(clf, X_train, Y_train, X_test, Y_test, fperf_base)
%%script false
allPreds_train = [ ]
allPreds_test = [ ]
allTruths_train = [ ]
allTruths_test = [ ]
coefMat = [ ]
labelIndices = [ ]
ranges = range(-6, 7)
parameters = [{'C': sorted([10**(e) for e in ranges] + [3 * 10**(e) for e in ranges])}]
scoring = 'average_precision' # 'accuracy' #'precision_macro'
for label_ix in range(nLabels):
print('Training for Label %d' % (label_ix+1))
y_train = Y_train[:, label_ix]
y_test = Y_test [:, label_ix]
allTruths_train.append(y_train)
allTruths_test.append(y_test)
assert( (not np.all(y_train == 0)) and (not np.all(y_train == 1)) )
# searching for a baseline in (Lin et al.) with:
# test F1 on bibtex 0.372, 26.8
# test F1 on bookmarks 0.307, 0.219
# test F1 on delicious 0.265, 0.102
# test F1 on bibtex: 0.3730, 0.277
# test F1 on bookmarks: 0.2912, 0.2072
# test F1 on delicious: 0.1899, 0.1268
#clf = LogisticRegression(C=100)
# test F1 on bookmarks: 0.2928, 0.2109
#clf = LogisticRegression(C=60)
# test F1 on bibtex: 0.4282
#clf = GridSearchCV(LogisticRegression(class_weight='balanced'), parameters, cv=5, scoring=scoring)
# test F1 on bibtex: < 0.3
# test F1 on bookmarks: 0.2981, 0.2281
# test F1 on delicious: 0.1756, 0.0861
#clf = LogisticRegression()
# test F1 on bibtex: 0.4342
#clf = LogisticRegression(class_weight='balanced')
# test F1 on bibtex: 0.3018
#clf = GridSearchCV(LogisticRegression(), parameters, cv=5, scoring=scoring)
# test F1 on bibtex: 0.3139
#clf = GridSearchCV(LogisticRegression(), parameters, scoring=scoring)
# test F1 on bibtex: 0.4252
#clf = GridSearchCV(LogisticRegression(class_weight='balanced'), parameters, scoring=scoring)
# test F1 on bibtex: 0.3598
#clf = LogisticRegression(C=10)
# test F1 on bibtex: 0.3670
#clf = LogisticRegression(C=30)
estimator = LogisticRegression(class_weight='balanced')#, solver='lbfgs')
clf = GridSearchCV(estimator, parameters, cv=5, scoring=scoring, n_jobs=4)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print(clf.best_params_)
print()
allPreds_train.append(clf.decision_function(X_train))
allPreds_test.append(clf.decision_function(X_test))
allTruths_train = np.array(allTruths_train).T
allTruths_test = np.array(allTruths_test).T
allPreds_train = np.array(allPreds_train).T
allPreds_test = np.array(allPreds_test).T
print(allPreds_test.shape)
print(allTruths_test.shape)
#coefMat = np.array(coefMat).T
#coefMat.shape
#sns.heatmap(coefMat[:, :30])
#precisions_train = [avgPrecision(allTruths_train, allPreds_train, k) for k in range(1, nLabels+1)]
#precisions_test = [avgPrecision(allTruths_test, allPreds_test, k) for k in range(1, nLabels+1)]
#precisionK_train = avgPrecisionK(allTruths_train, allPreds_train)
#precisionK_test = avgPrecisionK(allTruths_test, allPreds_test)
%%script false
plt.figure(figsize=[10,5])
plt.plot(precisions_train, ls='--', c='r', label='Train')
plt.plot(precisions_test, ls='-', c='g', label='Test')
plt.plot([precisionK_train for k in range(nLabels)], ls='-', c='r', label='Train, Precision@K')
plt.plot([precisionK_test for k in range(nLabels)], ls='-', c='g', label='Test, Precision@K')
plt.xticks(np.arange(nLabels), np.arange(1,nLabels+1))
plt.xlabel('k')
plt.ylabel('Precision@k')
plt.legend(loc='best')
plt.title('Independent Logistic Regression on ' + dataset_name + ' dataset')
plt.savefig(dataset_name + '_lr.svg')
<END_TASK>
|