code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp interpret
# -
#export
from fastai2.data.all import *
from fastai2.optimizer import *
from fastai2.learner import *
import sklearn.metrics as skm
#hide
from fastai2.test_utils import *
# # Interpretation
#
# > Classes to build objects to better interpret predictions of a model
#export
@typedispatch
def plot_top_losses(x, y, *args, **kwargs):
    # Base case of the type-dispatched `plot_top_losses`: reached only when no
    # implementation is registered for the (x, y) types seen at call time.
    raise Exception(f"plot_top_losses is not implemented for {type(x)},{type(y)}")
#export
# re-export the dispatch stub so subclasses/registrations are importable from this module
_all_ = ["plot_top_losses"]
#export
class Interpretation():
    "Interpretation base class, can be inherited for task specific Interpretation classes"
    def __init__(self, dl, inputs, preds, targs, decoded, losses):
        # stash the dataloader plus everything `get_preds` returned
        store_attr(self, "dl,inputs,preds,targs,decoded,losses")

    @classmethod
    def from_learner(cls, learn, ds_idx=1, dl=None, act=None):
        "Construct interpretation object from a learner"
        if dl is None: dl = learn.dls[ds_idx]
        # Fix: forward `act` to `get_preds` — it was previously hard-coded to None,
        # silently ignoring any activation function passed by the caller.
        return cls(dl, *learn.get_preds(dl=dl, with_input=True, with_loss=True, with_decoded=True, act=act))

    def top_losses(self, k=None, largest=True):
        "`k` largest(/smallest) losses and indexes, defaulting to all losses (sorted by `largest`)."
        return self.losses.topk(ifnone(k, len(self.losses)), largest=largest)

    def plot_top_losses(self, k, largest=True, **kwargs):
        "Show the `k` items with the largest (or smallest) losses, along with predictions and targets."
        losses,idx = self.top_losses(k, largest)
        if not isinstance(self.inputs, tuple): self.inputs = (self.inputs,)
        # tensors can be fancy-indexed directly; otherwise rebuild a batch item by item
        if isinstance(self.inputs[0], Tensor): inps = tuple(o[idx] for o in self.inputs)
        else: inps = self.dl.create_batch(self.dl.before_batch([tuple(o[i] for o in self.inputs) for i in idx]))
        # decode (input, target) and (input, prediction) batches for display
        b = inps + tuple(o[idx] for o in (self.targs if is_listy(self.targs) else (self.targs,)))
        x,y,its = self.dl._pre_show_batch(b, max_n=k)
        b_out = inps + tuple(o[idx] for o in (self.decoded if is_listy(self.decoded) else (self.decoded,)))
        x1,y1,outs = self.dl._pre_show_batch(b_out, max_n=k)
        if its is not None:
            plot_top_losses(x, y, its, outs.itemgot(slice(len(inps), None)), self.preds[idx], losses, **kwargs)
        #TODO: figure out if this is needed
        #its None means that a batch knows how to show itself as a whole, so we pass x, x1
        #else: show_results(x, x1, its, ctxs=ctxs, max_n=max_n, **kwargs)
# sanity checks on a tiny synthetic learner
learn = synth_learner()
interp = Interpretation.from_learner(learn)
x,y = learn.dls.valid_ds.tensors
# the interpretation object should store the raw validation tensors unchanged
test_eq(interp.inputs, x)
test_eq(interp.targs, y)
# synth_learner's model is linear (pred = a*x + b) and its loss is per-item MSE
out = learn.model.a * x + learn.model.b
test_eq(interp.preds, out)
test_eq(interp.losses, (out-y)[:,0]**2)
#export
class ClassificationInterpretation(Interpretation):
    "Interpretation methods for classification models."
    def __init__(self, dl, inputs, preds, targs, decoded, losses):
        super().__init__(dl, inputs, preds, targs, decoded, losses)
        # vocab maps class indices to class names; for multi-part vocabs keep the last (target) part
        self.vocab = self.dl.vocab
        if is_listy(self.vocab): self.vocab = self.vocab[-1]

    def confusion_matrix(self):
        "Confusion matrix as an `np.ndarray`."
        x = torch.arange(0, len(self.vocab))
        d,t = flatten_check(self.decoded, self.targs)
        # broadcast equality over all classes: cm[i,j] = #(target == i and prediction == j)
        cm = ((d==x[:,None]) & (t==x[:,None,None])).long().sum(2)
        return to_np(cm)

    def plot_confusion_matrix(self, normalize=False, title='Confusion matrix', cmap="Blues", norm_dec=2,
                              plot_txt=True, **kwargs):
        "Plot the confusion matrix, with `title` and using `cmap`."
        # This function is mainly copied from the sklearn docs
        cm = self.confusion_matrix()
        # row-normalize so each actual class sums to 1
        if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        fig = plt.figure(**kwargs)
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        tick_marks = np.arange(len(self.vocab))
        plt.xticks(tick_marks, self.vocab, rotation=90)
        plt.yticks(tick_marks, self.vocab, rotation=0)
        if plot_txt:
            # write each count (or rate) into its cell, flipping text color on dark cells
            thresh = cm.max() / 2.
            for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
                coeff = f'{cm[i, j]:.{norm_dec}f}' if normalize else f'{cm[i, j]}'
                plt.text(j, i, coeff, horizontalalignment="center", verticalalignment="center", color="white" if cm[i, j] > thresh else "black")
        ax = fig.gca()
        # NOTE(review): looks like the matplotlib 3.1.x workaround for cropped top/bottom heatmap rows — confirm
        ax.set_ylim(len(self.vocab)-.5,-.5)
        plt.tight_layout()
        plt.ylabel('Actual')
        plt.xlabel('Predicted')
        plt.grid(False)

    def most_confused(self, min_val=1):
        "Sorted descending list of largest non-diagonal entries of confusion matrix, presented as actual, predicted, number of occurrences."
        cm = self.confusion_matrix()
        # zero the diagonal: correct predictions are not "confusions"
        np.fill_diagonal(cm, 0)
        res = [(self.vocab[i],self.vocab[j],cm[i,j])
               for i,j in zip(*np.where(cm>=min_val))]
        return sorted(res, key=itemgetter(2), reverse=True)

    def print_classification_report(self):
        "Print scikit-learn classification report"
        d,t = flatten_check(self.decoded, self.targs)
        # NOTE(review): assumes every vocab class occurs in t/d, otherwise sklearn
        # raises on the target_names length mismatch — confirm for sparse valid sets
        print(skm.classification_report(t, d, target_names=self.vocab))
# ## Export -
#hide
from nbdev.export import notebook2script
# write the #export-marked cells of this notebook out to the library source tree
notebook2script()
| nbs/20_interpret.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# metadata:
# interpreter:
# hash: 752579dbebe7f4dfe7c1aa72eac13e23fc88be2cc1ea7ab14e1f8d69b2d97d12
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.autograd import Function
from torchvision import datasets, transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import qiskit
from qiskit import transpile, assemble
from qiskit.visualization import *
# -
class QuantumCircuit:
    """
    This class provides a simple interface for interaction
    with the quantum circuit
    """

    def __init__(self, n_qubits, backend, shots):
        # --- Circuit definition ---
        # Hadamard on every qubit, then a shared parameterized RY(theta), then measure all
        self._circuit = qiskit.QuantumCircuit(n_qubits)
        all_qubits = [i for i in range(n_qubits)]
        self.theta = qiskit.circuit.Parameter('theta')
        self._circuit.h(all_qubits)
        self._circuit.barrier()
        self._circuit.ry(self.theta, all_qubits)
        self._circuit.measure_all()
        # ---------------------------
        self.backend = backend    # backend to execute on (e.g. Aer qasm_simulator)
        self.shots = shots        # number of measurement shots per execution

    def run(self, thetas):
        """Execute the circuit with each theta bound and return the measured expectation value."""
        # transpile for the target backend (done on every call)
        t_qc = transpile(self._circuit,
                         self.backend)
        qobj = assemble(t_qc,
                        shots=self.shots,
                        parameter_binds = [{self.theta: theta} for theta in thetas])
        job = self.backend.run(qobj)
        result = job.result().get_counts(self._circuit)
        # NOTE(review): bitstring keys are cast straight to float — only meaningful
        # for the 1-qubit circuits constructed in this notebook; confirm before
        # reusing with n_qubits > 1 or with multiple parameter binds
        counts = np.array(list(result.values()))
        states = np.array(list(result.keys())).astype(float)
        # Compute probabilities for each state
        probabilities = counts / self.shots
        # Get state expectation
        expectation = np.sum(states * probabilities)
        return np.array([expectation])
# +
# smoke test: one qubit, 100 shots, RY rotation of pi
simulator = qiskit.Aer.get_backend('qasm_simulator')
circuit = QuantumCircuit(1, simulator, 100)
print('Expected value for rotation pi {}'.format(circuit.run([np.pi])[0]))
circuit._circuit.draw()
# +
class HybridFunction(Function):
    """ Hybrid quantum - classical function definition """

    @staticmethod
    def forward(ctx, input, quantum_circuit, shift):
        """ Forward pass computation """
        # stash what backward() needs: the circuit and the parameter-shift amount
        ctx.shift = shift
        ctx.quantum_circuit = quantum_circuit
        # NOTE(review): only input[0] is run through the circuit — assumes batch size 1; confirm
        expectation_z = ctx.quantum_circuit.run(input[0].tolist())
        result = torch.tensor([expectation_z])
        ctx.save_for_backward(input, result)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        """ Backward pass computation """
        # parameter-shift rule: d<Z>/dtheta estimated from <Z>(theta+shift) - <Z>(theta-shift)
        input, expectation_z = ctx.saved_tensors
        input_list = np.array(input.tolist())
        shift_right = input_list + np.ones(input_list.shape) * ctx.shift
        shift_left = input_list - np.ones(input_list.shape) * ctx.shift
        gradients = []
        for i in range(len(input_list)):
            expectation_right = ctx.quantum_circuit.run(shift_right[i])
            expectation_left = ctx.quantum_circuit.run(shift_left[i])
            gradient = torch.tensor([expectation_right]) - torch.tensor([expectation_left])
            gradients.append(gradient)
        gradients = np.array([gradients]).T
        # chain rule: scale by the incoming gradient; None for the two non-tensor forward args
        return torch.tensor([gradients]).float() * grad_output.float(), None, None
class Hybrid(nn.Module):
    """Torch layer wrapping a parameterized single-qubit quantum circuit."""

    def __init__(self, backend, shots, shift):
        super().__init__()
        # one-qubit circuit evaluated on `backend` with `shots` measurement shots
        self.quantum_circuit = QuantumCircuit(1, backend, shots)
        # shift amount used by the parameter-shift gradient rule in backward()
        self.shift = shift

    def forward(self, input):
        # delegate to the custom autograd Function for both forward and backward
        return HybridFunction.apply(input, self.quantum_circuit, self.shift)
# +
# Concentrating on the first 100 samples
n_samples = 100
X_train = datasets.MNIST(root='./data', train=True, download=True,
                         transform=transforms.Compose([transforms.ToTensor()]))
# Leaving only labels 0 and 1 (binary task): take n_samples of each class
idx = np.append(np.where(X_train.targets == 0)[0][:n_samples],
                np.where(X_train.targets == 1)[0][:n_samples])
X_train.data = X_train.data[idx]
X_train.targets = X_train.targets[idx]
# batch_size=1: the hybrid forward pass only processes one sample at a time
train_loader = torch.utils.data.DataLoader(X_train, batch_size=1, shuffle=True)
# -
class Net(nn.Module):
    """Small CNN whose head feeds a single-qubit quantum layer.

    Returns a 2-element tensor (x, 1 - x) acting as the two class scores.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        self.dropout = nn.Dropout2d()
        self.fc1 = nn.Linear(256, 64)
        self.fc2 = nn.Linear(64, 1)
        # quantum layer: 1 qubit, 100 shots, parameter shift of pi/2
        self.hybrid = Hybrid(qiskit.Aer.get_backend('qasm_simulator'), 100, np.pi / 2)

    def forward(self, x):
        # two conv+relu+pool stages, then dropout and flatten (batch size of 1 assumed)
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = self.dropout(x).view(1, -1)
        # classical head reduces the features to a single rotation angle
        x = self.fc2(F.relu(self.fc1(x)))
        # quantum expectation value, expanded to a complementary 2-class output
        x = self.hybrid(x)
        return torch.cat((x, 1 - x), -1)
| etc/Hybrid quantum-classical Neural Networks with PyTorch and Qiskit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MATPLOTLIB PYPLOT
#
# ### 0. IMPORT THE PYPLOT MODULE OF MATPLOTLIB
#
# ### 1. GENERAL RULES OF PLOTTING <a href=#1.-GENERAL-RULES-OF-PLOTTING>go >></a>
#
# ### 2. STRUCTURE OF A MATPLOTLIB FIGURE <a href=#2.-STRUCTURE-OF-A-MATPLOTLIB-FIGURE>go >></a>
#
# ### 3. PROGRAMMING TYPES <a href=#3.-PROGRAMMING-TYPES>go >></a>
#
# ### 4. SCATTER PLOTS <a href=#4.-SCATTER-PLOTS >go >></a>
#
# ### 5. BAR PLOTS <a href=#5.-BAR-PLOTS>go >></a>
#
# ### 6. CUSTOMIZE THE AXIS TICK LABELS <a href=#6.-CUSTOMIZE-THE-AXIS-TICK-LABELS >go >></a>
#
# ### 7. CREATE A FIGURE WITH MULTIPLE PLOTS <a href=#7.-CREATE-A-FIGURE-WITH-MULTIPLE-PLOTS >go >></a>
#
# ### 8. PRESENT THE RESULTS OF AN AB TEST <a href=#8.-PRESENT-THE-RESULTS-OF-AN-AB-TEST >go >></a>
#
# ### 9. MAKE A GANTT CHART <a href=#9.-MAKE-A-GANTT-CHART >go >></a>
#
# ### ANNEX 1 - HIGH LEVEL PRINCIPLES <a href=#ANNEX-1---HIGH-LEVEL-PRINCIPLES>go >></a>
#
# ### ANNEX 2 - TRICKS <a href=#ANNEX-2---TRICKS>go >></a>
#
# ### ANNEX 3 - ADVANCED THINGS <a href=#ANNEX-3---ADVANCED-THINGS>go >></a>
#
# ### ANNEX 4 - OTHER POTENTIALLY USEFUL THINGS <a href=#ANNEX-4---OTHER-POTENTIALLY-USEFUL-THINGS>go >></a>
#
# # 0. IMPORT THE PYPLOT MODULE OF MATPLOTLIB
# +
# installing the modules used in the notebook
'''
! pip install pydataset
! pip install matplotlib
! pip install pandas
! pip install seaborn
'''
# import common packages
from pydataset import data # this package imports example datasets to use for plotting
import pandas as pd
import datetime as datetime
import numpy as np
# import packages for visualizations:
# matplotlib is a library to create plots (a plot = a chart, a graph, a visualization)
import matplotlib.pyplot as plt # pyplot is an interface to use matplotlib
import matplotlib.dates as mdates # mdates is useful to work with dates on plots
import matplotlib.ticker as tkr # tkr is useful to format the axis labels on the plot
from matplotlib.patches import Patch # Patch can create legend elements: rectangles of a given color + a label
# -
# # 1. GENERAL RULES OF PLOTTING
#
# * Show <b>all and only</b> the necessary information:
# * <b>Keep it obvious</b>: clear labels for the x and y axes, title, etc.
# * <b>High data/ink ratio</b>: remove 3D or shadow effects, background images, unnecessary borders or grid lines.
#
#
# * To make a point, <b>highlight what matters</b>, hide what's distracting (but don't be deceiving)
#
#
# * When presenting a plot, describe out loud everything on it. <b>If the audience is listening, they can't be reading the labels at the same time</b>, do it for them: "On the x axis you can see blablabla".
#
#
#
# ### QUESTIONS TO ASK YOURSELF AFTER MAKING A PLOT
#
# 1. <b>Are more labels or legends needed</b>? Be explicit.
#
# 2. <b>Can anything be removed</b> without losing clarity?
# Eg. if the x axis is a date, no need for an x label, everyone recognizes a date
#
# 3. <b>How can I make my plot easy to read?</b>
# Eg. use soft colors for what's not important, bright colors for what requires attention. Zoom in, etc.
# # 2. STRUCTURE OF A MATPLOTLIB FIGURE
#
#
# ### HIGH LEVEL STRUCTURE
#
# * The object <b>fig</b> is the figure. It contains everything, determines the size, and can be saved as an image file.
# * The object <b>axes</b> is a collection of plots (individual plots are often called ax).
# * An <b>axis</b> is an element of a plot that determines where each point/line etc is drawn. The plural of "axis" is "axes", so "2 axes" can be either the x and y axes of a plot, or 2 separate plots.
#
# 
# ### LOWER LEVEL STRUCTURE
#
# * Every element can be accessed and customized. An element is called an artist.
# * An axis is a complex part of a plot. It is made of several artists: major ticks, minor ticks, an overall axis label, and ticks labels
#
# 
# # 3. PROGRAMMING TYPES
#
# ### INTERFACE PROGRAMMING
#
# One can plot things interacting with matplotlib through pyplot without creating variables:
# +
# CREATE A PLOT WITH THE PYPLOT INTERFACE
plt.plot([1,2,3],[5,6,8]) # the plot is created and displayed directly
plt.plot([1,2,3],[6,7,9]) # no variable is created
plt.title("My title"); # the ; at the end of the last line is here to prevent the typical behavior of jupyter
# notebook, which is to print the output of the last line
# -
# ### OBJECT ORIENTED PROGRAMMING (OOP)
#
#
# When using OOP, one creates objects that can then be modified individually. It allows more flexibility and control over each element of a plot.
#
# The commands are a bit different from the interface programming ones; better learn the OOP commands directly and skip the pyplot interface commands.
#
# OOP still requires the use of a couple of pyplot methods like the one to create the objects needed:
# `fig, axes = plt.subplots()`
#
# * `fig` is the figure, a container that will host 1 or more plots.
#
# * `axes` is the collection of plots, also called 'ax', which will be laid out on `fig`.
#
# Plots are called 'ax' because 1 plot (at least a 2 dimensional one) is made of 2 axes (x axis and y axis).
# +
# CREATE A BASIC LINE PLOT WITH PYPLOT OOP
fig, axes = plt.subplots() # here the axes object will contain the default number of subplots: 1
# add elements on the plot
axes.plot([1,2,3],[5,6,8])
axes.plot([1,2,3],[6,7,9])
axes.set_title("My title");
# +
# BASIC CUSTOMIZATIONS WITH OOP
# create a basic plot again
fig, axes = plt.subplots()
axes.plot([1,2,3],[5,6,8], color='blue', label = 'line1') # using color and labels allows to create a legend later
axes.plot([1,2,3],[6,7,9], color='green', label = 'line2')
# BE EXPLICIT:
# Set a title
axes.set_title("That's my title")
# set x and y axes labels
axes.set_ylabel("that's my y label")
axes.set_xlabel("that's my x label")
# add a legend
axes.legend(loc = 'lower right');
# -
# # 4. SCATTER PLOTS
#
#
# ### Explore the relationship between 2 numerical variables
# * What type of relationship is it?
#
# 
#
# * How strong is the relationship?
# 
#
#
# * Can I use one to predict the other?
# +
# SCATTER PLOT
fig, axes = plt.subplots()
# Get some data
mammals = data('mammals')
# keep only mammals under 5 kg body weight
small_mammals = mammals.query("body<=5").copy()
small_mammals['body']=small_mammals.body*1000 # turn kg into grams
axes.scatter(x=small_mammals.body, y=small_mammals.brain)
# Set a title
axes.set_title("Brain vs Body Weights in grams")
# set x and y axes labels
axes.set_xlabel("Body")
axes.set_ylabel("Brain");
# -
# # 5. BAR PLOTS
#
# Lines are often used to represent ratios over time, and bars to represent amounts.
# +
# MAKE A SIMPLE BARPLOT
insect_sprays = data('InsectSprays')
# mean insect count per spray type
insect_sprays_mean = insect_sprays.groupby('spray')['count'].mean().reset_index()
fig, axes = plt.subplots()
axes.bar(insect_sprays_mean['spray'], insect_sprays_mean['count'])
axes.set_title("Average Count of Insects\nKilled By Spray Type")
# +
# THE WIDTH ARGUMENT FOR BAR PLOTS
# The default width in a bar plot is 0.8, it is expressed in 'data' units, not in pixel or mm.
# To make it simple, if some bars seem to be missing, they may be too thin to be displayed, so increase the width
# as much as needed
my_date_range = pd.date_range(datetime.datetime.today(), periods=1000).tolist()
my_values = list(range(len(my_date_range)))
fig, axes = plt.subplots(1,2)
axes[0].bar(my_date_range, my_values)
axes[1].bar(my_date_range, my_values, width=1.1);
# See the "customize axis tick labels" section to rotate and modify them
# +
# STACKED BAR PLOT
fig, axes = plt.subplots()
worldphones = data('WorldPhones')
bottom = 0 # bottom is where the bar starts being drawn
for col in worldphones.columns:
    axes.bar(worldphones.index, worldphones[col], bottom = bottom, label=col)
    bottom += worldphones[col] #each time that we draw a bar, we raise the bottom by the height of the bar previously
    # drawn so that in the end they appear stacked
axes.legend(bbox_to_anchor=(1.1, 1.05));
# -
# ## 6. CUSTOMIZE THE AXIS TICK LABELS
# #### How format templates work:
# 
# +
# EXAMPLE OF FORMAT TEMPLATES
my_formats = {'format_name':['percentage','dollar','euro','decimals','comma_sep_thousands_no_dec'],
              'format_templates':['{:,.2%}','${:,.0f}','{:,.0f}€','{:,.2f}','{:,.0f}']}
print(">>> Common format templates:")
# NOTE(review): `Styler.hide_index` is deprecated in recent pandas (use `Styler.hide`) — confirm pandas version
pd.DataFrame(my_formats).style.hide_index()
# +
# BASIC FORMATTING OF THE AXIS TICK LABELS
# make example data:
dummy_data = pd.DataFrame({"x":range(10),
                           "y":range(10)})
fig, axes = plt.subplots(2, 2)
# Just plotting 4 times the same data everywhere because we only care about the number formatting on the y axis
axes[0,0].plot(dummy_data.x, dummy_data.y)
axes[0,1].plot(dummy_data.x, dummy_data.y)
axes[1,0].plot(dummy_data.x, dummy_data.y)
axes[1,1].plot(dummy_data.x, dummy_data.y)
# Format the y axes numbers with set_major_formatter
axes[0,0].yaxis.set_major_formatter(tkr.FuncFormatter(lambda y, p: '{:,.0%}'.format(y)))
axes[0,1].yaxis.set_major_formatter(tkr.FuncFormatter(lambda y, p: '${:,.0f}'.format(y)))
axes[1,0].yaxis.set_major_formatter(tkr.FuncFormatter(lambda y, p: '{:,.0f}€'.format(y)))
axes[1,1].yaxis.set_major_formatter(tkr.FuncFormatter(lambda y, p: '{:,.2f}'.format(y)))
# Increase the y tick label size:
axes[0,0].tick_params(axis='y', which='major', labelsize=9)
axes[0,1].tick_params(axis='y', which='major', labelsize=9)
axes[1,0].tick_params(axis='y', which='major', labelsize=8)
axes[1,1].tick_params(axis='y', which='major', labelsize=8)
# Rotate the x ticks labels of the bottom 2 plots
axes[1,0].tick_params(axis='x', which='major', rotation = 90)
axes[1,1].tick_params(axis='x', which='major', rotation = 90)
# Add titles
axes[0,0].set_title("Percentage")
axes[0,1].set_title("Dollar")
axes[1,0].set_title("Euro")
axes[1,1].set_title("Decimals")
# add space between subplots
fig.tight_layout(pad=3.0);
# -
# ### WHEN THE X AXIS IS DATES
# +
# CUSTOMIZE A DATE AXIS
economics = data('economics')
# make sure that python knows that the date is a date
economics['date'] = pd.to_datetime(economics.date)
fig, axes = plt.subplots(1,2)
axes[0].plot(economics['date'], economics['unemploy'])
axes[1].plot(economics['date'], economics['unemploy'])
x_ticks_position = axes[1].get_xticks() # extract the x axis ticks position (these positions are numbers but
# these numbers actually represent dates)
x_ticks_position_as_dates = mdates.num2date(x_ticks_position) # turn the positions into actual dates
# Now format them. The default, with this many dates, shows only the year, as numbers.
# Here we add the day (%d) and show the month as text (%b), just for the example
x_ticks_labels_formatted = [item.strftime('%b %d, %Y') for item in x_ticks_position_as_dates]
# set the ticks positions. Why is that needed? Just to avoid a matplotlib warning; it doesn't change anything.
axes[1].xaxis.set_major_locator(tkr.FixedLocator(x_ticks_position))
# set the labels
axes[1].set_xticklabels(x_ticks_labels_formatted, rotation = 45, ha="right");
# Here's the full list of dates format: https://www.programiz.com/python-programming/datetime/strftime
# +
# AVOIDING OVERCROWDING FOR A DATE AXIS:
# simulate data
my_x = range(300)
my_y = range(300)
my_daterange = pd.date_range(datetime.datetime(2021,11,1), periods=300)
# Pyplot decides pretty well how many labels to show based on the space available when the labels are numbers:
fig, axes = plt.subplots(2,2)
axes[0,0].bar(x=my_x, height=my_y);
# When the labels are dates it doesn't work as well
axes[0,1].bar(x=my_daterange, height=my_y)
# Rotating the labels may be enough to make it readable
axes[1,0].bar(x=my_daterange, height=my_y)
axes[1,0].tick_params(axis='x', which='major', rotation = 90)
# But if needed it's also possible to control exactly which labels are shown:
axes[1,1].bar(x=my_daterange, height=my_y)
# Follow the same process as in the previous jupyter cell
# NOTE(review): ticks are read from axes[1,0] but applied to axes[1,1]; both show the
# same data so the positions match — confirm this is intentional
x_ticks_position = axes[1,0].get_xticks()
x_ticks_position_as_dates = mdates.num2date(x_ticks_position)
# ADDITIONAL STEP: (show only some labels)
x_ticks_position_as_dates_formatted = []
for date_tick_label in x_ticks_position_as_dates:
    if date_tick_label.month % 2 == 0: # show nothing for even month numbers
        x_ticks_position_as_dates_formatted.append('')
    elif date_tick_label.month == 1: # show the year only when it changes (in January)
        x_ticks_position_as_dates_formatted.append(date_tick_label.strftime('%Y %b'))
    else:
        x_ticks_position_as_dates_formatted.append(date_tick_label.strftime('%b'))
axes[1,1].xaxis.set_major_locator(tkr.FixedLocator(x_ticks_position))
# set the labels
axes[1,1].set_xticklabels(x_ticks_position_as_dates_formatted, rotation = 45, ha="right");
# -
# # 7. CREATE A FIGURE WITH MULTIPLE PLOTS
# +
# get data
swiss = data('swiss')
# Here we create 4 plots that will be arranged on 2 rows and 2 columns.
# We use figsize to increase the figure size and leave enough space for all plots. The first number is the
# width, the second is the height. The default values are: (6.4,4.8)
fig, axes = plt.subplots(2,2, figsize=(9,7))
# We access each plot in the object axes using indexing. In our case axes has 2 plots on 2 rows, so we
# need 2 indices to identify the plot: one for the row, one for the column: axes[0,0] would be the top
# left plot, axes[0,1] the top right plot, axes[1,0] the bottom left plot and axes[1,1] the bottom right plot.
# Note that if we have only 1 row or one column, for example with plt.subplots(1,2), then only one index is
# needed, and we would simply use, for example for the first plot: axes[0]
# plot data on each subplot
axes[0,0].scatter(x=swiss.Fertility, y=swiss['Infant.Mortality'], color="steelblue", marker ='o')
axes[0,1].scatter(x=swiss.Education, y=swiss['Infant.Mortality'], color="aquamarine", marker='v')
axes[1,0].scatter(x=swiss.Agriculture, y=swiss['Infant.Mortality'], color="tomato", marker ='.')
axes[1,1].scatter(x=swiss.Examination, y=swiss['Infant.Mortality'], color="fuchsia", marker='>')
# Set a title for each subplot
axes[0,0].set_title("Infant Mortality vs Fertility")
axes[0,1].set_title("Infant Mortality vs Education")
axes[1,0].set_title("Infant Mortality vs Agriculture")
axes[1,1].set_title("Infant Mortality vs Examination")
# add space between subplots
fig.tight_layout(pad=3.0) #default padding is 1.08
# add title to the whole figure
fig.suptitle('Main title', size='x-large'); # default title size is 'large'
# +
# Important argument in the plt.subplots() method:
# When plotting subplots that show the same data for 2 subsets of the dataframe:
# use plt.subplots(1,2, sharey = True) so all subplots share the same y axes
# -
# Trick: Name the subplots when creating the figure to avoid having to use indexing:
fig, [[top_left, top_right], [bottom_left, bottom_right]] = plt.subplots(2,2, figsize=(9,7))
top_right.plot([1,2,3],[3,1,1],color='red')
top_right.set_title('top_right')
bottom_left.plot([1,2,3],[1,3,6])
bottom_left.set_title('bottom_left');
# # 8. PRESENT THE RESULTS OF AN AB TEST
# +
# COMPARE MEANS AND DISPLAY A CONFIDENCE INTERVAL
# About error bars & statistical inference:
# Error bars are usually 1 standard deviation long.
# Intuitive explanation of what standard deviation is: https://www.mathsisfun.com/data/standard-deviation.html
# If the error bar is long, the individual values are not close to the mean, they are all over the place
# so we consider the mean of our sample less likely to be identical to the mean of the real, entire, population.
# It may be at this level just because we have too few observations and may be very different if we had more samples.
# The error bars can sometimes be 1 standard error long instead of 1 standard deviation.
# The standard error is calculated as follows: (standard deviation) / (square root of the number of observations)
# So it can be much smaller than the standard deviation. The idea is that, even if the values are all over the place,
# having a lot of observations means that we are more confident that the mean of our sample is representative of
# the mean of the entire population, so we make smaller bars when we have many observations
insect_sprays = data('InsectSprays')
fig, axes = plt.subplots()
# one bar per spray type, with a 1-standard-deviation error bar
for sp in insect_sprays.spray.unique():
    insect_sprays_subset = insect_sprays[insect_sprays.spray==sp]
    axes.bar(sp, insect_sprays_subset['count'].mean(), yerr=insect_sprays_subset['count'].std())
axes.set_title("Average Count of Insects\nKilled By Spray Type");
# +
# COMPARE SEVERAL POPULATIONS DISTRIBUTION WITH BOXPLOTS
fig, axes = plt.subplots()
insect_sprays_b = insect_sprays[insect_sprays.spray=='B']
insect_sprays_f = insect_sprays[insect_sprays.spray=='F']
insect_sprays_c = insect_sprays[insect_sprays.spray=='C']
axes.boxplot([insect_sprays_b['count'], insect_sprays_f['count'], insect_sprays_c['count']])
axes.set_xticklabels(['B','F','C']);
# +
# PLOT HISTOGRAMS TO COMPARE 2 POPULATION DISTRIBUTIONS
fig, axes = plt.subplots()
diamonds = data('diamonds')
diamonds = diamonds[diamonds.color.isin(['E','J'])]
# outline-only histograms so the overlapping distributions both stay visible
for c in diamonds.color.unique():
    diamonds_subset = diamonds[diamonds.color == c]
    axes.hist(diamonds_subset['price'], label=c, histtype='step', bins =100)
axes.set_xlabel('price')
axes.legend();
# -
# # 9. MAKE A GANTT CHART
# +
# STRUCTURE OF THE DATA
df = pd.DataFrame({"task":["pull data","clean data","modelling","code for prod","code review","deployment"],
                   "department":["DS","DS","DS","Engineering","DS","DevOps"],
                   "start_date":['2022-09-01', '2022-09-07', '2022-09-07', '2022-09-28','2022-10-03','2022-10-10'],
                   "end_date":['2022-09-07', '2022-09-28', '2022-09-14', '2022-10-03','2022-10-10','2022-10-17']})
df.task = df.task.str.upper()
df.start_date = pd.to_datetime(df.start_date)
df.end_date = pd.to_datetime(df.end_date)
# we put the tasks that start first at the bottom because the last bar to be drawn will be the highest on the plot
df.sort_values(["start_date","end_date"],ascending=[False,True], inplace=True)
# where does each bar on the GANTT start and end?
date0 = df.start_date.min() #this is the start of the x axis
df['task_start'] = (df.start_date-date0).dt.days # this is where each bar starts
df['task_end'] = (df.end_date-date0).dt.days # this is where each bar ends
df['task_length'] = df.task_end - df.task_start # this is the length of each bar
date_range_project = pd.date_range(date0, end=df.end_date.max())
# assign a color for each dpt
colors_dict = {"DS":"#4682b4","Engineering":"#ff1493","DevOps":"#DCDCDC"}
df['dpt_color'] = df.department.map(colors_dict)
fig, axes = plt.subplots(figsize=(15,7))
# for each end date, draw a dashed vertical line stopping at the relevant bar:
unique_end_dates = df.end_date.unique()
position_all_ed = []
for ed in unique_end_dates:
    position_ed = date_range_project.get_loc(ed) # position of the date on the x axis
    # NOTE(review): relies on the pre-sort RangeIndex surviving sort_values — confirm
    height_of_highest_task = df[df.end_date == ed].index[0] # height of lowest bar
    height_of_hihest_task_prop = 1-height_of_highest_task/(df.shape[0]) # height in % of y axis (1 = 100%)
    height_of_hihest_task_prop = height_of_hihest_task_prop - (1/df.shape[0]/2) # decrease the height a little
    axes.axvline(position_ed, ls='--',color="grey",alpha=0.3, ymax=height_of_hihest_task_prop)
    position_all_ed += [position_ed]
axes.barh(df.task, df.task_length, left=df.task_start, color=df.dpt_color)
# build the legend by hand: one colored Patch per department
lgd = [Patch(facecolor=colors_dict[dpt], label=dpt) for dpt in colors_dict]
axes.legend(handles=lgd)
# show the x label only when a task ends:
big_ticks_position = position_all_ed
big_ticks_labels = date_range_project.strftime("%b %d")
small_ticks_position = range(0, df.task_end.max(), 1)
axes.set_xticks(big_ticks_position)
axes.set_xticks(small_ticks_position, minor=True)
axes.set_xticklabels(big_ticks_labels[position_all_ed], rotation = 90);
# -
# # ANNEX 1 - HIGH LEVEL PRINCIPLES
# ### HIGH DATA/INK RATIO:
#
# * Remove shadow effects
# * Remove chart junk
# * Remove the background
# * Remove redundant labels
# * Remove unnecessary borders and grid lines
# * Reduce colors
# * Remove the special effects of word bubbles
# * Remove bolding or using font to communicate information
# * Less is more effective: simpler titles and labels
# * Keep in mind the data density
#
#
# ### RULES OF COLOR USE
#
# 1. Use color only when needed to serve a particular communication goal.
# 2. Use different colors only when they correspond to differences of meaning in the data.
# 3. Use soft, natural colors to display most information and bright and/or dark colors to highlight
# information that requires greater attention
# 4. Non-data components of tables and graphs should be displayed just visibly enough to perform their role,
# but no more so, for excessive salience could cause them to distract attention from the data.
#
# reference:
# http://www.perceptualedge.com/articles/visual_business_intelligence/rules_for_using_color.pdf
# # ANNEX 2 - TRICKS
# +
# USE 2 AXES: 2 different axes on the same plot
economics = data('economics')
# make sure that python knows that the date is a date
economics['date'] = pd.to_datetime(economics.date)
# NOTE(review): comparing a datetime column to the integer 2005 inside query() may
# raise on recent pandas — 'date > "2005"' is the safer spelling; confirm pandas version
economics_recent = economics.query('date>2005')
fig, axes = plt.subplots()
# plot a metric on the left y axis
axes.plot(economics_recent['date'], economics_recent['pop'], color='lightpink')
axes.tick_params('y', colors='lightpink')
# create an additional y axis (twin x means it uses the x axis of our object 'axes')
axes_twin = axes.twinx()
# plot another metric on the additional y axis
axes_twin.plot(economics_recent['date'], economics_recent['unemploy'], color='cornflowerblue')
axes_twin.tick_params('y', colors='cornflowerblue')
# Let's clean up the date labels on the x axis
x_ticks_pos = axes.get_xticks()
x_ticks_pos_as_dates = mdates.num2date(x_ticks_pos)
x_ticks_pos_as_dates_formatted = [item.strftime('%b %Y') for item in x_ticks_pos_as_dates]
axes.set_xticklabels(x_ticks_pos_as_dates_formatted, rotation=45, ha='right')
# Let's add a vertical axis on a random day as if we wanted to signal the start of something.
date_for_vertical_line = datetime.datetime(2006,6,2)
axes.axvline(date_for_vertical_line, ls='--',color="darkred");
# +
# INCREASE DATA/INK RATIO:
# make 2 plots
fig, axes = plt.subplots(1,2, sharey=True) # using sharey hides the y axis on the right plot as it is the same
axes[0].plot(range(100),range(100))
axes[1].plot(range(100),range(100))
axes[0].set_title("title plot 1")
axes[1].set_title("title plot 2")
# hide the frame around the plot: set spine artists to be invisible
axes[1].spines[['top','right']].set_visible(False) # make top and right spines invisible
# add space between subplots
fig.tight_layout(pad=4.0); #default padding is 1.08
# +
# HIGHLIGHT 1 DATA POINT
fig, axes = plt.subplots()
# plot every datapoint with a soft color
big_mammals = data('mammals').sort_values("body",ascending=False).head(10)
big_mammals['brain_to_body_ratio']=big_mammals.brain/big_mammals.body
axes.bar(x=big_mammals.index, height=big_mammals.brain, color = '#8BD3E6')
# Now let's plot over the previous plot only the datapoint we want to highlight with a bright color
donkey = big_mammals[big_mammals.index=='Donkey']
axes.bar(x=donkey.index, height=donkey.brain, color = 'red', label="highest brain to body ratio")
#let's use a label so it can be used in a legend
axes.legend() # and let's add a legend to display the label
# Set a title
axes.set_title("Weight by Mammal (kg)")
# remove spines
axes.spines[:].set_visible(False)
# Rotate the ticks labels
plt.draw() # The tick labels are not populated until the figure is drawn so the method axes.get_xticklabels()
# would return only the positions of the ticks and not the mammals names if we didn't run first plt.draw!
axes.set_xticklabels(axes.get_xticklabels(), rotation=45, ha='right');
# -
# # ANNEX 3 - ADVANCED THINGS
# +
# COMPLEX SCATTER PLOT WITH PYPLOT
# Display the relationship between up to 4 variables at once
fig, axes = plt.subplots()
df = swiss.copy()
x_axis = 'Infant.Mortality'
y_axis = 'Fertility'
colors = 'Education'
sizes = 'Examination'
scatter = axes.scatter(x=df[x_axis], y=df[y_axis], c=df[colors], s= df[sizes], marker ='o')
# watch out, the argument for the color is not "color" this time, it's just c.
# the c argument allows to pass a series of values that will be mapped to a color gradient, instead of a single color
# s is the argument for the size of the dots
# instead of just using axes.scatter(), we created a variable called scatter equal to axes.scatter()
# the reason is that we will need to extract the legend elements later from the object
# It's beautiful and everything but the tricky part is adding a legend for both size and colors
# First we create a legend for the colors.
legendcolors = axes.legend(*scatter.legend_elements(), title=colors,
loc="upper right", bbox_to_anchor=(1.25, 1))
# Instead of just adding the legend to the axes with axes.legend(), we assign the outcome to a variable legendcolors.
# the reason is that, when we create the size legend just below, we will overwrite our color legend
# So we need to add a new container (called a new artist) and give it our color legend:
axes.add_artist(legendcolors)
# Now we can add safely a legend to the axes for the sizes
handles, labels = scatter.legend_elements(prop="sizes", alpha=0.6)
axes.legend(handles, labels, title=sizes, loc="upper right", bbox_to_anchor=(1.5, 1));
# couple of things to note:
# we used loc = 'upper right' to place the legend on the top right corner
# then we used bbox_to_anchor to move the legend further to the right, so it is outside the plot
# -
# # ANNEX 4 - OTHER POTENTIALLY USEFUL THINGS
# +
# High level commands
plt.clf() # this command clears any previous plots elements. when some elements on a plot
# seem to be coming from a previous plot, use it.
plt.show() #if the plot does not appear, use that.
# It is here to display the plot. In Jupyter notebook plots are shown by default, even
# without plt.show(). In pycharm, they're not, so it's necessary there.
fig.savefig("fig.png", bbox_inches='tight') # useful if you want to integrate the image in an html code and/or make a pdf that
# contains it. the figure will be saved as a png file in the current repository (the one where the
# current notebook is)
# bbox_inches='tight' eliminates the unnecessary white space around the image
# +
# MAKE IT EASIER TO READ: ZOOM IN
# Force the y axis limits to start and end at a certain height
#axes[1].set_ylim([4.95,8.05])
# +
# MAKE A BARPLOT WITH NON CONSECUTIVE DATES
fig, axes = plt.subplots(1,2)
# The default width in a bar plot is 0.8, it is expressed 'data' units. Here we have dates, so the unit is days.
# In this dataset, there is a data point every 30 days or so:
print(economics.head().date)
axes[0].bar(economics['date'], economics['unemploy'])
axes[0].tick_params(axis='x', which='major', rotation = 90)
# Let's pass a width that will cover a full month (31 units of data)
axes[1].bar(economics['date'], economics['unemploy'],width=31)
axes[1].tick_params(axis='x', which='major', rotation = 90);
# +
# THE INVISIBLE TICK LABELS:
# when customizing an axis, one may need to retrieve the tick labels with axes.get_xticklabels()
# it's necessary to use plt.draw() first otherwise the tick labels will not be returned properly
fig, axes = plt.subplots()
# plot every datapoint
big_mammals = data('mammals').sort_values("body",ascending=False).head(10)
big_mammals['brain_to_body_ratio']=big_mammals.brain/big_mammals.body
axes.bar(x=big_mammals.index, height=big_mammals.body)
axes.tick_params(axis='x', which='major', rotation = 90)
print("Get the ticks labels before drawing:\n",axes.get_xticklabels()[0:2])
plt.draw() # The tick labels are not populated until the figure is drawn so the method axes.get_xticklabels()
# returns something meaningless until we run first plt.draw!
print("\n\n")
print("Get the ticks labels after drawing:\n",axes.get_xticklabels()[0:2])
# +
# PLOT STYLES: ADAPT YOUR PLOTS TO YOUR AUDIENCE IF NECESSARY
worldphones = worldphones[["Africa","Mid.Amer"]]
for s in ['default','seaborn-colorblind','seaborn-pastel','grayscale','bmh']:
print(">>> style = ",s)
plt.style.use(s) # set up a style
# create a stacked barplot
fig, axes = plt.subplots(figsize=(2,1))
bottom = 0
for col in worldphones.columns:
axes.bar(worldphones.index, worldphones[col], bottom = bottom, label=col)
bottom += worldphones[col]
axes.legend(bbox_to_anchor=(1.1, 1.05))
plt.style.use('default') # to reset the settings from the previous style change
# +
# Drawing with imshow
from matplotlib import pyplot as plt
from sklearn import datasets
digits = datasets.load_digits()
print(digits['images'][55])
fig, axes = plt.subplots(figsize=(1, 1))
axes.imshow(digits['images'][55], cmap="gray");
# -
| pyplot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_pytorch_p36
# language: python
# name: conda_pytorch_p36
# ---
# # Creates a model archive file to be used in the torchserve for deployment
# ## Downloads the model from the s3 link
# +
# Download the model and rename it to pytorch_model.bin, then move it to model folder
# -
# !wget http://dl.fbaipublicfiles.com/dynabench/sentiment/roberta_round1.bin
# +
## Installs torchserve and torch-model-archiver to be used in this kernel
# -
# !pip install torchserve torch-model-archiver
# ### Create torchscript file
# +
#The model should be inside the model folder,go to the model folder and execute the below command
#python3 create_torchscript.py
#then move the .pt file from transformer_model folder to model folder
# -
# ## Model Archiving
# +
# Creates session to get the default bucket name to push the tar file
# Create a boto3/SageMaker session so we can look up the account's default
# S3 bucket, which is where the packaged model archive will be uploaded.
import boto3, time, json, sagemaker
sess = boto3.Session()
sagemaker_session = sagemaker.Session(boto_session=sess)
model_file_name = "sentiment_r1_1"  # base name of the .mar / .tar.gz artifacts built below
bucket_name = sagemaker_session.default_bucket()
prefix = 'torchserve'  # S3 key prefix under which the model archives are stored
# +
# # !torch-model-archiver --help
# +
#Make sure you have the following files in the model folder pytorch_model.pt,Transformer_handler_generalized.py,vocab.json,setup_config.json,special_tokens_map.json,settings.py,tokenizer_config.json,merges.txt,qa_utils.py,config.json
# -
# !torch-model-archiver --model-name sentiment_r1_1 --version 1.0 --serialized-file ./model/pytorch_model.pt --handler ./model/Transformer_handler_generalized.py --extra-files "./model/vocab.json,./model/setup_config.json,./model/special_tokens_map.json,./model/settings.py,./model/tokenizer_config.json,./model/merges.txt"
# +
#The mar file will be present in the home directory
#This creates a tar file to be used in the sagemaker deployment
# -
# !tar cvfz {model_file_name}.tar.gz {model_file_name}.mar
#Moves the tar file to mars folder
# !mv {model_file_name}.tar.gz {model_file_name}.mar ./mars/
# change the folder name before the last slash based on the task
# !aws s3 cp mars/{model_file_name}.tar.gz s3://{bucket_name}/{prefix}/models/sent_class/
#The below s3 link will be given as the model data for sagemaker while deployment
| legacy/torchserve/legacy/notebooks/Create_Model_archive_file_for_sentiment_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import json
import numpy as np
import os
import pandas as pd
import sklearn
import sys
import tensorflow as tf
print(sys.version)
print(np.__version__)
print(sklearn.__version__)
print(tf.__version__)
# -
# ## Deep Bayesian Bandits Reproducibility
#
# This notebook explores the reproducibility around the [Deep Bayesian Bandits](https://github.com/tensorflow/models/tree/archive/research/deep_contextual_bandits) work by Google. We look at the LinTS implementation, which forms the baseline of their experiments.
#
# In order to run these experiments, please perform the steps below:
# - Clone the [tensorflow models repo](https://github.com/tensorflow/models), switch to the `archive` branch, and copy `models/research/deep_contextual_bandits/bandits` folder to this directory.
# - Run the cell below to overwrite the LinTS implementation file. This updates the multivariate sampling such that we can select the method to use while sampling, between SVD and Cholesky. Note that the SVD method was used in the original code.
# +
# %%writefile bandits/algorithms/linear_full_posterior_sampling.py
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contextual algorithm that keeps a full linear posterior for each arm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy.stats import invgamma
from bandits.core.bandit_algorithm import BanditAlgorithm
from bandits.core.contextual_dataset import ContextualDataset
class LinearFullPosteriorSampling(BanditAlgorithm):
  """Thompson Sampling with independent linear models and unknown noise var."""

  def __init__(self, name, hparams):
    """Initialize posterior distributions and hyperparameters.

    Assume a linear model for each action i: reward = context^T beta_i + noise
    Each beta_i has a Gaussian prior (lambda parameter), each sigma2_i (noise
    level) has an inverse Gamma prior (a0, b0 parameters). Mean, covariance,
    and precision matrices are initialized, and the ContextualDataset created.

    Args:
      name: Name of the algorithm.
      hparams: Hyper-parameters of the algorithm.
    """
    self.name = name
    self.hparams = hparams
    # Dedicated Generator so the 'svd'/'cholesky' sampling paths are
    # reproducible and independent of the global numpy random state.
    self.rng = np.random.default_rng(self.hparams.seed)

    # Gaussian prior for each beta_i
    self._lambda_prior = self.hparams.lambda_prior

    self.mu = [
        np.zeros(self.hparams.context_dim + 1)
        for _ in range(self.hparams.num_actions)
    ]
    self.cov = [(1.0 / self.lambda_prior) * np.eye(self.hparams.context_dim + 1)
                for _ in range(self.hparams.num_actions)]
    self.precision = [
        self.lambda_prior * np.eye(self.hparams.context_dim + 1)
        for _ in range(self.hparams.num_actions)
    ]

    # Inverse Gamma prior for each sigma2_i
    self._a0 = self.hparams.a0
    self._b0 = self.hparams.b0
    self.a = [self._a0 for _ in range(self.hparams.num_actions)]
    self.b = [self._b0 for _ in range(self.hparams.num_actions)]

    self.t = 0
    self.data_h = ContextualDataset(hparams.context_dim,
                                    hparams.num_actions,
                                    intercept=True)

  def action(self, context):
    """Samples beta's from posterior, and chooses best action accordingly.

    Args:
      context: Context for which the action need to be chosen.

    Returns:
      action: Selected action for the context.
    """
    # Round robin until each action has been selected "initial_pulls" times
    if self.t < self.hparams.num_actions * self.hparams.initial_pulls:
      return self.t % self.hparams.num_actions

    # Sample sigma2, and beta conditional on sigma2
    sigma2_s = [
        self.b[i] * invgamma.rvs(self.a[i])
        for i in range(self.hparams.num_actions)
    ]

    try:
      if self.hparams.method == 'default':
        # Legacy path: global numpy RandomState, SVD-based sampling.
        beta_s = [
            np.random.multivariate_normal(self.mu[i], sigma2_s[i] * self.cov[i])
            for i in range(self.hparams.num_actions)
        ]
      else:
        # New Generator path: decomposition selectable via hparams.method
        # ('svd' or 'cholesky').
        beta_s = [
            self.rng.multivariate_normal(self.mu[i], sigma2_s[i] * self.cov[i], method=self.hparams.method)
            for i in range(self.hparams.num_actions)
        ]
    except np.linalg.LinAlgError as e:
      # Sampling could fail if covariance is not positive definite.
      # Bug fix: exceptions have no `.message` attribute in Python 3, so the
      # original `e.message` raised AttributeError here and masked the real
      # LinAlgError; format the exception object itself instead.
      print('Exception when sampling from {}.'.format(self.name))
      print('Details: {} | {}.'.format(e, e.args))
      # Fall back to a standard-normal draw for each arm.
      d = self.hparams.context_dim + 1
      beta_s = [
          np.random.multivariate_normal(np.zeros(d), np.eye(d))
          for i in range(self.hparams.num_actions)
      ]

    # Compute sampled expected values, intercept is last component of beta
    vals = [
        np.dot(beta_s[i][:-1], context.T) + beta_s[i][-1]
        for i in range(self.hparams.num_actions)
    ]

    return np.argmax(vals)

  def update(self, context, action, reward):
    """Updates action posterior using the linear Bayesian regression formula.

    Args:
      context: Last observed context.
      action: Last observed action.
      reward: Last observed reward.
    """
    self.t += 1
    self.data_h.add(context, action, reward)

    # Update posterior of action with formulas: \beta | x,y ~ N(mu_q, cov_q)
    x, y = self.data_h.get_data(action)

    # The algorithm could be improved with sequential update formulas (cheaper)
    s = np.dot(x.T, x)

    # Some terms are removed as we assume prior mu_0 = 0.
    precision_a = s + self.lambda_prior * np.eye(self.hparams.context_dim + 1)
    cov_a = np.linalg.inv(precision_a)
    mu_a = np.dot(cov_a, np.dot(x.T, y))

    # Inverse Gamma posterior update
    a_post = self.a0 + x.shape[0] / 2.0
    b_upd = 0.5 * (np.dot(y.T, y) - np.dot(mu_a.T, np.dot(precision_a, mu_a)))
    b_post = self.b0 + b_upd

    # Store new posterior distributions
    self.mu[action] = mu_a
    self.cov[action] = cov_a
    self.precision[action] = precision_a
    self.a[action] = a_post
    self.b[action] = b_post

  @property
  def a0(self):
    return self._a0

  @property
  def b0(self):
    return self._b0

  @property
  def lambda_prior(self):
    return self._lambda_prior
# -
# We replicate the [quick start example](https://github.com/tensorflow/models/blob/archive/research/deep_contextual_bandits/example_main.py) from the Deep Contextual Bandits research repo below, focusing on just LinTS, and evaluating the cumulative reward at the end. Note that all the seeds are set, such that the cumulative reward should be the same when the code is run in the same environment.
# +
import os
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.util import deprecation
from bandits.data.data_sampler import sample_mushroom_data
from bandits.core.contextual_bandit import run_contextual_bandit
from bandits.algorithms.linear_full_posterior_sampling import LinearFullPosteriorSampling
if type(tf.contrib) != type(tf):
tf.contrib._warning = None
def sample_data(num_contexts):
    """Sample `num_contexts` mushroom contexts and their optimal play.

    Returns a 5-tuple (dataset, opt_rewards, opt_actions, num_actions,
    context_dim); the mushroom problem has fixed dimensions of 2 actions
    and 117 context features.
    """
    num_actions, context_dim = 2, 117
    dataset, (opt_rewards, opt_actions) = sample_mushroom_data('mushroom.data', num_contexts)
    return dataset, opt_rewards, opt_actions, num_actions, context_dim
# Problem parameters
num_contexts = 2000
# Create dataset
np.random.seed(42)
sampled_vals = sample_data(num_contexts)
dataset, opt_rewards, opt_actions, num_actions, context_dim = sampled_vals
def run_dbb(random_option):
    """Run LinTS on the pre-sampled mushroom bandit and return total reward.

    Args:
        random_option: sampling method passed to the algorithm's hparams —
            'default' (global np.random, SVD), 'svd' or 'cholesky'
            (Generator-based).

    Returns:
        Cumulative reward collected by the LinFullPost algorithm.

    Relies on the module-level globals `num_actions`, `context_dim` and
    `dataset` created above.
    """
    np.random.seed(42)  # fix the global seed so 'default' runs are repeatable
    hparams_linear = tf.contrib.training.HParams(num_actions=num_actions,
                                                 context_dim=context_dim,
                                                 a0=6,
                                                 b0=6,
                                                 lambda_prior=0.25,
                                                 initial_pulls=2,
                                                 seed=42,
                                                 method=random_option)
    algos = [
        LinearFullPosteriorSampling('LinFullPost', hparams_linear),
    ]
    # Removed an unused `t_init = time.time()` — the timing was never reported.
    results = run_contextual_bandit(context_dim, num_actions, dataset, algos)
    _, h_rewards = results
    reward = np.sum(h_rewards[:, 0])
    return reward
# -
# ### Option 1
# We use the default implementation, which uses `np.random.multivariate_normal`, and set the global seed to ensure reproducibility in a single environment. Note that this is the same as using `np.random.RandomState`, as the global seed sets the random state.
default_reward = run_dbb('default')
default_reward
# ### Option 2
# We use the new `Generator` class with default parameters, which internally uses SVD for decomposition:
svd_reward = run_dbb('svd')
svd_reward
# ### Option 3
# We use Cholesky decomposition with the new Generator class. Our hypothesis is that this will produce reproducible results across different environments.
cholesky_reward = run_dbb('cholesky')
cholesky_reward
# We save all the results for analysis.
rewards = pd.DataFrame({
'env': ['LinuxUbuntu_OpenBLAS'],
'default': [default_reward],
'svd': [svd_reward],
'cholesky': [cholesky_reward],
}).set_index('env')
rewards
os.makedirs('output', exist_ok=True)
rewards.to_csv(os.path.join('output', 'linuxubuntu_openblas_rewards.csv'))
| examples/lints_reproducibility/table_4/LinuxUbuntu_OpenBLAS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Compare Neural Network and NN_LSTM
# In this file, we will test Neural Network and RNN_LSTM with same training data and test data
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.layers import Dense,Activation
from keras.layers.recurrent import LSTM
from sklearn.model_selection import train_test_split
from sklearn import model_selection
# -
# ## Attack 1
# ## Choosing the Configuration
# +
data_attack1 = pd.read_csv("dataset/attack1with7FeatureVector.csv")
data_attack1 = data_attack1.dropna(axis=0, how="any")#remove invalid data
sum(np.array(data_attack1['Label']))/data_attack1['Label'].shape[0]/1
data_attack1.shape
X = data_attack1.iloc[:,12:] #In our case, we use the feature of 1-6, the feature of distance rejected
n = X.shape[1]
y = data_attack1.iloc[:,11]
X = np.reshape(X.values, (X.shape[0], X.shape[1]))
y =np.reshape(y.values, (y.shape[0], 1))
# -
activation_func = ['sigmoid','softmax', 'elu', 'selu', 'softplus', 'softsign',
'relu', 'tanh', 'hard_sigmoid', 'linear']
loss_func = ['binary_crossentropy', 'mean_squared_error', 'mean_absolute_error', 'mean_absolute_percentage_error',
'mean_squared_logarithmic_error', 'squared_hinge', 'hinge', 'logcosh', 'kullback_leibler_divergence',
'poisson', 'cosine_proximity']
optimizer_scheme = ['Adagrad','SGD', 'RMSprop', 'Adadelta', 'Adam', 'Adamax', 'Nadam']
def evaluate_model(y_predict, y_test):
    """Score binary predictions against ground truth and print the metrics.

    Args:
        y_predict: iterable of predicted labels (0/1), one per sample.
        y_test: iterable of true labels (0/1), same length as `y_predict`.

    Returns:
        Tuple (ccr, preci, recall): accuracy (correct classification rate),
        precision, and recall. Precision and recall fall back to 0 when
        their denominator is zero instead of raising.
    """
    count_ccr = 0
    TP = 0
    FP = 0
    FN = 0
    for pred, true in zip(y_predict, y_test):
        if pred == true:
            count_ccr += 1
        if pred == 1 and true == 1:
            TP += 1
        if pred == 1 and true == 0:
            FP += 1
        if pred == 0 and true == 1:
            FN += 1
    ccr = count_ccr / len(y_test)
    if (TP + FP) == 0:
        print('All the prediction is normal')
        preci = 0
    else:
        preci = TP / (TP + FP)
    # Bug fix: the original divided by (TP + FN) unconditionally, which raised
    # ZeroDivisionError whenever the test set contains no positive samples.
    recall = TP / (TP + FN) if (TP + FN) != 0 else 0
    print('For this model, the CCR is', ccr, ', the Precision is', preci, 'and the Recall is', recall )
    return ccr,preci,recall
# ## Neural network model
# NOTE(review): `activation_func_use`, `loss_func_use` and
# `optimizer_scheme_use` are expected to be picked from the candidate lists
# defined above, but they are never assigned in this notebook — set them
# before running this cell.
model_NN = Sequential()
model_NN.add(Dense(64, input_dim=n, activation=activation_func_use))
model_NN.add(Dense(16, activation=activation_func_use))
model_NN.add(Dense(1, activation=activation_func_use))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, )
model_NN.compile(loss=loss_func_use, optimizer=optimizer_scheme_use, metrics=['binary_accuracy'])
# Bug fix: the original called `model.fit` / `model.predict` on an undefined
# name `model`; the network built above is `model_NN`.
model_NN.fit(X_train, y_train, epochs=20, batch_size=100, verbose=0)
y_predict = np.round(model_NN.predict(X_test))
cnn,preci,recall = evaluate_model(y_predict, y_test)
# ## Neural Network LSTM model
# Build an LSTM classifier with the same hidden sizes as the dense network
# above, so the two models can be compared on the identical train/test split.
# NOTE(review): reuses `activation_func_use`, `loss_func_use`,
# `optimizer_scheme_use`, which are not defined in this notebook — set them
# before running this cell.
model_LSTM = Sequential()
model_LSTM.add(LSTM (64, input_shape=(None,n),return_sequences=False))
model_LSTM.add(Dense(16, input_dim =n, activation=activation_func_use))
model_LSTM.add(Dense(1 , activation=activation_func_use))
model_LSTM.compile(loss=loss_func_use, optimizer=optimizer_scheme_use, metrics=['binary_accuracy'])
# LSTM layers expect 3-D input (samples, timesteps, features); each sample is
# treated here as a sequence of length 1.
X_train = np.reshape(X_train, (X_train.shape[0], 1,X_train.shape[1]))
X_test =np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))
model_LSTM.fit(X_train,y_train,verbose = 0,epochs=20,batch_size=100)
y_predict = np.round(model_LSTM.predict(X_test))
cnn,preci,recall = evaluate_model(y_predict, y_test)
# In this file, we use the same training data and test data. We find that there is no difference between the LSTM_NN model and the NN model; in fact CCR, Precision and Recall have the same values.
| compare_Neural Network_and_NN_LSTM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="vMHU857RbB-B"
# nltk.download('stopwords')
# nltk.download('wordnet')
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import string
import nltk
import it_core_news_sm
import re
from itertools import combinations
import itertools
import os
import networkx as nx
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer
#from nltk.tokenize import word_tokenize
from nltk.tokenize import WhitespaceTokenizer
word_tokenize = WhitespaceTokenizer()
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import TweetTokenizer
from wordcloud import WordCloud
# + id="nupiJmsebB-J" outputId="ce7d2887-de72-43ea-e3ab-9371df102db0"
database_words = pd.read_csv("~/Scrivania/common_start/database/id_lemmas.csv",index_col = 0 , sep=',', engine='python')
database_words['text_nlp'] = database_words.apply(lambda row: word_tokenize.tokenize(row['text_nlp']), axis = 1)
database_words
# + id="l9oe8P4ZbB-N" outputId="110d949f-23b5-4caa-8f6a-14051d138d6f"
database_comments = pd.read_csv("~/Scrivania/common_start/database/com_liwc.csv", sep='\t', engine='python')
database_comments.head(2)
# + [markdown] id="422qPgzybB-P"
# # THE FOLLOWING CELLS ARE THE ONES TO BE MODIFIED IN ORDER TO OBTAIN ANY OTHER SUBSET
#
# ## Here keep only problematic and hate comments
# + id="q-LtgWkYbB-Q" outputId="a4700541-c1d2-4366-d207-35876466a215"
subset_comments = database_comments[database_comments.loc[:,'c_rating3'] == 'probl-hate'].loc[:,'c_rating3']
#free memory
subset_comments
# + [markdown] id="psPk1WyBbB-S"
# Computes the matching indexes and retrieves the subset from the lemmas dataset
# + id="nZ2ib_9VbB-T" outputId="6b916b72-699f-4e76-f0b7-996e69f216d0"
matching_indexes = database_words.index.intersection(subset_comments.index)
subset_words = database_words.loc[matching_indexes,:]
subset_words
# + id="NDwEB1dYbB-U" outputId="5d777f7c-abe6-44ef-aa79-db189307b06b"
##check for null values
subset_words.isnull().values.any()
# + id="1NkUtHlQbB-W" outputId="e6e1985a-7ba8-4ad5-c702-99318ae3786a"
##create a copy of the dataframe and add the index
df_words = subset_words.copy()
df_words['id'] = df_words.index
df_words
# + [markdown] id="Ewen2MFSbB-X"
# Create a dataframe of unique words
# + id="WFkapy_4bB-Y" outputId="7e727113-2e79-4b2d-ece0-882c34d94264"
unique_words = {}
words_per_comment = {}
for index, row in df_words.iterrows():
if(len(row['text_nlp'])>0):
words_per_comment[row["id"]]= [clean_txt for clean_txt in row['text_nlp']] ###iterating the column text_clean. There are 78174 rows
for single_word in row['text_nlp']:
unique_words.setdefault(single_word, 0) ## setdefault() method returns the value of a key (if the key is in dictionary)
unique_words[single_word] += 1
unique_words.pop('#', None)
#len of unique words
print("Number of unique words: ", len(unique_words))
unique_words
# + id="AXumNd60bB-b"
folder_path = 'subsets/'
os.makedirs(folder_path,exist_ok=True)
# + [markdown] id="YkBsH0H2bB-b"
# ## Create the unique words dataframe
#
# Moreover as attribute it has how many times it appears in a given network, in this case hate comments
# + id="aPEdcyeDbB-c" outputId="f577f4b7-2bf5-43c3-fd27-d49cef8bf9ac"
lemmas_df = pd.DataFrame.from_dict(unique_words, orient='index').reset_index()
lemmas_df.columns = ['label', 'counts']
lemmas_df
# + id="MIYwisSgbB-e" outputId="66d1696e-cc70-4be1-ef73-0ecaf2a20b10"
lemmas_df = lemmas_df.reset_index()
lemmas_df.columns = ['id', 'label', 'counts']
lemmas_df
# + id="1d71ncvUbB-f"
#export
lemmas_df.to_csv('subsets/NODES_hate_problematic_subset.csv', index = False)
# + id="NFbxHsKHbB-g"
def get_id_from_node(word, ids):
    """Return the index of the first row in `ids` whose `label` equals `word`."""
    matching = ids.index[ids["label"] == word]
    return matching[0]
# + id="L5lARRMabB-h" outputId="5f555190-af76-4a4e-80a7-5d4a2605c17a"
words_per_comment = {}
for index, row in df_words.iterrows():
if(len(row['text_nlp'])>0):
words_per_comment[row["id"]]= [get_id_from_node(clean_txt, lemmas_df) for clean_txt in row['text_nlp']
if clean_txt!='#'] ###iterating the column text_clean. There are 78174 rows
words_per_comment
# + id="SHOZioeJbB-i" outputId="a1f7c0bb-11e7-436a-c9ef-275101b6c6f1"
##create a matrix of words
## Square lemma-by-lemma matrix over all unique lemma ids, initialised to 0.
clean_df = pd.DataFrame(0, index=lemmas_df.index, columns=lemmas_df.index)
for key in words_per_comment:
    for pair in itertools.product(words_per_comment[key],words_per_comment[key]):
        # NOTE(review): the `not(clean_df.at[...])` guard means each cell is
        # only ever incremented from 0 to 1, so this matrix records *binary*
        # co-occurrence rather than counts — confirm this is intended.
        if pair[0]!=pair[1] and not(clean_df.at[pair[0],pair[1]]):
            clean_df.at[pair[0],pair[1]] += 1
            clean_df.at[pair[1],pair[0]] += 1
display(clean_df) ##this is the full matrix of words
# + id="g49U5pfXbB-k" outputId="b1498c21-4b27-4e42-f3a0-2ca07f9287b9"
##use this as a basis to get edges and their weights
words_projection = {} ##create dictionary
##itertools.product() which computes the cartesian product of input iterables.
for key in words_per_comment:
for pair in itertools.product(words_per_comment[key],words_per_comment[key]):
if pair[0]!=pair[1] and not(pair[::-1] in words_projection):
words_projection.setdefault(pair,0)
words_projection[pair] += 1
words_projection
# + id="92o8rMu1bB-l"
##obtain weighted edges list
##edge lists =36million
#WEIGHTED
#to get weighted graph we need a list of 3-element tuplels (u,v,w) where u and v are nodes
## and w is a number representing weight
words_weighted = []
for edge in words_projection:
words_weighted.append((edge[0],edge[1],words_projection[edge]))
G = nx.Graph()
G.add_weighted_edges_from(words_weighted)
nx.write_weighted_edgelist(G, "subsets/EDGES_hate_problematic_subset.csv",delimiter=",") ##save the edges list as csv
df_edges = pd.read_csv('subsets/EDGES_hate_problematic_subset.csv')
df_edges.columns = ['source', 'target', 'weight']
df_edges.to_csv('subsets/EDGES_hate_problematic_subset.csv', index = False)
# + id="B6p57gPIbB-m" outputId="507ce9dd-e136-47a0-8962-c91537f34c7e"
df_edges
# + id="Mhjsu1nCbB-n"
| Semantic_Group/Get_Subset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## 2.2 PyTorch第一步
#
# PyTorch的简洁设计使得它入门很简单,在深入介绍PyTorch之前,本节将先介绍一些PyTorch的基础知识,使得读者能够对PyTorch有一个大致的了解,并能够用PyTorch搭建一个简单的神经网络。部分内容读者可能暂时不太理解,可先不予以深究,本书的第3章和第4章将会对此进行深入讲解。
#
# 本节内容参考了PyTorch官方教程[^1]并做了相应的增删修改,使得内容更贴合新版本的PyTorch接口,同时也更适合新手快速入门。另外本书需要读者先掌握基础的Numpy使用,其他相关知识推荐读者参考CS231n的教程[^2]。
#
# [^1]: http://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html
# [^2]: http://cs231n.github.io/python-numpy-tutorial/
# ### Tensor
#
# Tensor是PyTorch中重要的数据结构,可认为是一个高维数组。它可以是一个数(标量)、一维数组(向量)、二维数组(矩阵)以及更高维的数组。Tensor和Numpy的ndarrays类似,但Tensor可以使用GPU进行加速。Tensor的使用和Numpy及Matlab的接口十分相似,下面通过几个例子来看看Tensor的基本使用。
from __future__ import print_function
import torch as t
t.__version__
# +
# 构建 5x3 矩阵,只是分配了空间,未初始化
x = t.Tensor(5, 3)
x = t.Tensor([[1,2],[3,4]])
x
# -
# 使用[0,1]均匀分布随机初始化二维数组
x = t.rand(5, 3)
x
print(x.size()) # 查看x的形状
x.size()[1], x.size(1) # 查看列的个数, 两种写法等价
# `torch.Size` 是tuple对象的子类,因此它支持tuple的所有操作,如x.size()[0]
y = t.rand(5, 3)
# 加法的第一种写法
x + y
# 加法的第二种写法
t.add(x, y)
# 加法的第三种写法:指定加法结果的输出目标为result
result = t.Tensor(5, 3) # 预先分配空间
t.add(x, y, out=result) # 输入到result
result
# +
print('最初y')
print(y)
print('第一种加法,y的结果')
y.add(x) # 普通加法,不改变y的内容
print(y)
print('第二种加法,y的结果')
y.add_(x) # inplace 加法,y变了
print(y)
# -
# 注意,函数名后面带下划线**`_`** 的函数会修改Tensor本身。例如,`x.add_(y)`和`x.t_()`会改变 `x`,但`x.add(y)`和`x.t()`返回一个新的Tensor, 而`x`不变。
# Tensor的选取操作与Numpy类似
x[:, 1]
# Tensor还支持很多操作,包括数学运算、线性代数、选择、切片等等,其接口设计与Numpy极为相似。更详细的使用方法,会在第三章系统讲解。
#
# Tensor和Numpy的数组之间的互操作非常容易且快速。对于Tensor不支持的操作,可以先转为Numpy数组处理,之后再转回Tensor。c
#
#
a = t.ones(5) # 新建一个全1的Tensor
a
b = a.numpy() # Tensor -> Numpy
b
import numpy as np
a = np.ones(5)
b = t.from_numpy(a) # Numpy->Tensor
print(a)
print(b)
# Tensor和numpy对象共享内存,所以他们之间的转换很快,而且几乎不会消耗什么资源。但这也意味着,如果其中一个变了,另外一个也会随之改变。
b.add_(1) # 以`_`结尾的函数会修改自身
print(a)
print(b) # Tensor和Numpy共享内存
# 如果你想获取某一个元素的值,可以使用`scalar.item`。 直接`tensor[idx]`得到的还是一个tensor: 一个0-dim 的tensor,一般称为scalar.
scalar = b[0]
scalar
scalar.size() #0-dim
scalar.item() # 使用scalar.item()能从中取出python对象的数值
tensor = t.tensor([2]) # 注意和scalar的区别
tensor,scalar
tensor.size(),scalar.size()
# 只有一个元素的tensor也可以调用`tensor.item()`
tensor.item(), scalar.item()
# 此外在pytorch中还有一个和`np.array` 很类似的接口: `torch.tensor`, 二者的使用十分类似。
tensor = t.tensor([3,4]) # 新建一个包含 3,4 两个元素的tensor
scalar = t.tensor(3)
scalar
old_tensor = tensor
new_tensor = old_tensor.clone()
new_tensor[0] = 1111
old_tensor, new_tensor
# 需要注意的是,`t.tensor()`或者`tensor.clone()`总是会进行数据拷贝,新tensor和原来的数据不再共享内存。所以如果你想共享内存的话,建议使用`torch.from_numpy()`或者`tensor.detach()`来新建一个tensor, 二者共享内存。
new_tensor = old_tensor.detach()
new_tensor[0] = 1111
old_tensor, new_tensor
# Tensor可通过`.cuda` 方法转为GPU的Tensor,从而享受GPU带来的加速运算。
# 在不支持CUDA的机器下,下一步还是在CPU上运行
device = t.device("cuda:0" if t.cuda.is_available() else "cpu")
x = x.to(device)
y = y.to(x.device)
z = x+y
# 此外,还可以使用`tensor.cuda()` 的方式将tensor拷贝到gpu上,但是这种方式不太推荐。
# 此处可能发现GPU运算的速度并未提升太多,这是因为x和y太小且运算也较为简单,而且将数据从内存转移到显存还需要花费额外的开销。GPU的优势需在大规模数据和复杂运算下才能体现出来。
#
# ### autograd: 自动微分
#
# 深度学习的算法本质上是通过反向传播求导数,而PyTorch的**`autograd`**模块则实现了此功能。在Tensor上的所有操作,autograd都能为它们自动提供微分,避免了手动计算导数的复杂过程。
#
# ~~`autograd.Variable`是Autograd中的核心类,它简单封装了Tensor,并支持几乎所有Tensor有的操作。Tensor在被封装为Variable之后,可以调用它的`.backward`实现反向传播,自动计算所有梯度~~ ~~Variable的数据结构如图2-6所示。~~
#
#
# 
#
# *从0.4起, Variable 正式合并入Tensor, Variable 本来实现的自动微分功能,Tensor就能支持。读者还是可以使用Variable(tensor), 但是这个操作其实什么都没做。建议读者以后直接使用tensor*.
#
# 要想使得Tensor使用autograd功能,只需要设置`tensor.requries_grad=True`.
#
#
# ~~Variable主要包含三个属性。~~
# ~~- `data`:保存Variable所包含的Tensor~~
# ~~- `grad`:保存`data`对应的梯度,`grad`也是个Variable,而不是Tensor,它和`data`的形状一样。~~
# ~~- `grad_fn`:指向一个`Function`对象,这个`Function`用来反向传播计算输入的梯度,具体细节会在下一章讲解。~~
# +
# 为tensor设置 requires_grad 标识,代表着需要求导数
# pytorch 会自动调用autograd 记录操作
x = t.ones(2, 2, requires_grad=True)
# 上一步等价于
# x = t.ones(2,2)
# x.requires_grad = True
x
# -
y = x.sum()
y
y.grad_fn
y.backward() # 反向传播,计算梯度
# y = x.sum() = (x[0][0] + x[0][1] + x[1][0] + x[1][1])
# 每个值的梯度都为1
x.grad
# 注意:`grad`在反向传播过程中是累加的(accumulated),这意味着每一次运行反向传播,梯度都会累加之前的梯度,所以反向传播之前需把梯度清零。
y.backward()
x.grad
y.backward()
x.grad
# 以下划线结束的函数是inplace操作,会修改自身的值,就像add_
x.grad.data.zero_()
y.backward()
x.grad
# ### 神经网络
#
# Autograd实现了反向传播功能,但是直接用来写深度学习的代码在很多情况下还是稍显复杂,torch.nn是专门为神经网络设计的模块化接口。nn构建于 Autograd之上,可用来定义和运行神经网络。nn.Module是nn中最重要的类,可把它看成是一个网络的封装,包含网络各层定义以及forward方法,调用forward(input)方法,可返回前向传播的结果。下面就以最早的卷积神经网络:LeNet为例,来看看如何用`nn.Module`实现。LeNet的网络结构如图2-7所示。
#
# 
#
# 这是一个基础的前向传播(feed-forward)网络: 接收输入,经过层层传递运算,得到输出。
#
# #### 定义网络
#
# 定义网络时,需要继承`nn.Module`,并实现它的forward方法,把网络中具有可学习参数的层放在构造函数`__init__`中。如果某一层(如ReLU)不具有可学习的参数,则既可以放在构造函数中,也可以不放,但建议不放在其中,而在forward中使用`nn.functional`代替。
# +
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """LeNet-style convolutional network: two conv+pool stages followed by
    three fully connected layers, mapping a 1x32x32 input to 10 logits."""

    def __init__(self):
        # Subclasses of nn.Module must run the parent constructor first.
        super(Net, self).__init__()
        # First conv: 1 input channel -> 6 output channels, 5x5 kernel.
        self.conv1 = nn.Conv2d(1, 6, 5)
        # Second conv: 6 -> 16 channels, 5x5 kernel.
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Fully connected (affine) layers, y = Wx + b.
        self.fc1 = nn.Linear(16*5*5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Stage 1 and 2: convolution -> ReLU -> 2x2 max pooling.
        pooled = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        pooled = F.max_pool2d(F.relu(self.conv2(pooled)), 2)
        # Flatten everything except the batch dimension ('-1' infers the size).
        flat = pooled.view(pooled.size()[0], -1)
        # Classifier head: two ReLU layers, then the final linear layer.
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)
net = Net()
print(net)
# -
# 只要在nn.Module的子类中定义了forward函数,backward函数就会自动被实现(利用`autograd`)。在`forward` 函数中可使用任何tensor支持的函数,还可以使用if、for循环、print、log等Python语法,写法和标准的Python写法一致。
#
# 网络的可学习参数通过`net.parameters()`返回,`net.named_parameters`可同时返回可学习的参数及名称。
params = list(net.parameters())
print(len(params))
for name,parameters in net.named_parameters():
print(name,':',parameters.size())
# forward函数的输入和输出都是Tensor。
input = t.randn(1, 1, 32, 32)
out = net(input)
out.size()
net.zero_grad() # 所有参数的梯度清零
out.backward(t.ones(1,10)) # 反向传播
# 需要注意的是,torch.nn只支持mini-batches,不支持一次只输入一个样本,即一次必须是一个batch。但如果只想输入一个样本,则用 `input.unsqueeze(0)`将batch_size设为1。例如 `nn.Conv2d` 输入必须是4维的,形如$nSamples \times nChannels \times Height \times Width$。可将nSample设为1,即$1 \times nChannels \times Height \times Width$。
# #### 损失函数
#
# nn实现了神经网络中大多数的损失函数,例如nn.MSELoss用来计算均方误差,nn.CrossEntropyLoss用来计算交叉熵损失。
output = net(input)
target = t.arange(0,10).view(1,10).float()
criterion = nn.MSELoss()
loss = criterion(output, target)
loss # loss是个scalar
# 如果对loss进行反向传播溯源(使用`grad_fn`属性),可看到它的计算图如下:
#
# ```
# input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d
# -> view -> linear -> relu -> linear -> relu -> linear
# -> MSELoss
# -> loss
# ```
#
# 当调用`loss.backward()`时,该图会动态生成并自动微分,也即会自动计算图中参数(Parameter)的导数。
# 运行.backward,观察调用之前和调用之后的grad
net.zero_grad() # 把net中所有可学习参数的梯度清零
print('反向传播之前 conv1.bias的梯度')
print(net.conv1.bias.grad)
loss.backward()
print('反向传播之后 conv1.bias的梯度')
print(net.conv1.bias.grad)
# #### 优化器
# 在反向传播计算完所有参数的梯度后,还需要使用优化方法来更新网络的权重和参数,例如随机梯度下降法(SGD)的更新策略如下:
# ```
# weight = weight - learning_rate * gradient
# ```
#
# 手动实现如下:
#
# ```python
# learning_rate = 0.01
# for f in net.parameters():
# f.data.sub_(f.grad.data * learning_rate)# inplace 减法
# ```
#
# `torch.optim`中实现了深度学习中绝大多数的优化方法,例如RMSProp、Adam、SGD等,更便于使用,因此大多数时候并不需要手动写上述代码。
# +
import torch.optim as optim
#新建一个优化器,指定要调整的参数和学习率
optimizer = optim.SGD(net.parameters(), lr = 0.01)
# 在训练过程中
# 先梯度清零(与net.zero_grad()效果一样)
optimizer.zero_grad()
# 计算损失
output = net(input)
loss = criterion(output, target)
#反向传播
loss.backward()
#更新参数
optimizer.step()
# -
#
#
# #### 数据加载与预处理
#
# 在深度学习中数据加载及预处理是非常复杂繁琐的,但PyTorch提供了一些可极大简化和加快数据处理流程的工具。同时,对于常用的数据集,PyTorch也提供了封装好的接口供用户快速调用,这些数据集主要保存在torchvison中。
#
# `torchvision`实现了常用的图像数据加载功能,例如Imagenet、CIFAR10、MNIST等,以及常用的数据转换操作,这极大地方便了数据加载,并且代码具有可重用性。
#
#
# ### 小试牛刀:CIFAR-10分类
#
# 下面我们来尝试实现对CIFAR-10数据集的分类,步骤如下:
#
# 1. 使用torchvision加载并预处理CIFAR-10数据集
# 2. 定义网络
# 3. 定义损失函数和优化器
# 4. 训练网络并更新网络参数
# 5. 测试网络
#
# #### CIFAR-10数据加载及预处理
#
# CIFAR-10[^3]是一个常用的彩色图片数据集,它有10个类别: 'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'。每张图片都是$3\times32\times32$,也即3-通道彩色图片,分辨率为$32\times32$。
#
# [^3]: http://www.cs.toronto.edu/~kriz/cifar.html
import torchvision as tv
import torchvision.transforms as transforms
from torchvision.transforms import ToPILImage
show = ToPILImage() # 可以把Tensor转成Image,方便可视化
# +
# 第一次运行程序torchvision会自动下载CIFAR-10数据集,
# 大约100M,需花费一定的时间,
# 如果已经下载有CIFAR-10,可通过root参数指定
# 定义对数据的预处理
transform = transforms.Compose([
transforms.ToTensor(), # 转为Tensor
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # 归一化
])
# 训练集
trainset = tv.datasets.CIFAR10(
root='/home/cy/tmp/data/',
train=True,
download=True,
transform=transform)
trainloader = t.utils.data.DataLoader(
trainset,
batch_size=4,
shuffle=True,
num_workers=2)
# 测试集
testset = tv.datasets.CIFAR10(
'/home/cy/tmp/data/',
train=False,
download=True,
transform=transform)
testloader = t.utils.data.DataLoader(
testset,
batch_size=4,
shuffle=False,
num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# -
# Dataset对象是一个数据集,可以按下标访问,返回形如(data, label)的数据。
# +
(data, label) = trainset[100]
print(classes[label])
# (data + 1) / 2是为了还原被归一化的数据
show((data + 1) / 2).resize((100, 100))
# -
# Dataloader是一个可迭代的对象,它将dataset返回的每一条数据拼接成一个batch,并提供多线程加速优化和数据打乱等操作。当程序对dataset的所有数据遍历完一遍之后,相应的对Dataloader也完成了一次迭代。
# DataLoader is iterable: take one batch (4 images + labels) from it.
dataiter = iter(trainloader)
# BUG FIX: DataLoader iterators no longer expose a `.next()` method in
# current PyTorch; use the builtin next() instead.
images, labels = next(dataiter)
print(' '.join('%11s'%classes[labels[j]] for j in range(4)))
show(tv.utils.make_grid((images+1)/2)).resize((400,100))
# #### 定义网络
#
# 拷贝上面的LeNet网络,修改self.conv1第一个参数为3通道,因CIFAR-10是3通道彩图。
# +
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """LeNet variant for CIFAR-10.

    Identical to the earlier network except that conv1 accepts
    3-channel (RGB) 32x32 images.
    """

    def __init__(self):
        super(Net, self).__init__()
        # 3 input channels (RGB) -> 6 -> 16 feature maps, 5x5 kernels.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Classifier head: 16*5*5 flattened features -> 10 class scores.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # conv -> ReLU -> 2x2 max pool, twice.
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        # Flatten per sample, then the fully-connected classifier.
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
net = Net()
print(net)
# -
# #### 定义损失函数和优化器(loss和optimizer)
from torch import optim
criterion = nn.CrossEntropyLoss() # 交叉熵损失函数
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# ### 训练网络
#
# 所有网络的训练流程都是类似的,不断地执行如下流程:
#
# - 输入数据
# - 前向传播+反向传播
# - 更新参数
#
t.set_num_threads(8)
for epoch in range(2):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # Unpack one mini-batch of inputs and labels.
        inputs, labels = data
        # Zero the accumulated gradients before the backward pass
        # (gradients accumulate across backward() calls otherwise).
        optimizer.zero_grad()
        # forward + backward
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        # Apply the SGD update to the parameters.
        optimizer.step()
        # Log progress.
        # loss is a 0-dim tensor (scalar); read its Python value with
        # loss.item() — indexing like loss[0] is not supported.
        running_loss += loss.item()
        if i % 2000 == 1999:  # print training status every 2000 batches
            print('[%d, %5d] loss: %.3f' \
                  % (epoch+1, i+1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')
# 此处仅训练了2个epoch(遍历完一遍数据集称为一个epoch),来看看网络有没有效果。将测试图片输入到网络中,计算它的label,然后与实际的label进行比较。
dataiter = iter(testloader)
# BUG FIX: DataLoader iterators no longer expose a `.next()` method in
# current PyTorch; use the builtin next() instead. One batch = 4 images.
images, labels = next(dataiter)
print('实际的label: ', ' '.join(\
            '%08s'%classes[labels[j]] for j in range(4)))
show(tv.utils.make_grid(images / 2 - 0.5)).resize((400,100))
# 接着计算网络预测的label:
# +
# 计算图片在每个类别上的分数
outputs = net(images)
# 得分最高的那个类
_, predicted = t.max(outputs.data, 1)
print('预测结果: ', ' '.join('%5s'\
% classes[predicted[j]] for j in range(4)))
# -
# 已经可以看出效果,准确率50%,但这只是一部分的图片,再来看看在整个测试集上的效果。
# +
correct = 0  # number of correctly classified images
total = 0    # total number of images evaluated
# Inference only: disable autograd to speed things up and save memory.
with t.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        # Index of the highest score along the class dimension = predicted class.
        _, predicted = t.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum()
print('10000张测试集中的准确率为: %d %%' % (100 * correct / total))
# -
# 训练的准确率远比随机猜测(准确率10%)好,证明网络确实学到了东西。
# #### 在GPU训练
# 就像之前把Tensor从CPU转到GPU一样,模型也可以类似地从CPU转到GPU。
# +
device = t.device("cuda:0" if t.cuda.is_available() else "cpu")
net.to(device)
images = images.to(device)
labels = labels.to(device)
output = net(images)
loss= criterion(output,labels)
loss
# -
# 如果发现在GPU上并没有比CPU提速很多,实际上是因为网络比较小,GPU没有完全发挥自己的真正实力。
# 对PyTorch的基础介绍至此结束。总结一下,本节主要包含以下内容。
#
# 1. Tensor: 类似Numpy数组的数据结构,与Numpy接口类似,可方便地互相转换。
# 2. autograd/: 为tensor提供自动求导功能。
# 3. nn: 专门为神经网络设计的接口,提供了很多有用的功能(神经网络层,损失函数,优化器等)。
# 4. 神经网络训练: 以CIFAR-10分类为例演示了神经网络的训练流程,包括数据加载、网络搭建、训练及测试。
#
# 通过本节的学习,相信读者可以体会出PyTorch具有接口简单、使用灵活等特点。从下一章开始,本书将深入系统地讲解PyTorch的各部分知识。
| chapter02-quickstart/chapter2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys

import ezdxf as dxf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.interpolate
from ezdxf import recover
from ezdxf.addons.drawing import RenderContext, Frontend
from ezdxf.addons.drawing.matplotlib import MatplotlibBackend
from ezdxf.groupby import groupby
# Loading dxf reference file
# +
# Safe loading procedure of dxf file(requires ezdxf v0.14):
# The auditor.errors attribute stores severe errors,
# which may raise exceptions when rendering.
try:
    doc, auditor = recover.readfile('DY-S76-176-1.dxf')
except IOError:
    print('Not a DXF file or a generic I/O error.')
    sys.exit(1)
# BUG FIX: the module was imported as `dxf` (import ezdxf as dxf), so the
# name `ezdxf` was undefined here and this handler raised NameError; `sys`
# also needs to be imported for sys.exit to work.
except dxf.DXFStructureError:
    print('Invalid or corrupted DXF file.')
    sys.exit(2)

# Render the DXF content only if the audit found no severe errors.
if not auditor.has_errors:
    fig = plt.figure()
    ax = fig.add_axes([0, 0, 1, 1])
    ctx = RenderContext(doc)
    out = MatplotlibBackend(ax)
    Frontend(ctx, out).draw_layout(doc.modelspace(), finalize=True)
    # Saving dxf content in png image
    #fig.savefig('your.png', dpi=300)
# -
# Get all entities in each layer
# +
msp = doc.modelspace()
# Group modelspace entities by their layer attribute.
# FIX: the original called the free function groupby(...) and then
# immediately overwrote the result with msp.groupby(...); msp.groupby is
# a convenience wrapper around the same function, so one call suffices.
group = msp.groupby(dxfattrib='layer')
for layer, entities in group.items():
    print(f'Layer "{layer}" contains following entities:')
    for entity in entities:
        print(' {}'.format(str(entity)))
# -
# Plot control points from spline and line in SKI LAYER
# +
plt.figure(figsize=(40, 8))
for spline in msp.query('SPLINE[layer=="2_Ski"]'):
fitpoints = spline.fit_points.__str__()
controlpoints = spline.control_points.__str__()
knots = spline.knots.__str__()
weights = spline.weights.__str__()
plt.plot(np.array(spline.control_points)[:,0],np.array(spline.control_points)[:,1], 'o')
for line in msp.query('LINE[layer=="2_Ski"]'):
plt.plot([np.array(line.dxf.start)[0],np.array(line.dxf.end)[0]],[np.array(line.dxf.start)[1],np.array(line.dxf.end)[1]],'b-')
plt.axis('equal')
plt.show()
# -
# Shift the offset to have tail at zero length. Save minimum and maximum values.
# +
tot_x = []
tot_y = []
for spline in msp.query('SPLINE[layer=="2_Ski"]'):
points = np.array(spline.control_points)
x_nodes = points[:,0]
y_nodes = points[:,1]
if x_nodes[0] > x_nodes[1]:
x_nodes = x_nodes[::-1]
y_nodes = y_nodes[::-1]
tot_x = np.append(tot_x, x_nodes)
tot_y = np.append(tot_y, y_nodes)
minimum = np.min(tot_x)
tot_x = tot_x - minimum
plt.figure(figsize=(30, 8))
plt.plot(tot_x, tot_y, 'b*', label='myspline')
plt.axis('equal')
plt.show()
# -
# Define function to shift and flip data.
def shift_data(x_points, minimum):
    """Translate *x_points* so that *minimum* maps to zero.

    Works element-wise for numpy arrays as well as for plain scalars.
    """
    return x_points - minimum
# Loading dxf file generated from sensors' data
# +
# Safe loading procedure of dxf file(requires ezdxf v0.14):
# The auditor.errors attribute stores severe errors,
# which may raise exceptions when rendering.
try:
    doc, auditor = recover.readfile('test.dxf')
except IOError:
    print('Not a DXF file or a generic I/O error.')
    sys.exit(1)
# BUG FIX: the module was imported as `dxf` (import ezdxf as dxf), so the
# name `ezdxf` was undefined here and this handler raised NameError; `sys`
# also needs to be imported for sys.exit to work.
except dxf.DXFStructureError:
    print('Invalid or corrupted DXF file.')
    sys.exit(2)

# Render the DXF content only if the audit found no severe errors.
if not auditor.has_errors:
    fig = plt.figure()
    ax = fig.add_axes([0, 0, 1, 1])
    ctx = RenderContext(doc)
    out = MatplotlibBackend(ax)
    Frontend(ctx, out).draw_layout(doc.modelspace(), finalize=True)
    # Saving dxf content in png image
    #fig.savefig('your.png', dpi=300)
# -
# Plot control points from splines and lines
# +
msp2 = doc.modelspace()
ski_profile_L = []
ski_profile_W = []
plt.figure(figsize=(40, 8))
for spline in msp2.query('SPLINE'):
fitpoints = spline.fit_points.__str__()
controlpoints = spline.control_points.__str__()
knots = spline.knots.__str__()
weights = spline.weights.__str__()
if np.array(spline.control_points)[0,1] > 0:
ski_profile_L = np.append(ski_profile_L,np.array(spline.control_points)[:,0])
ski_profile_W = np.append(ski_profile_W,np.array(spline.control_points)[:,1])
plt.plot(np.array(spline.control_points)[:,0],np.array(spline.control_points)[:,1], 'o')
for line in msp2.query('LINE'):
plt.plot([np.array(line.dxf.start)[0],np.array(line.dxf.end)[0]],[np.array(line.dxf.start)[1],np.array(line.dxf.end)[1]],'b-')
plt.axis('equal')
plt.show()
# -
# Create dataframe with dxf control points from sensors' data
# +
d = {'L': ski_profile_L, 'W': ski_profile_W}
ski_profile = pd.DataFrame(data=d)
ski_profile
# -
# Load and plot sensors data.
# +
#ski_profile = pd.read_csv(r'C:\Users\Administrator\arduino ski scanner\ski scanner\skiScanner-DataLogger\Post processing\sensorsdata.csv', sep = ",")
ski_profile = ski_profile*10 #To get mm values
ski_profile = ski_profile.sort_values('L',ascending=True)
# Plot two sides of the ski
# Flip data to have the tail starting from zero
ski_profile['L'] = ski_profile['L'] - ski_profile['L'].max()
ski_profile['L'] = -ski_profile['L']
ski_profile = ski_profile.sort_values('L',ascending=True)
plt.figure(figsize=(30, 8))
plt.plot(ski_profile['L'], ski_profile['W'], 'ro')
plt.plot(ski_profile['L'], -ski_profile['W'], 'ro')
plt.axis('equal')
plt.show()
# -
# Define function to find the closest point of a dataframe array to a point
def closest_point(point, df, delta=10):
    """Return the value in *df* closest to *point*.

    Only candidates within *delta* of *point* are considered; if none
    qualify, an empty list is returned (sentinel kept from the original
    implementation so existing callers keep working).

    Parameters
    ----------
    point : float
        Reference coordinate.
    df : pandas.Series
        Candidate coordinates.
    delta : float, optional
        Half-width of the search window around *point* (default 10,
        now a parameter instead of a hard-coded constant).
    """
    candidates = df[abs(df - point) < delta]
    min_dist = float('inf')
    closest_value = []  # sentinel: returned unchanged when no candidate qualifies
    # BUG FIX: the original loop was `range(1, len(...))`, which silently
    # skipped the first candidate and returned [] whenever exactly one
    # candidate fell inside the window. Iterate over every candidate.
    for value in candidates:
        dist = abs(point - value)  # scalar distance (norm of a 1-D difference)
        if dist < min_dist:
            min_dist = dist
            closest_value = value
    return closest_value
# +
plt.figure(figsize=(30, 8))
tot_y_sensors = []
tot_x = []
tot_y = []
#for each spline in the dxf file
for spline in msp.query('SPLINE[layer=="2_Ski"]'):
start_point = []
end_point = []
points = np.array(spline.control_points)
x_nodes_RAW = points[:,0]
x_nodes = shift_data(x_nodes_RAW, minimum)
y_nodes = points[:,1]
if x_nodes[0] > x_nodes[1]:
x_nodes = x_nodes[::-1]
y_nodes = y_nodes[::-1]
points = points[::-1]
if y_nodes[1]> 0:
# Find closest start and end points in ski profile
start_point = closest_point(x_nodes[0],ski_profile['L'])
print(start_point)
end_point = closest_point(x_nodes[-1],ski_profile['L'])
print(end_point)
#For each spline create myspline for sensors' data
section = ski_profile[['L','W']][ (ski_profile['L']>=start_point) & (ski_profile['L']<end_point)]
myspline_sensors = scipy.interpolate.UnivariateSpline(section['L'].to_numpy(), section['W'].to_numpy())
#For each spline create myspline from control_points
myspline_dxf = scipy.interpolate.UnivariateSpline(x_nodes, y_nodes)
myspline_knots = myspline_dxf.get_knots()
myspline_coeffs = myspline_dxf.get_coeffs()
#Oversampling
x = np.arange(x_nodes[0], x_nodes[-1], 0.5)
x = np.sort(x)
tot_x = np.append(tot_x, x)
#Myspline evaluation in more points
y_myspline_dxf = myspline_dxf(x)
tot_y = np.append(tot_y,y_myspline_dxf)
y_myspline_sensors = myspline_sensors(x)
tot_y_sensors = np.append(tot_y_sensors,y_myspline_sensors)
plt.plot(x, y_myspline_dxf, 'b*', label='myspline')
plt.plot(x, y_myspline_sensors, 'r*', label='myspline')
#plt.plot(x, -y_myspline_sensors, 'r*', label='myspline')
#plt.plot(tot_x, tot_y, 'b*', label='myspline')
#plt.plot(ski_profile['L'], ski_profile['W'], 'ro')
#plt.plot(ski_profile['L'], -ski_profile['W'], 'ro')
plt.axis('equal')
plt.show()
# -
# Print the difference between the two curves and compute the error term (area betweem curves)
z = tot_y_sensors[:-1] - tot_y[:-1]
dx = 0.5
area = np.sum(np.absolute(z)*dx)
print(area)
plt.figure(figsize=(30, 8))
plt.plot(tot_x[:-1], z, 'b*')
plt.show()
| Post processing/dxf files comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.3 32-bit
# name: python3
# ---
# # Functions in python
# <hr>
# Functions are series of steps or instructions to complete a specific task
#
# The advantages of using functions in python programming are:
#
# - They **increase readability**, particularly for longer codes, since a function can be called at any point in the program
# - They **reduce code length**, since the same code is not repeated in different places
# - They allow **reuse of code**
# # Types of Functions
#
# ### <b>1) Built-in Function</b>
#
# - These are the functions that are ready-made in python, and are frequently used in many programs.
# - These do **not** need to be imported
#
print("hello")
input("hello")
for i in range(10):
print(i)
# ### <b>2) Module Functions</b>
#
# - The python standard libraries consists of collections functions other than inbuilt ones, which can be used through **modules**
# - Modules are a **group of functions** that can be imported into a program by the import statement and can be used accordingly.
# - Modules have a `.py` extension
# - In fact, when you make a file such as `code.py` you are making a module!
import math
print(math.sqrt(25))
print(math.log(2))
# ### Ways to import modules
# - import <i>module_name</i>
#
# To call a function in a module imported this way, you must write the **name of the module** before it
#
import random
print(random.randint(2,10))
# - import _modulename_ as _newname_
#
# In this method, you can import a module with **any name you want** and you can use that name before calling a function
#
# This new name is known as an **alias** and it points to the module object in the memory.
#
import statistics as st
st.mean([2,3,4,5,6])
# - from _modulename_ import _function1_,_function2_,...
#
# In this method, **you don't need to specify the modules name** before calling a function, since you will be importing specific functions from it
#
# To import **all the functions** from a python module, you can simply use an * instead of the function names
#
#
#
from math import *
print(fabs(-10))
print(sqrt(9))
print(log(e))
# ### <b>3) User Defined Functions</b>
#
# These are functions the **user can define** and **use** in their programs.
#
# +
# NOTE(review): this tutorial example shadows the built-in `sum`, and it
# prints the result instead of returning it (so the call evaluates to None).
# Left as-is because the lesson only demonstrates defining/calling a function.
def sum(a,b):
    """Print the sum of *a* and *b* (demo of a user-defined function)."""
    print(a+b)
sum(2,5)
| functions/1)functions_intro.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++14
// language: C++14
// name: xcpp14
// ---
// + [markdown] slideshow={"slide_type": "slide"}
// [](https://www.accu.org)
//
// [](https://github.com/QuantStack/xeus-cling/)
//
// # From "Interactive C++ with Jupyter" by <NAME>
// ## presented by <NAME> @ ACCU 2019, Bristol, UK.
// This notebook was derived form the sample notebook provided with the xeus-cling repo available at the GitHub repo below.
// It has been modified and extended for conference presentation at ACCU 2019, Bristol, UK.
//
// A Jupyter kernel for C++ based on the `cling` C++ interpreter and the `xeus` native implementation of the Jupyter protocol, xeus.
//
// - GitHub repository: https://github.com/QuantStack/xeus-cling/
// - Online documentation: https://xeus-cling.readthedocs.io/
// + [markdown] slideshow={"slide_type": "slide"}
// ## Usage
//
// <div style="background: #efffed;
// border: 1px solid grey;
// margin: 8px 0 8px 0;
// text-align: center;
// padding: 8px; ">
// <i class="fa-play fa"
// style="font-size: 40px;
// line-height: 40px;
// margin: 8px;
// color: #444;">
// </i>
// <div>
// To run the selected code cell, hit <pre style="background: #efffed">Shift + Enter</pre>
// </div>
// </div>
// + [markdown] slideshow={"slide_type": "slide"}
// ## Output and error streams
//
// As with the native command-line `cling` Xeus-cling has both `std::cout` and `std::cerr` are redirected, in this case to the notebook frontend.
// + [markdown] slideshow={"slide_type": "subslide"}
// ### Something traditional to start with
// + [markdown] slideshow={"slide_type": "fragment"}
// # // write something unoriginal
// + [markdown] slideshow={"slide_type": "subslide"}
// ### "compilation" errors.
// + code_folding=[] slideshow={"slide_type": "fragment"}
// make a mistake (if you haven't already)
// + [markdown] slideshow={"slide_type": "subslide"}
// ### stderr is captured too.
// + slideshow={"slide_type": "fragment"}
std::cerr << "This is an error\n";
// + [markdown] slideshow={"slide_type": "subslide"}
// ### and program exceptions (but be careful)
// + slideshow={"slide_type": "fragment"}
#include <stdexcept>
// + slideshow={"slide_type": "fragment"}
throw std::runtime_error("Unknown exception");
// + [markdown] slideshow={"slide_type": "slide"}
// ### A little more
// Omitting the `;` in the last statement of a cell results in an output being printed
// + slideshow={"slide_type": "fragment"}
int j = 5;
// + slideshow={"slide_type": "fragment"}
j
// + slideshow={"slide_type": "fragment"}
j++
// + slideshow={"slide_type": "fragment"}
j
// + slideshow={"slide_type": "fragment"}
--j
// + [markdown] slideshow={"slide_type": "slide"}
// # Interpreting the C++ programming language
//
// `cling` has a broad support of the features of C++. You can define functions, classes, templates, etc ...
// + [markdown] slideshow={"slide_type": "subslide"}
// ## Functions
// + slideshow={"slide_type": "fragment"}
// Return the square of a.
double sqr(double a)
{
    const double squared = a * a;
    return squared;
}
// + slideshow={"slide_type": "fragment"}
double a = 2.5;
double asqr = sqr(a);
asqr
// + [markdown] slideshow={"slide_type": "fragment"}
// We can also just call the function
// + slideshow={"slide_type": "fragment"}
sqr(5)
// + [markdown] slideshow={"slide_type": "subslide"}
// ### Beware of redefinition
// + slideshow={"slide_type": "fragment"}
auto sqr(double a)
{
return (int)a*a;
}
// + [markdown] slideshow={"slide_type": "slide"}
// ## Classes
// + slideshow={"slide_type": "fragment"}
// Base class demonstrating virtual dispatch in the cling interpreter.
class Foo
{
public:
    // Virtual destructor so deleting a derived object through Foo* is safe.
    virtual ~Foo() {}

    // Print the value; derived classes override this to change the output.
    virtual void print(double value) const
    {
        std::cout << "Foo value = " << value << std::endl;
    }
};
// + slideshow={"slide_type": "fragment"}
Foo bar;
bar.print(1.2);
// + [markdown] slideshow={"slide_type": "subslide"}
// ## Polymorphism
// + slideshow={"slide_type": "fragment"}
// Derived class overriding Foo::print to demonstrate runtime polymorphism:
// calling print through a Foo* bound to a Bar invokes this override.
class Bar : public Foo
{
public:
    virtual ~Bar() {}

    // Prints twice the value, making the dynamic dispatch visible.
    virtual void print(double value) const
    {
        std::cout << "Bar value = " << 2 * value << std::endl;
    }
};
// + slideshow={"slide_type": "fragment"}
Foo* bar2 = new Bar;
bar2->print(1.2);
delete bar2;
// + slideshow={"slide_type": "skip"}
bar2
// + slideshow={"slide_type": "skip"}
bar2->print(1.3)
// + [markdown] slideshow={"slide_type": "slide"}
// ## Templates
// + slideshow={"slide_type": "fragment"}
#include <typeinfo>

// Primary template: stores a value and prints it preceded by the
// implementation-defined (typically mangled) name of T.
template <class T>
class FooT
{
public:
    explicit FooT(const T& t) : m_t(t) {}

    void print() const
    {
        // typeid(T).name() yields an implementation-defined type name.
        std::cout << typeid(T).name() << " m_t = " << m_t << std::endl;
    }
private:
    T m_t;
};

// Full specialization for int: demonstrates that a specialization can
// change behavior (here it omits the type name from the output).
template <>
class FooT<int>
{
public:
    explicit FooT(const int& t) : m_t(t) {}

    void print() const
    {
        std::cout << "m_t = " << m_t << std::endl;
    }
private:
    int m_t;
};
// + slideshow={"slide_type": "subslide"}
FooT<double> foot1(1.2);
foot1.print();
// + slideshow={"slide_type": "fragment"}
FooT<int> foot2(4);
foot2.print();
// + [markdown] slideshow={"slide_type": "slide"}
// ## C++11 / C++14 support
// + slideshow={"slide_type": "fragment"}
// Demonstrates the C++11 special member functions: each constructor prints
// which overload was selected (default / copy / move), so constructing from
// an lvalue vs. std::move'd value makes overload resolution visible.
class Foo11
{
public:
    Foo11() { std::cout << "Foo11 default constructor" << std::endl; }
    Foo11(const Foo11&) { std::cout << "Foo11 copy constructor" << std::endl; }
    Foo11(Foo11&&) { std::cout << "Foo11 move constructor" << std::endl; }
};
// + slideshow={"slide_type": "fragment"}
Foo11 f1;
Foo11 f2(f1);
Foo11 f3(std::move(f1));
// + slideshow={"slide_type": "fragment"}
#include <vector>
std::vector<int> v = { 1, 2, 3};
auto iter = ++v.begin();
v
// + slideshow={"slide_type": "skip"}
*iter++
// + [markdown] slideshow={"slide_type": "subslide"}
// ### ... and also lambda, universal references, `decltype`, etc ...
// + slideshow={"slide_type": "fragment"}
#include <iostream>
// generic lambda, operator() is a template with two parameters
auto glambda = [](auto a, auto &&b) { return a + b; };
auto sum = glambda(3, 3.14)
// + slideshow={"slide_type": "fragment"}
sum
// + slideshow={"slide_type": "fragment"}
// generic lambda, operator() is a template with one parameter
auto vglambda = [](auto printer) {
return [=](auto&&... ts) // generic lambda, ts is a parameter pack
{
printer(std::forward<decltype(ts)>(ts)...);
return [=] { printer(ts...); }; // nullary lambda (takes no parameters)
};
};
auto p = vglambda([](auto v1, auto v2, auto v3) { std::cout << v1 << v2 << v3 << '\n'; });
auto q = p(1, 'a', 3.14); // outputs 1a3.14
q(); // outputs 1a3.14
// + [markdown] slideshow={"slide_type": "slide"}
// ## Loading libraries (the zlib example)
// We can do the same as we did with command-line `cling`
// + slideshow={"slide_type": "fragment"}
.L z
// + slideshow={"slide_type": "fragment"}
extern "C" const char * zlibVersion();
// + slideshow={"slide_type": "fragment"}
zlibVersion()
// + [markdown] slideshow={"slide_type": "slide"}
// ## Documentation and completion
//
// - Documentation for types of the standard library is retrieved on cppreference.com.
// - The quick-help feature can also be enabled for user-defined types and third-party libraries. More documentation on this feature is available at https://xeus-cling.readthedocs.io/en/latest/inline_help.html.
//
// + slideshow={"slide_type": "fragment"}
// How do those vector things work again?
// + [markdown] slideshow={"slide_type": "subslide"}
// ### completion can be a little temperamental
// + slideshow={"slide_type": "fragment"}
float my_float;
std::vector my_vector;
int my_int;
// + [markdown] slideshow={"slide_type": "fragment"}
// #### but it mostly works
// + slideshow={"slide_type": "fragment"}
my_
// + [markdown] slideshow={"slide_type": "slide"}
// ## But what about all the data manipulation, all the rich media?
// # Extending C++
// + [markdown] slideshow={"slide_type": "fragment"}
// For python users, data science is synonymous with `numpy` and `pandas`, plotting and media libraries such as `bqplot` and `matplotlib` make data visualisation practical.
//
// There has been work within SG13 to examine the provision of 2D graphics and audio in standard C++, alongside linear algebra and better mathematical support. In Jupyter use, perhaps we can demonstrate the value of having such foundations.
// + [markdown] slideshow={"slide_type": "slide"}
// # Enabling rich media by the back door.
// For a user-defined type `T`, the rich rendering in the notebook and JupyterLab can be enabled by by implementing the function `xeus::xjson mime_bundle_repr(const T& im)`, which returns the JSON mime bundle for that type.
//
// More documentation on the rich display system of Jupyter and Xeus-cling is available at https://xeus-cling.readthedocs.io/en/latest/rich_display.html
// + [markdown] slideshow={"slide_type": "slide"}
// ### Image example
// + code_folding=[7] slideshow={"slide_type": "fragment"}
#include <string>
#include <fstream>
#include "xtl/xbase64.hpp"
#include "xeus/xjson.hpp"

namespace im
{
    // Holds the raw bytes of an image file read from disk.
    struct image
    {
        inline image(const std::string& filename)
        {
            // Read the entire file in binary mode into the buffer.
            std::ifstream fin(filename, std::ios::binary);
            m_buffer << fin.rdbuf();
        }

        std::stringstream m_buffer;  // raw file contents
    };

    // Jupyter rich-display hook: found by argument-dependent lookup, it
    // returns a MIME bundle whose "image/png" entry (base64-encoded bytes)
    // makes the notebook render the image inline.
    xeus::xjson mime_bundle_repr(const image& i)
    {
        auto bundle = xeus::xjson::object();
        bundle["image/png"] = xtl::base64encode(i.m_buffer.str());
        return bundle;
    }
}
// + slideshow={"slide_type": "fragment"}
im::image marie("images/marie.png");
marie
// + [markdown] slideshow={"slide_type": "slide"}
// ### Audio example
// + code_folding=[7] slideshow={"slide_type": "fragment"}
#include <string>
#include <fstream>
#include "xtl/xbase64.hpp"
#include "xeus/xjson.hpp"

namespace au
{
    // Holds the raw bytes of an audio file read from disk.
    struct audio
    {
        inline audio(const std::string& filename)
        {
            // Read the entire file in binary mode into the buffer.
            std::ifstream fin(filename, std::ios::binary);
            m_buffer << fin.rdbuf();
        }

        std::stringstream m_buffer;  // raw file contents
    };

    // Jupyter rich-display hook: emits a "text/html" MIME entry containing
    // an <audio> element with the WAV data inlined as a base64 data URI,
    // so the notebook renders a playable audio widget.
    xeus::xjson mime_bundle_repr(const audio& a)
    {
        auto bundle = xeus::xjson::object();
        bundle["text/html"] =
            std::string("<audio controls=\"controls\"><source src=\"data:audio/wav;base64,")
            + xtl::base64encode(a.m_buffer.str()) +
            "\" type=\"audio/wav\" /></audio>";
        return bundle;
    }
}
// + slideshow={"slide_type": "fragment"}
au::audio drums("audio/audio.wav");
drums
// + [markdown] slideshow={"slide_type": "slide"}
// ### Display
// + slideshow={"slide_type": "fragment"}
#include "xcpp/xdisplay.hpp"
// + slideshow={"slide_type": "fragment"}
xcpp::display(marie);
// + [markdown] slideshow={"slide_type": "slide"}
// ### Update-display
// + code_folding=[4] slideshow={"slide_type": "fragment"}
#include <string>
#include "xcpp/xdisplay.hpp"

namespace ht
{
    // Thin wrapper marking a string as raw HTML for rich display.
    struct html
    {
        inline html(const std::string& content)
        {
            m_content = content;
        }
        std::string m_content;  // HTML markup, rendered verbatim by the frontend
    };

    // Jupyter rich-display hook: returns the content under the "text/html"
    // MIME key so the notebook renders it as HTML (used below with
    // xcpp::display and a display id to update output in place).
    xeus::xjson mime_bundle_repr(const html& a)
    {
        auto bundle = xeus::xjson::object();
        bundle["text/html"] = a.m_content;
        return bundle;
    }
}
// A red rectangle
ht::html rect(R"(
<div style='
width: 90px;
height: 50px;
line-height: 50px;
background-color: red;
color: white;
text-align: center;'>
ACCU
</div>)");
// + slideshow={"slide_type": "fragment"}
xcpp::display(rect, "some_display_id");
// + slideshow={"slide_type": "fragment"}
// Update the rectangle to be blue
rect.m_content = R"(
<div style='
width: 90px;
height: 50px;
line-height: 50px;
background-color: blue;
color: white;
text-align: center;'>
Rocks
</div>)";
xcpp::display(rect, "some_display_id", true);
// + [markdown] slideshow={"slide_type": "slide"}
// ## Magics
//
// Magics are special commands for the kernel that are not part of the C++ language.
//
// They are defined with the symbol `%` for a line magic and `%%` for a cell magic.
//
// More documentation for magics is available at https://xeus-cling.readthedocs.io/en/latest/magics.html.
// magics are quite a rich source of extended notebook capabilities in other kernels and hopefully the xeus-cling kernel will evolve to embrace such things as `store` to allow serialisation between different notebooks.
// + slideshow={"slide_type": "fragment"}
#include <algorithm>
#include <vector>
// + slideshow={"slide_type": "fragment"}
std::vector<double> to_shuffle = {1, 2, 3, 4};
// + slideshow={"slide_type": "fragment"}
// %timeit std::random_shuffle(to_shuffle.begin(), to_shuffle.end());
// + [markdown] slideshow={"slide_type": "slide"}
// # Libraries
// Cling is not intended as an IDE, you are not going to write extensive libraries of code within a cling context, moreover it is designed to allow the fast interaction and prototyping with existing bodies of code.
//
// We'll look now at some libraries, but more importantly, we will look at how they are made accessible to the notebook
// + [markdown] slideshow={"slide_type": "subslide"}
// [](https://github.com/QuantStack/xtensor/)
//
// - GitHub repository: https://github.com/QuantStack/xtensor/
// - Online documentation: https://xtensor.readthedocs.io/
// - NumPy to xtensor cheat sheet: http://xtensor.readthedocs.io/en/latest/numpy.html
//
// `xtensor` is a C++ library for manipulating N-D arrays with an API very similar to that of numpy.
//
// ### As a header only library, no behind the scenes magic is required.
// + slideshow={"slide_type": "slide"}
#include <iostream>
#include "xtensor/xarray.hpp"
#include "xtensor/xio.hpp"
#include "xtensor/xview.hpp"
xt::xarray<double> arr1
{{1.0, 2.0, 3.0},
{2.0, 5.0, 7.0},
{2.0, 5.0, 7.0}};
xt::xarray<double> arr2
{5.0, 6.0, 7.0};
xt::view(arr1, 1) + arr2
// + [markdown] slideshow={"slide_type": "subslide"}
// Together with the C++ Jupyter kernel, `xtensor` offers a similar experience as `NumPy` in the Python Jupyter kernel, including broadcasting and universal functions.
// + slideshow={"slide_type": "fragment"}
#include <iostream>
#include "xtensor/xarray.hpp"
#include "xtensor/xio.hpp"
// + slideshow={"slide_type": "fragment"}
xt::xarray<int> arr
{1, 2, 3, 4, 5, 6, 7, 8, 9};
arr.reshape({3, 3});
std::cout << arr;
// + slideshow={"slide_type": "subslide"}
#include "xtensor-blas/xlinalg.hpp"
// + slideshow={"slide_type": "fragment"}
xt::xtensor<double, 2> m = {{1.5, 0.5}, {0.7, 1.0}};
std::cout << "Matrix rank: " << std::endl << xt::linalg::matrix_rank(m) << std::endl;
std::cout << "Matrix inverse: " << std::endl << xt::linalg::inv(m) << std::endl;
std::cout << "Eigen values: " << std::endl << xt::linalg::eigvals(m) << std::endl;
// + slideshow={"slide_type": "fragment"}
xt::xarray<double> arg1 = xt::arange<double>(9);
xt::xarray<double> arg2 = xt::arange<double>(18);
arg1.reshape({3, 3});
arg2.reshape({2, 3, 3});
std::cout << xt::linalg::dot(arg1, arg2) << std::endl;
// + [markdown] slideshow={"slide_type": "slide"}
// # [](https://github.com/QuantStack/xwidgets)
// # The C++ backend for Jupyter interactive widgets.
//
// ## Introduction
//
// `xwidgets` is a C++ implementation of the Jupyter interactive widgets protocol.
// The Python reference implementation is available in the
// [ipywidgets](https://github.com/jupyter-widgets/ipywidgets) project.
//
// `xwidgets` enables the use of the Jupyter interactive widgets in the C++
// notebook, powered by the `xeus-cling` kernel and the `cling` C++ interpreter
// from CERN. `xwidgets` can also be used to create applications making use of the
// Jupyter interactive widgets without the C++ kernel *per se*.
//
// + [markdown] slideshow={"slide_type": "slide"}
// ### Xwidgets a (very) quick example
// Using the xwidgets library we have access to a full range of controls and media renderers within the Cling notebook
// + slideshow={"slide_type": "fragment"}
#include "xwidgets/xslider.hpp"
xw::slider<double> slider;
slider // If the semicolon is omitted in the last line, the return value is displayed.
// + slideshow={"slide_type": "fragment"}
// changing some more properties
slider.max = 40;
slider.style().handle_color = "blue";
slider.orientation = "vertical";
slider.description = "A slider";
// + [markdown] slideshow={"slide_type": "slide"}
// ### We can chain the arguments for a more natural creation metaphor.
// + slideshow={"slide_type": "fragment"}
auto other_slider = xw::slider_generator<double>()
.min(-1.0)
.max(1.0)
.description("Another slider")
.finalize();
xcpp::display(other_slider);
// + [markdown] slideshow={"slide_type": "fragment"}
// ## Other libraries provided by Xeus build upon the XWidgets
// + [markdown] slideshow={"slide_type": "slide"}
// [](https://github.com/QuantStack/xplot/)
//
// - GitHub repository: https://github.com/QuantStack/xplot/
// - Online documentation: https://xplot.readthedocs.io/
//
// ## Introduction
//
// `xplot` is a C++ backend for the bqplot 2-D plotting library. It is based upon
// the `xwidgets` library, the C++ implementation of the Jupyter interactive
// widgets protocol.
//
// `xplot` enables the use of the bqplot 2-D library in the C++ notebook, powered
// by the `xeus-cling` kernel and the `cling` C++ interpreter from CERN. `xplot`
// can also be used to create applications making use of the Jupyter interactive
// widgets without the C++ kernel.
// + [markdown] slideshow={"slide_type": "subslide"}
// ### xplot has cpp files and therefore needs to load libraries
// To load a library Cling needs to be told where to find it, this is achieved by the use of pragmas.
// The following example comes from the `xplot` library
//
// #### Tip #1 Keep the compiler specifics separate
// ```c++
// #ifdef __CLING__
// #include "xplot_config_cling.hpp"
// #endif
// ```
// #### Tip #2 Tell Cling where to find the libraries, and then load them.
// ```c++
// #pragma cling add_library_path(@XPLOT_INSTALL_LIBRARY_DIR@)
// #pragma cling load("libxplot")
// ```
//
// For `add_library_path` the example uses the CMake variable expansion, but in other cases it would simply be the full OS path to the library folder.
// Individual libraries can then be loaded using `load("library_name")`; the OS-specific extension (.so, .dll, etc.) is not specified.
// + [markdown] slideshow={"slide_type": "subslide"}
// | pragma | description
// |:-|:-
// | `add_library_path` | Add the specified path to the library search path used by cling.
// | `add_include_path` | Add the specified path to the include search path used by cling.
// | `load`| Load a specific library. Note only the library file name without the extension should be given.
// + [markdown] slideshow={"slide_type": "slide"}
// ### Using XPlot
//
// + slideshow={"slide_type": "fragment"}
#include "xplot/xfigure.hpp"
#include "xplot/xmarks.hpp"
#include "xplot/xaxes.hpp"
// + code_folding=[0, 2] slideshow={"slide_type": "fragment"}
std::vector<int> x_data{1, 2, 3, 4, 5, 6, 7};
std::vector<std::vector<double>> y_data = {
{160.10, 162.34, 161.82, 162.24, 161.44, 158.51, 157.68, 151.93, 151.41, 153.06, 155.38, 161.07, 160.51, 162.99, 161.86, 163.27, 164.05, 164.52, 162.67},
{161.54, 162.17, 161.95, 161.76, 162.15, 160.92, 160.64, 161.43, 161.89, 164.16, 164.16, 162.79, 161.92, 163.30, 163.49, 162.07, 161.46, 161.82, 162.65},
{164.40, 164.35, 163.46, 163.60, 161.87, 162.08, 162.18, 161.79, 163.23, 169.10, 182.05, 179.84, 181.75, 183.80, 183.52, 185.93, 186.42, 189.36, 185.71},
{188.67, 186.91, 187.17, 189.83, 189.64, 190.06, 189.01, 192.31, 191.62, 193.11, 194.00, 193.75, 192.80, 192.96, 191.81, 191.28, 191.72, 191.20, 190.68},
{191.95, 191.56, 192.30, 192.00, 192.25, 192.99, 191.16, 190.41, 191.23, 190.10, 190.07, 189.36, 187.38, 187.88, 191.81, 191.28, 191.72, 189.99, 190.14},
{187.95, 187.34, 187.47, 186.63, 184.30, 185.97, 187.10, 189.64, 189.15, 191.67, 194.00, 194.57, 195.78, 194.40, 195.24, 193.63, 190.85, 192.5, 192.49},
{192.36, 188.49, 189.86, 188.00, 187.70, 188.42, 187.22, 188.04, 188.53, 188.39, 186.35, 181.27, 181.71, 180.37, 180.72, 180.88, 182.14, 181.55, 182.82}
};
// + slideshow={"slide_type": "fragment"}
xpl::linear_scale xs, ys;
// + slideshow={"slide_type": "fragment"}
auto boxplot = xpl::boxplot_generator(xs, ys)
.x(x_data)
.y(y_data)
.box_fill_color("gray")
.outlier_fill_color("black")
.finalize();
// + slideshow={"slide_type": "fragment"}
xpl::axis hx(xs);
hx.label = "X";
// + slideshow={"slide_type": "fragment"}
auto hy = xpl::axis_generator(ys)
.label("Y")
.orientation("vertical")
.tick_format("0.1f")
.finalize();
// + slideshow={"slide_type": "subslide"}
xpl::figure fig;
fig.add_mark(boxplot);
fig.add_axis(hx);
fig.add_axis(hy);
fig
// + slideshow={"slide_type": "fragment"}
boxplot.stroke = "red";
boxplot.box_fill_color = "blue";
boxplot.outlier_fill_color = "red";
// + slideshow={"slide_type": "fragment"}
boxplot.opacities = std::vector<double>{0.1, 0.2};
// + [markdown] slideshow={"slide_type": "slide"}
// # A few last words on xwidgets
// ### XLeaflet - interactive maps
// ## We do not have time to do this justice
// [](https://github.com/QuantStack/xleaflet)
// ## C++ backend for the jupyter-leaflet map visualization library
//
// + [markdown] slideshow={"slide_type": "slide"}
// # Load OpenStreetMap and overlay GeoJSON data.
//
// []
// + [markdown] slideshow={"slide_type": "slide"}
// # Plot weather systems such as wind velocities
// [
| accu2019_xcpp_clean.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''base'': conda)'
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import statsmodels.api as sm
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import plotly.graph_objs as go
import tensorflow.compat.v1 as tf
# %matplotlib notebook
# %matplotlib inline
import warnings
warnings.filterwarnings("ignore")
# -
tf.disable_eager_execution()
tf.disable_v2_behavior()
tf.reset_default_graph()
df = pd.read_csv("BTC-Train.csv")
df.head()
df.tail()
df.shape
df.info()
df.dtypes
corr = df.corr()
corr
df.describe()
# +
plt.figure(figsize=(18.2, 8))
ax = sns.distplot(df['Volume']);
plt.title("Distribuição", fontsize=20)
plt.axvline(df['Volume'].mean(), color='k')
plt.axvline(df['Volume'].median(), color='r')
plt.axvline(df['Volume'].mode()[0], color='g');
# -
sns.boxplot(df["High"])
sns.boxplot(df["Low"])
sns.boxplot(df["Close"])
# +
plt.figure(figsize=(20,11))
ax = sns.heatmap(df.corr(), annot=True, cmap='YlGnBu', linewidths=.5, annot_kws={'size':14} ,fmt=".1f")
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
# +
plt.figure(figsize=(20,11))
mask = np.triu(np.ones_like(corr, dtype = bool))
sns.heatmap(corr, mask = mask, annot = True, cmap="YlGnBu", linewidths=.5, annot_kws={'size':15} ,fmt=".1f")
plt.show()
# +
plt.figure(figsize=(25.5, 18))
ax = sns.lineplot(x = "Date", y="High", data = df)
# +
plt.figure(figsize=(18,5))
plt.plot(df["Date"], df["High"])
# -
# # Pré - processamento
df = df.dropna()
df = df.iloc[:,1].values
period = 30
predict_future = 1
x = df[0:(len(df) - (len(df) % period))]
x_batches = x.reshape(-1, period, 1)
y = df[1:(len(df) - (len(df) % period)) + predict_future]
y_batches = y.reshape(-1, period, 1)
# +
X_teste = df[-(period + predict_future):]
X_teste = X_teste[:period]
X_teste = X_teste.reshape(-1, period, 1)
y_teste = df[-(period):]
y_teste = y_teste.reshape(-1, period, 1)
# -
# # Rede Neural
entradas = 1
neuronios_oculta = 100
neuronios_saida = 1
# +
xph = tf.placeholder(tf.float32, [None, period, entradas])
yph = tf.placeholder(tf.float32, [None, period, neuronios_saida])
n_layers = 4
layers = [tf.keras.layers.LSTMCell(units=neuronios_oculta, activation=tf.nn.relu)
for layer in range(n_layers)]
multi_layer_cell = tf.keras.layers.StackedRNNCells(layers)
saida_rnn, _ = tf.nn.dynamic_rnn(multi_layer_cell, xph, dtype = tf.float32)
stacked_rnn_outputs = tf.reshape(saida_rnn, [-1, neuronios_oculta])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, neuronios_saida)
outputs = tf.reshape(stacked_outputs, [-1, period, neuronios_saida])
erro = tf.reduce_mean(tf.square(outputs - yph))
otimizador = tf.train.AdamOptimizer(learning_rate = 0.001)
treinamento = otimizador.minimize(erro)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoca in range(2000):
_, custo = sess.run([treinamento, erro], feed_dict = {xph: x_batches, yph: y_batches})
if epoca % 100 == 0:
print(epoca + 1, ' erro: ', custo)
previsoes = sess.run(outputs, feed_dict = {xph: X_teste})
# -
y_teste.shape
y_teste2 = np.ravel(y_teste)
y_teste2
previsoes2 = np.ravel(previsoes)
previsoes2
# +
from sklearn.metrics import mean_absolute_error
mae = mean_absolute_error(y_teste2, previsoes2)
mae
# -
plt.plot(y_teste2, markersize = 10, label = 'Valor real')
plt.plot(previsoes2, label = 'Previsões')
plt.legend()
plt.plot(y_teste2, label = 'Valor real')
plt.plot(y_teste2, markersize = 20, color = 'blue')
plt.plot(previsoes2, label = 'Previsões')
plt.legend()
| project - ML/Projeto 02 Bitcoin/RNN Bitcoin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Fixed: the Keras packages are `keras.models` and `keras.layers` (plural);
# the original `keras.model` / `keras.layer` raise ModuleNotFoundError.
from keras.models import Sequential
from keras.layers import Dense
# +
#Read in white wine data
white = pd.read_csv("http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv", sep=';')
# Read in red wine data
red = pd.read_csv("http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv", sep=';')
# +
#print (white.info())
#print (red.info())
# -
#red.head()
#red.tail()
red.sample(5)
#red.describe()
#pd.isnull(red)
# +
#white.head()
#white.tail()
#white.sample(5)
#white.describe()
#pd.isnull(white)
# +
#Visualizing the data
fig,ax=plt.subplots(1,2)
ax[0].hist(red.alcohol,10,facecolor='red',alpha=0.5,label="Red Wine")
ax[1].hist(white.alcohol,10,facecolor='white',ec="black",lw=0.5,alpha=0.5,label="White wine")
fig.subplots_adjust(left=0, right=1, bottom=0, top=0.5, hspace=0.05, wspace=1)
ax[0].set_ylim([0, 1000])
ax[0].set_xlabel("Alcohol in % Vol")
ax[0].set_ylabel("Frequency")
ax[1].set_xlabel("Alcohol in % Vol")
ax[1].set_ylabel("Frequency")
fig.suptitle("Distribution of Alcohol in % Vol")
plt.show()
# +
# Add a wine-type label to both datasets (1 = red, 0 = white) and
# concatenate them into one big training set.
red['type']=1
white['type']=0
# Fixed: `DataFrame.append` was removed in pandas 2.0; `pd.concat` with
# ignore_index=True is the supported equivalent and yields the same frame.
wines=pd.concat([red, white], ignore_index=True)
wines.sample(7)
# -
# Watch correlation between variables in the dataset.
corr=wines.corr()
sns.heatmap(corr,xticklabels=corr.columns.values,yticklabels=corr.columns.values)
# Fixed: `sns.plt` was removed from seaborn (0.9+); call matplotlib's
# pyplot directly to show the figure.
plt.show()
#Creating the dataset for training and testing the model.
X=wines.iloc[:,0:11]
y=np.ravel(wines.type)
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.33,random_state=42)
X_test.head()
scalar=StandardScaler().fit(X_train)
X_train=scalar.transform(X_train)
X_test=scalar.transform(X_test) #The return type is numpy array.
X_test
| python_end/Testing/Keras_Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Include this on top, as the first import
# This must always be imported first. If you are restarting the notebook
# don't forget to run this cell first!
import allow_local_imports
# +
# %%time
from lib.minority_game import MinorityGame
from lib.agents.agent import Agent
from lib.agents.factory import AgentFactory
from lib.strategies import DefaultStrategy
from lib.memory import UniformMemoryGenerator
from numpy.random import default_rng
import matplotlib.pyplot as plt
# Calculate the distance of best strategies
N = 51
alpha_graph = []
dist_graph = []
n_realizations = 20
rng = default_rng(0)
for m in range(2, 14):
dist = 0
for r in range(n_realizations):
game = MinorityGame(
n_agents=51,
factory_dict={
# Distance calculation also works with multiple agent groups...
1: AgentFactory(
Agent,
agent_kwargs=dict(strategy_clss=[DefaultStrategy, DefaultStrategy]),
memory_generator=UniformMemoryGenerator(m=m)
)
},
rng=rng
)
_, attendances, _, _ = game.simulate_game(max_steps=500)
# in order to create the graph
dist += game.total_strategy_distance()
alpha = 2**m/51
alpha_graph.append(alpha)
dist_graph.append(dist/n_realizations)
# everything in one graph
fig, ax = plt.subplots(figsize=(10, 8))
ax.axhline(y=0.5, color="k", linestyle="--") # vol = 1 -> randomness
ax.scatter(alpha_graph, dist_graph, c="gray", edgecolors="k", marker="o", s=12**2)
ax.plot(alpha_graph, dist_graph, color="k")
plt.ylabel("Distance = $d$",fontsize=15)
plt.xlabel("Alpha = $2^m/N$",fontsize=15)
plt.yticks(fontsize = 12)
plt.xticks(fontsize = 12)
# plt.title("Distance as a function of alpha (MG with s=2)")
plt.xscale('log')
plt.xlim([0.01,100])
plt.ylim([0.45, 0.52])
plt.savefig('out/distances.png', dpi=300, bbox_inches='tight')
plt.show()
| examples/distances.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: JuliaPro_v1.5.3-1 1.5.3
# language: julia
# name: juliapro_v1.5.3-1-1.5
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# ## Einrichtung von Julia JuMP
# ---
# Überprüfen Sie die bereits installierten Pakete. In der JuliaBox sind möglicherweise alle notwendigen Pakete bereits vorhanden.
# + slideshow={"slide_type": "fragment"}
using Pkg;
Pkg.status()
# + [markdown] slideshow={"slide_type": "slide"}
# Installieren Sie ``JuMP`` , mit dem Sie einfach Optimierungsprogramme formulieren können, sowie ``Cbc``, einen open-source Solver zum Lösen des Problems, und ``StatsPlots`` zur Visualisierung der Lösung.
# + slideshow={"slide_type": "fragment"}
#Pkg.add("JuMP");
#Pkg.add("Cbc");
#Pkg.add("StatsPlots");
# + [markdown] slideshow={"slide_type": "fragment"}
# Richten Sie die installierten Pakete so ein, dass sie im folgenden Code verwendet werden können.
# + slideshow={"slide_type": "fragment"}
using JuMP, Cbc, StatsPlots;
# + [markdown] slideshow={"slide_type": "slide"}
# # Capacitated Lot Sizing Problem
#
# ### Entscheidungsvariablen:
# Bestimmung der **Produktionsmengen** und **Lagerbestände** je Periode sowie der daraus folgenden **Rüstvorgänge**,
#
# ### Zielfunktion:
# 0) **Gesamtkostenminimierung:** $$ \qquad \min z = \displaystyle\sum_{i=1}^I \displaystyle\sum_{t=1}^T(k_i^s\cdot \gamma_{it}+k_i^l\cdot L_{it})$$
# + [markdown] slideshow={"slide_type": "slide"}
# ### Nebenbedingungen:
# 1) **Kapazitaetsrestriktion:** $ \hspace{40mm} \displaystyle\sum_{i=1}^I(t_i^p\cdot X_{it}+t_i^s\cdot\gamma_{it} ) \leq c_t \hspace{40mm} \forall t \in T\qquad $
#
# 2) **Lagerbilanzgleichung:** $ \hspace{41mm} L_{it} = L_{i,t-1}+X_{it}-d_{it} \hspace{44mm} \forall t \in T, \quad i \in I$
#
# 3) **Anfangslagerbestand:** $ \hspace{41mm} L_{i,0} = l_{start} \hspace{66mm} \forall i \in I$
#
# 4) **Rüstbedingung:**: $ \hspace{50mm} X_{it} \leq M \cdot \gamma_{it} \hspace{63mm} \forall t \in T, \quad i \in I $
#
# 5) **Nichtnegativitaetsbedingungen:** $ \hspace{23mm} X_{it}, L_{it} \geq 0 \hspace{66mm} \forall t \in T, \quad i \in I $
#
# 6) **Binaerbedingung:**
# $ \hspace{48mm} \gamma_{it} \in \{0,1\} \hspace{67mm} \forall t \in T, \quad i \in I $
# * * *
# + [markdown] slideshow={"slide_type": "slide"}
# ## Verwendete Symbole
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Mengen
#
# $i \in (1,..,I) \hspace{20mm}$ Produkte
#
# $t \in (1,..,T) \hspace{20mm}$ Perioden
#
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Parameter
#
# $k_i^l \hspace{38mm}$ Lagerkostensatz
#
# $k_i^s \hspace{38mm}$ Ruestkostensatz
#
# $t_i^p \hspace{39mm}$ Produktionszeit
#
# $t_i^s \hspace{39mm}$ Rüstzeit
#
# $M \hspace{37mm}$ Große Zahl
#
# $c_t \hspace{38mm}$ Periodenkapazität
#
# $b_{it}\hspace{37mm}$ Bedarf
#
# $l_{start} \hspace{34mm}$ Anfangslagerbestand
#
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Variablen
#
# $X_{it}$ $\geq0 \hspace{28mm}$ Produktionsmenge
#
# $L_{it}$ $\geq0 \hspace{28mm}$ Lagermenge
#
# $\gamma_{it}$ $\in\{0,1\} \hspace{21mm}$ binäre Rüstvariable
#
# * * *
# + [markdown] slideshow={"slide_type": "slide"}
# ## Erstellen Sie das Modell namens ``m`` und geben Sie als zu verwendenden Solver Cbc an.
# ---
# + slideshow={"slide_type": "fragment"}
m = Model(Cbc.Optimizer);
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Mengen und Parameter
# + [markdown] slideshow={"slide_type": "fragment"}
# Fügen Sie die Mengen ein.
#
# + slideshow={"slide_type": "fragment"}
#Mengen
Produkte = ["i1", "i2", "i3"];
Perioden = ["t1", "t2", "t3", "t4"];
#Längen
I = length(Produkte);
T = length(Perioden);
# + [markdown] slideshow={"slide_type": "slide"}
# Fügen Sie die Parameter ein.
# + slideshow={"slide_type": "fragment"}
kl = [4, 6, 5]; #Lagerkostensatz der Produkte
ks = [80, 100, 90]; #Rüstkostensatz der Produkte
ts = [8, 12, 16]; #Rüstzeiten der Produkte
tp = [2, 2, 1]; #Stückbearbeitungszeiten der Produkte
c = [320, 320, 320, 320]; #Periodenkapazität
M = 1000; #Große Zahl
l_start = [0, 0, 0]; #Anfangslagerbestand
#t1, t2, t3, t4
b = [ 50 10 10 140 #Produkt 1
0 40 80 30 #Produkt 2
90 80 10 50]; #Produkt 3
#Bedarf
# + [markdown] slideshow={"slide_type": "slide"}
# ### Entscheidungsvariablen
# ---
# Definieren Sie die Entscheidungsvariablen. Achten Sie auf die Definitionsbereiche (Nebenbedingungen 5 und 6)
# + [markdown] slideshow={"slide_type": "fragment"}
# 5) **Nichtnegativitätsbedingungen**: Produktionsmengen und Lagerbestände dürfen nicht negativ werden.
#
# $ \qquad X_{it}, L_{it} \geq 0 \qquad\qquad\qquad\qquad\qquad\qquad \forall t \in T, \quad i \in I $
# + slideshow={"slide_type": "fragment"}
@variable(m,X[1:I,1:T] >= 0,Int); #Produktionsmenge von Produkt i in Periode t
# + slideshow={"slide_type": "fragment"}
@variable(m,L[1:I,0:T] >= 0,Int); #Lagerbestand von Produkt i am Periodenende von t
# + [markdown] slideshow={"slide_type": "fragment"}
# 6) **Binaerbedingung**: Rüstvorgänge werden immer ganz ($\gamma_{it} = 1$) oder gar nicht ($\gamma_{it} = 0$) ausgeführt. Die binäre Rüstvariable nimmt also entweder den Wert 1 oder 0 an.
#
# $ \qquad \gamma_{it} \in \{0,1\} \qquad\qquad\qquad\qquad\qquad\qquad \forall t \in T, \quad i \in I $
# + slideshow={"slide_type": "fragment"}
@variable(m,gamma[1:I,1:T],Bin); #Die binäre Rüstvariable von Produkt i in Periode t
# + [markdown] slideshow={"slide_type": "slide"}
# ## Zielfunktion
#
# 0) **Gesamtkostenminimierung:** Die Gesamtkosten sollen minimiert werden. Sie bestehen aus der Summe von Rüstkosten und Lagerkosten aller Produkte über alle Perioden. Die Rüstkosten berechnen sich hierbei durch Multipizieren der Anzahl der Rüstvorgänge (Summe aler binären Rüstvariablen) mit dem Rüstkostensatz, die Lagerkosten durch Multiplizieren der eingelagerten Menge mit dem Lagerkostensatz.
#
# $ \qquad \min z = \displaystyle\sum_{i=1}^I \displaystyle\sum_{t=1}^T(k_i^s\cdot \gamma_{it}+k_i^l\cdot L_{it})$
#
# + slideshow={"slide_type": "fragment"}
@objective(m, Min, sum( ks[i] * gamma[i,t] + kl[i] * L[i,t] for i=1:I for t=1:T) );
# + [markdown] slideshow={"slide_type": "slide"}
# ## Nebenbedingungen
# + [markdown] slideshow={"slide_type": "fragment"}
# 1) **Kapazitaetsrestriktion:** Die Summe aus Produktions- und Rüstzeit aller Produkte darf in jeder Periode die vorhandene Kapazität der Ressource j nicht überschreiten.
#
# $$ \qquad \displaystyle\sum_{i=1}^I(t_i^p\cdot X_{it}+t_i^s\cdot\gamma_{it} ) \leq c_t \hspace{40mm} \forall t \in T\qquad $$
#
#
# + slideshow={"slide_type": "fragment"}
@constraint(m, KapRes[t=1:T], sum((ts[i] * gamma[i,t] + tp[i] * X[i,t]) for i=1:I) <= c[t] );
# + [markdown] slideshow={"slide_type": "fragment"}
# 2) **Lagerbilanzgleichung**: Der Lagerbestand am Ende einer Periode berechnet sich aus der Summe der Produktionsmenge und des Lagerbestandes der Vorperiode abzüglich der Absatzmenge (Nachfrage).
#
# $$ \qquad L_{it} = L_{i,t-1}+X_{it}-b_{it} \hspace{40mm} \forall t \in T, \quad i \in I$$
# + slideshow={"slide_type": "fragment"}
@constraint(m, Lager[i=1:I,t=1:T], L[i,t] == L[i,t-1] + X[i,t] - b[i,t]);
# + [markdown] slideshow={"slide_type": "slide"}
# 3) **Anfangslagerbestand**: Der Anfangslagerbestand aller Produkte entspricht dem initial gesetzen $l_i$.
#
# $$ \qquad L_{i,0} = l_{start} \hspace{40mm} \forall i \in I$$
#
# + slideshow={"slide_type": "fragment"}
@constraint(m, AnfLager[i=1:I], L[i,0] == l_start[i]);
# + [markdown] slideshow={"slide_type": "fragment"}
# 4) **Rüstbedingung**: Wenn für ein Produkt in einer Periode nicht gerüstet wird, ist die produzierte Menge dieses Produkts in dieser Periode 0. Wenn für ein Produkt in einer Periode gerüstet wird, wird die produzierte Menge durch die Rüstbedingung nicht eingeschränkt.
#
# $$ \qquad X_{it} \leq M \cdot \gamma_{it} \hspace{40mm} \forall t \in T, \quad i \in I $$
# + slideshow={"slide_type": "fragment"}
@constraint(m, Ruestbed[i=1:I,t=1:T], X[i,t] <= M * gamma[i,t]);
# + [markdown] slideshow={"slide_type": "slide"}
# ## Lösen Sie das Modell.
# ---
# + slideshow={"slide_type": "fragment"}
optimize!(m)
# + [markdown] slideshow={"slide_type": "fragment"}
# Lassen Sie sich den Zielfunktionswert Z anzeigen.
# + slideshow={"slide_type": "fragment"}
println("Objective value Z: ", JuMP.objective_value(m))
# + [markdown] slideshow={"slide_type": "slide"}
# Lassen Sie sich die Produktionsmengen, die Lagermengen und die Rüstvariable anzeigen.
# + slideshow={"slide_type": "fragment"}
JuMP.value.(X)
# + slideshow={"slide_type": "slide"}
JuMP.value.(L)
# + slideshow={"slide_type": "fragment"}
JuMP.value.(gamma)
# + [markdown] slideshow={"slide_type": "slide"}
# Stellen Sie Produktionsmenge, Nachfrage und Lagerbestand grafisch dar.
# + slideshow={"slide_type": "fragment"}
bardata = zeros((4,3,I))
for i in 1:I
bardata[:,:,i] =
[JuMP.value(X[i,1]) b[i,1] JuMP.value(L[i,1])
JuMP.value(X[i,2]) b[i,2] JuMP.value(L[i,2])
JuMP.value(X[i,3]) b[i,3] JuMP.value(L[i,3])
JuMP.value(X[i,4]) b[i,4] JuMP.value(L[i,4])]
end
# + slideshow={"slide_type": "fragment"}
p1 = groupedbar(bardata[:,:,1], label=["Produktionsmenge" "Nachfrage" "Lagerbestand"],legend=:none,
colour = [:DeepSkyBlue4 :Red3 :orange], title = "Produkt 1", xlabel = "Periode",
ylabel="Menge", ylim=(0, 125))
p2 = groupedbar(bardata[:,:,2], label=["Produktionsmenge" "Nachfrage" "Lagerbestand"], legend=:none,
colour = [:DeepSkyBlue4 :Red3 :orange], title = "Produkt 2", xlabel = "Periode",
ylabel="Menge", ylim=(0, 125))
p3 = groupedbar(bardata[:,:,3], label=["Produktionsmenge" "Nachfrage" "Lagerbestand"], legend=:none,
colour = [:DeepSkyBlue4 :Red3 :orange], title = "Produkt 3", xlabel = "Periode",
ylabel="Menge", ylim=(0, 125))
p4 = groupedbar(bardata[:,:,1,1], label=["Produktionsmenge" "Nachfrage" "Lagerbestand"],legend=:topleft,
colour = [:DeepSkyBlue4 :Red3 :orange], grid=false, xlims=(20,3), showaxis=false)
plot(p1, p2, p3, p4, layout = 4)
# + [markdown] slideshow={"slide_type": "slide"}
# Stellen Sie die Rüstvariable für die drei Produkte grafisch dar.
# -
bardataGamma = [JuMP.value(gamma[i,t]) for i in 1:I, t in 1:T]'
# + slideshow={"slide_type": "fragment"}
groupedbar(bardataGamma, label=["Produkt 1" "Produkt 2" "Produkt 3"], xlabel = "Periode",
ylabel = "Rüstvariable", title = "Rüstvariablen der Produkte",
colour = [:DeepSkyBlue4 :grey84 :Red3], ylim = (0,1.5))
| Modelle VL/201207_CLSP_julia_v1.5.3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 64-bit
# name: python395jvsc74a57bd0b62550ee9875c103988241e840a33346a8e052936219caf3064f7a25db6fe747
# ---
# +
import pandas as pd
# Load data from our dataset file into a pandas dataframe
# wget https://raw.githubusercontent.com/MicrosoftDocs/mslearn-introduction-to-machine-learning/main/Data/titanic.csv
# wget https://raw.githubusercontent.com/MicrosoftDocs/mslearn-introduction-to-machine-learning/main/graphing.py
dataset = pd.read_csv('titanic.csv', index_col=False, sep=",", header=0)
# Let's take a look at the data
dataset.head()
# -
dataset.info()
# +
import graphing
graphing.histogram(dataset, label_x='Pclass', label_y='Survived', histfunc='avg', include_boxplot=True)
# -
graphing.multiple_histogram(dataset,
label_x='Pclass', # group by ticket class
label_group="Parch", # colour by no parents or children
label_y='Survived',
histfunc="avg")
graphing.box_and_whisker(dataset, label_x="Pclass", label_y="SibSp")
graphing.scatter_2D(dataset, label_x="Age", label_y="Fare")
# +
# Plot Fare vs Survival
graphing.histogram(dataset, label_x="Fare", label_y="Survived", histfunc="avg", nbins=30, title="Fare vs Survival", include_boxplot=True, show=True)
# Plot Age vs Survival
graphing.histogram(dataset, label_x="Age", label_y="Survived", histfunc="avg", title="Age vs Survival", nbins=30, include_boxplot=True)
# +
import plotly.graph_objects as go
import numpy as np
# Create some simple functions
# Read their descriptions to find out more
def get_rows(sex, port):
    '''Returns rows that match in terms of sex and embarkment port'''
    matches_sex = dataset.Sex == sex
    matches_port = dataset.Embarked == port
    return dataset[matches_port & matches_sex]
def proportion_survived(sex, port):
    '''Returns the proportion of people meeting criteria who survived'''
    matching = get_rows(sex, port)
    return np.mean(matching.Survived)
# Make two columns of data - together these represent each combination
# of sex and embarkment port
sexes = ["male", "male", "male", "female","female", "female"]
ports = ["C", "Q", "S" ] * 2
# Calculate the number of passengers at each port + sex combination
passenger_count = [len(get_rows(sex, port)) for sex,port in zip(sexes, ports)]
# Calculate the proportion of passengers from each port + sex combination who survived
passenger_survival = [proportion_survived(sex, port) for sex,port in zip(sexes, ports)]
# Combine into a single data frame
table = pd.DataFrame(dict(
sex=sexes,
port=ports,
passenger_count=passenger_count,
passenger_survival_rate=passenger_survival
))
# Make a bubble plot
# This is just a scatter plot but each entry in the plot
# has a size and colour. We set colour to passenger_survival
# and size to the number of passengers
graphing.scatter_2D(table,
label_colour="passenger_survival_rate",
label_size="passenger_count",
size_multiplier=0.3,
title="Bubble Plot of Categorical Data")
| ML-For-Beginners/102-introduction-to-data-for-machine-learning/5-exercise-normalize-data-predict-missing-values.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1.3.2 Exercise: Count vehicles and bikes
# Create a new app based on `deepstream-test1-rtsp_out` that shows counts for vehicles and bicycles. Fill in the following cells with appropriate commands to create, build, and run your app.
# TODO
# Create a new app located at /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/my_apps/dst1-counts
# based on deepstream-test1-rtsp_out
# %cd /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps
# !mkdir -p my_apps/dst1-counts
# !cp -rfv dli_apps/deepstream-test1-rtsp_out/* my_apps/dst1-counts
# Edit the [C file](../../../deepstream_sdk_v4.0.1_jetson/sources/apps/my_apps/dst1-counts/deepstream_test1_app.c)
# TODO
# Edit the C-code to count vehicles and bicycles
# Build the app
# %cd /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/my_apps/dst1-counts
# !make clean
# !make
# TODO
# Run the app
# %cd /home/dlinano/deepstream_sdk_v4.0.2_jetson/sources/apps/my_apps/dst1-counts
# !./deepstream-test1-app /home/dlinano/deepstream_sdk_v4.0.1_jetson/samples/streams/sample_720p.h264
| solutions/ex1.3.2_CountTwo/solution-1.3.2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## A standalone example of the Fourier-based prior (PyTorch)
# In this notebook, we will be running a simple example of the Fourier-based prior, in order to show how it can be applied. We will train a very simple model with and without the Fourier-based prior, and at the end we will show the importance scores on a random sample of input sequences using three different methods of computing attributions.
#
# The goal of this notebook is to present a very simple, standalone example of models trained with and without the Fourier-based prior. None of the code in this notebook will rely on libraries/repositories other than very standard and ubiquitous ones (e.g. PyTorch, NumPy, etc.).
#
# We'll be training a simple binary model to predict binding of the SPI1 transcription factor. For the sake of simplicity and efficiency, we'll be training with only one output task (i.e. single-task models), with a slightly simpler data processing. Thus, these results won't fully match those presented in the paper (the results in the paper can be reproduced from the other code/notebooks in this repository).
#
# For more results, see the corresponding paper [here](https://proceedings.neurips.cc/paper/2020/hash/1487987e862c44b91a0296cf3866387e-Abstract.html).
#
# Some of these cells can take awhile to run, and the entire notebook can take on the order of an 30 minutes to complete. Several intermediates are saved along the way, so feel free to run the notebook in pieces, commenting out certain cells that have already been run before.
import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.special
import scipy.stats
import scipy.ndimage
import sklearn.metrics
import pyfaidx
import tqdm
# `tqdm.tqdm_notebook()` called with no iterable is deprecated and creates an
# orphaned, empty progress-bar widget; explicitly import the notebook
# submodule instead, which is what the `tqdm.notebook.trange` calls below use
import tqdm.notebook
# ### Download the data
# We'll need the following files to train our model:
# 1. Locations of SPI1 binding in the human genome (using the hg38 annotation). We will download the called peaks from the [ENCODE project](https://www.encodeproject.org/). Specifically, we'll be fetching the IDR-thresholded optimal peaks (i.e. peaks that the replicates agreed upon the most), using these regions as our positive (binding) set. We will be using the experiment [ENCSR000BGQ](https://www.encodeproject.org/experiments/ENCSR000BGQ/), which measures SPI1 binding in the GM12878 cell line.
#
# 2. The hg38 human reference genome. We will be downloading this from the UCSC genome portal.
#
# 3. The hg38 chromosome sizes. We will also be downloading this from the UCSC genome portal.
# Make a directory to store everything
# !mkdir -p prior_example/data
# !mkdir -p prior_example/models
# !mkdir -p prior_example/aux_code
# Download the peaks from ENCODE
# !wget https://www.encodeproject.org/files/ENCFF071ZMW/@@download/ENCFF071ZMW.bed.gz -O prior_example/data/peaks.bed.gz
# Download the hg38 reference genome, and unzip it
# !wget https://hgdownload.soe.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz -O prior_example/data/hg38.fasta.gz
# !gunzip prior_example/data/hg38.fasta.gz
# Download the hg38 chromosome sizes
# !wget https://hgdownload.soe.ucsc.edu/goldenPath/hg38/bigZips/hg38.chrom.sizes -O prior_example/data/hg38.chrom.sizes
# Make the auxiliary code (downloaded below into aux_code/) importable
import sys
sys.path.append("prior_example/aux_code/")
# Paths to the downloaded inputs: peak calls, reference genome, chromosome sizes
peaks_bed_path = "prior_example/data/peaks.bed.gz"
reference_fasta_path = "prior_example/data/hg38.fasta"
chrom_sizes_path = "prior_example/data/hg38.chrom.sizes"
# ### Prepare the training data
# We're going to create a rather simple data loader for our binary dataset. We will split each chromosome into windows of length 1000 bp (i.e. the input sequence length for our models), strided across the chromosome with a stride of 50 bp. A 1000 bp window will be considered a "positive" binding example if the central 200 bp region overlaps a peak summit, and will be considered a "negative" otherwise.
#
# We'll save these labels as big BED files: one for training and one for testing. Our test set will consist of chr1. Our training set will consist of all other canonical (non-scaffold) chromosomes other than chrY and chrM.
# Import the chromosome sizes, ignoring the small scaffolds
chrom_sizes = {}
with open(chrom_sizes_path, "r") as f:
    for line in f:
        chrom, size = line.strip().split()
        # Scaffold names (e.g. "chr1_KI270706v1_random") are longer than 5
        # characters; chrY and chrM are also excluded
        if len(chrom) > 5 or chrom in ("chrY", "chrM"):
            continue
        chrom_sizes[chrom] = int(size)
# chr1 is held out for testing; everything else retained above is for training
test_chroms = ["chr1"]
train_chroms = [chrom for chrom in chrom_sizes.keys() if chrom not in test_chroms]
# Import the peaks BED
peaks_bed = pd.read_csv(
    peaks_bed_path, sep="\t", header=None, # Infer compression
    names=[
        "chrom", "peak_start", "peak_end", "name", "score",
        "strand", "signal", "pval", "qval", "summit_offset"
    ]
)
# The summit offset is relative to the peak start; store absolute positions
peaks_bed["summit"] = peaks_bed["peak_start"] + peaks_bed["summit_offset"]
# Window geometry: 1000 bp inputs strided by 50 bp; a window is a positive
# if its central 200 bp region overlaps a peak summit
input_length = 1000
window_stride = 50
center_overlap_length = 200
train_labels_bed_path = "prior_example/data/train_labels.bed"
test_labels_bed_path = "prior_example/data/test_labels.bed"
def write_chrom_labels(
    chrom, chrom_size, summit_locs, window_length, window_stride,
    window_center_length, labels_bed_fp
):
    """
    For a single chromosome, write its binary labels to a BED file. A window
    is labeled 1 (positive) if its central `window_center_length` region
    contains a peak summit, and 0 (negative) otherwise.
    Arguments:
        `chrom`: a single chromosome (e.g. "chr1")
        `chrom_size`: size of the chromosome (e.g. 100000)
        `summit_locs`: an iterable of locations of peak summits
            in this chromosome
        `window_length`: length of windows
        `window_stride`: stride between the starts of consecutive windows
        `window_center_length`: length of the central region that must
            contain a summit for a window to be labeled positive
        `labels_bed_fp`: open file pointer of the file to write
            the coordinates and labels
    """
    coord_starts = np.arange(0, chrom_size - window_length, window_stride)
    coord_ends = coord_starts + window_length
    centers = coord_starts + (window_length // 2)
    values = np.zeros_like(coord_starts)
    for summit_loc in summit_locs:
        # A summit falls inside a window's central region exactly when the
        # window center is within half that region's length of the summit.
        # (An unused `delta` computation from the original was removed here.)
        values[np.abs(centers - summit_loc) < (window_center_length // 2)] = 1
    for i in tqdm.notebook.trange(len(coord_starts), desc=("Writing " + chrom)):
        labels_bed_fp.write("%s\t%d\t%d\t%d\n" % (chrom, coord_starts[i], coord_ends[i], values[i]))
# +
# Create the label BEDs for each chromosome set
# Clear the files first, if they already exist
with open(train_labels_bed_path, "w") as f:
    pass
with open(test_labels_bed_path, "w") as f:
    pass
# Create the label BEDs; chromosomes are processed in sorted order so the
# output files are deterministic across reruns
with open(train_labels_bed_path, "a") as f:
    for chrom in sorted(train_chroms):
        # Only summits on this chromosome are relevant to its labels
        summit_locs = peaks_bed[peaks_bed["chrom"] == chrom]["summit"].values
        write_chrom_labels(
            chrom, chrom_sizes[chrom], summit_locs, input_length,
            window_stride, center_overlap_length, f
        )
with open(test_labels_bed_path, "a") as f:
    for chrom in sorted(test_chroms):
        summit_locs = peaks_bed[peaks_bed["chrom"] == chrom]["summit"].values
        write_chrom_labels(
            chrom, chrom_sizes[chrom], summit_locs, input_length,
            window_stride, center_overlap_length, f
        )
# +
def dna_to_one_hot(seqs):
    """
    Converts a list of DNA ("ACGT") sequences to one-hot encodings, where the
    position of 1s is ordered alphabetically by "ACGT". `seqs` must be a list
    of N strings, where every string is the same length L. Returns an N x L x 4
    NumPy array of one-hot encodings, in the same order as the input sequences.
    All bases will be converted to upper-case prior to performing the encoding.
    Any bases that are not "ACGT" will be given an encoding of all 0s.
    """
    seq_len = len(seqs[0])
    assert np.all(np.array([len(s) for s in seqs]) == seq_len)
    # Join all sequences together into one long string, all uppercase
    seq_concat = "".join(seqs).upper()
    # Row i of this map is the encoding for base index i; the final row
    # (index 4) is all 0s, used for any non-ACGT character
    one_hot_map = np.identity(5)[:, :-1]
    # Convert string into array of ASCII character codes
    base_vals = np.frombuffer(bytearray(seq_concat, "utf8"), dtype=np.uint8)
    # Explicitly map byte values to base indices (A/C/G/T -> 0/1/2/3, any
    # other byte -> 4). The original derived indices via
    # np.unique(..., return_inverse=True), which silently mis-assigns bases
    # whenever one of A/C/G/T is absent from the batch (the inverse indices
    # are ranks among the codes that happen to be present, not fixed slots).
    base_to_index = np.full(256, 4, dtype=np.int64)
    for index, code in enumerate(b"ACGT"):
        base_to_index[code] = index
    base_inds = base_to_index[base_vals]
    # Get the one-hot encoding for those indices, and reshape back to separate
    return one_hot_map[base_inds].reshape((len(seqs), seq_len, 4))
def one_hot_to_dna(one_hot):
    """
    Converts a one-hot encoding into a list of DNA ("ACGT") sequences, where
    the position of 1s is ordered alphabetically by "ACGT". `one_hot` must be
    an N x L x 4 array of one-hot encodings. Returns a list of N "ACGT"
    strings, each of length L, in the same order as the input array. The
    returned sequences will only consist of letters "A", "C", "G", "T", or
    "N" (all upper-case). Any encodings that are all 0s will be translated
    to "N".
    """
    bases = np.array(["A", "C", "G", "T", "N"])
    # Start with every position holding index 4 ("N"); positions whose
    # encoding contains no 1 keep this default
    base_inds = np.full(one_hot.shape[:2], one_hot.shape[2])
    # Locate each 1 and record which base (last-axis index) it belongs to
    hit_batch, hit_pos, hit_base = np.where(one_hot)
    base_inds[hit_batch, hit_pos] = hit_base
    # Translate indices to letters and join each row into a string
    return ["".join(row) for row in bases[base_inds]]
# -
# Create a data loader which returns one-hot encoded sequences
# and labels
class BinaryDataLoader:
    def __init__(
        self, labels_npy_path, reference_genome_path, batch_size,
        reverse_complement=True, seed=20200930
    ):
        """
        Data loader that yields batches of one-hot encoded sequences and
        binary labels from a label BED file.
        Arguments:
            `labels_npy_path`: path to a label BED file with columns
                chrom/start/end/value
            `reference_genome_path`: path to the reference genome Fasta
            `batch_size`: number of positives (and also of negatives) drawn
                per batch
            `reverse_complement`: if True, each batch is augmented with the
                reverse complement of every sequence
            `seed`: seed for the shuffling RNG
        """
        labels_table = pd.read_csv(
            labels_npy_path, header=None, sep="\t",
            names=["chrom", "start", "end", "value"]
        )
        self.coords = labels_table[["chrom", "start", "end"]].values
        labels = labels_table["value"].values
        self.pos_inds = np.where(labels == 1)[0]
        # Bug fix: the original used `np.where(~labels)`, but `~` on an
        # integer array is a bitwise NOT (~0 == -1, ~1 == -2, both nonzero),
        # so it selected *every* index; compare against 0 explicitly
        self.neg_inds = np.where(labels == 0)[0]
        print("Positive coordinates: %d" % len(self.pos_inds))
        print("Negative coordinates: %d" % len(self.neg_inds))
        print("Total: %d" % len(labels))
        self.reference_genome_path = reference_genome_path
        # Bug fix: batch_size was never stored; __len__/__getitem__ silently
        # relied on a module-level `batch_size` global
        self.batch_size = batch_size
        self.shuffle_rng = np.random.RandomState(seed)
        self.reverse_complement = reverse_complement
    def shuffle_data(self):
        # Shuffle positives and negatives independently; batches are drawn
        # from the front of each pool
        self.shuffle_rng.shuffle(self.pos_inds)
        self.shuffle_rng.shuffle(self.neg_inds)
    def __len__(self):
        # The number of batches is driven by the (smaller) positive set
        return int(np.ceil(len(self.pos_inds) / self.batch_size))
    def __getitem__(self, index):
        """
        Returns batch of data: a B x L x 4 array of one-hot encoded
        sequences, and a B-array of binary labels.
        """
        batch_slice = slice(index * self.batch_size, (index + 1) * self.batch_size)
        pos_coords = self.coords[self.pos_inds[batch_slice]]
        neg_coords = self.coords[self.neg_inds[batch_slice]]
        all_coords = np.concatenate([pos_coords, neg_coords])
        # Positives first, then negatives
        labels = np.ones(len(pos_coords) + len(neg_coords))
        labels[len(pos_coords):] = 0
        # NOTE(review): the Fasta is reopened on every batch; presumably this
        # keeps the loader picklable, but it has per-batch overhead — confirm
        genome_reader = pyfaidx.Fasta(self.reference_genome_path)
        seqs = [
            genome_reader[chrom][start:end].seq for
            chrom, start, end in all_coords
        ]
        one_hot = dna_to_one_hot(seqs)
        if not self.reverse_complement:
            return one_hot, labels
        else:
            # Reverse complement = flip along both the length and base axes
            return np.concatenate([one_hot, np.flip(one_hot, axis=(1, 2))]), \
                np.concatenate([labels, labels])
# Each batch draws up to 64 positives plus an equal-sized slice of negatives,
# and is doubled again by reverse-complement augmentation
batch_size = 64
reverse_complement = True
train_data_loader = BinaryDataLoader(
    train_labels_bed_path, reference_fasta_path,
    batch_size, reverse_complement
)
test_data_loader = BinaryDataLoader(
    test_labels_bed_path, reference_fasta_path,
    batch_size, reverse_complement
)
# ### Define the model
# We'll be using the same binary model architecture defined in the paper.
def place_tensor(tensor):
    """
    Places a tensor on GPU, if PyTorch sees CUDA; otherwise, the returned
    tensor remains on CPU.
    """
    return tensor.cuda() if torch.cuda.is_available() else tensor
def smooth_tensor_1d(input_tensor, smooth_sigma):
    """
    Smooths an input tensor along a dimension using a Gaussian filter.
    Arguments:
        `input_tensor`: an A x B tensor to smooth along the second dimension
        `smooth_sigma`: standard deviation of the Gaussian used for
            smoothing; the Gaussian is truncated after 1 sigma (i.e. the
            smoothing window is 1 + (2 * sigma)); a sigma of 0 means no
            smoothing
    Returns a tensor of the same shape as the input, smoothed along its
    second dimension.
    """
    # A sigma of 0 is realized as a width-1 truncated Gaussian, which makes
    # the convolution below an identity
    if smooth_sigma == 0:
        width_sigma, trunc = 1, 0
    else:
        width_sigma, trunc = smooth_sigma, 1
    # Build the kernel by Gaussian-filtering a unit impulse at the window
    # center; scipy normalizes the kernel over the truncated window
    impulse = np.zeros(1 + (2 * width_sigma))
    impulse[width_sigma] = 1
    kernel_np = scipy.ndimage.gaussian_filter(impulse, sigma=width_sigma, truncate=trunc)
    kernel = place_tensor(torch.tensor(kernel_np))
    # conv1d needs 3D operands: insert channel dimensions of size 1, and
    # match the kernel's dtype to the float input
    signal = input_tensor[:, None, :]
    kernel = kernel[None, None, :].float()
    # Padding by the half-window keeps the output length equal to the input
    smoothed = torch.nn.functional.conv1d(signal, kernel, padding=width_sigma)
    return smoothed[:, 0, :]
def binary_logits_to_probs(logit_pred_vals):
    """
    Converts the model's predicted binary logits into probabilities via a
    sigmoid on all values.
    Arguments:
        `logit_pred_vals`: a tensor/array containing the predicted logits
    Returns a tensor/array of the same shape, containing the predictions as
    raw probabilities by doing a sigmoid. If the input is a NumPy array, the
    output will be a NumPy array; otherwise (e.g. a PyTorch tensor), the
    output will be a tensor.
    """
    # `isinstance` (rather than an exact `type(...) is` check) also routes
    # ndarray subclasses through the NumPy path instead of failing inside
    # torch.sigmoid
    if isinstance(logit_pred_vals, np.ndarray):
        return scipy.special.expit(logit_pred_vals)
    else:
        return torch.sigmoid(logit_pred_vals)
class BinaryPredictor(torch.nn.Module):
    def __init__(self):
        """
        Creates a binary TF binding site predictor from a DNA sequence.
        Architecture: 3 convolutional layers (each followed by ReLU and batch
        norm), max pooling, 2 fully connected layers (each followed by ReLU
        and batch norm), and a final linear map to a single output logit.
        """
        super().__init__()
        # ReLU activation for the convolutional layers and attribution prior;
        # the single (stateless) module is shared across all layers
        self.relu = torch.nn.ReLU()
        # Define the convolutional layers
        depths = [4, 64, 64, 64]
        conv_filter_sizes = [15, 15, 13]
        self.conv_layers = torch.nn.ModuleList()
        for i in range(3):
            self.conv_layers.append(
                torch.nn.Conv1d(
                    in_channels=depths[i],
                    out_channels=depths[i + 1],
                    kernel_size=conv_filter_sizes[i],
                    stride=1,
                    padding=0  # No padding (AKA "valid")
                )
            )
            self.conv_layers.append(self.relu)
            self.conv_layers.append(
                torch.nn.BatchNorm1d(depths[i + 1])
            )
        # Compute sizes of the convolutional outputs; each valid convolution
        # shrinks the length by (kernel_size - 1)
        conv_output_sizes = []
        last_size = (4, 1000)
        for i in range(3):
            next_length = int(np.floor(
                (last_size[1] - (conv_filter_sizes[i] - 1) - 1)
            )) + 1
            next_size = (64, next_length)
            conv_output_sizes.append(next_size)
            last_size = next_size
        # Define the max pooling layer
        self.max_pool_layer = torch.nn.MaxPool1d(
            kernel_size=40,
            stride=40,
            padding=0  # No padding (AKA "valid")
        )
        # Compute size of the pooling output
        pool_output_depth = conv_output_sizes[-1][0]
        pool_output_length = int(np.floor(
            (conv_output_sizes[-1][1] - (40 - 1) - 1) / 40
        )) + 1
        pool_output_size = (pool_output_depth, pool_output_length)
        # Define the fully connected layers
        dims = [pool_output_size[0] * pool_output_size[1], 50, 15]
        self.fc_layers = torch.nn.ModuleList()
        for i in range(2):
            self.fc_layers.append(
                torch.nn.Linear(dims[i], dims[i + 1])
            )
            self.fc_layers.append(self.relu)
            self.fc_layers.append(
                torch.nn.BatchNorm1d(dims[i + 1])
            )
        # Map last fully connected layer to final outputs
        self.out_map_fc = torch.nn.Linear(15, 1)
        self.bce_loss = torch.nn.BCELoss()
    def forward(self, input_seqs):
        """
        Computes a forward pass on a batch of sequences.
        Arguments:
            `input_seqs`: a B x L x 4 tensor, where B is the batch size, and
                L is the sequence length
        Returns the LOGITS of each input as a B-tensor. Note that the logits
        are returned in the order according to the input sequences.
        """
        batch_size = input_seqs.size(0)
        # PyTorch prefers convolutions to be channel first, so transpose the
        # input
        input_seqs = input_seqs.transpose(1, 2)  # Shape: B x 4 x L
        # Run through convolutions, activations, and batch norm
        x = input_seqs
        for layer in self.conv_layers:
            x = layer(x)
        conv_output = x
        # Perform max pooling
        pooled = self.max_pool_layer(conv_output)
        # Flatten
        flattened = pooled.view(batch_size, -1)
        # Run through fully connected layers, activations, and batch norm
        x = flattened
        for layer in self.fc_layers:
            x = layer(x)
        fc_output = x
        # Run through last layer to get logits
        logits = self.out_map_fc(fc_output)
        return logits.view(-1)
    def correctness_loss(self, true_vals, logit_pred_vals):
        """
        Computes the binary cross-entropy loss.
        Arguments:
            `true_vals`: a B-tensor of true binary values
            `logit_pred_vals`: a B-tensor containing the predicted LOGITS
        Returns a tensor scalar that is the loss for the batch.
        """
        # Convert logits to probabilities
        probs = binary_logits_to_probs(logit_pred_vals)
        return self.bce_loss(probs, true_vals)
    def fourier_att_prior_loss(
        self, status, input_grads, freq_limit, limit_softness,
        att_prior_grad_smooth_sigma
    ):
        """
        Computes an attribution prior loss for some given training examples,
        using a Fourier transform form.
        Arguments:
            `status`: a B-tensor, where B is the batch size; each entry is 1
                if that example is to be treated as a positive example, and
                0 otherwise
            `input_grads`: a B x L x 4 tensor, where B is the batch size, L
                is the length of the input; this needs to be the gradients
                of the input with respect to the output; this should be
                *gradient times input*
            `freq_limit`: the maximum integer frequency index, k, to
                consider for the loss; this corresponds to a frequency
                cut-off of pi * k / L; k should be less than L / 2
            `limit_softness`: amount to soften the limit by, using a hill
                function; None means no softness
            `att_prior_grad_smooth_sigma`: amount to smooth the gradient
                before computing the loss
        Returns a single scalar Tensor consisting of the attribution loss
        for the batch.
        """
        abs_grads = torch.sum(torch.abs(input_grads), dim=2)
        # Smooth the gradients
        grads_smooth = smooth_tensor_1d(
            abs_grads, att_prior_grad_smooth_sigma
        )
        # Only do the positives
        pos_grads = grads_smooth[status == 1]
        # Loss for positives
        if pos_grads.nelement():
            # `torch.rfft` was removed in PyTorch 1.8; `torch.fft.rfft`
            # yields the same one-sided spectrum as a complex tensor, so the
            # magnitudes come from `torch.abs` instead of `torch.norm` over
            # a trailing real/imaginary dimension
            pos_fft = torch.fft.rfft(pos_grads, dim=1)
            pos_mags = torch.abs(pos_fft)
            pos_mag_sum = torch.sum(pos_mags, dim=1, keepdim=True)
            pos_mag_sum[pos_mag_sum == 0] = 1  # Keep 0s when the sum is 0
            pos_mags = pos_mags / pos_mag_sum
            # Cut off DC
            pos_mags = pos_mags[:, 1:]
            # Construct weight vector: 1 below the frequency limit, then
            # either 0 (hard cutoff) or a hill-function decay (soft cutoff)
            weights = place_tensor(torch.ones_like(pos_mags))
            if limit_softness is None:
                weights[:, freq_limit:] = 0
            else:
                x = place_tensor(
                    torch.arange(1, pos_mags.size(1) - freq_limit + 1)
                ).float()
                weights[:, freq_limit:] = 1 / (1 + torch.pow(x, limit_softness))
            # Multiply frequency magnitudes by weights
            pos_weighted_mags = pos_mags * weights
            # Add up along frequency axis to get score
            pos_score = torch.sum(pos_weighted_mags, dim=1)
            pos_loss = 1 - pos_score
            return torch.mean(pos_loss)
        else:
            return place_tensor(torch.zeros(1))
# +
def save_model(model, save_path):
    """
    Saves the given model at the given path. This saves the state of the
    model (i.e. trained layers and parameters), and the arguments used to
    create the model (i.e. a dictionary of the original arguments).
    """
    # The saved object is a dict so additional metadata could be stored
    # alongside the weights later
    torch.save({"model_state": model.state_dict()}, save_path)
def restore_model(load_path):
    """
    Restores a model from the given path. It will then restore the learned
    parameters to the model.
    """
    # Build a fresh model and load the saved weights into it
    state = torch.load(load_path)["model_state"]
    model = BinaryPredictor()
    model.load_state_dict(state)
    return model
# -
# ### Train the models
# We'll train two models, one with the Fourier-based prior, and the other without. Note that the first batch might take some time to load, while the reference Fasta is being indexed.
#
# While the model trained without the prior can converge in just 1 - 2 epochs, the model trained with the prior often requires a few more epochs (due to optimizing over multiple objectives). To keep it simple, we'll just train both models for 1 epoch each. This puts the Fourier-based prior at a slight disadvantage in the comparisons, but we will see that the interpretability of the model trained with the prior is still cleaner. We will also verify that the predictive performances between the two models are reasonably close.
learning_rate = 0.001
# Fourier-prior hyperparameters: penalize magnitudes above frequency index
# 150 (of a length-1000 input), soften that cutoff with a hill function of
# exponent 0.2, and smooth gradients with a sigma-3 Gaussian before the FFT
freq_limit = 150
limit_softness = 0.2
att_prior_grad_smooth_sigma = 3
def train_model(
    data_loader, model, num_epochs, learning_rate, use_prior=False
):
    """
    Trains the model for the given number of epochs.
    Arguments:
        `data_loader`: loader yielding batches of (one-hot sequences,
            labels); must support `len()`, integer indexing, and
            `shuffle_data()`
        `model`: the `BinaryPredictor` to train in place
        `num_epochs`: number of passes over the training data
        `learning_rate`: learning rate for the Adam optimizer
        `use_prior`: if True, add the Fourier-based attribution prior loss
            to the correctness (cross-entropy) loss
    """
    model.train()  # Switch to training mode
    torch.set_grad_enabled(True)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    for epoch_i in range(num_epochs):
        losses = []
        # Bug fix: the original tested `torch.cuda.is_available` (the bare
        # function object, which is always truthy) instead of calling it
        if torch.cuda.is_available():
            torch.cuda.empty_cache()  # Clear GPU memory
        t_iter = tqdm.notebook.trange(
            len(data_loader),
            desc=("Epoch %d/%d: Loss: ---" % (epoch_i + 1, num_epochs))
        )
        data_loader.shuffle_data()
        for i in t_iter:
            input_seqs, output_vals = data_loader[i]
            input_seqs = place_tensor(torch.tensor(input_seqs)).float()
            output_vals = place_tensor(torch.tensor(output_vals)).float()
            # Clear gradients from last batch if training
            optimizer.zero_grad()
            if use_prior:
                input_seqs.requires_grad = True  # Set gradient required
                logit_pred_vals = model(input_seqs)
                # Compute the gradients of the output with respect to the
                # input
                input_grads, = torch.autograd.grad(
                    logit_pred_vals, input_seqs,
                    grad_outputs=place_tensor(
                        torch.ones(logit_pred_vals.size())
                    ),
                    retain_graph=True, create_graph=True
                    # We'll be operating on the gradient itself, so we need
                    # to create the graph
                )
                input_grads = input_grads * input_seqs  # Gradient * input
                input_seqs.requires_grad = False  # Reset gradient required
                loss = model.correctness_loss(output_vals, logit_pred_vals) + \
                    model.fourier_att_prior_loss(
                        output_vals, input_grads, freq_limit, limit_softness,
                        att_prior_grad_smooth_sigma
                    )
            else:
                logit_pred_vals = model(input_seqs)
                loss = model.correctness_loss(output_vals, logit_pred_vals)
            loss.backward()  # Compute gradient
            optimizer.step()  # Update weights through backprop
            losses.append(loss.item())
            t_iter.set_description(
                "Epoch %d/%d: Loss: %6.4f" % (epoch_i + 1, num_epochs, loss.item())
            )
        print("Average loss: %6.4f" % np.mean(losses))
def predict_model(data_loader, model):
    """
    Predicts data from the model, and returns the true values
    and the predicted probabilities.
    """
    model.eval()  # Switch to evaluation mode
    torch.set_grad_enabled(False)
    true_vals, pred_vals = [], []
    progress = tqdm.notebook.trange(len(data_loader), desc="Loss: ---")
    for batch_i in progress:
        one_hot, labels = data_loader[batch_i]
        # Keep the raw labels for the return value before any conversion
        true_vals.append(labels)
        seq_tensor = place_tensor(torch.tensor(one_hot)).float()
        label_tensor = place_tensor(torch.tensor(labels)).float()
        logits = model(seq_tensor)
        # The loss is only computed to keep the progress bar informative
        batch_loss = model.correctness_loss(label_tensor, logits)
        progress.set_description("Loss: %6.4f" % batch_loss.item())
        pred_vals.append(
            binary_logits_to_probs(logits.detach().cpu().numpy())
        )
    return np.concatenate(true_vals), np.concatenate(pred_vals)
def show_performance(true_vals, pred_vals, acc_thresh=0.5):
    """
    Shows accuracy, auROC, and auPRC.
    """
    is_pos = true_vals == 1
    is_neg = true_vals == 0
    # Count correct calls on each side of the probability threshold
    num_pos_correct = np.sum(pred_vals[is_pos] > acc_thresh)
    num_neg_correct = np.sum(pred_vals[is_neg] <= acc_thresh)
    pos_acc = num_pos_correct / np.sum(is_pos)
    neg_acc = num_neg_correct / np.sum(is_neg)
    acc = (num_pos_correct + num_neg_correct) / len(true_vals)
    auroc = sklearn.metrics.roc_auc_score(true_vals, pred_vals)
    precis, recall, _ = \
        sklearn.metrics.precision_recall_curve(true_vals, pred_vals)
    auprc = sklearn.metrics.auc(recall, precis)
    print("Accuracy: %.2f%%" % (acc * 100))
    print("Positive accuracy: %.2f%%" % (pos_acc * 100))
    print("Negative accuracy: %.2f%%" % (neg_acc * 100))
    print("auROC: %.3f" % auroc)
    print("auPRC: %.3f" % auprc)
# Run on GPU when available; models are moved there with `.to(device)`
device = torch.device("cuda") if torch.cuda.is_available() \
    else torch.device("cpu")
prior_model_path = "prior_example/models/prior_model.pt"
noprior_model_path = "prior_example/models/noprior_model.pt"
# +
# Instantiate new models: one to be trained with the prior, one without
prior_model = BinaryPredictor()
prior_model = prior_model.to(device)
noprior_model = BinaryPredictor()
noprior_model = noprior_model.to(device)
# -
# Train the model with the Fourier-based attribution prior, then save it
train_model(
    train_data_loader, prior_model, 1, learning_rate, use_prior=True
)
save_model(prior_model, prior_model_path)
# Train the baseline model without the prior, then save it
train_model(
    train_data_loader, noprior_model, 1, learning_rate, use_prior=False
)
save_model(noprior_model, noprior_model_path)
# +
# Load in saved models
prior_model = restore_model(prior_model_path)
prior_model = prior_model.to(device)
noprior_model = restore_model(noprior_model_path)
noprior_model = noprior_model.to(device)
# -
# Evaluate both models on the held-out test loader
prior_true_vals, prior_pred_vals = predict_model(test_data_loader, prior_model)
noprior_true_vals, noprior_pred_vals = predict_model(test_data_loader, noprior_model)
# Compare predictive performance
print("Performance with prior")
show_performance(prior_true_vals, prior_pred_vals)
print("")
print("Performance without prior")
show_performance(noprior_true_vals, noprior_pred_vals)
# ### Compare interpretability
# We'll show the importance score tracks for a random sample of the test-set input sequences, to visually confirm the improved signal-to-noise ratio. More sophisticated methods of quantifying the improved interpretability can be found in the other notebooks of this repository (see paper for details).
#
# We will show the importance scores using input gradients, DeepSHAP, and _in silico_ mutagenesis.
#
# Note that SPI1 binding is a relatively simple task, because of the straightforward motif and binding mode. Although we will see an improvement in the interpretability of models trained with the Fourier-based prior, these improvements are still small compared to the improvements we get when we train on more complex tasks (see the paper for examples of these complex tasks).
# To use DeepSHAP, we'll need to install the library. If you don't want to install this, then comment out all DeepSHAP-related cells (including the next four).
# The code below will clone and install the DeepSHAP repository
# !git clone https://github.com/amtseng/shap.git prior_example/aux_code/shap
# !pip install prior_example/aux_code/shap
import shap
# Download the code for performing dinucleotide shuffles
# !wget https://raw.githubusercontent.com/amtseng/fourier_attribution_priors/master/src/extract/dinuc_shuffle.py -O prior_example/aux_code/dinuc_shuffle.py
from dinuc_shuffle import dinuc_shuffle
# We'll also need some code to visualize the importance score tracks.
# Download code for visualizing importance scores
# !wget https://raw.githubusercontent.com/amtseng/fourier_attribution_priors/master/src/plot/viz_sequence.py -O prior_example/aux_code/viz_sequence.py
import viz_sequence
def interpret_input_grad(model, input_seq):
    """
    Computes input gradient x input for an L x 4 one-hot
    encoded sequence.
    """
    assert input_seq.shape == (input_length, 4)
    torch.set_grad_enabled(True)
    one_hot_np = input_seq
    # Batch of one: the model expects a B x L x 4 tensor
    batch = place_tensor(torch.tensor([input_seq])).float()
    batch.requires_grad = True
    logits = model(batch)
    model.zero_grad()
    grads, = torch.autograd.grad(logits, batch)
    # Gradient * input: keep only the contribution of the bases present
    return grads.cpu().numpy()[0] * one_hot_np
def interpret_deepshap(model, input_seq):
    """
    Computes DeepSHAP scores for an L x 4 one-hot
    encoded sequence.
    Arguments:
        `model`: a trained `BinaryPredictor`
        `input_seq`: an L x 4 one-hot encoded sequence (NumPy array)
    Returns an L x 4 array of scores, masked to the bases present in the
    input sequence.
    """
    assert input_seq.shape == (input_length, 4)
    torch.set_grad_enabled(True)
    class WrapperModel(torch.nn.Module):
        def __init__(self, inner_model):
            super().__init__()
            self.inner_model = inner_model
        def forward(self, input_data):
            return torch.unsqueeze(self.inner_model(input_data), dim=1)
    # Create wrapper model whose sole output is B x 1 tensor, which DeepSHAP
    # requires
    wrapper_model = WrapperModel(model)
    def bg_func(input_seq):
        # Empty/None input yields an all-zero background batch; otherwise
        # the background is 10 dinucleotide shuffles of the given sequence
        if not input_seq:
            return place_tensor(torch.zeros((10, input_length, 4))).float()
        else:
            return place_tensor(torch.tensor(
                dinuc_shuffle(input_seq[0].cpu().numpy(), 10)
            )).float()
    explainer = shap.DeepExplainer(
        model=wrapper_model,
        data=bg_func
    )
    # Bug fix: the original called `.cuda()` unconditionally here, which
    # crashes on CPU-only machines; use `place_tensor` as everywhere else
    input_seqs = place_tensor(torch.tensor([input_seq])).float()
    # We'll hide some of the internal DeepSHAP messages just for aesthetic purposes
    scores = explainer.shap_values(input_seqs)[0]
    return scores * input_seq
def interpret_ism(model, input_seq):
    """
    Computes in silico mutagenesis for an L x 4 one-hot
    encoded sequence.
    """
    assert input_seq.shape == (input_length, 4)
    torch.set_grad_enabled(False)
    seq_len = len(input_seq)
    # Row 0 is the unmodified sequence; row i + 1 zeroes out position i
    mutants = np.tile(input_seq, (seq_len + 1, 1, 1))
    positions = np.arange(seq_len)
    mutants[(positions + 1, positions)] = 0
    mutant_tensor = place_tensor(torch.tensor(mutants)).float()
    logits = model(mutant_tensor).detach().cpu().numpy()
    # Score = drop in the logit when a position is ablated, masked to the
    # bases actually present
    return np.expand_dims(logits[0] - logits[1:], axis=1) * input_seq
# Pick a few random positive sequences from the test set
rng = np.random.RandomState(20200930)
rand_inds = rng.choice(test_data_loader.pos_inds, size=5, replace=False)
pos_coords = test_data_loader.coords[rand_inds]
# Show the importance scores
genome_reader = pyfaidx.Fasta(reference_fasta_path)
# Only display the central 400 bp of each 1000 bp input window
center_slice = slice(300, 700)
for chrom, start, end in pos_coords:
    print("%s:%d-%d" % (chrom, start, end))
    print("-------------------------------")
    input_seq = dna_to_one_hot([genome_reader[chrom][start:end].seq])[0]
    # For each model, show all three attribution methods on the same sequence
    print("With prior:")
    print("Input gradients")
    viz_sequence.plot_weights(
        interpret_input_grad(prior_model, input_seq)[center_slice],
        subticks_frequency=100
    )
    print("DeepSHAP")
    viz_sequence.plot_weights(
        interpret_deepshap(prior_model, input_seq)[center_slice],
        subticks_frequency=100
    )
    print("ISM")
    viz_sequence.plot_weights(
        interpret_ism(prior_model, input_seq)[center_slice],
        subticks_frequency=100
    )
    print("No prior:")
    print("Input gradients")
    viz_sequence.plot_weights(
        interpret_input_grad(noprior_model, input_seq)[center_slice],
        subticks_frequency=100
    )
    print("DeepSHAP")
    viz_sequence.plot_weights(
        interpret_deepshap(noprior_model, input_seq)[center_slice],
        subticks_frequency=100
    )
    print("ISM")
    viz_sequence.plot_weights(
        interpret_ism(noprior_model, input_seq)[center_slice],
        subticks_frequency=100
    )
    print("===============================")
| notebooks/fourier_prior_example_pytorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Rayleigh Scattering <a class="tocSkip">
#
# **<NAME>**
#
# **30 Jan 2019, version 2**
# +
# Execute this cell first
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import miepython as mp
def rayleigh(m,x):
    """
    Calculate the efficiencies for a small sphere.
    Based on equations 5.7 - 5.9 in Bohren and Huffman
    Args:
        m: the complex index of refraction of the sphere
        x: the size parameter of the sphere
    Returns:
        qext: the total extinction efficiency
        qsca: the scattering efficiency
        qback: the backscatter efficiency
        g: the average cosine of the scattering phase function
    """
    # The Lorentz-Lorenz factor (m^2-1)/(m^2+2) appears in every efficiency
    lorentz = (m**2 - 1) / (m**2 + 2)
    qsca = 8 / 3 * x**4 * abs(lorentz)**2
    correction = 1 + x**2 / 15 * lorentz * (m**4 + 27 * m**2 + 38) / (2 * m**2 + 3)
    qext = abs((4 * x * lorentz * correction).imag + qsca)
    qback = 4 * x**4 * abs(lorentz)**2
    g = 0  # this approximation sets the average cosine to zero
    return qext, qsca, qback, g
def rayleigh_S1_S2(m,x,mu):
    """
    Calculate the scattering amplitude functions for small spheres.
    Based on equation 5.4 in Bohren and Huffman
    The amplitude functions are normalized so that when integrated
    over all 4*pi solid angles, the integral will be qext*pi*x**2.
    The units are weird, sr**(-0.5)
    Args:
        m: the complex index of refraction of the sphere
        x: the size parameter of the sphere
        mu: the angles, cos(theta), to calculate scattering amplitudes
    Returns:
        S1, S2: the scattering amplitudes at each angle mu [sr**(-0.5)]
    """
    # Electric dipole coefficient through order x^5
    a1 = 1j * (2 * x**3) / 3 * (m**2 - 1) / (m**2 + 2)
    a1 = a1 + 1j * (2 * x**5) / 5 * (m**2 - 2) * (m**2 - 1) / (m**2 + 2)**2
    # S1 is independent of angle; S2 carries the cos(theta) dependence
    s1 = np.ones_like(mu) * (1.5 * a1)
    s2 = 1.5 * a1 * mu
    # Scale so the integral over all angles is the single scattering albedo
    qext = rayleigh(m, x)[0]
    norm = np.sqrt(np.pi * qext) * x
    return s1 / norm, s2 / norm
def rayleigh_unpolarized(m,x,mu):
    """
    Return the unpolarized scattered intensity for small spheres.
    This is the average value for randomly polarized incident light.
    The intensity is normalized so the integral of the unpolarized
    intensity over 4pi steradians is equal to the single scattering albedo.
    Args:
        m: the complex index of refraction of the sphere
        x: the size parameter
        mu: the cos(theta) of each direction desired
    Returns
        The intensity at each angle in the array mu. Units [1/sr]
    """
    s1, s2 = rayleigh_S1_S2(m, x, mu)
    # Average of the two polarization intensities
    return (np.abs(s1)**2 + np.abs(s2)**2) / 2
# -
# Mie scattering describes the special case of the interaction of light passing through a non-absorbing medium with a single embedded spherical object. The sphere itself can be non-absorbing, moderately absorbing, or perfectly absorbing.
#
# Rayleigh scattering is a simple closed-form solution for the scattering from small spheres.
# + [markdown] heading_collapsed=true
# ## Goals for this notebook:
#
# * Plot Rayleigh scattering
# * Compare total scattering between Rayleigh and Mie
# * Compare scattering functions for unpolarized light
# * Compare polarized results.
# -
# ## The Rayleigh scattering phase function
#
#
# Rayleigh scattering describes the elastic scattering of light by spheres that are much smaller than the wavelength of light. The intensity $I$ of the scattered radiation is given by
#
# $$
# I=I_{0}\left(\frac {1+\cos ^{2}\theta }{2R^{2}}\right)
# \left(\frac {2\pi }{\lambda }\right)^{4}
# \left(\frac {n^{2}-1}{n^{2}+2}\right)^{2}
# \left(\frac {d}{2}\right)^{6}
# $$
#
# where $I_0$ is the light intensity before the interaction with the particle, $R$ is the distance between the particle and the observer, $\theta$ is the scattering angle, $n$ is the refractive index of the particle, and $d$ is the diameter of the particle.
#
# $$
# x = \frac{\pi d}{\lambda} \qquad \rho=\frac{R}{\lambda}
# $$
#
# and thus
#
# $$
# I=\frac{I_0}{8\pi^2\rho^2}
# \left(\frac{n^2-1}{n^2+2}\right)^{2}
# x^{4}(1+\cos^2\theta)
# $$
#
#
# ## Compare Efficiencies with Mie Code
# +
# Overlay the Rayleigh approximation (dashed blue) on the full Mie solution
# (solid red) for several small size parameters
for x in [0.1,0.2,0.3,0.4]:
    m = 1.5-1j
    theta = np.linspace(-180,180,180)
    mu = np.cos(theta*np.pi/180)
    rscat = rayleigh_unpolarized(m,x,mu)
    mscat = mp.i_unpolarized(m,x,mu)
    plt.plot(theta,rscat,'--b')
    plt.plot(theta,mscat,'r')
    # Label each curve pair near its right-hand end
    plt.annotate('x=%.1f '%x,(theta[-20],mscat[-20]),ha='right',va='bottom')
plt.xlim(-180,180)
plt.xlabel('Angle [degrees]')
plt.ylabel('Scattered Light [1/sr]')
plt.title('Solid Mie, Dashed Rayleigh')
plt.show()
# -
# ## Polar plots for fun
# +
m = 1.5
x = 0.1
theta = np.linspace(-180,180,180)
mu = np.cos(theta/180*np.pi)
# Unpolarized intensity plus the two polarization components
# NOTE(review): `par` is |S1|^2 and `per` is |S2|^2 here — confirm this
# naming against the S1/S2 polarization convention in Bohren & Huffman
unp = rayleigh_unpolarized(m,x,mu)
s1,s2 = rayleigh_S1_S2(m,x,mu)
par = abs(s1)**2
per = abs(s2)**2
# Left: polar view of the phase function; right: the same curves vs angle
fig,ax = plt.subplots(1,2,figsize=(12,5))
ax=plt.subplot(121, projection='polar')
ax.plot(theta/180*np.pi,unp)
ax.plot(theta/180*np.pi,par)
ax.plot(theta/180*np.pi,per)
ax.set_rticks([0.05, 0.1,0.15])
plt.subplot(122)
plt.plot(theta,unp)
plt.plot(theta,par)
plt.plot(theta,per)
plt.xlabel('Exit Angle [degrees]')
plt.ylabel('Unpolarized Scattered light [1/sr]')
plt.title("m=1.5, x = %.2f"%x)
plt.ylim(0.00,0.2)
plt.xlim(0,180)
plt.show()
# -
# ## Compare Rayleigh and Mie efficiencies
# +
# Compare the integrated efficiencies (extinction, scattering, backscatter)
# and the asymmetry parameter g between full Mie theory and the Rayleigh
# approximation; for this small size parameter they should agree closely.
m = 1.5
x = 0.1
qext, qsca, qback, g = mp.mie(m,x)  # full Mie solution
rext, rsca, rback, rg = rayleigh(m,x)  # Rayleigh (small-particle) limit
print('Qext Qsca Qback g')
print("%.5e %.5e %.5e %.5f Mie"%(qext, qsca, qback, g))
print("%.5e %.5e %.5e %.5f Rayleigh"%(rext, rsca, rback, rg))
# -
# ## Compare scattering amplitudes S1 and S2
# +
# Tabulate the imaginary parts of the scattering amplitudes S1 and S2 from
# full Mie theory next to the Rayleigh approximation, every 20 degrees.
m = 1.5
x = 0.1
theta = np.linspace(-180, 180, 19)
mu = np.cos(np.deg2rad(theta))
s1, s2 = mp.mie_S1_S2(m, x, mu)
rs1, rs2 = rayleigh_S1_S2(m, x, mu)
# the real part of the Rayleigh scattering is always zero
print(" Mie Rayleigh | Mie Rayleigh")
print(" angle | S1.imag S1.imag | S2.imag S2.imag")
print("------------------------------------------------")
for angle, a1, b1, a2, b2 in zip(theta, s1, rs1, s2, rs2):
    print("%7.2f | %8.5f %8.5f | %8.5f %8.5f " % (angle, a1.imag, b1.imag, a2.imag, b2.imag))
# -
| docs/04_rayleigh.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
library(tidyverse)
library(countrycode)
library(magrittr)
library(glue)
# ## Calculo RCA
# Revealed Comparative Advantage (Balassa index).
#
# data:         tidy trade flows with columns year, reporter, rep_iso, SITC,
#               value (5-digit SITC codes are assumed to be the raw data)
# country_filt: optional character vector of rep_iso codes; when supplied the
#               numerator is restricted to those countries, while the world
#               reference shares are always computed from every country
# digits:       SITC aggregation level (codes are truncated to this length)
#
# Returns `data` with one row per year/country/product plus prop (product
# share of the country's exports), mean_prop (product share of world exports)
# and RCA = prop / mean_prop.
rel_comp_adv <- function(data, country_filt=NA, digits=4){
  if (digits<5) { # 5 digits is the raw data, so the aggregation can be skipped
    data <- data %>%
      mutate(SITC = as.character(substr(SITC,1,digits))) %>%
      group_by(year, reporter,rep_iso, SITC) %>%
      summarise(value = sum(value))
  }
  # the reference (denominator) shares are computed over every country
  mean_dist_SITC <- data %>%
    group_by(year, SITC) %>%
    summarise(value = sum(as.numeric(value),na.rm = T)) %>%
    group_by(year) %>%
    mutate(mean_prop = value/sum(value, na.rm = TRUE))
  # `!is.na(x)` on a vector of countries yields a vector and breaks `if`
  # (an error from R 4.2 on), so test the whole argument instead
  if (!all(is.na(country_filt))) { # keep only the requested countries
    data <- data %>%
      filter(rep_iso %in% country_filt)
  }
  # country-level shares (numerator), computed after the filter
  data <- data %>%
    group_by(year, SITC, rep_iso,reporter) %>%
    summarise(value = sum(as.numeric(value),na.rm = T)) %>%
    group_by(year,rep_iso) %>%
    mutate(prop = value / sum(value, na.rm = TRUE))
  data <- data %>%
    left_join(mean_dist_SITC %>% select(year,SITC,mean_prop),by = c("year", "SITC")) %>%
    mutate(RCA = prop/mean_prop)
  data
}
# Read the raw trade data (SITC kept as character so leading zeros survive)
data <- read_csv(file = "../dataset/Export_World_directo.csv",col_types = cols(SITC = col_character()))
# keep only the 5-digit product codes (the raw, unaggregated level)
data <- data %>% filter(nchar(SITC)==5)
RCA <- rel_comp_adv(data = data, digits = 4)
write_delim(RCA,"results/RCA_mundo4d.txt",delim = ",")
# -----------
# #### Largo plazo
# tengo que cambiar algunos detalles de la funcion para los datos en LP
# Revealed Comparative Advantage for the long-run dataset, whose flows live
# in the `export_value` column (data is already at the 4-digit SITC level).
#
# data:         columns year, reporter, rep_iso, SITC, export_value
# country_filt: optional character vector of rep_iso codes; the world
#               reference shares are always computed before filtering
#
# Returns one row per year/country/product with prop, mean_prop and
# RCA = prop / mean_prop.
rel_comp_adv <- function(data, country_filt=NA){
  # reference (denominator) shares computed over every country
  mean_dist_SITC <- data %>%
    group_by(year, SITC) %>%
    summarise(value = sum(as.numeric(export_value),na.rm = T)) %>%
    group_by(year) %>%
    mutate(mean_prop = value/sum(value, na.rm = TRUE))
  # `!is.na(x)` on a vector of countries yields a vector and breaks `if`
  # (an error from R 4.2 on), so test the whole argument instead
  if (!all(is.na(country_filt))) { # keep only the requested countries
    data <- data %>%
      filter(rep_iso %in% country_filt)
  }
  # country-level shares (numerator), computed after the filter
  data <- data %>%
    group_by(year, SITC, rep_iso,reporter) %>%
    summarise(value = sum(as.numeric(export_value),na.rm = T)) %>%
    group_by(year,rep_iso) %>%
    mutate(prop = value / sum(value, na.rm = TRUE))
  data <- data %>%
    left_join(mean_dist_SITC %>% select(year,SITC,mean_prop),by = c("year", "SITC")) %>%
    mutate(RCA = prop/mean_prop)
  data
}
# Read the long-run dataset (SITC kept as character so leading zeros survive)
data <- read_csv(file = "../dataset/country_yr_sitc_4d.csv",col_types = cols(SITC = col_character()))
# +
RCA <- rel_comp_adv(data = data)
write_delim(RCA,"results/RCA_LP.txt",delim = ",")
# -
# ----------------
# # Calculo Similitud
# Symmetrise a square matrix by keeping, for every (i,j)/(j,i) pair, the
# SMALLER of the two entries -- the pairwise minimum of conditional
# probabilities used as the "proximity" measure of the product space.
# NOTE(review): despite the name, the original also kept the minimum: its
# first assignment was a dead no-op (it assigned a submatrix to itself) and
# the second overwrote the larger entry with the transposed (smaller) one.
# Behaviour is preserved here; only the dead statement is removed.
# pmin() keeps the dim/dimnames attributes of its first argument.
symmetric_max <- function(M){
  pmin(M, t(M))
}
# Build the product-similarity (proximity) matrix from RCA values.
# RCA: data frame with columns SITC, reporter and RCA for a single period.
# Returns a symmetric SITC x SITC matrix of co-export probabilities,
# symmetrised with symmetric_max().
similarity <- function(RCA){
  # binarise: a country "has" a product when its RCA exceeds 1
  cualitative_RCA <- RCA %>%
    mutate(RCA = as.integer(case_when(RCA > 1 ~ 1,
                                      RCA <= 1 ~ 0)))
  # wide 0/1 matrix: one row per product, one column per reporter
  w <- cualitative_RCA %>% spread(., reporter,RCA,fill = 0) %>%
    ungroup()
  SITC <- w$SITC
  mat <- as.matrix(w[,-1])
  # co-occurrence counts: v[i,j] = countries exporting both i and j
  v <- mat %*% t(mat)
  diag(v) <- 0
  dimnames(v) <- list(SITC, SITC)
  # countries exporting each product; row-wise division turns the counts
  # into conditional probabilities P(j | i)
  totales <- rowSums(w[,-1])
  probabilities <- v/totales
  symmetric_proba <- symmetric_max(probabilities)
  return(symmetric_proba)
}
# Similarity for a single year (2016) of the 4-digit world RCA data
RCA <- read_csv("results/RCA_mundo4d.txt",col_types = cols(SITC = col_character()))
RCA %>% glimpse
RCA_2016 <- RCA %>%
  filter(year == 2016)%>%
  select(-year, -rep_iso, -value, -prop, -mean_prop)
symmetric_proba <- similarity(RCA = RCA_2016)
symmetric_proba_df <-as_data_frame(symmetric_proba)
symmetric_proba_df$SITC <-names(symmetric_proba_df)
symmetric_proba_df <- symmetric_proba_df %>% select(SITC, everything())
write_csv(symmetric_proba_df,"results/similitud_4d_2016.csv")
# similarity using the RCA averaged over all years
RCA_promedio <- RCA %>%
  group_by(reporter,SITC)%>%
  summarise(RCA = mean(RCA))
# +
symmetric_proba <- similarity(RCA = RCA_promedio)
symmetric_proba_df <-as_data_frame(symmetric_proba)
symmetric_proba_df$SITC <-names(symmetric_proba_df)
symmetric_proba_df <- symmetric_proba_df %>% select(SITC, everything())
write_csv(symmetric_proba_df,"results/similitud_4d_mean.csv")
# -
symmetric_proba_df
# ----------------
# #### Largo plazo
# Long-run dataset: similarity for 2016 (rep_iso and year are kept so the
# version of similarity() below can build a reporter_year id)
RCA <- read_csv("results/RCA_LP.txt",col_types = cols(SITC = col_character()))
RCA_2016 <- RCA %>%
  filter(year == 2016)%>%
  select(-reporter, -value, -prop, -mean_prop)
RCA_2016 %>% glimpse
## In this version an id is built per reporter_year, so several years can be
## pooled into a single co-occurrence matrix.
similarity <- function(RCA){
  # binarise RCA and label every observation by country-year
  cualitative_RCA <- RCA %>%
    mutate(RCA = as.integer(case_when(RCA > 1 ~ 1,
                                      RCA <= 1 ~ 0)),
           id = paste0(rep_iso,'_',year))%>%
    ungroup() %>%
    select(id,SITC,RCA)
  # wide 0/1 matrix: one row per product, one column per country-year
  w <- cualitative_RCA %>% spread(., id,RCA,fill = 0) %>%
    ungroup()
  SITC <- w$SITC
  mat <- as.matrix(w[,-1])
  v <- mat %*% t(mat)   # co-occurrence counts between products
  diag(v) <- 0
  dimnames(v) <- list(SITC, SITC)
  totales <- rowSums(w[,-1])       # observations per product
  probabilities <- v/totales       # row-wise conditional probabilities
  symmetric_proba <- symmetric_max(probabilities)
  return(symmetric_proba)
}
symmetric_proba <- similarity(RCA = RCA_2016)
symmetric_proba_df <-as_data_frame(symmetric_proba)
symmetric_proba_df$SITC <-names(symmetric_proba_df)
symmetric_proba_df <- symmetric_proba_df %>% select(SITC, everything())
write_csv(symmetric_proba_df,"results/similitud_LP_2016.csv")
# Using every country & year at once
RCA_all <- RCA %>%
  select(-reporter, -value, -prop, -mean_prop)
symmetric_proba <- similarity(RCA = RCA_all)
symmetric_proba_df <-as_data_frame(symmetric_proba)
symmetric_proba_df$SITC <-names(symmetric_proba_df)
symmetric_proba_df <- symmetric_proba_df %>% select(SITC, everything())
write_csv(symmetric_proba_df,"results/similitud_LP_all.csv")
| desagregado/bipartito/calculo_RCA__similitud.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Goal:
#
# * Download most up-to-date version of NCBI 'complete' genomes
# # Setting variables
workDir = '/var/seq_data/ncbi_db/genome/Jan2016/'
# # Init
import os
# %load_ext rpy2.ipython
# + language="R"
# library(ggplot2)
# library(dplyr)
# library(tidyr)
# library(genomes)
# library(permute)
# -
# Create the working directory if needed; exist_ok avoids the racy
# check-then-create of the original `if not os.path.isdir(...)` guard.
os.makedirs(workDir, exist_ok=True)
# # Assessing NCBI prokaryote genome listing
# + language="R"
# data(proks)
# summary(proks)
# + language="R"
# update(proks)
# summary(proks)
# + magic_args="-w 600 -h 300" language="R"
# # plotting GC distribution
# ggplot(proks, aes(gc)) +
# geom_histogram(binwidth=1) +
# geom_vline(xintercept=50, linetype='dashed', color='red', alpha=0.7) +
# labs(x='G+C') +
# theme(
# text = element_text(size=16)
# )
# -
# # Complete genomes
# + language="R"
# proks.complete = proks %>% as.data.frame %>%
# filter(status == 'Complete Genome')
# proks.complete %>% head(n=3)
# + magic_args="-w 600 -h 300" language="R"
# # plotting GC distribution
# ggplot(proks.complete, aes(gc)) +
# geom_histogram(binwidth=1) +
# geom_vline(xintercept=50, linetype='dashed', color='red', alpha=0.7) +
# labs(x='G+C') +
# theme(
# text = element_text(size=16)
# )
# + language="R"
# proks.complete$gc %>% summary
# -
# # Adding taxonomy
#
# * using taxID
prokFile = os.path.join(workDir, 'proks_complete.txt')
# + magic_args="-i prokFile" language="R"
#
# write.table(proks.complete, prokFile, sep='\t', row.names=F, quote=F)
# cat('File written: ', prokFile, '\n')
# -
taxFile = os.path.splitext(prokFile)[0] + '_tax.txt'
# !cd $workDir; \
# tail -n +2 $prokFile | \
# cut -f 5 | sort -u | \
# seqDB_tools taxid2taxonomy -p 30 > $taxFile
# ### Reading in files
# + magic_args="-i taxFile" language="R"
#
# df.tax = read.delim(taxFile, sep='\t') %>%
# distinct(taxid)
# df.proks.complete = dplyr::inner_join(proks.complete, df.tax, c('taxid' = 'taxid'))
#
# # checking join
# proks.complete %>% nrow %>% print
# df.proks.complete %>% nrow %>% print
# df.proks.complete %>% head(n=3)
# -
# # Just Bacteria
# + language="R"
# df.bac.complete = df.proks.complete %>%
# filter(superkingdom == 'Bacteria')
#
# df.bac.complete %>% nrow
# -
# ### Phylum representation
# + magic_args="-w 800" language="R"
# df.bac.complete.s = df.bac.complete %>%
# group_by(phylum) %>%
# summarize(n = n())
#
# ggplot(df.bac.complete.s, aes(phylum, n)) +
# geom_bar(stat='identity') +
# scale_y_log10() +
# labs(y = 'Number of genomes') +
# theme_bw() +
# theme(
# text = element_text(size=16),
# axis.text.x = element_text(angle=60, hjust=1)
# )
# -
# ## Other Filtering
# ### Genome size
# + language="R"
# df.bac.complete %>%
# filter(size < 0.8) %>%
# arrange(size) %>%
# select(size, name) %>% tail(n=20)
# + language="R"
#
# cat('Pre-filter:', df.bac.complete %>% nrow, '\n')
# df.bac.complete = df.bac.complete %>%
# filter(size > 0.8)
# cat('Post-filter:', df.bac.complete %>% nrow, '\n')
# -
# ### removing what are really phage/plasmid genomes
# + language="R"
# cat('Pre-filter:', df.bac.complete %>% nrow, '\n')
#
# to.rm = c("Thermoanaerobacterium saccharolyticum JW/SL-YS485",
# "Streptococcus salivarius 57.I")
#
# df.bac.complete = df.bac.complete %>%
# filter(! name %in% to.rm)
#
# cat('Post-filter:', df.bac.complete %>% nrow, '\n')
# -
# ## Random selection of just 1 taxon per species
# + language="R"
#
# # parsing out representatives
# df.bac.complete.rep = df.bac.complete %>%
# filter(! is.na(refseq)) %>%
# group_by(species) %>%
# sample_n(1) %>%
# ungroup()
#
# cat('Number of representative genomes:', df.bac.complete.rep %>% nrow, '\n')
# -
# ### Phylum representation
# + magic_args="-w 800" language="R"
# df.bac.complete.rep.s = df.bac.complete.rep %>%
# group_by(phylum) %>%
# summarize(n = n())
#
# ggplot(df.bac.complete.rep.s, aes(phylum, n)) +
# geom_bar(stat='identity') +
# labs(y = 'Number of representative genomes') +
# theme_bw() +
# theme(
# text = element_text(size=16),
# axis.text.x = element_text(angle=60, hjust=1)
# )
# -
# ### genome GC content
# + magic_args="-w 600 -h 300" language="R"
# # plotting GC distribution
# ggplot(df.bac.complete.rep, aes(gc)) +
# geom_histogram(binwidth=1) +
# geom_vline(xintercept=50, linetype='dashed', color='red', alpha=0.7) +
# labs(x='G+C') +
# theme(
# text = element_text(size=16)
# )
# -
# #### Notes
#
# * A very bi-modal distribution (as see in Youngblut & Buckley (2014))
# ## Reducing bias toward low G+C endosymbionts
# + magic_args="-h 400" language="R"
# # culmulative function of Genomes w/ GC
#
# df.bac.complete.rep.f = df.bac.complete.rep %>%
# group_by() %>%
# mutate(total_genomes = n()) %>%
# ungroup() %>%
# arrange(gc) %>%
# mutate(n = 1,
# culm_perc_total = cumsum(n) / total_genomes * 100) %>%
# select(gc, culm_perc_total, phylum, genus)
#
#
# # plot
# ggplot(df.bac.complete.rep.f, aes(gc, culm_perc_total)) +
# geom_point() +
# geom_line() +
# labs(x='Genome G+C', y='Culmulative total (%)') +
# theme_bw() +
# theme(
# text = element_text(size=16)
# )
# + magic_args="-w 1100 -h 500" language="R"
# GC_cutoff = 30
#
# df.bac.complete.rep.f = df.bac.complete.rep %>%
# group_by(genus) %>%
# mutate(genus_medianGC = median(gc)) %>%
# ungroup() %>%
# filter(genus_medianGC <= GC_cutoff) %>%
# select(phylum, genus, gc)
#
# df.bac.complete.rep.f$genus = reorder(df.bac.complete.rep.f$genus, df.bac.complete.rep.f$phylum)
#
# ggplot(df.bac.complete.rep.f, aes(genus, gc, color=phylum)) +
# geom_boxplot() +
# theme_bw() +
# theme(
# text = element_text(size=16),
# axis.text.x = element_text(angle=60, hjust=1)
# )
# -
# ### Making low GC genomes just 1% of dataset
# + language="R"
# df.bac.complete.rep = df.bac.complete.rep %>%
# group_by(genus) %>%
# mutate(genus_medianGC = median(gc)) %>%
# ungroup()
#
# df.lowGC = df.bac.complete.rep %>%
# filter(genus_medianGC <= 30)
#
# df.highGC = df.bac.complete.rep %>%
# filter(genus_medianGC > 30)
#
# n.lowGC = df.lowGC %>% nrow
# n.total = df.bac.complete.rep %>% nrow
#
# cat('Number of low GC genomes: ', n.lowGC, '\n')
# cat('Low GC genomes make up ', n.lowGC / n.total * 100, '% of the dataset\n')
# + magic_args="-h 300 -w 600" language="R"
# n.sample = round(n.total * 0.01, 0)
# cat('Sampling', n.sample, 'low GC genomes\n')
#
# to.keep = sample(df.bac.complete.rep$name, n.sample, replace=FALSE)
# to.keep = append(to.keep, df.highGC$name)
#
# df.bac.complete.rep.p = df.bac.complete.rep %>%
# filter(name %in% to.keep)
#
# cat('Number of representative genomes:', df.bac.complete.rep.p %>% nrow, '\n')
#
# # plotting GC distribution
# ggplot(df.bac.complete.rep.p, aes(gc)) +
# geom_histogram(binwidth=1) +
# geom_vline(xintercept=50, linetype='dashed', color='red', alpha=0.7) +
# labs(x='G+C') +
# theme(
# text = element_text(size=16)
# )
# -
# ## Sequence download
# + magic_args="-i workDir" language="R"
#
# outFile = file.path(workDir, 'bac_complete_spec-rep1.txt')
# write.table(df.bac.complete.rep.p, outFile, sep='\t', quote=F, row.names=F)
# -
# !cd $workDir; \
# seqDB_tools accession-GI2fasta \
# -a 11 -n 2 -f 30 -header \
# -o bac_complete_spec-rep1 \
# < bac_complete_spec-rep1.txt \
# 2> bac_complete_spec-rep1.log
# !cd $workDir; \
# seqDB_tools accession-GI2fasta \
# -a 11 -n 2 -f 30 -b 1 -header \
# -o bac_complete_spec-rep1_tmp \
# < re_bac.txt
# Genomes re-downloaded via re_bac.txt in the previous cell:
# Leptospirillum_ferrooxidans_C2-3.fna
# bac_complete_spec-rep1/Marinithermus_hydrothermalis_DSM_14884.fna
# bac_complete_spec-rep1/Octadecabacter_arcticus_238.fna
# bac_complete_spec-rep1/Paracoccus_aminophilus_JCM_7686.fna
# ### Checking output
genomeDir = os.path.join(workDir, 'bac_complete_spec-rep1')
# number of genomes downloaded
# !printf "Number of bacterial genomes: "
# !cd $genomeDir; \
# find . -name "*.fna" | wc -l
# file size
# !echo "Genome file size distribution (bytes):"
# !cd $genomeDir; \
# ls -tlc *.fna | \
# perl -pe 's/ +/\t/g' | \
# cut -f 5 | NY_misc_perl stats_descriptive
# checking for non-bacterial genomes
# !find $genomeDir -name "*fna" | xargs -P 20 egrep "phage|virus|phage"
# # Renaming genomes
genomeDirRn = genomeDir + '_rn'  # destination for the renamed genome files
genomeDirRn  # notebook display of the path
# making sure each sequence is unique
# !cd $genomeDir; \
# find . -name "*fna" | \
# SIPSim genome_rename -n 24 --prefix $genomeDirRn -
# ## Checking genome info
#
# * Making sure there are no outliers that need to be filtered out
# determining the largest genome
# !cd $genomeDirRn; \
# find . -name "*.fna" | \
# seq_tools fasta_info --fn --tn --tl --tgc --sn --sl --sgc --header -n 24 - \
# > genome_info.txt
# ### loading table and plotting
# + magic_args="-i genomeDirRn" language="R"
#
# inFile = file.path(genomeDirRn, 'genome_info.txt')
#
# df = read.delim(inFile, sep='\t')
# df %>% head(n=3)
# + magic_args="-w 500 -h 350" language="R"
# # genome lengths
# ggplot(df, aes(total_seq_length)) +
# geom_histogram(binwidth=50000) +
# labs(x='Genome length (bp)', y='Count') +
# theme_bw() +
# theme(
# text = element_text(size=16)
# )
# + language="R"
# summary(df$total_seq_length)
# + magic_args="-w 500 -h 350" language="R"
# # genome GC
# ggplot(df, aes(total_GC)) +
# geom_histogram(binwidth=1) +
# labs(x='Genome G+C', y='Count') +
# theme_bw() +
# theme(
# text = element_text(size=16)
# )
# + magic_args="-w 500 -h 350" language="R"
# # genome GC
# ggplot(df, aes(total_GC)) +
# geom_density(fill='red', alpha=0.3) +
# scale_x_continuous(limits=c(10,90)) +
# labs(x='Genome G+C', y='Density') +
# theme_bw() +
# theme(
# text = element_text(size=16)
# )
# + magic_args="-w 500 -h 350" language="R"
# # sequences per genome
# ggplot(df, aes(total_sequences)) +
# geom_histogram(binwidth=1) +
# labs(x='Total chromosomes/contigs', y='Count') +
# theme_bw() +
# theme(
# text = element_text(size=16)
# )
# -
# # Indexing genomes
# list of all genomes files and their associated names
# !cd $genomeDirRn; \
# find . -name "*fna" | \
# perl -pe 's/.+\///' | \
# perl -pe 's/(.+)(\.[^.]+)/\$1\t\$1\$2/' > genome_index.txt
# !cd $genomeDirRn; \
# SIPSim genome_index \
# genome_index.txt \
# --fp . --np 30 > index_log.txt
# # Summary
# +
# !printf 'Genome directory: '; echo $genomeDirRn
# !printf 'Number of genomes: '
# !find $genomeDirRn -name "*.fna"| wc -l
# !printf 'Number of indexed genomes: '
# !find $genomeDirRn -name "*.fna.sqlite3.db*" | wc -l
# -
# ***
# ***
# ***
# # -- OLD --
# ## Weighted sampling of genome dataset
#
# * weighting against low GC organisms
# + language="R"
#
# curve(-exp(-x/10 + 1), from = 10, to = 70)
# + magic_args="-h 300" language="R"
# #weights = sapply(df.bac.complete.rep$gc, function(gc) ifelse(gc < 35, 1 - ((35 - gc) / 100), 1))
# #weights = sapply(df.bac.complete.rep$gc, function(gc) ifelse(gc < 35, 1 - exp(-gc/10), 1))
# #weights = sapply(df.bac.complete.rep$gc, function(gc) 1 - exp(-gc/50 + 2))
# weights = data.frame('weights' = weights, 'gc' = df.bac.complete.rep$gc)
#
# ggplot(weights, aes(gc, weights)) +
# geom_point() +
# theme_bw()
# + magic_args="-w 600 -h 300" language="R"
# n.sample = 500
#
# #weights = sapply(df.bac.complete.rep$gc, function(gc) ifelse(gc < 35, 1 - ((35 - gc) / 50), 1))
# to.keep = sample(df.bac.complete.rep$name, n.sample, replace=FALSE, prob=weights$weights)
#
# df.bac.complete.rep.p = df.bac.complete.rep %>%
# filter(name %in% to.keep)
#
# # plotting GC distribution
# ggplot(df.bac.complete.rep.p, aes(gc)) +
# geom_histogram(binwidth=1) +
# geom_vline(xintercept=50, linetype='dashed', color='red', alpha=0.7) +
# labs(x='G+C') +
# theme(
# text = element_text(size=16)
# )
# -
| ipynb/bac_genome/all_complete_NCBI_bac_genomes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Prior selection
# In this notebook we illustrate the selection of a prior for a gaussian process.
#
# The prior is the value returned by the gaussian process when the uncertainty is too large. It can be any model of the data (and will be learned jointly with the gaussian process during training).
from fastai.tabular.all import *
from tabularGP import tabularGP_learner
from tabularGP.prior import *
# + [markdown] heading_collapsed=true
# ## Data
# + [markdown] hidden=true
# Builds a regression problem on a subset of the adult dataset:
# + hidden=true
path = untar_data(URLs.ADULT_SAMPLE)
df = pd.read_csv(path/'adult.csv').sample(1000)  # 1000-row subsample keeps the demo fast
procs = [FillMissing, Normalize, Categorify]  # standard fastai tabular preprocessing
# + hidden=true
cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race']
cont_names = ['education-num', 'fnlwgt']
dep_var = 'age'  # regression target
# + hidden=true
data = TabularDataLoaders.from_df(df, path, procs=procs, cat_names=cat_names, cont_names=cont_names, y_names=dep_var)
# -
# ## Priors
# The default prior is a constant (`ConstantPrior`); it tends to be a solid choice across problems (as long as the problem is stationary):
learn = tabularGP_learner(data, prior=ConstantPrior)  # default: constant mean function
learn.fit_one_cycle(5, max_lr=1e-3)
# The simplest prior is the `ZeroPrior` which returns zero for all values. It is only recommended if you have prior knowledge on your output domain and know that zero should be the default value:
learn = tabularGP_learner(data, prior=ZeroPrior)  # zero mean everywhere
learn.fit_one_cycle(5, max_lr=1e-3)
# We also provide a linear prior (`LinearPrior`) which is useful when you know that your output is non-stationary and follows a trend:
learn = tabularGP_learner(data, prior=LinearPrior)  # linear-trend mean function
learn.fit_one_cycle(5, max_lr=1e-3)
# ## Transfer learning
# As the prior can be any arbitrary model (as long as the input and output types are compatible), nothing stops us from building our gaussian process on top of a prior model (which might be a gaussian process used for a similar output or a different type of model used for the same output type).
#
# Here is a deep neural network trained on the same task as our gaussian process:
learn_dnn = tabular_learner(data, layers=[200,100])  # ordinary fastai DNN on the same task
learn_dnn.fit_one_cycle(5, max_lr=1e-3)
# We can now pass the trained prior to the prior argument of our builder:
learn = tabularGP_learner(data, prior=learn_dnn)  # trained DNN used as the GP prior (frozen by default)
learn.fit_one_cycle(5, max_lr=1e-3)
# Note that, by default, the prior is frozen when transfering knowledge. Lets unfreeze it now that the rest of the gaussian process is trained:
learn.unfreeze(prior=True)  # NOTE(review): `prior=` looks tabularGP-specific; plain fastai unfreeze takes no such arg -- confirm
learn.fit_one_cycle(5, max_lr=1e-3)
| examples/3_prior_selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# ### Transform to normal distro
#
# Before we can use a t-test, we must make sure the sample(s) are normally distributed. One of the assumptions of a t-test is that the sample data should be normally distributed.
#
# We can use a log transform to make the sample statistic's distribution more normal before conducting t-tests.
source("../data_science_helpers/R_helpers.R")  # provides is_normal_distribution() and plot_distribution()
x <- seq(0, 20, by = 0.1)  # evaluation grid
# +
# skewed data built from chi-square density values
# NOTE(review): the "left skew" label is questionable -- these values look
# right-skewed (many small values, few large); confirm the intended naming
left_skews = (dchisq(x, df = 2))# + dchisq(x, df = 4))
normal = is_normal_distribution(data = left_skews)
plot_distribution(data = left_skews, label = "left skew", distribution_is_normal = normal)
# +
# a log transform compresses the long tail to make the sample more normal
log_transformed_data = log(left_skews)
normal = is_normal_distribution(data = log_transformed_data)
plot_distribution(data = log_transformed_data, label = "left skew after log transform", distribution_is_normal = normal)
# -
# ### It's not perfect, but much better
# +
# mirror the data (and shift by 0.5) to skew it the other way
right_skews = 0 - left_skews + 0.5
normal = is_normal_distribution(data = right_skews)
plot_distribution(data = right_skews, label = "right skew", distribution_is_normal = normal)
# +
# try exp() as the inverse of log(); the prose below notes this works poorly
exp_transformed_data = exp(right_skews)
normal = is_normal_distribution(data = exp_transformed_data)
plot_distribution(data = exp_transformed_data, label = "right skew after exp transform", distribution_is_normal = normal)
# -
# This doesn't seem to work. I thought that $ e^x $ should be the opposite of $ log(x) $ but it doesn't seem to work well. Probably best to reverse the distribution to be left skewed and then apply the $ log $ transformation.
#
# ### It depends on your hypothesis
#
# If you use a single sample test and the hypothesis looks like:
#
# $ h_0: \mu = 100 $
#
# $ h_a: \mu \ne 100 $
#
# then if you transform the sample, for example with $ log() $ then you should also transform the hypothesis value:
#
# $ h_0: \mu' = log(100) $
#
# $ h_a: \mu' \ne log(100) $
#
# If you use a paired sample or independent sample test, then as long as both samples are transformed in the same way, then that will work without transforming the hypothesis value.
#
| statistics/transform_to_normal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Analysis of COVID dataset
# + slideshow={"slide_type": "slide"}
import numpy as np
import pandas as pd
# + slideshow={"slide_type": "fragment"}
# !ls data/
# + slideshow={"slide_type": "slide"}
df=pd.read_csv("data/COVID19_open_line_list.csv")  # one row per reported case
df.head()
# + slideshow={"slide_type": "slide"}
df.info()
# + slideshow={"slide_type": "slide"}
# keep only the demographic / location columns
features=["ID","age","sex","city","province","country"]
df1=df[features]
df1.head()
# + slideshow={"slide_type": "fragment"}
df1.info()
# + slideshow={"slide_type": "skip"}
type(df1)
# + slideshow={"slide_type": "slide"}
df2=df1.dropna()  # keep complete cases only
df2.head()
# + slideshow={"slide_type": "slide"}
df2.groupby(["country","province"])["ID"].count()  # case counts per region
# + slideshow={"slide_type": "slide"}
df2.groupby("sex")["ID"].count()
# + slideshow={"slide_type": "fragment"}
df2["sex"].unique()
# + slideshow={"slide_type": "fragment"}
df2[df2["sex"]=="Female"]
# + slideshow={"slide_type": "fragment"}
df2[df2["sex"]=="Female"]["sex"]
# + slideshow={"slide_type": "slide"}
df2.loc[df2.sex=="Female",:]
# + slideshow={"slide_type": "fragment"}
df2.loc[df2.sex=="Female","sex"]
# + slideshow={"slide_type": "fragment"}
# Normalise the inconsistent labels.  Assigning the result back replaces the
# original `inplace=True` call on the column Series, which is the
# chained-assignment pitfall (may act on a temporary; deprecated behaviour
# in recent pandas).
df2["sex"] = df2["sex"].replace({"Male": "male", "Female": "female"})
# + slideshow={"slide_type": "slide"}
df2["sex"].unique()
# + slideshow={"slide_type": "fragment"}
df2.pivot_table(values="ID",index=["country","province"],columns="sex")
# + slideshow={"slide_type": "slide"}
df2.pivot_table(values="ID",index="country",columns="sex")
# + slideshow={"slide_type": "slide"}
import matplotlib.pyplot as plt
# counts of cases by country and sex
df2.pivot_table(values="ID",index="country",columns="sex").plot(kind="bar")
plt.show()
# -
# !pwd
| python/projects/covid/COVID.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
import pandas
import numpy
import matplotlib as plt
from sklearn import linear_model, feature_extraction
def categorical_features(row):
    """Return the categorical feature dict for an iterrows() (index, row) pair.

    Only the state abbreviation is used as a categorical feature.
    """
    return {"STATE": row[1]["STATE"]}
#feature
def last_poll(full_data):
    """
    Create feature from the most recent poll in each state.

    Keeps Republican polls only, drops "Undecided" responses, and for every
    state keeps the chronologically last poll.  National ("US") polls are
    removed.  Returns a DataFrame with one row per state.
    """
    # Only care about republicans
    repub = full_data[full_data["PARTY"] == "Rep"]
    # Bug fix: filter the already-restricted frame, not full_data --
    # the original re-filtered full_data here and silently dropped the
    # party restriction.
    repub = repub[repub["CHOICE"] != "Undecided"]
    # Sort by date (ISO date strings sort correctly as text)
    chron = repub.sort_values(by="DATE", ascending=True)
    # Only keep the last one per state
    dedupe = chron.drop_duplicates(subset="STATE", keep="last")
    # Remove national polls
    return dedupe[dedupe["STATE"] != "US"]
if __name__ == "__main__":
    # Read in the X data (poll results)
    all_data = pandas.read_csv("data.csv")
    # Remove non-states
    all_data = all_data[pandas.notnull(all_data["STATE"])]
    # split between testing and training
    train_x = last_poll(all_data[all_data["TOPIC"] == '2012-president'])
    test_x=train_x.tail(10)
    train_x=train_x.head(40)
    # NOTE(review): set_index returns a new frame; without assignment or
    # inplace=True these two lines have no effect
    train_x.set_index("STATE")
    test_x.set_index("STATE")
    # Read in the Y data (actual 2012 general-election results)
    y_data = pandas.read_csv("../data/2012_pres.csv", sep=';')
    y_data = y_data[y_data["PARTY"] == "R"]
    y_data = y_data[pandas.notnull(y_data["GENERAL %"])]
    # convert "12,34%" style strings to floats
    y_data["GENERAL %"] = [float(x.replace(",", ".").replace("%", ""))
                           for x in y_data["GENERAL %"]]
    y_data["STATE"] = y_data["STATE ABBREVIATION"]
    y_data.set_index("STATE")
    backup = train_x
    train_x = y_data.merge(train_x, on="STATE",how='left')
    # make sure we have all states in the test data
    # NOTE(review): DataFrame.append was removed in pandas 2.0; pd.concat
    # is the modern replacement
    for ii in set(y_data.STATE) - set(test_x.STATE):
        new_row = pandas.DataFrame([{"STATE": ii}])
        test_x = test_x.append(new_row)
    # format the data for regression: one-hot encode the state
    train_x = pandas.concat([train_x.STATE.astype(str).str.get_dummies(),
                             train_x], axis=1)
    test_x = pandas.concat([test_x.STATE.astype(str).str.get_dummies(),
                            test_x], axis=1)
    # handle missing data: flag states without a poll, zero-fill the value
    for dd in train_x, test_x:
        dd["NOPOLL"] = pandas.isnull(dd["VALUE"])
        dd["VALUE"] = dd["VALUE"].fillna(0.0)
    # create feature list: state dummies + poll value + no-poll indicator
    features = list(y_data.STATE)
    features.append("VALUE")
    features.append("NOPOLL")
    # fit the regression
    mod = linear_model.LinearRegression()
    mod.fit(train_x[features], train_x["GENERAL %"])
    # Write out the model (intercept plus one coefficient per feature)
    with open("model.txt", 'w') as out:
        out.write("BIAS\t%f\n" % mod.intercept_)
        for jj, kk in zip(features, mod.coef_):
            out.write("%s\t%f\n" % (jj, kk))
    # Write the predictions, one "STATE<TAB>value" line per state
    pred_test = mod.predict(test_x[features])
    with open("pred.txt", 'w') as out:
        for ss, vv in sorted(zip(list(test_x.STATE), pred_test)):
            out.write("%s\t%f\n" % (ss, vv))
# -
# NOTE(review): this compares test-set predictions against the *training*
# targets -- the rows are misaligned, so this is not a valid test error;
# the test-set targets should be used instead
print("Mean squared error: %.2f"
      % numpy.mean((mod.predict(test_x[features]) - train_x["GENERAL %"]) ** 2))
#test data - train data
#(prediction - observed)^2
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % mod.score(test_x[features],train_x["GENERAL %"]))
all_data
# NOTE(review): comparing a Series to a string inside `if` raises
# "truth value of a Series is ambiguous" -- a boolean mask or .any() is needed
if (all_data.TOPIC=="obama-job-approval"):
    print(all_data.TOPIC)
| regression/mycode.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
# + [markdown] heading_collapsed=true
# ## Loading and Cleaning Data
# + hidden=true
# Load the raw retail transactions; the first CSV column is the row index.
retail_df = pd.read_csv('retail_dataset_II.csv', index_col=0)
retail_df.head()
# + hidden=true
retail_df.shape
# + hidden=true
# Normalise column names to snake_case.
retail_df.columns = retail_df.columns.str.lower().str.replace(' ', '_')
# + hidden=true
retail_df.rename(columns={'price':'unit_price'}, inplace=True)
# + hidden=true
retail_df.isna().sum()
# + hidden=true
retail_df.info()
# + hidden=true
retail_df.describe()
# + [markdown] hidden=true
# - There are some odd values in data.
# - quantity and unit price columns have some negative values
# - max value of quantity is 19152 (seems impossible)
# - Additionally, the maximum unit price of 25111 is possible in principle, but seems highly unlikely.
# + hidden=true
# Inspect the extreme-price rows flagged by describe().
retail_df.loc[retail_df.unit_price==25111.09]
# + hidden=true
retail_df.loc[retail_df['unit_price'] == -53594.360000]
# + hidden=true
# Count rows with non-positive price / quantity before dropping anything.
(retail_df.unit_price <= 0).sum()
# + hidden=true
(retail_df.quantity <= 0).sum()
# + hidden=true
((retail_df.unit_price <=0) & (retail_df.quantity<=0) & (retail_df.customer_id.isna())).sum()
# + hidden=true
# Drop rows that are bad on all three counts at once.
condition = (retail_df.unit_price <=0) & (retail_df.quantity<=0) & (retail_df.customer_id.isna())
new_retail = retail_df.loc[~condition]
new_retail.describe()
# + hidden=true
new_retail.shape
# + hidden=true
new_retail.isna().sum()
# + hidden=true
((new_retail.customer_id.isna()) & (new_retail.unit_price<=0)).sum()
# + hidden=true
# Also drop rows with a missing customer id *and* a non-positive price.
mask = (new_retail.customer_id.isna()) & (new_retail.unit_price<=0)
new_retail = new_retail.loc[~mask]
new_retail.isna().sum()
# + hidden=true
# Remove any remaining non-positive quantities and prices.
new_retail = new_retail.loc[~(new_retail.quantity<=0)]
# + hidden=true
new_retail = new_retail.loc[~(new_retail.unit_price<=0)]
# + hidden=true
new_retail.isna().sum()
# + hidden=true
new_retail.shape
# + hidden=true
new_retail.dropna(inplace=True)
# + hidden=true
plt.rcParams['figure.figsize'] = [12, 6]
sns.boxplot(data=new_retail, x='unit_price')
plt.show()
# + hidden=true
# Trim price outliers (cutoff chosen from the boxplot above).
new_retail = new_retail.loc[new_retail.unit_price<100]
# + hidden=true
sns.boxplot(data=new_retail, x='unit_price')
plt.show()
# + hidden=true
sns.boxplot(data=new_retail, x='quantity')
plt.show()
# + hidden=true
# Trim quantity outliers likewise.
new_retail = new_retail.loc[new_retail.quantity<2500]
# + hidden=true
sns.boxplot(data=new_retail, x='quantity')
plt.show()
# + hidden=true
new_retail.shape
# + [markdown] heading_collapsed=true
# ## Data Preprocessing
# + hidden=true
# Continue with the cleaned frame under the original name.
retail_df = new_retail
retail_df.head()
# + hidden=true
retail_df.describe(include='O').T
# + hidden=true
# Normalise text and dtypes: lower-case descriptions, parsed dates,
# integer customer ids.
retail_df.description = retail_df.description.str.lower()
# + hidden=true
retail_df.invoicedate = pd.to_datetime(retail_df.invoicedate)
# + hidden=true
retail_df.customer_id = retail_df.customer_id.astype('int')
# + hidden=true
retail_df.head()
# + hidden=true
retail_df.info()
# + [markdown] heading_collapsed=true hidden=true
# ### Feature Engineering
# + hidden=true
# Derive calendar features from the invoice timestamp.
retail_df['year'] = retail_df.invoicedate.dt.year
retail_df['month'] = retail_df.invoicedate.dt.month
retail_df['day'] = retail_df.invoicedate.dt.day
# dt.day_of_week is 0-based (Monday=0); +1 shifts it to 1..7.
retail_df['day_of_week'] = retail_df.invoicedate.dt.day_of_week + 1
retail_df['hour'] = retail_df.invoicedate.dt.hour
# + hidden=true
retail_df.head()
# + hidden=true
# Money spent on the line item.
retail_df['spent'] = retail_df.quantity * retail_df.unit_price
# + hidden=true
retail_df.head()
# + hidden=true
## columns repositioning
retail_df = retail_df[['invoice', 'country', 'customer_id', 'stockcode',
                       'description','quantity', 'unit_price', 'invoicedate', 'spent',
                       'year', 'month', 'day','day_of_week', 'hour']]
# -
# ## Data Analysis
# 1. Which customers placed the most and fewest orders?
# 2. Which customers spent the most and least money?
# 3. Which months were the most and least popular for this online retail store?
# 4. Which dates of the month were the most and least popular for this online retail store?
# 5. Which days were the most and least popular for this online retail store?
# 6. Which hours of the day were most and least popular for this online retail store?
# 7. Which items were ordered the most and least?
# 8. Which countries placed the most and fewest orders?
# 9. Which countries spent the most and least money?
# +
## Q.1
# Orders per customer: count of invoice rows per customer_id.
customer_data = retail_df.groupby(['customer_id'], as_index=False)['invoice'].count()
customer_data.head(5)
# -
# Top 10 customers in terms of order placing
customer_data.nlargest(n=10, columns=['invoice'])
# bottom 10 customers in terms of order placing
customer_data.nsmallest(n=10, columns=['invoice'])
customer_data.describe()
# +
# Q.2: total spend per customer.
# NOTE(review): grouping by `quantity` as well as `customer_id` looks
# unintentional for a per-customer total — TODO confirm.
spent_data = retail_df.groupby(['customer_id', 'quantity'], as_index=False)['spent'].sum()
spent_data.head()
# -
spent_data.nlargest(10, 'spent')
spent_data.nsmallest(10, 'spent')
# +
# Q.3 Which months were the most and least popular for this online retail store?
# Count distinct invoices per (year, month).
ord_data = retail_df.groupby(['year','month'])['invoice'].unique().apply(lambda x: x.size)
ord_data = ord_data.to_frame().reset_index()
ord_data.sort_values('invoice')
# +
# Q.4 Which day were the most and least popular for this online retail store?
ord_data = retail_df.groupby('day')['invoice'].unique().apply(lambda x: x.size)
ord_data
# -
plt.bar(ord_data.index, ord_data)
plt.show()
# Q.5: distinct invoices by day of week (1..7, Monday first).
plot_data = retail_df.groupby('day_of_week')['invoice'].unique().apply(lambda x: x.size)
plot_data
plot_data.to_frame().plot.bar(rot=0)
plt.show()
# Q.6: distinct invoices by hour of day.
hour_data = retail_df.groupby('hour')['invoice'].unique().apply(lambda x: x.size)
hour_data
hour_data.plot.bar(rot=0)
plt.show()
# +
# Which items were ordered the most and least?
items_data = retail_df.groupby('description', as_index=False)['quantity'].sum()
items_data
# -
items_data.sort_values('quantity', ascending=False)
# +
# Which countries placed the most and fewest orders?
countries_data = retail_df.groupby('country', as_index=False)['invoice'].count()
countries_data.sort_values('invoice', ascending=False)
# -
# - This store is based in the UK, so it seems obvious that customers in the UK would have placed the most orders. Remove the UK from ord_coun to see how the other countries are ordered.
countries_data_new = countries_data.loc[~(countries_data.country=='United Kingdom')]
countries_data_new = countries_data_new.sort_values('invoice', ascending=False)
plt.figure(figsize=(12,10))
plt.barh(y=countries_data_new.country, width=countries_data_new.invoice)
plt.show()
# Q.9: total spend per country.
total_spent = retail_df.groupby('country')['spent'].sum()
total_spent
total_spent = total_spent.to_frame().sort_values('spent')
total_spent
total_spent.drop(index=['United Kingdom']).plot.bar()
plt.show()
| Online Retail Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy as scipy
from scipy.interpolate import interp1d
# # Optimization and Root Finding
# Many problems in statistics can be reduced to optimization problems, which in turn are reduced to root finding (because we optimize functions by taking derivatives and finding the zeroes of the derivative functions). Before we dive into the techniques, lets look at some examples of where optimization comes up in statistics.
# ## Example: Maximum Likelihood Estimation (MLE)
# Recall that in MLE, we are interested in estimating the value of a parameter $\theta$ that maximizes a log-likelihood function $\ell(X;\theta)$. Let $X_1,...,X_n$ be an iid set of random variables with pdf $f(x;\theta)$, where $\theta \in \mathbb{R}^k$ is a parameter. The likelihood function is:
#
#
#
# $$L(X;\theta) = \prod_{i=1}^n f(X_i;\theta)$$
#
# We want the value of $\theta$ that maximizes $L$. We can accomplish this by taking the first derivative (or gradient) of $L$ with respect to $\theta$, setting it to zero and solving for $\theta$. However, this is more easily accomplished if we first take $\log(L)$, as $L$ is a product of densities, and taking the log of a product yields a sum. Because $log$ is a monotonically increasing function, any value of $\theta$ that maximizes $\log(L)$ also maximizes $L$.
# $$
# \begin{eqnarray*}
# \ell(X;\theta) &=& \log(L(X;\theta)) \\\\
# &=& \log\left(\prod_{i=1}^n f(X_i;\theta)\right)\\\\
# &=&\sum_{i=1}^n \log(f(X_i;\theta))
# \end{eqnarray*}
# $$
# Optimization then amounts to finding the zeros of
# $$
# \begin{eqnarray*}
# \frac{\partial\ell}{\partial \theta} &=& \frac{\partial}{\partial \theta} \left(\sum_{i=1}^n\log(f(X_i;\theta))\right)\\\\
# &=& \sum_{i=1}^n \frac{\partial\log(f(X_i;\theta))}{\partial \theta}
# \end{eqnarray*}
# $$
# ## Example: Linear Least Squares
# Fitting a regression line is a very simple example of least squares optimization. Here, we have data points $(x_i,y_i)$ with $1\leq i \leq n$. We wish to find the line
#
# $$f(x) = y = ax+b$$
#
# Such that the sum of squares of the errors are minimized. Thus, we find parameters $a,b$ such that:
#
# $$f(a,b) = \sum_{i=1}^n \left(y_i - ax_i -b\right)^2$$
#
# is minimized.
#
# We now move on to the details of some common root-finding algorithms. In practice, we are usually interested in multivariate optimization. We begin with the single variable case to develop the main ideas and then proceed to generalize to multi-dimensional problems.
# ## Main Issues in Root Finding in One Dimension
#
# * Separating close roots
# * Numerical Stability
# * Rate of Convergence
# * Continuity and Differentiability
# ## Bisection Method
# The bisection method is one of the simplest methods for finding zeroes of a non-linear function. It is guaranteed to find a root - but it can be slow. The main idea comes from the intermediate value theorem: If $f(a)$ and $f(b)$ have different signs and $f$ is continuous, then $f$ must have a zero between $a$ and $b$. We evaluate the function at the midpoint, $c = \frac12(a+b)$. $f(c)$ is either zero, has the same sign as $f(a)$ or the same sign as $f(b)$. Suppose $f(c)$ has the same sign as $f(a)$ (as pictured below). We then repeat the process on the interval $[c,b]$.
# +
def f(x):
    """Demo cubic for the bisection example: x**3 + 4*x**2 - 3."""
    return x * x * (x + 4) - 3
# Plot the cubic and the initial bisection bracket [a, b] with midpoint c.
x = np.linspace(-3.1, 0, 100)
plt.plot(x, x**3 + 4*x**2 -3)
a = -3.0
b = -0.5
c = 0.5*(a+b)
plt.text(a,-1,"a")
plt.text(b,-1,"b")
plt.text(c,-1,"c")
# Open circles mark function values; red dots mark the x-axis positions.
plt.scatter([a,b,c], [f(a), f(b),f(c)], s=50, facecolors='none')
plt.scatter([a,b,c], [0,0,0], s=50, c='red')
xaxis = plt.axhline(0);
# +
# One bisection step later: the bracket has shrunk to [c, b] and the new
# midpoint d = (b + c) / 2 is evaluated next.
x = np.linspace(-3.1, 0, 100)
plt.plot(x, x**3 + 4*x**2 -3)
d = 0.5*(b+c)
plt.text(d,-1,"d")
plt.text(b,-1,"b")
plt.text(c,-1,"c")
plt.scatter([d,b,c], [f(d), f(b),f(c)], s=50, facecolors='none')
plt.scatter([d,b,c], [0,0,0], s=50, c='red')
xaxis = plt.axhline(0);
# -
# We can terminate the process whenever the function evaluated at the new midpoint is 'close enough' to zero. This method is an example of what are known as 'bracketed methods'. This means the root is 'bracketed' by the end-points (it is somewhere in between). Another class of methods are 'open methods' - the root need not be somewhere in between the end-points (but it usually needs to be close!)
# ## Secant Method
# The secant method also begins with two initial points, but without the constraint that the function values are of opposite signs. We use the secant line to extrapolate the next candidate point.
# +
def f(x):
    """Rational demo function (x**3 - 2*x + 7) / (x**4 + 2)."""
    numerator = x**3 - 2*x + 7
    denominator = x**4 + 2
    return numerator / denominator
x = np.arange(-3,5, 0.1);
y = f(x)
p1=plt.plot(x, y)
plt.xlim(-3, 4)
plt.ylim(-.5, 4)
plt.xlabel('x')
plt.axhline(0)
t = np.arange(-10, 5., 0.1)
# Two starting points for the secant method (no sign condition required).
x0=-1.2
x1=-0.5
xvals = []
xvals.append(x0)
xvals.append(x1)
notconverge = 1
count = 0
cols=['r--','b--','g--','y--']
# Draw up to three secant iterations; each secant line's x-intercept
# becomes the next iterate.
while (notconverge==1 and count < 3):
    slope=(f(xvals[count+1])-f(xvals[count]))/(xvals[count+1]-xvals[count])
    intercept=-slope*xvals[count+1]+f(xvals[count+1])
    plt.plot(t, slope*t + intercept, cols[count])
    nextval = -intercept/slope
    # Stop once the function value at the new point is close enough to zero.
    if abs(f(nextval)) < 0.001:
        notconverge=0
    else:
        xvals.append(nextval)
    count = count+1
plt.show()
# -
# The secant method has the advantage of fast convergence. While the bisection method has a linear convergence rate (i.e. error goes to zero at the rate that $h(x) = x$ goes to zero, the secant method has a convergence rate that is faster than linear, but not quite quadratic (i.e. $\sim x^\alpha$, where $\alpha = \frac{1+\sqrt{5}}2 \approx 1.6$)
# ## Newton-Raphson Method
# We want to find the value $\theta$ so that some (differentiable) function $g(\theta)=0$.
# Idea: start with a guess, $\theta_0$. Let $\tilde{\theta}$ denote the value of $\theta$ for which $g(\theta) = 0$ and define $h = \tilde{\theta} - \theta_0$. Then:
#
# $$
# \begin{eqnarray*}
# g(\tilde{\theta}) &=& 0 \\\\
# &=&g(\theta_0 + h) \\\\
# &\approx& g(\theta_0) + hg'(\theta_0)
# \end{eqnarray*}
# $$
#
# This implies that
#
# $$ h\approx \frac{g(\theta_0)}{g'(\theta_0)}$$
#
# So that
#
# $$\tilde{\theta}\approx \theta_0 - \frac{g(\theta_0)}{g'(\theta_0)}$$
#
# Thus, we set our next approximation:
#
# $$\theta_1 = \theta_0 - \frac{g(\theta_0)}{g'(\theta_0)}$$
#
# and we have developed an interative procedure with:
#
# $$\theta_n = \theta_{n-1} - \frac{g(\theta_{n-1})}{g'(\theta_{n-1})}$$
# #### Example:
# Let $$g(x) = \frac{x^3-2x+7}{x^4+2}$$
#
# The graph of this function is:
# +
# Plot g(x) = (x^3 - 2x + 7) / (x^4 + 2) over the region of interest.
x = np.arange(-5,5, 0.1);
y = (x**3-2*x+7)/(x**4+2)
p1=plt.plot(x, y)
plt.xlim(-4, 4)
plt.ylim(-.5, 4)
plt.xlabel('x')
plt.axhline(0)
plt.title('Example Function')
plt.show()
# +
x = np.arange(-5,5, 0.1);
y = (x**3-2*x+7)/(x**4+2)
p1=plt.plot(x, y)
plt.xlim(-4, 4)
plt.ylim(-.5, 4)
plt.xlabel('x')
plt.axhline(0)
plt.title('Good Guess')
t = np.arange(-5, 5., 0.1)
# Starting guess near the root at roughly -2.
x0=-1.5
xvals = []
xvals.append(x0)
notconverge = 1
count = 0
cols=['r--','b--','g--','y--','c--','m--','k--','w--']
# Newton iterations: draw each tangent; its x-intercept is the next iterate.
while (notconverge==1 and count < 6):
    funval=(xvals[count]**3-2*xvals[count]+7)/(xvals[count]**4+2)
    # Derivative of (x^3 - 2x + 7) / (x^4 + 2) by the quotient rule.
    slope=-((4*xvals[count]**3 *(7 - 2 *xvals[count] + xvals[count]**3))/(2 + xvals[count]**4)**2) + (-2 + 3 *xvals[count]**2)/(2 + xvals[count]**4)
    intercept=-slope*xvals[count]+(xvals[count]**3-2*xvals[count]+7)/(xvals[count]**4+2)
    plt.plot(t, slope*t + intercept, cols[count])
    nextval = -intercept/slope
    if abs(funval) < 0.01:
        notconverge=0
    else:
        xvals.append(nextval)
    count = count+1
plt.show()
# -
# From the graph, we see the zero is near -2. We make an initial guess of $$x=-1.5$$
# We have made an excellent choice for our first guess, and we can see rapid convergence!
# Final function value reached by the iteration above.
funval
# In fact, the Newton-Raphson method converges quadratically. However, NR (and the secant method) have a fatal flaw:
# +
x = np.arange(-5,5, 0.1);
y = (x**3-2*x+7)/(x**4+2)
p1=plt.plot(x, y)
plt.xlim(-4, 4)
plt.ylim(-.5, 4)
plt.xlabel('x')
plt.axhline(0)
plt.title('Bad Guess')
t = np.arange(-5, 5., 0.1)
# A poor starting guess: the tangent sends the iterates away from the root,
# demonstrating Newton's failure mode near the flat region.
x0=-0.5
xvals = []
xvals.append(x0)
notconverge = 1
count = 0
cols=['r--','b--','g--','y--','c--','m--','k--','w--']
while (notconverge==1 and count < 6):
    funval=(xvals[count]**3-2*xvals[count]+7)/(xvals[count]**4+2)
    slope=-((4*xvals[count]**3 *(7 - 2 *xvals[count] + xvals[count]**3))/(2 + xvals[count]**4)**2) + (-2 + 3 *xvals[count]**2)/(2 + xvals[count]**4)
    intercept=-slope*xvals[count]+(xvals[count]**3-2*xvals[count]+7)/(xvals[count]**4+2)
    plt.plot(t, slope*t + intercept, cols[count])
    nextval = -intercept/slope
    if abs(funval) < 0.01:
        notconverge = 0
    else:
        xvals.append(nextval)
    count = count+1
plt.show()
# -
# We have stumbled on the horizontal asymptote. The algorithm fails to converge.
# ### Basins of Attraction Can Be 'Close'
# +
def f(x):
    """Cubic with roots at -3, 1, and 4: x**3 - 2*x**2 - 11*x + 12."""
    return ((x - 2) * x - 11) * x + 12

def s(x):
    """First derivative of f: 3*x**2 - 4*x - 11."""
    return (3 * x - 4) * x - 11
x = np.arange(-5,5, 0.1);
p1=plt.plot(x, f(x))
plt.xlim(-4, 5)
plt.ylim(-20, 22)
plt.xlabel('x')
plt.axhline(0)
plt.title('Basin of Attraction')
t = np.arange(-5, 5., 0.1)
# Starting point; a nearby start (next cell) lands in a different basin.
x0=2.43
xvals = []
xvals.append(x0)
notconverge = 1
count = 0
cols=['r--','b--','g--','y--','c--','m--','k--','w--']
while (notconverge==1 and count < 6):
    funval = f(xvals[count])
    slope = s(xvals[count])
    intercept=-slope*xvals[count]+funval
    plt.plot(t, slope*t + intercept, cols[count])
    nextval = -intercept/slope
    if abs(funval) < 0.01:
        notconverge = 0
    else:
        xvals.append(nextval)
    count = count+1
plt.show()
# Root the iteration converged to.
xvals[count-1]
# +
p1=plt.plot(x, f(x))
plt.xlim(-4, 5)
plt.ylim(-20, 22)
plt.xlabel('x')
plt.axhline(0)
plt.title('Basin of Attraction')
t = np.arange(-5, 5., 0.1)
# Slightly different start than the previous cell — converges to a
# different root, showing how close the basins of attraction can be.
x0=2.349
xvals = []
xvals.append(x0)
notconverge = 1
count = 0
cols=['r--','b--','g--','y--','c--','m--','k--','w--']
while (notconverge==1 and count < 6):
    funval = f(xvals[count])
    slope = s(xvals[count])
    intercept=-slope*xvals[count]+funval
    plt.plot(t, slope*t + intercept, cols[count])
    nextval = -intercept/slope
    if abs(funval) < 0.01:
        notconverge = 0
    else:
        xvals.append(nextval)
    count = count+1
plt.show()
# Root the iteration converged to.
xvals[count-1]
# -
# ### Convergence Rate
#
# The following is a derivation of the convergence rate of the NR method:
#
#
# Suppose $x_k \; \rightarrow \; x^*$ and $g'(x^*) \neq 0$. Then we may write:
#
# $$x_k = x^* + \epsilon_k$$.
#
# Now expand $g$ at $x^*$:
#
# $$g(x_k) = g(x^*) + g'(x^*)\epsilon_k + \frac12 g''(x^*)\epsilon_k^2 + ...$$
# $$g'(x_k)=g'(x^*) + g''(x^*)\epsilon_k$$
#
# We have that
#
#
# \begin{eqnarray}
# \epsilon_{k+1} &=& \epsilon_k + \left(x_{k+1}-x_k\right)\\
# &=& \epsilon_k -\frac{g(x_k)}{g'(x_k)}\\
# &\approx & \frac{g'(x^*)\epsilon_k + \frac12g''(x^*)\epsilon_k^2}{g'(x^*)+g''(x^*)\epsilon_k}\\
# &\approx & \frac{g''(x^*)}{2g'(x^*)}\epsilon_k^2
# \end{eqnarray}
# ## Gauss-Newton
# For 1D, the Newton method is
# $$
# x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)}
# $$
#
# We can generalize to $k$ dimensions by
# $$
# x_{n+1} = x_n - J^{-1} f(x_n)
# $$
# where $x$ and $f(x)$ are now vectors, and $J^{-1}$ is the inverse Jacobian matrix. In general, the Jacobian is not a square matrix, and we use the generalized inverse $(J^TJ)^{-1}J^T$ instead, giving
# $$
# x_{n+1} = x_n - (J^TJ)^{-1}J^T f(x_n)
# $$
#
# In multivariate nonlinear estimation problems, we can find the vector of parameters $\beta$ by minimizing the residuals $r(\beta)$,
# $$
# \beta_{n+1} = \beta_n - (J^TJ)^{-1}J^T r(\beta_n)
# $$
# where the entries of the Jacobian matrix $J$ are
# $$
# J_{ij} = \frac{\partial r_i(\beta)}{\partial \beta_j}
# $$
# ## Inverse Quadratic Interpolation
# Inverse quadratic interpolation is a type of polynomial interpolation. Polynomial interpolation simply means we find the polynomial of least degree that fits a set of points. In quadratic interpolation, we use three points, and find the quadratic polynomial that passes through those three points.
#
#
# +
def f(x):
    """Quartic with a double root at -2 and simple roots at 0 and 2."""
    return x * (x - 2) * (x + 2) ** 2
x = np.arange(-5,5, 0.1);
plt.plot(x, f(x))
plt.xlim(-3.5, 0.5)
plt.ylim(-5, 16)
plt.xlabel('x')
plt.axhline(0)
plt.title("Quadratic Interpolation")
#First Interpolation
x0=np.array([-3,-2.5,-1.0])
y0=f(x0)
# Fit a quadratic through the three sample points.
f2 = interp1d(x0, y0,kind='quadratic')
#Plot parabola
xs = np.linspace(-3, -1, num=10000, endpoint=True)
plt.plot(xs, f2(xs))
#Plot first triplet
plt.plot(x0, f(x0),'ro');
plt.scatter(x0, f(x0), s=50, c='yellow');
#New x value
# Take the grid point where the fitted parabola is closest to zero.
xnew=xs[np.where(abs(f2(xs))==min(abs(f2(xs))))]
plt.scatter(np.append(xnew,xnew), np.append(0,f(xnew)), c='black');
#New triplet
x1=np.append([-3,-2.5],xnew)
y1=f(x1)
f2 = interp1d(x1, y1,kind='quadratic')
#New Parabola
xs = np.linspace(min(x1), max(x1), num=100, endpoint=True)
plt.plot(xs, f2(xs))
xnew=xs[np.where(abs(f2(xs))==min(abs(f2(xs))))]
plt.scatter(np.append(xnew,xnew), np.append(0,f(xnew)), c='green');
# -
# So that's the idea behind quadratic interpolation. Use a quadratic approximation, find the zero of interest, use that as a new point for the next quadratic approximation.
#
#
# Inverse quadratic interpolation means we do quadratic interpolation on the *inverse function*. So, if we are looking for a root of $f$, we approximate $f^{-1}(x)$ using quadratic interpolation. Note that the secant method can be viewed as a *linear* interpolation on the inverse of $f$. We can write:
#
# $$f^{-1}(y) = \frac{(y-f(x_n))(y-f(x_{n-1}))}{(f(x_{n-2})-f(x_{n-1}))(f(x_{n-2})-f(x_{n}))}x_{n-2} + \frac{(y-f(x_n))(y-f(x_{n-2}))}{(f(x_{n-1})-f(x_{n-2}))(f(x_{n-1})-f(x_{n}))}x_{n-1} + \frac{(y-f(x_{n-2}))(y-f(x_{n-1}))}{(f(x_{n})-f(x_{n-2}))(f(x_{n})-f(x_{n-1}))}x_{n}$$
#
# We use the above formula to find the next guess $x_{n+1}$ for a zero of $f$ (so $y=0$):
#
# $$x_{n+1} = \frac{f(x_n)f(x_{n-1})}{(f(x_{n-2})-f(x_{n-1}))(f(x_{n-2})-f(x_{n}))}x_{n-2} + \frac{f(x_n)f(x_{n-2})}{(f(x_{n-1})-f(x_{n-2}))(f(x_{n-1})-f(x_{n}))}x_{n-1} + \frac{f(x_{n-2})f(x_{n-1})}{(f(x_{n})-f(x_{n-2}))(f(x_{n})-f(x_{n-1}))}x_{n}$$
#
# We aren't so much interested in deriving this as we are understanding the procedure:
#
#
#
# +
x = np.arange(-5,5, 0.1);
plt.plot(x, f(x))
plt.xlim(-3.5, 0.5)
plt.ylim(-5, 16)
plt.xlabel('x')
plt.axhline(0)
plt.title("Inverse Quadratic Interpolation")
#First Interpolation
x0=np.array([-3,-2.5,1])
y0=f(x0)
# Interpolate x as a function of y — i.e. fit the *inverse* function.
f2 = interp1d(y0, x0,kind='quadratic')
#Plot parabola
xs = np.linspace(min(f(x0)), max(f(x0)), num=10000, endpoint=True)
plt.plot(f2(xs), xs)
#Plot first triplet
plt.plot(x0, f(x0),'ro');
plt.scatter(x0, f(x0), s=50, c='yellow');
# Convergence rate is approximately $1.8$. The advantage of the inverse method is that we will *always* have a real root (the parabola will always cross the x-axis). A serious disadvantage is that the initial points must be very close to the root or the method may not converge.
#
# That is why it is usually used in conjunction with other methods.
#
# ## Brent's Method
# Brent's method is a combination of bisection, secant and inverse quadratic interpolation. Like bisection, it is a 'bracketed' method (starts with points $(a,b)$ such that $f(a)f(b)<0$.
# Roughly speaking, the method begins by using the secant method to obtain a third point $c$, then uses inverse quadratic interpolation to generate the next possible root. Without going into too much detail, the algorithm attempts to assess when interpolation will go awry, and if so, performs a bisection step. Also, it has certain criteria to reject an iterate. If that happens, the next step will be linear interpolation (secant method).
# #### The Brent method is the default method that scipy uses to minimize a univariate function:
# +
from scipy.optimize import minimize_scalar
def f(x):
    # Quartic with a double root at -2 and simple roots at 0 and 2.
    return (x - 2) * x * (x + 2)**2
# minimize_scalar uses Brent's method by default for scalar minimization.
res = minimize_scalar(f)
res.x
# -
x = np.arange(-5,5, 0.1);
p1=plt.plot(x, f(x))
plt.xlim(-4, 4)
plt.ylim(-10, 20)
plt.xlabel('x')
plt.axhline(0)
# To find zeroes, use
#
# brentq needs a sign-changing bracket [a, b]; newton needs only a start.
scipy.optimize.brentq(f,-1,.5)
scipy.optimize.brentq(f,.5,3)
scipy.optimize.newton(f,-3)
| notebook/14A_Optimization_One_Dimension.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from utils import *
from external_packages import *
# Stacked feature set produced by an earlier preprocessing step
# (PowerTransformer-scaled); the first CSV column is the row index.
final_stackset=pd.read_csv(processed_data_folder+'/final_stackset_PowerTransformer.csv',index_col=0)
final_stackset_features=final_stackset.drop(['ID','to_predict'],axis=1)
final_stackset_target=final_stackset.loc[:,['to_predict']]
# Class balance of the target.
final_stackset_target.groupby('to_predict')['to_predict'].count()
X_train, X_test, y_train, y_test=train_test_split(final_stackset_features,final_stackset_target,
                            test_size=0.2,stratify=final_stackset_target,random_state =random_state )
# +
# Project onto 45 principal components, then build under- and over-sampled
# variants of the PCA-projected training set to compare sampling strategies.
pca = PCA(n_components=45)
X_train_pca=pca.fit_transform(X_train)
X_test_pca=pca.transform(X_test)
RUNS=under_sampling.RandomUnderSampler(random_state=random_state)
X_train_under,y_train_under=RUNS.fit_resample(X_train_pca,y_train)
ROVS=over_sampling.RandomOverSampler(random_state=random_state)
X_train_over,y_train_over=ROVS.fit_resample(X_train_pca,y_train)
# -
# Each entry: [train features, train target, test features, test target].
train_test_splits=[
    [X_train,y_train,X_test,y_test],
    [X_train_pca,y_train,X_test_pca,y_test],
    [X_train_under,y_train_under,X_test_pca,y_test],
    [X_train_over,y_train_over,X_test_pca,y_test],
]
# NOTE(review): `experiment` comes from the project's utils module —
# presumably fits and scores the model on each split; verify against utils.
modeling=experiment()
modeling.train_test_splits=train_test_splits
ETC=ExtraTreesClassifier(class_weight='balanced',random_state=random_state,min_weight_fraction_leaf=0.026842105263157893)
modeling.run_model(ETC)
modeling.plot_comp(cmap_name='autumn',figsize=(12,8),)
| 4-Modeling_Sampling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="5aElYAKlV2Mi"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="form" id="wmYJlt6LWVOU"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="L-8q8rRRWcp6"
# # TensorFlow アドオン ネットワーク : アテンションメカニズムを使用したシーケンスツーシーケンスのニューラル機械翻訳
#
# <table class="tfo-notebook-buttons" align="left">
# <td><a target="_blank" href="https://www.tensorflow.org/addons/tutorials/networks_seq2seq_nmt"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org で表示</a></td>
# <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/addons/tutorials/networks_seq2seq_nmt.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab で実行</a> </td>
# <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/addons/tutorials/networks_seq2seq_nmt.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub でソースを表示</a> </td>
# <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/addons/tutorials/networks_seq2seq_nmt.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a> </td>
# </table>
# + [markdown] id="9n0dcDw1Wszw"
# ## 概要
#
# このノートブックでは、***シーケンスツーシーケンスモデルのアーキテクチャ***について簡単に紹介しています。このノートブックでは、ニューラル機械翻訳に必要な 4 つの基本トピックを大まかに取り上げます。
#
# - **データのクリーニング**
# - **データの準備**
# - **アテンションを使用したニューラル翻訳モデル**
# - **`tf.addons.seq2seq.BasicDecoder` と `tf.addons.seq2seq.BeamSearchDecoder` を使用した最終翻訳**
#
# ただし、このようなモデルの背後にある基本的な考え方は、エンコーダとデコーダのアーキテクチャだけです。これらのネットワークは通常、テキスト要約、機械翻訳、画像キャプションなどの様々なタスクに使用されます。このチュートリアルでは、必要に応じて専門用語を説明しながら、概念を実践的に理解できるようにしています。シーケンスツーシーケンスモデルの最初のテストベッドである、ニューラル機械翻訳(NMT)のタスクに焦点を当てています。
#
# + [markdown] id="MpySVYWJhxaV"
# ## セットアップ
# + id="_kxfdP4hJUPB"
# !pip install tensorflow-addons==0.11.2
# + id="tnxXKDjq3jEL"
import tensorflow as tf
import tensorflow_addons as tfa
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from sklearn.model_selection import train_test_split
import unicodedata
import re
import numpy as np
import os
import io
import time
# + [markdown] id="Ii_vg-XNXTil"
# ## データのクリーニングと準備
#
# http://www.manythings.org/anki/ で提供されている言語データセットを使用します。このデータセットには、以下の形式の言語翻訳ペアが含まれます。
#
# ---
#
# ```
# May I borrow this book? ¿Puedo tomar prestado este libro?
# ```
#
# ---
#
# 多様な言語を使用できますが、ここでは英語とスペイン語のデータセットを使用します。データセットをダウンロードすると、以下の手順に従って、データを準備します。
#
# 1. 各文に、開始と終了のトークンを追加します。
# 2. 特殊文字を除去して、文をクリーニングします。
# 3. 単語インデックスを使用して語彙を作成し(単語から ID にマッピング)、単語インデックスを逆順にします(ID から単語にマッピング)。
# 4. 最大長に合わせて各文にパディングを設定します(反復エンコーダーへの入力に合わせて最大の長さを調整する必要があるためです)。
# + id="PvRnGWnvXm6l"
def download_nmt():
    """Download and extract the anki spa-eng corpus; return the path to spa.txt."""
    path_to_zip = tf.keras.utils.get_file(
        'spa-eng.zip', origin='http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip',
        extract=True)
    # The archive extracts next to the downloaded zip file.
    path_to_file = os.path.dirname(path_to_zip)+"/spa-eng/spa.txt"
    return path_to_file
# + [markdown] id="NFKB2c_tX4wU"
# ### 手順1から4を実行するために必要な関数を使用して、NMTDatasetクラスを定義する
#
# `call()` は以下を返します。
#
# 1. `train_dataset` と `val_dataset`: `tf.data.Dataset` オブジェクト
# 2. `inp_lang_tokenizer` と `targ_lang_tokenizer`: `tf.keras.preprocessing.text.Tokenizer` オブジェクト
# + id="JMAHz7kJXc5N"
class NMTDataset:
    """Builds the spa-eng translation dataset: cleaning, tokenizing and
    batching (steps 1-4 described above)."""
    def __init__(self, problem_type='en-spa'):
        # BUG FIX: the constructor previously ignored its argument and
        # hard-coded 'en-spa'; store the caller-supplied value instead
        # (the default is unchanged, so existing callers behave identically).
        self.problem_type = problem_type
        self.inp_lang_tokenizer = None
        self.targ_lang_tokenizer = None
    def unicode_to_ascii(self, s):
        """Strip combining marks (accents) after NFD-normalizing `s`."""
        return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')
    ## Step 1 and Step 2
    def preprocess_sentence(self, w):
        """Lower-case, de-accent and punctuation-pad a sentence, then wrap
        it in <start>/<end> tokens."""
        w = self.unicode_to_ascii(w.lower().strip())
        # creating a space between a word and the punctuation following it
        # eg: "he is a boy." => "he is a boy ."
        # Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation
        w = re.sub(r"([?.!,¿])", r" \1 ", w)
        w = re.sub(r'[" "]+', " ", w)
        # replacing everything with space except (a-z, A-Z, ".", "?", "!", ",")
        w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w)
        w = w.strip()
        # adding a start and an end token to the sentence
        # so that the model know when to start and stop predicting.
        w = '<start> ' + w + ' <end>'
        return w
    def create_dataset(self, path, num_examples):
        """Read the tab-separated corpus and return preprocessed
        (target_sentences, input_sentences) sequences."""
        # path : path to spa-eng.txt file
        # num_examples : Limit the total number of training example for faster training (set num_examples = len(lines) to use full data)
        lines = io.open(path, encoding='UTF-8').read().strip().split('\n')
        word_pairs = [[self.preprocess_sentence(w) for w in l.split('\t')] for l in lines[:num_examples]]
        return zip(*word_pairs)
    # Step 3 and Step 4
    def tokenize(self, lang):
        """Fit a word-level tokenizer on `lang`; return the post-padded
        integer-id tensor and the fitted tokenizer."""
        # lang = list of sentences in a language
        lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='', oov_token='<OOV>')
        lang_tokenizer.fit_on_texts(lang)
        ## texts_to_sequences converts each sentence to a list of integer word ids
        tensor = lang_tokenizer.texts_to_sequences(lang)
        ## pad_sequences pads every sequence to the length of the longest one
        tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor, padding='post')
        return tensor, lang_tokenizer
    def load_dataset(self, path, num_examples=None):
        """Read, clean and tokenize the corpus; return both tensors and
        both fitted tokenizers."""
        # creating cleaned input, output pairs
        targ_lang, inp_lang = self.create_dataset(path, num_examples)
        input_tensor, inp_lang_tokenizer = self.tokenize(inp_lang)
        target_tensor, targ_lang_tokenizer = self.tokenize(targ_lang)
        return input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer
    def call(self, num_examples, BUFFER_SIZE, BATCH_SIZE):
        """Download the data and return (train_dataset, val_dataset,
        inp_lang_tokenizer, targ_lang_tokenizer) with an 80/20 split."""
        file_path = download_nmt()
        input_tensor, target_tensor, self.inp_lang_tokenizer, self.targ_lang_tokenizer = self.load_dataset(file_path, num_examples)
        input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2)
        train_dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train))
        train_dataset = train_dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
        val_dataset = tf.data.Dataset.from_tensor_slices((input_tensor_val, target_tensor_val))
        val_dataset = val_dataset.batch(BATCH_SIZE, drop_remainder=True)
        return train_dataset, val_dataset, self.inp_lang_tokenizer, self.targ_lang_tokenizer
# + id="EIW4NVBmJ25k"
BUFFER_SIZE = 32000
BATCH_SIZE = 64
# Let's limit the #training examples for faster training
num_examples = 30000
dataset_creator = NMTDataset('en-spa')
train_dataset, val_dataset, inp_lang, targ_lang = dataset_creator.call(num_examples, BUFFER_SIZE, BATCH_SIZE)
# + id="w2lCTy4vKOkB"
# Peek at one batch to confirm the (batch, sequence) shapes.
example_input_batch, example_target_batch = next(iter(train_dataset))
example_input_batch.shape, example_target_batch.shape
# + [markdown] id="rgCLkfv5uO3d"
# ### いくつかの重要なパラメーター
# + id="TqHsArVZ3jFS"
# +1 because the tokenizers assign word ids starting at 1; id 0 is the pad.
vocab_inp_size = len(inp_lang.word_index)+1
vocab_tar_size = len(targ_lang.word_index)+1
# Sequence lengths taken from the padded example batch.
max_length_input = example_input_batch.shape[1]
max_length_output = example_target_batch.shape[1]
embedding_dim = 256
units = 1024
steps_per_epoch = num_examples//BATCH_SIZE
# + id="g-yY9c6aIu1h"
print("max_length_english, max_length_spanish, vocab_size_english, vocab_size_spanish")
max_length_input, max_length_output, vocab_inp_size, vocab_tar_size
# + id="nZ2rI24i3jFg"
#####
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
super(Encoder, self).__init__()
self.batch_sz = batch_sz
self.enc_units = enc_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
##-------- LSTM layer in Encoder ------- ##
self.lstm_layer = tf.keras.layers.LSTM(self.enc_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
def call(self, x, hidden):
x = self.embedding(x)
output, h, c = self.lstm_layer(x, initial_state = hidden)
return output, h, c
def initialize_hidden_state(self):
return [tf.zeros((self.batch_sz, self.enc_units)), tf.zeros((self.batch_sz, self.enc_units))]
# + id="60gSVh05Jl6l"
## Test Encoder Stack
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
# sample input
sample_hidden = encoder.initialize_hidden_state()
sample_output, sample_h, sample_c = encoder(example_input_batch, sample_hidden)
print ('Encoder output shape: (batch size, sequence length, units) {}'.format(sample_output.shape))
print ('Encoder h vecotr shape: (batch size, units) {}'.format(sample_h.shape))
print ('Encoder c vector shape: (batch size, units) {}'.format(sample_c.shape))
# + id="yJ_B3mhW3jFk"
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz, attention_type='luong'):
super(Decoder, self).__init__()
self.batch_sz = batch_sz
self.dec_units = dec_units
self.attention_type = attention_type
# Embedding Layer
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
#Final Dense layer on which softmax will be applied
self.fc = tf.keras.layers.Dense(vocab_size)
# Define the fundamental cell for decoder recurrent structure
self.decoder_rnn_cell = tf.keras.layers.LSTMCell(self.dec_units)
# Sampler
self.sampler = tfa.seq2seq.sampler.TrainingSampler()
# Create attention mechanism with memory = None
self.attention_mechanism = self.build_attention_mechanism(self.dec_units,
None, self.batch_sz*[max_length_input], self.attention_type)
# Wrap attention mechanism with the fundamental rnn cell of decoder
self.rnn_cell = self.build_rnn_cell(batch_sz)
# Define the decoder with respect to fundamental rnn cell
self.decoder = tfa.seq2seq.BasicDecoder(self.rnn_cell, sampler=self.sampler, output_layer=self.fc)
def build_rnn_cell(self, batch_sz):
rnn_cell = tfa.seq2seq.AttentionWrapper(self.decoder_rnn_cell,
self.attention_mechanism, attention_layer_size=self.dec_units)
return rnn_cell
def build_attention_mechanism(self, dec_units, memory, memory_sequence_length, attention_type='luong'):
# ------------- #
# typ: Which sort of attention (Bahdanau, Luong)
# dec_units: final dimension of attention outputs
# memory: encoder hidden states of shape (batch_size, max_length_input, enc_units)
# memory_sequence_length: 1d array of shape (batch_size) with every element set to max_length_input (for masking purpose)
if(attention_type=='bahdanau'):
return tfa.seq2seq.BahdanauAttention(units=dec_units, memory=memory, memory_sequence_length=memory_sequence_length)
else:
return tfa.seq2seq.LuongAttention(units=dec_units, memory=memory, memory_sequence_length=memory_sequence_length)
def build_initial_state(self, batch_sz, encoder_state, Dtype):
decoder_initial_state = self.rnn_cell.get_initial_state(batch_size=batch_sz, dtype=Dtype)
decoder_initial_state = decoder_initial_state.clone(cell_state=encoder_state)
return decoder_initial_state
def call(self, inputs, initial_state):
x = self.embedding(inputs)
outputs, _, _ = self.decoder(x, initial_state=initial_state, sequence_length=self.batch_sz*[max_length_output-1])
return outputs
# + id="DaiO0Z6_Ml1c"
# Test decoder stack
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE, 'luong')
sample_x = tf.random.uniform((BATCH_SIZE, max_length_output))
decoder.attention_mechanism.setup_memory(sample_output)
initial_state = decoder.build_initial_state(BATCH_SIZE, [sample_h, sample_c], tf.float32)
sample_decoder_outputs = decoder(sample_x, initial_state)
print("Decoder Outputs Shape: ", sample_decoder_outputs.rnn_output.shape)
# + [markdown] id="_ch_71VbIRfK"
# ## オプティマイザと損失関数を定義する
# + id="WmTHr5iV3jFr"
optimizer = tf.keras.optimizers.Adam()
def loss_function(real, pred):
# real shape = (BATCH_SIZE, max_length_output)
# pred shape = (BATCH_SIZE, max_length_output, tar_vocab_size )
cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
loss = cross_entropy(y_true=real, y_pred=pred)
mask = tf.logical_not(tf.math.equal(real,0)) #output 0 for y=0 else output 1
mask = tf.cast(mask, dtype=loss.dtype)
loss = mask* loss
loss = tf.reduce_mean(loss)
return loss
# + [markdown] id="DMVWzzsfNl4e"
# ## チェックポイント(オブジェクトベースの保存)
# + id="Zj8bXQTgNwrF"
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
encoder=encoder,
decoder=decoder)
# + [markdown] id="8Bw95utNiFHa"
# ## 1 つの train_step 演算
# + id="sC9ArXSsVfqn"
@tf.function
def train_step(inp, targ, enc_hidden):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_h, enc_c = encoder(inp, enc_hidden)
dec_input = targ[ : , :-1 ] # Ignore <end> token
real = targ[ : , 1: ] # ignore <start> token
# Set the AttentionMechanism object with encoder_outputs
decoder.attention_mechanism.setup_memory(enc_output)
# Create AttentionWrapperState as initial_state for decoder
decoder_initial_state = decoder.build_initial_state(BATCH_SIZE, [enc_h, enc_c], tf.float32)
pred = decoder(dec_input, decoder_initial_state)
logits = pred.rnn_output
loss = loss_function(real, logits)
variables = encoder.trainable_variables + decoder.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return loss
# + [markdown] id="pey8eb9piMMg"
# ## モデルのトレーニング
# + id="ddefjBMa3jF0"
EPOCHS = 10
for epoch in range(EPOCHS):
start = time.time()
enc_hidden = encoder.initialize_hidden_state()
total_loss = 0
# print(enc_hidden[0].shape, enc_hidden[1].shape)
for (batch, (inp, targ)) in enumerate(train_dataset.take(steps_per_epoch)):
batch_loss = train_step(inp, targ, enc_hidden)
total_loss += batch_loss
if batch % 100 == 0:
print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
batch,
batch_loss.numpy()))
# saving (checkpoint) the model every 2 epochs
if (epoch + 1) % 2 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print('Epoch {} Loss {:.4f}'.format(epoch + 1,
total_loss / steps_per_epoch))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
# + [markdown] id="mU3Ce8M6I3rz"
# ## tf-addons の BasicDecoder を使ってデコードする
#
# + id="EbQpyYs13jF_"
def evaluate_sentence(sentence):
sentence = dataset_creator.preprocess_sentence(sentence)
inputs = [inp_lang.word_index[i] for i in sentence.split(' ')]
inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],
maxlen=max_length_input,
padding='post')
inputs = tf.convert_to_tensor(inputs)
inference_batch_size = inputs.shape[0]
result = ''
enc_start_state = [tf.zeros((inference_batch_size, units)), tf.zeros((inference_batch_size,units))]
enc_out, enc_h, enc_c = encoder(inputs, enc_start_state)
dec_h = enc_h
dec_c = enc_c
start_tokens = tf.fill([inference_batch_size], targ_lang.word_index['<start>'])
end_token = targ_lang.word_index['<end>']
greedy_sampler = tfa.seq2seq.GreedyEmbeddingSampler()
# Instantiate BasicDecoder object
decoder_instance = tfa.seq2seq.BasicDecoder(cell=decoder.rnn_cell, sampler=greedy_sampler, output_layer=decoder.fc)
# Setup Memory in decoder stack
decoder.attention_mechanism.setup_memory(enc_out)
# set decoder_initial_state
decoder_initial_state = decoder.build_initial_state(inference_batch_size, [enc_h, enc_c], tf.float32)
### Since the BasicDecoder wraps around Decoder's rnn cell only, you have to ensure that the inputs to BasicDecoder
### decoding step is output of embedding layer. tfa.seq2seq.GreedyEmbeddingSampler() takes care of this.
### You only need to get the weights of embedding layer, which can be done by decoder.embedding.variables[0] and pass this callabble to BasicDecoder's call() function
decoder_embedding_matrix = decoder.embedding.variables[0]
outputs, _, _ = decoder_instance(decoder_embedding_matrix, start_tokens = start_tokens, end_token= end_token, initial_state=decoder_initial_state)
return outputs.sample_id.numpy()
def translate(sentence):
result = evaluate_sentence(sentence)
print(result)
result = targ_lang.sequences_to_texts(result)
print('Input: %s' % (sentence))
print('Predicted translation: {}'.format(result))
# + [markdown] id="n250XbnjOaqP"
# ## 最新のチェックポイントを復元してテストする
# + id="UJpT9D5_OgP6"
# restoring the latest checkpoint in checkpoint_dir
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
# + id="WYmYhNN_faR5"
translate(u'hace mucho frio aqui.')
# + id="zSx2iM36EZQZ"
translate(u'esta es mi vida.')
# + id="A3LLCx3ZE0Ls"
translate(u'¿todavia estan en casa?')
# + id="DUQVLVqUE1YW"
# wrong translation
translate(u'trata de averiguarlo.')
# + [markdown] id="IRUuNDeY0HiC"
# ## tf-addons BeamSearchDecoder を使用する
#
# + id="AJ-RTQ0hsJNL"
def beam_evaluate_sentence(sentence, beam_width=3):
sentence = dataset_creator.preprocess_sentence(sentence)
inputs = [inp_lang.word_index[i] for i in sentence.split(' ')]
inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],
maxlen=max_length_input,
padding='post')
inputs = tf.convert_to_tensor(inputs)
inference_batch_size = inputs.shape[0]
result = ''
enc_start_state = [tf.zeros((inference_batch_size, units)), tf.zeros((inference_batch_size,units))]
enc_out, enc_h, enc_c = encoder(inputs, enc_start_state)
dec_h = enc_h
dec_c = enc_c
start_tokens = tf.fill([inference_batch_size], targ_lang.word_index['<start>'])
end_token = targ_lang.word_index['<end>']
# From official documentation
# NOTE If you are using the BeamSearchDecoder with a cell wrapped in AttentionWrapper, then you must ensure that:
# The encoder output has been tiled to beam_width via tfa.seq2seq.tile_batch (NOT tf.tile).
# The batch_size argument passed to the get_initial_state method of this wrapper is equal to true_batch_size * beam_width.
# The initial state created with get_initial_state above contains a cell_state value containing properly tiled final state from the encoder.
enc_out = tfa.seq2seq.tile_batch(enc_out, multiplier=beam_width)
decoder.attention_mechanism.setup_memory(enc_out)
print("beam_with * [batch_size, max_length_input, rnn_units] : 3 * [1, 16, 1024]] :", enc_out.shape)
# set decoder_inital_state which is an AttentionWrapperState considering beam_width
hidden_state = tfa.seq2seq.tile_batch([enc_h, enc_c], multiplier=beam_width)
decoder_initial_state = decoder.rnn_cell.get_initial_state(batch_size=beam_width*inference_batch_size, dtype=tf.float32)
decoder_initial_state = decoder_initial_state.clone(cell_state=hidden_state)
# Instantiate BeamSearchDecoder
decoder_instance = tfa.seq2seq.BeamSearchDecoder(decoder.rnn_cell,beam_width=beam_width, output_layer=decoder.fc)
decoder_embedding_matrix = decoder.embedding.variables[0]
# The BeamSearchDecoder object's call() function takes care of everything.
outputs, final_state, sequence_lengths = decoder_instance(decoder_embedding_matrix, start_tokens=start_tokens, end_token=end_token, initial_state=decoder_initial_state)
# outputs is tfa.seq2seq.FinalBeamSearchDecoderOutput object.
# The final beam predictions are stored in outputs.predicted_id
# outputs.beam_search_decoder_output is a tfa.seq2seq.BeamSearchDecoderOutput object which keep tracks of beam_scores and parent_ids while performing a beam decoding step
# final_state = tfa.seq2seq.BeamSearchDecoderState object.
# Sequence Length = [inference_batch_size, beam_width] details the maximum length of the beams that are generated
# outputs.predicted_id.shape = (inference_batch_size, time_step_outputs, beam_width)
# outputs.beam_search_decoder_output.scores.shape = (inference_batch_size, time_step_outputs, beam_width)
# Convert the shape of outputs and beam_scores to (inference_batch_size, beam_width, time_step_outputs)
final_outputs = tf.transpose(outputs.predicted_ids, perm=(0,2,1))
beam_scores = tf.transpose(outputs.beam_search_decoder_output.scores, perm=(0,2,1))
return final_outputs.numpy(), beam_scores.numpy()
# + id="g_LvXGvX8X-O"
def beam_translate(sentence):
result, beam_scores = beam_evaluate_sentence(sentence)
print(result.shape, beam_scores.shape)
for beam, score in zip(result, beam_scores):
print(beam.shape, score.shape)
output = targ_lang.sequences_to_texts(beam)
output = [a[:a.index('<end>')] for a in output]
beam_score = [a.sum() for a in score]
print('Input: %s' % (sentence))
for i in range(len(output)):
print('{} Predicted translation: {} {}'.format(i+1, output[i], beam_score[i]))
# + id="TODnXBleDzzO"
beam_translate(u'hace mucho frio aqui.')
# + id="_BezQwENFY3L"
beam_translate(u'¿todavia estan en casa?')
| site/ja/addons/tutorials/networks_seq2seq_nmt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from pyspark import SparkConf, SparkContext
conf = SparkConf().setMaster('local').setAppName('PopularMovies')
sc = SparkContext(conf=conf)
line = sc.textFile('/home/ubuntu/spark_project/01-basics_andexampl/ml-100k/u.data')
movies = line.map(lambda x: (int(x.split()[1]), 1))
movies_counts = movies.reduceByKey(lambda x,y: x + y)
sorted_movies = movies_counts.sortBy(lambda x: x[1], ascending=False)
results = sorted_movies.collect()
for result in results:
print('movief_id: {} has {} watchers'.format(result[0], result[1]))
# # BROADCAST VARIABLES
# ```
# Broadcast objects to the executors, such that they're always
# there whenever needed
# ```
# ```
# Just use sc.broadcast() to ship off whatever you want
# ```
# ```python
# Then
# use.value() to get the object back
# ```
# # Improved version
def load_movie_name():
movie_name = {}
with open('/home/ubuntu/spark_project/01-basics_andexampl/ml-100k/u.item') as f:
for line in f:
fields = line.split('|')
movie_name[int(fields[0])] = fields[1]
return movie_name
name_dict = sc.broadcast(load_movie_name())
lines = sc.textFile('/home/ubuntu/spark_project/01-basics_andexampl/ml-100k/u.data')
movies = lines.map(lambda x: (int(x.split()[1], 1))
movie_count = movies.reduceByKey(lambda x,y: x+y)
sorted_movie_count = movies_counts.sortBy(lambda x: x[1], ascending=False)
sorted_movie_with_name = sorted_movie_count.map(lambda :(name_dict.value[x[0]], x[1]))
results = sorted_movie_with_name.collect()
for result in results:
print(result[0], " ", result[1])
# +
def loadMovieNames():
movieNames = {}
with open("/home/ubuntu/spark_project/01-basics_andexampl/ml-100k/u.item") as f:
for line in f:
if isinstance(line, str):
fields = line.split('|')
movieNames[int(fields[0])] = fields[1]
return movieNames
nameDict = sc.broadcast(loadMovieNames())
lines = sc.textFile("/home/ubuntu/spark_project/01-basics_andexampl/ml-100k/u.data")
movies = lines.map(lambda x: (int(x.split()[1], 1)))
movies = lines.map(lambda x: (int(x.split()[1]), 1))
movieCounts = movies.reduceByKey(lambda x, y: x + y)
flipped = movieCounts.map( lambda x : (x[1], x[0]))
sortedMovies = flipped.sortByKey()
sortedMoviesWithNames = sortedMovies.map(lambda countMovie : (nameDict.value[countMovie[1]], countMovie[0]))
results = sortedMoviesWithNames.collect()
for result in results:
print (result)
# -
| 02-Advanced_Examples_of_Spark_Programs/Popular_Movies.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/main/notebooks/cmyk-magic.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/", "height": 310} id="lIYdn1woOS1n" outputId="07d63083-8c16-4379-a7b6-58f5926511d1"
import pdf2image
# + colab={"base_uri": "https://localhost:8080/"} id="qD6GDkgWypCM" outputId="71dd05c1-e15b-4556-eb47-80d2e58dadac"
# !pip install pdf2image
# + id="uUfmQqvT3pML" outputId="6784e2ef-c8c8-4af4-875c-b340a659ff50" colab={"base_uri": "https://localhost:8080/"}
from google.colab import drive
drive.mount('/content/drive')
# + id="CgRH6uKy3xkj" outputId="3b0b56a2-7e7e-401a-a99d-6b72a5d4fe89" colab={"base_uri": "https://localhost:8080/"}
# !ls /content/drive/MyDrive/MLAPA
# + id="D3DOb8JlyrjW"
from pdf2image import convert_from_path
# + colab={"base_uri": "https://localhost:8080/"} id="zRyb4TeezRiC" outputId="fc75478b-f8f2-46c8-c603-cf460e0e3108"
# !ls
# + colab={"base_uri": "https://localhost:8080/"} id="aZot-9nDzZSh" outputId="590f61c4-7ad3-48e5-d689-6243e008a3c8"
# !sudo apt-get install poppler-utils
# + id="Tizd6JxIyw_P"
input_path="/content/2dgridDAGa.pdf"; temp_file_path="/content/temp2dgridDAGa";
out = convert_from_path(input_path,output_file=temp_file_path, use_pdftocairo=True, fmt="png",single_file=True)
# + colab={"base_uri": "https://localhost:8080/"} id="RqJH9PDY0A5w" outputId="6dc45038-80c2-476c-9fbc-6cda41536d36"
# !ls
# + id="tcFSkwAC08us"
import os
import shutil
from pdf2image import convert_from_path
from PIL import Image
from PIL import ImageCms
import argparse
from glob import glob
from tqdm import tqdm
import functools
import multiprocessing
import concurrent.futures
def split_file_name(input_path):
base_name, dir_name = os.path.basename(input_path),os.path.dirname(input_path)
file_name,ext = os.path.splitext(os.path.basename(base_name))
return base_name, dir_name, file_name, ext
def convert(input_path,output_path,color_space="CMYK",input_profile_path=None,output_profile_path=None,quality=100,verbose=False,overwrite=False):
""" converts an image or pdf into a color space of choice
for CMYK the default output format is JPG
Keyword arguments:
input_path -- the input path of the file
output_path -- the output path for the result to be written.
color_space -- the color space to convert to , default value is CMYK
input_profile_path -- the path to the input profile
output_profile_path -- the path to the output profile
"""
try:
if not overwrite and os.path.exists(output_path):
return True
if input_path.endswith(".pdf") or input_path.endswith(".PDF"):
_, dir_name, file_name, _ =split_file_name(output_path)
temp_file_name="temp"+file_name
temp_file_path=os.path.join(dir_name,temp_file_name)
print('input', input_path)
print('output', temp_file_path)
print('call convert ')
#convert_from_path(input_path,output_file=temp_file_path,fmt="png",use_pdftocairo=True,single_file=True)
convert_from_path(input_path,output_file=temp_file_path,fmt="png",single_file=True)
temp_file_path+=".png"
print(temp_file_path)
_convert_profiles(temp_file_path,output_path,color_space=color_space,input_profile_path=input_profile_path,output_profile_path=output_profile_path,quality=quality)
#os.remove(temp_file_path)
print('done')
return True
elif input_path.endswith(".png") or input_path.endswith(".PNG") or \
input_path.endswith(".jpg") or input_path.endswith(".JPG") or \
input_path.endswith(".jpeg") or input_path.endswith(".JPEG") :
print('else block')
return _convert_profiles(input_path,output_path,color_space=color_space,input_profile_path=input_profile_path,output_profile_path=output_profile_path,quality=quality)
else:
print(f"{input_path} is not a valid image file, copying it instead to {output_path}.")
shutil.copy(input_path,output_path)
return False
except Exception as e:
print('exception')
if verbose:
print(f"Error in file: {input_path}\n",e)
return False
def _convert_profiles(input_path=None,output_path=None,color_space="CMYK",input_profile_path=None,output_profile_path=None,quality="100"):
try:
with Image.open(input_path) as im:
img_cmyk = ImageCms.profileToProfile(im, input_profile_path, output_profile_path, renderingIntent=0,outputMode=color_space)
quality=int(quality)
img_cmyk.save(output_path, quality=quality)
return True
except Exception as e:
print(e)
print(f"cannot convert{input_path}, copying it instead.")
shutil.copy(input_path,output_path)
return False
# + id="GL3t92sN4QQ2"
import os
# + id="iLrfkajD4GBx" outputId="a836f8a1-4408-4de2-e94c-87f0435e879a" colab={"base_uri": "https://localhost:8080/"}
input_path="/content/2dgridDAGa.pdf";
temp_file_path="/content/2dgridDAGa";
convert(input_path,
temp_file_path,
color_space="RGB",
quality=80,
verbose=True,
input_profile_path='/content/drive/MyDrive/MLAPA/sRGB Color Space Profile.icm',
output_profile_path='/content/drive/MyDrive/MLAPA/sRGB Color Space Profile.icm')
# + id="vQk45N3O474s" outputId="465c77a7-385e-47b5-fd43-5860f7f1a3c4" colab={"base_uri": "https://localhost:8080/"}
# !ls
# + id="tIQWPYQN5NLX"
| notebooks/cmyk-magic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # EXERCISE — HCIP calculation
#
# ## Gross rock volume
# +
thick = 80 # metres
area = 20000 * 30000 # metres
grv = thick * area
grv
# -
from IPython import display
display.Image("http://subsurfwiki.org/images/6/66/Geometric_correction_factor.png", width=600)
# +
height = 100
ratio = thick / height
top = 'slab'
if top == 'round':
g = -0.6 * ratio + 1
elif top == 'flat':
g = -0.3 * ratio + 1
else:
g = 1
g
# -
grv *= g
# ## HC pore volume
# +
netg = 0.5 # fraction
por = 0.24 # fraction
s_o = 0.8 # fraction
hcpv = netg * por * s_o
# -
# ## Formation volume factor
#
# Oil shrinks when we produce it, especially if it has high GOR. The FVF, or $B_O$, is the ratio of a reservoir barrel to a stock-tank barrel (25 deg C and 1 atm). Typically the FVF is between 1 (heavy oil) and 1.7 (high GOR).
#
# For gas, $B_G$ is 0.3495ZT/P, where Z is the Z factor.
fvf = 1.1
# ## Put it all together
hcip = grv * hcpv / fvf
hcip
print("HCIP", hcip)
# [For more on conversion to bbl, BOE, etc.](https://en.wikipedia.org/wiki/Barrel_of_oil_equivalent)
print("HCIP is {:.0f} Mm³ or {:.0f} million bbl".format(hcip/1000000, 6.29*hcip/1000000))
# If we're lucky we'll recover 50% of what's in place:
recoverable = 0.5 * hcip
print("Recoverable is {:.0f} Mm³ or {:.0f} million bbl".format(recoverable/1000000, 6.29*recoverable/1000000))
# # Risk
#
# ## Single prospect
p_src = 0.5
p_mig = 0.25
p_trap = 0.75
p_res = 0.5
p_disc = p_src * p_mig * p_trap * p_res
p_disc
expect = p_disc * recoverable
print("Expectation is {:.0f} Mm³ or {:.0f} million bbl".format(expect/1000000, 6.29*expect/1000000))
# ## Another prospect
p_a = p_disc
p_b = 0.10
p_both = p_a * p_b
p_both
p_one = 1 - (1 - p_a) * (1 - p_b)
p_one
# <hr />
#
# <div>
# <img src="https://avatars1.githubusercontent.com/u/1692321?s=50"><p style="text-align:center">© Agile Geoscience 2016</p>
# </div>
| instructor/Volumetrics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="JrMhl-Tk0y3p"
# # Clone CquenceR and PatchBundle
# + id="K1oK-GoIFz6L" colab={"base_uri": "https://localhost:8080/"} outputId="53563371-4b83-4f29-910e-761da65c01b9"
# !git clone https://github.com/SecureThemAll/PatchBundle.git
# + id="4DFpcUP_htj-" colab={"base_uri": "https://localhost:8080/"} outputId="01a2c5ca-5396-4d69-cf3a-823f4acbaf38"
# !git clone https://github.com/SecureThemAll/CquenceR.git
# + [markdown] id="tjxYCgYtyRJ7"
# # Install python 3.7
# + id="MJYEzwYnMRju" colab={"base_uri": "https://localhost:8080/"} outputId="eef9e817-9d9d-462b-967c-ec56ebb66465"
# !apt-get install python3.7 python3.7-dev
# + [markdown] id="xqQOP8DqzexQ"
# # Initialize CquenceR
# Version of OpenNMT needs no be under 2.0.0, or some dependencies will not work later, such as the OpenNMT's preprocess script and other inputs. Also, the python version check in the init script might fail, just comment it.
# + id="uNP1Gd96izGI" colab={"base_uri": "https://localhost:8080/"} outputId="a63089ec-6b52-47ea-c70d-a030b3867fbd"
# ! CquenceR/init.sh
# + [markdown] id="N5H_m1mUqiz5"
#
# + [markdown] id="o--YZlSbAH-X"
# # Install python 3.7 dependencies
# + id="MpkfeYg0AT-a" colab={"base_uri": "https://localhost:8080/"} outputId="dd1d20e7-085f-4028-d370-b6a685cc18d5"
# !curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
# !python3.7 get-pip.py
# !python3.7 -m pip install pandas
# !python3.7 -m pip install sklearn
# !python3.7 -m pip install python-Levenshtein
# !python3.7 -m pip install PyGithub
# !python3.7 -m pip install matplotlib
# !python3.7 -m pip install ipykernel
# # !python3.7 -m pip freeze > requirements.txt
# + [markdown] id="pHrRki6xy25r"
# # Create new dataset
# + id="JTYQ6CByGFDs" colab={"base_uri": "https://localhost:8080/"} outputId="1b7c4d68-84e6-4bdd-ad50-198163b76e16"
# %cd PatchBundle/tool/
# !echo 'asd' > token.txt
# !python3.7 ./PatchBundle.py filter --datasets nvd secbench mozilla secretpatch msr20 -m -v
# %cd ../..
# + [markdown] id="eMNzMpMhzxD4"
# # Preprocess Dataset Only into Source and Target Sets for Stats
# + id="tfcnSXMapQMg" colab={"base_uri": "https://localhost:8080/"} outputId="57124d0c-36b9-4d65-fb09-877b0e29159b"
# !python3.7 ./CquenceR/CquenceR.py preprocess -op /tmp/dataset --no_truncation --no_onmt -v
# + [markdown] id="0QJvMFc-0IRc"
# # Plot Stats for Dataset
# + id="Gq6xxao_QYGq" colab={"base_uri": "https://localhost:8080/"} outputId="c8038625-2453-4b91-e21f-9b839580a473"
# !python3.7 CquenceR/CquenceR.py stats -v -sp /tmp/dataset --save /content/CquenceR/plots
# + [markdown] id="qaiAeFW60fW6"
# # Clean Data
# + id="5tcZXxGw0lEW" colab={"base_uri": "https://localhost:8080/"} outputId="3d7c1f85-0d19-4b88-aa0f-a7eda3799662"
# !python3.7 ./CquenceR/CquenceR.py clean -v
# + [markdown] id="8XU8HyN70SER"
# # Preprocess Dataset for Training and Testing
# + id="zgwrpoB4bEUT" colab={"base_uri": "https://localhost:8080/"} outputId="1d737d6c-c904-47a7-916a-14dfb7e33455"
# !python3.7 ./CquenceR/CquenceR.py preprocess -s train_val_test -v
# + colab={"base_uri": "https://localhost:8080/"} id="z8V9Pi30AY6p" outputId="8a96c0aa-fa38-4da9-d738-522b6ba67c1d"
# !python3.7 -m pip install --upgrade torchvision==0.6.0
# + id="zuMMxWWw6-8N" colab={"base_uri": "https://localhost:8080/"} outputId="33b07b1a-3af7-46ac-b674-35c5ff45cab0"
# %cd CquenceR
# !git pull
# %cd ..
# + colab={"base_uri": "https://localhost:8080/"} id="09FOL0DnC1Sa" outputId="ad7a6c54-cf5a-4c66-d746-87fe8640c127"
# !which onmt_preprocess
# + [markdown] id="7lxPp8iC0-cS"
# # Train with GPU and Plot results (train_plots is the output folder)
#
# + [markdown] id="jRtKd8mvFhs2"
# If you can not run with the gpu, update the torch. For that just uncomment the next cell and run it.
# + id="9UCfG8ayFesw"
# #!python3.7 -m pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html
# + id="XD_rYy-as53-" colab={"base_uri": "https://localhost:8080/"} outputId="8a75d3e7-43a1-4c6e-cdfe-31b37f42fe55"
# !python3.7 CquenceR/CquenceR.py train -v --plot --gpu
# + [markdown] id="AKbnN3Sk1MBF"
# # Test and Plot Results (test_plots is the output folder)
# + id="2PdW0IPAHh8t" colab={"base_uri": "https://localhost:8080/"} outputId="e555ac9c-60b6-4255-f231-2b6b0855cd36"
# !python3.7 CquenceR/CquenceR.py test -v --plot
# + colab={"base_uri": "https://localhost:8080/"} id="2UujEaxQYozc" outputId="d2130b70-9fc6-41e4-fe1d-b3a2bd1c46dc"
import torch
# setting device on GPU if available, else CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
print()
#Additional Info when using cuda
if device.type == 'cuda':
print(torch.cuda.get_device_name(0))
print('Memory Usage:')
print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
print('Cached: ', round(torch.cuda.memory_reserved(0)/1024**3,1), 'GB')
| notebooks/CquenceR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NAMED ENTITY RECOGNITION
# ### IMPORTING LIBRARIES:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# ### IMPORTING DATA AND EDA:
data = pd.read_csv("ner_dataset.csv")
data.head().append(data.tail())
data.shape
data.isnull().sum()
data.Word
data.Tag
plt.figure(figsize = (10, 5))
plt.style.use('seaborn')
plt.hist(data.Tag, log = True, bins = 40, color='red')
plt.show()
# ### GETTING WORDS AND TAGS WITH THEIR INDEXES:
def get_tok_tag(data, token_or_tag):
if token_or_tag == 'token':
vocab = list(set(data['Word'].to_list()))
else:
vocab = list(set(data['Tag'].to_list()))
idx2tok = {idx : tok for idx, tok in enumerate(vocab)}
tok2idx = {tok : idx for idx, tok in enumerate(vocab)}
return tok2idx, idx2tok
token2idx, idx2token = get_tok_tag(data, 'token')
token2idx, idx2token
tag2idx, idx2tag = get_tok_tag(data, 'tag')
tag2idx, idx2tag
# ### MAPPING INDEXES TO WORD AND TAG DATA IN OUR DF:
data['Word_idx'] = data['Word'].map(token2idx)
data['Tag_idx'] = data['Tag'].map(tag2idx)
new_data = data.fillna(method = 'ffill', axis = 0)
new_data.head(3)
# +
grouped = new_data.groupby(['Sentence #'], as_index = False)['Word', 'POS', 'Tag', 'Word_idx', 'Tag_idx'].agg(lambda x : list(x))
#agg() is used to pass a function or list of function to
# be applied on a series or even each element of series separately
# -
grouped.loc[4]['Word']
# ### PADDING OUR TRAIN, TEST AND VALIDATION DATA:
from sklearn.model_selection import train_test_split
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
n_tokens = len(list(set(data['Word'].to_list())))
n_tags = len(list(set(data['Tag'].to_list())))
n_tokens, n_tags
#PADDING THE TOKENS (X)
tokens = grouped['Word_idx'].to_list()
maxlen = max([len(i) for i in tokens])
print(maxlen)
pad_tokens = pad_sequences(tokens, maxlen = maxlen,
dtype = 'int32', padding = 'post',
value = n_tokens - 1)
#PADDING THE TAGS (Y)
tags = grouped['Tag_idx'].to_list()
pad_tags = pad_sequences(tags, maxlen = maxlen,
dtype = 'int32', padding = 'post',
value = tag2idx["O"])
#ONE HOT ENCODING THE TARGE VARIABLE
pad_tags = [to_categorical(i, num_classes=n_tags) for i in pad_tags]
pad_tokens[0], pad_tags[0]
pad_tags[0]
#SPLITTING DATA FROM TRAIN AND TEST
X_train, X_test, y_train, y_test = train_test_split(pad_tokens, pad_tags, test_size = 0.2, random_state = 44)
X_train.shape, X_test.shape
len(y_train), len(y_test)
# ### DEFINING THE MODEL:
# +
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential, Model, Input
from tensorflow.keras.layers import Embedding, Bidirectional, LSTM, Dropout, TimeDistributed, Dense
# +1 so the padding index used by pad_sequences has its own embedding row.
input_dim = len(list(set(data['Word'].to_list()))) + 1
output_dim = 104  # embedding size, also reused as the LSTM hidden size
input_length = max([len(s) for s in grouped['Word_idx']])  # longest sentence
n_tags = len(tag2idx)
# MODEL:
model = Sequential()
# ADD EMBEDDING LAYER
model.add(Embedding(input_dim = input_dim, output_dim = output_dim, input_length = input_length))
# ADD BI-LSTM LAYER
model.add(Bidirectional(LSTM(units = output_dim, return_sequences = True, dropout = 0.2, recurrent_dropout = 0.2), merge_mode = 'concat'))
# ADD LSTM LAYER
model.add(LSTM(units = output_dim, return_sequences = True, dropout = 0.5, recurrent_dropout = 0.5))
# ADD TIMEDISTRIBUTED LAYER
# BUG FIX: the per-timestep output layer used activation='relu', but the
# categorical_crossentropy loss below expects a probability distribution
# over the n_tags classes, so the final activation must be 'softmax'.
model.add(TimeDistributed(Dense(units = n_tags, activation = 'softmax')))
# COMPILING MODEL
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
model.summary()
# -
# Train for a single epoch, holding out 20% of the training set for validation.
model.fit(X_train, np.array(y_train), batch_size = 500, validation_split = 0.2, epochs = 1)
model.save('NER_model.h5')
# model = keras.models.load_model('NER_model.h5')
# Predict tag distributions for the whole test set and for one sentence.
y_pred = model.predict(X_test)
y_pred_1 = model.predict(np.array([X_test[45]]))
pred = np.argmax(y_pred_1, axis = -1)  # most likely tag index per timestep
pred
# NOTE(review): set() ordering is not deterministic across runs, so these
# lists are not guaranteed to line up with token2idx/tag2idx built earlier —
# the word/tag labels printed below may be wrong. Verify before trusting.
words = list(set(data["Word"].values))
tags = list(set(data["Tag"].values))
print(len(words))
len(tags)
# for i in range(10):
#     p = model.predict(np.array([X_test[i]]))
#     p = np.argmax(p, axis=-1)
#     print("{:14} ({:5}): {}".format("Word", "True", "Pred"))
#     for w,pred in zip(X_test[i],p[0]):
#         print("{:14}: {}".format(data.Word[w],data.Tag_idx[pred]))
# Pick a random test sentence and print word / true tag / predicted tag.
i = np.random.randint(0,X_test.shape[0])
p = model.predict(np.array([X_test[i]]))
p = np.argmax(p, axis =-1)
y_true = np.argmax(np.array(y_test), axis =-1)[i]
print("{:15}{:5}\t {} \n".format("Word","True","Pred"))
print("-"*30)
for w,true,pred in zip(X_test[i],y_true,p[0]):
    # NOTE(review): words[w-1] assumes word index w maps to position w-1 of
    # the `words` list — given the set-ordering caveat above, confirm this.
    print("{:15}{}\t{}".format(words[w-1],tags[true],tags[pred]))
p[0]
def output(text):
    """Build and print an index->unique-token mapping for *text*.

    Parameters
    ----------
    text : str
        Whitespace-separated text; duplicates are collapsed via set().

    Returns
    -------
    dict
        Mapping of enumeration index -> token (also printed, as before).
    """
    tokens = list(set(text.split()))
    dict1 = {idx: tok for idx, tok in enumerate(tokens)}
    # BUG FIX: the original called text.map(dict1) on a plain list, which
    # raises AttributeError (lists have no .map); the remapped value was
    # never used, so the broken call is simply dropped.
    print(dict1)
    return dict1
# Manual check of output() on a two-word string.
output('Hi there')
# Minimal single-row DataFrame demo of the same enumerate->dict mapping idea.
a = ['Hi, there']
a = pd.DataFrame(a, columns=['ref'])
a
p = list(set(a['ref'].to_list()))
p
dict1 = {idx : tok for idx, tok in enumerate(p)}
dict1
# NOTE(review): dict1 maps index -> token, but Series.map() looks up the
# *values* of 'ref' as keys, so every row maps to NaN here; the inverse
# mapping (token -> index) was probably intended — confirm.
data_req = a['ref'].map(dict1)
data_req = data_req.fillna(method = 'ffill', axis = 0)
data_req
# Compare against spaCy's pretrained NER pipeline, rendered inline.
import spacy
from spacy import displacy
nlp = spacy.load('en_core_web_sm')
text = nlp("U.N. relief coordinator <NAME> said Sunday U.S. Indonesian and Australian military helicopters are ferrying out food and supplies to remote areas western Aceh province that ground crews can not reach")
displacy.render(text, style = 'ent', jupyter=True)
def return_NER(text):
    """Run spaCy's pretrained English NER over *text* and render the
    recognised entities inline in the notebook.

    Loads the 'en_core_web_sm' pipeline on every call (matches the
    original behaviour; cache the model outside if this becomes hot).
    """
    pipeline = spacy.load('en_core_web_sm')
    doc = pipeline(text)
    displacy.render(doc, style='ent', jupyter=True)
# Exercise return_NER on a longer news-style paragraph.
return_NER("""WASHINGTON — <NAME>, the F.B.I. senior counterintelligence agent who
disparaged President Trump in inflammatory text messages and helped oversee the Hillary
Clinton email and Russia investigations, has been fired for violating bureau policies,
Mr. Strzok’s lawyer said Monday. Mr.Trump and his allies seized on the texts — exchanged
during the 2016 campaign with a former F.B.I. lawyer, <NAME> — in assailing the Russia
investigation as an illegitimate “witch hunt.” Mr. Strzok, who rose over 20 years at the
F.B.I. to become one of its most experienced counterintelligence agents, was a key figure
in the early months of the November""")
# ### TESTING OUR MODEL:
# Same inline-rendering flow as above, but with the pipeline loaded directly.
import spacy
from spacy import displacy
nlp = spacy.load('en_core_web_sm')
text = nlp('Im not in the danger I am the danger said by Mr.Navaneeth at 3:30 AM and Mr.Jai added that the Winter is coming as of Google')
displacy.render(text, style = 'ent', jupyter = True)
| .ipynb_checkpoints/Named_Entity_Recognition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# The line below sets the environment
# variable CUDA_VISIBLE_DEVICES
get_ipython().magic('env CUDA_VISIBLE_DEVICES = 1')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import multiprocessing as mp # will come in handy due to the size of the data
import os.path
import random
import io
from datetime import datetime
import gc # garbage collector
import sklearn
import xgboost as xgb
from sklearn.preprocessing import LabelEncoder
import math
from collections import defaultdict
import re
import logging
# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
get_ipython().magic('matplotlib inline')
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
# -
# Absolute paths to the WSDM music-recommendation csv files on local disk.
DATASET_PATH = '/media/rs/0E06CD1706CD0127/Kapok/WSDM/'
TRAIN_FILE = DATASET_PATH + 'all_train_withextra.csv'
TEST_FILE = DATASET_PATH + 'all_test_withextra.csv'
MEMBER_FILE = DATASET_PATH + 'members.csv'
SONG_FILE = DATASET_PATH + 'fix_songs.csv'
ALL_ARTIST = DATASET_PATH + 'all_artist_name.csv'
ALL_COMPOSER = DATASET_PATH + 'all_composer.csv'
ALL_LYRICIST = DATASET_PATH + 'all_lyricist.csv'
# Destination HDF5 container that all of the frames below are written into.
HDF_FILENAME = DATASET_PATH + 'music_info.h5'
# Load every csv up front (large files — this is memory-heavy by design;
# the notebook imports multiprocessing/gc above for exactly that reason).
train_data = pd.read_csv(TRAIN_FILE)
test_data = pd.read_csv(TEST_FILE)
member_data = pd.read_csv(MEMBER_FILE)
song_data = pd.read_csv(SONG_FILE)
composer_df = pd.read_csv(ALL_COMPOSER)
artist_name_df = pd.read_csv(ALL_ARTIST)
lyricist_df = pd.read_csv(ALL_LYRICIST)
def convert_unicode_to_str(df):
    """Cast mixed-dtype columns of *df* to str so the frame is HDF5-storable.

    PyTables cannot serialize object columns that mix types (e.g. ints and
    unicode strings), so any column whose inferred dtype is 'mixed' or
    'mixed-integer' is converted to string. Column labels are also forced
    to str.

    Parameters
    ----------
    df : pandas.DataFrame  (mutated in place)

    Returns
    -------
    pandas.DataFrame
        The same frame, returned for convenience.
    """
    df.columns = df.columns.astype(str)
    # BUG FIX: the original lambda inspected df.values (the whole frame) for
    # every column, so all columns shared a single inferred dtype and clean
    # columns could be needlessly stringified; inspect each column (x).
    types = df.apply(lambda x: pd.api.types.infer_dtype(x))
    for col in types[types.isin(('mixed-integer', 'mixed'))].index:
        df[col] = df[col].astype(str)
    return df
# Write every frame into one HDF5 store, sanitising mixed dtypes first.
# NOTE(review): a `with pd.HDFStore(...) as store:` block would guarantee
# the file is closed even if a write raises; left as-is.
store = pd.HDFStore(HDF_FILENAME)
store['all_train_withextra'] = convert_unicode_to_str(train_data)
store['all_test_withextra'] = convert_unicode_to_str(test_data)
store['members'] = convert_unicode_to_str(member_data)
store['fix_songs'] = convert_unicode_to_str(song_data)
store['all_composer'] = convert_unicode_to_str(composer_df)
store['all_artist_name'] = convert_unicode_to_str(artist_name_df)
store['all_lyricist'] = convert_unicode_to_str(lyricist_df)
store.close()
# Re-open read-back to verify one of the tables round-trips.
store_test = pd.HDFStore(HDF_FILENAME)
print(store_test['all_composer'])
store_test.close()
| MusicRecommendation/SaveHDF5.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,md:myst
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab Week 11 - solution
#
# ## EOSC 211
#
# <img src="images/example_plot.png">
#
#
# ### Learning Objectives:
#
# 1. Use the python pathlib module to create a folder tree and organize your data
# 2. Use a new package, pandas, for data analysis (you can think of pandas as spreadsheets on steroids)
# +
import pandas as pd # this one is new
import numpy as np
from pathlib import Path # and this one
import matplotlib.pyplot as plt
#import scipy.stats as st
pd.set_option('display.max_columns', None) # this shows ALL of the columns of a dataFrame
# -
# ## Introduction
#
# For most people, doing math on a computer means using a spreadsheet: [between 0.75 - 2 billion people](https://askwonder.com/research/number-google-sheets-users-worldwide-eoskdoxav) use either Microsoft Excel
# or Google sheets to get work done every day. In contrast to numpy arrays, a spreadsheet table can have
# many different data types across columns, and rows and columns can be referenced as named ranges,
# so that a spreadsheet formula might look like `=sum(receipts)`, where `receipts` is the label for
# table column `J`. Spreadsheets also allow you to do simple database operations, like [merging tables based on an index](https://www.ablebits.com/office-addins-blog/2018/10/31/excel-merge-tables-matching-columns/).
#
# The python equivalent of a spreadsheet table is a [pandas dataframe](https://realpython.com/pandas-dataframe/), which is a 2-dimensional data type in which columns and rows can be accessed by either column/row numbers
# or index labels.
#
# Dataframes originated in the statistical languge `S` (the ancestor of `R`) and are central to both statistical
# computing and data analysis. Pandas is a large library, and we're going to just skim part of it here, but
# there are great references (including jupyter notebooks) in the reference list below.
# ## Part 1: Folder Trees and Pathlib
#
# Before we start working with dataframes, we need to get more organized about how to handle files and folders
# in python. We've been putting data alongside notebooks in a single folder for each lab, but there is
# the obvious problem that the same dataset can be used for multiple projects, and having dozens of files
# in a folder makes it hard to organize your work.
#
# In this lab we need to construct the following folder tree:
#
# ```
# + myhome
# + eosc211
# + lab_wk11
# + data
# + raw
# + processed
#
# ```
#
# where `myhome` is your "home folder" which we will define in below. On my windows laptop
# `myhome` is `C:\Users\phil` and the full path to `processed` is `C:\Users\phil\eosc211\lab_wk11\data\processed`. On our jupyterhub `myhome` is `/home/jovyan` and the pathname is `/home/jovyan/eosc211/lab_wk11/data/processed`.
#
# ### Using Pathlib
#
# The python pathlib module provides a way to specify folder paths that is portable across macos, windows
# and linux. As a general principle, it is always good to write code that works across operating systems whenever possible. Here is how you locate your home folder with pathlib:
# Locate the user's home folder in an OS-portable way.
myhome = Path.home()
print(f"{myhome=}")
# and here is how you use the Path.mkdir method to
# make a nested `lab_wk11` folder (aka directory) below the home folder:
lab11 = myhome / 'eosc211/lab_wk11' # note the "/" symbol takes on a different meaning here than usual
lab11.mkdir(parents=True, exist_ok=True)  # exist_ok makes re-runs safe
print(f"{lab11=}")
# ### Creating Multiple Folders
# create two new folders "raw" and "processed", which are subfolders of "data"
new_dirs = ['data/raw','data/processed']
for the_dir in new_dirs:
    curr_dir = lab11 / the_dir
    curr_dir.mkdir(parents=True,exist_ok=True)
# ### Searching Files and Folders
#
# Once you have a folder tree you can search through it for files
# matching specific patterns. This is called `globbing` and the pattern that matches
# any path or name is `*`, known as the `wildcard`. In the cell below we use globbing to start in the
# `lab11` folder and recursively descend through all that folder's children, listing them:
all_dirs = list(lab11.glob('**/*'))  # '**/*' = recursive match of everything
for this_dir in all_dirs:
    print(f"found {this_dir}")
# ### Exploring the Folder Tree
#
# To see how your folder tree looks from a terminal or explorer/finder, do the following:
#
# 1) open a miniconda powershell window (on a windows laptop) or a bash terminal on jupyterhub or macos
# 2) type `cd ~` (spanish tilde) to change to your home folder
# 3) type `cd eosc211` to change to the top folder in our tree.
# 4) type `ls` to list the folders in the current directory
# 5) type `cd ..` to move up one folder, or `cd foldername` to move into a folder named foldername.
# 6) If you want to add a folder, on the commandline do:
# `mkdir foldername`
#
#
# On your laptop, you can also start explorer (windows) or finder (macos) in a folder by typing:
#
# `open .` (for macos)
#
# or
#
# `start .` (for windows)
#
# Try adding a folder to your new eosc211 tree with explorer or finder -- does it appear in
# the all_dirs list when you re-run the cell above?
# ## Part 2: Data Analysis With Pandas
#
# <img src="images/panda.jpg"> </img>
#
# Check out [the site I found this photo from](http://www.kidssearch.com/PicturesOfPanda.html) if you need to relax for a few minutes. Also, look at how the image is referenced (double click here to see raw markdown). Note the file path!
#
# Pandas is a python library purpose-built for types of data processing we often do in the earth sciences.
# The core object of the pandas library is the *dataFrame*, which we can think of like a hybridized version
# of a numpy array and a dictionary, laid out to resemble a standard data table.
# Dataframes store data in a 2-d grid,
# but instead of referencing by numerical index, we can give each row and column a sensible name.
# Much easier to keep track of, and provides a level of automatic documentation about the data values.
# Pandas is also a very good choice for parsing `.csv` or `.xlsx` data,
# we can load spreadsheets and manipulate them with all of the pythonic tricks we have gathered over the term.
#
# **Summary**
#
# - Pandas is a library for working with **labelled** tabular data (1-D and 2-D)
# - Data formats include: comma separated values (CSV) and other text files, Excel spreadsheets, HDF5, [and others](https://pandas.pydata.org/pandas-docs/stable/io.html)
# - With `pandas` you can do pretty much everything you would in a spreadsheet, plus a whole lot more!
# ### Pandas dataframes vs. numpy arrays
#
# * Dataframes are **column oriented**, arrays are **row oriented**
# * Array items are all of the same dtype (i.e. numpy.float32), dataframe columns can
# have different types (e.g.strings vs. integers)
# * Dataframe columns (referred to as a pandas.Series [in the week 11 reading](https://phaustin.github.io/eosc211_students/wk11/pythia_pandas.html#the-pandas-series))
# can be indexed by name (e.g. "Total area of basin") or by integer index
# * Dataframe rows can be indexed by number or by a special index (e.g. postal code)
# * Dataframe objects have dozens of methods to summarize and manipulate the data they hold,
# making them similar in features to a lightweight relational database.
# ### Further Reading Beyond This Lab
#
# - `pandas` = [Python Data Analysis Library](https://pandas.pydata.org/)
# - [software carpentry pandas lesson](https://swcarpentry.github.io/python-novice-gapminder/08-data-frames/index.html)
# - Best book: [Python for data analysis](https://github.com/wesm/pydata-book) by <NAME>
# - <NAME>'s [Pandas cheatsheet](https://phaustin.github.io/eosc211_students/wk11/lab_wk11/pandas-cheatsheet.html)
# ### Reading a CSV file with Pandas
#
# You should see `weather_YVR.csv` on our canvas homepage, which is a list of 29,000 weather measurements taken
# at the Vancouver International Airport between 1938-2017. Copy this file into `myhome/eosc211/lab_wk11/data`
# using `explorer/finder`, or if you're on the hub, the `files` page as demonstrated in class. If you've done this
# correctly then the following wildcard search should find it:
# Recursive wildcard search: find the weather csv anywhere under lab11.
weather_file = list(lab11.glob('**/w*csv'))[0]
print(f"{weather_file=}")
# In the cell below, import the `.csv` file as a pandas dataframe. Your dataframe should include columns `Date, Year, Month, Day, T_mean (C), T_high (C), T_low (C), Rain (mm), Snow (cm), Total Precip (mm)`
#
# Name the dataframe `weather_YVR`
#
# **Hint:** This week's reading has a helpful example of `pd.read_csv`
# + nbgrader={"grade": true, "grade_id": "cell-9dadf33aadb9f15a", "locked": false, "points": 0, "schema_version": 3, "solution": true, "task": false}
### BEGIN SOLUTION
# andrew's soln
# https://phaustin.github.io/eosc211_students/wk11/pythia_pandas.html#the-pandas-dataframe
# index_col=0 + parse_dates=True makes the first (Date) column the index,
# parsed as datetimes, so .index.year works in the cells below.
weather_YVR = pd.read_csv(weather_file, index_col=0, parse_dates=True)
### END SOLUTION
# show the first 6 lines of the dataframe
weather_YVR.head(6)
# -
# ### Data at a Glance
#
# `pandas` provides many ways to quickly and easily summarize your data. What do each of the following compute?
#
# `weather_YVR.shape` (notice the difference from `np.shape(arr)`, like we use to get the shape of a numpy array)
#
# `weather_YVR.columns`
#
# `weather_YVR.mean()`
#
# `weather_YVR.describe()`
# +
# try each of the above here (dont need to hand in).
# -
# ### Using the `groupby()` Function
#
# Now that we have some ideas about the general characteristics of our data (is the dataset relatively *complete*? Do the values *make sense*?), it's time to do some analysis. Say we would like to know about the cumulative precipitation in each month, averaged over a particular decade (1980-1990, for example). Use pandas built in groupby() function to split the raw data into a separate data frame for each decade (1930-2010).
#
# **Hint:** There is no `"decade"` column in the dataframe, so how are you going to assign each year to its decade using python and then add that information to
# the dataframe for each measurement?
#
# First think about how you would extract the year of an observation form `weather_YVR`
# row and then how you would convert,
# say, all the years in the 1930s to be assigned to the 1930 decade. Once you've
# figured this out, follow the example in the week11 reading (where they add a `month` column)
# to add a new column called `decade` to the dataframe, and then use groupby on that column to produce a
# groupby object containing a DataFrame for each of the 9 decades. Finally, use
# the `dict(tuple(df_groups)))` trick to turn that groupby object into a dictionary. Your dictionary keys
# should look like: `dict_keys([1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010])`
#
# Call that dictionary `decades` -- we'll depend on that name in the cells below
# + nbgrader={"grade": false, "grade_id": "cell-dffcd935330387b3", "locked": false, "schema_version": 3, "solution": true, "task": false}
### BEGIN SOLUTION
# andrew's soln
# Integer-divide the year by 10 and multiply back to bucket each row into
# its decade (1938 -> 1930, 1965 -> 1960, ...).
decade_col = (weather_YVR.index.year//10)*10
weather_YVR['the_dec'] = decade_col
decades = weather_YVR.groupby('the_dec')
# dict(tuple(groupby)) turns the groupby object into {decade: DataFrame}.
decades = dict(tuple(decades))
# what did we just produce?
print(f"{type(decades)=}")
print(f"{decades.keys()}")
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "cell-d89e76f1e6f641f1", "locked": true, "points": 0, "schema_version": 3, "solution": false, "task": false}
assert(len(decades) > 0)
# -
# decades is a dictionary, what are the keys?
decades.keys()
# okay, so the keys are integers representing each decade. What are the values of the "decades" dict?
decades[1940].head(10)
# ### Using Pandas Built-in Plotting
#
# Let's display our data with a bar chart, pandas style! This is all still the matplotlib that we know and love,
# but the functions are called via the pandas library, instead of doing it directly. Pandas has some built-in intuition about plot formatting, so oftentimes this takes care of things like axis labels without having to explicitly specify them.
#
# This cell creates a bar chart of average monthly rainfall for the years 1960-1969:
# Pandas chained plotting: take the 1960s frame, group by Month, average
# the rainfall, and draw the result as a labelled bar chart.
the_ax = (decades[1960]
          .groupby("Month")["Rain (mm)"]
          .mean()
          .plot
          .bar(ylabel="Rain (mm)",
               title=f"Average Monthly Rainfall in the 1960s"));
# Using the code in the cell above, write a function called `plot_monthly_average()` which takes a pandas dataframe stored in the `decades` dictionary (remember the keys to the `decades` dictionary are just integers representing each decade, so e.g. `decade[1980]` is the dataframe for the 1980s) and some variable (e.g. `YVR_1960`, `Rain (mm)`), and creates a bar chart of that variable for each month, averaged over the whole decade. Include a docstring and some checks on the input arguments.
# + nbgrader={"grade": true, "grade_id": "cell-ea770b366418008c", "locked": false, "points": 1, "schema_version": 3, "solution": true, "task": false}
### BEGIN SOLUTION
# andrew's soln
def plot_monthly_average(decade_df, var):
    """Bar-chart the monthly mean of one variable over a decade.

    IN: decade_df -- pandas dataframe with "Year", "Month" and *var* columns
        var       -- name of the column to average by month and plot
    OUT: returns None; draws the bar chart as a side effect
    """
    monthly_means = decade_df.groupby("Month")[var].mean()
    monthly_means.plot.bar(
        ylabel=var,
        title=f"Average Monthly {var} in the {decade_df.Year[0]}s")
    return None
### END SOLUTION
# -
# ### Use your function
#
# In the cells below, call your function to create histogram plots of the following:
#
# A) Mean temperature, by month from 1980-1989
#
# B) Mean total precip, by month from 1960-1969
# + nbgrader={"grade": true, "grade_id": "cell-ef3ed0bec0e9b0c7", "locked": false, "points": 1, "schema_version": 3, "solution": true, "task": false}
# A)
### BEGIN SOLUTION
# Mean temperature by month, averaged over the 1980s.
plot_monthly_average(decades[1980], "T_mean (C)")
### END SOLUTION
# + nbgrader={"grade": true, "grade_id": "cell-42d618027a7ae9fd", "locked": false, "points": 1, "schema_version": 3, "solution": true, "task": false}
# B)
### BEGIN SOLUTION
# Mean total precipitation by month, averaged over the 1960s.
plot_monthly_average(decades[1960], "Total Precip (mm)")
### END SOLUTION
# -
# ### One more demonstration: Can we see global warming at YVR?
#
# You can do much more with this dataset. As one more example, here is code to
# compare the decadal change in low temperature, high temperature, and rainfall
# at YVR
# Compare decadal monthly means of low temp, high temp, and rainfall at YVR.
compare_decs = {}
decade_list = [1960, 1980, 2000, 2010]
plot_vars = ['T_low (C)', 'T_high (C)', 'Rain (mm)']
# Pre-compute the per-month mean of each variable for every decade.
for the_decade, the_df in decades.items():
    compare_decs[the_decade] = {}
    for the_var in plot_vars:
        compare_decs[the_decade][the_var] = the_df.groupby("Month")[the_var].mean()
# One figure per variable. (The original repeated this plotting code three
# times verbatim; a single loop produces the identical three figures and is
# easier to extend with more variables.)
for the_var in plot_vars:
    fig, ax = plt.subplots(1, 1, figsize=(8, 6))
    for the_decade in decade_list:
        ax.plot(compare_decs[the_decade][the_var], label=the_decade)
    ax.set_title(f"decadal monthly mean for {the_var}")
    ax.legend()
    ax.grid(True)
# ### Final Comments
#
# This lab is just the tip of the iceberg when it comes to using Pandas or other python libraries to do data analysis. We could have just as well completed all of this lab with our familiar numpy arrays and produced the same result -- There are always multiple ways to solve the same problem with code, and choosing the best tool for the job is often far from obvious. Choose tools that work for you for the task at hand!
#
# **Optional Extras:** There are other variations of the pandas library, two of which are particularly helpful for geosciences applications.
#
# 1) [Xarray](http://xarray.pydata.org/en/stable/index.html): *Like pandas but in generalized to 3D, 4D.. ND. These are really good for processing field data of the sort that you encounter in meteorology or oceanography, for example, the temperature field in a 3D slice of the atmosphere, salinity in the ocean, currents, etc. Save this link for later and do some exploring!*
#
# 2) [Geopandas](https://geopandas.org/en/stable/): *A version of pandas with added functionality for parsing common GIS datatypes, like .KML, .shp, .shx, .dbf. You can do pretty much everything in python that you might try in a program like ArcGIS, with the added advantage of looping through big datasets, etc.*
| lab_keys/week11_lab/lab_wk11.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <img src="http://hikyuu.readthedocs.io/zh_CN/latest/_images/00000-title.png" align="left">
# Hikyuu Quant Framework是一款基于C++/Python的开源量化交易研究框架,用于策略分析及回测。其核心思想基于当前成熟的系统化交易方法,将整个系统化交易策略抽象为由市场环境判断策略、系统有效条件、信号指示器、止损/止盈策略、资金管理策略、盈利目标策略、移滑价差算法七大组件,你可以分别构建这些组件的策略资产库,在实际研究中对它们自由组合来观察系统的有效性、稳定性以及单一种类策略的效果。在系统策略之上,对交易对象选择、系统策略选择、资产组合资金分配进行了进一步封装,能够灵活支持更高层级的策略组合。
#
# 更多信息,请参见:<https://hikyuu.org>
# ## 入门篇
#
# * [001 交互式工具示例](001-overview.ipynb?flush_cache=True)
# * [002 获取股票对象](002-HowToGetStock.ipynb?flush_cache=True)
# * [003 获取并绘制K线数据](003-HowToGetKDataAndDraw.ipynb?flush_cache=True)
# * [004 计算并绘制技术指标](004-IndicatorOverview.ipynb?flush_cache=True)
# * [005 绘制组合图形](005-Drawplot.ipynb?flush_cache=True)
# * [006 TradeManager应用](006-TradeManager.ipynb?flush_cache=True)
# * [007 系统策略演示](007-SystemDetails.ipynb?flush_cache=True)
# * [008 序列化说明](008-Pickle.ipynb?flush_cache=True)
# * [009_获取实时日线数据](009-RealData.ipynb?flush_cache=True)
# * [010_资产组合](010-Portfolio.ipynb?flush_cache=True)
# ## 示例
#
# * [Demo1](Demo/Demo1.ipynb?flush_cache=True)
# * [Demo2](Demo/Demo2.ipynb?flush_cache=True)
| hikyuu/examples/notebook/000-Index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import logging
import importlib
# Reload logging so basicConfig below takes effect even if the notebook
# kernel has already installed a handler.
importlib.reload(logging) # see https://stackoverflow.com/a/21475297/1469195
log = logging.getLogger()
log.setLevel('INFO')
import sys
# Log to stdout so messages show up in the notebook output cells.
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                    level=logging.INFO, stream=sys.stdout)
# -
import numpy as np
# Sanity check: load one precomputed gradient array and inspect its shape.
a = np.load('/data/hammerj/outputData/v16_visu500_models/gradients/ALL_11_FR1_day1_xpos_test_conv_2_amp_grads.npy')
a.shape
# +
# %%capture
import os
import site
os.sys.path.insert(0, '/home/schirrmr/code/reversible/')
os.sys.path.insert(0, '/home/schirrmr/braindecode/code/braindecode/')
os.sys.path.insert(0, '/home/schirrmr/code/explaining/reversible//')
# %load_ext autoreload
# %autoreload 2
import numpy as np
import logging
log = logging.getLogger()
log.setLevel('INFO')
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
level=logging.INFO, stream=sys.stdout)
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import cm
# %matplotlib inline
# %config InlineBackend.figure_format = 'png'
matplotlib.rcParams['figure.figsize'] = (12.0, 1.0)
matplotlib.rcParams['font.size'] = 14
import seaborn
seaborn.set_style('darkgrid')
from reversible2.sliced import sliced_from_samples
from numpy.random import RandomState
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import copy
import math
import itertools
import torch as th
from braindecode.torch_ext.util import np_to_var, var_to_np
from reversible2.splitter import SubsampleSplitter
from reversible2.view_as import ViewAs
from reversible2.invert import invert
from reversible2.affine import AdditiveBlock
from reversible2.plot import display_text, display_close
from reversible2.util import set_random_seeds
# +
import sklearn.datasets
from matplotlib.patches import Rectangle
# Fixed seed so the toy dataset is reproducible across runs.
set_random_seeds(20190708, True)
# Two-moons toy dataset; shuffle=False keeps class A in the first half and
# class B in the second half, so the halves split cleanly below.
X,y = sklearn.datasets.make_moons(100, shuffle=False, noise=4e-2)
train_inputs_a = np_to_var(X[0:len(X)//2], dtype=np.float32)
train_inputs_b = np_to_var(X[len(X)//2:], dtype=np.float32)
cuda = False
plt.figure(figsize=(4,4))
plt.scatter(var_to_np(train_inputs_a)[:,0], var_to_np(train_inputs_a)[:,1], label="Class A")
plt.scatter(var_to_np(train_inputs_b)[:,0], var_to_np(train_inputs_b)[:,1], label="Class B")
ax = plt.gca()
# Draw a small square around every training point (5e-2 on a side).
for pt in var_to_np(th.cat((train_inputs_a, train_inputs_b))):
    rect = Rectangle(pt - 5e-2/2, 5e-2, 5e-2, facecolor='None', edgecolor='black', lw=1,
                     alpha=0.5)
    ax.add_artist(rect)
plt.legend()
plt.axis('equal')
# +
cuda = False
from reversible2.distribution import TwoClassIndependentDist
from reversible2.blocks import dense_add_block
from reversible2.rfft import RFFT, Interleave
from reversible2.util import set_random_seeds
from torch.nn import ConstantPad2d
import torch as th
from reversible2.splitter import SubsampleSplitter
from matplotlib.patches import Ellipse
from reversible2.gaussian import get_gauss_samples
set_random_seeds(2019011641, cuda)
# Invertible network: four additive coupling blocks on 2-D inputs, each with
# a 200-unit dense subnetwork.
model = nn.Sequential(
    dense_add_block(2,200),
    dense_add_block(2,200),
    dense_add_block(2,200),
    dense_add_block(2,200),
)
# Per-class independent Gaussian distribution in the 2-D output space.
dist = TwoClassIndependentDist(2, truncate_to=None)
from reversible2.model_and_dist import ModelAndDist
model_and_dist = ModelAndDist(model, dist)
# Separate learning rates: distribution parameters move faster (1e-2) than
# the network weights (1e-4, with weight decay).
optim = th.optim.Adam([{'params': model_and_dist.dist.parameters(), 'lr':1e-2},
                       {'params': list(model_and_dist.model.parameters()),
                        'lr': 1e-4,
                        'weight_decay': 0.05}])
# -
def lighten_color(color, amount=0.5):
    """
    Lightens the given color by multiplying (1-luminosity) by the given amount.
    Input can be matplotlib color string, hex string, or RGB tuple.

    Examples:
    >> lighten_color('g', 0.3)
    >> lighten_color('#F034A3', 0.6)
    >> lighten_color((.3,.55,.1), 0.5)
    """
    import matplotlib.colors as mc
    import colorsys
    try:
        # Named matplotlib colors ('g', 'black', ...) resolve via cnames.
        c = mc.cnames[color]
    except (KeyError, TypeError):
        # BUG FIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt. Only the lookup failures matter here: KeyError
        # for hex strings / unknown names, TypeError for unhashable inputs.
        c = color
    # Convert to HLS, push lightness toward 1 by `amount`, convert back.
    c = colorsys.rgb_to_hls(*mc.to_rgb(c))
    return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
from reversible2.ot_exact import get_matched_samples
# +
from reversible2.invert import invert
from reversible2.ot_exact import ot_euclidean_loss_for_samples
n_epochs = 10001
rand_noise_factor = 5e-2  # NOTE(review): defined but not used in this cell
for i_epoch in range(n_epochs):
    ots = []
    # Two optimal-transport terms per class: one in input space (inverted
    # samples vs. data) and one in output space (encoded data vs. samples).
    for i_class in range(2):
        inputs = [train_inputs_a, train_inputs_b][i_class]
        out_samples = model_and_dist.dist.get_samples(i_class, 100)
        in_samples = invert(model_and_dist.model, out_samples)
        ot = ot_euclidean_loss_for_samples(inputs, in_samples)
        ots.append(ot)
        outputs = model_and_dist.model(inputs)
        ot_out = ot_euclidean_loss_for_samples(outputs, out_samples)
        ots.append(ot_out)
    # Sum all four OT terms into one scalar loss and take a gradient step.
    loss = th.sum(th.stack(ots))
    optim.zero_grad()
    loss.backward()
    optim.step()
    # Periodic progress report: NLLs plus input/output-space scatter plots.
    if i_epoch % (n_epochs // 20) == 0:
        tr_out_a = model_and_dist.model(train_inputs_a)
        tr_out_b = model_and_dist.model(train_inputs_b)
        mean, std = model_and_dist.dist.get_mean_std(0)
        display_text("Epoch {:d} of {:d}\nTrain A NLL {:.1E}\nTrain B NLL {:.1E}\n".format(
            i_epoch, n_epochs,
            -th.mean(model_and_dist.get_total_log_prob(0, train_inputs_a)).item(),
            -th.mean(model_and_dist.get_total_log_prob(1, train_inputs_b)).item(),
        ))
        fig, axes = plt.subplots(1,2, figsize=(12,5))
        # Right panel: network outputs vs. distribution samples, per class,
        # with a 2-sigma ellipse of each class distribution.
        for i_class in range(2):
            outs = [tr_out_a, tr_out_b][i_class]
            samples = model_and_dist.dist.get_samples(i_class,200)
            axes[1].scatter(var_to_np(samples)[:,0], var_to_np(samples)[:,1],
                            color=lighten_color(seaborn.color_palette()[i_class]),
                            s=3,
                            label="Fake "+ ["A", "B"][i_class])
            axes[1].scatter(var_to_np(outs)[:,0], var_to_np(outs)[:,1], color=seaborn.color_palette()[i_class])
            mean, std = model_and_dist.dist.get_mean_std(i_class)
            ellipse = Ellipse(var_to_np(mean), var_to_np(std[0] * 2), var_to_np(std[1] * 2),
                              edgecolor=lighten_color(seaborn.color_palette()[i_class]),
                              facecolor='None', lw=3)
            axes[1].add_artist(ellipse)
        axes[1].axis('equal')
        axes[1].set_title("Output space")
        # Left panel: data vs. generated examples, plus the inverted image of
        # the 2-sigma circle (the class distribution pulled back to inputs).
        for i_class in range(2):
            inputs = [train_inputs_a, train_inputs_b][i_class]
            examples = model_and_dist.get_examples(i_class,200)
            axes[0].scatter(var_to_np(inputs)[:,0], var_to_np(inputs)[:,1], color=seaborn.color_palette()[i_class],
                            label="Class "+ ["A", "B"][i_class])
            axes[0].scatter(var_to_np(examples)[:,0], var_to_np(examples)[:,1],
                            color=lighten_color(seaborn.color_palette()[i_class]),
                            s=3,
                            label="Fake "+ ["A", "B"][i_class])
            radians = np.linspace(0,2*np.pi,200)
            circle_points = np.stack([np.cos(radians), np.sin(radians)], axis=-1)
            circle_th = np_to_var(circle_points, device=train_inputs_a.device, dtype=np.float32)
            mean, std = model_and_dist.dist.get_mean_std(i_class)
            circle_out= mean + (circle_th * std * 2)
            circle_in = invert(model_and_dist.model, circle_out)
            axes[0].plot(var_to_np(circle_in)[:,0], var_to_np(circle_in)[:,1],
                         alpha=1, lw=3, color=lighten_color(seaborn.color_palette()[i_class]),
                         label="Dist " + ["A", "B"][i_class])
        axes[0].axis('equal')
        axes[0].set_title("Input space")
        axes[0].legend(ncol=3)
        display_close(fig)
    # Stop early after 500 epochs despite n_epochs=10001 (demo run).
    if i_epoch == 500:
        break
# +
# Final visualisation after training: same two-panel plot as the loop above,
# plus black lines connecting each point to its OT-matched samples.
tr_out_a = model_and_dist.model(train_inputs_a)
tr_out_b = model_and_dist.model(train_inputs_b)
mean, std = model_and_dist.dist.get_mean_std(0)
display_text("Epoch {:d} of {:d}\nTrain A NLL {:.1E}\nTrain B NLL {:.1E}\n".format(
    i_epoch, n_epochs,
    -th.mean(model_and_dist.get_total_log_prob(0, train_inputs_a)).item(),
    -th.mean(model_and_dist.get_total_log_prob(1, train_inputs_b)).item(),
))
fig, axes = plt.subplots(1,2, figsize=(12,5))
# Right panel: output space with transport matching lines.
for i_class in range(2):
    outs = [tr_out_a, tr_out_b][i_class]
    samples = model_and_dist.dist.get_samples(i_class,200)
    axes[1].scatter(var_to_np(samples)[:,0], var_to_np(samples)[:,1],
                    color=lighten_color(seaborn.color_palette()[i_class]),
                    s=3,
                    label="Fake "+ ["A", "B"][i_class])
    axes[1].scatter(var_to_np(outs)[:,0], var_to_np(outs)[:,1], color=seaborn.color_palette()[i_class])
    # Draw a thin black line from each output to every sample it is matched
    # to by the exact OT coupling.
    matched_examples = get_matched_samples(outs, samples)
    for a,bs in zip(var_to_np(outs), var_to_np(matched_examples)):
        for b in bs:
            axes[1].plot([a[0], b[0]], [a[1], b[1]], color='black', lw=0.25)
    mean, std = model_and_dist.dist.get_mean_std(i_class)
    ellipse = Ellipse(var_to_np(mean), var_to_np(std[0] * 2), var_to_np(std[1] * 2),
                      edgecolor=lighten_color(seaborn.color_palette()[i_class]),
                      facecolor='None', lw=3)
    axes[1].add_artist(ellipse)
axes[1].axis('equal')
axes[1].set_title("Output space")
# Left panel: input space with the same matching lines and the pulled-back
# 2-sigma circle of each class distribution.
for i_class in range(2):
    inputs = [train_inputs_a, train_inputs_b][i_class]
    examples = model_and_dist.get_examples(i_class,200)
    axes[0].scatter(var_to_np(inputs)[:,0], var_to_np(inputs)[:,1], color=seaborn.color_palette()[i_class],
                    label="Class "+ ["A", "B"][i_class])
    axes[0].scatter(var_to_np(examples)[:,0], var_to_np(examples)[:,1],
                    color=lighten_color(seaborn.color_palette()[i_class]),
                    s=3,
                    label="Fake "+ ["A", "B"][i_class])
    matched_examples = get_matched_samples(inputs, examples)
    for a,bs in zip(var_to_np(inputs), var_to_np(matched_examples)):
        for b in bs:
            axes[0].plot([a[0], b[0]], [a[1], b[1]], color='black', lw=0.25)
    radians = np.linspace(0,2*np.pi,200)
    circle_points = np.stack([np.cos(radians), np.sin(radians)], axis=-1)
    circle_th = np_to_var(circle_points, device=train_inputs_a.device, dtype=np.float32)
    mean, std = model_and_dist.dist.get_mean_std(i_class)
    circle_out= mean + (circle_th * std * 2)
    circle_in = invert(model_and_dist.model, circle_out)
    axes[0].plot(var_to_np(circle_in)[:,0], var_to_np(circle_in)[:,1],
                 alpha=1, lw=3, color=lighten_color(seaborn.color_palette()[i_class]),
                 label="Dist " + ["A", "B"][i_class])
axes[0].axis('equal')
axes[0].set_title("Input space")
axes[0].legend(ncol=3)
display_close(fig)
# +
# Re-initialise the model/distribution/optimizer from scratch with the same
# seed — presumably a fresh start for the shorter training run below
# (TODO confirm; this cell duplicates the earlier setup cell).
cuda = False
from reversible2.distribution import TwoClassIndependentDist
from reversible2.blocks import dense_add_block
from reversible2.rfft import RFFT, Interleave
from reversible2.util import set_random_seeds
from torch.nn import ConstantPad2d
import torch as th
from reversible2.splitter import SubsampleSplitter
from matplotlib.patches import Ellipse
from reversible2.gaussian import get_gauss_samples
set_random_seeds(2019011641, cuda)
# Four additive coupling blocks on 2-D inputs, 200-unit dense subnetworks.
model = nn.Sequential(
    dense_add_block(2,200),
    dense_add_block(2,200),
    dense_add_block(2,200),
    dense_add_block(2,200),
)
dist = TwoClassIndependentDist(2, truncate_to=None)
from reversible2.model_and_dist import ModelAndDist
model_and_dist = ModelAndDist(model, dist)
# Distribution parameters learn faster (1e-2) than network weights (1e-4).
optim = th.optim.Adam([{'params': model_and_dist.dist.parameters(), 'lr':1e-2},
                       {'params': list(model_and_dist.model.parameters()),
                        'lr': 1e-4,
                        'weight_decay': 0.05}])
# +
from reversible2.invert import invert
from reversible2.ot_exact import ot_euclidean_loss_for_samples
n_epochs = 10001
rand_noise_factor = 5e-2  # NOTE(review): defined but never used in this cell -- leftover?
# Training loop: per class, minimise the optimal-transport loss both in input
# space (real inputs vs inverted latent samples) and in output space (encoded
# inputs vs latent samples); the total loss is the sum over both classes.
for i_epoch in range(n_epochs):
    ots = []
    for i_class in range(2):
        inputs = [train_inputs_a, train_inputs_b][i_class]
        # Draw 100 latent samples for this class and map them back to input space.
        out_samples = model_and_dist.dist.get_samples(i_class, 100)
        in_samples = invert(model_and_dist.model, out_samples)
        ot = ot_euclidean_loss_for_samples(inputs, in_samples)
        ots.append(ot)
        # Same comparison in latent/output space.
        outputs = model_and_dist.model(inputs)
        ot_out = ot_euclidean_loss_for_samples(outputs, out_samples)
        ots.append(ot_out)
    loss = th.sum(th.stack(ots))
    optim.zero_grad()
    loss.backward()
    optim.step()
    # NOTE(review): looks like a debugging leftover -- stops after 100 epochs
    # although n_epochs = 10001, so the periodic plotting below (every
    # n_epochs // 20 = 500 epochs) only ever runs at epoch 0. Confirm intent.
    if i_epoch == 100:
        break
    if i_epoch % (n_epochs // 20) == 0:
        # Progress report: per-class negative log-likelihoods plus a side-by-side
        # visualisation of output space (axes[1]) and input space (axes[0]).
        tr_out_a = model_and_dist.model(train_inputs_a)
        tr_out_b = model_and_dist.model(train_inputs_b)
        mean, std = model_and_dist.dist.get_mean_std(0)
        display_text("Epoch {:d} of {:d}\nTrain A NLL {:.1E}\nTrain B NLL {:.1E}\n".format(
            i_epoch, n_epochs,
            -th.mean(model_and_dist.get_total_log_prob(0, train_inputs_a)).item(),
            -th.mean(model_and_dist.get_total_log_prob(1, train_inputs_b)).item(),
        ))
        fig, axes = plt.subplots(1,2, figsize=(12,5))
        for i_class in range(2):
            outs = [tr_out_a, tr_out_b][i_class]
            samples = model_and_dist.dist.get_samples(i_class,200)
            axes[1].scatter(var_to_np(samples)[:,0], var_to_np(samples)[:,1],
                            color=lighten_color(seaborn.color_palette()[i_class]),
                            s=3,
                            label="Fake "+ ["A", "B"][i_class])
            axes[1].scatter(var_to_np(outs)[:,0], var_to_np(outs)[:,1], color=seaborn.color_palette()[i_class])
            mean, std = model_and_dist.dist.get_mean_std(i_class)
            # 2-sigma ellipse of the class distribution in output space.
            ellipse = Ellipse(var_to_np(mean), var_to_np(std[0] * 2), var_to_np(std[1] * 2),
                              edgecolor=lighten_color(seaborn.color_palette()[i_class]),
                              facecolor='None', lw=3)
            axes[1].add_artist(ellipse)
        axes[1].axis('equal')
        axes[1].set_title("Output space")
        for i_class in range(2):
            inputs = [train_inputs_a, train_inputs_b][i_class]
            examples = model_and_dist.get_examples(i_class,200)
            axes[0].scatter(var_to_np(inputs)[:,0], var_to_np(inputs)[:,1], color=seaborn.color_palette()[i_class],
                            label="Class "+ ["A", "B"][i_class])
            axes[0].scatter(var_to_np(examples)[:,0], var_to_np(examples)[:,1],
                            color=lighten_color(seaborn.color_palette()[i_class]),
                            s=3,
                            label="Fake "+ ["A", "B"][i_class])
            # Map the latent 2-sigma circle back through the model to draw the
            # distribution contour in input space.
            radians = np.linspace(0,2*np.pi,200)
            circle_points = np.stack([np.cos(radians), np.sin(radians)], axis=-1)
            circle_th = np_to_var(circle_points, device=train_inputs_a.device, dtype=np.float32)
            mean, std = model_and_dist.dist.get_mean_std(i_class)
            circle_out= mean + (circle_th * std * 2)
            circle_in = invert(model_and_dist.model, circle_out)
            axes[0].plot(var_to_np(circle_in)[:,0], var_to_np(circle_in)[:,1],
                         alpha=1, lw=3, color=lighten_color(seaborn.color_palette()[i_class]),
                         label="Dist " + ["A", "B"][i_class])
        axes[0].axis('equal')
        axes[0].set_title("Input space")
        axes[0].legend(ncol=3)
        display_close(fig)
# +
# Final evaluation after training: same visualisation as the periodic plots in
# the training loop, plus thin black lines connecting each real point to its
# OT-matched generated samples (via get_matched_samples).
tr_out_a = model_and_dist.model(train_inputs_a)
tr_out_b = model_and_dist.model(train_inputs_b)
mean, std = model_and_dist.dist.get_mean_std(0)
display_text("Epoch {:d} of {:d}\nTrain A NLL {:.1E}\nTrain B NLL {:.1E}\n".format(
    i_epoch, n_epochs,
    -th.mean(model_and_dist.get_total_log_prob(0, train_inputs_a)).item(),
    -th.mean(model_and_dist.get_total_log_prob(1, train_inputs_b)).item(),
))
fig, axes = plt.subplots(1,2, figsize=(12,5))
for i_class in range(2):
    outs = [tr_out_a, tr_out_b][i_class]
    samples = model_and_dist.dist.get_samples(i_class,200)
    axes[1].scatter(var_to_np(samples)[:,0], var_to_np(samples)[:,1],
                    color=lighten_color(seaborn.color_palette()[i_class]),
                    s=3,
                    label="Fake "+ ["A", "B"][i_class])
    axes[1].scatter(var_to_np(outs)[:,0], var_to_np(outs)[:,1], color=seaborn.color_palette()[i_class])
    matched_examples = get_matched_samples(outs, samples)
    # Connect each encoded training point to its matched latent samples.
    for a,bs in zip(var_to_np(outs), var_to_np(matched_examples)):
        for b in bs:
            axes[1].plot([a[0], b[0]], [a[1], b[1]], color='black', lw=0.25)
    mean, std = model_and_dist.dist.get_mean_std(i_class)
    # 2-sigma ellipse of the class distribution in output space.
    ellipse = Ellipse(var_to_np(mean), var_to_np(std[0] * 2), var_to_np(std[1] * 2),
                      edgecolor=lighten_color(seaborn.color_palette()[i_class]),
                      facecolor='None', lw=3)
    axes[1].add_artist(ellipse)
axes[1].axis('equal')
axes[1].set_title("Output space")
for i_class in range(2):
    inputs = [train_inputs_a, train_inputs_b][i_class]
    examples = model_and_dist.get_examples(i_class,200)
    axes[0].scatter(var_to_np(inputs)[:,0], var_to_np(inputs)[:,1], color=seaborn.color_palette()[i_class],
                    label="Class "+ ["A", "B"][i_class])
    axes[0].scatter(var_to_np(examples)[:,0], var_to_np(examples)[:,1],
                    color=lighten_color(seaborn.color_palette()[i_class]),
                    s=3,
                    label="Fake "+ ["A", "B"][i_class])
    matched_examples = get_matched_samples(inputs, examples)
    # Connect each real input to its matched generated examples.
    for a,bs in zip(var_to_np(inputs), var_to_np(matched_examples)):
        for b in bs:
            axes[0].plot([a[0], b[0]], [a[1], b[1]], color='black', lw=0.25)
    # Latent 2-sigma circle mapped back to input space as a contour.
    radians = np.linspace(0,2*np.pi,200)
    circle_points = np.stack([np.cos(radians), np.sin(radians)], axis=-1)
    circle_th = np_to_var(circle_points, device=train_inputs_a.device, dtype=np.float32)
    mean, std = model_and_dist.dist.get_mean_std(i_class)
    circle_out= mean + (circle_th * std * 2)
    circle_in = invert(model_and_dist.model, circle_out)
    axes[0].plot(var_to_np(circle_in)[:,0], var_to_np(circle_in)[:,1],
                 alpha=1, lw=3, color=lighten_color(seaborn.color_palette()[i_class]),
                 label="Dist " + ["A", "B"][i_class])
axes[0].axis('equal')
axes[0].set_title("Input space")
axes[0].legend(ncol=3)
display_close(fig)
# -
# Leftover interactive inspection lines (use the last loop iteration's
# `inputs`/`examples`); harmless but can be removed.
inputs.shape
examples.shape
matched_examples = get_matched_samples(inputs, examples)
| notebooks/toy-1d-2d-examples/Heidelberg_Two_Moons_OT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Objectives
# In this notebook, we will use the predictive probabilities we got from the CNN and combine them with the angle to create new features. We will examine the properties of these features.
# We will use the features we generated in this notebook to build models to boost the performance.
# numpy and pandas for dataframe manipulation
import pandas as pd
import numpy as np
import random
# sklearn.metrics to access the properties of the new features
from sklearn.metrics import roc_curve,auc,log_loss,auc,accuracy_score
from sklearn.neighbors import KNeighborsRegressor
# matplotlib and seaborn for visualisation
import matplotlib.pyplot as plt
import seaborn as sns
from time import time
# set the random seed
# Seed both numpy and the stdlib RNG so the notebook is reproducible.
seed = 1234
np.random.seed(seed)
random.seed(seed)
# load the angle and probability of the training set
train_angle_prob = pd.read_csv('angle/train_angle_prob.csv')
train_angle_prob.head()
# load the angle and probability of the test set
test_angle_prob = pd.read_csv('angle/test_angle_prob.csv',index_col = 0)
test_angle_prob.head()
# concatenate the train and test set to form a new dataframe, we will do group operations using this new dataframe
# (test rows have no label, so 'is_iceberg' is NaN for them after the concat).
df = pd.concat([train_angle_prob,test_angle_prob],axis = 0,ignore_index = True)
print('the shape of concatenated dataframe is',df.shape)
df.head()
# ### Compute the group mean, group median and group count of the probabilities, we group the dataframe according to the value of the inc_angle.
# gmean is the group mean
# NOTE(review): .mean()/.median() on the full frame rely on older pandas
# silently dropping non-numeric columns (e.g. 'id'); newer pandas raises here.
gmean = df.groupby('inc_angle',as_index = False).mean()
del gmean['is_iceberg']
# After deleting 'is_iceberg', the remaining aggregated column is the mean
# ice_prob; rename it to 'gmean'.
gmean.columns = ['inc_angle','gmean']
# NOTE: pd.merge re-sorts/reorders rows -- the original order is restored below.
df = pd.merge(df,gmean,on = 'inc_angle')
print('the shape of concatenated dataframe is',df.shape)
df.head()
# gmedian is the group median
gmedian = df.groupby('inc_angle',as_index = False).median()
del gmedian['is_iceberg']
del gmedian['gmean']
gmedian.columns = ['inc_angle','gmedian']
df = pd.merge(df,gmedian,on = 'inc_angle')
print('the shape of concatenated dataframe is',df.shape)
df.head()
# gcount is the group count
gcount = df.groupby('inc_angle',as_index = False).count()
del gcount['ice_prob']
del gcount['is_iceberg']
del gcount['gmean']
del gcount['gmedian']
gcount.columns = ['inc_angle','gcount']
df = pd.merge(df,gcount,on = 'inc_angle')
print('the shape of concatenated dataframe is',df.shape)
df.head()
# ### Restore the order of the train and test set
# The group operations have disrupted the order of the training and test set, we should restore the original order.
# NOTE(review): pd.concat inside a loop is O(n^2); an id-indexed
# df.set_index('id').loc[...] lookup would be linear and equivalent.
train = pd.DataFrame()
for n in range(len(train_angle_prob)):
    train = pd.concat([train,df[df['id']==train_angle_prob['id'][n]]],axis = 0)
train.index = range(len(train_angle_prob))
print('the shape of train set is',train.shape)
train.head()
test = pd.DataFrame()
for n in range(len(test_angle_prob)):
    test = pd.concat([test,df[df['id']==test_angle_prob['id'][n]]],axis = 0)
test.index = range(len(test_angle_prob))
print('the shape of test set is',test.shape)
test.head()
# Persist the engineered features for the modelling notebooks.
train.to_csv('new_features/new_features_train.csv',index = False)
test.to_csv('new_features/new_features_test.csv',index = False)
# ### Visualize the new features
# +
def plot_roc(y_true, y_pred, label):
    """Draw one ROC curve (AUC shown in the legend) on the current axes.

    y_true -- binary ground-truth labels (cast to int before scoring)
    y_pred -- predicted scores/probabilities for the positive class
    label  -- feature name, used in the plot title
    """
    false_pos, true_pos, _ = roc_curve(y_true.astype('int'), y_pred,
                                       pos_label=1, drop_intermediate=False)
    area = auc(false_pos, true_pos)
    line_width = 2
    plt.plot(false_pos, true_pos, color='darkorange', lw=line_width,
             label='ROC curve (area = %0.2f)' % area)
    # Diagonal chance line for reference.
    plt.plot([0, 1], [0, 1], color='navy', lw=line_width, linestyle='--')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC curve of ' + label)
    plt.legend(loc='lower right')
# Draw one ROC subplot (2x2 grid) per engineered feature against the labels.
labels=['ice_prob', 'gmean', 'gmedian', 'gcount']
fig = plt.figure(figsize = (10,10))
y_true = train['is_iceberg']
n_ax = 1
for label in labels:
    # 220+n_ax encodes the 2x2 grid position (221, 222, ...).
    fig.add_subplot(220+n_ax)
    y_pred = train[label]
    plot_roc(y_true,y_pred,label)
    n_ax = n_ax + 1
plt.show()
# -
# ### Use the labels of the training set to improve the precision of prediction
# We already know the labels of the training set. We can use the label to improve the group prediction.
# we only need to tweak the values of the group that contains more than one member
data = pd.concat([train,test],axis = 0,ignore_index = True)
ls = data.groupby('inc_angle').count()['ice_prob']>1
# the angle_lst is to store the groups that have more than one member
angle_lst = ls[ls].index
print(angle_lst)
# Recompute gmean per multi-member group: a row's own ice_prob is averaged
# with, for every other row in the group, the true label when known and the
# predicted probability otherwise.
# Positional columns: iloc[:,0] appears to be ice_prob and iloc[:,3]
# is_iceberg -- TODO confirm against the column order of `data`.
for l in angle_lst:
    temp = data[data['inc_angle']==l]
    res = []
    for m in range(temp.shape[0]):
        a = temp.loc[temp.index[m],'ice_prob']
        temp2 = temp.drop(index = temp.index[m])
        # NOTE(review): this inner loop shadows the outer loop variable `m`;
        # it works (the outer range iterator is unaffected) but is confusing.
        for m in range(len(temp2)):
            if np.isnan(temp2.iloc[m,3]):
                a = a + temp2.iloc[m,0]
            else:
                a = a + temp2.iloc[m,3]
        a = a/len(temp)
        res.append(a)
    res = np.array(res)
    data.loc[data['inc_angle']==l,'gmean'] = res
# Same label-aware correction as above, but for the group median: each row's
# own predicted ice_prob is combined with, for every other row of its
# inc_angle group, the true label (column 3) when it is known and the
# predicted probability (column 0) otherwise.
for angle in angle_lst:
    group = data[data['inc_angle'] == angle]
    medians = []
    for pos in range(group.shape[0]):
        row_label = group.index[pos]
        values = [group.loc[row_label, 'ice_prob']]
        others = group.drop(index=row_label)
        for j in range(len(others)):
            known_label = others.iloc[j, 3]
            values.append(others.iloc[j, 0] if np.isnan(known_label) else known_label)
        medians.append(np.median(np.array(values)))
    data.loc[data['inc_angle'] == angle, 'gmedian'] = np.array(medians)
t0 = time()
score = []
# Leave-one-out search over k for a distance-weighted KNN regressor on
# inc_angle alone, scored by log-loss on the training rows.
for k in range(1,100):
    a = []
    for n in range(len(data)):
        temp = data.drop(index = n)
        # Rows < 1471 appear to be the training portion -- TODO confirm size.
        # NOTE(review): for n >= 1471 the `if` never fires, so `y` keeps the
        # value built in the last training iteration (n == 1470); this looks
        # unintended -- verify.
        if n<1471:
            y1 = temp.iloc[:1470,3]   # true labels for remaining train rows
            y2 = temp.iloc[1470:,0]   # predicted probs for test rows
            y = np.concatenate((y1,y2))
        X = np.expand_dims(temp.iloc[:,2],axis = 1)
        reg = KNeighborsRegressor(n_neighbors=k, weights='distance', algorithm='brute')
        reg.fit(X,y)
        a.append(reg.predict(np.array([[data.loc[n,'inc_angle']]]))[0])
    a = np.array(a)
    # Clip exact 0/1 so log_loss stays finite.
    a[a==0]=0.001
    a[a==1]=0.999
    score.append(log_loss(train.iloc[:,3],a[:len(train)]))
print(time() - t0)
plt.plot(np.arange(1,100),np.array(score))
plt.title('the log_loss of KNN')
plt.xlabel('n_neighbores')
plt.ylabel('log_loss')
plt.show()
# Best k (index of minimum log-loss, offset back into the 1..99 range).
range(1,100)[np.argmin(np.array(score))]
t0 = time()
# Final leave-one-out pass with the best k found above (43), writing the
# prediction into a new 'KNN_prob' column.
for n in range(len(data)):
    temp = data.drop(index = n)
    # Same construction (and same stale-`y` caveat for n >= 1471) as the
    # grid-search cell above.
    if n<1471:
        y1 = temp.iloc[:1470,3]
        y2 = temp.iloc[1470:,0]
        y = np.concatenate((y1,y2))
    X = np.expand_dims(temp.iloc[:,2],axis = 1)
    reg = KNeighborsRegressor(n_neighbors=43, weights='distance', algorithm='brute')
    reg.fit(X,y)
    data.loc[n,'KNN_prob'] = reg.predict(np.array([[data.loc[n,'inc_angle']]]))
print(time() - t0)
# Split back: rows with a label are train, rows with NaN 'is_iceberg' are test.
train = data.dropna()
train.head()
test = data[data['is_iceberg'].isnull()]
test.head()
# +
# ROC curves for all five features, including the new KNN_prob (3x2 grid).
labels=['ice_prob', 'gmean', 'gmedian', 'gcount','KNN_prob']
fig = plt.figure(figsize = (10,16))
y_true = train['is_iceberg']
n_ax = 1
for label in labels:
    fig.add_subplot(320+n_ax)
    y_pred = train[label]
    plot_roc(y_true,y_pred,label)
    n_ax = n_ax + 1
plt.show()
# +
# Same plot without ice_prob, to compare the derived features on their own.
labels=[ 'gmean', 'gmedian', 'gcount','KNN_prob']
fig = plt.figure(figsize = (10,16))
y_true = train['is_iceberg']
n_ax = 1
for label in labels:
    fig.add_subplot(320+n_ax)
    y_pred = train[label]
    plot_roc(y_true,y_pred,label)
    n_ax = n_ax + 1
plt.show()
# +
def plot_dist(prob):
    """Plot a histogram/KDE of the given probability column (20 bins)."""
    # NOTE(review): sns.distplot is deprecated in newer seaborn -- histplot/
    # displot is the replacement if seaborn is upgraded.
    sns.distplot(prob,bins = 20)
# Distributions of the probability features on the training set.
labels=['ice_prob', 'gmean', 'gmedian','KNN_prob']
fig = plt.figure(figsize = (10,10))
n_ax = 1
for label in labels:
    fig.add_subplot(220+n_ax)
    prob = train[label]
    plot_dist(prob)
    plt.ylim([0,8])
    n_ax = n_ax + 1
plt.show()
# +
def plot_dist(prob):
    """Plot a histogram/KDE of the given probability column (20 bins)."""
    sns.distplot(prob,bins = 20)
# Same distributions, but over train + test combined (`data`).
labels=['ice_prob', 'gmean', 'gmedian','KNN_prob']
fig = plt.figure(figsize = (10,10))
n_ax = 1
for label in labels:
    fig.add_subplot(220+n_ax)
    prob = data[label]
    plot_dist(prob)
    plt.ylim([0,8])
    n_ax = n_ax + 1
plt.show()
# -
| 3_create_new_features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Observations and Insights
# 1.Ramicane and Capomulin were the most successful drugs in reducing tumor growth
# 2.There is a positive correlation between average tumor volume and mouse weight
# 3.There is no notable difference in tumor growth in male and female mice
#
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import scipy.stats as st
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset
# Outer merge on 'Mouse ID' keeps every measurement row alongside its metadata.
combined_data = pd.merge(mouse_metadata, study_results,
                         how='outer', on='Mouse ID')
# Display the data table for preview
combined_data
# -
# Checking the number of mice.
mouse_count=combined_data['Mouse ID'].nunique()
mouse_count
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
duplicate_mice = combined_data.loc[combined_data.duplicated(subset=['Mouse ID', 'Timepoint',]),'Mouse ID'].unique()
duplicate_mice
# Optional: Get all the data for the duplicate mouse ID.
# "g989" is the duplicated mouse found above -- hardcoded from that result.
duplicate_id = combined_data.loc[combined_data["Mouse ID"] == "g989"]
duplicate_id
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
# Note: this keeps combined_data's original row index (no reset), which later
# cells rely on for boolean-mask alignment.
clean_data = combined_data[combined_data["Mouse ID"] !="g989"]
clean_data
# +
# Checking the number of mice in the clean DataFrame.
#unique_mouse_count=remove_duplicate_mice['Mouse ID'].nunique()
#unique_mouse_count
clean_mouse_count=clean_data['Mouse ID'].nunique()
clean_mouse_count
# -
# ## Summary Statistics
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Use groupby and summary statistical methods to calculate the following properties of each drug regimen:
# mean, median, variance, standard deviation, and SEM of the tumor volume.
# Assemble the resulting series into a single summary dataframe.
summary_data = clean_data[["Drug Regimen", "Tumor Volume (mm3)"]]
group_by_data = summary_data.groupby(["Drug Regimen"])
# NOTE(review): "Standard Variance" is actually the standard deviation
# (.std()); the column name is misleading. Also, aggregating with
# pd.Series.mode can return an array for multimodal groups, which would break
# the .map(format) -- presumably each group here is unimodal; verify.
summary_df = pd.DataFrame({
    "Mean": group_by_data["Tumor Volume (mm3)"].mean().map('{:.2f}'.format),
    "Median": group_by_data["Tumor Volume (mm3)"].median().map('{:.2f}'.format),
    "Mode": group_by_data["Tumor Volume (mm3)"].agg(pd.Series.mode).map('{:.2f}'.format),
    "Variance": group_by_data["Tumor Volume (mm3)"].var().map('{:.2f}'.format),
    "Standard Variance": group_by_data["Tumor Volume (mm3)"].std().map('{:.2f}'.format),
    "SEM": group_by_data["Tumor Volume (mm3)"].sem().map('{:.2f}'.format)
})
summary_df.head()
# -
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
summary_df.head()
# Using the aggregation method, produce the same summary statistics in a single line
clean_data.groupby('Drug Regimen').aggregate(['min', np.median, max])
#df.groupby('key').aggregate(['min', np.median, max])
# ## Bar and Pie Charts
# +
# Generate a bar plot showing the total number of unique mice tested on each drug regimen using pandas.
# NOTE(review): .count() counts measurement rows per regimen, not unique mice
# (the axis label "Number of Data Points" matches the code, the comment above
# does not).
datapts2 = clean_data.groupby("Drug Regimen").count()['Tumor Volume (mm3)']
forpanbar = pd.DataFrame(datapts2)
also = forpanbar.sort_values(by='Tumor Volume (mm3)',ascending=False).plot.bar(legend=False,rot=50)
also
plt.ylabel("Number of Data Points")
plt.title("Data Points Per Drug Treatment Regimen")
# +
# Generate a bar plot showing the total number of unique mice tested on each drug regimen using pyplot.
x_axis = np.arange(len(datapts2))
tick_locations = [x for x in x_axis]
#for x in x_axis:
#tick_locations.append(x)
plt.figure(figsize=(5,3))
newtry = forpanbar.reset_index()
newsort=newtry.sort_values('Tumor Volume (mm3)',ascending=False)
plt.bar(x_axis, newsort['Tumor Volume (mm3)'],alpha=0.75, align="center")
plt.xticks(tick_locations, newsort['Drug Regimen'],rotation="vertical")
plt.xlim(-0.75, len(datapts2)-.25)
plt.ylim(0, 250)
plt.title("Data Points Per Drug Treatment Regimen")
plt.xlabel("Drug Regimen")
plt.ylabel("Number of Data Points")
plt.show()
# -
# -
# Generate a pie plot showing the distribution of female versus male mice using pandas
mice_count = clean_data["Sex"].value_counts()
plt.figure()
mice_count.plot(kind="pie", autopct='%1.1f%%')
plt.tight_layout()
plt.axis("equal")
plt.title("Distribution of female versus male mice")
plt.show()
# Generate a pie plot showing the distribution of female versus male mice using pyplot
colors = ["red","blue"]
explode = (0.1,0)
plt.figure()
plt.pie(mice_count.values, explode=explode, labels=mice_count.index.values, colors=colors,
autopct="%1.1f%%", shadow=True, startangle=140)
# Create axes which are equal so we have a perfect circle
plt.axis("equal")
plt.title("Distribution of female versus male mice")
plt.show()
# ## Quartiles, Outliers and Boxplots
# +
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
regimen_data = clean_data[(clean_data["Drug Regimen"] == "Capomulin") |
                          (clean_data["Drug Regimen"] == "Ramicane") |
                          (clean_data["Drug Regimen"] == "Infubinol") |
                          (clean_data["Drug Regimen"] == "Ceftamin")]
regimen_data = regimen_data[["Mouse ID", "Drug Regimen", "Tumor Volume (mm3)"]]
regimen_data = regimen_data.groupby(["Mouse ID", "Drug Regimen"])
# NOTE(review): .sum() adds tumor volume over *all* timepoints per mouse,
# which is not the "final tumor volume" the comment above describes; the
# per-regimen cells below use max-Timepoint + merge instead. Confirm intent.
regimen_df = regimen_data["Tumor Volume (mm3)"].sum().to_frame()
regimen_plot = regimen_df["Tumor Volume (mm3)"]
regimen_df.head()
# quartile calculations
quartiles = regimen_plot.quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
print(f"The lower quartile of Tumor Volume (mm3) is: {lowerq}")
print(f"The upper quartile of Tumor Volume (mm3) is: {upperq}")
print(f"The interquartile range of Tumor Volume (mm3) is: {iqr}")
print(f"The the median of Tumor Volume (mm3) is: {quartiles[0.5]} ")
# Tukey fences: values beyond 1.5 * IQR outside the quartiles are outliers.
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
print(f"Values below {lower_bound} could be outliers.")
print(f"Values above {upper_bound} could be outliers.")
# Start by getting the last (greatest) timepoint for each mouse
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
# +
# Put treatments into a list for for loop (and later for plot labels)
# NOTE(review): these masks are built from combined_data but applied to
# clean_data via .loc; this works only because clean_data kept
# combined_data's row index (no reset), so pandas can align the boolean
# Series. Masking clean_data directly would be safer.
capomulin_df = clean_data.loc[combined_data["Drug Regimen"] == "Capomulin",:]
ramicane_df = clean_data.loc[combined_data["Drug Regimen"] == "Ramicane", :]
infubinol_df = clean_data.loc[combined_data["Drug Regimen"] == "Infubinol", :]
ceftamin_df = clean_data.loc[combined_data["Drug Regimen"] == "Ceftamin", :]
# Create empty list to fill with tumor vol data (for plotting)
# Calculate the IQR and quantitatively determine if there are any potential outliers.
# Locate the rows which contain mice on each drug and get the tumor volumes
# add subset
# Determine outliers using upper and lower bounds
# -
# Capomulin
# Last recorded timepoint per mouse, merged back to fetch its tumor volume.
capomulin_last = capomulin_df.groupby('Mouse ID').max()['Timepoint']
capomulin_vol = pd.DataFrame(capomulin_last)
capomulin_merge = pd.merge(capomulin_vol, combined_data, on=("Mouse ID","Timepoint"),how="left")
capomulin_merge.head()
# +
# Capomulin quartiles and IQR
# Determine outliers using upper and lower bounds
# (These four near-identical cells could be factored into one helper taking
# the merged frame and the drug name.)
capomulin_tumors = capomulin_merge["Tumor Volume (mm3)"]
quartiles =capomulin_tumors.quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
print(f"The lower quartile of Capomulin tumors: {lowerq}")
print(f"The upper quartile of Capomulin tumors: {upperq}")
print(f"The interquartile range of Capomulin tumors: {iqr}")
print(f"The median of Capomulin tumors: {quartiles[0.5]} ")
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
print(f"Values below {lower_bound} could be outliers.")
print(f"Values above {upper_bound} could be outliers.")
# +
# Ramicane
# Last timepoint per mouse, merged back for its tumor volume.
ramicane_last = ramicane_df.groupby('Mouse ID').max()['Timepoint']
ramicane_vol = pd.DataFrame(ramicane_last)
ramicane_merge = pd.merge(ramicane_vol, combined_data, on=("Mouse ID","Timepoint"),how="left")
ramicane_merge.head()
# +
# Ramicane quartiles and IQR
# Determine outliers using upper and lower bounds
ramicane_tumors = ramicane_merge["Tumor Volume (mm3)"]
quartiles =ramicane_tumors.quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
print(f"The lower quartile of Ramicane tumors is: {lowerq}")
print(f"The upper quartile of Ramicane tumors is: {upperq}")
print(f"The interquartile range of Ramicane tumors is: {iqr}")
print(f"The median of Ramicane tumors is: {quartiles[0.5]} ")
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
print(f"Values below {lower_bound} could be outliers.")
print(f"Values above {upper_bound} could be outliers.")
# +
# Infubinol
# Determine outliers using upper and lower bounds
infubinol_last = infubinol_df.groupby('Mouse ID').max()['Timepoint']
infubinol_vol = pd.DataFrame(infubinol_last)
infubinol_merge = pd.merge(infubinol_vol, combined_data, on=("Mouse ID","Timepoint"),how="left")
infubinol_merge.head()
# +
# Infubinol quartiles and IQR
# Determine outliers using upper and lower bounds
infubinol_tumors = infubinol_merge["Tumor Volume (mm3)"]
quartiles =infubinol_tumors.quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
print(f"The lower quartile of Infubinol tumors is: {lowerq}")
print(f"The upper quartile of Infubinol tumors is: {upperq}")
print(f"The interquartile range of Infubinol tumors is: {iqr}")
print(f"The median of Infubinol tumors is: {quartiles[0.5]} ")
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
print(f"Values below {lower_bound} could be outliers.")
print(f"Values above {upper_bound} could be outliers.")
# -
# Ceftamin
# Last timepoint per mouse, merged back for its tumor volume.
ceftamin_last = ceftamin_df.groupby('Mouse ID').max()['Timepoint']
ceftamin_vol = pd.DataFrame(ceftamin_last)
ceftamin_merge = pd.merge(ceftamin_vol, combined_data, on=("Mouse ID","Timepoint"),how="left")
ceftamin_merge.head()
# +
# Ceftamin quartiles and IQR
# Determine outliers using upper and lower bounds
ceftamin_tumors = ceftamin_merge["Tumor Volume (mm3)"]
quartiles = ceftamin_tumors.quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
# Fixed copy-paste messages: these previously said "treatment"/"temperatures"
# (and doubled "The the") instead of "Ceftamin tumors", inconsistent with the
# Capomulin/Ramicane/Infubinol cells above.
print(f"The lower quartile of Ceftamin tumors is: {lowerq}")
print(f"The upper quartile of Ceftamin tumors is: {upperq}")
print(f"The interquartile range of Ceftamin tumors is: {iqr}")
print(f"The median of Ceftamin tumors is: {quartiles[0.5]} ")
# Determine outliers using upper and lower bounds (Tukey fences at 1.5 * IQR)
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
print(f"Values below {lower_bound} could be outliers.")
print(f"Values above {upper_bound} could be outliers.")
# +
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
data_to_plot = [capomulin_tumors, ramicane_tumors, infubinol_tumors, ceftamin_tumors]
Regimen= ['Capomulin', 'Ramicane', 'Infubinol','Ceftamin']
fig1, ax1 = plt.subplots(figsize=(15, 10))
ax1.set_title('Tumor Volume at Selected Mouse',fontsize =25)
ax1.set_ylabel('Final Tumor Volume (mm3)',fontsize = 14)
ax1.set_xlabel('Drug Regimen',fontsize = 14)
ax1.boxplot(data_to_plot, labels=Regimen, widths = 0.4, vert=True)
plt.ylim(10, 80)
plt.show()
# -
# ## Line and Scatter Plots
# +
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
# Mouse "l509" is hardcoded as the example subject.
tumor_data = clean_data[(clean_data["Drug Regimen"] == "Capomulin") & (clean_data["Mouse ID"] == "l509")]
tumor_data = tumor_data.groupby(["Drug Regimen", "Timepoint"])
tumor_data_df = tumor_data["Tumor Volume (mm3)"].mean().to_frame()
tumor_index_df = tumor_data_df.unstack(0)
tumor_plot_df = tumor_index_df["Tumor Volume (mm3)"]
# NOTE(review): hardcoded x values assume l509 has exactly these ten
# timepoints -- the plot call fails if any are missing; verify.
xAxis = [0,5,10,15,20,25,30,35,40,45]
plt.figure()
tumor_total, = plt.plot(xAxis, tumor_plot_df["Capomulin"], marker= "o", color="blue", label="Total Tumor Volume" )
plt.legend(handles=[tumor_total], loc="best")
plt.title("Time Point Vs Average Tumor Volume")
# plt.xlim(-10, 50)
# plt.ylim(30, 50)
plt.xlabel("Time Point")
plt.ylabel("Average Tumor Volume (mm3)")
plt.grid(True)
plt.show()
# -
# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen
# (Same combined_data-mask-on-clean_data alignment pattern as earlier cells.)
scatter_data = clean_data[(combined_data["Drug Regimen"] == "Capomulin")]
scatter_data = scatter_data[["Mouse ID", "Weight (g)", "Tumor Volume (mm3)"]]
group_by_data = scatter_data.groupby(["Mouse ID","Weight (g)"])
scatter_data_df = group_by_data["Tumor Volume (mm3)"].mean().to_frame()
scatter_data_df = scatter_data_df.reset_index()
scatter_data_df.head()
# +
#Plot
plt.figure()
scatter_data_df.plot(x="Weight (g)", y="Tumor Volume (mm3)", kind = "scatter", marker="o")
plt.title("Mouse Weight Vs Average Tumor Volume")
plt.xlabel("Mouse Weight")
plt.ylabel("Average Tumor Volume (mm3)")
plt.grid(True)
plt.show()
# -
# ## Correlation and Regression
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
x_values = scatter_data_df["Weight (g)"]
y_values = scatter_data_df["Tumor Volume (mm3)"]
plt.figure()
(slope, intercept, rvalue, pvalue, stderr) = st.linregress(x_values, y_values)
regress_values = x_values * slope + intercept
# NOTE(review): line_eq is built but never displayed (e.g. via plt.annotate).
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.title("Mouse Weight Vs Average Tumor Volume")
plt.xlabel("Mouse Weight")
plt.ylabel("Average Tumor Volume (mm3)")
plt.grid(True)
plt.show()
| Pymaceuticals/pymaceuticals_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 (''.venv'': venv)'
# name: pythonjvsc74a57bd071f3d0049937cc818ce2f9b352ba946595cd1ac61856b9bde9007f94463dcc87
# ---
from glob import glob
import pandas as pd
from pathlib import Path
import os
import cv2
# +
def write_ardis(path_to_ardis, binarize=True,
                out_root='/home/erik/Riksarkivet/Projects/handwritten-text-recognition/raw/ardis'):
    """Export ARDIS date-string crops and their ground-truth transcriptions.

    Walks every part directory under ``path_to_ardis``, looks each image's
    date string up in the part's Excel ground-truth sheet, optionally
    binarizes the image (denoise -> greyscale -> median blur -> adaptive
    Gaussian threshold), writes it under ``out_root``/binarized or
    ``out_root``/original, and appends a "<filename>\\t<date>" line to the
    part's ``*_gt.txt`` file.

    path_to_ardis -- root directory containing the 'Date Strings *' parts
    binarize      -- write binarized images when True, raw copies otherwise
    out_root      -- output root (default preserves the original hardcoded
                     path, so existing callers are unaffected)
    """
    out_dir = os.path.join(out_root, 'binarized' if binarize else 'original')
    parts = sorted(glob(path_to_ardis + '/*/'))
    for part in parts:
        gt_file = glob(os.path.join(part, '*.xlsx'))[0]
        # The sheet holds two side-by-side (filename, date) tables; read it
        # twice, keyed once on each filename column.
        df_left = pd.read_excel(gt_file, index_col=0, dtype=str)
        df_right = pd.read_excel(gt_file, index_col=1, dtype=str)
        part_name = Path(part).name.replace('Date Strings ', '')
        part_slug = part_name.replace(' ', '_')
        for img_p in glob(os.path.join(part, part_name, '**')):
            name = Path(img_p).name[:-4]  # strip the 4-char extension ('.jpg')
            # Filename prefix decides which ground-truth table applies.
            if name.startswith(('v', 'Date')):
                lookup = df_left
            elif name.startswith('T'):
                lookup = df_right
            else:
                continue
            try:
                gt = lookup.loc[name].Date
            except KeyError:  # was a bare except: -- image without a GT entry
                continue
            if len(gt) == 0:
                continue
            img = cv2.imread(img_p)
            if binarize:
                dst = cv2.fastNlMeansDenoising(img, h=31, templateWindowSize=7, searchWindowSize=21)
                img_grey = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
                img_blur = cv2.medianBlur(img_grey, 3).astype('uint8')
                img = cv2.adaptiveThreshold(img_blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                            cv2.THRESH_BINARY, 11, 2)
            cv2.imwrite(os.path.join(out_dir, part_slug, name + '.jpg'), img)
            with open(os.path.join(out_dir, part_slug + '_gt.txt'), 'a') as f:
                f.write(Path(img_p).name + '\t' + gt + '\n')
# + tags=[]
# Export both variants: raw copies first, then the binarized set.
path_to_ardis = '/home/erik/Riksarkivet/Projects/handwritten-text-recognition/data/ARDIS'
write_ardis(path_to_ardis, binarize=False)
write_ardis(path_to_ardis, binarize=True)
# + tags=["outputPrepend"]
# Rewrite as a function, binarized and not, and also write the un-binarized
# census (translated from Swedish).
# NOTE(review): superseded by write_ardis() above. `parts` is never defined
# in this notebook as ordered (only `subs` below), so re-running this cell
# raises a NameError. Also note the bare except clauses and the debug
# print('fuck') left in.
for i, part in enumerate(parts):
    if i == 0:
        continue
    gt_file = glob(os.path.join(part, '*.xlsx'))[0]
    print(gt_file)
    df_left = pd.read_excel(gt_file, index_col=0, dtype=str)
    df_right = pd.read_excel(gt_file, index_col=1, dtype=str)
    part_name = Path(part).name.replace('Date Strings ', '')
    print(part_name)
    imgs = glob(os.path.join(part, part_name, '**'))
    for img_p in imgs:
        name = Path(img_p).name[:-4]
        gt = ''
        if name.startswith('v') or name.startswith('Date'):
            try:
                gt = df_left.loc[name].Date
            except:
                print(name)
                continue
        elif name.startswith('T'):
            try:
                gt = df_right.loc[name].Date
            except:
                print('fuck')
                continue
        if len(gt) > 0:
            img = cv2.imread(img_p)
            dst = cv2.fastNlMeansDenoising(img, h=31, templateWindowSize=7, searchWindowSize=21)
            img_grey = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
            img_blur = cv2.medianBlur(img_grey,3).astype('uint8')
            img_thresh_Gaussian = cv2.adaptiveThreshold(img_blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
            img_write_path = os.path.join('/home/erik/Riksarkivet/Projects/handwritten-text-recognition/raw/ardis/binarized', part_name.replace(' ', '_'), name + '.jpg')
            cv2.imwrite(img_write_path, img_thresh_Gaussian)
            with open('/home/erik/Riksarkivet/Projects/handwritten-text-recognition/raw/ardis/binarized/' + part_name.replace(' ', '_') + '_gt.txt', 'a') as f:
                f.write(Path(img_p).name + '\t' + gt + '\n')
# -
# -
# List the part directories (appears unused below -- exploratory leftover).
subs = glob(path_to_ardis + '/*/')
subs.sort()
# +
# Sanity check: count how many binarized images were written in total.
parts_test = glob('/home/erik/Riksarkivet/Projects/handwritten-text-recognition/raw/ardis/binarized/*/')
i = 0
for p in parts_test:
    imgs_test = glob(p + '/**')
    for img in imgs_test:
        i += 1
print(i)
# -
| src/notebooks/read_ardis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## __name__ and __main__
# + active=""
# Often in Python script, you will see this...
# +
# #!/usr/bin/env python3
def main():
    """Program entry point — intentionally a no-op in this demo."""
    return None
# FUNCTIONS
# LOTS MORE FUNCTIONS
if __name__ == '__main__':
    # Runs only when executed as a script, never when imported as a module.
    main()
# -
# ### Let's define these...
# + active=""
# Top-level script: indentation level 0; In this case, if block is the top-level code.
#
# __name__: a built-in special variable which evaluates to the name of the current module
#
# '__main__': the name of the scope in which top-level code executes.
# -
# #### For example
# + active=""
# When your script is run by passing it as a command to the Python interpreter:
#
# python myscript.py
#
# All of the code that is at indentation level 0 gets executed.
#
# Functions and classes that are defined, but none of their code gets ran.
#
# Unlike other languages, there's no main() function that gets run automatically
# the main() function is implicitly all the code at the top level.
# -
# ### Where the function calls are coming from?
# + active=""
# This line of code designed to help indicate where function calls are coming from
# when working with multiple .py scripts.
#
#
# if __name__ == '__main__':
#
#
# However, if a module is being run directly (as in myscript.py above),
# then __name__ is set to the string "__main__".
#
# Thus, you can test if the module is being run directly
# or module/module's function is being used as an import.
# -
# ### one
# ! python one.py
# +
# Before the import: the name 'one' is not bound yet, so this raises NameError.
one.__name__
# -
import one
dir(one.__builtins__)
# +
# After the import: __name__ is the module's own name ('one'), not
# '__main__', because it was imported rather than run directly.
one.__name__
# -
# ### two
# ! python two.py
# +
# Before the import: the name 'two' is not bound yet --> NameError
# two.__name__
# -
import two
dir(two)
# After the import: __name__ evaluates to the module name 'two'.
two.__name__
# ### Let's explore!
# +
# vim -O one.py two.py
# +
# :new | 0read ! python one.py
#
# new - creates new split
# read - to get the command output
# command - actual command
| name_main/name_main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%% raw\n"} active=""
# # First Jupyter Notebook Tutorial With DataSpell
#
# > "A simple tutorial to help get started with Jupyter notebook and DataSpell"
#
# - toc: false
# - branch: master
# - badges: true
# - comments: true
# - categories: [fastpages, jupyter]
# - image: images/simple-tutorial/novice-to-expert.jpeg
# - hide: false
# - search_exclude: true
# - ide: DataSpell
# + active=""
# This is my first attempt at using Jupyter notebook.
#
# My goal is to learn fast.ai, but one must walk before running. Best to begin with a Jupyter notebook. I found a [tutorial](https://www.dataquest.io/blog/jupyter-notebook-tutorial/) that introduces both markdown and calculations.
#
# I also downloaded DataSpell, an early access IDE for data science from JetBrains. I think their IntelliJ IDE for Java is the best tool on the market. I wanted to see if their new tool could provide a similar boost for my venture into data science.
#
# Every language has a “Hello World”. Jupyter notebook is no different. Let’s create a cell:
# + pycharm={"name": "#%%\n"}
print('Hello World!')
# + pycharm={"name": "#%%\n"}
import time
time.sleep(3)
# -
# So far, so good. Adding cells with markdown and code. This is a nice environment. DataSpell is making it fun. Let's add a function:
# + pycharm={"name": "#%%\n"}
def say_hello(recipient):
    """Return a 'Hello, <recipient>!' greeting string."""
    greeting_template = 'Hello, {}!'
    return greeting_template.format(recipient)
say_hello('Michael')
# + [markdown] pycharm={"name": "#%% md\n"}
# Jupyter notebook does a great job of keeping code and documentation together. It's a scientific revolution. I think this is the best hope we have for maintaining that scientific ideal of making results public and easily reproducible by others.
#
# I'll have to dig into [LaTeX](https://www.mathjax.org/) for math typesetting. What does that look like?
#
# Euler's identity: $$e^{i\pi} + 1 = 0$$<br>
# $$\frac{arg 1}{arg 2}$$<br>
# $$x^2$$<br>
# $$e^{i\pi}$$<br>
# $$A_i$$<br>
# $$B_{ij}$$<br>
# $$\sqrt[n]{arg}$$<br>
#
# Here's a linear equation:
#
# $$ \hat{Y} = \hat{\beta_0} + \sum_{j=1}^p \hat{\beta}_j X_j $$<br>
#
# The tutorial has a bunch of markdown examples that aren't hard to understand. No sense memorizing these. I'll Google them when I need them.
#
# # Level 1 heading
# ## Level 2 heading
#
# Text is easy to format:
#
# **bold**
# _italic_
#
# Lists are simple:
#
# 1. First element
# 2. Second element
#
# * First bullet
# * Second bullet
#
# I created hyperlinks earlier.
#
# It's easy to add code. I hope to be able to add Python, R, and Kotlin someday soon.
#
# `fun foo()`
#
# Here's a code block:
#
# ```
# fun bar() = "This could be some Kotlin code"
# ```
#
# You can also tab in:
#
# fun baz():
# return 'A Python string'
#
# What about images? Let's add an image of a dog:
#
# 
#
# That's enough markdown. This is about code. Does DataSpell give me access to numpy?
# + pycharm={"name": "#%%\n"}
import numpy as np
def square(q):
    """Return the square of q."""
    return q ** 2
x = 4
y = square(x)
print('%d squared is %d' % (x, y))
# -
# Now I have numpy. The tutorial tells me that there are kernels for over 100 languages, including Java, C, Fortran, R, and Kotlin. There's an SoS kernel that provides multi-language support within a single notebook. I'll stick to Python for now, but I can't wait to try out both R and Kotlin.
#
# Enough about notebook mechanics. What about a real analysis?
#
# The tutorial problem uses a Fortune 500 data set.
#
# + pycharm={"name": "#%%\n"}
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")  # seaborn styling applied to all subsequent plots
# Load the Fortune 500 dataset; columns are renamed and inspected below.
df = pd.read_csv('../datasets/fortune500.csv')
df.head()
# + pycharm={"name": "#%%\n"}
df.tail()
# + pycharm={"name": "#%%\n"}
df.columns = ['year', 'rank', 'company', 'revenue', 'profit']
len(df)
# + pycharm={"name": "#%%\n"}
df.dtypes
# + [markdown] pycharm={"name": "#%% md\n"}
# Profit should be a float; instead, it's an object. There must be non-integer values in that column.
#
# + pycharm={"name": "#%%\n"}
# True where profit contains any character other than digits, '.' or '-'
non_numeric_profits = df.profit.str.contains('[^0-9.-]')
df.loc[non_numeric_profits].head()
# + pycharm={"name": "#%%\n"}
set(df.profit[non_numeric_profits])
# + [markdown] pycharm={"name": "#%% md\n"}
# There are values that are NA. How many?
# + pycharm={"name": "#%%\n"}
len(df.profit[non_numeric_profits])
# + [markdown] pycharm={"name": "#%% md\n"}
# That's a small fraction of total data set:
# + pycharm={"name": "#%%\n"}
len(df.profit[non_numeric_profits])/len(df)
# + [markdown] pycharm={"name": "#%% md\n"}
# The easiest thing to do is to discard the bad records. How are they distributed?
# + pycharm={"name": "#%%\n"}
bin_sizes, _, _ = plt.hist(df.year[non_numeric_profits], bins=range(1955,2006))
# + [markdown] pycharm={"name": "#%% md\n"}
# The most invalid cases in a single year is fewer than 25. Since there are 500 points per year, removing these values would account for less than 4% of the data for the worst years. Let's remove these rows:
# + pycharm={"name": "#%%\n"}
# Keep only rows with numeric profits: invert the boolean mask with bitwise
# NOT (~). The original used unary minus, which modern pandas rejects for
# bool-dtype Series with a TypeError.
df = df.loc[~non_numeric_profits]
df.profit = df.profit.apply(pd.to_numeric)
len(df)
# + [markdown] pycharm={"name": "#%% md\n"}
# Let's check the types:
# + pycharm={"name": "#%%\n"}
df.dtypes
# + [markdown] pycharm={"name": "#%% md\n"}
# The final presentation notebook shouldn't leave all these exploratory cells, but I want to be able to see them.
#
# Finally! It's time to analyze some data. Plot profit and revenue by year:
# + pycharm={"name": "#%%\n"}
# Average revenue and profit per year across all companies.
group_by_year = df.loc[:, ['year', 'revenue', 'profit']].groupby('year')
avgs = group_by_year.mean()
x = avgs.index
y1 = avgs.profit
def plot(x, y, ax, title, y_label):
    """Draw y against x on the given matplotlib axes with a title and y-axis label."""
    ax.set_title(title)
    ax.set_ylabel(y_label)
    ax.plot(x, y)
    ax.margins(x = 0, y = 0)  # remove default padding so the line spans the axes
fig, ax = plt.subplots()
plot(x, y1, ax, 'Increase in mean Fortune 500 profits from 1955 to 2005', 'Profit (millions)')
# + pycharm={"name": "#%%\n"}
y2 = avgs.revenue
fig, ax = plt.subplots()
plot(x, y2, ax, 'Increase in mean Fortune 500 company revenues from 1955 to 2005', 'Revenue (millions)')
# + [markdown] pycharm={"name": "#%% md\n"}
# Revenues have taken off since around 1995.
#
# Let's superimpose +/- standard deviations on top of these plots. The code comes from a post on [Stack Overflow](https://stackoverflow.com/questions/47581672/replacement-for-deprecated-tsplot/47582329#47582329)
# + pycharm={"name": "#%%\n"}
def plot_with_std(x, y, stds, ax, title, y_label):
    """Plot y vs. x with a shaded +/- one-standard-deviation band."""
    ax.fill_between(x, y - stds, y + stds, alpha=0.2)
    plot(x, y, ax, title, y_label)
fig, (ax1, ax2) = plt.subplots(ncols=2)
# '%s' placeholder is filled per subplot below ('profits' / 'revenues')
title = 'Increase in mean and std Fortune 500 company %s from 1955 to 2005'
stds1 = group_by_year.std().profit.values
stds2 = group_by_year.std().revenue.values
plot_with_std(x, y1.values, stds1, ax1, title % 'profits', 'Profit (millions)')
plot_with_std(x, y2.values, stds2, ax2, title % 'revenues', 'Revenue (millions)')
fig.set_size_inches(14, 4)
fig.tight_layout()
# + pycharm={"name": "#%% raw\n"} active=""
# Profits have been rising steadily, at a rate increased markedly around 1995.
#
#
# + pycharm={"name": "#%% raw\n"} active=""
# There are lots of winners and losers here.
#
# That's enough of this tutorial. There are others to dig into. I also have two tasks in front of me:
#
# 1. Start working through fast.ai and learn neural networks.
# 2. I've lost an electronic copy of my dissertation. I'd like to typeset it in markdown to practice LaTeX. It'd be terrific fun to rewrite the C code in Python or Kotlin.
#
#
| _notebooks/2021-05-22-jupyter-notebook-tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Laboratorio Detección de Contornos
#
# <NAME>
# <NAME>
# <NAME>
# <NAME>
# +
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
from skimage import data
from skimage.filters import gaussian, laplace, rank,median,hessian
from skimage.segmentation import active_contour, felzenszwalb, mark_boundaries
import matplotlib.patches as mpatches
import skimage.io as io
from skimage import exposure
from skimage import morphology
from skimage.morphology import erosion, dilation, opening, closing, white_tophat,black_tophat
from skimage.util import img_as_ubyte
from skimage import exposure
from skimage.draw import polygon_perimeter
from skimage.morphology import skeletonize, skeletonize_3d
from skimage.morphology import disk
from skimage.transform import rotate
from skimage.filters import threshold_otsu
# -
# http://scikit-image.org/docs/dev/auto_examples/segmentation/plot_morphsnakes.html
# ## Imagen Mariposa
# Read the butterfly image and convert it to greyscale for contour detection.
img =io.imread("tema10_act1a.png")
img = rgb2gray(img)
print("Dimensiones = "+ str(img.shape))
io.imshow(img)
io.show()
#Filtro gaussiano con forma de diamante, la más similar a la mariposa
diamond = morphology.diamond(2)
opened = opening(img, diamond)
exp = exposure.adjust_log(opened)
gaus = gaussian(exp,2)
io.imshow(gaus,cmap=plt.cm.gray)
io.show()
io.imshow(gaus,cmap=plt.cm.gray)
io.show()
# +
#s = np.linspace(0, 2*np.pi, 400)
#x = (img.shape[1]/2)+10 + 130*np.cos(s)
#y = (img.shape[0]/2)+10 + 60*np.sin(s)
#init = np.array([x, y]).T
r = np.array([10, 150, 270, 140])
c = np.array([90, 50, 90, 165])
rr, cc = polygon_perimeter(r, c)
p = np.column_stack((rr, cc))
# +
# Build a diamond-shaped polygon to initialize the active contour.
# The initial shape is chosen to roughly match the butterfly.
r = np.array([10, 150, 270, 140])
c = np.array([90, 50, 90, 165])
rr, cc = polygon_perimeter(r, c)
p = np.column_stack((rr, cc))
# Apply active-contour detection on the filtered image.
# A negative w_line attracts the snake toward dark regions.
snake = active_contour(gaus,p, alpha=0.01, beta=50, gamma=0.001, w_line=-2)
fig, (ax,ax2) = plt.subplots(figsize=(14, 14),ncols=2)
ax.imshow(img, cmap=plt.cm.gray)
ax.plot(p[:, 0], p[:, 1], '--r', lw=1)
ax.plot(snake[:, 0], snake[:, 1], '-b', lw=3)
ax2.imshow(gaus, cmap=plt.cm.gray)
ax2.plot(p[:, 0], p[:, 1], '--r', lw=1)
ax2.plot(snake[:, 0], snake[:, 1], '-b', lw=3)
ax.set_xticks([]), ax.set_yticks([])
ax.axis([0, img.shape[1], img.shape[0], 0])
plt.show()
# -
# ## Imagen Buho
from skimage.filters.rank import entropy
#Se lee la imagen
img_owl =io.imread("tema10_act1b.png")
img_owl = rgb2gray(img_owl)
print("Dimensiones = "+ str(img.shape))
#io.imshow(img)
#io.show()
# Utilizaremos la entropía para tratar de diferenciar el buho y el fondo. Los filtros no parecen adecuados ya que el tipo de distorsiones con las mismas entre la figura que queremos detectar y el entorno
# +
image_owl = img_as_ubyte(img_owl)
img_owl_entropy=entropy(image_owl,disk(5))
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 4),
sharex=True, sharey=True)
img0 = ax0.imshow(image_owl, cmap=plt.cm.gray)
ax0.set_title("Image")
ax0.axis("off")
img1 = ax1.imshow(img_owl_entropy, cmap='gray')
ax1.set_title("Entropy")
ax1.axis("off")
fig.tight_layout()
plt.show()
# +
#Inicializamos como contorno una elipse
s = np.linspace(0, 2*np.pi, 200)
x = 130 + 90*np.cos(s)
y = 100 + 60*np.sin(s)
init = np.array([x, y]).T
#Con el parámetro w_edge, le indicamos que se aproxime más a los bordes blancos, que son los que nos ha dado la entropía
snake = active_contour(img_owl_entropy,
init, alpha=0.010, beta=100, w_line=10, w_edge=10)
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 4),
sharex=True, sharey=True)
ax0.imshow(img_owl, cmap=plt.cm.gray)
ax0.plot(init[:, 0], init[:, 1], '--r', lw=3)
ax0.plot(snake[:, 0], snake[:, 1], '-b', lw=3)
#ax0.axis([0, img_owl.shape[1], img_owl.shape[0], 0])
ax0.axis("off")
ax0.set_title("Image-Contour")
ax1.imshow(img_owl_entropy, cmap=plt.cm.gray)
ax1.plot(init[:, 0], init[:, 1], '--r', lw=3)
ax1.plot(snake[:, 0], snake[:, 1], '-b', lw=3)
#ax1.axis([0, img_owl_entropy.shape[1], img_owl_entropy.shape[0], 0])
ax1.axis("off")
ax1.set_title("Entropy-Contour")
plt.show()
# +
#Se detecta el contorno de casi todo el contorno del buho, por lo que lo consideramos bastante satisfactorio
# -
# ## Imagen Caballos
# +
img_horse =io.imread("tema10_act1c.png")
img_horse = rgb2gray(img_horse)
print("Dimensiones = "+ str(img_horse.shape))
io.imshow(img_horse)
io.show()
# -
# ### Para la detección de contornos en la imagen de los caballos hemos utilizado un primer método combinando varias técnicas
# +
#Utilizamos el método Otsu, un método de valor umbral que intenta que la dispersión dentro de cada segmento
#sea lo más pequeña posible, y lo más alta posible entre segmentos diferentes
thresh = threshold_otsu(exposure.adjust_gamma(img_horse))
#Con el cierre tratamos de minimizar los huecos encontrados dentro de los caballos, por efecto de los brillos
bw = closing(img_horse > thresh, morphology.square(2))
#Empleamos la segmentación de felzenszwalb, basada en clustering
segments_fz = felzenszwalb(bw, scale=220, sigma=0.1, min_size=220)
fz_image = rgb2gray(mark_boundaries(np.zeros((185, 274)), segments_fz))
fig, (ax0,ax1,ax2,ax3) = plt.subplots(figsize=(25, 25),ncols=4)
ax0.imshow(img_horse,cmap=plt.cm.gray)
ax0.set_title("Image")
ax0.axis("off")
ax1.imshow(bw,cmap=plt.cm.gray)
ax1.set_title("Otsu")
ax1.axis("off")
ax2.imshow(fz_image,cmap=plt.cm.gray)
ax2.set_title("Felzenszwalb")
ax2.axis("off")
ax3.imshow(mark_boundaries(img_horse, segments_fz), cmap=plt.cm.gray)
ax3.set_title("Contour - Mark Boundary")
ax3.axis("off")
plt.show()
# -
# ### También hemos empleado el cálculo del gradiente de la imagen para la detección de contornos
# +
#En primer lugar suavizamos la imagen
denoised = rank.median(img_horse, disk(5))
#Calculo del gradiente de la imagen. Se han probado distintos valores como pico de cresta
markers = rank.gradient(denoised, disk(1)) <18
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 4),
sharex=True, sharey=True)
ax0.imshow(img_horse,cmap=plt.cm.gray)
#ax0.axis([0, img_owl.shape[1], img_owl.shape[0], 0])
ax0.axis("off")
ax0.set_title("Image")
ax1.imshow(markers,cmap=plt.cm.gray)
#ax1.axis([0, img_owl_entropy.shape[1], img_owl_entropy.shape[0], 0])
ax1.axis("off")
ax1.set_title("Gradient Contour")
plt.show()
# -
| Percepcion_Computacional/Actividades/Lab1/Lab_contornos_activos-morfology.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="3H_W4_BmaS9E"
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
% matplotlib inline
import math
from sklearn import linear_model
from sklearn.linear_model import SGDRegressor
from sklearn.tree import DecisionTreeClassifier
#from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
# + id="uCzF7MWoazMn" colab={"base_uri": "https://localhost:8080/"} outputId="41a9283b-0f33-428b-bef0-937251fa499d"
from google.colab import drive
drive.mount('/content/drive')
# + id="B7S2OlzLbD6a"
titanic ="/content/drive/MyDrive/Titnaic Data/train.csv"
# + id="N7H27Hv4eBvR" colab={"base_uri": "https://localhost:8080/"} outputId="b0b58b3f-0b11-4c1b-8644-4c0d83c6541c"
titanic_data = pd.read_csv(titanic)
print(" # of passengers"+str(len(titanic_data.index)))
# + id="xHMeG0xBhYjA" colab={"base_uri": "https://localhost:8080/"} outputId="bcd141a7-0240-4b16-9239-00a32946607e"
titanic_data.info()
# + id="ZZ4vgGSEhyK1" colab={"base_uri": "https://localhost:8080/", "height": 419} outputId="1c1106d3-f4f7-4ead-9a35-e676938db0ed"
titanic_data.isnull()
# + id="KG799JUtiKsb" colab={"base_uri": "https://localhost:8080/"} outputId="579cce06-4234-47b2-cb7e-df4261305dd2"
titanic_data.isnull().sum()
# + id="I2BJ3d8jiyOW"
titanic_data.drop("Cabin", axis=1,inplace=True)
# + id="E7tkwTxTjblu"
# + id="hylY8y7EjRAz" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="4c3a20c5-3c3a-46f7-e3ee-dabaefdb103f"
titanic_data.head(5)
# + id="gQ6xja5vjcTe"
titanic_data.dropna(inplace=True)
# + id="ISDWNchOjnTW" colab={"base_uri": "https://localhost:8080/"} outputId="41ede90d-97d3-410d-c0df-b181bd6fe998"
titanic_data.isnull().sum()
# + id="JNntH8BGkPsy" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="bb559e26-c42d-46b0-c92b-f2d0b5bc7024"
sex=pd.get_dummies(titanic_data["Sex"],drop_first=True)
sex.head()
# + id="gcT7iXnqli6E" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="b9839a83-67f6-4159-b305-c77da0b10b9f"
embark=pd.get_dummies(titanic_data["Embarked"],drop_first=True)
embark.head()
# + id="v9PrhnBxlxTh" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="f35c16a2-ca8e-4f3f-838d-66b3731ea90d"
pcl=pd.get_dummies(titanic_data["Pclass"],drop_first=True)
pcl.head()
# + id="cvgiNoHtmHQ4"
titanic_data=pd.concat([titanic_data,sex,embark,pcl],axis=1)
# + id="e0FG_kJvm46g"
titanic_data.drop(['PassengerId','Name','Sex','Embarked','Ticket','Pclass'],axis=1,inplace=True)
# + id="AehYEkDioSOX" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="482f3851-818a-489b-a455-ab505bf236a1"
titanic_data.head(5)
# + id="h0Vq0lNfoXQO"
y=titanic_data['Survived']
X=titanic_data.drop(['Survived'],axis=1)
# + id="hss2YaKBeWA4" colab={"base_uri": "https://localhost:8080/"} outputId="de2171a8-12ad-44f3-c942-1126150c0aad"
y.head()
# + id="kWHfkw40eY07" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="16b207f0-5756-46e5-f447-d343c30933b8"
X.head()
# + [markdown] id="qX7Y2IL814Re"
# ## Scale the Data
# + id="_YwNwv_AgRLZ"
from sklearn import preprocessing
import numpy as np
X_scaled = preprocessing.scale(X)
# + [markdown] id="TNCjmgfM16Wj"
# ## Split Train and Test
# + id="ADN6mFHSotOP"
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.30, random_state=42)
# + [markdown] id="sRZwSAXInlLa"
# ## Train
# + [markdown] id="Z8BnGCnb1yu6"
# ### Train/ Build Model Logistic Reg
#
# + id="3sw86YqBp1-5"
#Importing the simple LogisticRegression
from sklearn.linear_model import LogisticRegression
#Importing the LogisticRegression
reg = LogisticRegression()
# + id="axUnQlQoqJH8" colab={"base_uri": "https://localhost:8080/"} outputId="0a76d5cd-7444-4be6-a787-8a7ca558e473"
reg.fit(X_train,y_train)
# + [markdown] id="ckMH93BanXPc"
# ### Train Decission Tree Classifier
# + id="QPN4zWXZnWOu"
# Decision Tree
# from sklearn.tree import DecisionTreeClassifier
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train) # Train Model
y_pred = decision_tree.predict(X_test)
y_pred_prob = decision_tree.predict_proba(X_train)
# + id="XR-fBAT3pVkl" colab={"base_uri": "https://localhost:8080/"} outputId="a596084e-d3c8-43fa-d863-bbef4986375c"
y_pred[:5]
# + id="AmI6-UmJpich" colab={"base_uri": "https://localhost:8080/"} outputId="31358d8a-39d8-4c3c-9026-f74e384e920b"
y_pred_prob[:5]
# + id="RBEPVTprrYPZ" colab={"base_uri": "https://localhost:8080/"} outputId="e66f567c-27c6-40bf-beed-c733704e58f2"
from sklearn import tree
text_representation = tree.export_text(decision_tree,feature_names=list(X.columns))
print(text_representation)
# + id="75EMPOUOsUPm" colab={"base_uri": "https://localhost:8080/"} outputId="bfc57c8a-da6a-4acb-9187-d13dd628d2be"
list(X.columns)
# + id="qNWvyLNdqqkG"
# import matplotlib.pyplot as plt
# fig = plt.figure(figsize=(25,20))
# _ = tree.plot_tree(decision_tree,
# filled=True,
# feature_names=list(X.columns),
# class_names=['yes',"no"])
# + id="vtwWH5tLt9D4" colab={"base_uri": "https://localhost:8080/", "height": 328} outputId="1588873d-53bb-412b-b626-7df601652f14"
fe_imp =pd.DataFrame(decision_tree.feature_importances_,columns=['Feature_importance'])
fe_imp["feature_names"] = X.columns
fe_imp.sort_values(by="Feature_importance",ascending=False)
# + [markdown] id="d7aTuoZw1vIG"
# ## Get Predictions
# + id="AwhC0H4mwkdF"
y_pred_train = reg.predict_proba(X_train) # Get probability
# y_pred_train[:,1] # Take only second column (probability of for 1)
y_pred_train_class = reg.predict(X_train) # Get class
# + id="9mPtatIOxKw8"
y_pred_test = reg.predict_proba(X_test)
y_pred_test_class = reg.predict(X_test)
# + id="DoK-SDkLw4sb" colab={"base_uri": "https://localhost:8080/"} outputId="644ec563-ef35-4f62-efed-db26189efcb5"
y_pred_test[:,1][:5] # Takin only first 5 records int the second column
# + [markdown] id="LBDZE9yx1Rtr"
# ## Confusion Matrix
#
# + id="Z5FcgF1Ce63n" colab={"base_uri": "https://localhost:8080/"} outputId="43be1a4b-f51a-4007-c78b-c361bde7a133"
from sklearn.metrics import confusion_matrix
confusion_matrix(y_train, y_pred_train_class) # for Train data
# + id="pXoRUbHx0uoO"
TN, FP, FN, TP = confusion_matrix(y_train, y_pred_train_class).ravel()
TN, FP, FN, TP
Actual_Postives= 146
Actual_Negatives= 42
# + [markdown] id="2SFph5PT1Wgb"
# ## Accuracy Score
# + id="X5wyOlepimGq" colab={"base_uri": "https://localhost:8080/"} outputId="af0975df-330d-4f0c-c88d-19a90720a85f"
from sklearn.metrics import accuracy_score
accuracy_score(y_train, y_pred_train_class, normalize=True)
# + id="-EEJAsy1yPd2" colab={"base_uri": "https://localhost:8080/"} outputId="77a42fa9-8a69-48ac-d837-c292d65e45c5"
accuracy_score(y_train, y_pred_train_class, normalize=True)
# + [markdown] id="zHWc48Muu3NT"
# ## AUC Score
# + id="o4iXhcgSu2gk"
from sklearn.metrics import roc_curve,roc_auc_score
# + id="uh__iE8-vHPS"
# ROC curve on test data
tpr, fpr, threshold =roc_curve(y_true = y_test, y_score = y_pred_test[:,1])
# + id="Cj8aI2zVxsWu" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="26ea701e-835a-4c63-f9e1-50dcaeec7e13"
import seaborn as sns
sns.lineplot(y=fpr,x=tpr)
# + id="10gcHwuNwyxk" colab={"base_uri": "https://localhost:8080/"} outputId="34832546-5a13-42ac-9d56-55ffe165f109"
roc_auc_score(y_true = y_test, y_score = y_pred_test[:,1]) # AUC Score
# + [markdown] id="fx9ZPjfb1c-e"
# ## Assignment : Derive All Below
# True positive Rate: TP/Actual Postives
#
# False Negative rate: FN/actual Postives
#
# False Positive rate: FP/ Actual Negatives
#
# True Negative Rate: TN/Actual Negatives
#
# Precision = TP / (TP + FP)
# Recall = TP / (TP + FN)
#
# + id="31naikebNB1M"
import numpy as np
def perf_metrics_2X2(y_train,y_pred_train_class):
    """Return (TPR, FNR, FPR, TNR) computed from binary labels and predictions.

    Fix: the original ignored both arguments and read module-level globals
    (TP, FN, FP, TN and the hard-coded Actual_Postives / Actual_Negatives);
    the confusion counts are now derived from the inputs themselves.
    A rate is 0.0 when its denominator would otherwise be zero.
    """
    tp = fn = fp = tn = 0
    for truth, pred in zip(y_train, y_pred_train_class):
        if truth == 1:
            if pred == 1:
                tp += 1
            else:
                fn += 1
        elif pred == 1:
            fp += 1
        else:
            tn += 1
    positives = tp + fn
    negatives = fp + tn
    True_positive_Rate = tp / positives if positives else 0.0
    False_Negative_Rate = fn / positives if positives else 0.0
    False_Positive_Rate = fp / negatives if negatives else 0.0
    True_Negative_Rate = tn / negatives if negatives else 0.0
    return True_positive_Rate, False_Negative_Rate, False_Positive_Rate, True_Negative_Rate
# + id="waVqmuGnWfO5" colab={"base_uri": "https://localhost:8080/"} outputId="4751137e-71db-430a-bec3-5d469df4011c"
perf_metrics_2X2(y_train,y_pred_train_class)
# + id="I1GRX7IRpPst" colab={"base_uri": "https://localhost:8080/"} outputId="398ddf26-b370-4ccc-862a-510f0cb1c71e"
y_pred
| Chandan/Titanic_Data_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ---
#
# # Generate datasets
# +
# import libraries we need
# %pylab inline
import tensorflow as tf
from tensorflow import keras
import tensorflow_probability as tfp
tfd = tfp.distributions
import sys
sys.path.append('./../py_programs/')
from Simulator import *
from keract import get_activations
# -
# ### Generate data files with two fixed mixed sources, **saved in `Old_data`**
# +
# create two mixed sources and corresponding two distributions
s1 = simulator(Nbins=200, Ndet=1e6, nset=100, sps=0.7, laser=0.1, ther=0.1, non=0.1)
s2 = simulator(Nbins=200, Ndet=1e6, nset=100, sps=0.4, laser=0.2, ther=0.3, non=0.1)
# plot their piecharts to see source fractions
plt.figure(1)
s1.piechart()
plt.title('source 1')
plt.figure(2)
s2.piechart()
plt.title('source 2')
plt.show()
# -
# ---
# ---
# ### generate data with random distributions
# +
# define parameters
types = 50
basic = 4
rand = np.random.randint(low=0,high=100,size=(types,basic))
data = []
sets = 100
# generate data
for i in range(types):
randsource = simulator(Nbins=200, Ndet=1e6, nset=sets,sps=rand[i,0], laser=rand[i,1], ther=rand[i,2], non=rand[i,3])
datas = randsource.get_data(save=True, name='./../simulation/data/rand'+str(i))
data.append(datas)
data = np.array(data)
# +
# convert simulated data into correct shape
x_ = data.reshape(data.shape[0]*data.shape[1],data.shape[2],data.shape[3])
# get the ground truth
threshold = 0.5
y_ = get_truth(x_, thr=threshold)
# we only need the binvalues for training
x_ = x_[:,2,:]
# -
# # generate pure sps and pure laser
# +
# define parameters
data = []
sets = 5000
# create distributions
sps = simulator(Nbins=200, Ndet=1e6, nset=sets,sps=1., laser=0., ther=0, non=0)
laser = simulator(Nbins=200, Ndet=1e6, nset=sets,sps=0., laser=1., ther=0, non=0)
# get data files
sps_ = sps.get_data(save=True, name='./../simulation/data/sps')
laser_ = laser.get_data(save=True, name='./../simulation/data/laser')
# +
# convert simulated data into correct shape
x_ = np.concatenate([sps_,laser_])
# get the ground truth
threshold = 0.5
y_ = get_truth(x_, thr=threshold)
# we only need the g2signal for training
x_ = x_[:,2,:]
# -
# # generate half-half random distribution
# Ideally, we want to train the model with an equal number of data sets for each class. Due to the nature of light sources, simply using a random distribution cannot produce the results above, so the range of the random function is limited to ensure half-half chances in the end.
# +
# define parameters, 100 types of sources, each contains 100 data sets
types = 100
data = []
sets = 100
rsps = np.random.randint(low=0,high=450,size=types)
rlaser = np.random.randint(low=0,high=100,size=types)
rther = np.random.randint(low=0,high=100,size=types)
rnon = np.random.randint(low=0,high=100,size=types)
# generate data
for i in range(types):
randsource = simulator(Nbins=200, Ndet=1e6, nset=sets,sps=rsps[i], laser=rlaser[i], ther=rther[i], non=rnon[i])
datas = randsource.get_data(save=True, name='./../simulation/data/halfhalf_rand/'+str(i))
data.append(datas)
data = np.array(data)
# +
# convert simulated data into correct shape
x_ = data.reshape(data.shape[0]*data.shape[1],data.shape[2],data.shape[3])
# get the ground truth
threshold = 0.5
y_ = get_truth(x_, thr=threshold)
# we only need the g2signal for training
x_ = x_[:,2,:]
# -
# # Alternatively we can **load data from files**
# +
# load the data from files
filenames = []
x_ = []
y_ = []
# iterate over all files
filenames = ['./../simulation/data/random_100set/rand'+str(i) for i in range(50)]
for filename in filenames:
data = load_data(filename=filename)
x_.append(data)
x_ = np.array(x_)
# +
# convert data into correct shape
x_ = x_.reshape(x_.shape[0]*x_.shape[1],x_.shape[2],x_.shape[3])
# get the ground truth
threshold = 0.5
y_ = get_truth(x_, thr=threshold)
# we only need the g2signal for training
x_ = x_[:,2,:]
# -
# Now we've collected all the datasets. For training the model, we need to **shuffle** all data sets first
#
# +
# Shuffle: one random permutation applied to both arrays keeps (x, y) pairs aligned
i = np.random.permutation(len(x_))
x_ = x_[i]
y_ = y_[i]
# reshape: add a trailing channel axis to x_; flatten y_ to a 1-D label vector
x_ = x_.reshape(x_.shape[0], x_.shape[1], 1)
y_ = y_.reshape(y_.shape[0])
# allocate (train/test split left commented out)
#x_train = x_
#y_train = y_
#x_test = x_[700:]
#y_test = y_[700:]
# -
# ### generate test data
# +
# generate test data: 20 types of mixed sources, 100 data sets for each
types = 20
testdata = []
sets = 100
rsps = np.random.randint(low=0,high=450,size=types)
rlaser = np.random.randint(low=0,high=100,size=types)
rther = np.random.randint(low=0,high=100,size=types)
rnon = np.random.randint(low=0,high=100,size=types)
for i in range(types):
randsource = simulator(Nbins=200, Ndet=1e6, nset=sets,sps=rsps[i], laser=rlaser[i], ther=rther[i], non=rnon[i])
datas = randsource.get_data(save=True, name='./data/test_data/'+str(i))
testdata.append(datas)
# +
testdata = np.array(testdata)
# convert simulated data into correct shape
x_test = testdata.reshape(testdata.shape[0]*testdata.shape[1],testdata.shape[2],testdata.shape[3])
# get the ground truth
threshold = 0.5
y_test = get_truth(x_test, thr=threshold)
# we only need the g2signal
x_test = x_test[:,1,:]
# shuffle
i = np.random.permutation(len(x_test))
x_test = x_test[i]
y_test = y_test[i]
# reshape
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)
y_test = y_test.reshape(y_test.shape[0])
| simulation/GenerateData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# # %reload_ext autoreload
import numpy as np
import pandas as pd
import pydicom
import copy
# %matplotlib inline
import matplotlib.pyplot as plt
from prettytable import PrettyTable
from utility import *
from keras.models import model_from_json
from skimage.transform import resize
import matplotlib.image as image
from skimage import color
import glob, os
# +
# This function reads in a .dcm file, checks the important fields for our device, and returns a numpy array
# of just the imaging data
def check_dicom(filename, verbose=False):
    """Read a DICOM file and screen it for this device.

    Returns the pixel data as a numpy array, or None when the study fails a
    screening check (patient age, body part, patient position, modality).
    Fix: rejection/log messages previously printed the literal text
    '(unknown)'; they now include the actual filename.
    """
    if verbose:
        print(f'Loading file: {filename} ...')
    dcm = pydicom.dcmread(filename)
    if verbose:
        print(dcm)
        print(f'ID: {dcm.PatientID}')
        print(f'Sex: {dcm.PatientSex}')
        print(f'Age: {dcm.PatientAge}')
        print(f'Modality: {dcm.Modality}')
        print(f'Study Description: {dcm.StudyDescription}')
        print(f'Rows: {dcm.Rows}')
        print(f'Columns: {dcm.Columns}')
    # Age must fall within [2, 90]
    age = int(dcm.PatientAge)
    if age < 2 or age > 90:
        print(f'Image {filename} rejected, Age: {age}')
        return None
    # Body part must be chest
    body_part = dcm.BodyPartExamined
    if body_part not in ['CHEST', 'chest']:
        print(f'Image {filename} rejected, Body part: {body_part}')
        return None
    # Patient position must be PA or AP
    pos = dcm.PatientPosition
    if pos not in ['PA', 'AP']:
        print(f'Image {filename} rejected, Patient position: {pos}')
        return None
    # Modality must be DX (digital radiography)
    mod = dcm.Modality
    if mod != 'DX':
        print(f'Image {filename} rejected, Modality: {mod}')
        return None
    return dcm.pixel_array
# This function takes the numpy array output by check_dicom and
# runs the appropriate pre-processing needed for our model input
def preprocess_image(img,img_mean,img_std,img_size, verbose=False):
    """Convert a raw pixel array into the 4-D batch tensor expected by the model.

    Parameters
    ----------
    img : numpy.ndarray
        2-D grayscale or 3-D RGB pixel data (RGB is collapsed to grayscale).
    img_mean, img_std :
        Unused; kept for interface compatibility. Per the caller's note,
        normalization is not applied here because skimage's resize already
        rescales the intensities.
    img_size : tuple
        Target shape (batch, height, width, channels), e.g. (1, 224, 224, 3).
    verbose : bool
        If True, print intermediate shapes.

    Returns
    -------
    numpy.ndarray
        Array of shape (1, img_size[1], img_size[2], img_size[3]) with the
        single grayscale channel repeated along the channel axis.
    """
    if verbose:
        print(f'Image data: {img.shape}')
    # Collapse RGB input to a single gray channel
    if len(img.shape) == 3:
        img = color.rgb2gray(img)
    # print("after gray scale: ",np.max(img), np.min(img))
    resized_img = resize(img, (img_size[1], img_size[2]), anti_aliasing=False)
    if verbose:
        print(f'Resized data: {resized_img.shape}')
    # print("after resize: ",np.max(resized_img), np.min(resized_img))
    # Add batch and channel dims, then repeat the gray channel to match the
    # channel count the network was trained with (e.g. 3 for VGG16 input).
    proc_img = resized_img.reshape((1, img_size[1], img_size[2], 1))
    proc_img = np.repeat(proc_img, img_size[3], axis=3)
    if verbose:
        print(f'Processed data: {proc_img.shape}')
    return proc_img
# This function loads in our trained model w/ weights and compiles it
def load_model(model_path, weight_path):
    """Rebuild the trained Keras model from its JSON architecture file,
    then attach the trained weights from `weight_path`."""
    with open(model_path, 'r') as arch_file:
        architecture = arch_file.read()
    model = model_from_json(architecture)
    model.load_weights(weight_path)
    print(f"Model loaded: {model}")
    return model
# This function uses our device's threshold parameters to predict whether or not
# the image shows the presence of pneumonia using our trained model
def predict_image(model, img, thresh):
    """Classify one preprocessed image: True where the predicted pneumonia
    probability exceeds `thresh` (batch size 1, single sigmoid output)."""
    probability = model.predict(img)
    return probability > thresh
# +
# ! ls ./weights -lt
print()
data_dir = "./weights"
extension = 'hdf5'
image_files = glob.glob(os.path.join(data_dir, '*.{}'.format(extension)))
for i, file in enumerate(image_files):
print(i," ", image_files[i])
# +
# Inference configuration: test files, serialized model, and decision threshold.
test_dicoms = ['test1.dcm','test2.dcm','test3.dcm','test4.dcm','test5.dcm','test6.dcm']
model_path = "xray_classification_model.json"
weight_path = image_files[0]
IMG_SIZE=(1,224,224,3) # This might be different if you did not use vgg16
# Mean & std passed from outside not used
# Resize function used in preprocessing normalizes the image data automatically
img_mean = -1
img_std = -1
my_model = load_model(model_path, weight_path)
# Decision threshold — presumably chosen from the F1-max analysis below; TODO confirm
thresh = 0.356
# -
def predict_dicom(filename):
    """Run the full inference pipeline on one DICOM file.

    Returns the boolean pneumonia prediction for the single image, or None
    when the file fails the header validation in check_dicom.
    Uses the module-level my_model, img_mean, img_std, IMG_SIZE and thresh.
    """
    img = check_dicom(filename)
    if img is None:
        return None
    img_proc = preprocess_image(img, img_mean, img_std, IMG_SIZE)
    pred = predict_image(my_model, img_proc, thresh)
    # batch of one, single output
    return pred[0][0]
# Render each test DICOM with its model prediction in a 2x3 grid.
fig, axs = plt.subplots(2, 3, figsize = (12,8))
for (file, ax) in zip(test_dicoms, axs.flatten()):
    img = pydicom.dcmread(file).pixel_array
    pred = predict_dicom(file)
    ax.imshow(img, cmap = 'gray')
    ax.set_title(f'file:{file}, pred:{pred}')
    ax.axis('off')
# +
def predict_image_prob(model, img):
    """Return the model's raw pneumonia probability for a single-image batch."""
    return model.predict(img)[0][0]
def predict_valid(row):
    """Load the image referenced by a validation-set row and return the
    model's predicted pneumonia probability for it.

    Intended for DataFrame.apply(axis=1); `row` must carry a 'path' column
    pointing at an image file readable by matplotlib.image.imread.
    """
    path = row['path']
    img_data = image.imread(path)
    # img_mean/img_std are unused by preprocess_image, hence the -1 placeholders
    img_prep = preprocess_image(img_data, -1, -1, IMG_SIZE, verbose=False)
    result = predict_image_prob(my_model, img_prep)
    return result
# -
# Score every validation image and persist the predictions alongside the labels.
valid = pd.read_csv('pneumonia_valid.csv')
valid_df = copy.copy(valid)
valid_df['Pneumonia_pred'] = np.zeros(len(valid_df))
valid_df['Pneumonia_pred'] = valid_df.apply(lambda x: predict_valid(x), axis=1)
valid_df.to_csv('pneumonia_valid_pred.csv', index=False)
# +
# valid_df = pd.read_csv('pneumonia_valid_pred.csv')
# print(valid_df.shape)
# -
# Collect the distinct single-label diagnoses from the pipe-delimited labels.
findings = {label for combo in valid_df['Finding Labels'].unique() for label in combo.split('|')}
print(f'Total number of single diagnoses: {len(findings)}')
print(findings)
# Drop the target class and the no-finding marker before the per-disease analysis.
findings -= {'Pneumonia','No Finding'}
# +
# Overall model performance on the full validation set; the table collects one
# row per "disease absent" subset, starting with the unrestricted baseline.
t = PrettyTable(['absent','auc','f1','thresh','precision','recall'])
valid_df2 = copy.copy(valid_df)
y = valid_df2['Pneumonia']
y_pred = valid_df2['Pneumonia_pred']
fpr, tpr, precision, recall, thresholds, auc = classification_metrics(y, y_pred)
f1scores, ind = f1_max(precision, recall, thresholds)
print("Classification metrics around (+/-)0.1 of the threshold value \
corresponding to max f1 score:\n")
# Inspect metric sensitivity in a window around the F1-optimal threshold
th_opt_range = threshold_range(thresholds, ind)
for th in th_opt_range:
    idx = get_index(th, thresholds)
    print("f1: {:.3f}, threshold: {:.3f}, precision: {:.3f}, Recall: {:.3f}".format(
        f1scores[idx],
        thresholds[idx],
        precision[idx],
        recall[idx]))
    print()
# Baseline row: no disease excluded
t.add_row(['None', np.round(auc,3),
           np.round(f1scores[ind],3),
           np.round(thresholds[ind],3),
           np.round(precision[ind],3),
           np.round(recall[ind],3)])
# +
# Evaluate the model on validation subsets that exclude each comorbid disease
# in turn, to see whether any single diagnosis skews the metrics.
print('Model performance in the absence of the diseases \
indicated in the left most column:\n')
for i in findings:
    # Keep only the patients that do NOT have disease i
    valid_df2 = copy.copy(valid_df[valid_df[i]==0])
    y = valid_df2['Pneumonia']
    y_pred = valid_df2['Pneumonia_pred']
    fpr, tpr, precision, recall, thresholds, auc = classification_metrics(y, y_pred)
    f1scores, ind = f1_max(precision, recall, thresholds)
    t.add_row([i, np.round(auc,3),
               np.round(f1scores[ind],3),
               np.round(thresholds[ind],3),
               np.round(precision[ind],3),
               np.round(recall[ind],3)])
print(t)
# -
| Inference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Transform Experiments
#
# custom transform
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
from PIL import Image as pil_image
#pip3 install nvidia-ml-py3
import tracemalloc, threading, torch, time, pynvml
from fastai.utils.mem import *
from fastai.vision import *
import fastai
print(fastai.__version__)
torch.cuda.set_device(0)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# +
#see https://forums.fast.ai/t/gpu-optimizations-central/33944/15
#"memory profiler that taps into each epoch, and can be fine-tuned to each separate stage"
if not torch.cuda.is_available(): raise Exception("pytorch is required")
def preload_pytorch():
    # Touch the GPU once so CUDA context creation happens up front and does
    # not distort the first memory measurement.
    torch.ones((1, 1)).cuda()
def gpu_mem_get_used_no_cache():
    # Empty PyTorch's caching allocator first so the reported figure reflects
    # memory actually in use, not cached blocks.
    torch.cuda.empty_cache()
    return gpu_mem_get().used
def gpu_mem_used_get_fast(gpu_handle):
    # Query NVML directly (no cache flush) — cheap enough for a polling loop.
    # Returns used GPU memory in MiB.
    info = pynvml.nvmlDeviceGetMemoryInfo(gpu_handle)
    return int(info.used/2**20)
preload_pytorch()
pynvml.nvmlInit()
class PeakMemMetric(LearnerCallback):
    """Per-epoch CPU/GPU memory profiler callback (see forum link above).

    NOTE(review): only the start of the monitoring logic is visible here;
    this appears to be a partial excerpt of the full callback.
    """
    _order=-20 # Needs to run before the recorder
    def peak_monitor_start(self):
        # Flag polled by the monitoring thread to keep sampling memory usage.
        self.peak_monitoring = True
# -
path = Path('../data/mnist/mnist_png')
np.random.seed(42)
# ### No transforms
#no transforms
tfms = get_transforms(do_flip=False,
flip_vert=False,
max_rotate=0.,
max_zoom=0.,
max_lighting=0.,
max_warp=0.,
p_affine=0.,
p_lighting=0.)
data = ImageDataBunch.from_folder(path, valid_pct = 0.2,ds_tfms=tfms, size=28)
data.show_batch(rows=3, figsize=(5,5))
learn = create_cnn(data, models.resnet18, metrics=error_rate)
learn.lr_find()
learn.recorder.plot(skip_start=10, skip_end=5)
# Notice how the lr vs loss plot is nice and smooth - but I have come across datasets where it is really bumpy, let's create an example
# ### custom transforms
# +
# The code below is based on an excellent piece of code from Stack Overflow
# http://stackoverflow.com/questions/22937589/how-to-add-noise-gaussian-salt-and-pepper-etc-to-image-in-python-with-opencv
def _noise_generator (image, noise_type):
"""
Generate noise to a given Image based on required noise type
Input parameters:
image: ndarray (input image data. It will be converted to float)
noise_type: string
'gauss' Gaussian-distrituion based noise
'poission' Poission-distribution based noise
's&p' Salt and Pepper noise, 0 or 1
'speckle' Multiplicative noise using out = image + n*image
where n is uniform noise with specified mean & variance
"""
row,col,ch= image.shape
if noise_type == "gauss":
mean = 0.0
var = 0.01
sigma = var**0.5
gauss = np.array(image.shape)
gauss = np.random.normal(mean,sigma,(row,col,ch))
gauss = gauss.reshape(row,col,ch)
noisy = image + gauss
return noisy.astype('uint8')
elif noise_type == "s&p":
s_vs_p = 0.5
amount = 0.004
out = image
# Generate Salt '1' noise
num_salt = np.ceil(amount * image.size * s_vs_p)
coords = [np.random.randint(0, i - 1, int(num_salt))
for i in image.shape]
out[coords] = 255
# Generate Pepper '0' noise
num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
coords = [np.random.randint(0, i - 1, int(num_pepper))
for i in image.shape]
out[coords] = 0
return out
elif noise_type == "poisson":
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
noisy = np.random.poisson(image * vals) / float(vals)
return noisy
elif noise_type =="speckle":
gauss = np.random.randn(row,col,ch)
gauss = gauss.reshape(row,col,ch)
noisy = image + image * gauss
return noisy
else:
return image
# -
def _gaussian_noise (image):
#image.shape: torch.Size([3, 28, 28])
row,col = image.shape[1:]
ch=1
mean = 0.0
var = 0.5
sigma = var**0.5
gauss = np.array(image.shape)
gauss = np.random.normal(mean,sigma,(ch,row,col))
gauss = torch.Tensor(gauss.reshape(ch,row,col))
noisy = image + gauss
return noisy
gaussian_noise = TfmPixel(_gaussian_noise)
#
# #### just the single transform
#
# apply to train, but not validation set
tfms=[[gaussian_noise()],[]]
data = ImageDataBunch.from_folder(path, ds_tfms=tfms, size=28)
data.show_batch(rows=3, figsize=(5,5))
learn = create_cnn(data, models.resnet18, metrics=error_rate)
learn.lr_find()
#I can never remember what these parameters are called so adding here (with defaults) for reference
learn.recorder.plot(skip_start=10, skip_end=10)
# ### Default transforms plus custom transform
tfms = get_transforms(do_flip=True, flip_vert=False, max_rotate=10.0, max_zoom=1.1, max_lighting=0.2, max_warp=0.2, p_affine=0.75, p_lighting=0.75, xtra_tfms=[gaussian_noise()])
data = ImageDataBunch.from_folder(path, ds_tfms=tfms, size=28)
data.show_batch(rows=3, figsize=(5,5))
learn = create_cnn(data, models.resnet18, metrics=error_rate)
learn.lr_find()
learn.recorder.plot(skip_start=10, skip_end=10)
| nbs/transforms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from experiments import getDatasets, getData, random_seed
from metrics import MetricsCallback, getDatasetMetrics
from config import *
from pathlib import Path
# %load_ext autoreload
# %autoreload 2
# -
allData = getData()
# +
# Compute metrics for the SickZil-Machine baseline once; skip the whole pass
# if the CSV already exists from a previous run.
CSV_PATH = Path(EXPERIMENTS_PATH) / 'sickzil' / 'metrics.csv'
if not (CSV_PATH).exists():
    for index, dataset in enumerate(getDatasets(allData, crop=False, cutInHalf = False)):
        random_seed(42)  # reproducibility for any stochastic dataset prep
        m = MetricsCallback(None)
        m.on_train_begin()
        # Feed every validation item's SickZil prediction / ground-truth pair
        # through the metrics accumulator.
        for idx in range(len(dataset.valid.x.items)):
            pred = dataset.valid.y.sickzilImage(idx)
            m.on_batch_end(False, pred.px, dataset.valid.y[idx].px)
        m.calculateMetrics()
        m.save(CSV_PATH, index > 0)  # append after the first dataset
| code/experiments/sickzil-machine.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Data Input
#
# To do any computation, you need to have data. Getting the data in the framework of a workflow is therefore the first step of every analysis. Nipype provides many different modules to grab or select the data:
#
# DataFinder
# DataGrabber
# FreeSurferSource
# JSONFileGrabber
# S3DataGrabber
# SSHDataGrabber
# SelectFiles
# XNATSource
#
# This tutorial will only cover some of them. For the rest, see the section [``interfaces.io``](http://nipype.readthedocs.io/en/latest/interfaces/generated/nipype.interfaces.io.html) on the official homepage.
# # Dataset structure
#
# To be able to import data, you first need to be aware of the structure of your dataset. The structure of the dataset for this tutorial is according to BIDS, and looks as follows:
#
# ds000114
# ├── CHANGES
# ├── dataset_description.json
# ├── derivatives
# │ ├── fmriprep
# │ │ └── sub01...sub10
# │ │ └── ...
# │ ├── freesurfer
# │ ├── fsaverage
# │ ├── fsaverage5
# │ │ └── sub01...sub10
# │ │ └── ...
# ├── dwi.bval
# ├── dwi.bvec
# ├── sub-01
# │ ├── ses-retest
# │ ├── anat
# │ │ └── sub-01_ses-retest_T1w.nii.gz
# │ ├──func
# │ ├── sub-01_ses-retest_task-covertverbgeneration_bold.nii.gz
# │ ├── sub-01_ses-retest_task-fingerfootlips_bold.nii.gz
# │ ├── sub-01_ses-retest_task-linebisection_bold.nii.gz
# │ ├── sub-01_ses-retest_task-linebisection_events.tsv
# │ ├── sub-01_ses-retest_task-overtverbgeneration_bold.nii.gz
# │ └── sub-01_ses-retest_task-overtwordrepetition_bold.nii.gz
# │ └── dwi
# │ └── sub-01_ses-retest_dwi.nii.gz
# │ ├── ses-test
# │ ├── anat
# │ │ └── sub-01_ses-test_T1w.nii.gz
# │ ├──func
# │ ├── sub-01_ses-test_task-covertverbgeneration_bold.nii.gz
# │ ├── sub-01_ses-test_task-fingerfootlips_bold.nii.gz
# │ ├── sub-01_ses-test_task-linebisection_bold.nii.gz
# │ ├── sub-01_ses-test_task-linebisection_events.tsv
# │ ├── sub-01_ses-test_task-overtverbgeneration_bold.nii.gz
# │ └── sub-01_ses-test_task-overtwordrepetition_bold.nii.gz
# │ └── dwi
# │ └── sub-01_ses-retest_dwi.nii.gz
# ├── sub-02..sub-10
# │ └── ...
# ├── task-covertverbgeneration_bold.json
# ├── task-covertverbgeneration_events.tsv
# ├── task-fingerfootlips_bold.json
# ├── task-fingerfootlips_events.tsv
# ├── task-linebisection_bold.json
# ├── task-overtverbgeneration_bold.json
# ├── task-overtverbgeneration_events.tsv
# ├── task-overtwordrepetition_bold.json
# └── task-overtwordrepetition_events.tsv
# # DataGrabber
#
# `DataGrabber` is an interface for collecting files from hard drive. It is very flexible and supports almost any file organization of your data you can imagine.
#
# You can use it as a trivial use case of getting a fixed file. By default, `DataGrabber` stores its outputs in a field called outfiles.
import nipype.interfaces.io as nio
datasource1 = nio.DataGrabber()
datasource1.inputs.base_directory = 'data/ds000114'
datasource1.inputs.template = 'sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz'
datasource1.inputs.sort_filelist = True
results = datasource1.run()
results.outputs
# Or you can get at all NIfTI files containing the word `'fingerfootlips'` in all directories starting with the letter `'s'`.
import nipype.interfaces.io as nio
datasource2 = nio.DataGrabber()
datasource2.inputs.base_directory = 'data/ds000114'
datasource2.inputs.template = 's*/ses-test/func/*fingerfootlips*.nii.gz'
datasource2.inputs.sort_filelist = True
results = datasource2.run()
results.outputs
# Two special inputs were used in these previous cases. The input `base_directory`
# indicates in which directory to search, while the input `template` indicates the
# string template to match. So in the previous case `DataGrabber` is looking for
# path matches of the form `data/ds000114/s*/ses-test/func/*fingerfootlips*.nii.gz`.
#
# <div class="alert alert-info">
# **Note**: When used with wildcards (e.g., `s*` and `*fingerfootlips*` above) `DataGrabber` does not return data in sorted order. In order to force it to return data in a sorted order, one needs to set the input `sorted = True`. However, when explicitly specifying an order as we will see below, `sorted` should be set to `False`.
# </div>
#
# More use cases arise when the template can be filled by other inputs. In the
# example below, we define an input field for `DataGrabber` called `subject_id`. This is
# then used to set the template (see `%d` in the template).
datasource3 = nio.DataGrabber(infields=['subject_id'])
datasource3.inputs.base_directory = 'data/ds000114'
datasource3.inputs.template = 'sub-%02d/ses-test/func/*fingerfootlips*.nii.gz'
datasource3.inputs.sort_filelist = True
datasource3.inputs.subject_id = [1, 7]
results = datasource3.run()
results.outputs
# This will return the functional images from subject 1 and 7 for the task `fingerfootlips`. We can take this a step further and pair subjects with task.
datasource4 = nio.DataGrabber(infields=['subject_id', 'run'])
datasource4.inputs.base_directory = 'data/ds000114'
datasource4.inputs.template = 'sub-%02d/ses-test/func/*%s*.nii.gz'
datasource4.inputs.sort_filelist = True
datasource4.inputs.run = ['fingerfootlips', 'linebisection']
datasource4.inputs.subject_id = [1, 7]
results = datasource4.run()
results.outputs
# This will return the functional image of subject 1, task `'fingerfootlips'` and the functional image of subject 7 for the `'linebisection'` task.
# ## A more realistic use-case
#
# ``DataGrabber`` is a generic data grabber module that wraps around ``glob`` to select your neuroimaging data in an intelligent way. As an example, let's assume we want to grab the anatomical and functional images of a certain subject.
#
# First, we need to create the ``DataGrabber`` node. This node needs to have some input fields for all dynamic parameters (e.g. subject identifier, task identifier), as well as the two desired output fields ``anat`` and ``func``.
# +
from nipype import DataGrabber, Node
# Create DataGrabber node
dg = Node(DataGrabber(infields=['subject_id', 'ses_name', 'task_name'],
outfields=['anat', 'func']),
name='datagrabber')
# Location of the dataset folder
dg.inputs.base_directory = 'data/ds000114'
# Necessary default parameters
dg.inputs.template = '*'
dg.inputs.sort_filelist = True
# -
# Second, we know that the two files we desire are at the following locations:
#
# anat = data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz
# func = data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz
#
# We see that the two files only have three dynamic parameters between subjects and task names:
#
# subject_id: in this case 'sub-01'
# task_name: in this case fingerfootlips
# ses_name: test
#
# This means that we can rewrite the paths as follows:
#
# anat = data/ds000114/[subject_id]/ses-[ses_name]/anat/sub-[subject_id]_ses-[ses_name]_T1w.nii.gz
# func = data/ds000114/[subject_id]/ses-[ses_name]/func/sub-[subject_id]_ses-[ses_name]_task-[task_name]_bold.nii.gz
#
# Therefore, we need the parameters ``subject_id`` and ``ses_name`` for the anatomical image and the parameters ``subject_id``, ``ses_name`` and ``task_name`` for the functional image. In the context of DataGabber, this is specified as follows:
dg.inputs.template_args = {'anat': [['subject_id', 'ses_name']],
'func': [['subject_id', 'ses_name', 'task_name']]}
# Now, comes the most important part of DataGrabber. We need to specify the template structure to find the specific data. This can be done as follows.
dg.inputs.field_template = {'anat': 'sub-%02d/ses-%s/anat/*_T1w.nii.gz',
'func': 'sub-%02d/ses-%s/func/*task-%s_bold.nii.gz'}
# You'll notice that we use ``%s``, ``%02d`` and ``*`` for placeholders in the data paths. ``%s`` is a placeholder for a string and is filled out by ``task_name`` or ``ses_name``. ``%02d`` is a placeholder for a integer number and is filled out by ``subject_id``. ``*`` is used as a wild card, e.g. a placeholder for any possible string combination. This is all to set up the ``DataGrabber`` node.
# Above, two more fields are introduced: `field_template` and `template_args`. These fields are both dictionaries whose keys correspond to the `outfields` keyword. The `field_template` reflects the search path for each output field, while the `template_args` reflect the inputs that satisfy the template. The inputs can either be one of the named inputs specified by the `infields` keyword arg or it can be raw strings or integers corresponding to the template. For the `func` output, the **%s** in the `field_template` is satisfied by `subject_id` and the **%d** is filled in by the list of numbers.
# Now it is up to you how you want to feed the dynamic parameters into the node. You can either do this by using another node (e.g. ``IdentityInterface``) and feed ``subject_id``, ``ses_name`` and ``task_name`` as connections to the ``DataGrabber`` node or specify them directly as node inputs.
# Using the IdentityInterface
from nipype import IdentityInterface
infosource = Node(IdentityInterface(fields=['subject_id', 'task_name']),
name="infosource")
infosource.inputs.task_name = "fingerfootlips"
infosource.inputs.ses_name = "test"
subject_id_list = [1, 2]
infosource.iterables = [('subject_id', subject_id_list)]
# Now you only have to connect ``infosource`` with your ``DataGrabber`` and run the workflow to iterate over subjects 1 and 2.
# You can also provide the inputs to the ``DataGrabber`` node directly, for one subject you can do this as follows:
# Specifying the input fields of DataGrabber directly
dg.inputs.subject_id = 1
dg.inputs.ses_name = "test"
dg.inputs.task_name = "fingerfootlips"
# Now let's run the ``DataGrabber`` node and let's look at the output:
dg.run().outputs
# ### Exercise 1
# Grab T1w images from both sessions - ``ses-test`` and ``ses-retest`` for ``sub-01``.
# + solution2="hidden" solution2_first=true
# write your solution here
# + solution2="hidden"
from nipype import DataGrabber, Node
# Create DataGrabber node
ex1_dg = Node(DataGrabber(infields=['subject_id', 'ses_name'],
outfields=['anat']),
name='datagrabber')
# Location of the dataset folder
ex1_dg.inputs.base_directory = 'data/ds000114'
# Necessary default parameters
ex1_dg.inputs.template = '*'
ex1_dg.inputs.sort_filelist = True
# specify the template
ex1_dg.inputs.template_args = {'anat': [['subject_id', 'ses_name']]}
ex1_dg.inputs.field_template = {'anat': 'sub-%02d/ses-%s/anat/*_T1w.nii.gz'}
# specify subject_id and ses_name you're interested in
ex1_dg.inputs.subject_id = 1
ex1_dg.inputs.ses_name = ["test", "retest"]
# and run the node
ex1_res = ex1_dg.run()
# + solution2="hidden"
# you can now check the output
ex1_res.outputs
# -
# # SelectFiles
#
# `SelectFiles` is a more flexible alternative to `DataGrabber`. It is built on Python [format strings](http://docs.python.org/2/library/string.html#format-string-syntax), which are similar to the Python string interpolation feature you are likely already familiar with, but advantageous in several respects. Format strings allow you to replace named sections of template strings set off by curly braces (`{}`), possibly filtered through a set of functions that control how the values are rendered into the string. As a very basic example, we could write
msg = "This workflow uses {package}."
# and then format it with keyword arguments:
print(msg.format(package="FSL"))
# `SelectFiles` uses the {}-based string formatting syntax to plug values into string templates and collect the data. These templates can also be combined with glob wild cards. The field names in the formatting template (i.e. the terms in braces) will become inputs fields on the interface, and the keys in the templates dictionary will form the output fields.
#
# Let's focus again on the data we want to import:
#
# anat = data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz
# func = data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz
#
# Now, we can replace those paths with the according {}-based strings.
#
# anat = data/ds000114/sub-{subject_id}/ses-{ses_name}/anat/sub-{subject_id}_ses-{ses_name}_T1w.nii.gz
# func = data/ds000114/sub-{subject_id}/ses-{ses_name}/func/ \
# sub-{subject_id}_ses-{ses_name}_task-{task_name}_bold.nii.gz
#
# How would this look like as a `SelectFiles` node?
# +
from nipype import SelectFiles, Node
# String template with {}-based strings
templates = {'anat': 'sub-{subject_id}/ses-{ses_name}/anat/sub-{subject_id}_ses-{ses_name}_T1w.nii.gz',
'func': 'sub-{subject_id}/ses-{ses_name}/func/sub-{subject_id}_ses-{ses_name}_task-{task_name}_bold.nii.gz'}
# Create SelectFiles node
sf = Node(SelectFiles(templates),
name='selectfiles')
# Location of the dataset folder
sf.inputs.base_directory = 'data/ds000114'
# Feed {}-based placeholder strings with values
sf.inputs.subject_id = '01'
sf.inputs.ses_name = "test"
sf.inputs.task_name = 'fingerfootlips'
# -
# Let's check if we get what we wanted.
sf.run().outputs
# Perfect! But why is `SelectFiles` more flexible than `DataGrabber`? First, you perhaps noticed that with the {}-based string, we can reuse the same input (e.g. `subject_id`) multiple time in the same string, without feeding it multiple times into the template.
#
# Additionally, you can also select multiple files without the need of an iterable node. For example, let's assume we want to select anatomical images for all subjects at once. We can do this by using the wildcard ``*`` in a template:
#
# 'sub-*/anat/sub-*_T1w.nii.gz'
#
# Let's see how this works:
# +
from nipype import SelectFiles, Node
# String template with {}-based strings
templates = {'anat': 'sub-*/ses-{ses_name}/anat/sub-*_ses-{ses_name}_T1w.nii.gz'}
# Create SelectFiles node
sf = Node(SelectFiles(templates),
name='selectfiles')
# Location of the dataset folder
sf.inputs.base_directory = 'data/ds000114'
# Feed {}-based placeholder strings with values
sf.inputs.ses_name = 'test'
# Print SelectFiles output
sf.run().outputs
# -
# As you can see, now `anat` contains ten file paths, T1w images for all ten subject.
#
# As a side note, you could also use ``[]`` string formatting for some simple cases, e.g. for loading only subject 1 and 2:
#
# 'sub-0[1,2]/ses-test/anat/sub-0[1,2]_ses-test_T1w.nii.gz'
# ### `force_lists`
#
# There's an additional parameter, `force_lists`, which controls how `SelectFiles` behaves in cases where only a single file matches the template. The default behavior is that when a template matches multiple files they are returned as a list, while a single file is returned as a string. There may be situations where you want to force the outputs to always be returned as a list (for example, you are writing a workflow that expects to operate on several runs of data, but some of your subjects only have a single run). In this case, `force_lists` can be used to tune the outputs of the interface. You can either use a boolean value, which will be applied to every output the interface has, or you can provide a list of the output fields that should be coerced to a list.
#
# Returning to our previous example, you may want to ensure that the `anat` files are returned as a list, but you only ever will have a single `T1` file. In this case, you would do
sf = SelectFiles(templates, force_lists=["anat"])
# ### Exercise 2
# Use ``SelectFile`` to select again T1w images from both sessions - ``ses-test`` and ``ses-retest`` for ``sub-01``.
# + solution2="hidden" solution2_first=true
# write your solution here
# + solution2="hidden"
from nipype import SelectFiles, Node
# String template with {}-based strings
templates = {'anat': 'sub-01/ses-*/anat/sub-01_ses-*_T1w.nii.gz'}
# Create SelectFiles node
sf = Node(SelectFiles(templates),
name='selectfiles')
# Location of the dataset folder
sf.inputs.base_directory = 'data/ds000114'
#sf.inputs.ses_name =
sf.run().outputs
# -
# ## FreeSurferSource
#
# `FreeSurferSource` is a specific case of a file grabber that facilitates the data import of outputs from the FreeSurfer recon-all algorithm. This, of course, requires that you've already run `recon-all` on your subject.
# For the tutorial dataset ``ds000114``, `recon-all` was already run. So, let's make sure that you have the anatomy output of one subject on your system:
# !datalad get -r -J 4 data/ds000114/derivatives/freesurfer/sub-01
# Now, before you can run `FreeSurferSource`, you first have to specify the path to the FreeSurfer output folder, i.e. you have to specify the SUBJECTS_DIR variable. This can be done as follows:
# +
from nipype.interfaces.freesurfer import FSCommand
from os.path import abspath as opap
# Path to your freesurfer output folder
fs_dir = opap('data/ds000114/derivatives/freesurfer/')
# Set SUBJECTS_DIR
FSCommand.set_default_subjects_dir(fs_dir)
# -
# To create the `FreeSurferSource` node, do as follows:
# +
from nipype import Node
from nipype.interfaces.io import FreeSurferSource
# Create FreeSurferSource node
fssource = Node(FreeSurferSource(subjects_dir=fs_dir),
name='fssource')
# -
# Let's now run it for a specific subject.
fssource.inputs.subject_id = 'sub-01'
result = fssource.run()
# Did it work? Let's try to access multiple FreeSurfer outputs:
print('aparc_aseg: %s\n' % result.outputs.aparc_aseg)
print('inflated: %s\n' % result.outputs.inflated)
# It seems to be working as it should. But as you can see, the `inflated` output actually contains the file location for both hemispheres. With `FreeSurferSource` we can also restrict the file selection to a single hemisphere. To do this, we use the `hemi` input filed:
fssource.inputs.hemi = 'lh'
result = fssource.run()
# Let's take a look again at the `inflated` output.
result.outputs.inflated
# Perfect!
| notebooks/basic_data_input.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext lab_black
# +
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib.ticker as ticker
import matplotlib.axis as ax
from pywaffle import Waffle
import datetime
# %matplotlib inline
# +
path1 = "./"
path2 = "./"
filename1 = "All_Data_Original_magazine.csv"
filename2 = "All_Data_Original_weibo.csv"
mg_ori = pd.read_csv(path1 + filename1)
wb_ori = pd.read_csv(path2 + filename2)
# -
# ## clean data
# +
# Clean the magazine survey export and keep the first 100 responses.
df_mg_ori = pd.DataFrame(mg_ori)
df_mg_ori = (
    df_mg_ori.dropna(axis=1, how="all")  # drop all-null columns
    .dropna(axis=0, how="any")  # drop rows with any null
    .drop(["ip", "browser", "os"], axis=1)  # these three columns are useless
)
# NOTE(review): df_mg is a slice of df_mg_ori; assigning to it below may raise
# pandas' SettingWithCopyWarning — consider .copy() if that becomes a problem.
df_mg = df_mg_ori[0:100]
df_mg.columns = [
    "seq",
    "start",
    "finish",
    "status",
    "Q1",
    "Q2",
    "Q3",
    "Q4",
    "Q5",
    "Q6",
    "Q7",
]
df_mg.reset_index(inplace=True, drop=True)
# Reset the index so it runs contiguously again (rows with nulls were dropped).
# Without inplace=True the index would not change;
# without drop=True a redundant index column would be added.
df_mg
# +
df_wb_ori = pd.DataFrame(wb_ori)
df_wb_ori = (
df_wb_ori.dropna(axis=1, how="all")
.dropna(axis=0, how="any")
.drop(["ip", "browser", "os"], axis=1)
) #
# print(df_wb_ori[0])
df_wb = df_wb_ori[0:100]
df_wb.columns = [
"seq",
"start",
"finish",
"status",
"Q1",
"Q2",
"Q3",
"Q4",
"Q5",
"Q6",
"Q7",
]
df_wb.reset_index(inplace=True, drop=True)
df_wb
# -
# Count how many respondents picked each option of Q3 (options coded 1-4).
# The filtered list holds every response equal to k, so its length IS the
# count (the original sum(...)/k computed the same value in a roundabout way).
mg_Q3_1 = [item for item in df_mg["Q3"] if item == 1]
mg_Q3_1_freq = len(mg_Q3_1)
mg_Q3_2 = [item for item in df_mg["Q3"] if item == 2]
mg_Q3_2_freq = len(mg_Q3_2)
mg_Q3_3 = [item for item in df_mg["Q3"] if item == 3]
mg_Q3_3_freq = len(mg_Q3_3)
mg_Q3_4 = [item for item in df_mg["Q3"] if item == 4]
mg_Q3_4_freq = len(mg_Q3_4)
# ## show data: distribution of different options
# +
# Horizontal bar chart of the original (pre-experiment) Q3 choice counts.
plt.figure(figsize=[5, 2.5])
plt.title("Original Choice Distribution in Questionnaire A")
plt.xlabel("People Counting")
plt.grid(axis="x", c="w")
ax = plt.gca()
ax.spines["top"].set_visible(False)  # erase top frame
ax.spines["bottom"].set_visible(False)  # erase bottom frame
ax.spines["right"].set_visible(False)  # erase right frame
# Annotate each bar with its count, nudged just past the bar's end
for x, y in zip([mg_Q3_1_freq, mg_Q3_2_freq, mg_Q3_3_freq, mg_Q3_4_freq], [1, 2, 3, 4]):
    plt.text(x + 0.8, y - 1.1, "%.0f" % x)
plt.barh(
    ["right hand/forehead", "right hand/jaw", "left hand/forehead", "left hand/jaw"],
    [mg_Q3_1_freq, mg_Q3_2_freq, mg_Q3_3_freq, mg_Q3_4_freq],
)
top = int(max(mg_Q3_1_freq, mg_Q3_2_freq, mg_Q3_3_freq, mg_Q3_4_freq))
ax.set_xticks(range(0, top, 5))
plt.show()
# -
# Count how many respondents picked each option of Q5 (options coded 1-4);
# len() of the filtered list is the count (the original sum(...)/k computed
# the same value in a roundabout way).
mg_Q5_1 = [item for item in df_mg["Q5"] if item == 1]
mg_Q5_1_freq = len(mg_Q5_1)
mg_Q5_2 = [item for item in df_mg["Q5"] if item == 2]
mg_Q5_2_freq = len(mg_Q5_2)
mg_Q5_3 = [item for item in df_mg["Q5"] if item == 3]
mg_Q5_3_freq = len(mg_Q5_3)
mg_Q5_4 = [item for item in df_mg["Q5"] if item == 4]
mg_Q5_4_freq = len(mg_Q5_4)
# Horizontal bar chart of the final (post-experiment) Q5 choice counts.
plt.figure(figsize=[5, 2.5])
plt.title("Final Choice Distribution in Questionnaire A")
plt.xlabel("People Counting")
plt.grid(axis="x", c="w")
ax = plt.gca()
ax.spines["top"].set_visible(False)  # erase top frame
ax.spines["bottom"].set_visible(False)  # erase bottom frame
ax.spines["right"].set_visible(False)  # erase right frame
# Annotate each bar with its count, nudged just past the bar's end
for x, y in zip([mg_Q5_1_freq, mg_Q5_2_freq, mg_Q5_3_freq, mg_Q5_4_freq], [1, 2, 3, 4]):
    plt.text(x + 0.8, y - 1.1, "%.0f" % x)
plt.barh(
    ["right hand/forehead", "right hand/jaw", "left hand/forehead", "left hand/jaw"],
    [mg_Q5_1_freq, mg_Q5_2_freq, mg_Q5_3_freq, mg_Q5_4_freq],
)
top = int(max(mg_Q5_1_freq, mg_Q5_2_freq, mg_Q5_3_freq, mg_Q5_4_freq))
ax.set_xticks(range(0, top, 5))
#plt.savefig("Final Choice Distribution in Questionnaire A.png")
plt.show()
# Count questionnaire-B respondents per Q3 (original choice) option.
# (len() replaces the original sum-divided-by-option-value counting trick.)
wb_Q3_1 = [item for item in df_wb["Q3"] if item == 1]
wb_Q3_1_freq = len(wb_Q3_1)
wb_Q3_2 = [item for item in df_wb["Q3"] if item == 2]
wb_Q3_2_freq = len(wb_Q3_2)
wb_Q3_3 = [item for item in df_wb["Q3"] if item == 3]
wb_Q3_3_freq = len(wb_Q3_3)
wb_Q3_4 = [item for item in df_wb["Q3"] if item == 4]
wb_Q3_4_freq = len(wb_Q3_4)
# Horizontal bar chart of the original (Q3) choice counts in questionnaire B.
plt.figure(figsize=[5, 2.5])
plt.title("Original Choice Distribution in Questionnaire B")
plt.xlabel("People Counting")
plt.grid(axis="x", c="w")
ax = plt.gca() # relies on the figure created just above
ax.spines["top"].set_visible(False) # erase top frame
ax.spines["bottom"].set_visible(False) # erase bottom frame
ax.spines["right"].set_visible(False) # erase right frame
for x, y in zip([wb_Q3_1_freq, wb_Q3_2_freq, wb_Q3_3_freq, wb_Q3_4_freq], [1, 2, 3, 4]):
    plt.text(x + 0.8, y - 1.1, "%.0f" % x)
plt.barh(
    ["right hand/forehead", "right hand/jaw", "left hand/forehead", "left hand/jaw"],
    [wb_Q3_1_freq, wb_Q3_2_freq, wb_Q3_3_freq, wb_Q3_4_freq],
)
top = int(max(wb_Q3_1_freq, wb_Q3_2_freq, wb_Q3_3_freq, wb_Q3_4_freq))
ax.set_xticks(range(0, top, 5))
#plt.savefig("Original Choice Distribution in Questionnaire B.png")
plt.show()
# Count questionnaire-B respondents per Q5 (final choice) option.
# (len() replaces the original sum-divided-by-option-value counting trick.)
wb_Q5_1 = [item for item in df_wb["Q5"] if item == 1]
wb_Q5_1_freq = len(wb_Q5_1)
wb_Q5_2 = [item for item in df_wb["Q5"] if item == 2]
wb_Q5_2_freq = len(wb_Q5_2)
wb_Q5_3 = [item for item in df_wb["Q5"] if item == 3]
wb_Q5_3_freq = len(wb_Q5_3)
wb_Q5_4 = [item for item in df_wb["Q5"] if item == 4]
wb_Q5_4_freq = len(wb_Q5_4)
# Horizontal bar chart of the final (Q5) choice counts in questionnaire B.
plt.figure(figsize=[5, 2.5])
plt.title("Final Choice Distribution in Questionnaire B")
plt.xlabel("People Counting")
plt.grid(axis="x", c="w")
ax = plt.gca() # relies on the figure created just above
ax.spines["top"].set_visible(False) # erase top frame
ax.spines["bottom"].set_visible(False) # erase bottom frame
ax.spines["right"].set_visible(False) # erase right frame
for x, y in zip([wb_Q5_1_freq, wb_Q5_2_freq, wb_Q5_3_freq, wb_Q5_4_freq], [1, 2, 3, 4]):
    plt.text(x + 0.8, y - 1.1, "%.0f" % x)
plt.barh(
    ["right hand/forehead", "right hand/jaw", "left hand/forehead", "left hand/jaw"],
    [wb_Q5_1_freq, wb_Q5_2_freq, wb_Q5_3_freq, wb_Q5_4_freq],
)
top = int(max(wb_Q5_1_freq, wb_Q5_2_freq, wb_Q5_3_freq, wb_Q5_4_freq))
ax.set_xticks(range(0, top, 5))
#plt.savefig("Final Choice Distribution in Questionnaire B.png")
plt.show()
# ## for H3: choice flow using Sankey plot
# Pair each respondent's original (Q3) and final (Q5) choice.
# NOTE: zip() returns a one-shot iterator; it is consumed by the counting
# loop in the next cell and cannot be iterated again.
df_mg_change = zip(df_mg["Q3"], df_mg["Q5"])
# + jupyter={"source_hidden": true}
# Tally original->final choice transitions for questionnaire A.
# The original used 16 parallel counters updated by a 16-branch if chain;
# a dict keyed by the (original, final) pair does the same in one lookup.
# NOTE: df_mg_change is a zip iterator, so this loop consumes it.
_mg_pairs = {(p, q): 0 for p in range(1, 5) for q in range(1, 5)}
for p, q in df_mg_change:
    if (p, q) in _mg_pairs:  # values outside options 1..4 are ignored, as before
        _mg_pairs[(p, q)] += 1
# Unpack into the individual mgPQ names used by the Sankey and change-rate cells.
(mg11, mg12, mg13, mg14,
 mg21, mg22, mg23, mg24,
 mg31, mg32, mg33, mg34,
 mg41, mg42, mg43, mg44) = (
    _mg_pairs[(p, q)] for p in range(1, 5) for q in range(1, 5)
)
# + jupyter={"source_hidden": true}
# pyecharts setup so Sankey diagrams render inline in JupyterLab.
from pyecharts.globals import CurrentConfig, NotebookType
CurrentConfig.NOTEBOOK_TYPE = NotebookType.JUPYTER_LAB  # render target: JupyterLab
import json
import os
# NOTE(review): json/os appear unused in the visible cells.
from pyecharts import options as opts
from pyecharts.charts import Page, Sankey
def sankey_mg() -> Sankey:
    """Build a Sankey diagram of the original -> final choice flow for questionnaire A.

    Uses the module-level transition counts mg11..mg44 (row-major by
    original option, then final option) as the link values.
    """
    choice_labels = [
        "right hand/ forehead",
        "right hand/ jaw",
        "left hand/ forehead",
        "left hand/ jaw",
    ]
    # One node per original choice followed by one node per final choice.
    nodes = (
        [{"name": f"original: {label}"} for label in choice_labels]
        + [{"name": f"final: {label}"} for label in choice_labels]
    )
    # Flow values in the same row-major order as the (src, dst) pairs below.
    flow_values = [
        mg11, mg12, mg13, mg14,
        mg21, mg22, mg23, mg24,
        mg31, mg32, mg33, mg34,
        mg41, mg42, mg43, mg44,
    ]
    pairs = [(src, dst) for src in choice_labels for dst in choice_labels]
    links = [
        {"source": f"original: {src}", "target": f"final: {dst}", "value": flow}
        for (src, dst), flow in zip(pairs, flow_values)
    ]
    chart = (
        Sankey()
        .add(
            "sankey",
            nodes,
            links,
            linestyle_opt=opts.LineStyleOpts(opacity=0.2, curve=0.5, color="source"),
            label_opts=opts.LabelOpts(position="right"),
        )
        .set_global_opts(
            title_opts=opts.TitleOpts(title="Choice Flow in Questionnaire A")
        )
    )
    return chart
# -
# load_javascript() must run first so the echarts JS assets are available
# for the inline render that follows.
sankey_mg().load_javascript()
sankey_mg().render_notebook() # above two lines are used to render sankey plot inline.
# Pair each questionnaire-B respondent's original (Q3) and final (Q5) choice.
# NOTE: one-shot zip iterator, consumed by the counting loop below.
df_wb_change = zip(df_wb["Q3"], df_wb["Q5"])
# + jupyter={"source_hidden": true}
# Tally original->final choice transitions for questionnaire B.
# Dict-based counting replaces the original 16-branch if chain.
# NOTE: df_wb_change is a zip iterator, so this loop consumes it.
_wb_pairs = {(p, q): 0 for p in range(1, 5) for q in range(1, 5)}
for p, q in df_wb_change:
    if (p, q) in _wb_pairs:  # values outside options 1..4 are ignored, as before
        _wb_pairs[(p, q)] += 1
# Unpack into the individual wbPQ names used by the Sankey and change-rate cells.
(wb11, wb12, wb13, wb14,
 wb21, wb22, wb23, wb24,
 wb31, wb32, wb33, wb34,
 wb41, wb42, wb43, wb44) = (
    _wb_pairs[(p, q)] for p in range(1, 5) for q in range(1, 5)
)
# + jupyter={"source_hidden": true}
# Duplicate of the pyecharts setup cell earlier in the notebook; the
# re-imports are harmless but redundant.
from pyecharts.globals import CurrentConfig, NotebookType
CurrentConfig.NOTEBOOK_TYPE = NotebookType.JUPYTER_LAB
import json
import os
from pyecharts import options as opts
from pyecharts.charts import Page, Sankey
def sankey_wb() -> Sankey:
    """Build a Sankey diagram of the original -> final choice flow for questionnaire B.

    Uses the module-level transition counts wb11..wb44 (row-major by
    original option, then final option) as the link values.
    """
    choice_labels = [
        "right hand/ forehead",
        "right hand/ jaw",
        "left hand/ forehead",
        "left hand/ jaw",
    ]
    # One node per original choice followed by one node per final choice.
    nodes = (
        [{"name": f"original: {label}"} for label in choice_labels]
        + [{"name": f"final: {label}"} for label in choice_labels]
    )
    # Flow values in the same row-major order as the (src, dst) pairs below.
    flow_values = [
        wb11, wb12, wb13, wb14,
        wb21, wb22, wb23, wb24,
        wb31, wb32, wb33, wb34,
        wb41, wb42, wb43, wb44,
    ]
    pairs = [(src, dst) for src in choice_labels for dst in choice_labels]
    links = [
        {"source": f"original: {src}", "target": f"final: {dst}", "value": flow}
        for (src, dst), flow in zip(pairs, flow_values)
    ]
    chart = (
        Sankey()
        .add(
            "sankey",
            nodes,
            links,
            linestyle_opt=opts.LineStyleOpts(opacity=0.2, curve=0.5, color="source"),
            label_opts=opts.LabelOpts(position="right"),
        )
        .set_global_opts(
            title_opts=opts.TitleOpts(title="Choice Flow in Questionnaire B")
        )
    )
    return chart
# -
# Render the questionnaire-B Sankey inline (JS assets first, then the chart).
sankey_wb().load_javascript()
sankey_wb().render_notebook()
# ## for H1: total change rate
# Change rate = share of respondents whose final choice (Q5) differs from
# their original choice (Q3). The diagonal transition counts (mgII / wbII)
# are the "kept" choices.
# NOTE(review): the original hard-coded a denominator of 100 respondents per
# questionnaire; deriving the total from the transition counts keeps the rate
# correct if the sample size differs — confirm the totals match the raw data.
total_mg = (mg11 + mg12 + mg13 + mg14 + mg21 + mg22 + mg23 + mg24
            + mg31 + mg32 + mg33 + mg34 + mg41 + mg42 + mg43 + mg44)
change_rate_mg = 1 - (mg11 + mg22 + mg33 + mg44) / total_mg
print("%.2f" % change_rate_mg)
total_wb = (wb11 + wb12 + wb13 + wb14 + wb21 + wb22 + wb23 + wb24
            + wb31 + wb32 + wb33 + wb34 + wb41 + wb42 + wb43 + wb44)
change_rate_wb = 1 - (wb11 + wb22 + wb33 + wb44) / total_wb
print("%.2f" % change_rate_wb)
# Bar chart comparing the total change rates of the two questionnaires.
plt.figure(figsize=[5, 1.5])
plt.title("Total Choice Change Rate")
plt.xlabel("Rate")
plt.grid(axis="x", c="w")
ax = plt.gca() # relies on the figure created just above
ax.spines["top"].set_visible(False) # erase top frame
ax.spines["bottom"].set_visible(False) # erase bottom frame
ax.spines["right"].set_visible(False) # erase right frame
for x, y in zip([change_rate_mg, change_rate_wb], [1, 2]):
    plt.text(x + 0.01, y - 1.1, "%.2f" % x)
plt.barh(["Questionnaire A", "Questionnaire B"], [change_rate_mg, change_rate_wb])
top = max(change_rate_mg, change_rate_wb)
ax.set_xticks(np.arange(0, top, 0.1))
#plt.savefig("Total Choice Change Rate.png")
plt.show()
# ## for H4: option changes among genders
# +
# Tally, per gender (Q1: 1 = men, 2 = women), how many questionnaire-A
# respondents kept vs. changed their choice between Q3 and Q5.
men_keep_mg, men_change_mg, women_keep_mg, women_change_mg = 0, 0, 0, 0
for gender, original, final in zip(df_mg["Q1"], df_mg["Q3"], df_mg["Q5"]):
    kept = original == final
    if gender == 1:
        if kept:
            men_keep_mg += 1
        else:
            men_change_mg += 1
    elif gender == 2:
        if kept:
            women_keep_mg += 1
        else:
            women_change_mg += 1
# +
# Same per-gender kept/changed tally, for questionnaire B.
men_keep_wb, men_change_wb, women_keep_wb, women_change_wb = 0, 0, 0, 0
for gender, original, final in zip(df_wb["Q1"], df_wb["Q3"], df_wb["Q5"]):
    kept = original == final
    if gender == 1:
        if kept:
            men_keep_wb += 1
        else:
            men_change_wb += 1
    elif gender == 2:
        if kept:
            women_keep_wb += 1
        else:
            women_change_wb += 1
# -
# Combine both questionnaires, then express each gender's change/keep split
# as a percentage (the waffle chart below expects values out of 100).
men_keep = men_keep_mg + men_keep_wb
men_change = men_change_mg + men_change_wb
women_keep = women_keep_mg + women_keep_wb
women_change = women_change_mg + women_change_wb
print(men_keep, men_change, women_keep, women_change)
# since the totals of men and women are different, we scale them to 100
men_change_scaled = men_change * 100 / (men_change + men_keep)
# Scaled values are floats; the waffle cell below truncates them with int(),
# so each gender pair may sum to 99 instead of 100.
men_keep_scaled = 100 - men_change_scaled
women_change_scaled = women_change * 100 / (women_change + women_keep)
women_keep_scaled = 100 - women_change_scaled
# Waffle chart of the scaled change/keep percentages per gender.
# NOTE(review): int() truncates the scaled floats, so the four values may not
# sum to exactly 200 squares. `Waffle` is presumably pywaffle's FigureClass,
# imported earlier in the notebook — confirm.
data = {
    "Men Who Change Their Choices": int(men_change_scaled),
    "Men Who Keep Their Choices": int(men_keep_scaled),
    "Women Who Change Their Choices": int(women_change_scaled),
    "Women Who Keep Their Choices": int(women_keep_scaled),
}
fig = plt.figure(
    FigureClass=Waffle,
    rows=10,
    figsize=(10, 15),
    values=data,
    colors=("#A2E7FA", "#0BA2CB", "#F6ACC6", "#E7256A"),
    title={
        "label": "Choice Change Total Between Men and Women",
        "loc": "left",
        "fontsize": 18,
        "fontstyle": "normal",
    },
    labels=["{0}: {1}".format(k, v) for k, v in data.items()],
    legend={
        "loc": "upper left",
        "fontsize": 11,
        "bbox_to_anchor": (1.05, 0.7),
        # "ncol": len(data),
        "framealpha": 1,
    },
)
fig.gca().set_facecolor("#FFFFFF")
fig.set_facecolor("#FFFFFF")
plt.show()
# ## for H2: change rate with different periods
# +
# convert start time, finish time to datetime format
# NOTE(review): .strip("PM") strips any leading/trailing 'P' or 'M'
# characters (a character set, not the literal suffix "PM"); it works here
# because timestamps start with a digit — confirm against the raw data.
# NOTE(review): df_mg["start"][i] = ... is chained indexing; if df_mg is a
# pandas DataFrame this may raise SettingWithCopyWarning.
df_mg_start, df_mg_finish, period_mg = [], [], []
for i in range(len(df_mg["start"])):
    df_mg["start"][i] = df_mg["start"][i].strip("PM")
    df_mg_start.append(
        datetime.datetime.strptime(df_mg["start"][i], "%Y-%m-%d %H:%M:%S")
    )
for i in range(len(df_mg["finish"])):
    df_mg["finish"][i] = df_mg["finish"][i].strip("PM")
    df_mg_finish.append(
        datetime.datetime.strptime(df_mg["finish"][i], "%Y-%m-%d %H:%M:%S")
    )
# get time period of each person
for s, f in zip(df_mg_start, df_mg_finish):
    period_mg.append((f - s).total_seconds())
print(period_mg)
# -
# Completion time in seconds, stored back on the dataframe for later cells.
df_mg["period"] = period_mg
df_mg
# +
# convert start time, finish time to datetime format
# (Same caveats as the questionnaire-A cell: strip("PM") strips a character
# set, and df_wb[...][i] = ... is chained indexing.)
df_wb_start, df_wb_finish, period_wb = [], [], []
for i in range(len(df_wb["start"])):
    df_wb["start"][i] = df_wb["start"][i].strip("PM")
    df_wb_start.append(
        datetime.datetime.strptime(df_wb["start"][i], "%Y-%m-%d %H:%M:%S")
    )
for i in range(len(df_wb["finish"])):
    df_wb["finish"][i] = df_wb["finish"][i].strip("PM")
    df_wb_finish.append(
        datetime.datetime.strptime(df_wb["finish"][i], "%Y-%m-%d %H:%M:%S")
    )
# get time period of each person
for s, f in zip(df_wb_start, df_wb_finish):
    period_wb.append((f - s).total_seconds())
print(period_wb)
# -
# Completion time in seconds, stored back on the dataframe for later cells.
df_wb["period"] = period_wb
df_wb
# +
# Bucket questionnaire-A respondents by completion time (<60s, 60-120s,
# >=120s) and count, per bucket, how many changed vs. kept their choice.
short_change_mg = short_keep_mg = 0
mid_change_mg = mid_keep_mg = 0
long_change_mg = long_keep_mg = 0
for secs, first, last in zip(df_mg["period"], df_mg["Q3"], df_mg["Q5"]):
    changed = first != last
    if secs < 60:
        if changed:
            short_change_mg += 1
        else:
            short_keep_mg += 1
    elif secs < 120:
        if changed:
            mid_change_mg += 1
        else:
            mid_keep_mg += 1
    else:
        if changed:
            long_change_mg += 1
        else:
            long_keep_mg += 1
# Change rate per bucket = changed / (changed + kept).
short_change_rate_mg = short_change_mg / (short_change_mg + short_keep_mg)
mid_change_rate_mg = mid_change_mg / (mid_change_mg + mid_keep_mg)
long_change_rate_mg = long_change_mg / (long_change_mg + long_keep_mg)
# -
# Bar chart of change rate per completion-time bucket (questionnaire A).
plt.figure(figsize=[4, 2])
plt.title("Choice Change Rate in Different Period under Questionnaire A")
plt.xlabel("Rate")
plt.grid(axis="x", c="w")
ax = plt.gca() # relies on the figure created just above
ax.spines["top"].set_visible(False) # erase top frame
ax.spines["bottom"].set_visible(False) # erase bottom frame
ax.spines["right"].set_visible(False) # erase right frame
for x, y in zip(
    [long_change_rate_mg, mid_change_rate_mg, short_change_rate_mg], [1, 2, 3]
):
    plt.text(x + 0.01, y - 1.1, "%.2f" % x)
plt.barh(
    ["[120s,infinity)", "[60s,120s)", "[0,60s)"],
    [long_change_rate_mg, mid_change_rate_mg, short_change_rate_mg],
)
top = max(long_change_rate_mg, mid_change_rate_mg, short_change_rate_mg)
ax.set_xticks(np.arange(0, top, 0.1))
#plt.savefig("Choice Change Rate in Different Period under Questionnaire A.png")
plt.show()
# +
# Same completion-time bucketing and change/keep tally, for questionnaire B.
short_change_wb = short_keep_wb = 0
mid_change_wb = mid_keep_wb = 0
long_change_wb = long_keep_wb = 0
for secs, first, last in zip(df_wb["period"], df_wb["Q3"], df_wb["Q5"]):
    changed = first != last
    if secs < 60:
        if changed:
            short_change_wb += 1
        else:
            short_keep_wb += 1
    elif secs < 120:
        if changed:
            mid_change_wb += 1
        else:
            mid_keep_wb += 1
    else:
        if changed:
            long_change_wb += 1
        else:
            long_keep_wb += 1
# Change rate per bucket = changed / (changed + kept).
short_change_rate_wb = short_change_wb / (short_change_wb + short_keep_wb)
mid_change_rate_wb = mid_change_wb / (mid_change_wb + mid_keep_wb)
long_change_rate_wb = long_change_wb / (long_change_wb + long_keep_wb)
# -
# Bar chart of change rate per completion-time bucket (questionnaire B).
plt.figure(figsize=[4, 2])
plt.title("Choice Change Rate in Different Period under Questionnaire B")
plt.xlabel("Rate")
plt.grid(axis="x", c="w")
ax = plt.gca() # relies on the figure created just above
ax.spines["top"].set_visible(False) # erase top frame
ax.spines["bottom"].set_visible(False) # erase bottom frame
ax.spines["right"].set_visible(False) # erase right frame
for x, y in zip(
    [long_change_rate_wb, mid_change_rate_wb, short_change_rate_wb], [1, 2, 3]
):
    plt.text(x + 0.01, y - 1.1, "%.2f" % x)
plt.barh(
    ["[120s,infinity)", "[60s,120s)", "[0,60s)"],
    [long_change_rate_wb, mid_change_rate_wb, short_change_rate_wb],
)
top = max(long_change_rate_wb, mid_change_rate_wb, short_change_rate_wb)
ax.set_xticks(np.arange(0, top, 0.1))
plt.show()
# ## for H5: change rate with different self-evaluation
# +
# Bucket questionnaire-A respondents by their Q6 self-evaluation score
# (0-10) and count, per bucket, how many changed vs. kept their choice.
low_change_mg = low_keep_mg = 0
soso_change_mg = soso_keep_mg = 0
high_change_mg = high_keep_mg = 0
for score, first, last in zip(df_mg["Q6"], df_mg["Q3"], df_mg["Q5"]):
    if not 0 <= score <= 10:
        continue  # outside the expected rating range; ignored, as before
    changed = first != last
    if score < 3:  # [0, 3): feels little affected
        if changed:
            low_change_mg += 1
        else:
            low_keep_mg += 1
    elif score < 7:  # [3, 7): feels moderately affected
        if changed:
            soso_change_mg += 1
        else:
            soso_keep_mg += 1
    else:  # [7, 10]: feels much affected
        if changed:
            high_change_mg += 1
        else:
            high_keep_mg += 1
# Change rate per bucket = changed / (changed + kept).
low_change_rate_mg = low_change_mg / (low_change_mg + low_keep_mg)
soso_change_rate_mg = soso_change_mg / (soso_change_mg + soso_keep_mg)
high_change_rate_mg = high_change_mg / (high_change_mg + high_keep_mg)
# -
# Bar chart of change rate per self-evaluation bucket (questionnaire A).
plt.figure(figsize=[4, 2])
plt.title("Choice Change Rate in Different Self-Evaluation under Questionnaire A")
plt.xlabel("Rate")
plt.grid(axis="x", c="w")
ax = plt.gca() # relies on the figure created just above
ax.spines["top"].set_visible(False) # erase top frame
ax.spines["bottom"].set_visible(False) # erase bottom frame
ax.spines["right"].set_visible(False) # erase right frame
for x, y in zip(
    [high_change_rate_mg, soso_change_rate_mg, low_change_rate_mg], [1, 2, 3]
):
    plt.text(x + 0.01, y - 1.1, "%.2f" % x)
plt.barh(
    ["feel much affected", "feel moderate affected", "feel little affected"],
    [high_change_rate_mg, soso_change_rate_mg, low_change_rate_mg],
)
top = max(high_change_rate_mg, soso_change_rate_mg, low_change_rate_mg)
ax.set_xticks(np.arange(0, top, 0.1))
plt.show()
# +
# Same self-evaluation bucketing and change/keep tally, for questionnaire B.
low_change_wb = low_keep_wb = 0
soso_change_wb = soso_keep_wb = 0
high_change_wb = high_keep_wb = 0
for score, first, last in zip(df_wb["Q6"], df_wb["Q3"], df_wb["Q5"]):
    if not 0 <= score <= 10:
        continue  # outside the expected rating range; ignored, as before
    changed = first != last
    if score < 3:  # [0, 3): feels little affected
        if changed:
            low_change_wb += 1
        else:
            low_keep_wb += 1
    elif score < 7:  # [3, 7): feels moderately affected
        if changed:
            soso_change_wb += 1
        else:
            soso_keep_wb += 1
    else:  # [7, 10]: feels much affected
        if changed:
            high_change_wb += 1
        else:
            high_keep_wb += 1
# Change rate per bucket = changed / (changed + kept).
low_change_rate_wb = low_change_wb / (low_change_wb + low_keep_wb)
soso_change_rate_wb = soso_change_wb / (soso_change_wb + soso_keep_wb)
high_change_rate_wb = high_change_wb / (high_change_wb + high_keep_wb)
# -
# Bar chart of change rate per self-evaluation bucket (questionnaire B).
plt.figure(figsize=[4, 2])
plt.title("Choice Change Rate in Different Self-Evaluation under Questionnaire B")
plt.xlabel("Rate")
plt.grid(axis="x", c="w")
ax = plt.gca() # relies on the figure created just above
ax.spines["top"].set_visible(False) # erase top frame
ax.spines["bottom"].set_visible(False) # erase bottom frame
ax.spines["right"].set_visible(False) # erase right frame
for x, y in zip(
    [high_change_rate_wb, soso_change_rate_wb, low_change_rate_wb], [1, 2, 3]
):
    plt.text(x + 0.01, y - 1.1, "%.2f" % x)
plt.barh(
    ["feel much affected", "feel moderate affected", "feel little affected"],
    [high_change_rate_wb, soso_change_rate_wb, low_change_rate_wb],
)
top = max(high_change_rate_wb, soso_change_rate_wb, low_change_rate_wb)
ax.set_xticks(np.arange(0, top, 0.1))
plt.show()
| coding/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/21alan/daa_20021_1/blob/master/recusividad2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="wK_rIw1Z5CTo" outputId="783cb62b-f198-4400-b866-a748125d8f9a"
def recur(bd, key, i, iz, de):
    """Recursive binary search over records sorted ascending by 'id'.

    bd  : list of student dicts, sorted ascending by 'id'
    key : id to look up
    i   : recursion-depth counter (kept for interface compatibility)
    iz/de : half-open search interval [iz, de)

    Prints the student's data when found, or a not-found message, and
    returns 0 (as the original did at its base cases).

    Fixes vs. the original: the "greater" branch now excludes the probed
    middle element (medio + 1), so the interval always shrinks and the
    search terminates on an empty interval instead of relying on the depth
    counter; the not-found message typo ("exite") is corrected.
    """
    if iz >= de:
        # Empty interval: the id is not present.
        print("el alumno no existe")
        return 0
    medio = (iz + de) // 2
    if bd[medio]["id"] == key:
        print(bd[medio]["nombre"], "estudia la carrera", bd[medio]["carrera"],
              "y tiene un promedio de", bd[medio]["promedio"])
        return 0
    if key > bd[medio]["id"]:
        # Search the upper half; medio itself was already ruled out.
        return recur(bd, key, i + 1, medio + 1, de)
    # Search the lower half [iz, medio).
    return recur(bd, key, i + 1, iz, medio)
# Sample records; 'id' values are in ascending order, which the binary
# search in recur() requires.
alumno1={'id':2, 'nombre':"Juan" , 'carrera':"ICO", 'promedio':7.67}
alumno2={'id':4, 'nombre':"Rocio" , 'carrera':"ICI", 'promedio':8.67}
alumno3={'id':5, 'nombre':"Diego" , 'carrera':"DER", 'promedio':8.98}
alumno4={'id':7, 'nombre':"May" , 'carrera':"ICI", 'promedio':9.87}
alumno5={'id':9, 'nombre':"Rob" , 'carrera':"IME", 'promedio':10.00}
alumno6={'id':10, 'nombre':"Santi" , 'carrera':"ICO", 'promedio':5.37}
alumno7={'id':14, 'nombre':"Moy" , 'carrera':"IME", 'promedio':6.85}
alumno8={'id':16, 'nombre':"Diana" , 'carrera':"DER", 'promedio':9.99}
alumno9={'id':19, 'nombre':"Zoila" , 'carrera':"ICO", 'promedio':8.22}
alumno10={'id':22, 'nombre':"Armando" , 'carrera':"ICO", 'promedio':7.32}
bd = []
bd.append(alumno1)
bd.append(alumno2)
bd.append(alumno3)
bd.append(alumno4)
bd.append(alumno5)
bd.append(alumno6)
bd.append(alumno7)
bd.append(alumno8)
bd.append(alumno9)
bd.append(alumno10)
# Prompt for an id (blocks on user input) and search the full range [0, len(bd)).
key=int(input("¿Dame el id del alumno a buscar?: "))
recur(bd,key,0,0,len(bd))
| recusividad2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="GYLG4aTZxmgF"
# # tf-simple-metric-learningインストール
# URL:https://github.com/daigo0927/tf-simple-metric-learning
# + id="SBtU0mAKxY7O"
# !pip install tf-simple-metric-learning
# + [markdown] id="djOzqOdUxpTb"
# # インポート
# + id="nQAe8G3jxijE"
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# + id="MVN5x_Oyxtl2"
from tf_simple_metric_learning.layers import CircleLossCL
# + [markdown] id="YpyHJCTkx2c8"
# # Load the MNIST data
# + colab={"base_uri": "https://localhost:8080/"} id="eL4S07hTx0J0" outputId="3326db71-30ee-4ccd-fa67-a690771d6280"
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# + id="OMTrJV-fx57k"
# Add a channel axis (NHWC: 28x28x1) and scale pixel values to [0, 1].
x_train = x_train.reshape((60000, 28, 28, 1))
x_test = x_test.reshape((10000, 28, 28, 1))
x_train = x_train / 255.0
x_test = x_test / 255.0
# + [markdown] id="-cgSO_obyZlU"
# # Build the model
# + id="SSbX8zb9yeM_"
NUM_CLASSES = 10
model_save_path = './mnist.hdf5'
# + id="KVwDLwbFy6Gu"
# Build the base CNN that produces a 1024-dim embedding per image.
model_input = tf.keras.layers.Input((28, 28, 1))
embeds = tf.keras.layers.Conv2D(32, (3, 3), activation='relu')(model_input)
embeds = tf.keras.layers.MaxPooling2D((2, 2))(embeds)
embeds = tf.keras.layers.Conv2D(64, (3, 3), activation='relu')(embeds)
embeds = tf.keras.layers.MaxPooling2D((2, 2))(embeds)
embeds = tf.keras.layers.Conv2D(128, (3, 3), activation='relu')(embeds)
embeds = tf.keras.layers.Flatten()(embeds)
embeds = tf.keras.layers.Dense(1024)(embeds)
# + id="DdV2Tnery8dl"
# Create the Circle-loss metric-learning head (needs labels at train time).
metric_layer = CircleLossCL(num_classes=NUM_CLASSES, margin=0.25, scale=256)
# + id="fSjshZbiy2-U"
# Labels are a second model input so the metric layer can see them.
labels = tf.keras.layers.Input([], dtype=tf.int32)
labels_onehot = tf.one_hot(labels, depth=NUM_CLASSES)
logits = metric_layer([embeds, labels_onehot])
# + id="j5xxeavly_Fb"
model = tf.keras.Model(inputs=[model_input, labels], outputs=logits)
# + colab={"base_uri": "https://localhost:8080/", "height": 976} id="CHjbP_MVzAIv" outputId="04f69331-ef06-44a0-c532-05c56ceecf02"
tf.keras.utils.plot_model(model, show_shapes=True)
# + id="LW1CtL-UzBV3"
# Compile the model.
model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=tf.keras.metrics.SparseCategoricalAccuracy()
)
# + [markdown] id="HR_gwUdV1M_u"
# # Training
# + colab={"base_uri": "https://localhost:8080/"} id="a42EgkyXzDSx" outputId="e5428028-8ef9-497a-b83a-a42e166c3442"
# Labels are passed both as a model input (for the metric layer) and as the
# training target.
model.fit(
    [x_train, y_train],
    y_train,
    epochs=100,
)
# + [markdown] id="U3hclvU-1bCT"
# # Save the inference model
# + id="djPHpEoRzEe6"
# Extract the inference model: image input only, embedding layer as output
# (layers[-3] skips the metric head added for training).
model = tf.keras.Model(inputs=model.input[0], outputs=model.layers[-3].output)
# + colab={"base_uri": "https://localhost:8080/", "height": 865} id="IYkAk7S6zmq0" outputId="a978c80f-3cbc-4a93-8c9b-6a329af3dc1d"
tf.keras.utils.plot_model(model, show_shapes=True)
# + id="ijpirmhOzm62"
model.save(model_save_path)
# + [markdown] id="HgMkyF7S1rRi"
# # Inference test
# + colab={"base_uri": "https://localhost:8080/"} id="2O1v7lO7zoGP" outputId="8ff71311-2709-4778-e2bd-d37ff2f8b90f"
# Load the saved inference model back from disk.
load_model = tf.keras.models.load_model(model_save_path)
# + colab={"base_uri": "https://localhost:8080/"} id="TjLuxombzpkX" outputId="6f684acb-5ade-43b0-c83f-c77cc97ae2a1"
# Run a single-sample prediction and print the resulting embedding vector.
predict_result = load_model.predict(np.array([x_test[0]]))
print(np.squeeze(predict_result))
# + [markdown] id="599wLMeO1wGF"
# # UMAP visualization
# + colab={"base_uri": "https://localhost:8080/"} id="_ZvtN9MDzsOR" outputId="cd4d6875-607d-4cc7-f7d4-d38717cedd93"
from tqdm import tqdm
# Embed every test image.
# NOTE(review): calling predict() once per sample is very slow; a single
# batched load_model.predict(x_test) should give the same embeddings.
predict_results = []
for x_test_data in tqdm(x_test):
    predict_result = load_model.predict(np.array([x_test_data]))
    predict_results.append(np.squeeze(predict_result))
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="KE1uzp5rzt0O" outputId="d4fb65bf-e9ab-4b05-86f0-c040f00a2ed3"
from umap import UMAP
import matplotlib.pyplot as plt
# Project the embeddings to 2D and color by true digit label.
umap = UMAP(n_components=2)
umap.fit(predict_results)
embedding = umap.transform(predict_results)
plt.scatter(embedding[:, 0], embedding[:, 1], c=y_test, cmap='rainbow')
plt.colorbar()
# Label one representative point per digit class.
for i in range(NUM_CLASSES):
    index = np.where(y_test==i)[0][0]
    plt.annotate(str(y_test[index]), (embedding[index, 0], embedding[index, 1]), size=30)
# + [markdown] id="wxNiK5VQ2pbB"
# # Distance comparison via cosine similarity
# + id="SCgRj5SXzvM0"
def cosine_similarity(x1, x2):
    """Pairwise cosine similarity between the rows of x1 and the rows of x2.

    x1, x2 : array-like of shape (d,) or (m, d) / (n, d); 1-D inputs are
             promoted to a single row.
    Returns an (m, n) matrix of cosine similarities.

    Fix vs. the original: the denominator was `x1_norm * x2_norm`, which
    only broadcasts when one side has a single row; the outer product of
    the norms makes it correct for any m and n (identical results for the
    original's m == 1 case, including the 1e-10 epsilon).
    """
    if x1.ndim == 1:
        x1 = x1[np.newaxis]
    if x2.ndim == 1:
        x2 = x2[np.newaxis]
    x1_norm = np.linalg.norm(x1, axis=1)
    x2_norm = np.linalg.norm(x2, axis=1)
    # (m, n) matrix of norm products so the division broadcasts elementwise.
    denom = np.outer(x1_norm, x2_norm) + 1e-10
    return np.dot(x1, x2.T) / denom
# + colab={"base_uri": "https://localhost:8080/"} id="h8w31luNzwdB" outputId="d8be6ff3-295e-4c29-a8e4-cf3464a1063e"
# Embed one training image and show its ground-truth label.
result = load_model.predict(np.array([x_train[0]]))
print("正解値:" + str(y_train[0]))
# + colab={"base_uri": "https://localhost:8080/"} id="1F5ARIx15uFe" outputId="2c0e0817-5096-4cb4-cc7a-b8b0334a563b"
# Find the test embedding most similar to it and report the predicted label.
similarity = cosine_similarity(np.array(result), np.array(predict_results))
similarity = np.squeeze(similarity)
most_similar_index = np.argmax(similarity)
print("コサイン類似度:" + str(similarity[most_similar_index]))
print("推測値:" + str(y_test[most_similar_index]))
| mnist_sample_using_tf_simple_metric_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# +
import math
import torch
import numpy as np
import torch.optim as optim
import matplotlib.pyplot as plt
from torch import nn
from tqdm import tqdm_notebook
import torch.nn.functional as F
from text_vae.cvae import RnnVae
from text_vae.corpus import SSTCorpus
from text_vae.metrics import Evaluator
# NOTE(review): sklearn.datasets.lfw is a private module path; modern
# scikit-learn exposes Bunch as sklearn.utils.Bunch — confirm the pinned
# sklearn version before changing.
from sklearn.datasets.lfw import Bunch
from torch.nn.utils import clip_grad_norm_
# -
path = Bunch(
vae_save='vae.pt',
save='text_vae.pt'
)
# !nvidia-smi
device_code = 3
device_s = f'cuda:{device_code}' if device_code >= 0 and torch.cuda.is_available() else 'cpu'
device = torch.device(device_s)
args, model = torch.load(path.vae_save, map_location=device_s)
model.encoder_rnn.flatten_parameters()
model.decoder_rnn.flatten_parameters()
args.device_code = device_code
assert model.x_emb.weight.device == device
device, model
args.train.lambda_u = 0.1
args.train.lambda_z = 0.1
args.train.lambda_c = 0.1
args.train.beta = 0.1
args.train.n_iter = 10000
args
corpus = SSTCorpus(**args.model, n_batch=args.train.n_batch, device=device)
evaluator = Evaluator(corpus)
class TempAnnealer:
    """Temperature schedule that decays smoothly from ~1 down to ~eps.

    The schedule is the complement of a tanh-shaped ramp centred at
    n_iter / 2 with width n_iter / 10; eps keeps the temperature strictly
    positive at the end of training.
    """

    def __init__(self, n_iter, eps=1e-5):
        self.n_iter = n_iter
        self.eps = eps

    def __call__(self, i):
        midpoint = self.n_iter / 2
        width = self.n_iter / 10
        ramp = (math.tanh((i - midpoint) / width) + 1) / 2
        return 1 - ramp + self.eps
# Plot the temperature schedule over the full training run.
temp_annealer = TempAnnealer(n_iter=args.train.n_iter)
xs = np.linspace(0, args.train.n_iter, num=1000)
ts = np.array([temp_annealer(i) for i in xs])
plt.figure(figsize=(7, 7))
plt.plot(xs, ts);
# Fresh trainable-parameter generators per component (generators are
# single-use, hence the lambdas), and one Adam optimizer each for the
# encoder E, generator/decoder G and discriminator D.
get_params_E = lambda: (p for p in model.encoder.parameters() if p.requires_grad)
get_params_G = lambda: (p for p in model.decoder.parameters() if p.requires_grad)
get_params_D = lambda: (p for p in model.discriminator.parameters() if p.requires_grad)
trainer_E = optim.Adam(get_params_E(), lr=args.train.lr)
trainer_G = optim.Adam(get_params_G(), lr=args.train.lr)
trainer_D = optim.Adam(get_params_D(), lr=args.train.lr)
# Adversarial training loop alternating three updates per batch:
# discriminator D, generator G (+ soft-embedding consistency), encoder E.
model.train()
batcher = corpus.batcher('labeled', 'train', n_iter=args.train.n_iter)
t = tqdm_notebook(enumerate(batcher))
losses, log, epoch = [], [], 0  # log/epoch kept for parity; unused below
for i, (x, y) in t:
    # >Train the discriminator D by Eq.(11)
    cw = model.forward_discriminator(x)
    _, c_gen, x_gen = model.sample_sentence(x.size(0))
    cw_gen = model.forward_discriminator(x_gen)
    # Supervised loss on labeled data + weighted loss on generated samples
    # (with an entropy term scaled by beta).
    loss_s = F.cross_entropy(cw, y)
    entropy_gen = -(F.log_softmax(cw_gen, dim=1)).mean()
    loss_u = F.cross_entropy(cw_gen, c_gen.argmax(1)) + args.train.beta * entropy_gen
    loss_D = loss_s + args.train.lambda_u * loss_u
    trainer_D.zero_grad()
    loss_D.backward()
    clip_grad_norm_(get_params_D(), args.train.grad_clipping)
    trainer_D.step()
    # >Train the generator G and the encoder E by Eq.(8)
    kl_loss, recon_loss = model(x, use_c_prior=False)
    z_gen, c_gen, x_gen = model.sample_soft_embed(x.size(0), temp=temp_annealer(i))
    z_gen_pred, _ = model.forward_encoder(x_gen, do_emb=False)
    cw_gen_pred = model.forward_discriminator(x_gen, do_emb=False)
    loss_vae = args.train.kl.w_max * kl_loss + recon_loss
    loss_z = F.mse_loss(z_gen_pred, z_gen)
    loss_c = F.cross_entropy(cw_gen_pred, c_gen.argmax(1))
    # BUGFIX: the attribute-consistency loss must be *scaled* by lambda_c,
    # not added to it. The original read `... + args.train.lambda_c + loss_c`,
    # which added the constant 0.1 and weighted loss_c by 1 — inconsistent
    # with lambda_z * loss_z and lambda_u * loss_u above.
    loss_G = loss_vae + args.train.lambda_z * loss_z + args.train.lambda_c * loss_c
    trainer_G.zero_grad()
    loss_G.backward()
    clip_grad_norm_(get_params_G(), args.train.grad_clipping)
    trainer_G.step()
    # >and minimizing Eq.(4), respectively.
    kl_loss, recon_loss = model(x, use_c_prior=False)
    loss_E = args.train.kl.w_max * kl_loss + recon_loss
    trainer_E.zero_grad()
    loss_E.backward()
    clip_grad_norm_(get_params_E(), args.train.grad_clipping)
    trainer_E.step()
    # Calc metrics and update t (running mean over the last log_interval steps)
    losses.append(loss_D.item() + loss_G.item() + loss_E.item())
    cur_loss = np.mean(losses[-args.train.log_interval:])
    # lr_D, lr_G, lr_E = (trainer.param_groups[0]['lr'] for trainer in (trainer_D, trainer_G, trainer_E))
    t.set_postfix_str(f'loss={cur_loss:.5f} loss_D={loss_D.item():.5f} loss_G={loss_G.item():.5f} loss_E={loss_E.item():.5f}')
    t.refresh()
| text_cvae.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to mapclassify
#
# `mapclassify` implements a family of classification schemes for choropleth maps.
# Its focus is on the determination of the number of classes, and the assignment of observations to those classes.
# It is intended for use with upstream mapping and geovisualization packages (see [geopandas](https://geopandas.org/mapping.html) and [geoplot](https://residentmario.github.io/geoplot/user_guide/Customizing_Plots.html) for examples) that handle the rendering of the maps.
#
# In this notebook, the basic functionality of mapclassify is presented.
import mapclassify as mc
mc.__version__
# ## Example data
# mapclassify contains a built-in dataset for employment density for the 58 California counties.
y = mc.load_example()
# ## Basic Functionality
# All classifiers in `mapclassify` have a common interface and afford similar functionality. We illustrate these using the `MaximumBreaks` classifier.
# `MaximumBreaks` requires that the user specify the number of classes `k`. Given this, the logic of the classifier is to sort the observations in ascending order and find the difference between rank adjacent values. The class boundaries are defined as the $k-1$ largest rank-adjacent breaks in the sorted values.
mc.MaximumBreaks(y, k=4)
# The classifier returns an instance of `MaximumBreaks` that reports the resulting intervals and counts. The first class has closed lower and upper bounds:
# `[ 0.13, 228.49]`, with `0.13` being the minimum value in the dataset:
y.min()
# Subsequent intervals are open on the lower bound and closed on the upper bound. The fourth class has the maximum value as its closed upper bound:
y.max()
# Assigning the classifier to an object lets us inspect other aspects of the classifier:
#
mb4 = mc.MaximumBreaks(y, k=4)
mb4
# The `bins` attribute has the upper bounds of the intervals:
mb4.bins
# and `counts` reports the number of values falling in each bin:
#
mb4.counts
# The specific bin (i.e. label) for each observation can be found in the `yb` attribute:
mb4.yb
# ## Changing the number of classes
# Staying with the same classifier, the user can apply the same classification rule, but for a different number of classes:
mc.MaximumBreaks(y, k=7)
mb7 = mc.MaximumBreaks(y, k=7)
mb7.bins
mb7.counts
mb7.yb
# One additional attribute to mention here is the `adcm` attribute:
mb7.adcm
# `adcm` is a measure of fit, defined as the mean absolute deviation around the class median.
mb4.adcm
# The `adcm` can be expected to decrease as $k$ increases for a given classifier. Thus, if using as a measure of fit, the `adcm` should only be used to compare classifiers defined on the same number of classes.
# ## Next Steps
# `MaximumBreaks` is but one of many classifiers in `mapclassify`:
mc.classifiers.CLASSIFIERS
# To learn more about an individual classifier, introspection is available:
# +
# mc.MaximumBreaks?
# -
# For more comprehensive applications of `mapclassify` the interested reader is directed to the chapter on [choropleth mapping](https://geographicdata.science/book/notebooks/05_choropleth.html) in [Rey, Arribas-Bel, and Wolf (2020) "Geographic Data Science with PySAL and the PyData Stack”](https://geographicdata.science/book).
| notebooks/01_maximum_breaks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # See how ``run_experiments`` works based on a single experiment
# Load the occCompaRe package and this project's helpers.
devtools::load_all("../occCompaRe")
devtools::load_all(".")
# ## Setup experiments
#
# A proper set up of the experiments is required by defining the classifier comparison settings list (parcc). Furthermore, a function (get_refset()) must be available which creates the reference sets as required by the functions of occCompaRe
#
# If in doubt how this can be done, see .002_setup.
parcc <- set_expOpts("ocsvmOnly4revision1") # there are paths defined as default in there which you might need to adjust
#parcc
# ## Variables defined in ``run_experiments``
#
# NOTE(review): the key "get_resfet" looks like a typo for "get_refset"
# (cf. the comment above) — confirm against what set_expOpts() actually stores.
get_refset <- parcc[["get_resfet"]]
args.rs <- parcc[["args.rs"]]
# ## Select one TASK
args.rs$dirData
# #### Arguments (mirroring the formals of run_experiments)
fun = c(".trainUtest")
doPar=TRUE
# nCores=NULL, # used from parcc$nCores
debug=FALSE
overwrite=FALSE
overwrite_resTable=FALSE
loadOnly=FALSE
# +
loopElements <- c("seed", "nP", "nU", "fset", "idP", "method")

# Print where the current task sits inside the full parameter grid,
# e.g. "seed:1/3 | nP:2/4 | ... \n".
print.fun <- function(loopElements, task, parcc) {
    position_label <- function(par_name) {
        idx_current <- which(parcc[[par_name]] == task[[par_name]])
        n_total <- length(parcc[[par_name]])
        sprintf("%s/%s", idx_current, n_total)
    }
    labels <- sapply(loopElements, position_label)
    cat(paste(paste(loopElements, labels, sep = ":", collapse = " | "), "\n"))
}
# +
task <- parcc
task$seed=parcc$seed[1] # LOOP
task$nP=parcc$nP[1] # LOOP
task$nU=parcc$nU[1] # LOOP
task$fset=parcc$fset[1] # LOOP
rs_allTr <- get_refset(seed=task$seed,
fset=task$fset,
nP=task$nP, nU=task$nU,
args.rs)
task$idP <- parcc$idP[1] # LOOP
task$scaling <- parcc$scaling[1] # LOOP
# remove the training samples of the non-idP classes
rs <- rs_allTr[rs_allTr$set=="te" | rs_allTr$y %in% c(0, task$idP), ]
# class-specific scaling using preProcess
if (task$scaling == "ccs01") {
idx_scale <- rs$set=="tr" & rs$y==task$idP
pp <- preProcess(rs[idx_scale, -(1:2)], method="range")
# This is a fast solution...
if ("binsvm" %in% parcc$method) {
rs_sup <- predict(pp, rs_allTr[rs_allTr$y!=0, ])
# print(check_refset(rs_sup))
rs_sup$y <- puFactor(rs_sup$y==task$idP, TRUE)
}
rs[, -(1:2)] <- predict(pp, rs[, -(1:2)])
} else {
stop("Currently only ccs01 scaling is supported.")
}
idx4sigest <- c(which(rs$set=="tr" & rs$y==task$idP))
rs$y <- puFactor(rs$y==task$idP, TRUE)
# +
task$method = parcc$method[1] # LOOP
cat("\n\n*********************************\n")
print.fun(loopElements, task, parcc)
# Hyper-parameter grid for the chosen method; the kernel-width (sigma)
# estimate uses only the positive training samples (idx4sigest), and the
# feature columns exclude the first two bookkeeping columns.
tuneGrid.bak <-
    get_tuneGrid(rs[idx4sigest, -c(1,2)],
                 method=task$method,
                 param=task$param[[task$method]],
                 seed=task$seed)
# -
# -
# Run both evaluation modes (train/test and resampling validation) for the
# selected task, skipping models already present in the summary file.
for (ff in c(".trainUtest", ".resampling-val")) {
    cat("------------------------------------\n")
    if (ff != ".trainUtest" & task$method %in% c("binsvm")) {
        # FIX: print the skip message BEFORE `next`; the original placed the
        # cat() after `next`, making it unreachable.
        cat(paste0("Skipping ", ff, " for method ", task$method, ".\n"))
        next
    }
    idx_rs <- !logical(nrow(rs))  # keep all rows by default
    # Split mode string into function name and (optional) resampling suffix.
    stsp <- strsplit(ff, "-")[[1]]
    ff <- stsp[1]
    if (length(stsp) == 1) { # => .trainUtest
        # idx_rs[rm_ifNotPuResampl] <- F
        cat("PREV. removing PU - samples. Now not anymore!\n")
    } else {
        task$resampling <- stsp[2]
        stsp.r <- strsplit(task$resampling, "_")[[1]]
        if (length(stsp.r) == 1) { # => normal resampling
            # idx_rs[rm_ifNotPuResampl] <- F
            cat("PREV. removing PU - samples. Now not anymore!\n")
        }
    }
    summaryFile <- get_summaryFile(task$dn.res, task, ff)
    if (overwrite_resTable)
        unlink(summaryFile)
    # Reduce the tune grid to the models not yet found in the summary file.
    if (file.exists(summaryFile) & !overwrite & !loadOnly) {
        tasks_done <- try(read.csv(summaryFile, header=TRUE)) # was header=F!
        idx = matchDf(tuneGrid.bak, tasks_done)
        if (!all(is.na(idx))) {
            tuneGrid <-
                tuneGrid.bak[-as.numeric(rownames(
                    tuneGrid.bak[!is.na(idx), ])), ]
        } else {
            tuneGrid <- tuneGrid.bak
        }
    } else {
        tuneGrid <- tuneGrid.bak
    }
    cat(ff, ":", basename(summaryFile), "\n")
    cat("Number of models - all/to do:",
        nrow(tuneGrid.bak), " / ", nrow(tuneGrid), "\n")
    if (nrow(tuneGrid) > 0) {
        # Supervised baselines ("bin*") train on the fully labelled copy.
        if (substr(task$method, 1, 3) == "bin") {
            rs_run_exp <- rs_sup
        } else {
            rs_run_exp <- rs[idx_rs, ]
        }
        # NOTE(review): `rm_ifNotPuResampl` is never assigned in this script
        # (it only appears in commented-out code above) — confirm it is
        # provided by the loaded packages, otherwise this call errors.
        re <-
            run_exp(rs_run_exp,
                    looppar=task,
                    tuneGrid=tuneGrid,
                    fun=ff,
                    doPar=doPar,
                    loadOnly=loadOnly,
                    overwrite=overwrite,
                    rm_ifNotPuResampl=rm_ifNotPuResampl)
        # inherits() is the robust way to test for a try() failure.
        if (inherits(re, "try-error")) {
            print(">>>>>> ERROR IN trainUtest!")
        }
    }
}
| _nb_03a_run_one_experiment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="nSBrMz_ED6HL"
# # RACE-Distractor-Generation
# + id="1-kuP2_W12-j" colab={"base_uri": "https://localhost:8080/"} outputId="c09e741e-8639-4304-c9a9-1ba31de21c3e"
# !nvidia-smi
# + id="ZsaKsEPKiEAL"
import sys
IN_COLAB = 'google.colab' in sys.modules
RUN_TRAINING_CELLS = IN_COLAB
EXPERIMENT_NAME = 'RACE-Distractor-Generation/'
DRIVE_FOLDER_LOCATION = '/content/drive/My Drive/QG-Colab/Github/' + EXPERIMENT_NAME
# + colab={"base_uri": "https://localhost:8080/"} id="asTTT9daGGme" outputId="4602ce11-6923-4f71-a0c4-42e11011425e"
# Mounting google drive
if IN_COLAB:
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# + [markdown] id="eI8Saapu878D"
# ## Environment setup
# Setting up Google drive as working directory and installing packages.
# + id="rgN8Nl4xRHPt" colab={"base_uri": "https://localhost:8080/"} outputId="86669b5d-cdfd-4e0a-a9dc-ea274cb6a423"
# Using my own Google Drive during the experiment to save all checkpoints and training logs.
if IN_COLAB:
# Adapted from: https://robertbrucecarter.com/writing/2020/06/setting-your-working-directory-to-google-drive-in-a-colab-notebook/
import os
def create_and_set_working_directory(path: str):
# check if your project folder exists. if not, it will be created.
if os.path.isdir(path) == False:
os.mkdir(path)
print(path + ' did not exist but was created.')
# change the OS to use your project folder as the working directory
os.chdir(path)
print('Working directory changed to: \n' + path)
create_and_set_working_directory(DRIVE_FOLDER_LOCATION)
# !pwd
# + id="5hrFOevkEKG2" colab={"base_uri": "https://localhost:8080/"} outputId="e681d195-5e74-4aa3-da4d-95f2b32c693a"
# Install packages
if IN_COLAB:
# !pip install --quiet transformers==4.3.0
# !pip install --quiet pytorch-lightning==1.2.10
# !pip install --quiet tokenizers==0.10.3
# + id="i8vjhHJ-FfQS"
# Import packages
from typing import List, Dict
import tqdm.notebook as tq
from tqdm.notebook import tqdm
import json
import pandas as pd
import numpy as np
import torch
from pathlib import Path
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from transformers import (
AdamW,
T5ForConditionalGeneration,
T5TokenizerFast as T5Tokenizer
)
# + colab={"base_uri": "https://localhost:8080/"} id="Q4DQ4J1IKPC0" outputId="4b9d082e-4180-447a-fc35-d0d6553487dc"
pl.seed_everything(42)
# + [markdown] id="PZ0OBjb0sDed"
# # Race Dataset
# Exploration and creation of the dataset to be used
# + colab={"base_uri": "https://localhost:8080/"} id="kB7wGA6gscVH" outputId="0a94113c-a596-41e7-e986-b29364ac054e"
# !pip --quiet install datasets
# + colab={"base_uri": "https://localhost:8080/", "height": 145, "referenced_widgets": ["2bf29e2764034bdd9d2cf9b2134ec1f4", "fec7fc972782438abec2986e1ee34b74", "9ad54c59346e44c69fc3a1b00e317408", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "0cedf57724f8407c83ef18a9248699b6", "5404ced4b3ac4cd78d6d78fcef6e2e12", "755cd3222c604686abf0ff40c53ef09a", "<KEY>", "241c19c318cd4180a6de69d874aaeec9", "eca6e216e5314c76b50f7aaa636126a1", "773fe8942f71443fb8184153de82d011", "b976ee645ff84d4d865b8948705d21c6", "a21c46b8ecb440df986a2f6ef016f406", "<KEY>", "<KEY>", "6a2d20342ad343d5a565c8dede7a13db", "<KEY>", "<KEY>", "a476739e73ad43dca380ea1d9285e69e", "<KEY>", "48eca802f96745a1a138bbbe6d8f888a", "a50a6fbeb10448df8f5e4e8006026e80", "1ab0ad4050c6411098f982fdd5a764d4", "<KEY>", "<KEY>", "07d80e306ec04b8ea7e4eaa69cae6b1e", "<KEY>", "8d6ff93805d74d9296e59944ddcccfc4", "d784db91e3b44f6088f56939030470ca", "<KEY>", "cf82f463661f428babecba5a4bca0655", "7a10b6ad31604de5a619fb013e97f92b", "<KEY>", "<KEY>", "33cd73f7dcdd47deba9296b882e0d050", "ca1ff20243934c6ca3976186793e846b", "f1ad12560b644d0b916278a5416f7306", "<KEY>", "fca4eae7d33f4517ac8e64f4e34a6b28", "<KEY>", "cb8a7a3e01e0444ea3fe4482ac657a01", "<KEY>", "<KEY>", "<KEY>", "e02d4ca8fa2a47249e5fafeedaf88fd3", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "f3c04b50945e4d66a2efbefa6836fb54", "f1cc0f7772494a388d42abef1438330a", "76548b68c3f54ab3a02b6a3e20fe8859", "<KEY>", "4e731840ba3e4e3da11184da1bf52e53", "baf80383b50048c491bedfae98142a05", "b8d55a17535649af80ed9f14d6c956ea", "0a2d3ff319ba4b0a95005c1bf7f3e27b", "<KEY>", "<KEY>", "200999528c9e4fed9044bf7024f25e2b"]} id="2J4edwLVsGH4" outputId="40550ece-85de-48dd-bb63-689616966aec"
from datasets import load_dataset
dataset = load_dataset("race", 'all')
# + [markdown] id="62_ctrdms44v"
# ## Exploration
# + colab={"base_uri": "https://localhost:8080/", "height": 106} id="tu0nauBHsq6O" outputId="766a9dec-c0e5-41c2-cbba-02ebf7f99021"
dataset['train']['article'][0]
# + colab={"base_uri": "https://localhost:8080/"} id="c32I33jusrEH" outputId="8fd1ef4d-a36b-4e9a-c873-6405ea440b97"
list(set(dataset['train']['answer']))
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="Rs3-xzWNsGLY" outputId="5d49f64d-da50-4306-8350-aa1b07c0cd05"
dataset['train']['question'][0]
# + colab={"base_uri": "https://localhost:8080/"} id="Qm9odwXCtcHu" outputId="f3fcd720-85cc-4b76-bdf2-6ac83ad62878"
dataset['train']['options'][0]
# + [markdown] id="Ig7eG5hRyBNO"
# ## Create dataset for training
# Extract context, correct answer, question, incorrect1, incorrect2, incorrect3
# + id="Nxt4i_GzyL1g"
def create_dataset(dataset_split):
    """Flatten a RACE split into a DataFrame of
    (context, question, correct, incorrect1-3) rows."""
    rows = []
    for idx in tqdm(range(len(dataset_split))):
        sample = dataset_split[idx]
        options = sample['options']
        # 'answer' is a letter A-D; 65 == ord('A') maps it to an option index.
        # pop() removes the correct answer, leaving the three distractors.
        correct = options.pop(ord(sample['answer']) - 65)
        rows.append({
            'context': sample['context'],
            'question': sample['question'],
            'correct': correct,
            'incorrect1': options[0],
            'incorrect2': options[1],
            'incorrect3': options[2]
        })
    return pd.DataFrame(rows)
# + colab={"base_uri": "https://localhost:8080/", "height": 48, "referenced_widgets": ["a56c8b173f0e4339ba56147e4ec9b099", "cca41426af58416e8187b91026a27b38", "f8a4b2b833c34190a1af02cdbfd38c9f", "2a180c93819f46a4a62a3ced1c11775e", "aa765b1d44b94fdf8babfc21d27a7759", "e17dbe85a6f640d4b027fb97171558bf", "c43b6ad3bc194ec0b307160f7066db00", "ceca8669986a48be8d9f1c439d4de6c0", "6a1c9ad304f44d799785771769b67069", "24bff6d590084ea6b9a4e4a2093bbc1e", "80831c24788e485f86a51f5f74b7fe02"]} id="MewpS4MH2lh8" outputId="50ee48dc-29ca-4224-e0a2-364f55de2317"
race_train_df = create_dataset(dataset['train'])
# + colab={"base_uri": "https://localhost:8080/", "height": 48, "referenced_widgets": ["eb5c4ecc98c84edabf0ef373714481ec", "ca60a3d7cb784814bf8a89ddb7f5a86e", "<KEY>", "0572e1ff7fe142b091ceac0493aa40fa", "ee088db4388d4a9ca8d5bb353fc3555b", "3893af2ca716469e94c5a3a6ac1e5870", "<KEY>", "<KEY>", "<KEY>", "80375f7ddc004189b4e481e8be7cdea0", "3fc3cc2e81d04b8da5a0d8ac5c19d97e"]} id="cgxaAUoF29Zn" outputId="18bdc454-c482-4a84-f8b3-7587b26cef10"
race_dev_df = create_dataset(dataset['validation'])
# + colab={"base_uri": "https://localhost:8080/", "height": 48, "referenced_widgets": ["7888656c962149d6a23a43262bf6cd7d", "e4c78c4762ae4a9aaf524a7999f8d152", "cd6bc7b1230d4207b4f205a093d09409", "6e321a496f1348f896370ae0842c81a3", "b6161ef9eabd4b85b3e0ebc02142f7f8", "<KEY>", "<KEY>", "c5c00eaf891740e6b11d1fb2d102760f", "077d089ae6654e1a9238fefabad931e4", "25171cd239244881bb60e722ec8f64c0", "7448b831e1e94d2ba15f0c9da8c331ef"]} id="XCtUXNHz29nm" outputId="790b7b4e-5ba4-495a-8ea9-5de5a9407a0e"
race_test_df = create_dataset(dataset['test'])
# + colab={"base_uri": "https://localhost:8080/", "height": 200} id="559pDVv73ZVM" outputId="844bc8fe-6bf0-41cc-e4d9-6f150076b2fb"
race_train_df.head()
# + id="oA_EAO_E6EJL"
train_df = race_train_df
dev_df = race_dev_df
test_df = race_test_df
# + id="CUVZKAa70DRF"
train_df.to_csv('data/race_train_df.csv', index=False)
dev_df.to_csv('data/race_dev_df.csv', index=False)
test_df.to_csv('data/race_test_df.csv', index=False)
# + [markdown] id="_cmz-dK53nLy"
# ## Analyzing source and target token max size
#
# Result
# - Context: 484
# - Answer: 27
# - Question (not used currently): 30 tokens
# - Incorrect = 62
#
# Max_len
# - Source: 512 - answer (27) + sep (1) + context (484)
# - Target 64 - incorrect 62 + sep (2)
# + colab={"base_uri": "https://localhost:8080/", "height": 81, "referenced_widgets": ["e04ae984f0d642318310c36435a7ee7a", "<KEY>", "18045f0a72bb4a6f8552a432c8e01e9a", "fc1c510335554b11bc077d05bad27e04", "05793b48e1994364adc89559198856ba", "<KEY>", "4e96ea9452f541a4b4e633f4a5f794e2", "e9180a06d30e4d5db2f505ca1fcb6a91", "0222198d52d24ed6b5d8f2ac511b8563", "<KEY>", "22c886e785f14fc3ade5ce3ea92a5f82", "75e60e9489484dbb826938c3ea78a753", "e75b46a8b70b4810acaeb906838b9868", "b546e7984e3546e69cc5bba32c224e4a", "224f268f2fae453894fddba9b3798a39", "d1e0014ae13b44378b15466a93e73feb", "<KEY>", "4f277b7e89e04199b3fe9c734edc1d72", "<KEY>", "dea1d193ca0943f1af8132dbd1dd6673", "a1cf9f6046664f0fa3fff844b3dea255", "9bf078bfb1ee41ad824d2e2fc9ba4c06"]} id="B1VDRoCt3yxa" outputId="37a29c33-ed81-4369-ac21-0dce6ac53789"
model_name = 't5-small'
tokenizer = T5Tokenizer.from_pretrained(model_name)
# + colab={"base_uri": "https://localhost:8080/", "height": 131, "referenced_widgets": ["3e76dff7e3e345688badab7131908a1f", "3da058d3a41d4e16bff2150063ecc385", "7f36dd07b25149929ff898fe9abec11d", "04ea7fba023040c8be982d03edce81d8", "b6956569b60c46a6baf7241fe93ab08e", "<KEY>", "<KEY>", "50967bad41c5401e97277dcdce4399cb", "66d7f046dd694a5399ca8225e90eeb20", "<KEY>", "91406b3784784ed2b3ce8b0b660334a4", "35039efe4fac4ae28869284ac2ce350a", "1a8e1eb88bc4416fbdfd8934713c8c81", "423b81b6a80642989a17f3ee832ae7f3", "0696f09866684f48b252ebba8d94cfdf", "8c23e7df77ef4b39a4cecfa44cf448a4", "<KEY>", "be691256fcad46448975d6b3087240c2", "<KEY>", "<KEY>", "4dee4967eab748378e8d2199a8ef5b08", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "6efe625c39444847b17d989e570a8b03", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "029eaafdd3914f8ab3a216db0b5cac19", "<KEY>"]} id="sIacB9mz5pQd" outputId="fee02b14-2841-4656-a2d1-c84e34a5b051"
context_token_lens = []
question_token_lens = []
answer_token_lens = []
incorrect_token_lens = []
# Token-length statistics over all three splits. The original repeated the
# identical loop three times (train, test, dev); one loop over the frames in
# the same order appends the exact same values in the exact same order.
for df in (train_df, test_df, dev_df):
    for i in tq.tqdm(range(len(df))):
        row = df.iloc[i]
        context_token_lens.append(len(tokenizer(row['context'])['input_ids']))
        question_token_lens.append(len(tokenizer(row['question'])['input_ids']))
        answer_token_lens.append(len(tokenizer(row['correct'])['input_ids']))
        # The three distractors are concatenated, matching the target format.
        incorrect_token_lens.append(len(tokenizer(row['incorrect1'] + row['incorrect2'] + row['incorrect3'])['input_ids']))
# + [markdown] id="e22UCk3v621i"
# ### Context
# + colab={"base_uri": "https://localhost:8080/", "height": 294} id="M15Yo4ig6zHx" outputId="d2eab4b8-3b3f-4433-8a2e-fc67d0601c2b"
pd.DataFrame(context_token_lens).describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="OdCeArQa6zK7" outputId="3b8e06a1-7adf-4816-a5cf-f440d8499ac4"
import numpy as np
import matplotlib.pyplot as plt
token_lens = []
samples_counts = []
for i in range(0, 512, 10):
token_lens.append(i)
samples_counts.append(sum(float(num) < i for num in context_token_lens))
plt.plot(token_lens, samples_counts)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="z6M_ngCP6zOV" outputId="9ed6b6c9-0934-4688-91f5-b8b3ed458dc6"
import numpy as np
import matplotlib.pyplot as plt
token_lens = []
samples_counts = []
for i in range(400, 512, 10):
token_lens.append(i)
samples_counts.append(sum(float(num) < i for num in context_token_lens))
plt.plot(token_lens, samples_counts)
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="Y9lkxNvb6zRn" outputId="5562d648-4eb5-4dc8-e8a3-e756d02f0874"
desired_max_len = 484
inside = sum(float(num) <= desired_max_len for num in context_token_lens)
outside = sum(float(num) > desired_max_len for num in context_token_lens)
percentage = 100 / len(context_token_lens) * inside
print('In :', inside)
print('Over:', outside)
print('Percentage:', round(percentage,2))
# + [markdown] id="QK1hRgrG7Fi-"
# ### Question
# + colab={"base_uri": "https://localhost:8080/", "height": 294} id="oVYnWGJO7HN9" outputId="34592c99-670a-4d97-a716-eaf2c1ec187c"
pd.DataFrame(question_token_lens).describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="ihG-ciJu7IWx" outputId="163269fb-7772-4e50-c05d-763f4175b0fc"
import numpy as np
import matplotlib.pyplot as plt
token_lens = []
samples_counts = []
for i in range(0, 512, 10):
token_lens.append(i)
samples_counts.append(sum(float(num) < i for num in question_token_lens))
plt.plot(token_lens, samples_counts)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="qkgSO7xL7Ies" outputId="77a96633-d5aa-40a3-d9e5-b703df744224"
import numpy as np
import matplotlib.pyplot as plt
token_lens = []
samples_counts = []
for i in range(0, 40, 10):
token_lens.append(i)
samples_counts.append(sum(float(num) < i for num in question_token_lens))
plt.plot(token_lens, samples_counts)
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="NANkdmR_7Ihs" outputId="6536bebf-7bb6-46f0-a3c2-957109197d3a"
desired_max_len = 30
inside = sum(float(num) <= desired_max_len for num in question_token_lens)
outside = sum(float(num) > desired_max_len for num in question_token_lens)
percentage = 100 / len(question_token_lens) * inside
print('In :', inside)
print('Over:', outside)
print('Percentage:', round(percentage,2))
# + [markdown] id="KdnZDKVH7RPA"
# ### Correct answer
# + colab={"base_uri": "https://localhost:8080/", "height": 294} id="84G_XkCi7Ina" outputId="ad64bbee-d8a5-4d48-9706-ff7f4340e5eb"
pd.DataFrame(answer_token_lens).describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="vH0HuJ2f7Irv" outputId="0bc2fedc-5aa9-4c07-922c-9<PASSWORD>"
import numpy as np
import matplotlib.pyplot as plt
token_lens = []
samples_counts = []
for i in range(0, 512, 10):
token_lens.append(i)
samples_counts.append(sum(float(num) < i for num in answer_token_lens))
plt.plot(token_lens, samples_counts)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="-rL0LmY67IvN" outputId="7a5d6cdd-c9da-480f-d39f-4726d46e133b"
import numpy as np
import matplotlib.pyplot as plt
token_lens = []
samples_counts = []
for i in range(0, 40, 10):
token_lens.append(i)
samples_counts.append(sum(float(num) < i for num in answer_token_lens))
plt.plot(token_lens, samples_counts)
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="tkp2_Br-7Ixy" outputId="85bf5a0e-81be-4853-e011-94d5f4c5b0f1"
desired_max_len = 27
inside = sum(float(num) <= desired_max_len for num in answer_token_lens)
outside = sum(float(num) > desired_max_len for num in answer_token_lens)
percentage = 100 / len(answer_token_lens) * inside
print('In :', inside)
print('Over:', outside)
print('Percentage:', round(percentage,2))
# + [markdown] id="fgLc85ek7X6M"
# ### Incorrect answers
# + colab={"base_uri": "https://localhost:8080/", "height": 294} id="hRGpvsk87Iz_" outputId="a017f7a2-13fa-4a4a-c112-fee0fd2a2679"
pd.DataFrame(incorrect_token_lens).describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="LFg_fD137I4S" outputId="e78070a2-262b-4b42-8b72-92ecfd95897d"
import numpy as np
import matplotlib.pyplot as plt
token_lens = []
samples_counts = []
for i in range(0, 512, 10):
token_lens.append(i)
samples_counts.append(sum(float(num) < i for num in incorrect_token_lens))
plt.plot(token_lens, samples_counts)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 264} id="bPCC3nhp7JPj" outputId="4dcae4ed-a07f-4034-cf84-22df6ed4ef99"
import numpy as np
import matplotlib.pyplot as plt
token_lens = []
samples_counts = []
for i in range(0, 80, 10):
token_lens.append(i)
samples_counts.append(sum(float(num) < i for num in incorrect_token_lens))
plt.plot(token_lens, samples_counts)
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="An1h1Vsj7JSa" outputId="45b436c9-8517-40d8-df00-f81ad8ea5fbf"
desired_max_len = 62
inside = sum(float(num) <= desired_max_len for num in incorrect_token_lens)
outside = sum(float(num) > desired_max_len for num in incorrect_token_lens)
percentage = 100 / len(incorrect_token_lens) * inside
print('In :', inside)
print('Over:', outside)
print('Percentage:', round(percentage,2))
# + colab={"base_uri": "https://localhost:8080/"} id="_x5ZsgsR7JWc" outputId="ea0fa1c6-38cf-44b9-ecd7-35440f79910b"
(100 / (97329 + 358)) * 358
# + [markdown] id="X0peLTJJiB_r"
# # Training
#
# + [markdown] id="lpceWRD_iB_w"
# ## Load processed RACE dataset
# + id="XhjNVb2tiB_x"
train_df = pd.read_csv('data/race_train_df.csv')
dev_df = pd.read_csv('data/race_dev_df.csv')
test_df = pd.read_csv('data/race_test_df.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 200} id="IuBOdKvDiB_y" outputId="19fe90d4-471b-498d-9550-692ca161cf69"
train_df.head()
# + [markdown] id="QWPvfAoKiB_y"
# ## PyTorch Lightning modules
# + id="t0zKztANiB_z"
SEP_TOKEN = '<sep>'
# + id="IcMUk4JBiB_z"
class QGDataset(Dataset):
    """Torch dataset yielding T5 source/target encodings for distractor generation.

    Source: "<correct> <sep> <question> <sep> <context>"
    Target: "<incorrect1> <sep> <incorrect2> <sep> <incorrect3>"
    """

    def __init__(
        self,
        data: pd.DataFrame,
        tokenizer: T5Tokenizer,
        source_max_token_len: int,
        target_max_token_len: int
    ):
        self.tokenizer = tokenizer
        self.data = data
        self.source_max_token_len = source_max_token_len
        self.target_max_token_len = target_max_token_len

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index: int):
        data_row = self.data.iloc[index]
        # BUG FIX: use the tokenizer passed to __init__ (self.tokenizer); the
        # original called the module-level global `tokenizer`, silently
        # ignoring the constructor argument.
        source_encoding = self.tokenizer(
            '{} {} {} {} {}'.format(data_row['correct'], SEP_TOKEN, data_row['question'], SEP_TOKEN, data_row['context']),
            max_length=self.source_max_token_len,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors='pt'
        )
        target_encoding = self.tokenizer(
            '{} {} {} {} {}'.format(data_row['incorrect1'], SEP_TOKEN, data_row['incorrect2'], SEP_TOKEN, data_row['incorrect3']),
            max_length=self.target_max_token_len,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors='pt'
        )
        labels = target_encoding['input_ids']
        # T5 convention: the loss ignores positions labelled -100, so mask the
        # pad-token id (0).
        labels[labels == 0] = -100
        return dict(
            answer_text=data_row['correct'],
            context=data_row['context'],
            question=data_row['question'],
            incorrect1=data_row['incorrect1'],
            incorrect2=data_row['incorrect2'],
            incorrect3=data_row['incorrect3'],
            input_ids=source_encoding['input_ids'].flatten(),
            attention_mask=source_encoding['attention_mask'].flatten(),
            labels=labels.flatten()
        )
# + id="WIGR4yPTiB_0"
class QGDataModule(pl.LightningDataModule):
    """LightningDataModule bundling the train/val/test QGDatasets."""

    def __init__(
        self,
        train_df: pd.DataFrame,
        val_df: pd.DataFrame,
        test_df: pd.DataFrame,
        tokenizer: T5Tokenizer,
        batch_size,
        source_max_token_len: int,
        target_max_token_len: int
    ):
        super().__init__()
        self.batch_size = batch_size
        self.train_df = train_df
        self.val_df = val_df
        self.test_df = test_df
        self.tokenizer = tokenizer
        self.source_max_token_len = source_max_token_len
        self.target_max_token_len = target_max_token_len

    def setup(self, stage=None):
        # FIX: Lightning invokes setup(stage=...) from the Trainer; the
        # original `setup(self)` signature breaks that call. `stage=None`
        # keeps the manual `data_module.setup()` call working unchanged.
        self.train_dataset = QGDataset(self.train_df, self.tokenizer, self.source_max_token_len, self.target_max_token_len)
        self.val_dataset = QGDataset(self.val_df, self.tokenizer, self.source_max_token_len, self.target_max_token_len)
        self.test_dataset = QGDataset(self.test_df, self.tokenizer, self.source_max_token_len, self.target_max_token_len)

    def train_dataloader(self):
        return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=2)

    def val_dataloader(self):
        return DataLoader(self.val_dataset, batch_size=1, num_workers=2)

    def test_dataloader(self):
        return DataLoader(self.test_dataset, batch_size=1, num_workers=2)
# + id="bQm35QF9iB_0"
#### Testing DataModule
# data_module = QGDataModule(train_df, dev_df, test_df, tokenizer, 2, 128, 64)
# data_module.setup()
# + [markdown] id="JQZDoSCpiB_1"
# ## Hyperparameters
# + id="_RrmkccaiB_1"
MODEL_NAME = 't5-small'
SOURCE_MAX_TOKEN_LEN = 512
TARGET_MAX_TOKEN_LEN = 64
N_EPOCHS = 20
BATCH_SIZE = 24
LEARNING_RATE = 0.0001
MODEL_SAVE_NAME = '100200'
# + colab={"base_uri": "https://localhost:8080/"} id="tmrPKcP9iB_1" outputId="903a01b8-0eea-4d63-c9b4-26e9a70b3b7d"
DF_TAKE_PERCENTAGE = 1
TAKE_TRAIN = int(len(train_df) * DF_TAKE_PERCENTAGE)
TAKE_DEV = int(len(dev_df) * DF_TAKE_PERCENTAGE)
TAKE_TEST = int(len(test_df) * DF_TAKE_PERCENTAGE)
print('Taking', DF_TAKE_PERCENTAGE * 100, '%')
print(TAKE_TRAIN, 'of', len(train_df))
print(TAKE_DEV, 'of', len(dev_df))
print(TAKE_TEST, 'of', len(test_df))
# + [markdown] id="j5SuHwQniB_2"
# ### Initializing training module
# + [markdown] id="qyarpW9NiB_2"
# #### Setting DataModule
# + colab={"base_uri": "https://localhost:8080/", "height": 130, "referenced_widgets": ["48568a0c1f094e95aa3bc7cb881e5171", "503de234a1c14ed19ef5890f570bc8cc", "a5f98372aed3473d827aef50c28ff85b", "d09df14f16d74a5d8f905ad06b12b2d5", "<KEY>", "<KEY>", "<KEY>", "73b5cc22fd6b4894b4c495c5d56ef99c", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "e330c4837baf49bca9dd414d74b5041a", "<KEY>", "7e9110850ae84103a19b88adc3007cd4", "<KEY>", "bdc97219f9ac4a7b8945e3f8e4f68d5d", "92401231508d461d960189099db1af71", "<KEY>", "041117d34983402ead222cf7eaaa82b0", "<KEY>", "c57e65f1a2f947d5ab36c91af3d3be78"]} id="y12z7XNAiB_2" outputId="704e5fa7-6410-441f-e091-5a1fe6319152"
print(train_df[:TAKE_TRAIN].shape, dev_df[:TAKE_DEV].shape, test_df[:TAKE_TEST].shape)
tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME)
print('tokenizer len before: ', len(tokenizer))
tokenizer.add_tokens(SEP_TOKEN)
print('tokenizer len after: ', len(tokenizer))
TOKENIZER_LEN = len(tokenizer)
data_module = QGDataModule(train_df[:TAKE_TRAIN], dev_df[:TAKE_DEV], test_df[:TAKE_TEST], tokenizer, BATCH_SIZE, SOURCE_MAX_TOKEN_LEN, TARGET_MAX_TOKEN_LEN)
data_module.setup()
# + [markdown] id="LWvu-YtCiB_3"
# #### Setting Model
# + id="n1KiNAL3iB_3"
class QGModel(pl.LightningModule):
    """T5 conditional-generation model wrapped as a LightningModule."""

    def __init__(self):
        super().__init__()
        self.model = T5ForConditionalGeneration.from_pretrained(MODEL_NAME, return_dict=True)
        # Resize after adding the <sep> token to the tokenizer.
        self.model.resize_token_embeddings(TOKENIZER_LEN)

    def forward(self, input_ids, attention_mask, labels=None):
        output = self.model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
        return output.loss, output.logits

    def _shared_step(self, batch, log_name):
        # One helper for train/val/test: the original triplicated this
        # forward-pass-and-log body verbatim in all three step methods.
        loss, _ = self(batch['input_ids'], batch['attention_mask'], batch['labels'])
        self.log(log_name, loss, prog_bar=True, logger=True)
        return loss

    def training_step(self, batch, batch_idx):
        return self._shared_step(batch, 'train_loss')

    def validation_step(self, batch, batch_idx):
        return self._shared_step(batch, 'val_loss')

    def test_step(self, batch, batch_idx):
        return self._shared_step(batch, 'test_loss')

    def configure_optimizers(self):
        return AdamW(self.parameters(), lr=LEARNING_RATE)
# + [markdown] id="LAK_6GUHiB_3"
# #### Setting trainer
# + id="qaqdzzRxiB_4"
if RUN_TRAINING_CELLS:
checkpoint_callback = ModelCheckpoint(
dirpath='checkpoints',
filename='best-checkpoint',
save_top_k=-1,
verbose=True,
monitor='val_loss',
mode='min'
)
# + colab={"base_uri": "https://localhost:8080/"} id="BhY6-_5fiB_4" outputId="6c4f0330-5507-47ca-aa13-c14bb4d95dcf"
if RUN_TRAINING_CELLS:
trainer = pl.Trainer(
checkpoint_callback= checkpoint_callback,
max_epochs=N_EPOCHS,
gpus=1,
progress_bar_refresh_rate=30
)
# + [markdown] id="i79K_itxiB_5"
# ## Training
# + id="hfbmeVvKiB_5"
# %load_ext tensorboard
# + id="Ttj8yPs2iB_5"
# %tensorboard --logdir ./lightning_logs
# + id="Tg9nUNFJiB_6"
model = QGModel()
# model = QGModel.load_from_checkpoint('checkpoints/best-checkpoint-v42.ckpt')
trainer.fit(model, data_module)
# + [markdown] id="6zYUV2SuiB_6"
# **JavaScript to prevent from shutting down.**
#
# function ConnectButton(){
# console.log("Connect pushed");
# document.querySelector("#top-toolbar > colab-connect-button").shadowRoot.querySelector("#connect").click()
# }
#
# setInterval(ConnectButton,60000);
# + id="I5GErTmniB_6"
trainer.test()
# + [markdown] id="3ly7LXIliB_7"
# ## Evaluate
# + [markdown] id="cADF3khwiB_7"
# ### Load model
# + colab={"base_uri": "https://localhost:8080/", "height": 97, "referenced_widgets": ["18d20c41a6124a929d9eedc6b48ea1bd", "0aa87e7f6ecc4d439a3f138ee16791ec", "<KEY>", "<KEY>", "7b9854ff8ced40948438a1ffc21e78c8", "<KEY>", "e6e1917eaaf841548b89bea951098502", "9f8b53dfe77d4ceb9c192d9509509a3c", "fce67724e8794225ab9b4a6629b0c37a", "0445398386074e898961d01cdffea88f", "1c11f732fa704d9bb3c4a63fee6fad1b", "<KEY>", "<KEY>", "<KEY>", "334b276a331449df90451e4c320bdad1", "afbec9241dce4ad688afe3d57d0031fc", "2d15d86a45f2418dabe04656523c78b9", "<KEY>", "<KEY>", "3fed1266d71d4ad4929173330b009190", "968ef08859e14672b9847937e02799f1", "c184ad51594d4b2597686a5ae34c6156"]} id="PDdaXFWeiB_7" outputId="f52e55c9-c526-4094-ba21-88172a9ac75b"
checkpoint_path = 'checkpoints/best-checkpoint-v9.ckpt'
best_model = QGModel.load_from_checkpoint(checkpoint_path)
best_model.freeze()
best_model.eval()
print()
# + [markdown] id="Dz7ld-zHiB_7"
# ### Common functions
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="yNng0d0kiB_7" outputId="85a0b218-4116-4c6c-e094-2e153b349110"
SEP_TOKEN
# + id="7y1-NO_wiB_8"
def generate(qgmodel: QGModel, answer: str, context: str) -> str:
    """Generate one output for (answer, context) using greedy decoding.

    The answer and context are joined with SEP_TOKEN, tokenized, and passed
    through the wrapped seq2seq model; the decoded sequence(s) are concatenated
    into a single string.
    """
    encoding = tokenizer(
        f'{answer} {SEP_TOKEN} {context}',
        max_length=SOURCE_MAX_TOKEN_LEN,
        padding='max_length',
        truncation=True,
        return_attention_mask=True,
        add_special_tokens=True,
        return_tensors='pt'
    )
    output_ids = qgmodel.model.generate(
        input_ids=encoding['input_ids'],
        attention_mask=encoding['attention_mask'],
        num_beams=1,
        max_length=TARGET_MAX_TOKEN_LEN,
        repetition_penalty=2.5,
        length_penalty=1.0,
        early_stopping=True,
        use_cache=True
    )
    decoded = {
        tokenizer.decode(ids, skip_special_tokens=False, clean_up_tokenization_spaces=True)
        for ids in output_ids
    }
    return ''.join(decoded)
# + id="bcMnihXSiB_8"
def show_result(generated: str, answer: str, context: str, incorrect: List[str] = None, question: str = ''):
    """Pretty-print one generation result alongside its ground-truth data.

    Args:
        generated: model output text.
        answer: the correct answer the output was generated for.
        context: source passage.
        incorrect: original (reference) distractors; defaults to an empty list.
        question: optional reference question, printed only when non-empty.
    """
    # Avoid the shared-mutable-default-argument pitfall: normalise None to a fresh list.
    if incorrect is None:
        incorrect = []
    print('Context:')
    print(context)
    print()
    if question: print('Question: ', question)
    print('Answer : ', answer)
    print()
    print('Original : ', incorrect)
    print('Generated: ', generated)
    print('-----------------------------')
# + [markdown] id="1BAmSPIdiB_8"
# ### View results manually
# + colab={"base_uri": "https://localhost:8080/"} id="WC0I4iIGiB_8" outputId="6d22bffc-693d-4016-8f34-93ca68417b27"
# Spot-check generation on a test-set row.
sample = test_df.iloc[42]
generated = generate(best_model, sample['correct'], sample['context'])
show_result(generated, sample['correct'], sample['context'], [sample['incorrect1'], sample['incorrect2'], sample['incorrect3']], sample['question'])
# + colab={"base_uri": "https://localhost:8080/"} id="AHwP2w07iB_9" outputId="28b7100c-0967-4f21-a864-9546331b33d4"
# Second test-set example.
sample = test_df.iloc[4]
generated = generate(best_model, sample['correct'], sample['context'])
show_result(generated, sample['correct'], sample['context'], [sample['incorrect1'], sample['incorrect2'], sample['incorrect3']], sample['question'])
# + colab={"base_uri": "https://localhost:8080/"} id="bxB8HeDbiB_9" outputId="3b8dcec1-bd38-461c-c63d-4a7a1b9420aa"
# Same spot-check on a training-set row.
sample = train_df.iloc[42]
generated = generate(best_model, sample['correct'], sample['context'])
show_result(generated, sample['correct'], sample['context'], [sample['incorrect1'], sample['incorrect2'], sample['incorrect3']], sample['question'])
# + id="P4XWudiFiB__"
def generate(qgmodel: QGModel, answer: str, context: str, generate_count: int) -> str:
    """Generate `generate_count` beam-search sequences for (answer, context).

    All returned sequences are decoded and concatenated into one string;
    callers split the result on the '</s>' end-of-sequence marker.
    """
    encoding = tokenizer(
        f'{answer} {SEP_TOKEN} {context}',
        max_length=SOURCE_MAX_TOKEN_LEN,
        padding='max_length',
        truncation=True,
        return_attention_mask=True,
        add_special_tokens=True,
        return_tensors='pt'
    )
    output_ids = qgmodel.model.generate(
        input_ids=encoding['input_ids'],
        attention_mask=encoding['attention_mask'],
        num_beams=generate_count,
        num_return_sequences=generate_count,
        max_length=TARGET_MAX_TOKEN_LEN,
        repetition_penalty=2.5,
        length_penalty=1.0,
        early_stopping=True,
        use_cache=True
    )
    decoded = {
        tokenizer.decode(ids, skip_special_tokens=False, clean_up_tokenization_spaces=True)
        for ids in output_ids
    }
    return ''.join(decoded)
# + colab={"base_uri": "https://localhost:8080/"} id="5y3RFaq-iB__" outputId="41ef1ef5-67bb-472f-efdf-33d7e37c1477"
# Greedy decoding (single beam) over the first ten test rows.
for i in range(10):
    sample = test_df.iloc[i]
    print(sample['question'])
    print(sample['correct'])
    print(sample['incorrect1'], ' | ', sample['incorrect2'], ' | ', sample['incorrect3'])
    # Split the concatenated output on the end-of-sequence marker.
    for beam in generate(best_model, sample['correct'], sample['context'], 1).split('</s>'):
        print(beam)
    print()
# + colab={"base_uri": "https://localhost:8080/"} id="_8JqWQ9miCAA" outputId="f665a4d0-7cdf-4526-a612-a1cf61a78741"
# Same rows decoded with 4 beams / 4 returned sequences.
for i in range(10):
    sample = test_df.iloc[i]
    print(sample['question'])
    print(sample['correct'])
    print(sample['incorrect1'], ' | ', sample['incorrect2'], ' | ', sample['incorrect3'])
    for beam in generate(best_model, sample['correct'], sample['context'], 4).split('</s>'):
        print(beam)
    print()
# + [markdown] id="5t9tBu9nsier"
# # Evaluate NLTK BLEU
# + colab={"base_uri": "https://localhost:8080/", "height": 97, "referenced_widgets": ["5ea7c616107e4ee7b03da68c622e15c4", "56de201030b04b4a99fffdb5cd67373f", "542d8dd97ebc49c5a4803d5a9c84e81d", "918a457aa0a94aa0acc4a3fc80d86164", "a5287990e595451c9fa910aa03032836", "81e219b57f374b10966529032094af72", "c89cd591e4cc4c22878ff2ef27531571", "<KEY>", "<KEY>", "4e250808f39e4cd79eb08a0469473852", "26225c9711fd47d59e9bab6e0d523cce", "b59096a6c62d47df9ac87059dd46046a", "<KEY>", "b78cce41a61349d9a6185f75aa7f42cd", "43bea73a7ed143338c8ee3e19f0793ba", "0457d7793fb24679a4db60eea6ec9a30", "015f8fed490543cea373e6dafbdbc577", "024c02483def4f86a5a3cb27de6b4658", "4edad5d35de24f1aa2ab3527a085d69e", "<KEY>", "5fddf2742a3b421fa2372edd41d8b1a9", "<KEY>"]} id="3nqusO2ssies" outputId="b56774b9-9677-47e2-cccc-8045155ccec9"
# Load the checkpoint used for BLEU evaluation and switch to inference mode.
checkpoint_path = 'checkpoints/best-checkpoint-v16.ckpt'
best_model = QGModel.load_from_checkpoint(checkpoint_path)
best_model.freeze()
best_model.eval()
print()  # keep the cell output tidy
# + [markdown] id="MOt-RCePsiet"
# ### Common functions
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="muJutsLjsiet" outputId="72c0a3c6-4a3d-4a62-a0ec-f33236bc1632"
SEP_TOKEN
# + id="U5y6aTA1sieu"
def generate(qgmodel: QGModel, correct: str, question: str, context: str) -> str:
    """Generate distractors for (correct answer, question, context), greedy decoding.

    The three fields are joined with SEP_TOKEN, tokenized, and run through the
    wrapped seq2seq model; decoded sequence(s) are concatenated into one string.
    """
    encoding = tokenizer(
        f'{correct} {SEP_TOKEN} {question} {SEP_TOKEN} {context}',
        max_length=SOURCE_MAX_TOKEN_LEN,
        padding='max_length',
        truncation=True,
        return_attention_mask=True,
        add_special_tokens=True,
        return_tensors='pt'
    )
    output_ids = qgmodel.model.generate(
        input_ids=encoding['input_ids'],
        attention_mask=encoding['attention_mask'],
        num_beams=1,
        max_length=TARGET_MAX_TOKEN_LEN,
        repetition_penalty=2.5,
        length_penalty=1.0,
        early_stopping=True,
        use_cache=True
    )
    decoded = {
        tokenizer.decode(ids, skip_special_tokens=False, clean_up_tokenization_spaces=True)
        for ids in output_ids
    }
    return ''.join(decoded)
# + id="kfBL_jOwsieu"
def show_result(generated: str, answer: str, context: str, incorrect: List[str] = None, question: str = ''):
    """Pretty-print one generation result alongside its ground-truth data.

    Args:
        generated: model output text.
        answer: the correct answer the output was generated for.
        context: source passage.
        incorrect: original (reference) distractors; defaults to an empty list.
        question: optional reference question, printed only when non-empty.
    """
    # Avoid the shared-mutable-default-argument pitfall: normalise None to a fresh list.
    if incorrect is None:
        incorrect = []
    print('Context:')
    print(context)
    print()
    if question: print('Question: ', question)
    print('Answer : ', answer)
    print()
    print('Original : ', incorrect)
    print('Generated: ', generated)
    print('-----------------------------')
# + [markdown] id="6CSenmCGsiew"
# ### NLTK BLEU EVAL
# + id="pXi1cT1Hsiew"
from typing import List
import nltk
from nltk.tokenize import word_tokenize
#nltk.download('punkt')
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
def calculate_nltk_bleu_single(references: List[str], hypothesis: str):
    """Return (BLEU-1, BLEU-2, BLEU-3, BLEU-4) of one hypothesis vs. its references.

    An empty hypothesis scores zero everywhere.  Chen & Cherry smoothing
    (method2) avoids degenerate scores when a higher-order n-gram has zero
    overlap counts.
    """
    if hypothesis == '':
        return 0, 0, 0, 0
    # Word-tokenize references and hypothesis before scoring.
    tokenized_refs = [word_tokenize(ref) for ref in references]
    tokenized_hyp = word_tokenize(hypothesis)
    smoother = SmoothingFunction().method2
    weight_sets = [
        (1, 0, 0, 0),
        (0.5, 0.5, 0, 0),
        (0.33, 0.33, 0.33, 0),
        (0.25, 0.25, 0.25, 0.25),
    ]
    return tuple(
        sentence_bleu(tokenized_refs, tokenized_hyp, weights=weights, smoothing_function=smoother)
        for weights in weight_sets
    )
# + colab={"base_uri": "https://localhost:8080/"} id="pFuQLjCusiew" outputId="43779ed1-ff33-4ffe-f596-d9e1be835b6d"
nltk.download('punkt')
# + id="XD5Sfl5nsiew"
def calculate_nltk_bleu(references: List[List[str]], hypothesis: List[str]):
    """Corpus-average BLEU-1..4, as percentages rounded to two decimals.

    `references[i]` is the list of reference strings for `hypothesis[i]`;
    both sequences must be the same length.
    """
    assert len(references) == len(hypothesis)
    n = len(references)
    totals = [0, 0, 0, 0]
    for idx in tqdm(range(n)):
        sample_scores = calculate_nltk_bleu_single(references[idx], hypothesis[idx])
        for pos, score in enumerate(sample_scores):
            totals[pos] += score
    return tuple(round(total / n * 100, 2) for total in totals)
# + [markdown] id="gcoXBItPsiex"
# ### Generate results
# + colab={"base_uri": "https://localhost:8080/", "height": 48, "referenced_widgets": ["80785c8b6cc542739bccb509baddfbbf", "943dbc62f52b4eee80e74b04e896ceec", "44a38a6bb5b34ee8a6093d081c9cdca7", "2055ebe312f04474b7f6d6377358c8d4", "<KEY>", "0be38ba17a00473da7c6e8135965cff8", "<KEY>", "<KEY>", "<KEY>", "07134128c75d4a3c8cfae9d420ab0af7", "<KEY>"]} id="5wfW_RT6siex" outputId="61e3d997-f7e8-4976-a978-8cda49f6b088"
# Generate one distractor string per test row.  The `generate` in scope here is
# generate(qgmodel, correct, question, context) — four parameters — so the stray
# leading `1` previously passed (a leftover generate_count argument) made every
# call raise TypeError; it is removed.
results = []
for i in tqdm(range(len(test_df))):
    sample = test_df.iloc[i]
    results.append(generate(best_model, sample['correct'], sample['question'], sample['context']))
# + id="O5RkcF3Wsiex"
# Persist the generated distractor strings, one per line, to Drive.
path = DRIVE_FOLDER_LOCATION + 'results/' + 'results-epoch4' + '.txt'
with open(path, "w") as output:
    for row in results:
        output.write(str(row) + '\n')
# + [markdown] id="CilLFL8asiey"
# ## Evaluate final results
# + [markdown] id="UMkWRR26siey"
# ### Load predictions
# + id="8a2fGECWsiey"
from typing import List
def load_lines_from_txt(file_path: str) -> List[str]:
    """Read `file_path` and return its lines with surrounding whitespace stripped."""
    with open(file_path) as handle:
        return [line.strip() for line in handle]
# + id="_bNBiN1zsiey"
results = load_lines_from_txt('results/results-epoch4.txt')
# + [markdown] id="SLt0x3PHsiey"
# ### Split distractors
# + id="3ecrQZTSsiey"
def fucking_correct_index_of(text: str, substring: str, start_index: int = 0):
    """Return the lowest index of `substring` in `text` at or after `start_index`,
    or -1 when it is absent.

    This is exactly the contract of str.find(); the previous try/except around
    str.index() was a hand-rolled re-implementation of it.  Kept as a wrapper
    for call-site compatibility.
    """
    return text.find(substring, start_index)
def replace_all_extra_id(text: str):
    """Replace every '<extra_id_...>' sentinel token in `text` with '<sep>'.

    A token runs from '<extra_id_' to the next '>'.  The previous version
    looped forever on a malformed token with no closing '>' (it re-appended
    the whole string each pass); that case now truncates the malformed tail
    to a single '<sep>' and terminates.
    """
    new_text = text
    while True:
        start = new_text.find('<extra_id_')
        if start < 0:
            break
        end = new_text.find('>', start)
        if end < 0:
            # Malformed token with no closing '>': replace the rest and stop.
            new_text = new_text[:start] + '<sep>'
            break
        new_text = new_text[:start] + '<sep>' + new_text[end + 1:]
    return new_text
# + id="tOxDRG1csiey"
# Split each generated string into three distractors on the '<sep>' marker,
# padding with empty strings when the model produced fewer than three.
incorrect1s = []
incorrect2s = []
incorrect3s = []
for result in results:
    # Strip special tokens, then normalise any '<extra_id_NN>' sentinel to '<sep>'.
    cleaned_result = result.replace('<pad>', '').replace('</s>', '')
    cleaned_result = replace_all_extra_id(cleaned_result)
    distractors = cleaned_result.split('<sep>')
    if len(distractors) != 3:
        if len(distractors) == 2:
            print('2 answers at', result)
            distractors.append('')
        else:
            print('1 distractor', result, 'not enough distractors??')
            # NOTE(review): appending three pads after a single distractor yields a
            # 4-element list; only the first three entries are consumed below, so
            # this is benign, and >3-way splits also fall through here — confirm.
            distractors.append('')
            distractors.append('')
            distractors.append('')
    incorrect1s.append(distractors[0])
    incorrect2s.append(distractors[1])
    incorrect3s.append(distractors[2])
# + [markdown] id="4AClxKuHsiez"
# ### Load references
# + id="Mr641QA_siez"
# Ground-truth correct answers, one string per test row.
reference_correct = list(test_df['correct'])
# + colab={"base_uri": "https://localhost:8080/"} id="qH0uCNIlsiez" outputId="6e4931ec-5165-4340-dd22-c35fc4b256df"
reference_correct[:10]
# + id="0-AdMB2msiez"
# Ground-truth distractor triples, one three-element list per test row.
reference_incorrects = []
for i in range(len(test_df)):
    reference_incorrects.append([test_df.iloc[i]['incorrect1'], test_df.iloc[i]['incorrect2'], test_df.iloc[i]['incorrect3']])
# + colab={"base_uri": "https://localhost:8080/"} id="UwD-SU4Nsiez" outputId="fd4dd10c-6828-471c-ffd6-7b62f0e3ba94"
reference_incorrects[42]
# + [markdown] id="JlIce4MZsie0"
# ### Evaluate
# + colab={"base_uri": "https://localhost:8080/", "height": 333, "referenced_widgets": ["a160efe6d559401a9182f4c3fae743ea", "886cb3c56b764801853eb64fdbe6de3d", "e5bb50f07b964fc88379f8efb4155b5d", "e91fe296fa134362a0b58c98f9598615", "00bfd3dffc2344f19960bbea23c89213", "c578525f4f5842c49c1695dd10ad69e9", "<KEY>", "1d036d3a6a0a4c8288ed6beb69317c34", "ea214337c1304c5baca7d7b1c0a6ec45", "<KEY>", "f0af92313e374acba265530fd2993da9", "0e8fa219062047779807c15090733d9f", "3a7d50e1dfe74cd2aff1ed19f0041caa", "a0284f53428e4d93a4c0d6d852c63125", "<KEY>", "cb88772006394e108e0df9d5ebd22ca3", "<KEY>", "<KEY>", "502647e2ef264eb6aa400be291e6daec", "de1032dfd50242c4aaeef7ac1890a9c3", "c5d493deafbe44a18cc3fe54190cf13b", "d8e7c93375914cf28871a2173fad26ef", "111de013c77449ee8cab7fb7b8634b9c", "21fe1676dc2940c18e50dd06e5e71865", "312e48ad677e42f3bef6b3670b4730e7", "dde17cde8bea434f92256f2339e8206d", "8df29ecbceaa46458dbd6a53ff840693", "ec10c77340144ca1a1decfffb8ced778", "<KEY>", "<KEY>", "5b684177a7194145b0096a48384c7a7a", "<KEY>", "813ceb1b9ecd48f1859aa3ec28ee7654", "2285917219b14e31a216c146369c5995", "dac8d34861fd4fea9418e714c633398c", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "df1b1e04f8ad4a6985450eda6c4cb9fe", "bda4ede84ab44e80ab038b51d172f4d9", "41d89483fa2d40ac994c72d5d68229cd", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "95fe4ccc13904981b4a12092ea0f4558", "<KEY>", "7ee7996c19014e2ab4f7f120b75de170", "0292de51f370424785f1227269d6e8ea", "<KEY>", "<KEY>", "0f99c41aef5d449d906b4a756eb0e135", "<KEY>", "dd383edb3d5247dfb6eae271671225ec", "e308882006624aea856a86b1dad281ce", "76f4440515684d33a27569dea6f5ffef", "2563fccaea3e4082b46ef5164451e083", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "c9f8079352d84403a1908c8a959dec30", "71a152bcacbd42ebbbbec6e9f5a3b80a"]} id="aDZmTlSSSVFW" outputId="56b0e5d7-5a59-4ae1-be5f-9ea795421e1c"
# BLEU of each generated distractor slot against the three reference distractors.
bleu_scores = []
bleu_scores.append(calculate_nltk_bleu(reference_incorrects, incorrect1s))
bleu_scores.append(calculate_nltk_bleu(reference_incorrects, incorrect2s))
bleu_scores.append(calculate_nltk_bleu(reference_incorrects, incorrect3s))
# BLEU of each distractor slot against the correct answer.  calculate_nltk_bleu
# expects a *list of reference strings* per sample; passing the bare string made
# word_tokenize run over its individual characters, so each correct answer is
# wrapped in a one-element list here.
reference_correct_wrapped = [[answer] for answer in reference_correct]
bleu_scores.append(calculate_nltk_bleu(reference_correct_wrapped, incorrect1s))
bleu_scores.append(calculate_nltk_bleu(reference_correct_wrapped, incorrect2s))
bleu_scores.append(calculate_nltk_bleu(reference_correct_wrapped, incorrect3s))
# Tabulate: rows are distractor-slot vs incorrect refs (d*i) / correct answer (d*c).
print('###', 'bleu_1', 'bleu_2', 'bleu_3', 'bleu_4')
labels = ['d1i', 'd2i', 'd3i', 'd1c', 'd2c', 'd3c']
for i in range(len(bleu_scores)):
    print(labels[i], "{:<7}".format(bleu_scores[i][0]), "{:<7}".format(bleu_scores[i][1]), "{:<7}".format(bleu_scores[i][2]), "{:<7}".format(bleu_scores[i][3]))
# + id="F56ZaM1wVNHe"
| training/RACE_Distractor_Generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Forms Recogniser Python Example
#
# 6th April 2020
#
# Microsoft Forms Recogniser Python Tutorial
#
#
# https://docs.microsoft.com/en-us/azure/cognitive-services/form-recognizer/quickstarts/python-train-extract
# ## 1. Load the Required Libraries
# load environment variables
import azure, json, os, requests
import pandas as pd
from dotenv import load_dotenv
import sys
import time
from requests import get, post
# Read SUBKEY / SAS_KEY / file_path from a local .env file into the environment.
load_dotenv(verbose=True)
# ## 2. Train the Model
# +
########### Python Form Recognizer Labeled Async Train #############
# Kick off asynchronous custom-model training against forms in Azure Blob Storage.
# Endpoint URL
endpoint = r"https://formrecogniserapi.cognitiveservices.azure.com/"
post_url = endpoint + r"/formrecognizer/v2.0-preview/custom/models"
# Training data location: blob container URL plus the SAS key from the environment.
source = r"https://formsstorageamc.blob.core.windows.net/trainingforms?"+os.getenv("SAS_KEY")
prefix = ""
includeSubFolders = False
useLabelFile = False
headers = {
    # Request headers
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': os.getenv("SUBKEY"),
}
body = {
    "source": source,
    "sourceFilter": {
        "prefix": prefix,
        "includeSubFolders": includeSubFolders
    },
    "useLabelFile": useLabelFile
}
try:
    resp = post(url = post_url, json = body, headers = headers)
    # 201 is the expected status; the polling URL comes back in the 'location' header.
    if resp.status_code != 201:
        print("POST model failed (%s):\n%s" % (resp.status_code, json.dumps(resp.json())))
        quit()
    print("POST model succeeded:\n%s" % resp.headers)
    get_url = resp.headers["location"]
except Exception as e:
    print("POST model failed:\n%s" % str(e))
    quit()
# +
# make sure to save the model ID as a variable which we will use in later steps
# Poll the async training operation with exponential backoff (5 s doubling,
# capped at 60 s), giving up after 15 attempts.
n_tries = 15
n_try = 0
wait_sec = 5
max_wait_sec = 60
while n_try < n_tries:
    try:
        resp = get(url = get_url, headers = headers)
        resp_json = resp.json()
        if resp.status_code != 200:
            print("GET model failed (%s):\n%s" % (resp.status_code, json.dumps(resp_json)))
            quit()
        model_status = resp_json["modelInfo"]["status"]
        if model_status == "ready":
            print("Training succeeded:\n%s" % json.dumps(resp_json, indent=4, sort_keys=True))
            # modelID is consumed by the analyze step below; note it is only
            # bound on this success path.
            modelID = resp_json["modelInfo"]["modelId"]
            print(modelID)
            break
        if model_status == "invalid":
            print("Training failed. Model is invalid:\n%s" % json.dumps(resp_json))
            quit()
        # Training still running. Wait and retry.
        time.sleep(wait_sec)
        n_try += 1
        wait_sec = min(2*wait_sec, max_wait_sec)
    except Exception as e:
        msg = "GET model failed:\n%s" % str(e)
        print(msg)
        quit()
# -
# ## 3. Perform Model Inference on a Local Document
# +
# local path of document
source = os.getenv("file_path")
# Endpoint URL
apim_key = os.getenv("SUBKEY")
model_id = modelID
post_url = endpoint + "/formrecognizer/v2.0-preview/custom/models/%s/analyze" % model_id
params = {
"includeTextDetails": True
}
headers = {
# Request headers
'Content-Type': 'application/pdf',
'Ocp-Apim-Subscription-Key': apim_key,
}
with open(source, "rb") as f:
data_bytes = f.read()
try:
resp = post(url = post_url, data = data_bytes, headers = headers, params = params)
if resp.status_code != 202:
print("POST analyze failed:\n%s" % json.dumps(resp.json()))
quit()
print("POST analyze succeeded:\n%s" % resp.headers)
get_url = resp.headers["operation-location"]
except Exception as e:
print("POST analyze failed:\n%s" % str(e))
quit()
# -
# Poll the analysis operation with the same backoff scheme as the training poll.
n_tries = 15
n_try = 0
wait_sec = 5
max_wait_sec = 60
while n_try < n_tries:
    try:
        resp = get(url = get_url, headers = {"Ocp-Apim-Subscription-Key": apim_key})
        resp_json = resp.json()
        if resp.status_code != 200:
            print("GET analyze results failed:\n%s" % json.dumps(resp_json))
            quit()
        status = resp_json["status"]
        if status == "succeeded":
            print("Analysis succeeded:\n%s" % json.dumps(resp_json, indent=4, sort_keys=True))
            break
        if status == "failed":
            print("Analysis failed:\n%s" % json.dumps(resp_json))
            quit()
        # Analysis still running. Wait and retry.
        time.sleep(wait_sec)
        n_try += 1
        wait_sec = min(2*wait_sec, max_wait_sec)
    except Exception as e:
        msg = "GET analyze results failed:\n%s" % str(e)
        print(msg)
        quit()
| FormsRecogniserPythonExample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import csv
import pandas as pd
# Preview the raw tab-delimited casi01 file.  A context manager replaces the
# bare open() so the file handle is closed instead of leaking.
with open("casi01.txt", "r") as f:
    print(f.read())
# Convert the tab-delimited NDAR export to CSV (index column dropped), then reload it.
read_file = pd.read_csv ('/mmfs1/data/pijarj/ndar_fmri/casi01.txt',delimiter='\t')
read_file.to_csv ('/mmfs1/data/pijarj/BC-ORG-Data/Data/casi01.csv', index=0)
df = pd.read_csv('casi01.csv')
df.head()
# cd ../Data
# Convert each tab-delimited NDAR instrument export to CSV under BC-ORG-Data/Data.
# NOTE(review): the per-instrument variables (ace, bu, diag, ...) may be used in
# later cells; a loop refactor should be confirmed safe before removing them.
#ace_subjmedhist
ace = pd.read_csv ('/mmfs1/data/pijarj/ndar_fmri/ace_subjmedhist01.txt',delimiter='\t')
ace.to_csv ('/mmfs1/data/pijarj/BC-ORG-Data/Data/ace_subjmedhist01.csv', index=0)
#bu_medical_form01
bu = pd.read_csv ('/mmfs1/data/pijarj/ndar_fmri/bu_medical_form01.txt',delimiter='\t')
bu.to_csv ('/mmfs1/data/pijarj/BC-ORG-Data/Data/bu_medical_form01.csv', index=0)
#diagnosis02
diag = pd.read_csv ('/mmfs1/data/pijarj/ndar_fmri/diagnosis02.txt',delimiter='\t')
diag.to_csv ('/mmfs1/data/pijarj/BC-ORG-Data/Data/diagnosis02.csv', index=0)
#nart01
nart = pd.read_csv ('/mmfs1/data/pijarj/ndar_fmri/nart01.txt',delimiter='\t')
nart.to_csv ('/mmfs1/data/pijarj/BC-ORG-Data/Data/nart01.csv', index=0)
#odyssey01
odyssey = pd.read_csv ('/mmfs1/data/pijarj/ndar_fmri/odyssey01.txt',delimiter='\t')
odyssey.to_csv ('/mmfs1/data/pijarj/BC-ORG-Data/Data/odyssey01.csv', index=0)
#wais_iii01
wais_iii01 = pd.read_csv ('/mmfs1/data/pijarj/ndar_fmri/wais_iii01.txt',delimiter='\t')
wais_iii01.to_csv ('/mmfs1/data/pijarj/BC-ORG-Data/Data/wais_iii01.csv', index=0)
#wasi_199903
wasi_199903 = pd.read_csv ('/mmfs1/data/pijarj/ndar_fmri/wasi_199903.txt',delimiter='\t')
wasi_199903.to_csv ('/mmfs1/data/pijarj/BC-ORG-Data/Data/wasi_199903.csv', index=0)
#wasi201
wasi201 = pd.read_csv ('/mmfs1/data/pijarj/ndar_fmri/wasi201.txt',delimiter='\t')
wasi201.to_csv ('/mmfs1/data/pijarj/BC-ORG-Data/Data/wasi201.csv', index=0)
#wisc_iii01
wisc_iii01 = pd.read_csv ('/mmfs1/data/pijarj/ndar_fmri/wisc_iii01.txt',delimiter='\t')
wisc_iii01.to_csv ('/mmfs1/data/pijarj/BC-ORG-Data/Data/wisc_iii01.csv', index=0)
#wisc_iv_part202
wisc_iv_part202 = pd.read_csv ('/mmfs1/data/pijarj/ndar_fmri/wisc_iv_part202.txt',delimiter='\t')
wisc_iv_part202.to_csv ('/mmfs1/data/pijarj/BC-ORG-Data/Data/wisc_iv_part202.csv', index=0)
#wisc_v01
wisc_v01 = pd.read_csv ('/mmfs1/data/pijarj/ndar_fmri/wisc_v01.txt',delimiter='\t')
wisc_v01.to_csv ('/mmfs1/data/pijarj/BC-ORG-Data/Data/wisc_v01.csv', index=0)
# +
#diagnosis02 has information for tics in Intrinsic Brain Architecture..., 3/29 subjects have tics
#ace has info on disorders patients also have,info for dataset Multimodal Developmental Neurogenetics of Females with ASD
#bu is additional drugs taken
#nart01,waisiii_01,wasi_199903,wasi201,wisc_iii01,wisc_iv_part202,wisc_v01 is info regarding the test subjects took
#odyssey is questions about treatment, not useful
#wasi201 is the only csv that has info for mapping thalamocortical...
# -
| Code/ADOS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # EDA: Advanced Feature Extraction
# +
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
import os
# Duplicate imports of re, stopwords, PorterStemmer, and BeautifulSoup removed;
# each name is imported exactly once below.
import re
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from bs4 import BeautifulSoup
from fuzzywuzzy import fuzz
from sklearn.manifold import TSNE
from wordcloud import WordCloud, STOPWORDS
from os import path
from PIL import Image
# -
# ## Importig data File
# Load the pre-computed feature frame produced by the previous notebook.
filePath = "../data/train/df_fe_without_preprocessing_train.csv"
if os.path.isfile(filePath):
    df = pd.read_csv(filePath,encoding='latin-1')
    df = df.fillna('')
    df.head()  # no-op in script form; only displays when run interactively
else:
    print("get df_fe_without_preprocessing_train.csv from drive or run the previous notebook")
# NOTE(review): if the file is missing, `df` is never bound and the next line
# raises NameError — confirm whether execution should stop instead.
df.head(2)
# ## Preprocessing of Text
# - Preprocessing:
# - Removing html tags
# - Removing Punctuations
# - Performing stemming
# - Removing Stopwords
# - Expanding contractions etc.
| Preprocessing/Preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="nqrr7ZlrpBgt"
# # Cohort Project - Supplemental - $H_2$ Ising Hamiltonian solver
#
# > Implementing a molecular ground state energy calculator, with $H_2$ as the primary example. This demonstrates a method for finding the exact mapping between the electronic structure Hamiltonian and the Ising Hamilitonain. The key part of this work is leveraged from this paper [arXiv:1611.01068v1 [quant-ph] 3 Nov 2016](https://arxiv.org/pdf/1611.01068.pdf)
#
#
#
#
# + [markdown] colab_type="text" id="3FUmbz1SASOh"
# ## Summary
#
# Authors of the [paper](https://arxiv.org/pdf/1611.01068.pdf) have provided a method to express the calculation of the $H_2$ molecule ground state energy as a two-body Ising Hamiltonian.
#
# The DWave Quantum Annealer is designed for solving Ising Hamiltonians.
#
# This implementation attempts to follow the recipe laid out in Supplemental Material (page 6 - para. 1 Detailed Procedure ) which refer to the formulas (4), (5), (6) and (7). Table I (page 7) is represented in the data file H2_coefficients_exact_simulated.csv which we use to compute the simulated energy to compare to the exact energy, and thus validate the computation.
#
# + [markdown] colab_type="text" id="LgB-_TdQ_8kp"
# # Experimenting for $H_2$ Energy Calculations
#
# ## Implementing Hamiltonian for calculating $H_2$ Energies
#
#
#
#
#
#
#
# + [markdown] colab_type="text" id="NQUEUmrrzEaS"
# ### Outcome
#
# At this stage the results do not match the target exact numbers provided. The code will need reviewing to verify the application of the recipe.
#
# One peculiar outcome is that results obtained for row N strangely coincide with the target on row N+1. If the code is considered correct, one might need to have the data Table I in the paper validated to ensure the energies indicated are indeed associated with the correct coefficients for the given molecular distance on each row.
#
# See results under the "Process" header further below.
#
# + [markdown] colab_type="text" id="46n_8IGtQ7UA"
# ## Procedure
#
# + [markdown] colab_type="text" id="m2QeeaupL4eq"
# ### Excerpt from the paper
#
# We can use this symmetry to reduce the
# Hamiltonian to the following effective Hamiltonian, acting only on two qubits:
#
# >(4) $H_{H_2}=g_01+g_1σ_{z}^{0}+g_2σ_{z}^{1}+g_3σ_{z}^{0}σ_{z}^{1}+g_4σ_{x}^{0}σ_{x}^{1}+g_4σ_{y}^{0}σ_{y}^{1}$
# >$=g_01+H_0$
#
# >(5) $H_0=g_1σ_{z}^{0}+g_2σ_{z}^{1}+g_3σ_{z}^{0}σ_{z}^{1}+g_4σ_{x}^{0}σ_{x}^{1}+g_4σ_{y}^{0}σ_{y}^{1}$
#
# By squaring the Hamiltonian $H_0$ and modifying it, one can get a new Ising Hamiltonian:
#
# >(6) $H_1=H_{0}^2 + 2g_3H_0=a_1+a_2(σ_{z}^{0}+σ_{z}^{1}) +a_3σ_{z}^{0}σ_{z}^{1}$
#
# With:
#
# >(7)
#
# >$a_1=g_1^2+g_2^2+g_3^2+ 2g_4^2$
#
# >$a_2 = 2(g_1+g_2)g_3$
#
# >$a_3= 2(g_1g_2−g_4^2+g_3^2)$
#
#
#
# Here we present steps to get the ground state of $H_{H_2}$ by using the new Ising Hamiltonian H1 (Eq.6).
# 1. If $|g_1| + |g_2| + |g_4| < |g_3|$ start computing by $H_1$ and get the result $Y$ . Otherwise increase $|g_3|$ by
# $|g_1| + |g_2| + |g_4|$ and start computing.
# 2. Solve equation $x2 + 2g3x = Y$ and get $σ_x^1$ and $σ_x^2$ $(σ_x^1<= σ_x^2)$. Add $|g_1| + |g_2| + |g_4|$ to $σ_x^1$
# if added to $g_3$ before (we just assume $g_3 > 0$.) Compare $σ_x^1$ with $g_3 − g_1 − g_2$ (or $g_3 + g_1 + g_2$) to get the ground state of $H_0$. Add $g_0$ to get the ground state of $H_{H_2}$.
#
# + [markdown] colab_type="text" id="G8Mcaqw5iJGN"
# # Code
#
# ## Toolkit installation
# + colab={"base_uri": "https://localhost:8080/", "height": 822} colab_type="code" id="iW9g5Ffbfz9g" outputId="5a4f9889-7498-4911-b118-86d4629b5245"
# !pip install dwave-ocean-sdk
# -
# Now we import our libraries and define the sampler. Adapating this to work on different QPU's is only a matter of setting the sampler to the desired device.
# + colab={} colab_type="code" id="8S9CKZDGpSOu"
from quantum_H2_ising import GroundStateEnergy
# + [markdown] colab_type="text" id="-ARym4ImN_FR"
# ## Class for $H_2$ energy calculation
#
# Loads the table file included in the source paper.
#
# Calculates the ground state energy given one set of coefficients (one row)
# from the table
# + colab={"base_uri": "https://localhost:8080/", "height": 56} colab_type="code" id="qUB_X3OyADfk" outputId="37824d1a-2227-4cc9-e86d-84080f184284"
H2 = GroundStateEnergy('H2_coefficients_exact_simulated.csv')
# + [markdown] colab_type="text" id="Pp2EvStFJDX2"
# ## Process the table and calculate the $H_2$ ground states
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="rYRqT782ATp6" outputId="f04f6dd6-c576-4bf2-91ed-5d6ff2c5c96f"
# NOTE(review): `data` is unused since the loop below was rewritten to iterate
# H2.get_available_R(); confirm it and the commented loop can be removed.
data = H2.data()
isingList = []
exactList = []
quboList = []
samples = 50  # annealer samples per solve
#for row in range(len(data)):
#    R,g0,g1,g2,g3,g4,exact,sim = data[row]
count = 0
# Solve the ground-state energy at every available bond length R with both the
# Ising and QUBO formulations, collecting curves for plotting below.
for R in H2.get_available_R():
    exact = H2.get_g_values(R)['e']
    HH2_i = H2.solve_ising(R, samples, exact, False, useQPU=False)
    HH2_q = H2.solve_qubo(R, samples, exact, False, useQPU=False)
    exactList.append(exact)
    isingList.append(HH2_i)
    quboList.append(HH2_q)
    # Progress printout every tenth bond length.
    if count % 10 == 0:
        print("\nSolving for R = %f" % (R))
        print("Energy via exact solution = %f," % (exact))
        print("Energy via qubo = %f," % (HH2_q))
        print("Energy via ising = %f" % (HH2_i))
    count += 1
# -
# ## Plot the results
# +
# %matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# Bond lengths 0.60..3.10 in 0.05 steps.
# NOTE(review): assumes H2.get_available_R() yields exactly this grid so `r`
# lines up with the collected energy lists — confirm.
r = np.array(range(60,315,5)) / 100.0
#Define plots
ising_plot = ax.plot(r, isingList, label='Ising')
qubo_plot = ax.plot(r, quboList, dashes=[8, 5], label='QUBO')
true_plot = ax.plot(r, exactList, dashes=[6, 2], label='True Energy')
#Text formating
params = {'mathtext.default': 'regular' }
plt.rcParams.update(params)
plt.xlabel('$r$', fontsize=14)
y = plt.ylabel('$E_{bond}$', fontsize=14)
plt.title('H2 Potential Energy Curve using Ising and QUBO methods', fontsize=16)
ax.legend()
plt.show()
# -
# We can now zoom in to a subset of the data to see how close the solvers are to the exact values. In this subset the difference is more noticeable, though both methods remain very accurate.
# +
# Zoomed view of a subset of the curves from the previous plot.
fig_f, ax_f = plt.subplots()
#Choose a, b values to plot a subset of the data
a = 14
b = 19
ising_f= ax_f.plot(r[a:b], isingList[a:b], label='Ising')
rbm_plot = ax_f.plot(r[a:b], quboList[a:b], dashes=[8, 5], label='QUBO')
true_plot = ax_f.plot(r[a:b], exactList[a:b], dashes=[6, 2], label='True Energy')
ax_f.legend()
plt.show()
# -
| Project_4_Ising_Annealer/CDL_DWaveH2QA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building a CESM Case
# This will run through building a CESM case, providing documentation from the [official CESM2 site](https://escomp.github.io/CESM/versions/cesm2.1/html/)
# It is also recommended you look at the [CIME documentation](http://esmci.github.io/cime/versions/master/html/index.html)
#
# From that link, you will walk through the following steps:
# 1. Download the CESM2 to the machine you are working on via [Github](http://github.com/ESCOMP/CESM)
# 1. Install external portions using the [instructions](https://escomp.github.io/CESM/versions/cesm2.2/html/downloading_cesm.html)
# 1. Decide on which components/grids to include
# 1. Use the [CESM naming conventions](https://www.cesm.ucar.edu/models/cesm2/naming_conventions.html#casenames) to find the syntax needed to run the components you are using
# 1. Setup your case run
# 1. Build your case
# 1. Run your case
| notebooks/BuildCESMcase.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project Overview -- KNN
# ### 1.0.1 Breast Cancer Wisconsin Diagnostic
# In this project, we are going to work with the real Breast Cancer Wisconsin (Diagnostic) dataset. The dataset is available on Kaggle and originally belongs to the UCI Machine Learning Repository.
# This dataset was donated to UCI by <NAME> in 1995 for public use. Relevant papers and a detailed description of the dataset are provided on the UCI website.
#
#
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
# Load the Wisconsin diagnostic dataset and inspect its columns.
df = pd.read_csv('Breast_Cancer_Diagnostic.csv')
df.columns
# ##### We will only consider ten real-valued features in this project for diagnostic.
# Keep the ten mean-value features plus the diagnosis label.
df = df[['radius_mean', 'texture_mean', 'perimeter_mean',
         'area_mean', 'smoothness_mean', 'compactness_mean', 'concavity_mean',
         'concave points_mean', 'symmetry_mean', 'fractal_dimension_mean','diagnosis']]
df.head()
# ##### Let's get an overview of our data using info(), and let's check if there are any missing values.
df.info()
# ##### Let's see which type cancer is common
df['diagnosis'].value_counts()
# ##### Let's standardize the variable/features to get your data ready for knn
from sklearn.preprocessing import StandardScaler
# ##### Let's create a scaler
scaler = StandardScaler()
# ##### Let's split data into features and target to fit scaler to the features only.
# Features are every column except the diagnosis label.
features = df.drop('diagnosis', axis = 1)
target = df['diagnosis']
# ##### Fitting scaler to the features
scaler.fit(features)
# ##### Let's get the scaled features into scaled_features
scaled_features = scaler.transform(features)
# ##### Let's do the train_test_split by using test_size = 0.33, random_state = 42
from sklearn.model_selection import train_test_split
X = scaled_features
y = target
# Fixed random_state keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y, test_size=0.33, random_state=42)
# ##### Let's import the KNeighborsClassifier
from sklearn.neighbors import KNeighborsClassifier
# ##### Let's create a KNN model instance with n_neighbors=1
knn = KNeighborsClassifier(n_neighbors=1)
# ##### Let's fit the model to training data
knn.fit(X_train, y_train)
# ##### Let's do the prediction for the data test
predictions = knn.predict(X_test)
# ##### Let's print the Confusion matrix and the classifier report
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(y_test, predictions))
print(confusion_matrix(y_test, predictions))
# ##### Let's use the Elbow method, and find the best value of k
err_rate = []
for i in range(1,100):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train,y_train)
pred_i = knn.predict(X_test)
err_rate.append(np.mean(pred_i !=y_test))
# ##### Let's plot the error rate Vs k to see wich value have the lowest error rate
# +
plt.figure(figsize=(16,6))
plt.plot(range(1,100), err_rate, color ='green',
marker ='o', markerfacecolor = 'blue')
plt.title('Error Rate vs. K Value')
plt.xlabel('K_Value')
plt.ylabel('Error_Rate')
plt.show()
# -
# ##### Let's use k for the minimum error rate, do the predictions and print confusion matrix and classification report
knn = KNeighborsClassifier(n_neighbors=17)
knn.fit(X_train,y_train)
predictions = knn.predict(X_test)
print(classification_report(y_test, predictions))
print(confusion_matrix(y_test, predictions))
# ##### More data we have better we can train our model.
| Breast_Cancer_Wisconsin_Diagnostic_Project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Accessing higher energy states with Qiskit Pulse
# In most quantum algorithms/applications, computations are carried out over a 2-dimensional space spanned by $|0\rangle$ and $|1\rangle$. In IBM's hardware, however, there also exist higher energy states which are not typically used. The focus of this section is to explore these states using Qiskit Pulse. In particular, we demonstrate how to excite the $|2\rangle$ state and build a discriminator to classify the $|0\rangle$, $|1\rangle$ and $|2\rangle$ states.
#
# We recommend reviewing the prior [chapter](https://qiskit.org/textbook/ch-quantum-hardware/calibrating-qubits-openpulse.html) before going through this notebook. We also suggest reading the OpenPulse specifications (Ref [1](#refs)).
# ### Physics Background
# We now give some additional background on the physics of transmon qubits, the basis for much of IBM's quantum hardware. These systems contain superconducting circuits composed of a Josephson junction and capacitor. For those unfamiliar with superconducting circuits, see the review [here](https://arxiv.org/pdf/1904.06560.pdf) (Ref. [2](#refs)). The Hamiltonian of this system is given by
#
# $$
# H = 4 E_C n^2 - E_J \cos(\phi),
# $$
#
# where $E_C, E_J$ denote the capacitor and Josephson energies, $n$ is the reduced charge number operator and $\phi$ is the reduced flux across the junction. We work in units with $\hbar=1$.
#
# Transmon qubits are defined in the regime where $\phi$ is small, so we may expand $E_J \cos(\phi)$ in a Taylor series (ignoring constant terms)
#
# $$
# E_J \cos(\phi) \approx \frac{1}{2} E_J \phi^2 - \frac{1}{24} E_J \phi^4 + \mathcal{O}(\phi^6).
# $$
#
# The quadratic term $\phi^2$ defines the standard harmonic oscillator. Each additional term contributes an anharmonicity.
#
# Using the relations $n \sim (a-a^\dagger), \phi \sim (a+a^\dagger)$ (for raising, lowering operators $a^\dagger, a$), it can be shown that the system resembles a Duffing oscillator with Hamiltonian
# $$
# H = \omega a^\dagger a + \frac{\alpha}{2} a^\dagger a^\dagger a a,
# $$
#
# where $\omega$ gives the $0\rightarrow1$ excitation frequency ($\omega \equiv \omega^{0\rightarrow1}$) and $\alpha$ is the anharmonicity between the $0\rightarrow1$ and $1\rightarrow2$ frequencies ($\alpha \equiv \omega^{1\rightarrow2} - \omega^{0\rightarrow1}$). Drive terms can be added as needed.
#
# If we choose to specialize to the standard 2-dimensional subspace, we can make $|\alpha|$ sufficiently large or use special control techniques to suppress the higher energy states.
# # Contents
#
# 0. [Getting started](#importing)
# 1. [Discriminating the 0 and 1 states](#discrim01)
# 1. [0->1 Frequency Sweep](#freqsweep01)
# 2. [0->1 Rabi Experiment](#rabi01)
# 3. [Build the 0,1 discriminator](#builddiscrim01)
# 2. [Discriminating the 0, 1 and 2 states](#discrim012)
# 1. [Computing the 1->2 Frequency](#freq12)
# 1. [1->2 Frequency Sweep using the sideband method](#sideband12)
# 2. [1->2 Rabi Experiment](#rabi12)
# 3. [Build the 0, 1, 2 discriminator](#builddiscrim012)
# 4. [References](#refs)
# ## 0. Getting Started <a id="importing"></a>
# We begin by importing dependencies and defining some default variable values. We choose qubit 0 to run our experiments. We perform our experiments on the publicly available single qubit device `ibmq_armonk`.
# +
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.signal import find_peaks
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split
import qiskit.pulse as pulse
import qiskit.pulse.pulse_lib as pulse_lib
from qiskit.compiler import assemble
from qiskit.pulse.commands import SamplePulse
from qiskit.tools.monitor import job_monitor
# + tags=["uses-hardware"]
import warnings
warnings.filterwarnings('ignore')
from qiskit.tools.jupyter import *
# %matplotlib inline
from qiskit import IBMQ
# Authenticate with the IBM Quantum account and select the single-qubit,
# pulse-enabled backend used throughout this notebook.
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
backend = provider.get_backend('ibmq_armonk')
backend_config = backend.configuration()
assert backend_config.open_pulse, "Backend doesn't support OpenPulse"
dt = backend_config.dt  # hardware sample time (seconds per sample)
backend_defaults = backend.defaults()
# unit conversion factors -> all backend properties returned in SI (Hz, sec, etc)
GHz = 1.0e9 # Gigahertz
MHz = 1.0e6 # Megahertz
us = 1.0e-6 # Microseconds
ns = 1.0e-9 # Nanoseconds
qubit = 0 # qubit we will analyze
default_qubit_freq = backend_defaults.qubit_freq_est[qubit] # Default qubit frequency in Hz.
print(f"Qubit {qubit} has an estimated frequency of {default_qubit_freq/ GHz} GHz.")
# scale data (specific to each device)
scale_factor = 1e-14
# number of shots for our experiments
NUM_SHOTS = 1024
### Collect the necessary channels (drive, measure, acquire) for our qubit
drive_chan = pulse.DriveChannel(qubit)
meas_chan = pulse.MeasureChannel(qubit)
acq_chan = pulse.AcquireChannel(qubit)
# -
# We define some additional helper functions.
# + tags=["uses-hardware"]
def get_job_data(job, average):
    """Retrieve measurement data from a job that has already run.

    Args:
        job (Job): The job whose data you want.
        average (bool): If True, interpret the results as averaged data.
            If False, interpret them as single-shot data.

    Return:
        list: List containing job result data, one entry per experiment.
    """
    results = job.result(timeout=120)  # wait at most 120 s for the result
    num_experiments = len(results.results)
    if average:  # averaged data: one complex value per experiment
        return [results.get_memory(idx)[qubit] * scale_factor
                for idx in range(num_experiments)]
    # single-shot data: one complex value per shot, per experiment
    return [results.get_memory(idx)[:, qubit] * scale_factor
            for idx in range(num_experiments)]
def get_closest_multiple_of_16(num):
    """Truncate *num* down to the largest multiple of 16 not exceeding it.

    Pulse-enabled devices require durations which are multiples of 16
    samples, so fractional sample counts must be snapped to a legal value.

    Note: despite the historical name, this floors rather than rounding to
    the *nearest* multiple of 16 (e.g. 31 -> 16, not 32); flooring is safe
    here because it never produces a longer-than-requested duration.

    Args:
        num (float): Desired duration in samples.

    Returns:
        int: Largest multiple of 16 less than or equal to int(num).
    """
    return 16 * (int(num) // 16)
# -
# Next we include some default parameters for drive pulses and measurement. We pull the `measure` command from the instruction schedule map (from backend defaults), so that it is updated with new calibrations.
# + tags=["uses-hardware"]
# Drive pulse parameters (us = microseconds)
drive_sigma_us = 0.075 # This determines the actual width of the gaussian
drive_samples_us = drive_sigma_us*8 # This is a truncating parameter, because gaussians don't have
# a natural finite length
# Convert widths from microseconds to hardware samples (units of dt),
# truncated to a multiple of 16 as required by the pulse hardware.
drive_sigma = get_closest_multiple_of_16(drive_sigma_us * us /dt) # The width of the gaussian in units of dt
drive_samples = get_closest_multiple_of_16(drive_samples_us * us /dt) # The truncating parameter in units of dt
# + tags=["uses-hardware"]
# Find out which measurement map index is needed for this qubit
meas_map_idx = None
for i, measure_group in enumerate(backend_config.meas_map):
    if qubit in measure_group:
        meas_map_idx = i
        break
assert meas_map_idx is not None, f"Couldn't find qubit {qubit} in the meas_map!"
# + tags=["uses-hardware"]
# Get default measurement pulse from instruction schedule map
# (pulled from backend defaults so it reflects the latest calibrations)
inst_sched_map = backend_defaults.instruction_schedule_map
measure = inst_sched_map.get('measure', qubits=backend_config.meas_map[meas_map_idx])
# -
# ## 1. Discriminating the $|0\rangle$ and $|1\rangle$ states <a id="discrim01"></a>
# In this section, we build a discriminator for our standard $|0\rangle$ and $|1\rangle$ states. The job of the discriminator is to take `meas_level=1` complex data and classify it into the standard $|0\rangle$ and $|1\rangle$ states (`meas_level=2`). This will replicate much of the work of the prior [chapter](https://qiskit.org/textbook/ch-quantum-hardware/calibrating-qubits-openpulse.html). These results are necessary for exciting the higher energy states which are the focus of this notebook.
# ### 1A. 0->1 Frequency Sweep <a id="freqsweep01"></a>
# The first step in building a discriminator is to calibrate our qubit frequency, as done in the prior chapter.
# + tags=["uses-hardware"]
def create_ground_freq_sweep_program(freqs, drive_power):
    """Builds a program that does a freq sweep by exciting the ground state.
    Depending on drive power this can reveal the 0->1 frequency or the 0->2 frequency.
    Args:
        freqs (np.ndarray(dtype=float)): Numpy array of frequencies to sweep.
        drive_power (float) : Value of drive amplitude.
    Raises:
        ValueError: Raised if use more than 75 frequencies; currently, an error will be thrown on the backend
            if you try to do this.
    Returns:
        Qobj: Program for ground freq sweep experiment.
    """
    # Backend limitation: at most 75 schedules/LO configurations per job.
    if len(freqs) > 75:
        raise ValueError("You can only run 75 schedules at a time.")
    # print information on the sweep
    print(f"The frequency sweep will go from {freqs[0] / GHz} GHz to {freqs[-1]/ GHz} GHz \
using {len(freqs)} frequencies. The drive power is {drive_power}.")
    # Define the drive pulse
    ground_sweep_drive_pulse = pulse_lib.gaussian(duration=drive_samples,
                                                  sigma=drive_sigma,
                                                  amp=drive_power,
                                                  name='ground_sweep_drive_pulse')
    # Create the base schedule: drive, then measure after the drive ends.
    schedule = pulse.Schedule(name='Frequency sweep starting from ground state.')
    schedule |= ground_sweep_drive_pulse(drive_chan)
    schedule |= measure << schedule.duration
    # define frequencies for the sweep (one LO configuration per frequency)
    schedule_freqs = [{drive_chan: freq} for freq in freqs]
    # assemble the program
    # Note: we only require a single schedule since each does the same thing;
    # for each schedule, the LO frequency that mixes down the drive changes
    # this enables our frequency sweep
    ground_freq_sweep_program = assemble(schedule,
                                         backend=backend,
                                         meas_level=1,
                                         meas_return='avg',
                                         shots=NUM_SHOTS,
                                         schedule_los=schedule_freqs)
    return ground_freq_sweep_program
# + tags=["uses-hardware"]
# We will sweep 40 MHz around the estimated frequency, with 75 frequencies
num_freqs = 75
ground_sweep_freqs = default_qubit_freq + np.linspace(-20*MHz, 20*MHz, num_freqs)
ground_freq_sweep_program = create_ground_freq_sweep_program(ground_sweep_freqs, drive_power=0.3)
# + tags=["uses-hardware"]
# Submit the sweep job to the backend and monitor it until completion.
ground_freq_sweep_job = backend.run(ground_freq_sweep_program)
# + tags=["uses-hardware"]
print(ground_freq_sweep_job.job_id())
job_monitor(ground_freq_sweep_job)
# + tags=["uses-hardware"]
# Get the job data (average)
ground_freq_sweep_data = get_job_data(ground_freq_sweep_job, average=True)
# -
# We fit our data to a Lorentzian curve and extract the calibrated frequency.
# + tags=["uses-hardware"]
def fit_function(x_values, y_values, function, init_params):
    """Least-squares fit of ``function`` to (x_values, y_values) via scipy.

    Returns the optimal parameters and the fitted curve evaluated at x_values.
    """
    optimal_params, _covariance = curve_fit(function, x_values, y_values, init_params)
    fitted_curve = function(x_values, *optimal_params)
    return optimal_params, fitted_curve
# + tags=["uses-hardware"]
# do fit in Hz
# Fit the sweep signal to a Lorentzian; the fitted center q_freq is the
# calibrated 0->1 transition frequency.
(ground_sweep_fit_params,
 ground_sweep_y_fit) = fit_function(ground_sweep_freqs,
                                    ground_freq_sweep_data,
                                    lambda x, A, q_freq, B, C: (A / np.pi) * (B / ((x - q_freq)**2 + B**2)) + C,
                                    [7, 4.975*GHz, 1*GHz, 3*GHz] # initial parameters for curve_fit
                                    )
# + tags=["uses-hardware"]
# Note: we are only plotting the real part of the signal
plt.scatter(ground_sweep_freqs/GHz, ground_freq_sweep_data, color='black')
plt.plot(ground_sweep_freqs/GHz, ground_sweep_y_fit, color='red')
plt.xlim([min(ground_sweep_freqs/GHz), max(ground_sweep_freqs/GHz)])
plt.xlabel("Frequency [GHz]", fontsize=15)
plt.ylabel("Measured Signal [a.u.]", fontsize=15)
plt.title("0->1 Frequency Sweep", fontsize=15)
plt.show()
# + tags=["uses-hardware"]
# Keep only the fitted center frequency; it is used for all later drives.
_, cal_qubit_freq, _, _ = ground_sweep_fit_params
print(f"We've updated our qubit frequency estimate from "
      f"{round(default_qubit_freq/GHz, 7)} GHz to {round(cal_qubit_freq/GHz, 7)} GHz.")
# -
# ### 1B. 0->1 Rabi Experiment <a id="rabi01"></a>
# Next, we perform a Rabi experiment to compute the $0\rightarrow1 ~ \pi$ pulse amplitude. Recall, a $\pi$ pulse is a pulse that takes us from the $|0\rangle$ to $|1\rangle$ state (a $\pi$ rotation on the Bloch sphere).
# + tags=["uses-hardware"]
# experimental configuration
num_rabi_points = 50 # number of experiments (ie amplitudes to sweep out)
# Drive amplitude values to iterate over: 50 amplitudes evenly spaced from 0 to 0.75
drive_amp_min = 0
drive_amp_max = 0.75
drive_amps = np.linspace(drive_amp_min, drive_amp_max, num_rabi_points)
# + tags=["uses-hardware"]
# Create schedule: one schedule per drive amplitude, each drive-then-measure.
rabi_01_schedules = []
# loop over all drive amplitudes
for ii, drive_amp in enumerate(drive_amps):
    # drive pulse
    rabi_01_pulse = pulse_lib.gaussian(duration=drive_samples,
                                       amp=drive_amp,
                                       sigma=drive_sigma,
                                       name='rabi_01_pulse_%d' % ii)
    # add commands to schedule
    schedule = pulse.Schedule(name='Rabi Experiment at drive amp = %s' % drive_amp)
    schedule |= rabi_01_pulse(drive_chan)
    schedule |= measure << schedule.duration # shift measurement to after drive pulse
    rabi_01_schedules.append(schedule)
# + tags=["uses-hardware"]
# Assemble the schedules into a program
# Note: We drive at the calibrated frequency.
rabi_01_expt_program = assemble(rabi_01_schedules,
                                backend=backend,
                                meas_level=1,
                                meas_return='avg',
                                shots=NUM_SHOTS,
                                schedule_los=[{drive_chan: cal_qubit_freq}]
                                             * num_rabi_points)
# + tags=["uses-hardware"]
rabi_01_job = backend.run(rabi_01_expt_program)
# + tags=["uses-hardware"]
print(rabi_01_job.job_id())
job_monitor(rabi_01_job)
# + tags=["uses-hardware"]
# Get the job data (average)
rabi_01_data = get_job_data(rabi_01_job, average=True)
# + tags=["uses-hardware"]
def baseline_remove(values):
    """Shift a signal so its mean is zero (remove the DC baseline)."""
    signal = np.asarray(values)
    return signal - signal.mean()
# + tags=["uses-hardware"]
# Note: Only real part of data is plotted
# Center the signal and fit a cosine; the fitted period sets the pi-pulse amplitude.
rabi_01_data = np.real(baseline_remove(rabi_01_data))
(rabi_01_fit_params,
 rabi_01_y_fit) = fit_function(drive_amps,
                               rabi_01_data,
                               lambda x, A, B, drive_01_period, phi: (A*np.cos(2*np.pi*x/drive_01_period - phi) + B),
                               [4, -4, 0.5, 0])
plt.scatter(drive_amps, rabi_01_data, color='black')
plt.plot(drive_amps, rabi_01_y_fit, color='red')
drive_01_period = rabi_01_fit_params[2]
# account for phi in computing pi amp
pi_amp_01 = (drive_01_period/2/np.pi) *(np.pi+rabi_01_fit_params[3])
plt.axvline(pi_amp_01, color='red', linestyle='--')
plt.axvline(pi_amp_01+drive_01_period/2, color='red', linestyle='--')
plt.annotate("", xy=(pi_amp_01+drive_01_period/2, 0), xytext=(pi_amp_01,0), arrowprops=dict(arrowstyle="<->", color='red'))
plt.annotate("$\pi$", xy=(pi_amp_01-0.03, 0.1), color='red')
plt.xlabel("Drive amp [a.u.]", fontsize=15)
plt.ylabel("Measured signal [a.u.]", fontsize=15)
plt.title('0->1 Rabi Experiment', fontsize=15)
plt.show()
# + tags=["uses-hardware"]
print(f"Pi Amplitude (0->1) = {pi_amp_01}")
# -
# Using these results, we define our $0\rightarrow1$ $\pi$ pulse.
# + tags=["uses-hardware"]
# Calibrated 0->1 pi pulse: drives the qubit from |0> to |1>.
pi_pulse_01 = pulse_lib.gaussian(duration=drive_samples,
                                 amp=pi_amp_01,
                                 sigma=drive_sigma,
                                 name='pi_pulse_01')
# -
# ### 1C. Build the 0,1 discriminator <a id="builddiscrim01"></a>
# Now that we have our calibrated frequency and $\pi$ pulse, we can build a discriminator for $|0\rangle$ and $|1\rangle$ states. The discriminator works by taking `meas_level=1` data in the IQ plane and classifying it into a $|0\rangle$ or a $|1\rangle$.
#
# The $|0\rangle$ and $|1\rangle$ states form coherent circular "blobs" in the IQ plane, which are known as centroids. The center of the centroid defines the exact, no-noise IQ point for each state. The surrounding cloud shows the variance in the data, which is generated from a variety of noise sources.
#
# We apply a machine learning technique, Linear Discriminant Analysis, to discriminate (distinguish) between $|0\rangle$ and $|1\rangle$. This is a common technique for classifying qubit states.
# Our first step is to get the centroid data. To do so, we define two schedules (recalling that our system is in the $|0\rangle$ state to start):
# 1. Measure the $|0\rangle$ state directly (obtain $|0\rangle$ centroid).
# 2. Apply a $\pi$ pulse and then measure (obtain $|1\rangle$ centroid).
# + tags=["uses-hardware"]
# Create the two schedules
# Ground state schedule: measure |0> directly (system starts in |0>).
zero_schedule = pulse.Schedule(name="zero schedule")
zero_schedule |= measure
# Excited state schedule: pi pulse to |1>, then measure.
one_schedule = pulse.Schedule(name="one schedule")
one_schedule |= pi_pulse_01(drive_chan)
one_schedule |= measure << one_schedule.duration
# + tags=["uses-hardware"]
# Assemble the schedules into a program
# meas_return='single' keeps every shot so the IQ clouds are visible.
IQ_01_program = assemble([zero_schedule, one_schedule],
                         backend=backend,
                         meas_level=1,
                         meas_return='single',
                         shots=NUM_SHOTS,
                         schedule_los=[{drive_chan: cal_qubit_freq}] * 2)
# + tags=["uses-hardware"]
IQ_01_job = backend.run(IQ_01_program)
# + tags=["uses-hardware"]
print(IQ_01_job.job_id())
job_monitor(IQ_01_job)
# + tags=["uses-hardware"]
# Get job data (single); split for zero and one
IQ_01_data = get_job_data(IQ_01_job, average=False)
zero_data = IQ_01_data[0]
one_data = IQ_01_data[1]
# + tags=["uses-hardware"]
def IQ_01_plot(x_min, x_max, y_min, y_max):
    """Scatter-plot the |0> and |1> single-shot results in the IQ plane.

    Plot limits are given as arguments; each state's centroid (mean of its
    complex samples) is marked with a large black dot.
    """
    state_styles = [(zero_data, 'blue', r'$|0\rangle$'),
                    (one_data, 'red', r'$|1\rangle$')]
    # per-shot clouds: |0> in blue, |1> in red
    for shots, color, label in state_styles:
        plt.scatter(np.real(shots), np.imag(shots),
                    s=5, cmap='viridis', c=color, alpha=0.5, label=label)
    # centroid markers: mean of the complex samples averages I and Q together
    for shots, _color, _label in state_styles:
        centroid = np.mean(shots)
        plt.scatter(np.real(centroid), np.imag(centroid),
                    s=200, cmap='viridis', c='black', alpha=1.0)
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    plt.legend()
    plt.ylabel('I [a.u.]', fontsize=15)
    plt.xlabel('Q [a.u.]', fontsize=15)
    plt.title("0-1 discrimination", fontsize=15)
# -
# Below, we display the IQ plot. The blue centroid denotes the $|0\rangle$ state, while the red centroid denotes the $|1\rangle$ state. (Note: If the plot looks off, rerun the notebook)
# + tags=["uses-hardware"]
# Axis limits chosen empirically for this device's IQ data.
x_min = -5
x_max = 15
y_min = -5
y_max = 10
IQ_01_plot(x_min, x_max, y_min, y_max)
# -
# Now it is time to actually build the discriminator. As mentioned above, we will use a machine learning technique called Linear Discriminant Analysis (LDA). LDA classifies an arbitrary data set into a set of categories (here $|0\rangle$, $|1\rangle$) by maximizing the distance between the means of each category and minimizing the variance within each category. For further detail, see [here](https://scikit-learn.org/stable/modules/lda_qda.html#id4) (Ref. [3](#refs)).
#
# LDA generates a line called a separatrix. Depending on which side of the separatrix a given data point is on, we can determine which category it belongs to. In our example, one side of the separatrix corresponds to $|0\rangle$ states and the other to $|1\rangle$ states.
#
# We train our model using the first half of our data and test it on the second half. We use `scikit.learn` for an implementation of LDA; in a future release, this functionality will be released directly in Qiskit-Ignis (see [here](https://github.com/Qiskit/qiskit-ignis/tree/master/qiskit/ignis/measurement/discriminator)).
# We begin by reshaping our result data into a format suitable for discrimination.
# + tags=["uses-hardware"]
def reshape_complex_vec(vec):
    """Convert a complex vector into a 2-column real array for the learner.

    Args:
        vec (list): complex vector of data

    Returns:
        np.ndarray: float array of shape (len(vec), 2) whose i-th row is
            (real(vec[i]), imag(vec[i]))
    """
    vec = np.asarray(vec)
    # Stack real and imaginary parts column-wise in one vectorized step;
    # float64 output matches the original element-by-element fill of a
    # pre-allocated np.zeros buffer.
    return np.column_stack((vec.real, vec.imag))
# + tags=["uses-hardware"]
# Create IQ vector (split real, imag parts)
zero_data_reshaped = reshape_complex_vec(zero_data)
one_data_reshaped = reshape_complex_vec(one_data)
IQ_01_data = np.concatenate((zero_data_reshaped, one_data_reshaped))
print(IQ_01_data.shape) # verify IQ data shape
# -
# Next, we split our training and testing data. We test using a state vector with our expected results (an array of `0`'s for the ground schedule and `1`s for the excited schedule).
# + tags=["uses-hardware"]
# construct vector w/ 0's and 1's (for testing)
# Labels: first NUM_SHOTS entries are |0> shots, next NUM_SHOTS are |1> shots.
state_01 = np.zeros(NUM_SHOTS) # shots gives number of experiments
state_01 = np.concatenate((state_01, np.ones(NUM_SHOTS)))
print(len(state_01))
# Shuffle and split data into training and test sets
IQ_01_train, IQ_01_test, state_01_train, state_01_test = train_test_split(IQ_01_data, state_01, test_size=0.5)
# -
# Finally, we set up our model and train it. The accuracy of our fit is printed.
# + tags=["uses-hardware"]
# Set up the LDA and train it on the shuffled half of the IQ data.
LDA_01 = LinearDiscriminantAnalysis()
LDA_01.fit(IQ_01_train, state_01_train)
# + tags=["uses-hardware"]
# test on some simple data
# (0,0) should land on the |0> side and (10,0) on the |1> side of the separatrix
print(LDA_01.predict([[0,0], [10, 0]]))
# + tags=["uses-hardware"]
# Compute accuracy on the held-out half
score_01 = LDA_01.score(IQ_01_test, state_01_test)
print(score_01)
# -
# The last step is to plot the separatrix.
# + tags=["uses-hardware"]
# Plot separatrix on top of scatter
def separatrixPlot(lda, x_min, x_max, y_min, y_max, shots):
    """Draw the LDA decision boundary (separatrix) over the IQ scatter plot.

    Evaluates the classifier's probability on a shots-by-shots grid and
    contours the 0.5 level, then overlays the IQ data via IQ_01_plot.
    """
    nx, ny = shots, shots
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
                         np.linspace(y_min, y_max, ny))
    Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
    Z = Z[:, 1].reshape(xx.shape)
    # The 0.5 probability contour is the boundary between the two states.
    plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='black')
    IQ_01_plot(x_min, x_max, y_min, y_max)
separatrixPlot(LDA_01, x_min, x_max, y_min, y_max, NUM_SHOTS)
# -
# We see how each side of the separatrix corresponds to a centroid (and hence a state). Given a point in the IQ plane, our model checks which side of the separatrix it lies on and returns the corresponding state.
# ## 2. Discriminating the $|0\rangle$, $|1\rangle$ and $|2\rangle$ states <a id="discrim012"></a>
# Now that we have calibrated the $0, 1$ discriminator, we move on to exciting higher energy states. Specifically, we focus on exciting the $|2\rangle$ state and building a discriminator to classify $|0\rangle$, $|1\rangle$ and $|2\rangle$ states from their respective IQ data points. The procedure for even higher states ($|3\rangle$, $|4\rangle$, etc) should be similar, but we have not tested them explicitly.
# The process for building the higher state discriminator is as follows:
# 1. Compute the $1\rightarrow2$ frequency.
# 2. Conduct a Rabi experiment to obtain the $\pi$ pulse amplitude for $1\rightarrow2$. To do this, we first apply a $0\rightarrow1$ $\pi$ pulse to get from the $|0\rangle$ to the $|1\rangle$ state. Then, we do a sweep of drive amplitudes at the $1\rightarrow2$ frequency obtained above.
# 3. Construct 3 schedules:\
# a. Zero schedule: just measure the ground state.\
# b. One schedule: apply a $0\rightarrow1$ $\pi$ pulse and measure.\
# c. Two schedule: apply a $0\rightarrow1$ $\pi$ pulse, then a $1\rightarrow2$ $\pi$ pulse and measure.
# 4. Separate the data from each schedule into training and testing sets and construct an LDA model for discrimination.
# ### 2A. Computing the 1->2 frequency <a id="freq12"></a>
# The first step in our calibration is to compute the frequency needed to go from the $1\rightarrow2$ state. There are two methods to do this:
# 1. Do a frequency sweep from the ground state and apply very high power. If the applied power is large enough, two peaks should be observed. One at the $0\rightarrow1$ frequency found in section [1](#discrim01) and one at the $0\rightarrow2$ frequency. The $1\rightarrow2$ frequency can be obtained by taking the difference of the two. Unfortunately, for `ibmq_armonk`, the maximum drive power of $1.0$ is not sufficient to see this transition. Instead, we turn to the second method.
# 2. Excite the $|1\rangle$ state by applying a $0\rightarrow1$ $\pi$ pulse. Then perform the frequency sweep over excitations of the $|1\rangle$ state. A single peak should be observed at a frequency lower than the $0\rightarrow1$ frequency which corresponds to the $1\rightarrow2$ frequency.
# #### 1->2 Frequency Sweep using the sideband method <a id="sideband12"></a>
# We follow the second method described above. To drive the $0\rightarrow 1$ $\pi$ pulse, we require a local oscillator (LO) frequency given by the calibrated $0\rightarrow1$ frequency `cal_qubit_freq` (see construction of the Rabi $\pi$ pulse in section [1](#discrim01)). To sweep the range for the $1\rightarrow2$ frequency, however, we require varying the LO frequency. Unfortunately, the OpenPulse specification requires a single LO frequency per schedule.
#
# To resolve this, we set the LO frequency to `cal_qubit_freq` and multiply a sine function onto the $1\rightarrow2$ pulse at `freq-cal_qubit_freq`, where `freq` is the desired scan frequency. Applying the sinusoidal sideband, as it's known, enables us to change the LO frequency without manually setting it when assembling the program.
# + tags=["uses-hardware"]
def apply_sideband(pulse, freq):
    """Apply a sinusoidal sideband to this pulse at frequency freq.
    Args:
        pulse (SamplePulse): The pulse of interest.
        freq (float): LO frequency for which we want to apply the sweep.
    Return:
        SamplePulse: Pulse with a sideband applied (oscillates at difference between freq and cal_qubit_freq).
    """
    # NOTE(review): the parameter name `pulse` shadows the `qiskit.pulse`
    # module imported at file level; inside this function only the argument
    # is visible. Consider renaming in a future interface-breaking cleanup.
    # time goes from 0 to dt*drive_samples, sine arg of form 2*pi*f*t
    t_samples = np.linspace(0, dt*drive_samples, drive_samples)
    sine_pulse = np.sin(2*np.pi*(freq-cal_qubit_freq)*t_samples) # no amp for the sine
    # create sample pulse w/ sideband applied
    # Note: need to make sq_pulse.samples real, multiply elementwise
    sideband_pulse = SamplePulse(np.multiply(np.real(pulse.samples), sine_pulse), name='sideband_pulse')
    return sideband_pulse
# -
# We wrap the logic for assembling the program in a method and run our program.
# + tags=["uses-hardware"]
def create_excited_freq_sweep_program(freqs, drive_power):
    """Builds a program that does a freq sweep by exciting the |1> state.
    This allows us to obtain the 1->2 frequency. We get from the |0> to |1>
    state via a pi pulse using the calibrated qubit frequency. To do the
    frequency sweep from |1> to |2>, we use a sideband method by tacking
    a sine factor onto the sweep drive pulse.
    Args:
        freqs (np.ndarray(dtype=float)): Numpy array of frequencies to sweep.
        drive_power (float) : Value of drive amplitude.
    Raises:
        ValueError: Thrown if use more than 75 frequencies; currently, an error will be thrown on the backend
            if you try more than 75 frequencies.
    Returns:
        Qobj: Program for freq sweep experiment.
    """
    # Backend limitation: at most 75 schedules per job.
    if len(freqs) > 75:
        raise ValueError("You can only run 75 schedules at a time.")
    print(f"The frequency sweep will go from {freqs[0] / GHz} GHz to {freqs[-1]/ GHz} GHz \
using {len(freqs)} frequencies. The drive power is {drive_power}.")
    base_12_pulse = pulse_lib.gaussian(duration=drive_samples,
                                       sigma=drive_sigma,
                                       amp=drive_power,
                                       name='base_12_pulse')
    schedules = []
    for jj, freq in enumerate(freqs):
        # add sideband to gaussian pulse; the LO stays fixed at cal_qubit_freq
        # and the sideband shifts the effective drive frequency to `freq`
        freq_sweep_12_pulse = apply_sideband(base_12_pulse, freq)
        # add commands to schedule
        schedule = pulse.Schedule(name="Frequency = {}".format(freq))
        # Add 0->1 pulse, freq sweep pulse and measure
        schedule |= pi_pulse_01(drive_chan)
        schedule |= freq_sweep_12_pulse(drive_chan) << schedule.duration
        schedule |= measure << schedule.duration # shift measurement to after drive pulses
        schedules.append(schedule)
    num_freqs = len(freqs)
    # draw a schedule
    display(schedules[-1].draw(channels_to_plot=[drive_chan, meas_chan], label=True, scaling=1.0))
    # assemble freq sweep program
    # Note: LO is at cal_qubit_freq for each schedule; accounted for by sideband
    excited_freq_sweep_program = assemble(schedules,
                                          backend=backend,
                                          meas_level=1,
                                          meas_return='avg',
                                          shots=NUM_SHOTS,
                                          schedule_los=[{drive_chan: cal_qubit_freq}]
                                                       * num_freqs)
    return excited_freq_sweep_program
# + tags=["uses-hardware"]
# sweep 400 MHz below 0->1 frequency to catch the 1->2 frequency
num_freqs = 75
excited_sweep_freqs = cal_qubit_freq + np.linspace(-400*MHz, 30*MHz, num_freqs)
excited_freq_sweep_program = create_excited_freq_sweep_program(excited_sweep_freqs, drive_power=0.3)
# Plot an example schedule to make sure it's valid
# + tags=["uses-hardware"]
excited_freq_sweep_job = backend.run(excited_freq_sweep_program)
# + tags=["uses-hardware"]
print(excited_freq_sweep_job.job_id())
job_monitor(excited_freq_sweep_job)
# + tags=["uses-hardware"]
# Get job data (avg)
excited_freq_sweep_data = get_job_data(excited_freq_sweep_job, average=True)
# + tags=["uses-hardware"]
# Note: we are only plotting the real part of the signal
plt.scatter(excited_sweep_freqs/GHz, excited_freq_sweep_data, color='black')
plt.xlim([min(excited_sweep_freqs/GHz)+0.01, max(excited_sweep_freqs/GHz)]) # ignore min point (is off)
plt.xlabel("Frequency [GHz]", fontsize=15)
plt.ylabel("Measured Signal [a.u.]", fontsize=15)
plt.title("1->2 Frequency Sweep (first pass)", fontsize=15)
plt.show()
# -
# We see a minimum around $4.64$ GHz. There are a few spurious maxima, but they are too large to be the $1\rightarrow2$ frequency. The minimum corresponds to the $1\rightarrow2$ frequency.
#
# Using a relative minima function, we compute the value of this point exactly. This gives an estimate for the $1\rightarrow2$ frequency.
# + tags=["uses-hardware"]
# Prints out relative minima frequencies in output_data; height gives lower bound (abs val)
def rel_minima(freqs, output_data, height):
    """
    Prints out relative minima frequencies in output_data (can see dips).
    `height` is a LOWER bound (abs val) on how deep a dip must be to count —
    the original docstring incorrectly called it an upper bound. Be sure to
    set the height properly or the dip will be ignored!
    Args:
        freqs (list): frequency list
        output_data (list): list of resulting signals
        height (float): lower bound (abs val) on a dip's depth
    Returns:
        list: List containing relative minima frequencies
    """
    # Minima of output_data are maxima of its negation; pass height by
    # keyword so the find_peaks filter being used is explicit.
    peaks, _ = find_peaks(-1*output_data, height=height)
    print("Freq. dips: ", freqs[peaks])
    return freqs[peaks]
# + tags=["uses-hardware"]
minima = rel_minima(excited_sweep_freqs, np.real(excited_freq_sweep_data), 10)
approx_12_freq = minima[0]
# + [markdown] tags=["uses-hardware"]
# We now use the estimate obtained above to do a refined sweep (ie much smaller range). This will allow us to obtain a more accurate value for the $1\rightarrow2$ frequency. We sweep $20$ MHz in each direction.
# + tags=["uses-hardware"]
# smaller range refined sweep
num_freqs = 75
refined_excited_sweep_freqs = approx_12_freq + np.linspace(-20*MHz, 20*MHz, num_freqs)
refined_excited_freq_sweep_program = create_excited_freq_sweep_program(refined_excited_sweep_freqs, drive_power=0.3)
# + tags=["uses-hardware"]
refined_excited_freq_sweep_job = backend.run(refined_excited_freq_sweep_program)
# + tags=["uses-hardware"]
print(refined_excited_freq_sweep_job.job_id())
job_monitor(refined_excited_freq_sweep_job)
# + tags=["uses-hardware"]
# Get the refined data (average)
refined_excited_freq_sweep_data = get_job_data(refined_excited_freq_sweep_job, average=True)
# -
# Let's plot and fit the refined signal, using the standard Lorentzian curve.
# + tags=["uses-hardware"]
# do fit in Hz
(refined_excited_sweep_fit_params,
refined_excited_sweep_y_fit) = fit_function(refined_excited_sweep_freqs,
refined_excited_freq_sweep_data,
lambda x, A, q_freq, B, C: (A / np.pi) * (B / ((x - q_freq)**2 + B**2)) + C,
[-12, 4.625*GHz, 0.05*GHz, 3*GHz] # initial parameters for curve_fit
)
# + tags=["uses-hardware"]
# Note: we are only plotting the real part of the signal
plt.scatter(refined_excited_sweep_freqs/GHz, refined_excited_freq_sweep_data, color='black')
plt.plot(refined_excited_sweep_freqs/GHz, refined_excited_sweep_y_fit, color='red')
plt.xlim([min(refined_excited_sweep_freqs/GHz), max(refined_excited_sweep_freqs/GHz)])
plt.xlabel("Frequency [GHz]", fontsize=15)
plt.ylabel("Measured Signal [a.u.]", fontsize=15)
plt.title("1->2 Frequency Sweep (refined pass)", fontsize=15)
plt.show()
# + tags=["uses-hardware"]
_, qubit_12_freq, _, _ = refined_excited_sweep_fit_params
print(f"Our updated estimate for the 1->2 transition frequency is "
f"{round(qubit_12_freq/GHz, 7)} GHz.")
# -
# ### 2B. 1->2 Rabi Experiment <a id="rabi12"></a>
# Now that we have a good estimate for the $1\rightarrow2$ frequency, we perform a Rabi experiment to obtain the $\pi$ pulse amplitude for the $1\rightarrow2$ transition. To do so, we apply a $0\rightarrow1$ $\pi$ pulse and then sweep over drive amplitudes at the $1\rightarrow2$ frequency (using the sideband method).
# + tags=["uses-hardware"]
# experimental configuration
num_rabi_points = 75 # number of experiments (ie amplitudes to sweep out)
# Drive amplitude values to iterate over: 75 amplitudes evenly spaced from 0 to 1.0
drive_amp_min = 0
drive_amp_max = 1.0
drive_amps = np.linspace(drive_amp_min, drive_amp_max, num_rabi_points)
# + tags=["uses-hardware"]
# Create schedule
rabi_12_schedules = []
# loop over all drive amplitudes
# NOTE(review): the loop index `ii` is never used — a plain `for drive_amp in drive_amps` would do.
for ii, drive_amp in enumerate(drive_amps):
    base_12_pulse = pulse_lib.gaussian(duration=drive_samples,
                                       sigma=drive_sigma,
                                       amp=drive_amp,
                                       name='base_12_pulse')
    # apply sideband at the 1->2 frequency
    rabi_12_pulse = apply_sideband(base_12_pulse, qubit_12_freq)
    # add commands to schedule
    schedule = pulse.Schedule(name='Rabi Experiment at drive amp = %s' % drive_amp)
    schedule |= pi_pulse_01(drive_chan) # 0->1
    schedule |= rabi_12_pulse(drive_chan) << schedule.duration # 1->2 Rabi pulse
    schedule |= measure << schedule.duration # shift measurement to after drive pulse
    rabi_12_schedules.append(schedule)
# + tags=["uses-hardware"]
# Assemble the schedules into a program
# Note: The LO frequency is at cal_qubit_freq to support the 0->1 pi pulse;
# it is modified for the 1->2 pulse using sidebanding
rabi_12_expt_program = assemble(rabi_12_schedules,
                               backend=backend,
                               meas_level=1,
                               meas_return='avg',
                               shots=NUM_SHOTS,
                               schedule_los=[{drive_chan: cal_qubit_freq}]
                               * num_rabi_points)
# + tags=["uses-hardware"]
rabi_12_job = backend.run(rabi_12_expt_program)
# + tags=["uses-hardware"]
print(rabi_12_job.job_id())
job_monitor(rabi_12_job)
# + tags=["uses-hardware"]
# Get the job data (average)
rabi_12_data = get_job_data(rabi_12_job, average=True)
# -
# We plot and fit our data as before.
# + tags=["uses-hardware"]
# Note: We only plot the real part of the signal.
rabi_12_data = np.real(baseline_remove(rabi_12_data))
# Cosine fit: amplitude A, offset B, oscillation period in drive amplitude, phase phi.
(rabi_12_fit_params,
 rabi_12_y_fit) = fit_function(drive_amps,
                               rabi_12_data,
                               lambda x, A, B, drive_12_period, phi: (A*np.cos(2*np.pi*x/drive_12_period - phi) + B),
                               [3, 0.5, 0.9, 0])
plt.scatter(drive_amps, rabi_12_data, color='black')
plt.plot(drive_amps, rabi_12_y_fit, color='red')
drive_12_period = rabi_12_fit_params[2]
# account for phi in computing pi amp: solve 2*pi*x/period - phi = pi for x.
pi_amp_12 = (drive_12_period/2/np.pi) *(np.pi+rabi_12_fit_params[3])
plt.axvline(pi_amp_12, color='red', linestyle='--')
plt.axvline(pi_amp_12+drive_12_period/2, color='red', linestyle='--')
plt.annotate("", xy=(pi_amp_12+drive_12_period/2, 0), xytext=(pi_amp_12,0), arrowprops=dict(arrowstyle="<->", color='red'))
plt.annotate("$\pi$", xy=(pi_amp_12-0.03, 0.1), color='red')
plt.xlabel("Drive amp [a.u.]", fontsize=15)
plt.ylabel("Measured signal [a.u.]", fontsize=15)
plt.title('Rabi Experiment (1->2)', fontsize=20)
plt.show()
# + tags=["uses-hardware"]
print(f"Pi Amplitude (1->2) = {pi_amp_12}")
# -
# With this information, we can define our $1\rightarrow2$ $\pi$ pulse (making sure to add a sideband at the $1\rightarrow2$ frequency).
# + tags=["uses-hardware"]
# Gaussian pi pulse for the 1->2 transition, using the calibrated amplitude.
pi_pulse_12 = pulse_lib.gaussian(duration=drive_samples,
                                 amp=pi_amp_12,
                                 sigma=drive_sigma,
                                 name='pi_pulse_12')
# make sure this pulse is sidebanded to the 1->2 frequency
pi_pulse_12 = apply_sideband(pi_pulse_12, qubit_12_freq)
# -
# ### 2C. Build the 0, 1, 2 discriminator <a id="builddiscrim012"></a>
# Finally, we build our discriminator for the $|0\rangle$, $|1\rangle$ and $|2\rangle$ states. The procedure is analogous to section [1](#discrim01), however now we add an additional schedule for the $|2\rangle$ state.
# As a review, our three schedules are (again, recalling that our system starts in the $|0\rangle$ state):
# 1. Measure the $|0\rangle$ state directly (obtain $|0\rangle$ centroid).
# 2. Apply $0\rightarrow1$ $\pi$ pulse and then measure (obtain $|1\rangle$ centroid).
# 3. Apply $0\rightarrow1$ $\pi$ pulse, then $1\rightarrow2$ $\pi$ pulse, then measure (obtain $|2\rangle$ centroid).
# + tags=["uses-hardware"]
# Create the three schedules
# Ground state schedule: measure immediately (system starts in |0>).
zero_schedule = pulse.Schedule(name="zero schedule")
zero_schedule |= measure
# First excited state schedule: 0->1 pi pulse, then measure.
one_schedule = pulse.Schedule(name="one schedule")
one_schedule |= pi_pulse_01(drive_chan)
one_schedule |= measure << one_schedule.duration
# Second excited state schedule: 0->1 pi pulse, 1->2 pi pulse, then measure.
two_schedule = pulse.Schedule(name="two schedule")
two_schedule |= pi_pulse_01(drive_chan)
two_schedule |= pi_pulse_12(drive_chan) << two_schedule.duration
two_schedule |= measure << two_schedule.duration
# -
# We construct the program and plot the centroids in the IQ plane.
# + tags=["uses-hardware"]
# Assemble the schedules into a program
# meas_return='single' keeps every shot so the IQ clouds can be plotted.
IQ_012_program = assemble([zero_schedule, one_schedule, two_schedule],
                          backend=backend,
                          meas_level=1,
                          meas_return='single',
                          shots=NUM_SHOTS,
                          schedule_los=[{drive_chan: cal_qubit_freq}] * 3)
# + tags=["uses-hardware"]
IQ_012_job = backend.run(IQ_012_program)
# + tags=["uses-hardware"]
print(IQ_012_job.job_id())
job_monitor(IQ_012_job)
# + tags=["uses-hardware"]
# Get job data (single); split for zero, one and two
IQ_012_data = get_job_data(IQ_012_job, average=False)
zero_data = IQ_012_data[0]
one_data = IQ_012_data[1]
two_data = IQ_012_data[2]
# + tags=["uses-hardware"]
def IQ_012_plot(x_min, x_max, y_min, y_max):
    """Scatter the 0/1/2 single-shot IQ results and mark their centroids.

    Plot limits are given as arguments; the shot data is read from the
    module-level ``zero_data``, ``one_data`` and ``two_data`` arrays.
    """
    # One (shots, colour, legend label) triple per qubit state.
    state_series = (
        (zero_data, 'blue', r'$|0\rangle$'),
        (one_data, 'red', r'$|1\rangle$'),
        (two_data, 'green', r'$|2\rangle$'),
    )
    for shots, colour, label in state_series:
        plt.scatter(np.real(shots), np.imag(shots),
                    s=5, cmap='viridis', c=colour, alpha=0.5, label=label)
    # Large black dot at the average of each state's shots; np.mean of a
    # complex array averages the real and imaginary parts independently.
    for shots, _, _ in state_series:
        centroid = np.mean(shots)
        plt.scatter(np.real(centroid), np.imag(centroid),
                    s=200, cmap='viridis', c='black', alpha=1.0)
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    plt.legend()
    plt.ylabel('I [a.u.]', fontsize=15)
    plt.xlabel('Q [a.u.]', fontsize=15)
    plt.title("0-1-2 discrimination", fontsize=15)
# + tags=["uses-hardware"]
# IQ-plane axis limits, chosen by inspection of this backend's data.
x_min = -20
x_max = 10
y_min = -10
y_max = 5
IQ_012_plot(x_min, x_max, y_min, y_max)
# -
# We now observe a third centroid corresponding to the $|2\rangle$ state. (Note: If the plot looks off, rerun the notebook)
# With this data, we can build our discriminator. Again, we use `scikit-learn` and Linear Discriminant Analysis (LDA).
#
# We begin by shaping the data for LDA.
# + tags=["uses-hardware"]
# Create IQ vector (split real, imag parts)
zero_data_reshaped = reshape_complex_vec(zero_data)
one_data_reshaped = reshape_complex_vec(one_data)
two_data_reshaped = reshape_complex_vec(two_data)
IQ_012_data = np.concatenate((zero_data_reshaped, one_data_reshaped, two_data_reshaped))
print(IQ_012_data.shape) # verify IQ data shape
# -
# Next, we split our training and testing data (again, half and half). The testing data is a vector containing an array of `0`'s (for the zero schedule), `1`'s (for the one schedule) and `2`'s (for the two schedule).
# + tags=["uses-hardware"]
# construct vector w/ 0's, 1's and 2's (for testing)
state_012 = np.zeros(NUM_SHOTS) # shots gives number of experiments
state_012 = np.concatenate((state_012, np.ones(NUM_SHOTS)))
state_012 = np.concatenate((state_012, 2*np.ones(NUM_SHOTS)))
print(len(state_012))
# Shuffle and split data into training and test sets
IQ_012_train, IQ_012_test, state_012_train, state_012_test = train_test_split(IQ_012_data, state_012, test_size=0.5)
# -
# Finally, we set up our model and train it. The accuracy of our fit is printed.
# + tags=["uses-hardware"]
# Set up the LDA
LDA_012 = LinearDiscriminantAnalysis()
LDA_012.fit(IQ_012_train, state_012_train)
# + tags=["uses-hardware"]
# test on some simple data (three hand-picked IQ points)
print(LDA_012.predict([[0, 0], [-10, 0], [-15, -5]]))
# + tags=["uses-hardware"]
# Compute accuracy on the held-out half
score_012 = LDA_012.score(IQ_012_test, state_012_test)
print(score_012)
# -
# The last step is to plot the separatrix.
# + tags=["uses-hardware"]
IQ_012_plot(x_min, x_max, y_min, y_max)
separatrixPlot(LDA_012, x_min, x_max, y_min, y_max, NUM_SHOTS)
# -
# Now that we have 3 centroids, the separatrix is no longer a line, but rather a curve containing a combination of two lines. In order to discriminate between $|0\rangle$, $|1\rangle$ and $|2\rangle$ states, our model checks where the IQ point lies relative to the separatrix and classifies the point accordingly.
# ## 3. References <a id="refs"></a>
# 1. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, “Qiskit backend specifications for OpenQASM and OpenPulse experiments,” 2018, https://arxiv.org/abs/1809.03452.
# 2. Krantz, P. et al. “A Quantum Engineer’s Guide to Superconducting Qubits.” Applied Physics Reviews 6.2 (2019): 021318, https://arxiv.org/abs/1904.06560.
# 3. Scikit-learn: Machine Learning in Python, <NAME> al., JMLR 12, pp. 2825-2830, 2011, https://scikit-learn.org/stable/modules/lda_qda.html#id4.
import qiskit.tools.jupyter
# %qiskit_version_table
| content/ch-quantum-hardware/accessing_higher_energy_states.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
import pandas as pd
import numpy as np
from datetime import datetime,timedelta
from dateutil import parser
import pickle
import re
import random
import nltk
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
# # Pull tweet data from database
# Database credentials redacted.
dbname = '***'
username = '***'
# NOTE(review): create_engine (sqlalchemy) and psycopg2 are not imported in this
# notebook's import cell — confirm they are available; `engine` is also unused
# below (pd.read_sql_query uses the psycopg2 connection directly).
engine = create_engine('postgres://%s@localhost/%s'%(username,dbname))
# +
con = None
con = psycopg2.connect(database = dbname, user = username)
sql_query = """
SELECT * FROM raw_tweet_table;
"""
df = pd.read_sql_query(sql_query,con)
#df is a dataframe with columns 'created_at','text' and 'hashtags'
# -
# # Take dataframe of those with exactly one hashtag
# hashtags arrive as one space-separated string per row; split each into a list
df['hashtags'] = df['hashtags'].str.split()
# keep only tweets with exactly one hashtag, via boolean-mask indexing.
# (the original `df[[mask]]` wrapped the mask in a list, which pandas treats
# as column selection and fails on a boolean Series)
df = df[df['hashtags'].map(len) == 1]
# # Processing
#negate words between negative word and next punctuation by appending _neg
def negation_process(tweet):
    """Prefix each word between a negation word and the next punctuation
    mark with ``neg_`` (e.g. "not good." -> "not neg_good.")."""

    def _mark_scope(match):
        # Prepend neg_ to every whitespace-separated word in the matched span.
        return re.sub(r'(\s+)(\w+)', r'\1neg_\2', match.group(0))

    # A trailing period guarantees the negation scope closes even when the
    # tweet ends without punctuation; it is stripped again before returning.
    padded = tweet + '.'
    negated = re.sub(
        r'\b(?:never|no|nothing|nowhere|noone|none|not|havent|hasnt|hadnt|cant|couldnt|shouldnt|wont|wouldnt|dont|doesnt|didnt|isnt|arent|aint)\b[\w\s]+[^\w\s]',
        _mark_scope, padded, flags=re.IGNORECASE)
    return negated[:-1]
#Porter stemming
def stemming(tweet):
    """Lower-case and Porter-stem every whitespace-separated token."""
    # `stemmer` is the module-level nltk PorterStemmer instance.
    stemmed = (stemmer.stem(token.lower()) for token in tweet.split())
    return ' '.join(stemmed)
#primary text processing
def process_text(tweet_list):
processed_tweets = []
for tweet in tweet_list:
tweet = re.sub(r"(?:\@|https?\://|#)\S+", "", tweet)
tweet = tweet.replace('\'','')
#negate
tweet = negation_process(tweet)
#replace non ascii characters
tweet = re.sub(r'[^\x00-\x7F]+',' ', tweet)
tweet = tweet.replace('RT','')
tweet = tweet.replace(':','')
tweet = tweet.replace('+',' ')
tweet = tweet.replace(',','')
tweet = tweet.replace('.','')
tweet = tweet.replace('\"','')
#remove duplicate consecutive characters for standardization
tweet = re.sub(r'(\S)\1+', r'\1', tweet)
#add spaces before emotive punctuation, useful for bigrams
tweet = tweet.replace('!',' !')
tweet = tweet.replace('?',' ?')
tweet = tweet.strip()
tweet = stemming(tweet)
processed_tweets.append(tweet)
return processed_tweets
#process twitter text
processed_tweets = process_text(df['text'].tolist())
df['processed_text'] = processed_tweets
#drop duplicates after processing
#processing standardizes so drop full amount of duplicates
df = df.drop_duplicates(inplace=False, subset='processed_text')
# # Export data
#export dataframe with processed tweet data
# NOTE(review): consider `with open(...)` so the file is closed on error too.
f = open('df_processed_single_hashtag.pickle', 'wb')
pickle.dump(df[['created_at','processed_text','hashtags']], f)
f.close()
| TweetOff/ProcessSingleHashtagTwitterData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import relevant libraries
import numpy as np
import pandas as pd
# read grades.csv into a pandas dataframe & save the dataframe in a variable
# (expects grades.csv in the current working directory)
grades = pd.read_csv('grades.csv')
# display grades
grades
# display first few rows of grades
grades.head()
# +
# create a Python dictionary of Series & save in a variable named points;
# both players share the same per-game index labels.
_game_labels = ['game1', 'game2', 'game3', 'game4']
points = {
    'player1': pd.Series([15, 10, 20, 25], index=_game_labels),
    'player2': pd.Series([10, 15, 23, 27], index=_game_labels),
}
# create a pandas dataframe from points
pd.DataFrame(points)
# +
# create a Python dictionary of lists & save in a variable named sales
sales = {
    'foodTruck1': [216, 275, 203, 210, 315, 402, 380],
    'foodTruck2': [374, 90, 95, 115, 130, 150, 140],
}
# create a pandas dataframe from sales with index being day1 through day7
pd.DataFrame(sales, index=['day%d' % d for d in range(1, 8)])
# +
# create a multi-dimensional numpy array & save in a variable named passengers
passengers = np.array([
    [20, 40, 60, 80],
    [15, 30, 45, 60],
    [10, 20, 30, 40],
])
# create a pandas DataFrame from passengers
# with index being plane1 through plane3
# and columns being infants, children, adults, seniors
pd.DataFrame(
    passengers,
    index=['plane1', 'plane2', 'plane3'],
    columns=['infants', 'children', 'adults', 'seniors'],
)
# -
| Ex_Files_Python_Data_Functions/Exercise Files/05_02_pandas_dataframe.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.3
# language: julia
# name: julia-1.5
# ---
# # Model Optimization
using NbodyGradient, Plots, Optim, DistributionsAD, Distributions, LinearAlgebra, BenchmarkTools
include("likelihood.jl")
# +
BJD = 2454950.0 # reference epoch subtracted from the t0 transit times below
ets = 3e-6 # Earth to solar masses
# Central star (mass in solar masses) and two planets; I = pi/2 is edge-on.
a = Elements(m = 1.03)
b = Elements(
    m = 3.9 * ets,
    P = 13.9,
    t0 = 2454960.9753 - BJD,
    ecosϖ = 0.05,
    esinϖ = -0.026,
    I = π/2,
)
c = Elements(
    m = 7.5 * ets,
    P = 16.2,
    ecosϖ = 0.053,
    esinϖ = -0.039,
    t0 = 2454955.9132 - BJD,
    I = π/2,
)
t0 = 0.0
H = [3,1,1]
ic = ElementsIC(t0, H, a, b, c)
# -
# Integrate 100 periods of planet b with 40 steps per period.
t0 = 0.0
tmax = b.P * 100
h = b.P / 40.0
intr = Integrator(h, t0, tmax)
s = State(ic)
tt = TransitTiming(intr.tmax, ic)
intr(s,tt)
# +
# 3-minute timing uncertainty, in days; the noise draw is commented out so
# the "data" are the noiseless model transit times.
error = 3 / 1440.0
dist = Normal(0.0, error)
times = tt.tt[2,1:end-3] #.+ rand(dist, length(tt.tt[2,1:end-3]))
data = TimingData([tt.count[2]],[collect(1:length(times))],[times],[error .* ones(length(times))], 0.0);
# -
data.tt
# Evaluate the (negative) log-posterior and its gradient at the true elements.
logP(chi_squared, ∇chi_squared, Matrix(ic.elements), H, data, 0.0, intr)
# Gradient-based fit of the planet rows of the elements matrix to the
# observed transit times. The stellar mass (row 1) is held fixed at 1.03.
function optimize_times(elements0, data, intr, H)
    # Optimize transit timing model...
    # Flatten the two planet rows into the parameter vector.
    θ_init = elements0[2:end,:][:]
    # Optim fg! form: F != nothing -> return objective; G != nothing -> fill gradient.
    function loglike!(F, G, θ)
        elements = zeros(3,7)
        elements[1,1] = 1.03
        elements[2:end,:] .= reshape(θ, 2, 7)
        nll, dnll = logP(chi_squared, ∇chi_squared, elements, H, data, 0.0, intr)
        # NOTE(review): idiomatic Julia would use `G !== nothing`; `!=` works here too.
        if G != nothing
            G .= dnll
        end
        if F != nothing
            return nll
        end
    end
    opt = GradientDescent() # LBFGS()
    # Bounds
    lower = get_lower_open(elements0[2:end,:])
    upper = get_upper_open(elements0[2:end,:])
    res = Optim.optimize(Optim.only_fg!(loglike!), lower, upper, θ_init, Fminbox(opt),
        Optim.Options(show_trace=true))
    return res
end
results = optimize_times(Matrix(ic.elements), data, intr, H)
# Compare optimized elements to initial elements (rebuild the full 3x7 matrix
# with the fixed stellar mass, then difference against the truth).
elements_fit = zeros(3,7)
elements_fit[1,1] = 1.03
elements_fit[2:end,:] .= reshape(results.minimizer, 2, 7)
elements_fit .- ic.elements
# ## Misc.
# Chi-squared of the transit-timing model against `data`, re-integrating the
# system from scratch for each parameter vector θ (two planet rows, 7 elements
# each; the 1.03 stellar mass and H=3 hierarchy are hard-coded).
function chi2(θ::Vector{T}, data::Vector{T}, error::Vector{T}) where T <: Real
    # Make the model here...
    # Convert parameter vector into elements array
    elements = zeros(3,7)
    elements[1,1] = 1.03
    elements[2:end,:] .= reshape(θ, 2, 7)
    t0 = 0.0
    H = 3
    ic = ElementsIC(t0, H, elements)
    # Step size and span are tied to planet b's period (elements[2,2]).
    h = ic.elements[2,2] / 40
    tmax = ic.elements[2,2] * 101
    intr = Integrator(h, t0, tmax)
    s = State(ic)
    tt = TransitTiming(intr.tmax, ic)
    intr(s,tt)
    ## ##
    N = length(data)
    model = tt.tt[2,1:N] # Make them the same size
    chisq = 0.0
    for i in 1:N
        chisq += ((data[i] - model[i]) / error[i])^2
    end
    return chisq
end
σ = 5 / 1440.0 # 5 minutes, in days (original comment said 3 minutes)
dist = Normal(0.0, σ)
# Noise draw commented out: fit the noiseless model times.
data = tt.tt[2,1:end-4] #.+ rand(dist, length(tt.tt[2,1:end-4]))
error = ones(length(data)) .* σ
p0 = ic.elements[2:end, :][:]
# Reduced chi-squared at the truth (N - 14 free params - 1 dof).
chi2(p0, data, error) / (length(data) - 7*2 - 1)
# Gradient-free (Nelder-Mead) variant; dispatches on 3 args vs the
# 4-arg gradient-based method above.
function optimize_times(p0, data, error)
    # Optimize transit timing model...
    opt = NelderMead()
    res = Optim.optimize(θ->chi2(θ, data, error), p0, opt)
    return res
end
optimize_times(p0, data, error)
# Plot successive transit-time differences.
# NOTE(review): diff(data) has one fewer element than `error` — confirm the
# yerr length mismatch is accepted by the plotting backend.
scatter(diff(data), yerr=error)
| Optimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Calculating Charity Scores from relevant charity data on charitydata.ca (using dataframe from Data_Collection.ipyntb)
# +
#importing required libraries and setting the driver path
import pandas as pd
import numpy as np
import json
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import NoSuchElementException
# Path to the local ChromeDriver binary.
# NOTE(review): hard-coded absolute user path — breaks on any other machine;
# consider an environment variable or PATH lookup.
driver_path = "C:/Users/16475/Documents/GitHub/CharityScore.ca/chromedriver.exe"
driver = webdriver.Chrome(executable_path=driver_path)
# +
# import the url data from the dataframe, dropping the duplicate index column
# left by a previous to_csv round-trip. (The original read the CSV twice and
# discarded the first result.)
df = pd.read_csv("expense data.csv").drop(['Unnamed: 0'], axis=1)
# re-index to add a new column that will hold the scraped expense tables
df = df.reindex(df.columns.tolist() + ['Expense Table'], axis=1)
print(df.head(1))
# +
# find expense tables on charitydata.ca and add them to the dataframe as string representations of dictionaries
# Fixes vs original: range(1, len(df)+1) skipped the first row and overran the
# index on the last iteration; results were written to 'Expense_Table' while
# the frame was reindexed with 'Expense Table'.
for i in range(len(df)):
    driver.get(df.loc[df.index[i], "URL"])
    # check if url page found, else skip
    try:
        url_check = driver.find_element_by_xpath("/html/body/main/div/section[2]/div/div[2]/p").text
    except NoSuchElementException:
        url_check = ""
    if url_check == "The requested page could not be found.":
        continue
    # open expense table; skip this charity if it never appears
    try:
        WebDriverWait(driver, 15).until(EC.presence_of_element_located((By.LINK_TEXT, 'Expenses'))).click()
        expense_table = driver.find_element_by_xpath('//html/body/main/div/section[3]/div/div[2]/div[4]/div[2]/table[1]')
    except Exception:
        continue
    # create dictionary representation of expense table:
    # {header-label: [year values], row-label: [row values], ...}
    expense_dict = {}
    expense_rows = expense_table.find_elements(By.TAG_NAME, "tr")
    header_row = expense_rows.pop(0)
    headers = header_row.find_elements(By.TAG_NAME, "th")
    header_key = headers.pop(0).text
    headers = list(map(lambda x: int(x.text), headers))
    expense_dict[header_key] = headers
    for row in expense_rows:
        row_key = row.find_element(By.TAG_NAME, "th").text
        row_series = row.find_elements(By.TAG_NAME, "td")
        num_list = []
        for num in row_series:
            if num.text[0] != "(":
                num_list += [int(num.text.replace(",", ""))]
            else:
                # parenthesised accounting figures; stored as positive
                # magnitudes, matching the original behaviour
                num_list += [int(num.text[1:-1].replace(",", ""))]
        expense_dict[row_key] = num_list
    # store under the same column name created by the earlier reindex
    df.loc[df.index[i], 'Expense Table'] = str(expense_dict)
    # checkpoint after every charity so a crash loses no scraped data
    df.to_csv("expense data.csv")

# quit driver and export to csv
driver.quit()
df.to_csv("expense data.csv")
# -
| .ipynb_checkpoints/Score_Calculation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Display of Rich Output
# In Python, objects can declare their textual representation using the `__repr__` method.
# +
# Demonstrates the default textual representation inherited from `object`.
class Ball(object):
    pass

b = Ball()
b.__repr__()  # e.g. '<__main__.Ball object at 0x...>'
# -
print(b)  # print() uses __repr__ here since no __str__ is defined
# Overriding the `__repr__` method:
# +
# Same class, now with an overridden __repr__.
class Ball(object):
    def __repr__(self):
        return 'TEST'

b = Ball()
# -
print(b)  # prints TEST via the overridden __repr__
# IPython expands on this idea and allows objects to declare other, rich representations including:
#
# * HTML
# * JSON
# * PNG
# * JPEG
# * SVG
# * LaTeX
#
# A single object can declare **some or all** of these representations; all of them are handled by IPython's *display system*.
# ## Basic display imports
# The `display` function is a general purpose tool for displaying different representations of objects. Think of it as `print` for these rich representations.
from IPython.display import display
# A few points:
#
# * Calling `display` on an object will send **all** possible representations to the Notebook.
# * These representations are stored in the Notebook document.
# * In general the Notebook will use the richest available representation.
#
# If you want to display a particular representation, there are specific functions for that:
from IPython.display import (
display_pretty, display_html, display_jpeg,
display_png, display_json, display_latex, display_svg
)
# ## Images
# To work with images (JPEG, PNG) use the `Image` class.
from IPython.display import Image
# Load a local PNG and display it explicitly.
i = Image(filename='./ipython-image.png')
display(i)
# Returning an `Image` object from an expression will automatically display it:
i
# An image can also be displayed from raw data or a URL.
Image(url='http://python.org/images/python-logo.gif')
# ## HTML
# Python objects can declare HTML representations that will be displayed in the Notebook. If you have some HTML you want to display, simply use the `HTML` class.
from IPython.display import HTML
# Raw HTML source for a 2x2 table, rendered via the HTML display class.
s = """<table>
<tr>
<th>Header 1</th>
<th>Header 2</th>
</tr>
<tr>
<td>row 1, cell 1</td>
<td>row 1, cell 2</td>
</tr>
<tr>
<td>row 2, cell 1</td>
<td>row 2, cell 2</td>
</tr>
</table>"""
h = HTML(s)
display(h)
# You can also use the `%%html` cell magic to accomplish the same thing.
# + language="html"
# <table>
# <tr>
# <th>Header 1</th>
# <th>Header 2</th>
# </tr>
# <tr>
# <td>row 1, cell 1</td>
# <td>row 1, cell 2</td>
# </tr>
# <tr>
# <td>row 2, cell 1</td>
# <td>row 2, cell 2</td>
# </tr>
# </table>
# + language="html"
# <style>
# #notebook {
# background-color: skyblue;
# font-family: times new roman;
# }
# </style>
# -
# You can remove the above styling by using "Cell"$\rightarrow$"Current Output"$\rightarrow$"Clear" with that cell selected.
# ## JavaScript
# The Notebook also enables objects to declare a JavaScript representation. At first, this may seem odd as output is inherently visual and JavaScript is a programming language. However, this opens the door for rich output that leverages the full power of JavaScript and associated libraries such as [d3.js](http://d3js.org) for output.
from IPython.display import Javascript
# Pass a string of JavaScript source code to the `JavaScript` object and then display it.
# Wrap a JavaScript snippet and display it (runs in the browser).
js = Javascript('alert("hi")');
display(js)
# The same thing can be accomplished using the `%%javascript` cell magic:
# + language="javascript"
#
# alert("hi");
# -
# Here is a more complicated example that loads `d3.js` from a CDN, uses the `%%html` magic to load CSS styles onto the page and then runs one of the `d3.js` examples.
Javascript(
"""$.getScript('https://cdnjs.cloudflare.com/ajax/libs/d3/3.2.2/d3.v3.min.js')"""
)
# + language="html"
# <style type="text/css">
#
# circle {
# fill: rgb(31, 119, 180);
# fill-opacity: .25;
# stroke: rgb(31, 119, 180);
# stroke-width: 1px;
# }
#
# .leaf circle {
# fill: #ff7f0e;
# fill-opacity: 1;
# }
#
# text {
# font: 10px sans-serif;
# }
#
# </style>
# + language="javascript"
#
# // element is the jQuery element we will append to
# var e = element.get(0);
#
# var diameter = 600,
# format = d3.format(",d");
#
# var pack = d3.layout.pack()
# .size([diameter - 4, diameter - 4])
# .value(function(d) { return d.size; });
#
# var svg = d3.select(e).append("svg")
# .attr("width", diameter)
# .attr("height", diameter)
# .append("g")
# .attr("transform", "translate(2,2)");
#
# d3.json("./flare.json", function(error, root) {
# var node = svg.datum(root).selectAll(".node")
# .data(pack.nodes)
# .enter().append("g")
# .attr("class", function(d) { return d.children ? "node" : "leaf node"; })
# .attr("transform", function(d) { return "translate(" + d.x + "," + d.y + ")"; });
#
# node.append("title")
# .text(function(d) { return d.name + (d.children ? "" : ": " + format(d.size)); });
#
# node.append("circle")
# .attr("r", function(d) { return d.r; });
#
# node.filter(function(d) { return !d.children; }).append("text")
# .attr("dy", ".3em")
# .style("text-anchor", "middle")
# .text(function(d) { return d.name.substring(0, d.r / 3); });
# });
#
# d3.select(self.frameElement).style("height", diameter + "px");
# -
# ## Audio
# IPython makes it easy to work with sounds interactively. The `Audio` display class allows you to create an audio control that is embedded in the Notebook. The interface is analogous to the interface of the `Image` display class. All audio formats supported by the browser can be used. Note that no single format is presently supported in all browsers.
from IPython.display import Audio
Audio("./scrubjay.mp3")
# A NumPy array can be converted to audio. The `Audio` class normalizes and encodes the data and embeds the resulting audio in the Notebook.
#
# For instance, when two sine waves with almost the same frequency are superimposed a phenomena known as [beats](https://en.wikipedia.org/wiki/Beat_%28acoustics%29) occur:
# +
import numpy as np
max_time = 3  # NOTE(review): unused — `L` below is the duration actually used
f1 = 120.0    # first tone frequency [Hz]
f2 = 124.0    # slightly detuned second tone -> 4 Hz beat
rate = 8000.0 # sample rate [samples/s]
L = 3         # duration [s]
# np.linspace requires an integer sample count; the original passed the float
# rate*L, which raises TypeError on modern NumPy. Cast explicitly.
times = np.linspace(0, L, int(rate*L))
signal = np.sin(2*np.pi*f1*times) + np.sin(2*np.pi*f2*times)
Audio(data=signal, rate=rate)
# -
# ## Video
# More exotic objects can also be displayed, as long as their representation supports the IPython display protocol. For example, videos hosted externally on YouTube are easy to load:
from IPython.display import YouTubeVideo
# Embed a YouTube video by its video id.
YouTubeVideo('sjfsUzECqK0')
# ## External sites
# You can even embed an entire page from another site in an iframe; for example this is IPython's home page:
from IPython.display import IFrame
IFrame('https://ipython.org', width='100%', height=350)
# ## Links to local files
# IPython provides builtin display classes for generating links to local files. Create a link to a single file using the `FileLink` object:
from IPython.display import FileLink, FileLinks
FileLink('../Visualization/Matplotlib.ipynb')
# Alternatively, to generate links to all of the files in a directory, use the `FileLinks` object, passing `'.'` to indicate that we want links generated for the current working directory. Note that if there were other directories under the current directory, `FileLinks` would work in a recursive manner creating links to files in all sub-directories as well.
FileLinks('./')
# ## Rich output and nbviewer
# Much of the power of the Notebook is that it enables users to share notebooks with each other using http://nbviewer.ipython.org, without installing IPython locally. As of IPython 2.0, notebooks rendered on nbviewer will display all output, including HTML and JavaScript. Furthermore, to provide a consistent JavaScript environment on the live Notebook and nbviewer, the following JavaScript libraries are loaded onto the nbviewer page, *before* the notebook and its output is displayed:
#
# * [jQuery](http://jquery.com/)
# * [RequireJS](http://requirejs.org/)
#
# Libraries such as [mpld3](http://mpld3.github.io/) use these capabilities to generate interactive visualizations that work on nbviewer.
| days/day08/Display.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from matplotlib import pyplot as plt
# +
# state dimensionality
s = 2
# encoder matrix: a random change of basis between state and observation space
E = np.random.randn(s, s)
# state transition matrix: diagonal with random coefficients.
# np.diag builds exactly the matrix the original zeros-plus-fancy-indexing
# assignment (A_s[range(s), range(s)] = coefficients) produced.
coefficients = np.random.randn(s)
A_s = np.diag(coefficients)
# Alternative experiment kept for reference: scatter the coefficients onto a
# random permutation of columns instead of the diagonal:
#   indices = list(range(s)); np.random.shuffle(indices)
#   A_s = np.zeros((s, s)); A_s[range(s), indices] = coefficients
# +
# observation transition matrix: the state dynamics expressed in the
# observation basis. W_o = E @ A_s @ E^{-1} is a similarity transform, so
# W_o shares its eigenvalues with A_s.
W_o = E @ A_s @ np.linalg.inv(E)
# showing the observation transition matrix and the state transition matrix
# side by side; the similarity transform generally fills in the off-diagonal
# entries even though the spectra agree.
plt.figure(figsize=(5, 5))
plt.subplot(1, 2, 1)
plt.title('Obs. trans. matrix')
plt.imshow(W_o, cmap='gray')
plt.subplot(1, 2, 2)
plt.title('State trans. matrix')
plt.imshow(A_s, cmap='gray')
plt.show()
# -
| tf_agents/permutation_matrix_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
file_to_load = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame.
# Columns used downstream: SN, Age, Gender, Item ID, Item Name, Price, Purchase ID.
purchase_data = pd.read_csv(file_to_load)
purchase_data.head()
# +
# Total number of unique players, identified by screen name ("SN").
# Series.nunique() is the idiomatic form of len(Series.unique()); it also
# excludes NaN, which a valid SN column should not contain anyway.
player_count = purchase_data["SN"].nunique()
player_count
# One-row frame purely for notebook display.
player_count_output = pd.DataFrame({"Total Players": [player_count]})
player_count_output
# +
# Number of distinct items that appear in the purchase log.
unique_items_count = len(purchase_data["Item ID"].unique())
unique_items_count
# Mean price across all purchase rows.
average_price = purchase_data["Price"].mean()
average_price
# Number of purchases (Purchase ID appears to be unique per transaction -- TODO confirm).
total_purchase = len(purchase_data["Purchase ID"].unique())
total_purchase
# Gross revenue across all purchases.
total_revenue = purchase_data["Price"].sum()
total_revenue
# One-row summary table; the explicit column list fixes the display order.
purchasing_analysis = pd.DataFrame([{
    "Number of Unique Items": unique_items_count,
    "Average Price": average_price,
    "Number of Purchases": total_purchase,
    "Total Revenue": total_revenue
}], columns=["Number of Unique Items", "Average Price", "Number of Purchases", "Total Revenue"])
purchasing_analysis
# Currency formatting for display (converts the numeric columns to strings).
purchasing_analysis["Average Price"] = purchasing_analysis["Average Price"].map("${0:.2f}".format)
purchasing_analysis["Total Revenue"] = purchasing_analysis["Total Revenue"].map("${0:,.2f}".format)
purchasing_analysis
# +
# Gender demographics: unique players per gender, counted once by screen name.
# One groupby replaces the original's three near-identical filter/len blocks.
gender_counts = purchase_data.groupby("Gender")["SN"].nunique()
male_count = gender_counts["Male"]
female_count = gender_counts["Female"]
other_count = gender_counts["Other / Non-Disclosed"]
# Share of the total player base, pre-formatted for display.
male_percent = "{:.2f}%".format(male_count / player_count * 100)
female_percent = "{:.2f}%".format(female_count / player_count * 100)
other_percent = "{:.2f}%".format(other_count / player_count * 100)
# Rows listed explicitly to keep the original display order (Male first).
gender_demographics_table = pd.DataFrame([
    {"Gender": "Male", "Total Count": male_count, "Percentage of Players": male_percent},
    {"Gender": "Female", "Total Count": female_count, "Percentage of Players": female_percent},
    {"Gender": "Other / Non-Disclosed", "Total Count": other_count, "Percentage of Players": other_percent},
], columns=["Gender", "Total Count", "Percentage of Players"]).set_index("Gender")
gender_demographics_table
# +
# Purchasing analysis by gender, computed once via groupby instead of three
# copy/pasted per-gender sections.
gender_group = purchase_data.groupby("Gender")
# .size() counts all rows per gender, matching len() of each filtered frame.
gender_purchase_count = gender_group.size()
gender_avg_price = gender_group["Price"].mean()
gender_total_value = gender_group["Price"].sum()
# Unique players per gender, for the per-person average.
gender_player_count = gender_group["SN"].nunique()
gender_avg_per_person = gender_total_value / gender_player_count
# FIX: every currency column now uses the same thousands-separated format;
# the original applied "${:,.2f}" only to the male total and "${:.2f}"
# everywhere else, so large female/other totals would render without commas.
gender_purchasing_analysis_table = pd.DataFrame({
    "Purchase Count": gender_purchase_count,
    "Average Purchase Price": gender_avg_price.map("${:,.2f}".format),
    "Total Purchase Value": gender_total_value.map("${:,.2f}".format),
    "Avg Total Purchase per Person": gender_avg_per_person.map("${:,.2f}".format),
})
# groupby sorts genders alphabetically (Female, Male, Other / Non-Disclosed),
# matching the original table's row order; the index is already named Gender.
gender_purchasing_analysis_table
# +
# Age bins (right-inclusive edges). The final edge of 46 caps the "40+"
# bucket, so any age above 46 would fall outside every bin (NaN) --
# presumably the data's maximum age is <= 46; TODO confirm against the CSV.
age_bins = [0, 9, 14, 19, 24, 29, 34, 39, 46]
groups_names = ["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40+"]
# Label every purchase row with its age bracket.
purchase_data["Age Group"] = pd.cut(purchase_data["Age"], bins=age_bins, labels=groups_names)
purchase_data
age_group = purchase_data.groupby("Age Group")
# Unique players per bracket (each player counted once, by screen name).
total_count_age = age_group["SN"].nunique()
percentage_by_age = round(total_count_age / player_count * 100,2)
age_demographics_table = pd.DataFrame({
    "Total Count": total_count_age,
    "Percentage of Players": percentage_by_age
})
# Percentage formatting for display.
age_demographics_table["Percentage of Players"] = age_demographics_table["Percentage of Players"].map("{0:,.2f}%".format)
age_demographics_table
# +
# Purchasing analysis by age group.
# FIX: the original cell declared an unused `bins` list and re-cut the
# "Age Group" column identically to the previous cell (it even passed the
# previous cell's `age_bins`, not `bins`); both lines were dead code and are
# removed. `age_group` and `total_count_age` from the previous cell are
# reused directly.
# Purchases per bracket (counts every purchase row, not unique players).
age_purchase_count = age_group["SN"].count()
avg_age_purchase_price = round(age_group["Price"].mean(),2)
total_age_purchase_value = round(age_group["Price"].sum(),2)
# Average spend per unique player in each bracket.
avg_total_age_purchase_person = round(total_age_purchase_value / total_count_age, 2)
age_purchasing_analysis_table = pd.DataFrame({
    "Purchase Count": age_purchase_count,
    "Average Purchase Price": avg_age_purchase_price,
    "Total Purchase Value": total_age_purchase_value,
    "Avg Total Purchase per Person": avg_total_age_purchase_person
})
# Currency formatting for display.
age_purchasing_analysis_table["Average Purchase Price"] = age_purchasing_analysis_table["Average Purchase Price"].map("${0:,.2f}".format)
age_purchasing_analysis_table["Total Purchase Value"] = age_purchasing_analysis_table["Total Purchase Value"].map("${0:,.2f}".format)
age_purchasing_analysis_table["Avg Total Purchase per Person"] = age_purchasing_analysis_table["Avg Total Purchase per Person"].map("${0:,.2f}".format)
age_purchasing_analysis_table
# +
# Top Spenders: aggregate every purchase by screen name, then show the five
# players with the highest total spend.
top_spenders = purchase_data.groupby("SN")
spender_purchase_count = top_spenders["Purchase ID"].count()
average_spender_purchase_price = top_spenders["Price"].mean().round(2)
total_spender_purchase_value = top_spenders["Price"].sum()
top_spenders_table = pd.DataFrame({
    "Purchase Count": spender_purchase_count,
    "Average Purchase Price": average_spender_purchase_price,
    "Total Purchase Value": total_spender_purchase_value
})
sort_top_spenders = top_spenders_table.sort_values("Total Purchase Value", ascending=False).head()
# Currency formatting for display.
for money_col in ("Average Purchase Price", "Total Purchase Value"):
    sort_top_spenders[money_col] = sort_top_spenders[money_col].astype(float).map("${:,.2f}".format)
sort_top_spenders
# +
# Most Popular Items (ranked by number of purchases).
# FIX: renamed the aggregation variables -- the original bound the per-item
# *total* to `item_price` and the *mean price* to `item_purchase_value`,
# i.e. the two names had their meanings swapped.
popular_items_list = purchase_data[["Item ID", "Item Name", "Price"]]
popular_items = popular_items_list.groupby(["Item ID","Item Name"])
item_purchase_count = popular_items["Price"].count()
total_item_purchase_value = popular_items["Price"].sum()
# Mean observed price per item (total value divided by purchase count).
average_item_price = total_item_purchase_value / item_purchase_count
most_popular_items = pd.DataFrame({
    "Purchase Count": item_purchase_count,
    "Item Price": average_item_price,
    "Total Purchase Value": total_item_purchase_value
})
popular_items_formatted = most_popular_items.sort_values(["Purchase Count"], ascending=False).head()
# Currency formatting for display.
popular_items_formatted["Item Price"] = popular_items_formatted["Item Price"].astype(float).map("${:,.2f}".format)
popular_items_formatted["Total Purchase Value"] = popular_items_formatted["Total Purchase Value"].astype(float).map("${:,.2f}".format)
popular_items_formatted
# +
# Most Profitable Items: the same per-item table, ranked by total purchase
# value instead of purchase count.
popular_items = most_popular_items.sort_values("Total Purchase Value", ascending=False).head()
# Currency formatting for display.
for money_col in ("Item Price", "Total Purchase Value"):
    popular_items[money_col] = popular_items[money_col].astype(float).map("${:,.2f}".format)
popular_items
# -
| HeroesOfPymoli/HeroesOfPymoli.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Creating Custom Feature Maps for Quantum Support Vector Machines
#
# Support vector machines (SVM) address the problem of supervised learning through the construction of a classifier. Havlicek *et al*. proposed two strategies to design a quantum SVM, namely the Quantum Kernel Estimator and the Quantum Variational Classifier. Both of these strategies use data that is provided classically and encodes it in the quantum state space through a quantum feature map [1]. The choice of which feature map to use is important and may depend on the given dataset we want to classify. In this tutorial, we show how to configure new feature maps in Aqua and explore their impact on the accuracy of the quantum classifier.
#
# Aqua provides several options for customizing the quantum feature map. In particular, there are four main parameters that can be used for model selection: the feature map circuit depth, the data map function for encoding the classical data, the quantum gate set and the order of expansion. We will go through each of these parameters in this tutorial, but before getting started, let us review the main concepts of the quantum feature map discussed in [1].
#
# [1] Havlicek _et al_. Nature **567**, 209-212 (2019). https://www.nature.com/articles/s41586-019-0980-2, https://arxiv.org/abs/1804.11326
#
# ### Review of the Quantum Feature Map
#
# A quantum feature map nonlinearly maps a classical datum **x** to a quantum state $|\Phi(\mathbf{x})\rangle\langle\Phi(\mathbf{x})|$, a vector in the Hilbert space of density matrices. Support vector machine classifiers find a hyperplane separating each vector $|\Phi(\mathbf{x}_i)\rangle\langle\Phi(\mathbf{x}_i)|$ depending on its label, supported by a reduced amount of vectors (the so-called support vectors). A key element of the feature map is not only the use of quantum state space as a feature space but also the way data are mapped into this high dimensional space.
#
# Constructing feature maps based on quantum circuits that are hard to simulate classically is an important step towards obtaining a quantum advantage over classical approaches. The authors of [1] proposed a family of feature maps that is conjectured to be hard to simulate classically and that can be implemented as short-depth circuits on near-term quantum devices. The quantum feature map of depth $d$ is implemented by the unitary operator
#
# $$ \mathcal{U}_{\Phi(\mathbf{x})}=\prod_d U_{\Phi(\mathbf{x})}H^{\otimes n},\ U_{\Phi(\mathbf{x})}=\exp\left(i\sum_{S\subseteq[n]}\phi_S(\mathbf{x})\prod_{k\in S} P_k\right), $$
#
# which contains layers of Hadamard gates interleaved with entangling blocks encoding the classical data as shown in circuit diagram below for $d=2$.
#
# <img src="images/uphi.PNG" width="400" />
#
# The number of qubits $n$ in the quantum circuit is equal to the dimensionality of the classical data $\mathbf{x}$, which are encoded through the coefficients $\phi_S(\mathbf{x})$, where $S \subseteq[n] = \{1, \ldots, n \}$. We call the $r$-th order expansion the feature map of this circuit family when $|S|\leq r$. In Aqua, the default is the second order expansion $|S|\leq 2$ used in [1], which gives $n$ singletons $S=\{i\}$ and, depending on the connectivity graph of the quantum device, up to $\frac{n(n-1)}{2}$ couples to encode non-linear interactions. The greater the upper bound $r$, the more interactions will be taken into account.
#
# Only contributions from $Z$ and $ZZ$ gates in the entangling blocks are considered in [1]. In general, the blocks can be expressed in terms of the Pauli gates $P_k \in \{\mathbb{1}_k, X_k, Y_k, Z_k \}$.
#
# In Aqua, the circuit depth $d$, coefficients $\phi_S$, expansion order $r$, and gates $P_k$ are mutable for both classification algorithms (Quantum Variational Classifier and Quantum Kernel Estimator). As discussed in [1], the depth $d=1$ circuit can be efficiently simulated classically by uniform sampling, while the $d=2$ variant is conjectured to be hard to simulate classically.
# ### Programming the Quantum Feature Map
#
# We will now see how to configure quantum feature maps in Aqua by modifying the circuit depth $d$, data map function $\phi_S$, expansion order $r$, and gates $P_k$. Documentation on the quantum feature maps in Aqua can be found at https://qiskit.org/documentation/aqua/feature_maps.html. To configure and compare different feature maps, we will use synthetic data from `datasets.py`, which is generated by the `SecondOrderExpansion` feature map with default settings. As a result, we expect high classification accuracy when training the model with this same feature map.
#
# +
import numpy as np
import matplotlib.pyplot as plt
import functools
from qiskit import BasicAer
from qiskit.aqua import QuantumInstance
from qiskit.aqua.components.feature_maps import SecondOrderExpansion, FirstOrderExpansion, PauliExpansion, self_product
from qiskit.aqua.algorithms import QSVM
from qiskit.ml.datasets import ad_hoc_data
# +
# Generate synthetic training and test sets from the SecondOrderExpansion quantum feature map
feature_dim = 2
sample_Total, training_dataset, test_dataset, class_labels = ad_hoc_data(training_size=20, test_size=10,
                                                                         n=feature_dim, gap=0.3,
                                                                         plot_data=False)
# Using the statevector simulator (exact amplitudes, no shot noise)
backend = BasicAer.get_backend('statevector_simulator')
# Fixed seeds so simulation and transpilation are reproducible across runs.
random_seed = 10598
quantum_instance = QuantumInstance(backend, seed_simulator=random_seed, seed_transpiler=random_seed)
# -
# With this synthetic data, we will use the Quantum Kernel Estimator to test different feature maps, starting with a first order expansion of the feature map discussed in [1]. From there, we will explore more complex feature maps with higher order expansions and custom functions to map the classical data.
# #### 1. First Order Diagonal Expansion
#
#
# A first order diagonal expansion is implemented using the `FirstOrderExpansion` feature map where $|S|=1$. The resulting circuit contains no interactions between features of the encoded data, and therefore no entanglement. The feature map can take the following inputs:
#
# - `feature_dimension`: dimensionality of the classical data (equal to the number of required qubits)
# - `depth`: number of times $d$ to repeat the feature map circuit
# - `data_map_func`: function $\phi_S(\mathbf{x})$ encoding the classical data.
#
# The default setting `data_map_func = self_product` for the `FirstOrderExpansion` has $S = \{i\}$ and is given by
#
# $$\phi_S:x\mapsto x_i.$$
#
# +
# Generate the feature map: first order expansion (|S| = 1), so no
# entangling gates; depth=2 repeats the encoding circuit twice.
feature_map = FirstOrderExpansion(feature_dimension=feature_dim, depth=2)
# Run the Quantum Kernel Estimator and classify the test data
qsvm = QSVM(feature_map=feature_map, training_dataset=training_dataset, test_dataset=test_dataset)
result = qsvm.run(quantum_instance)
print("testing success ratio: ", result['testing_accuracy'])
# -
# We see that the first order expansion feature map yields poor classification accuracy on data generated to be separable by the second order expansion.
# #### 2. Second Order Diagonal Expansion
#
# The `SecondOrderExpansion` feature map allows $|S|\leq 2$, so interactions in the data will be encoded in the feature map according to the connectivity graph and the classical data map. `SecondOrderExpansion` with default parameters is equivalent to the feature map described in [1] and can take the additional inputs:
#
# - `entangler_map`: encodes qubit connectivity (default `None` uses a precomputed connectivity graph according to `entanglement`)
# - `entanglement`: generates connectivity `'full'` or `'linear'` if `entangler_map` not provided (default value `'full'` indicates a complete connectivity graph of $\frac{n(n-1)}{2}$ interactions)
#
# The default setting for `data_map_func` in `SecondOrderExpansion` is given by
#
# $$\phi_S:x\mapsto \Bigg\{\begin{array}{ll}
# x_i & \mbox{if}\ S=\{i\} \\
# (\pi-x_i)(\pi-x_j) & \mbox{if}\ S=\{i,j\}
# \end{array}$$.
#
# +
# Second order expansion (|S| <= 2) with default data map -- the same
# feature map family that generated the synthetic dataset above.
feature_map = SecondOrderExpansion(feature_dimension=feature_dim, depth=2)
qsvm = QSVM(feature_map=feature_map, training_dataset=training_dataset, test_dataset=test_dataset)
result = qsvm.run(quantum_instance)
print("testing success ratio: ", result['testing_accuracy'])
# -
# As expected, the second order feature map yields high test accuracy on this dataset.
# #### 3. Second Order Diagonal Expansion with Custom Data Map
#
# Instead of using the default data map $\phi_S(\mathbf{x})$ in Aqua, we can encode the classical data using custom functions. For example, we will create the following map for our data (shown for $|S| \le 2$, but defined similarly for higher order terms):
#
# $$\phi_S:x\mapsto \Bigg\{\begin{array}{ll}
# x_i & \mbox{if}\ S=\{i\} \\
# \sin(\pi-x_i)\sin(\pi-x_j) & \mbox{if}\ S=\{i,j\}
# \end{array}$$
def custom_data_map_func(x):
    """Custom data-map function from R^n to R.

    A single feature is returned unchanged; for multiple features the
    product of sin(pi - x_i) over all features is returned.

    Args:
        x (np.ndarray): 1-D array of classical features (the data).

    Returns:
        double: the mapped value
    """
    if len(x) == 1:
        return x[0]
    sines = np.sin(np.pi - x)
    mapped = sines[0]
    for term in sines[1:]:
        mapped = mapped * term
    return mapped
# Let us now test this custom data map on the synthetic dataset.
# +
# Second order expansion, but with the custom sine-based data map above
# replacing the default (pi - x_i)(pi - x_j) pair encoding.
feature_map = SecondOrderExpansion(feature_dimension=feature_dim, depth=2, data_map_func=custom_data_map_func)
qsvm = QSVM(feature_map=feature_map, training_dataset=training_dataset, test_dataset=test_dataset)
result = qsvm.run(quantum_instance)
print("testing success ratio: ", result['testing_accuracy'])
# -
# We see that this choice for the data map function reduced the accuracy of the model.
# #### 4. Second Order Pauli Expansion
#
# For some applications, we may want to consider a more general form of the feature map. One way to generalize is to use `PauliExpansion` and specify a set of Pauli gates instead of using the default $Z$ gates. This feature map has the same parameters as `FirstOrderExpansion` and `SecondOrderExpansion` such as `depth` and `data_map_function` along with an additional `paulis` parameter to change the gate set. This parameter is a list of strings, each representing the desired Pauli gate(s). The default value is `['Z', 'ZZ']`, which is equivalent to `SecondOrderExpansion`.
#
#
# Each string in `paulis` is implemented one at a time for each layer in the depth $d$ feature map circuit. A single character, for example `'Z'`, adds one layer of the corresponding single-qubit gates, while terms such as `'ZZ'` or `'XY'` add a layer of corresponding two-qubit entangling gates for each qubit pair available.
#
# For example, the choice `paulis = ['Z', 'Y', 'ZZ']` generates a quantum feature map of the form
#
# $$\mathcal{U}_{\Phi(\mathbf{x})} = \left( \exp\left(i\sum_{jk} \phi_{\{j,k\}}(\mathbf{x}) \, Z_j \otimes Z_k\right) \, \exp\left(i\sum_{j} \phi_{\{j\}}(\mathbf{x}) \, Y_j\right) \, \exp\left(i\sum_j \phi_{\{j\}}(\mathbf{x}) \, Z_j\right) \, H^{\otimes n} \right)^d.$$
#
# The depth $d=1$ version of this quantum circuit is shown in the figure below for $n=2$ qubits.
#
# <br>
# <img src="images/depth1.PNG" width="400"/>
# <br>
#
# The circuit begins with a layer of Hadamard gates $H^{\otimes n}$ followed by a layer of single-qubit $A = e^{i\phi_{\{j\}}(\mathbf{x})Z_j}$ gates and a layer of $B = e^{i\phi_{\{j\}}(\mathbf{x}) \, Y_j}$ gates. The $A$ and $B$ gates are parametrized by the same set of angles $\phi_{\{j\}}(\mathbf{x})$ but around different axes. The diagonal entangling gate $e^{i \phi_{\{0,1\}}(\mathbf{x}) \, Z_0 \otimes Z_1}$ is parametrized by an angle $\phi_{\{0,1\}}(\mathbf{x})$ and can be implemented using two controlled-NOT gates and one $A'=e^{i\phi_{\{0,1\}}(x)\, Z_1}$ gate as shown in the figure.
#
# As a comparison, `paulis = ['Z', 'ZZ']` creates the same circuit as above but without the $B$ gates, while `paulis = ['Z', 'YY']` creates a circuit with a layer of $A$ gates followed by a layer of entangling gates $e^{i \phi_{\{0,1\}}(\mathbf{x}) \, Y_0 \otimes Y_1}$.
#
# Below, we test the `PauliExpansion` with `paulis=['Z', 'Y', 'ZZ']`.
# +
# Pauli expansion with an extra single-qubit Y layer on top of the default
# ['Z', 'ZZ'] gate set.
feature_map = PauliExpansion(feature_dimension=feature_dim, depth=2, paulis = ['Z','Y','ZZ'])
qsvm = QSVM(feature_map=feature_map, training_dataset=training_dataset, test_dataset=test_dataset)
result = qsvm.run(quantum_instance)
print("testing success ratio: ", result['testing_accuracy'])
# -
# #### 5. Third Order Pauli Expansion with Custom Data Map
#
# Third order or higher expansions can be configured using `PauliExpansion`. For example, assuming the classical data has dimensionality of at least three and we have access to three qubits, `paulis = ['Y', 'Z', 'ZZ', 'ZZZ']` generates a feature map according to the previously mentioned rule, with $|S|\leq 3$.
#
# Suppose we want to classify data with three features using a third order expansion, a custom data map, and a circuit of depth $d=2$. We can do this with the following code in Aqua.
# Three features -> three qubits, so third-order ('ZZZ') terms are possible.
feature_dim = 3
sample_Total_b, training_dataset_b, test_dataset_b, class_labels = ad_hoc_data(training_size=20, test_size=10,
                                                                               n=feature_dim, gap=0.3,
                                                                               plot_data=False)
# +
# Third order expansion (|S| <= 3) combined with the custom data map.
feature_map = PauliExpansion(feature_dimension=feature_dim, depth=2,
                             paulis = ['Y','Z','ZZ','ZZZ'], data_map_func=custom_data_map_func)
qsvm = QSVM(feature_map=feature_map, training_dataset=training_dataset_b, test_dataset=test_dataset_b)
result = qsvm.run(quantum_instance)
print("testing success ratio: ", result['testing_accuracy'])
# -
# The qubit connectivity is `'full'` by default, so each layer of this depth $d=2$ circuit will contain the sequence:
#
# - One layer of $B = e^{i\phi_{\{j\}}(\mathbf{x})\,Y_j}$ gates followed by one layer of $A = e^{i\phi_{\{j\}}(\mathbf{x})\,Z_j}$ gates
# - One layer containing a $ZZ$ entangler $e^{i \phi_{\{j,k\}}(\mathbf{x}) \,Z_j \otimes Z_k}$ for each pair of qubits $(0,1),\ (1,2),\ (0,2)$
# - One layer containing a $ZZZ$ entangler $e^{i\phi_{\{0,1,2 \}}(x)\,Z_0 \otimes Z_1 \otimes Z_2}$ where $\phi_{\{jkl\}} = \sin(\pi-x_j)\sin(\pi-x_k)\sin(\pi-x_l)$
# ### Building New Feature Maps
#
# In this tutorial, we have seen how to generate feature maps from the circuit family described in [1]. To explore new circuit families, we can create a new class implementing the class `FeatureMap`, and its method `construct_circuit`, and the new feature map will be pluggable in any Aqua component requiring a feature map. More information on adding new feature maps can be found in the documentation https://qiskit.org/documentation/aqua/feature_maps.html.
#
# As an example to illustrate the process, below we show a general custom feature map class with the circuit construction method that creates a quantum circuit consisting of successive layers of $R_X$ gates and $ZZ$ gates.
# +
from qiskit.aqua.components.feature_maps import FeatureMap
from qiskit import QuantumCircuit, QuantumRegister
class CustomFeatureMap(FeatureMap):
    """Mapping data with a custom feature map.

    Each repetition applies one layer of RX rotations (one per feature)
    followed by diagonal two-qubit entanglers over the configured pairs.
    """

    def __init__(self, feature_dimension, depth=2, entangler_map=None):
        """
        Args:
            feature_dimension (int): number of features
            depth (int): the number of repeated circuits
            entangler_map (list[list]): describes the connectivity of qubits; each
                inner list is [source, target], or None for full entanglement.
                Note that the order of the list is the order of applying the
                two-qubit gate.
        """
        self._feature_dimension = feature_dimension
        # One qubit per classical feature.
        self._num_qubits = feature_dimension
        self._depth = depth
        # BUG FIX: the original assigned None unconditionally, silently
        # discarding any caller-supplied entangler_map.
        self._entangler_map = entangler_map
        if self._entangler_map is None:
            # Default: full connectivity -- every pair (i, j) with i < j.
            self._entangler_map = [[i, j]
                                   for i in range(self._feature_dimension)
                                   for j in range(i + 1, self._feature_dimension)]

    def construct_circuit(self, x, qr, inverse=False):
        """Construct the feature map circuit.

        Args:
            x (numpy.ndarray): 1-D to-be-transformed data.
            qr (QuantumRegister): the QuantumRegister object for the circuit.
            inverse (bool): whether or not to invert the circuit.

        Returns:
            QuantumCircuit: a quantum circuit transforming data x.
        """
        qc = QuantumCircuit(qr)
        for _ in range(self._depth):
            # Single-qubit encoding of each feature as an RX angle.
            for i in range(self._feature_dimension):
                qc.rx(x[i], qr[i])
            # Diagonal entangler encoding the pairwise product x_i * x_j.
            for source, target in self._entangler_map:
                qc.cx(qr[source], qr[target])
                qc.u1(x[source] * x[target], qr[target])
                qc.cx(qr[source], qr[target])
        if inverse:
            # BUG FIX: QuantumCircuit.inverse() returns a NEW circuit; the
            # original discarded the result and returned the forward circuit.
            qc = qc.inverse()
        return qc
# +
# Evaluate the hand-rolled feature map with the Quantum Kernel Estimator.
feature_map = CustomFeatureMap(feature_dimension=2, depth=2)
qsvm = QSVM(feature_map=feature_map, training_dataset=training_dataset, test_dataset=test_dataset)
result = qsvm.run(quantum_instance)
print("testing success ratio: ", result['testing_accuracy'])
| machine_learning/custom_feature_map.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Standardizing your own data
# You may already have your own data, and you would like to link the odorants you've used in one dataset with those from other datasets, or simply be able to do analyses that require your odorants to be well-described or featurized.
# !pip install -q pyrfume # Install pyrfume if it is not already installed
import pandas as pd
import pyrfume
# *Pyrfume* operates under the principle that the proper identifier for a single odorant molecule (e.g. d-Limonene) is the PubChem compound ID ([440917](https://pubchem.ncbi.nlm.nih.gov/compound/440917)), and for a single (known) mixture (e.g. light mineral oil) it is the PubChem substance ID ([402315722](https://pubchem.ncbi.nlm.nih.gov/substance/402315722)).
# - A PubChem compound ID uniquely identifiers a molecular structure (unlike a CAS registry number).
# - A given structure resolves to only one PubChem ID (unlike a SMILES string which depends on implementation).
# - [PubChem](https://pubchem.ncbi.nlm.nih.gov/) itself is indexed by these IDs and provides a wealth of additional records covering experimental data, computable properties, safety information, and other externally linked data.
#
# In order to get access to all of this information, and to link the *same molecule* across datasets, the first step is to obtain PubChem IDs (henceforth, CIDs) for the molecules in question.
names = ['d-limonene', '98-86-2', '(+)-carvone', 'CCCCCC=O', 'GXANMBISFKBPEX-ARJAWSKDSA-N']
# Above we have 5 different molecules, represented with a mix of names (with different annotations), CAS numbers, SMILES strings, and InChiKeys. Your data may use one of these formats, or a mix of them, or some other format entirely. The [PubChem exchange identifier](https://pubchem.ncbi.nlm.nih.gov/idexchange/idexchange.cgi) service can do a good job of converting between (some of) these formats, or identifying potential CIDs. Pyrfume does the extra work of auto-identifying the current identifier, checking for alternative conversions, and providing information about names that did not match or had multiple matches.
from pyrfume import get_cids
cids = get_cids(names)
# The process above can be a little bit slow (resolving only a few identifiers per second) because the PubChem database itself is not indexed by most of these (only CIDs and InChiKeys). Still, it returns a dictionary of unique identifiers (CIDs) for each original identifier:
cids
# Which looks a bit nicer as a Pandas series
cids = pd.Series(cids)
cids
# Now that you have unique identifiers, you can access a lot more information:
from pyrfume import from_cids
info = from_cids(cids.values)
# That part was quite fast and scales very well, because PubChem is indexed by CID. Pyrfume runs this in batches of 100 CIDs, and each batch takes about 1 second.
molecules = pd.DataFrame(info).set_index('CID')
molecules
# The above contains the original set of molecules, indexed by CID, but also containing some other useful identifiers that (unlike CAS or InChiKey) actually tell you something about the molecule in question just by looking at them. The "IsomericSMILES" column is a standardized SMILES string computed using the same software (on PubChem) for every molecule. The "[IUPACName](https://en.wikipedia.org/wiki/IUPAC_nomenclature_of_organic_chemistry)" is, similarly, a standardized nomenclature for molecule names. "name" is simply the most common name (sometimes a trade name) of the molecule, as you might see it in a publication. CID, IsomericSMILES, and IUPACName all uniquely describe the molecule. If you have multiple datasets from multiple sources, and you want to integrate them together, you can use stock Pandas functions for merging and/or concatenating data.
#
# This representation for a set of molecules will recur again and again in Part 4, when looking at external datasets.
# Now that you have the molecules from your data in a standard format, save them to disk for future use:
pyrfume.save_data(molecules, 'my_data/molecules.csv')
# You can load them back again with:
molecules = pyrfume.load_data('my_data/molecules.csv')
# You can change the location that Pyrfume uses for its (local copy of) the data archives with `pyrfume.set_data_path`.
| docs/your-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="G_BRnjPuoVJM" executionInfo={"status": "ok", "timestamp": 1638145994293, "user_tz": 480, "elapsed": 4129, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13074642537976358258"}} outputId="cbbc6909-f89b-4b1e-ec96-abaf0b6d2927"
# !pip install simcse transformers==4.2.1 scipy==1.5.4 datasets==1.2.1 pandas==1.1.5 scikit-learn==0.24.0 prettytable==2.1.0 gradio torch setuptools==49.3.0
# + id="bsA0WM0soVNM"
import torch
# NOTE(review): the availability check's return value is discarded, so it
# does not guard anything; the next line will raise on a CPU-only runtime.
# Confirm a GPU Colab runtime is required here.
torch.cuda.is_available()
dtype = torch.cuda.FloatTensor
from simcse import SimCSE
import pandas as pd
from IPython.display import HTML, display
from tqdm import tqdm
from functools import partialmethod
import numpy as np
import h5py
from time import time
import tables
import gc
# + colab={"base_uri": "https://localhost:8080/"} id="8o1got2ICQIG" executionInfo={"status": "ok", "timestamp": 1638146020285, "user_tz": 480, "elapsed": 16617, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13074642537976358258"}} outputId="aeb5c275-b31b-4c2f-fa8c-4b1872ebf1b3"
from google.colab import drive
# Mount Google Drive so the dataset and output matrices can be read/written.
drive.mount('/content/drive')
# + id="lf39yRP3CXYI"
# Input data root (relative path) and the output folder for the per-batch
# top-4 similarity index matrices (absolute path under the mounted drive).
data_folder = 'drive/MyDrive/CSCI_544_NLP_Project/Twitter_Bot_Detection/Data/'
data_triplet_matrices_folder = '/content/drive/MyDrive/CSCI_544_NLP_Project/Twitter_Bot_Detection/Data/Model_TwiBot_Similarity_With_Metadata_360k_Tweets/data_triplet_matrices/'
# + colab={"base_uri": "https://localhost:8080/", "height": 266, "referenced_widgets": ["aa7600195b3a49358aa6f5791f7f9406", "279fe55b3e354bdda81a6f0746dc8b63", "4a4956d8015b44d1bc3f8ca5524659f8", "b317e3a316f144bfa6af084997911d42", "<KEY>", "f5b15b06c2f6486f8e1cbdb6759a4432", "09401494c0694e4eb1cc1949c62ba9f2", "8c678fc08b4c4abd839b1ac25fde1f7f", "3c14e3a4caa24e5782bcea7d810f32d8", "3781b8ae5e7546a991eea140461c2896", "<KEY>", "b4695f9ef3b24d28ad831849f465ea15", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "9832888ad69842c1914f66e3eba50715", "affccf4c5e3643cf8fee7b5645675be3", "<KEY>", "<KEY>", "<KEY>", "333536a33114471c81861ad94a96483f", "<KEY>", "<KEY>", "cf49ff3fe43345f49aa5777c4acc0434", "<KEY>", "<KEY>", "122764d882224f81a84870f6ef195e8c", "f72dfe4696944d3aace6a3699303f197", "4e1f3e6662e84373ad328d39627dd258", "<KEY>", "516d5edf3233478e870fcee31e4fc651", "786f7e633fb14e9db8ef3b963cce76cd", "1ca3a46059864fe48c062052bad3ee6d", "<KEY>", "9765cba22458423e9c77adad5ffae17c", "<KEY>", "<KEY>", "7981def777054c09a82e5c3f35d27beb", "cce4cb8eadd340408f96e9686f4aff86"]} id="LOql_FPLCXVp" executionInfo={"status": "ok", "timestamp": 1638146051169, "user_tz": 480, "elapsed": 30890, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13074642537976358258"}} outputId="add424d9-a79d-4b00-ea8c-a01a8d2c567a"
# Supervised, uncased SimCSE checkpoint. Three separate instances are created
# because build_index() stores the index as a property on the model object
# (self.index — see the Old/Unused section below): one general model for
# similarity() calls, one indexing bot tweets, one indexing human tweets.
model = SimCSE("princeton-nlp/sup-simcse-bert-base-uncased")
bot_index_model = SimCSE("princeton-nlp/sup-simcse-bert-base-uncased")
human_index_model = SimCSE("princeton-nlp/sup-simcse-bert-base-uncased")
## is supervised and case insensitive
# + id="qo16RNDJCXHv" colab={"base_uri": "https://localhost:8080/", "height": 17} executionInfo={"status": "ok", "timestamp": 1638146171899, "user_tz": 480, "elapsed": 5080, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13074642537976358258"}} outputId="2e410f6d-c9d0-4803-cae4-36fd8260d9ee"
# /content/drive/MyDrive/CSCI_544_NLP_Project/Twitter_Bot_Detection/Data/data_round_3_corrected/train_tweets_spacy_round3.csv
# Load the round-3 training tweets (spaCy-processed).
data = pd.read_csv(data_folder+'data_round_3_corrected/train_tweets_spacy_round3.csv')
# + id="0PJs1MwkCXEu" colab={"base_uri": "https://localhost:8080/", "height": 17} executionInfo={"status": "ok", "timestamp": 1638146178952, "user_tz": 480, "elapsed": 250, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13074642537976358258"}} outputId="1b30b842-461d-46f0-a05d-fca3985fdc0f"
# Keep only English tweets (per the spaCy language code) and drop columns that
# are not used downstream.
data = data[data['lang_code_spc']=='en'].reset_index(drop=True)
data = data.drop(columns=['orig_tweet', 'lang_spc', 'lang_code_spc'])
# + [markdown] id="Novhc13jPkuh"
# ## Build a similarity matrix for all tweets in the dataset (post processing the dataset)
# Takes about 1 hour to build the similarity matrix for 600k x 600k (all tweets x all tweets), and
# about 15 minutes to build the similarity matrix for 1 x 325k (one tweet row x all bot tweets).
# + id="EKTym3JLC57o"
# Not enough memory to pre-compile similarity matrices - even for just all_tweets x bot_tweets -
# We compute similarity matrices in batches of 10,000 and save only the top 4 similar tweets per tweet
tqdm.pandas()
tqdm.__init__ = partialmethod(tqdm.__init__, disable=False)
print("total tweets", len(data))
time_start = time()
# data_subset = data[['tweet','label']].sample(n=300000, random_state=11)
data_subset = data.groupby('label').apply(lambda x: x.sample(180000, random_state=1))
all_tweets = data_subset['tweet'].to_list()
all_tweet_labels = data_subset['label'].to_list()
bot_tweets = data_subset[data_subset['label']==1]['tweet'].to_list()
bot_tweet_labels = data_subset[data_subset['label']==1]['label'].to_list()
human_tweets = data_subset[data_subset['label']==0]['tweet'].to_list()
human_tweet_labels = data_subset[data_subset['label']==0]['label'].to_list()
print("all_tweets", len(all_tweets))
print("bot_tweets", len(bot_tweets))
print("human_tweets", len(human_tweets))
print("verify bot+human tweet size", len(bot_tweets) + len(human_tweets))
# + colab={"base_uri": "https://localhost:8080/", "height": 106} id="tvH0mEXuxYC1" executionInfo={"status": "ok", "timestamp": 1638146105311, "user_tz": 480, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13074642537976358258"}} outputId="b90d0118-be65-4bbe-f7a9-e52b20075da8"
# NOTE(review): pd.option_context is a context manager; calling it without
# `with` is a no-op, so these display options are never actually applied.
pd.option_context('display.max_rows', None, 'display.max_columns', None)
print(data_subset.iloc[0])
print(len(data_subset))
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="72RN4lGS4bij" executionInfo={"status": "ok", "timestamp": 1638066552602, "user_tz": 480, "elapsed": 12813674, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "15348670853435150367"}} outputId="0c72e549-1845-443b-960e-f16f52b86076"
batch_size = 9000
# Ceil division so a trailing partial batch is still processed when
# len(all_tweets) is not a multiple of batch_size; the original
# int(len/batch_size) silently dropped the remainder. (360k/9000 divides
# evenly, so output is unchanged for the current data.)
num_batches = -(-len(all_tweets) // batch_size)
print(num_batches)
TOP_K = 4  # keep only the 4 most similar tweets per query tweet
for i in range(num_batches):
    print("{}/{}".format(i+1, num_batches))
    # Get the data batch; the slice clamps at the end of the list.
    data_batch = all_tweets[i*batch_size:(i+1)*batch_size]
    print("data_batch size", len(data_batch))
    # One pass per reference pool; this replaces the two copy-pasted
    # bot/human sections with identical behavior.
    for kind, pool in (('bot', bot_tweets), ('human', human_tweets)):
        time_start = time()
        # Similarity of every batch tweet against the whole pool.
        sim_mat = model.similarity(data_batch, pool)
        print("sim_mat_{} size".format(kind), len(sim_mat))  # should equal the batch size
        # Indices of the TOP_K most similar pool tweets per row, in ascending
        # similarity order; sort when reading them back.
        top_idx = np.argpartition(sim_mat, -TOP_K, axis=1)[:, -TOP_K:]
        np.save(data_triplet_matrices_folder + 'train/sim_mat_' + kind + '_' + str(i*batch_size) + '_' + str((i+1)*batch_size) + '.npy', top_idx)
        print("time taken to write " + kind + str(i*batch_size) + "_" + str((i+1)*batch_size) + "npy file", time() - time_start)
        # Free the large similarity matrix before the next pool/batch.
        del sim_mat, top_idx
        gc.collect()
# + [markdown] id="3DkeuxZzsM5r"
# ## Functions for searching similar sentences - using sim matrix
# + id="chbL109fsN-p"
def sim_00(row, tweet, label):
    """Return (sent1, sent1_label, hard_neg, hard_neg_label) for `row`, using
    the MOST similar same-class tweet as sent1 and the MOST similar
    opposite-class tweet as hard_neg.

    `row.name` indexes the precomputed global similarity matrices (stored in
    ascending similarity order, hence the [::-1] reversal). `tweet` is unused
    but kept for a uniform signature with the other sim_* helpers.
    """
    # Reverse so position 0 is the most similar candidate.
    top3_bot_indices = global_bot_sim_mat[row.name][::-1]
    top3_human_indices = global_human_sim_mat[row.name][::-1]
    if label == 0:
        # Human tweet: positive from the human pool, hard negative from the bot pool.
        return human_tweets[top3_human_indices[0]], human_tweet_labels[top3_human_indices[0]], \
               bot_tweets[top3_bot_indices[0]], bot_tweet_labels[top3_bot_indices[0]]
    elif label == 1:
        # Bot tweet: positive from the bot pool, hard negative from the human pool.
        return bot_tweets[top3_bot_indices[0]], bot_tweet_labels[top3_bot_indices[0]], \
               human_tweets[top3_human_indices[0]], human_tweet_labels[top3_human_indices[0]]
    # Labels are 0/1 only; fail loudly instead of returning a string where
    # callers (zip(*...) in build_dataset) expect a 4-tuple.
    raise ValueError("unexpected label: {!r}".format(label))
def sim_01(row, tweet, label):
    """Return (sent1, sent1_label, hard_neg, hard_neg_label) for `row`, using
    the MOST similar same-class tweet as sent1 and the SECOND most similar
    opposite-class tweet as hard_neg.

    `row.name` indexes the precomputed global similarity matrices (stored in
    ascending similarity order, hence the [::-1] reversal). `tweet` is unused
    but kept for a uniform signature with the other sim_* helpers.
    """
    # Reverse so position 0 is the most similar candidate.
    top3_bot_indices = global_bot_sim_mat[row.name][::-1]
    top3_human_indices = global_human_sim_mat[row.name][::-1]
    if label == 0:
        # Human tweet: positive from the human pool, hard negative from the bot pool.
        return human_tweets[top3_human_indices[0]], human_tweet_labels[top3_human_indices[0]], \
               bot_tweets[top3_bot_indices[1]], bot_tweet_labels[top3_bot_indices[1]]
    elif label == 1:
        # Bot tweet: positive from the bot pool, hard negative from the human pool.
        return bot_tweets[top3_bot_indices[0]], bot_tweet_labels[top3_bot_indices[0]], \
               human_tweets[top3_human_indices[1]], human_tweet_labels[top3_human_indices[1]]
    # Labels are 0/1 only; fail loudly instead of returning a string where
    # callers (zip(*...) in build_dataset) expect a 4-tuple.
    raise ValueError("unexpected label: {!r}".format(label))
def sim_10(row, tweet, label):
    """Return (sent1, sent1_label, hard_neg, hard_neg_label) for `row`, using
    the SECOND most similar same-class tweet as sent1 and the MOST similar
    opposite-class tweet as hard_neg.

    `row.name` indexes the precomputed global similarity matrices (stored in
    ascending similarity order, hence the [::-1] reversal). `tweet` is unused
    but kept for a uniform signature with the other sim_* helpers.
    """
    # Reverse so position 0 is the most similar candidate.
    top3_bot_indices = global_bot_sim_mat[row.name][::-1]
    top3_human_indices = global_human_sim_mat[row.name][::-1]
    if label == 0:
        # Human tweet: positive from the human pool, hard negative from the bot pool.
        return human_tweets[top3_human_indices[1]], human_tweet_labels[top3_human_indices[1]], \
               bot_tweets[top3_bot_indices[0]], bot_tweet_labels[top3_bot_indices[0]]
    elif label == 1:
        # Bot tweet: positive from the bot pool, hard negative from the human pool.
        return bot_tweets[top3_bot_indices[1]], bot_tweet_labels[top3_bot_indices[1]], \
               human_tweets[top3_human_indices[0]], human_tweet_labels[top3_human_indices[0]]
    # Labels are 0/1 only; fail loudly instead of returning a string where
    # callers (zip(*...) in build_dataset) expect a 4-tuple.
    raise ValueError("unexpected label: {!r}".format(label))
def sim_11(row, tweet, label):
    """Return (sent1, sent1_label, hard_neg, hard_neg_label) for `row`, using
    the SECOND most similar same-class tweet as sent1 and the SECOND most
    similar opposite-class tweet as hard_neg.

    `row.name` indexes the precomputed global similarity matrices (stored in
    ascending similarity order, hence the [::-1] reversal). `tweet` is unused
    but kept for a uniform signature with the other sim_* helpers.
    """
    # Reverse so position 0 is the most similar candidate.
    top3_bot_indices = global_bot_sim_mat[row.name][::-1]
    top3_human_indices = global_human_sim_mat[row.name][::-1]
    if label == 0:
        # Human tweet: positive from the human pool, hard negative from the bot pool.
        return human_tweets[top3_human_indices[1]], human_tweet_labels[top3_human_indices[1]], \
               bot_tweets[top3_bot_indices[1]], bot_tweet_labels[top3_bot_indices[1]]
    elif label == 1:
        # Bot tweet: positive from the bot pool, hard negative from the human pool.
        return bot_tweets[top3_bot_indices[1]], bot_tweet_labels[top3_bot_indices[1]], \
               human_tweets[top3_human_indices[1]], human_tweet_labels[top3_human_indices[1]]
    # Labels are 0/1 only; fail loudly instead of returning a string where
    # callers (zip(*...) in build_dataset) expect a 4-tuple.
    raise ValueError("unexpected label: {!r}".format(label))
# + [markdown] id="HTpOT6z_sqoT"
# ## Building the Dataset
# + id="k5y-oIU1sqoT"
# Enable tqdm's pandas integration (progress_apply) and force bars on.
tqdm.pandas()
tqdm.__init__ = partialmethod(tqdm.__init__, disable=False)
## **Note: Editing the encode function of SimCSE.tools to silence TQDM there**
## Edited but nothing happened! progress bar still appears. Maybe a restart is needed but I don't want to restart now
def build_dataset(file, data):
    """Build the 4-way triplet training CSV for the given split name.

    For every tweet (sent0), four triplet variants are generated by combining
    the 1st/2nd most similar same-class tweet (sent1) with the 1st/2nd most
    similar opposite-class tweet (hard_neg), via the module-level
    sim_00/sim_01/sim_10/sim_11 helpers (which read the global similarity
    matrices). The four frames are concatenated (so the output has 4x the
    input rows) and written to
    <data_folder>/Model_TwiBot_Similarity_With_Metadata_360k_Tweets/<file>_all_data_triplets_with_similarity.csv

    Args:
        file: split name used in the output filename, e.g. "train".
        data: DataFrame with 'tweet' and 'label' columns whose index matches
              the similarity-matrix row order (reset_index before calling).
    """
    triplet_csv_build_start = time()
    # One (helper, description) pair per variant replaces the four
    # copy-pasted sections; this also fixes the original log line that
    # mislabeled the 11 variant as "10".
    variants = [
        (sim_00, "00 file (most similar sent1, most similar hard_neg)"),
        (sim_01, "01 file (most similar sent1, second most similar hard_neg)"),
        (sim_10, "10 file (second most similar sent1, most similar hard_neg)"),
        (sim_11, "11 file (second most similar sent1, second most similar hard_neg)"),
    ]
    print(len(data))
    parts = []
    for sim_fn, desc in variants:
        part = data[['tweet', 'label']].copy()
        part = part.rename(columns={'tweet': 'sent0', 'label': 'sent0_label'})
        t0 = time()
        print("preparing " + desc)
        # Each helper returns a 4-tuple; zip(*...) splits it into 4 columns.
        part['sent1'], part['sent1_label'], part['hard_neg'], part['hard_neg_label'] = \
            zip(*part.apply(lambda x: sim_fn(x, x.sent0, x.sent0_label), axis=1))
        print("Time taken for file " + desc[:2] + " :", time() - t0)
        parts.append(part)
    out_data = pd.concat(parts, ignore_index=True)
    out_data.to_csv(data_folder + "Model_TwiBot_Similarity_With_Metadata_360k_Tweets/" + file + "_all_data_triplets_with_similarity.csv", index=False)
    print('Time taken to build CSV:', (time() - triplet_csv_build_start))
# + [markdown] id="KQuQ_I0bs_Dx"
# ## Create Global Similarity Matrices
# + id="y3f4tzAcs_Dy"
# /content/drive/MyDrive/CSCI_544_NLP_Project/Twitter_Bot_Detection/Data/Model_TwiBot_Similarity_With_Metadata_360k_Tweets/data_triplet_matrices
# triplet_folder = "/content/drive/MyDrive/CSCI_544_NLP_Project/Twitter_Bot_Detection/Data/Model_TwiBot_Similarity_With_Metadata_360k_Tweets/data_triplet_matrices/train/"
# NOTE(review): this points at the dev/ matrices while the cell below builds
# the "train" CSV — confirm the intended split before running end to end.
triplet_folder = "/content/drive/MyDrive/CSCI_544_NLP_Project/Twitter_Bot_Detection/Data/Model_TwiBot_Similarity_With_Metadata_360k_Tweets/data_triplet_matrices/dev/"
# Stitch the per-batch top-4 index matrices back into one row-per-tweet list
# per pool, row-aligned with all_tweets.
global_bot_sim_mat_orig = []
global_human_sim_mat_orig = []
batch_size = 9000
# Floor division: a trailing partial batch was never written above, so none is read.
num_batches = int(len(all_tweets)/batch_size)
print(num_batches)
for i in range(num_batches):
    print("{}/{}".format(i+1, num_batches))
    sim_mat_bot = np.load(triplet_folder+'sim_mat_bot_' + str(i*batch_size) + '_' + str((i+1)*batch_size) + '.npy')
    sim_mat_human = np.load(triplet_folder+'sim_mat_human_' + str(i*batch_size) + '_' + str((i+1)*batch_size) + '.npy')
    # extend() appends each row (a length-4 index array) individually.
    global_bot_sim_mat_orig.extend(sim_mat_bot)
    global_human_sim_mat_orig.extend(sim_mat_human)
print("len(global_bot_sim_mat_orig)", len(global_bot_sim_mat_orig))
print("len(global_human_sim_mat_orig)", len(global_human_sim_mat_orig))
# + [markdown] id="Fc3ijUiRs_Dy"
# ## Delete the instances of the index in sim matrix if it matches with row index
# ( we don't want tweets that are similar to themselves )
# + colab={"base_uri": "https://localhost:8080/", "height": 88} id="G6FVllczs_Dy" executionInfo={"status": "ok", "timestamp": 1638082596188, "user_tz": 480, "elapsed": 2690, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "15348670853435150367"}} outputId="165fb707-d7df-4ca9-ed6d-f37298bb5265"
print(global_bot_sim_mat_orig[0])
print(global_human_sim_mat_orig[0])
# Drop a candidate index when it equals the row index (intended to remove
# tweets matched to themselves, per the markdown above).
# NOTE(review): `row` indexes the combined all_tweets ordering while the
# stored values index the bot/human pools, so the two index spaces do not
# obviously coincide for every row — confirm this comparison actually removes
# the self-match in all cases rather than dropping an unrelated candidate.
global_bot_sim_mat = [[x for x in global_bot_sim_mat_orig[row] if x != row] for row in range(len(global_bot_sim_mat_orig))]
global_human_sim_mat = [[x for x in global_human_sim_mat_orig[row] if x != row] for row in range(len(global_human_sim_mat_orig))]
print(global_bot_sim_mat[0])
print(global_human_sim_mat[0])
# + [markdown] id="obQUvtlDs_Dy"
# ## Call the build_dataset function
# + colab={"base_uri": "https://localhost:8080/", "height": 196} id="JtbzwCfws_Dz" executionInfo={"status": "ok", "timestamp": 1638082705687, "user_tz": 480, "elapsed": 57501, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "15348670853435150367"}} outputId="57421b81-bca8-448a-b8b2-5bd8f1b30ce2"
# Build the train triplet set
# Silence SettingWithCopyWarning while build_dataset adds columns.
pd.options.mode.chained_assignment = None  # default='warn'
# we shuffle the data in SimCSE code
# Reset to a 0..n-1 RangeIndex so row.name matches the similarity-matrix rows.
data_subset.reset_index(drop = True, inplace = True)
build_dataset("train", data_subset)
# + [markdown] id="XUh4SAVss_Dz"
# ## Verify that the created file has the right number of rows (expected 1440000 rows)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="W-fG7-8os_Dz" executionInfo={"status": "ok", "timestamp": 1638086033165, "user_tz": 480, "elapsed": 12122, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "15348670853435150367"}} outputId="7fd9e743-01a1-449c-9241-d84c56ecb452"
# NOTE(review): this reads a *_new.csv file, while build_dataset above writes
# *_with_similarity.csv (no "_new") — confirm which artifact is current.
all_data = pd.read_csv('/content/drive/MyDrive/CSCI_544_NLP_Project/Twitter_Bot_Detection/Data/Model_TwiBot_Similarity_With_Metadata_360k_Tweets/train_all_data_triplets_with_similarity_new.csv')
print(len(all_data))
# + [markdown] id="3ryauypWs_Dz"
# ## Build the dataset for SimCSE (only tweets - no labels)
#
# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="RZsnfV1As_Dz" executionInfo={"status": "ok", "timestamp": 1638086144911, "user_tz": 480, "elapsed": 24752, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "15348670853435150367"}} outputId="0c173484-230d-4f0d-b664-7c60f2a4bada"
# Keep only the three sentence columns (no labels), shuffle rows with a fixed
# seed, and write the CSV consumed by SimCSE training.
all_data_only_tweets = all_data[['sent0','sent1','hard_neg']]
all_data_only_tweets = all_data_only_tweets.sample(frac=1, random_state=1).reset_index(drop=True)
all_data_only_tweets.to_csv("/content/drive/MyDrive/CSCI_544_NLP_Project/Twitter_Bot_Detection/Data/Model_TwiBot_Similarity_With_Metadata_360k_Tweets/train_all_data_triplets_with_similarity_for_simcse.csv", index=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 53} id="6igMeXr_s_D0" executionInfo={"elapsed": 303, "status": "ok", "timestamp": 1637925021036, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "15348670853435150367"}, "user_tz": 480} outputId="14507409-d857-4a5a-cb5f-a4756c141b97"
time_start = time()
# PyTables keeps the HDF5 array on disk; slicing below loads only the
# requested rows into memory.
sim_mat_bot_file = tables.open_file('/content/sim_mat_bot_0_20000.hdf', 'r')
sim_mat_bot = sim_mat_bot_file.root.somename
sim_mat_bot_rows = sim_mat_bot[:10,:] # only the sliced rows (10 here) get loaded into memory
print (len(sim_mat_bot_rows))
print("time taken to load bot hdf file", time() - time_start)
# + colab={"base_uri": "https://localhost:8080/", "height": 214} id="xMCFpZxus_D2" executionInfo={"elapsed": 816, "status": "ok", "timestamp": 1637925398102, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "15348670853435150367"}, "user_tz": 480} outputId="a259f790-3615-4fef-a999-435f6a761b93"
time_start = time()
# print(sim_mat_bot_rows)
# Column indices of the 4 largest similarities per row (unsorted within the top-4).
num = 4
top4_sim_mat_bot_rows = np.argpartition(sim_mat_bot_rows, -num, axis=1)[:, -num:]
# Fancy-index back into the similarity rows to inspect the top-4 values.
print(sim_mat_bot_rows[np.arange(sim_mat_bot_rows.shape[0])[:, None], top4_sim_mat_bot_rows])
# print(top)
# top = np.argpartition(sim_mat_bot_rows, -num, axis=1)[-num:]
# x = sim_mat_bot_rows[np.arange(sim_mat_bot_rows.shape[0])[:, None], top]
# top4_sim_mat_bot_rows = np.argpartition(sim_mat_bot_rows, -num, axis=1)[-num:]
np.save(data_folder+'data_triplets/train/sim_mat_bot_0_10000.npy', top4_sim_mat_bot_rows)
print("time taken to write bot npy file", time() - time_start)
# + [markdown] id="ducMh1MjByoQ"
# ## Old/Unused - Build a similarity matrix for all tweets in the dataset (post processing the dataset)
# ## Try using the build_index() and search() functions
#
# Building the two indices takes around 30 mins. Index is built as a model property (self.index). See if we can save it when we save the model.
# + colab={"base_uri": "https://localhost:8080/", "height": 89} id="hG20TVCbaGgf" executionInfo={"elapsed": 389, "status": "ok", "timestamp": 1637832246598, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "15348670853435150367"}, "user_tz": 480} outputId="f06a4715-94f2-48ca-c72a-493946f021e2"
# Old/unused: split the full dataset into bot and human tweet pools for the
# build_index()/search() experiment below.
all_tweets = data['tweet'].to_list()
print(len(all_tweets))
# Compute each class mask once instead of repeating the filter expression.
bot_mask = data['label'] == 1
human_mask = data['label'] == 0
bot_tweets = data.loc[bot_mask, 'tweet'].to_list()
bot_tweet_labels = data.loc[bot_mask, 'label'].to_list()
human_tweets = data.loc[human_mask, 'tweet'].to_list()
human_tweet_labels = data.loc[human_mask, 'label'].to_list()
print("bot_tweets", len(bot_tweets))
print("human_tweets", len(human_tweets))
print(len(bot_tweets) + len(human_tweets))
# + colab={"base_uri": "https://localhost:8080/", "height": 341} id="n4x_yKU7Bxpj" executionInfo={"elapsed": 1692536, "status": "ok", "timestamp": 1637829351211, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "15348670853435150367"}, "user_tz": 480} outputId="9303db5e-b1db-411b-d0b1-05047a474456"
sim_mat_time_start = time()
#build index for bot tweets and human tweets
# model_idx = model.build_index(all_tweets)
# The index is stored as a property on each model instance (self.index),
# which is why a separate SimCSE object is used per pool.
bot_index_model.build_index(bot_tweets)
human_index_model.build_index(human_tweets)
print("\nTime taken to build index", time() - sim_mat_time_start)
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="-tikHhkDIRMP" executionInfo={"elapsed": 2475, "status": "ok", "timestamp": 1637832158149, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "15348670853435150367"}, "user_tz": 480} outputId="a8baba85-3cb7-4ee4-e4f2-a5b6897ede07"
print("\ntweet 0", all_tweets[0])
results = bot_index_model.search(human_tweets[0], threshold = 0, top_k = 6)
print("\nSimilar Bot tweets:",results)
print("\ntweet 0", all_tweets[0])
results = human_index_model.search(human_tweets[0], threshold = 0, top_k = 6)
print("\nSimilar Human tweets:",results)
# print(results[1][0])
# print(human_tweets.index(results[1][0]))
# + [markdown] id="iTbqSEUdQFR3"
# ## Old/Unused Build similarity matrices for 50k tweets. One sim matrix for tweets x bot_tweets, another for tweets x human_tweets
# + colab={"base_uri": "https://localhost:8080/", "height": 305} id="XfCTFBu44KnT" executionInfo={"elapsed": 11, "status": "ok", "timestamp": 1637625322490, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13074642537976358258"}, "user_tz": 480} outputId="232ed245-92b3-4501-a649-a153b2354dc7"
# Include labels while writing to the triplets CSV!
sim_mat_time_start = time()
# data['tweet'] = data['tweet'].apply(lambda x: ' '.join(x))
# As of now, we sample 50000 random rows with seed = 1. If we wanna progressively build the entire dataset, we can take the first 50000 rows, and so on.
data_subset = data[['tweet','label']].sample(n=50000, random_state=1)
tweet_subset = data_subset['tweet'].to_list()
label_subset = data_subset['label'].to_list()
print(data_subset[:5])
print(data_subset[-5:])
# Per-class pools drawn from the same 50k sample; positions in these lists
# are used as the column indices of the precomputed similarity matrices in
# the triplet-building loop below.
bot_tweets = data_subset[data_subset['label']==1]['tweet'].to_list()
bot_tweet_labels = data_subset[data_subset['label']==1]['label'].to_list()
human_tweets = data_subset[data_subset['label']==0]['tweet'].to_list()
human_tweet_labels = data_subset[data_subset['label']==0]['label'].to_list()
print("bot_tweets", len(bot_tweets))
print("human_tweets", len(human_tweets))
print("total tweets", len(bot_tweets) + len(human_tweets))
# + colab={"base_uri": "https://localhost:8080/", "height": 349} id="FrYrh-238IW2" executionInfo={"elapsed": 144, "status": "error", "timestamp": 1637649123197, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13074642537976358258"}, "user_tz": 480} outputId="36c2f38b-e891-4661-dedc-7b1edef6c413"
# import numpy as np
# sim_mat_bot = np.load(data_folder+'similarity_matrix_bot_50k.npy')
# sim_mat_human = np.load(data_folder+'similarity_matrix_human_50k.npy')
# + [markdown] id="dCXRvcnRtP2U"
# ## Old/Unused Build triplet CSV (dataset size = 50k right now)
# # Prev exec : 6622.105570077896 seconds
# + colab={"base_uri": "https://localhost:8080/", "height": 305} id="ySaYIeUDOhEy" executionInfo={"elapsed": 6622618, "status": "ok", "timestamp": 1637632797101, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13074642537976358258"}, "user_tz": 480} outputId="2c1d7c3b-7d00-4376-d8fe-c826b37cf7b8"
import heapq
import numpy as np
triplet_csv_build_start = time()
# Column order matters for the CSV consumed downstream.
triplet_columns = ['sent0', 'sent1', 'hard_neg', 'sent0_label', 'sent1_label', 'hard_neg_label']
# Accumulate plain dicts and build the DataFrame once at the end:
# DataFrame.append inside the loop copied the whole frame on every call
# (O(n^2) overall) and was removed entirely in pandas 2.0.
rows = []
print("tweets in tweet_subset", len(tweet_subset))
for idx in range(len(data_subset)):
    if idx % 100 == 0:
        print("{}/{}".format(idx, len(data_subset)))
    # Indices (into the bot/human pools) of the top-3 most similar tweets.
    top3_bot_indices = heapq.nlargest(3, range(len(sim_mat_bot[idx])), key=sim_mat_bot[idx].__getitem__)
    top3_human_indices = heapq.nlargest(3, range(len(sim_mat_human[idx])), key=sim_mat_human[idx].__getitem__)
    # Combine permutations of the most similar bot and human tweets.
    # NOTE(review): the stated intent is to skip the top same-class match
    # (presumably the tweet itself), but the label==1 branch takes bot
    # indices [:2] (including rank 0) while the label==0 branch skips rank 0
    # of the human list ([1:]). Confirm whether [:2] should be [1:3] for
    # bots; behavior is kept as-is here.
    if label_subset[idx]==1:
        for bot_idx in top3_bot_indices[:2]:
            for human_idx in top3_human_indices[:2]:
                rows.append({'sent0': tweet_subset[idx], 'sent1': bot_tweets[bot_idx], 'hard_neg': human_tweets[human_idx],
                             'sent0_label': label_subset[idx], 'sent1_label': bot_tweet_labels[bot_idx], 'hard_neg_label': human_tweet_labels[human_idx]})
    elif label_subset[idx]==0:
        for bot_idx in top3_bot_indices[:2]:
            for human_idx in top3_human_indices[1:]:
                rows.append({'sent0': tweet_subset[idx], 'sent1': human_tweets[human_idx], 'hard_neg': bot_tweets[bot_idx],
                             'sent0_label': label_subset[idx], 'sent1_label': human_tweet_labels[human_idx], 'hard_neg_label': bot_tweet_labels[bot_idx]})
    else:
        print("something went wrong in the label comparison")
out = pd.DataFrame(rows, columns=triplet_columns)
print(out)
out.to_csv(data_folder+"train_triplets_with_similarity.csv", index=False)
print('Time taken to build CSV:', (time() - triplet_csv_build_start))
# + [markdown] id="Lswu-t1aIO33"
# ## Old/Unused Functions for searching similar sentences - using build_index and search
# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="ebAyD5g3gYtW" executionInfo={"elapsed": 189, "status": "ok", "timestamp": 1637845689696, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "15348670853435150367"}, "user_tz": 480} outputId="4a8ce229-404e-4058-959a-652475d57665"
def similar_00(tweet, label):
    """Return (sent1, sent1_label, hard_neg, hard_neg_label) for `tweet` via
    the index models' search(): sent1 is the rank-1 same-class result (rank 0
    is skipped — presumably the tweet itself; TODO confirm), hard_neg is the
    rank-0 opposite-class result.

    search() results are read as result[i][0] for the text; the label is
    recovered by looking the text up in the corresponding pool list.
    """
    top3_bot_tweets = bot_index_model.search(tweet, threshold = 0, top_k = 3)
    top3_human_tweets = human_index_model.search(tweet, threshold = 0, top_k = 3)
    if label == 0:
        # Human tweet: positive from the human index, hard negative from the bot index.
        return top3_human_tweets[1][0], human_tweet_labels[human_tweets.index(top3_human_tweets[1][0])], \
               top3_bot_tweets[0][0], bot_tweet_labels[bot_tweets.index(top3_bot_tweets[0][0])]
    elif label == 1:
        # Bot tweet: positive from the bot index, hard negative from the human index.
        return top3_bot_tweets[1][0], bot_tweet_labels[bot_tweets.index(top3_bot_tweets[1][0])], \
               top3_human_tweets[0][0], human_tweet_labels[human_tweets.index(top3_human_tweets[0][0])]
    # Labels are 0/1 only; fail loudly instead of returning a string where
    # callers expect a 4-tuple.
    raise ValueError("unexpected label: {!r}".format(label))
def similar_01(tweet, label):
    """Return (sent1, sent1_label, hard_neg, hard_neg_label) for `tweet` via
    the index models' search(): sent1 is the rank-1 same-class result (rank 0
    is skipped — presumably the tweet itself; TODO confirm), hard_neg is the
    rank-1 opposite-class result.

    search() results are read as result[i][0] for the text; the label is
    recovered by looking the text up in the corresponding pool list.
    """
    top3_bot_tweets = bot_index_model.search(tweet, threshold = 0, top_k = 3)
    top3_human_tweets = human_index_model.search(tweet, threshold = 0, top_k = 3)
    if label == 0:
        # Human tweet: positive from the human index, hard negative from the bot index.
        return top3_human_tweets[1][0], human_tweet_labels[human_tweets.index(top3_human_tweets[1][0])], \
               top3_bot_tweets[1][0], bot_tweet_labels[bot_tweets.index(top3_bot_tweets[1][0])]
    elif label == 1:
        # Bot tweet: positive from the bot index, hard negative from the human index.
        return top3_bot_tweets[1][0], bot_tweet_labels[bot_tweets.index(top3_bot_tweets[1][0])], \
               top3_human_tweets[1][0], human_tweet_labels[human_tweets.index(top3_human_tweets[1][0])]
    # Labels are 0/1 only; fail loudly instead of returning a string where
    # callers expect a 4-tuple.
    raise ValueError("unexpected label: {!r}".format(label))
def similar_10(tweet, label):
    """Return (sent1, sent1_label, hard_neg, hard_neg_label) for `tweet` via
    the index models' search(): sent1 is the rank-2 same-class result (rank 0
    is skipped — presumably the tweet itself; TODO confirm), hard_neg is the
    rank-0 opposite-class result.

    search() results are read as result[i][0] for the text; the label is
    recovered by looking the text up in the corresponding pool list.
    """
    top3_bot_tweets = bot_index_model.search(tweet, threshold = 0, top_k = 3)
    top3_human_tweets = human_index_model.search(tweet, threshold = 0, top_k = 3)
    if label == 0:
        # Human tweet: positive from the human index, hard negative from the bot index.
        return top3_human_tweets[2][0], human_tweet_labels[human_tweets.index(top3_human_tweets[2][0])], \
               top3_bot_tweets[0][0], bot_tweet_labels[bot_tweets.index(top3_bot_tweets[0][0])]
    elif label == 1:
        # Bot tweet: positive from the bot index, hard negative from the human index.
        return top3_bot_tweets[2][0], bot_tweet_labels[bot_tweets.index(top3_bot_tweets[2][0])], \
               top3_human_tweets[0][0], human_tweet_labels[human_tweets.index(top3_human_tweets[0][0])]
    # Labels are 0/1 only; fail loudly instead of returning a string where
    # callers expect a 4-tuple.
    raise ValueError("unexpected label: {!r}".format(label))
def similar_11(tweet, label):
    """Return (same-class 3rd-nearest tweet, its label, other-class 2nd-nearest tweet, its label).

    `label` is 0 for a human tweet and 1 for a bot tweet; any other value
    yields the sentinel string below.
    """
    bot_hits = bot_index_model.search(tweet, threshold = 0, top_k = 3)
    human_hits = human_index_model.search(tweet, threshold = 0, top_k = 3)
    if label == 0:
        # Human tweet: third-nearest human match plus second-nearest bot match.
        same, other = human_hits[2][0], bot_hits[1][0]
        return (same, human_tweet_labels[human_tweets.index(same)],
                other, bot_tweet_labels[bot_tweets.index(other)])
    if label == 1:
        # Bot tweet: third-nearest bot match plus second-nearest human match.
        same, other = bot_hits[2][0], human_hits[1][0]
        return (same, bot_tweet_labels[bot_tweets.index(same)],
                other, human_tweet_labels[human_tweets.index(other)])
    return 'Should not come here'
# def similar_0_sent1(tweet, label):
# # Human tweet
# top3_bot_tweets = bot_index_model.search(tweet, threshold = 0, top_k = 3)
# top3_human_tweets = human_index_model.search(tweet, threshold = 0, top_k = 3)
# if label == 0:
# # Current tweet is a human tweet
# # Just hardcode the label? For now let's get it from the actual list and see if the right values are coming
# return top3_human_tweets[1][0], human_tweet_labels[human_tweets.index(top3_human_tweets[1][0])]
# elif label == 1:
# # Current tweet is a bot tweet
# return top3_bot_tweets[1][0], bot_tweet_labels[bot_tweets.index(top3_bot_tweets[1][0])]
# else:
# return 'Should not come here'
# def similar_0_hard_neg(tweet, label):
# # Human tweet
# top3_bot_tweets = bot_index_model.search(tweet, threshold = 0, top_k = 3)
# top3_human_tweets = human_index_model.search(tweet, threshold = 0, top_k = 3)
# if label == 0:
# # Current tweet is a human tweet
# return top3_bot_tweets[0][0], bot_tweet_labels[bot_tweets.index(top3_bot_tweets[0][0])]
# elif label == 1:
# # Current tweet is a bot tweet
# return top3_human_tweets[0][0], human_tweet_labels[human_tweets.index(top3_human_tweets[0][0])]
# else:
# return 'Should not come here'
# def similar_1_sent1(tweet, label):
# # Human tweet
# top3_bot_tweets = bot_index_model.search(tweet, threshold = 0, top_k = 3)
# top3_human_tweets = human_index_model.search(tweet, threshold = 0, top_k = 3)
# if label == 0:
# # Current tweet is a human tweet
# return top3_human_tweets[2][0], human_tweet_labels[human_tweets.index(top3_human_tweets[2][0])]
# elif label == 1:
# # Current tweet is a bot tweet
# return top3_bot_tweets[2][0], bot_tweet_labels[bot_tweets.index(top3_bot_tweets[2][0])]
# else:
# return 'Should not come here'
# def similar_1_hard_neg(tweet, label):
# # Human tweet
# top3_bot_tweets = bot_index_model.search(tweet, threshold = 0, top_k = 3)
# top3_human_tweets = human_index_model.search(tweet, threshold = 0, top_k = 3)
# if label == 0:
# # Current tweet is a human tweet
# return top3_bot_tweets[1][0], bot_tweet_labels[bot_tweets.index(top3_bot_tweets[1][0])]
# elif label == 1:
# # Current tweet is a bot tweet
# return top3_human_tweets[1][0], human_tweet_labels[human_tweets.index(top3_human_tweets[1][0])]
# else:
# return 'Should not come here'
| DatasetBuilder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: env-tcga
# language: python
# name: env-tcga
# ---
# # Exploratory Data Analysis of Cancer Genomics data using TCGA
#
# In this notebook, we will take a look at one of the canonical datasets, if not _the_ dataset, in cancer genomics: TCGA.
#
# We'll start with investigating the RNA Sequencing (rnaseq) and Clinical data available for a type of liver cancer known as hepatocellular carcinoma (HCC). Hepatocellular carcinoma is the most common form of liver cancer in the United States, making up [more than 80% of cases](https://www.cancer.gov/about-nci/organization/ccg/research/structural-genomics/tcga/studied-cancers/liver). The TCGA dataset is abbreviated LIHC.
#
# Some examples of what researchers have learned from the LIHC dataset at the DNA-level include confirmed [frequent mutations](https://www.cancer.gov/about-nci/organization/ccg/research/structural-genomics/tcga/studied-cancers/liver) in:
# - The TERT promotor region, associated with regulating cell survival
# - TP53, one of the most frequently mutated genes in cancer
# - CTNNB1, a member of the Wnt signaling pathway that mediates cell growth and differentiation
#
# There are currently several therapies under development that target these genes.
#
# In addition to DNA alterations however, different biological and tumor microenvironment factors can [influence disease progression](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6237857/). A transcriptomic survey of tissues at various stages of disease progression could help elucidate some of the underlying pathways contributing to tumorigenesis.
#
# ### Today, we'll be focusing on using RNA-seq data from LIHC combined with clinical attributes to identify biomarkers for disease progression.
#
# The data is stored in the R package _[RTCGA](http://rtcga.github.io/RTCGA/)_
# + [markdown] heading_collapsed=true
# ## Load libraries
# + hidden=true
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# + [markdown] heading_collapsed=true
# ## Set variables
# + hidden=true
# Analysis configuration: input-file locations and the clinical column used
# as the response variable for the supervised-learning section.
data_dir=""  # directory containing the input files (empty string = current directory)
response_name="patient.race"  # clinical attribute intended as the response variable
rnaseq_file=data_dir+"lihc_rnaseq.csv.gz"
clinical_file=data_dir+"lihc_clinical.csv.gz"
# + [markdown] heading_collapsed=true
# ## Load data
# + [markdown] hidden=true
# The data is stored in the RTCGA package in the R programming language. I've outputted it for easy use within python.
#
# We will be investigating the Hepatocellular carcinoma dataset. Read about it [here](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5680778/).
#
# The TCGA RNASeq data is illumina hiseq Level 3 RSEM normalized expression data. You can read about the RSEM method [here](https://academic.oup.com/bioinformatics/article/26/4/493/243395).
#
# Essentially this is the raw counts of reads that aligned to the gene transcript, though it's only a guess by the program. Since it's a guess, the values are rational numbers. To simplify things, we'll round the values to the next whole integer.
#
#
# + hidden=true
# Load the RSEM-normalised expression matrix, index it by patient barcode,
# and round every estimate up to a whole integer (RSEM emits rational
# read-count estimates, as explained in the markdown above).
rnaseq = pd.read_csv(rnaseq_file, compression="gzip")
rnaseq = rnaseq.set_index('bcr_patient_barcode')
rnaseq = rnaseq.applymap(lambda v: int(np.ceil(v)))
display(rnaseq.shape)
display(rnaseq.head())
# + hidden=true
# Keep only columns whose gene symbol (the part before '|') is informative,
# then rename the columns to the bare symbol.
has_symbol = [len(parts[0]) > 1 for parts in rnaseq.columns.str.split('|')]
sub = rnaseq.loc[:, has_symbol]
sub.columns = [parts[0] for parts in sub.columns.str.split('|')]
rnaseq_sub = sub.copy()
rnaseq_sub.head()
# + [markdown] hidden=true
# The clinical data is within the RTCGA package, but is also available [here](https://portal.gdc.cancer.gov/projects/TCGA-LIHC). More description of the clinical attributes is available [here](https://gdc.cancer.gov/about-data/data-harmonization-and-generation/clinical-data-harmonization).
# + hidden=true
# Load the clinical attribute table, indexed by patient barcode so it can be
# joined against the expression matrix later.
clinical = pd.read_csv(clinical_file,compression="gzip").set_index('patient.bcr_patient_barcode')
display(clinical.shape)
display(clinical.head())
# -
# ## Gene level distribution
#
# In this section, we will investigate the value distribution of genes in our dataset.
#
# <br>
#
# #### Sample Questions:
#
# - What is the range of values for a given gene?
#
# - What is the distribution of values for a given gene?
#
# - Are there higher than average or lower than average expression of genes?
# Range of expression values for a single example gene (A2M).
max(rnaseq_sub['A2M']), min(rnaseq_sub['A2M'])
# Distribution of A2M expression across samples.
# NOTE(review): sns.distplot is deprecated in newer seaborn releases
# (replaced by histplot/displot) -- confirm the pinned seaborn version.
sns.set_style('darkgrid')
sns.distplot(rnaseq_sub['A2M'].values)
rnaseq_sub.describe()
rnaseq_sub.mean()
# +
# Distribution of per-gene mean expression, restricted to means in (1, 2500)
# to drop near-zero genes and extreme outliers that would swamp the plot.
avg_expression_all_genes = rnaseq_sub.mean()
sns.distplot(avg_expression_all_genes[(avg_expression_all_genes>1) & (avg_expression_all_genes<2500)]);
# -
# ## Dimension reduction based on gene expression
#
# If we were working with a smaller dataset we'd be able to plot the few genes we had on an expression-by-sample graph. However, we are working with 20k genes and will need a better method than creating 20k separate plots. One way to visualize sample distances is to reduce the dimensional space of gene expression across samples.
#
# Using principal component analysis (PCA) we can project the data points into a 2D plane so that each axis or component captures the greatest variation of the sample expression.
#
# PCA can be useful to characterize the distances between samples and identify groups of samplings that may be farther or closer apart. We can also identify any samples that might be worth excluding from our analysis. Documentation on sklearn's PCA function can be found [here](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html).
from sklearn.decomposition import PCA
# +
# Normalise the barcode index (first three '-' fields, lower-cased) so it
# matches the clinical table's patient barcodes for the join below.
rnaseq_sub.index = rnaseq_sub.index.map(lambda x: '-'.join(x.split('-')[:3]).lower())
# Pre-filtering of lowly-expressed genes: keep genes whose total count exceeds
# the number of samples (i.e. mean expression > 1).
rnaseq_gender = rnaseq_sub.T[rnaseq_sub.sum()>rnaseq_sub.shape[0]].T
# BUG FIX: join the *filtered* matrix. The original joined rnaseq_sub here,
# which silently discarded the pre-filtering step on the previous line.
rnaseq_gender = rnaseq_gender.join(clinical['patient.gender'])
rnaseq_gender, gender_labels = rnaseq_gender.drop('patient.gender', axis=1), rnaseq_gender['patient.gender']
# -
# Project samples onto the first two principal components and colour by sex.
pca = PCA(n_components=2)
pc_vals = pca.fit_transform(rnaseq_gender)
plt.figure(figsize=(16,8))
plt.scatter(pc_vals[:,0], pc_vals[:,1], c=gender_labels.replace({'male': 'orange', 'female': 'purple'}))
plt.title('PCA of gene expression for male and female patients', fontdict={'fontsize': 14, 'fontweight': 'bold'});
# ## Differential Expression Analysis
#
# We can statistically test for a difference in gene expression by performing a hypothesis test for each gene to see whether there is evidence to decide that expression is signficantly different between conditions.
# In this section, we will investigate differential expression results derived from the [DESeq2] package in R. Also see this [vignette](https://bioconductor.org/packages/release/bioc/vignettes/DESeq2/inst/doc/DESeq2.html) on how to do these analyses, as well as understand the methods.
#
# Briefly, a Sample x Gene matrix of raw count reads and a matching matrix of phenotypes (eg, treatment or condition) for each sample are inputted into DEseq2. In order to account for RNA composition bias and library size, when only a small number of genes are highly expressed in one condition vs another, these genes are normalized by dividing the expression by the mean expression for each gene across samples.
#
# DEseq2 fits a [negative binomial](https://en.wikipedia.org/wiki/Negative_binomial_distribution) generalized linear model to each gene and uses the [Wald test](https://www.statisticshowto.datasciencecentral.com/wald-test) for significance. Outliers are detected using Cook's distance and removed from the dataset. Genes with low normalized mean expression values below a threshold are also removed to improve detection power. https://chipster.csc.fi/manual/deseq2.html
#
# #### Sample questions:
# - Which genes are differentially expressed? Are they positively or negatively expressed compared to your control?
# - What do these genes do? Which pathways are they involved in?
# - Are there related clinical phenotypes which might show similar differences in expression?
# ### Running differential expression analysis via DEseq2
# We've created a subprocess to run the R package DEseq2 with our data and return the results in a python-friendly format.
#
# To run this script on the command line use:
# ```
# Rscript TCGA_differential_expression_analysis.R "" lihc_rnaseq.csv.gz lihc_clinical.csv.gz 100 patient.gender female male
# ```
#
# (script name | rna_seq_file.csv.gz | clinical_data_file.csv.gz | num_genes_to_run | clinical_attribute_col | atttribute 1 | attribute 2
#
# Use the output of this file (lihc_DESeq2_100_sampled_genes_patient.gender_female_vs_male.csv) to plot and identify differentially-expressed genes. You can change the clinical columns, attributes and number of genes to explore additional clinical variables.
# Load the DESeq2 differential-expression results produced by the R script.
# NOTE(review): the markdown above names the output file with "100" in it,
# while this reads the "full" variant -- confirm which file is intended.
de_gender = pd.read_csv('lihc_DESeq2_full_sampled_genes_patient.gender_female_vs_male.csv')
de_gender.head()
# Diagnostic MA-plot: log2 fold change vs mean of normalized counts
# (the average of counts normalized by size factor).
plt.figure(figsize=(16,8))
plt.scatter(de_gender['baseMean'], de_gender['log2FoldChange'], c='purple')
plt.xlim(0,100000);
# ### Pathway analysis
# We can do a brief survey of what kinds of pathways and functions some of our differentially expressed genes are involved in. Using the [BioMart](http://www.biomart.org/martservice_9.html) service to annotate our genes with gene ontology (GO) terms.
# Annotate genes with gene-ontology terms via Ensembl BioMart (pybiomart).
from pybiomart import Server, Dataset
# +
dataset = Dataset(name='hsapiens_gene_ensembl', host='http://www.ensembl.org')
# BioMart fields to fetch: GO accession, term name, term definition, HGNC symbol.
attributes = ['go_id', 'name_1006', 'definition_1006', 'hgnc_symbol']
# Restrict the query to chromosomes 1 and 2 to keep the download small.
bm = dataset.query(attributes=attributes, filters={'chromosome_name': ['1', '2']})
bm
# -
# ## Clinical data type investigation
#
# Now that we've gone through the steps of exploratory analysis for a single clinical attribute, you can repeat this process and explore how other clinical factors may change gene expression. In this section, we will investigate the diversity of the clinical data. Explore the clinical attributes available in the dataset.
#
# We are interested in understanding whether there are RNA signatures that change according to disease progression for hepatocellular carcinoma. Are there any relevant attributes in the clinical data which are relevant to identifying this kind of biomarker?
#
#
# #### Sample Questions:
#
# - How many unique values are there for a given clinical attribute?
#
# - How can we define an appropriate response variable for supervised learning?
#
# - What clinical attributes can be used to identify biomarkers for disease progression?
# ## Set up for supervised learning
#
# In this section, we will set up a supervised learning paradigm using the Genes within the RNASeq data as predictors and a clinical attribute as a response variable.
| tcga-series/workshop1/Hacknights_TCGA_EDA_key.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Copy items-Copy1.csv file into pandas dataframe and
# # replace all fields that are empty with NaN
# +
import pandas as pd
import numpy as np
df_items = pd.read_csv('items-Copy1.csv') # read dataset into pandas dataframe
# NOTE(review): despite the original comment ("replace empty with NaN"), this
# line does the opposite -- it replaces NaN values with empty strings, so the
# text columns downstream never contain NaN.
df_items = df_items.replace(np.nan, '', regex=True)
documents = list(df_items['description']) # get only description column and create documents list for this
#print(documents)
# -
from measures import Measures # utilized to encapsulate measures from each case
json_output = {} # will be used later to save dictionaries of measures to json files
json_output['cases'] = []
np.random.seed(500) # seed NumPy's global RNG so later sampling/splits are reproducible
# # Go through all rows in dataframe and check for if
# 1. anger is in tags
# 2. anger is not in tags
#
# ## This is a binary classification task
# +
labels_list_word = []       # class label as text ('anger' / 'none')
labels_list_numerical = []  # class label as 0/1
descriptions_list = []      # matching item descriptions
# Binary task: does the item's (lower-cased) tag string mention 'anger'?
for _, entry in df_items.iterrows():
    has_anger = 'anger' in entry['tag'].lower()
    labels_list_word.append('anger' if has_anger else 'none')
    descriptions_list.append(entry['description'])
    labels_list_numerical.append(1 if has_anger else 0)
# Assemble the labelled anger dataset.
df_anger = pd.DataFrame()
df_anger['label word'] = labels_list_word
df_anger['label numerical'] = labels_list_numerical
df_anger['description'] = descriptions_list
df_anger.loc[df_anger['label word'] == 'anger']
# -
# # Go through all rows in dataframe and check for if
# 1. fear is in tags
# 2. fear is not in tags
# +
labels_list_word = []       # class label as text ('fear' / 'none')
labels_list_numerical = []  # class label as 0/1
descriptions_list = []      # matching item descriptions
# Binary task: does the item's (lower-cased) tag string mention 'fear'?
for _, entry in df_items.iterrows():
    has_fear = 'fear' in entry['tag'].lower()
    labels_list_word.append('fear' if has_fear else 'none')
    descriptions_list.append(entry['description'])
    labels_list_numerical.append(1 if has_fear else 0)
# Assemble the labelled fear dataset.
df_fear = pd.DataFrame()
df_fear['label word'] = labels_list_word
df_fear['label numerical'] = labels_list_numerical
df_fear['description'] = descriptions_list
df_fear.loc[df_fear['label word'] == 'fear']
# -
# # Go through all rows in dataframe and check for if
# 1. anger and fear are both in tags
# 2. neither are in the tags
# +
labels_list_word = []       # class label as text ('both' / 'none')
labels_list_numerical = []  # class label as 0/1
descriptions_list = []      # matching item descriptions
# Binary task: does the (lower-cased) tag string mention BOTH 'fear' and 'anger'?
for _, entry in df_items.iterrows():
    tags = entry['tag'].lower()
    has_both = 'fear' in tags and 'anger' in tags
    labels_list_word.append('both' if has_both else 'none')
    descriptions_list.append(entry['description'])
    labels_list_numerical.append(1 if has_both else 0)
# Assemble the labelled both-emotions dataset.
df_both = pd.DataFrame()
df_both['label word'] = labels_list_word
df_both['label numerical'] = labels_list_numerical
df_both['description'] = descriptions_list
df_both.loc[df_both['label word'] == 'both']
# -
# # Data pre-processing
'''from nltk.tokenize import word_tokenize
from collections import defaultdict
from nltk.corpus import wordnet as wn
from nltk.stem import WordNetLemmatizer
from nltk import pos_tag
from nltk.corpus import stopwords
# Step - a : Remove blank rows if any.
df_anger['description'].dropna(inplace=True)
# Step - b : Change all the text to lower case. This is required as python interprets 'dog' and 'DOG' differently
df_anger['description'] = [entry.lower() for entry in df_anger['description']]
# Step - c : Tokenization : In this each entry in the corpus will be broken into set of words
df_anger['description']= [word_tokenize(entry) for entry in df_anger['description']]
# Step - d : Remove Stop words, Non-Numeric and perfom Word Stemming/Lemmenting.
# WordNetLemmatizer requires Pos tags to understand if the word is noun or verb or adjective etc. By default it is set to Noun
tag_map = defaultdict(lambda : wn.NOUN)
tag_map['J'] = wn.ADJ
tag_map['V'] = wn.VERB
tag_map['R'] = wn.ADV
for index,entry in enumerate(df_anger['description']):
# Declaring Empty List to store the words that follow the rules for this step
Final_words = []
# Initializing WordNetLemmatizer()
word_Lemmatized = WordNetLemmatizer()
# pos_tag function below will provide the 'tag' i.e if the word is Noun(N) or Verb(V) or something else.
for word, tag in pos_tag(entry):
# Below condition is to check for Stop words and consider only alphabets
if word not in stopwords.words('english') and word.isalpha():
word_Final = word_Lemmatized.lemmatize(word,tag_map[tag[0]])
Final_words.append(word_Final)
# The final processed set of words for each iteration will be stored in 'text_final'
df_anger.loc[index,'description'] = str(Final_words)'''
# +
# df_anger['description']
# -
# # Create train test split for classification for anger or not anger
# +
from sklearn.model_selection import train_test_split
from sklearn import svm
# 70/30 train/test split of the anger dataset.
# NOTE(review): unlike the fear/both splits below, no random_state is passed,
# so this split depends on NumPy's global RNG state -- confirm this is intended.
X_train_anger, X_test_anger, Y_train_anger, Y_test_anger = train_test_split(
    df_anger['description'], df_anger['label numerical'], test_size=0.3)
print('Number of rows in the total set for anger: {}'.format(df_anger.shape[0]))
print('Number of rows in the training set for anger: {}'.format(X_train_anger.shape[0]))
print('Number of rows in the test set for anger: {}'.format(X_test_anger.shape[0]))
# -
# # Ensure that labels are numerical values that the model can understand
'''from sklearn.preprocessing import LabelEncoder
Encoder = LabelEncoder()
Y_train_anger = Encoder.fit_transform(Y_train_anger)
Y_test_anger = Encoder.fit_transform(Y_test_anger)'''
# # Utilize Term frequency - Inverse Document
# 2. Term Frequency - Summarizes how often a given word appears within a document
# 1. Inverse Document Frequency - Down scales words that appear a lot accross documents
#
# ### * Essentially highlights words that are more interesting, e.g. frequent in document but not accross all documents
'''from sklearn.feature_extraction.text import TfidfVectorizer
Tfidf_vect_anger = TfidfVectorizer(max_features=5000)
Tfidf_vect_anger.fit(df_anger['description'])
X_train_Tfidf_anger = Tfidf_vect_anger.transform(X_train_anger)
X_test_Tfidf_anger = Tfidf_vect_anger.transform(X_test_anger)'''
# +
#print(Tfidf_vect_anger.vocabulary_)
# +
#print(X_train_Tfidf_anger)
# -
# !pip3 install matplotlib==3.1.0 # use this version of matplotlib as other version causes problems with seaborn
# # Create an instance of CountVectorizer
# # Fit training data and return matrix
# # transform testing data and return matrix
# +
from sklearn.feature_extraction.text import CountVectorizer
# Bag-of-words features: the vocabulary is fitted on the training
# descriptions only and then reused on the test set (no train/test leakage).
# instantiate Countvectorizer method
count_vector_anger = CountVectorizer()
# fit training data and return matrix
training_data_anger = count_vector_anger.fit_transform(X_train_anger)
# transform testing data and return matrix
testing_data_anger = count_vector_anger.transform(X_test_anger)
# -
# # Utilize svm from sklearn to create a support vector machine classifier and form predictions
# +
# Classifier - Algorithm - SVM
# fit the training dataset on the classifier
# Linear-kernel SVC; note 'degree' only applies to the poly kernel and is
# ignored here.
SVM_anger = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
SVM_anger.fit(training_data_anger,Y_train_anger)
# predict the labels on validation dataset
predictions_SVM_anger = SVM_anger.predict(testing_data_anger)
# -
# # Print out classification report for anger vs not anger
# +
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sn
print('Classification report for SVM anger classification: ')
print('---------------------------------------------------------- ')
# BUG FIX: numerical label 0 means 'none' and 1 means 'anger'.
# classification_report maps target_names onto the sorted labels [0, 1],
# so 'none' must come first (the original mislabelled the two classes).
print(classification_report(Y_test_anger, predictions_SVM_anger, target_names = ['none', 'anger']))
print('---------------------------------------------------------- ')
# BUG FIX: sklearn metrics take (y_true, y_pred); the original passed the
# predictions first, which silently swaps precision and recall.
print('Accuracy score: ', format(accuracy_score(Y_test_anger, predictions_SVM_anger)))
print('Precision score: ', format(precision_score(Y_test_anger, predictions_SVM_anger)))
print('Recall score: ', format(recall_score(Y_test_anger, predictions_SVM_anger)))
print('F1 score: ', format(f1_score(Y_test_anger, predictions_SVM_anger)))
print('---------------------------------------------------------- ')
# Confusion-matrix rows/columns follow sorted label order: 0 ('none'), 1 ('anger').
labels = ['none', 'anger']
cm = confusion_matrix(list(Y_test_anger), predictions_SVM_anger)
print("Confusion matrix SVM anger: \n")
print(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion Matrix SVM Anger')
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.savefig('confusion_matrix_SVM_anger.png')
plt.show()
# -
# # Create train test split for classification for fear or not fear
# +
from sklearn.model_selection import train_test_split
# Default 75/25 train/test split of the fear dataset, seeded for reproducibility.
X_train_fear, X_test_fear, Y_train_fear, Y_test_fear = train_test_split(
    df_fear['description'], df_fear['label numerical'], random_state=1)
print('Number of rows in the total set for fear: {}'.format(df_fear.shape[0]))
print('Number of rows in the training set for fear: {}'.format(X_train_fear.shape[0]))
print('Number of rows in the test set for fear: {}'.format(X_test_fear.shape[0]))
# -
# # Create an instance of CountVectorizer
# # Fit training data and return matrix
# # transform testing data and return matrix
# +
from sklearn.feature_extraction.text import CountVectorizer
# Bag-of-words features for the fear task: vocabulary fitted on training
# descriptions only, then reused on the test set.
# instantiate Countvectorizer method
count_vector_fear = CountVectorizer()
# fit training data and return matrix
training_data_fear = count_vector_fear.fit_transform(X_train_fear)
# transform testing data and return matrix
testing_data_fear = count_vector_fear.transform(X_test_fear)
# -
# # Utilize svm from sklearn to create a support vector machine classifier and form predictions
# +
# Classifier - Algorithm - SVM
# fit the training dataset on the classifier
# Linear-kernel SVC; 'degree' is ignored for non-poly kernels.
SVM_fear = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
SVM_fear.fit(training_data_fear,Y_train_fear)
# predict the labels on validation dataset
predictions_SVM_fear = SVM_fear.predict(testing_data_fear)
# -
# # Print out classification report for fear vs not fear
# +
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sn
print('Classification report for SVM fear classification: ')
print('---------------------------------------------------------- ')
# BUG FIX: numerical label 0 means 'none' and 1 means 'fear'.
# classification_report maps target_names onto the sorted labels [0, 1],
# so 'none' must come first (the original mislabelled the two classes).
print(classification_report(Y_test_fear, predictions_SVM_fear, target_names = ['none', 'fear']))
print('---------------------------------------------------------- ')
# BUG FIX: sklearn metrics take (y_true, y_pred); the original passed the
# predictions first, which silently swaps precision and recall.
print('Accuracy score: ', format(accuracy_score(Y_test_fear, predictions_SVM_fear)))
print('Precision score: ', format(precision_score(Y_test_fear, predictions_SVM_fear)))
print('Recall score: ', format(recall_score(Y_test_fear, predictions_SVM_fear)))
print('F1 score: ', format(f1_score(Y_test_fear, predictions_SVM_fear)))
print('---------------------------------------------------------- ')
# Confusion-matrix rows/columns follow sorted label order: 0 ('none'), 1 ('fear').
labels = ['none', 'fear']
cm = confusion_matrix(list(Y_test_fear), predictions_SVM_fear)
print("Confusion matrix SVM fear: \n")
print(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion Matrix SVM Fear')
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.savefig('confusion_matrix_SVM_fear.png')
plt.show()
# -
# # Create train test split for classification for both or not both
# +
# Default 75/25 train/test split of the both-emotions dataset, seeded for
# reproducibility.
X_train_both, X_test_both, Y_train_both, Y_test_both = train_test_split(
    df_both['description'], df_both['label numerical'], random_state=1)
print('Number of rows in the total set for both: {}'.format(df_both.shape[0]))
print('Number of rows in the training set for both: {}'.format(X_train_both.shape[0]))
print('Number of rows in the test set for both: {}'.format(X_test_both.shape[0]))
# -
# # Create an instance of CountVectorizer
# # Fit training data and return matrix
# # transform testing data and return matrix
# +
# Bag-of-words features for the both-emotions task: vocabulary fitted on
# training descriptions only, then reused on the test set.
# instantiate Countvectorizer method
count_vector_both = CountVectorizer()
# fit training data and return matrix
training_data_both = count_vector_both.fit_transform(X_train_both)
# transform testing data and return matrix
testing_data_both = count_vector_both.transform(X_test_both)
# -
# # Utilize svm from sklearn to create a support vector machine classifier and form predictions
# +
# Classifier - Algorithm - SVM
# fit the training dataset on the classifier
# Linear-kernel SVC; 'degree' is ignored for non-poly kernels.
SVM_both = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
SVM_both.fit(training_data_both,Y_train_both)
# predict the labels on validation dataset
predictions_SVM_both = SVM_both.predict(testing_data_both)
# -
# # Print out classification report for both vs not both
# +
print('Classification report for SVM both classification: ')
print('---------------------------------------------------------- ')
# BUG FIX: numerical label 0 means 'none' and 1 means 'both'.
# classification_report maps target_names onto the sorted labels [0, 1],
# so 'none' must come first (the original mislabelled the two classes).
print(classification_report(Y_test_both, predictions_SVM_both, target_names = ['none', 'both']))
print('---------------------------------------------------------- ')
# BUG FIX: sklearn metrics take (y_true, y_pred); the original passed the
# predictions first, which silently swaps precision and recall.
print('Accuracy score: ', format(accuracy_score(Y_test_both, predictions_SVM_both)))
print('Precision score: ', format(precision_score(Y_test_both, predictions_SVM_both)))
print('Recall score: ', format(recall_score(Y_test_both, predictions_SVM_both)))
print('F1 score: ', format(f1_score(Y_test_both, predictions_SVM_both)))
print('---------------------------------------------------------- ')
# Confusion-matrix rows/columns follow sorted label order: 0 ('none'), 1 ('both').
labels = ['none', 'both']
cm = confusion_matrix(list(Y_test_both), predictions_SVM_both)
print("Confusion matrix SVM both: \n")
print(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion Matrix SVM Both')
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.savefig('confusion_matrix_SVM_both.png')
plt.show()
# -
# # Specificity measures the proportion of actual negatives that are correctly identified as such. In probability notation: P(T-|D-) = TN / (TN + FP).
def perf_measure(y_actual, y_hat):
    '''
    Description:
        Compare ground-truth labels with predictions and count the number of
        True Positives (TP), False Positives (FP), True Negatives (TN) and
        False Negatives (FN) for a binary (0/1) classification task.
    Input:
        y_actual: Actual values of y set (0/1 labels)
        y_hat: Predicted values of y set (0/1 labels)
    Output:
        (TP, FP, TN, FN): Tuple of performance measures
    '''
    TP = 0
    FP = 0
    TN = 0
    FN = 0
    # Idiom fix: iterate the two label sequences in lockstep with zip instead
    # of indexing via range(len(...)).  The four tests are kept as independent
    # `if`s (not elif) to preserve the original's behaviour exactly for any
    # value outside {0, 1} (such pairs are simply not counted).
    for actual, predicted in zip(y_actual, y_hat):
        if actual == predicted == 1:   # True Positive
            TP += 1
        if predicted == 1 and actual != predicted:   # False Positive
            FP += 1
        if actual == predicted == 0:   # True Negative
            TN += 1
        if predicted == 0 and actual != predicted:   # False Negative
            FN += 1
    return (TP, FP, TN, FN)
# # Sensitivity and Specificity measures for anger, fear and both classifications before any adjustments for imbalance
# Sensitivity (true-positive rate) = TP / (TP + FN);
# Specificity (true-negative rate) = TN / (TN + FP).
# Computed for each of the three classifiers from the confusion counts.
TP_anger, FP_anger, TN_anger, FN_anger = perf_measure(list(Y_test_anger), list(predictions_SVM_anger))
sensitivity_anger = TP_anger / (TP_anger+FN_anger)
specificity_anger = TN_anger / (TN_anger + FP_anger)
print("Sensitivity Measure for Anger Classification: {sensitivity_anger}".format(sensitivity_anger=str(sensitivity_anger)))
print("Specificity Measure for Anger Classification: {specificity_anger}".format(specificity_anger=str(specificity_anger)))
TP_fear, FP_fear, TN_fear, FN_fear = perf_measure(list(Y_test_fear), list(predictions_SVM_fear))
sensitivity_fear = TP_fear / (TP_fear+FN_fear)
specificity_fear = TN_fear / (TN_fear + FP_fear)
print("Sensitivity Measure for Fear Classification: {sensitivity_fear}".format(sensitivity_fear=str(sensitivity_fear)))
print("Specificity Measure for Fear Classification: {specificity_fear}".format(specificity_fear=str(specificity_fear)))
TP_both, FP_both, TN_both, FN_both = perf_measure(list(Y_test_both), list(predictions_SVM_both))
sensitivity_both = TP_both / (TP_both+FN_both)
specificity_both = TN_both / (TN_both + FP_both)
print("Sensitivity Measure for Both Classification: {sensitivity_both}".format(sensitivity_both=str(sensitivity_both)))
print("Specificity Measure for Both Classification: {specificity_both}".format(specificity_both=str(specificity_both)))
# # Let's try under-sampling for anger classification
# +
import seaborn as sns
# remove (2231 - 781) = 1450 negative samples from overall set for anger
# this will ensure that the number of positive and negative samples are equal
pos_anger_df = df_anger.loc[df_anger['label numerical'] == 1]
neg_anger_df = df_anger.loc[df_anger['label numerical'] == 0].sample(n=781, random_state=42)
normalized_anger_df = pd.concat([pos_anger_df, neg_anger_df])
#plot the dataset after the undersampling
plt.figure(figsize=(8, 8))
sns.countplot('label numerical', data=normalized_anger_df)
plt.title('Balanced Classes')
plt.show()
# -
# # Repeat steps from above for anger classification again after undersamplilng
# +
# Train/test split (default 75/25) on the balanced (undersampled) anger data.
X_train_anger, X_test_anger, Y_train_anger, Y_test_anger = train_test_split(normalized_anger_df['description'],
                                                                            normalized_anger_df['label numerical'],
                                                                            random_state=1)
print('Number of rows in the total set for anger: {}'.format(normalized_anger_df.shape[0]))
print('Number of rows in the training set for anger: {}'.format(X_train_anger.shape[0]))
print('Number of rows in the test set for anger: {}'.format(X_test_anger.shape[0]))
###################################################################################
# instantiate Countvectorizer method
count_vector_anger = CountVectorizer()
# fit training data and return matrix
training_data_anger = count_vector_anger.fit_transform(X_train_anger)
# transform testing data and return matrix (vocabulary learned on train only)
testing_data_anger = count_vector_anger.transform(X_test_anger)
###################################################################################
# Classifier - Algorithm - SVM
# fit the training dataset on the classifier
SVM_anger = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
SVM_anger.fit(training_data_anger,Y_train_anger)
# predict the labels on validation dataset
predictions_SVM_anger = SVM_anger.predict(testing_data_anger)
###################################################################################
print('Classification report for SVM undersampled anger classification: ')
print('---------------------------------------------------------- ')
# FIX: target_names maps onto sorted labels [0, 1]; label 0 is "none" and label 1
# is "anger" (see pos_anger_df above), so the names must be given in that order.
print(classification_report(Y_test_anger, predictions_SVM_anger, target_names = ['none', 'anger']))
print('---------------------------------------------------------- ')
# FIX: sklearn metric signatures are (y_true, y_pred); predictions were passed
# first, which silently swapped the reported precision and recall.
print('Accuracy score: ', format(accuracy_score(Y_test_anger, predictions_SVM_anger)))
print('Precision score: ', format(precision_score(Y_test_anger, predictions_SVM_anger)))
print('Recall score: ', format(recall_score(Y_test_anger, predictions_SVM_anger)))
print('F1 score: ', format(f1_score(Y_test_anger, predictions_SVM_anger)))
print('---------------------------------------------------------- ')
# FIX: confusion_matrix rows/columns are ordered label 0 ("none") then 1 ("anger").
labels = ['none', 'anger']
cm = confusion_matrix(list(Y_test_anger), predictions_SVM_anger)
print("Confusion matrix SVM undersampled anger: \n")
print(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion Matrix SVM Undersampled Anger')
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.savefig('confusion_matrix_SVM_undersampled_anger.png')
plt.show()
TP_anger, FP_anger, TN_anger, FN_anger = perf_measure(list(Y_test_anger), list(predictions_SVM_anger))
sensitivity_anger = TP_anger / (TP_anger+FN_anger)
specificity_anger = TN_anger / (TN_anger + FP_anger)
print("Sensitivity Measure for Anger SVM Classification after undersampling: {sensitivity_anger}".format(sensitivity_anger=str(sensitivity_anger)))
print("Specificity Measure for Anger SVM Classification after undersampling: {specificity_anger}".format(specificity_anger=str(specificity_anger)))
# -
# # List of contractions to expand most of the contracted forms to their original forms such as "don't" to "do not"
# Lookup table used by spacy_cleaner: token -> expanded form. Both capitalized
# and lowercase "I" variants are listed because matching is exact (case-sensitive).
contraction_mapping = {"ain't": "is not", "aren't": "are not","can't": "cannot",
                       "can't've": "cannot have", "'cause": "because", "could've": "could have",
                       "couldn't": "could not", "couldn't've": "could not have","didn't": "did not",
                       "doesn't": "does not", "don't": "do not", "hadn't": "had not",
                       "hadn't've": "had not have", "hasn't": "has not", "haven't": "have not",
                       "he'd": "he would", "he'd've": "he would have", "he'll": "he will",
                       "he'll've": "he will have", "he's": "he is", "how'd": "how did",
                       "how'd'y": "how do you", "how'll": "how will", "how's": "how is",
                       "I'd": "I would", "I'd've": "I would have", "I'll": "I will",
                       "I'll've": "I will have","I'm": "I am", "I've": "I have",
                       "i'd": "i would", "i'd've": "i would have", "i'll": "i will",
                       "i'll've": "i will have","i'm": "i am", "i've": "i have",
                       "isn't": "is not", "it'd": "it would", "it'd've": "it would have",
                       "it'll": "it will", "it'll've": "it will have","it's": "it is",
                       "let's": "let us", "ma'am": "madam", "mayn't": "may not",
                       "might've": "might have","mightn't": "might not","mightn't've": "might not have",
                       "must've": "must have", "mustn't": "must not", "mustn't've": "must not have",
                       "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock",
                       "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not",
                       "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would",
                       "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have",
                       "she's": "she is", "should've": "should have", "shouldn't": "should not",
                       "shouldn't've": "should not have", "so've": "so have","so's": "so as",
                       "this's": "this is",
                       "that'd": "that would", "that'd've": "that would have","that's": "that is",
                       "there'd": "there would", "there'd've": "there would have","there's": "there is",
                       "here's": "here is",
                       "they'd": "they would", "they'd've": "they would have", "they'll": "they will",
                       "they'll've": "they will have", "they're": "they are", "they've": "they have",
                       "to've": "to have", "wasn't": "was not", "we'd": "we would",
                       "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have",
                       "we're": "we are", "we've": "we have", "weren't": "were not",
                       "what'll": "what will", "what'll've": "what will have", "what're": "what are",
                       "what's": "what is", "what've": "what have", "when's": "when is",
                       "when've": "when have", "where'd": "where did", "where's": "where is",
                       "where've": "where have", "who'll": "who will", "who'll've": "who will have",
                       "who's": "who is", "who've": "who have", "why's": "why is",
                       "why've": "why have", "will've": "will have", "won't": "will not",
                       "won't've": "will not have", "would've": "would have", "wouldn't": "would not",
                       "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would",
                       "y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have",
                       "you'd": "you would", "you'd've": "you would have", "you'll": "you will",
                       "you'll've": "you will have", "you're": "you are", "you've": "you have" }
# # Function utilized to clean up text in description field utilizing spacy and regex
# +
import codecs
import unidecode
import re
import spacy
# NOTE(review): the 'en' shortcut link is deprecated in spaCy v2.3+ and removed in
# v3 — newer installs need spacy.load('en_core_web_sm'); confirm the pinned version.
nlp = spacy.load('en')
def spacy_cleaner(text):
    """Normalize a raw description string for vectorization.

    Pipeline: transliterate to ASCII, expand contractions, lemmatize with
    spaCy, drop punctuation/whitespace/numbers/URLs/@-mentions, strip
    non-alphabetic characters, and collapse runs of 3+ repeated characters
    down to 2 (crude spelling normalization).
    """
    try:
        # Some descriptions contain literal escape sequences; decode them first.
        decoded = unidecode.unidecode(codecs.decode(text, 'unicode_escape'))
    # FIX: was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        decoded = unidecode.unidecode(text)
    # Unify curly apostrophes so contraction lookup keys match.
    apostrophe_handled = re.sub("’", "'", decoded)
    expanded = ' '.join([contraction_mapping.get(t, t) for t in apostrophe_handled.split(" ")])
    parsed = nlp(expanded)
    final_tokens = []
    for t in parsed:
        if t.is_punct or t.is_space or t.like_num or t.like_url or str(t).startswith('@'):
            pass
        else:
            if t.lemma_ == '-PRON-':
                # spaCy v2 lemmatizes pronouns to '-PRON-'; keep the surface form instead.
                final_tokens.append(str(t))
            else:
                sc_removed = re.sub("[^a-zA-Z]", '', str(t.lemma_))
                if len(sc_removed) > 1:
                    final_tokens.append(sc_removed)
    joined = ' '.join(final_tokens)
    # Collapse character runs, e.g. "soooo" -> "soo".
    spell_corrected = re.sub(r'(.)\1+', r'\1\1', joined)
    return spell_corrected
# -
# -
# # Create new column for df_anger called clean_text that contains each description cleaned up
df_anger['clean_text'] = [spacy_cleaner(t) for t in df_anger['description']]
# # Utilize RandomOverSampler in order to oversample the dataset and utilize NB again to compare metrics after oversampling
# - RandomOverSampler repeats some entries of the minority class to balance the data
# +
from imblearn.over_sampling import SMOTE, RandomOverSampler
from imblearn.pipeline import make_pipeline
from sklearn.model_selection import KFold
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
#tvec = TfidfVectorizer(stop_words=None, max_features=100000, ngram_range=(1, 3))
# lr = LogisticRegression()
# ROS_pipeline = make_pipeline(tvec, RandomOverSampler(random_state=777),lr)
# SMOTE_pipeline = make_pipeline(tvec, SMOTE(random_state=777), lr)
# Fit TfidfVectorizer and use Tfidf representation of texts to oversample
tv = TfidfVectorizer(stop_words=None, max_features=100000)
testing_tfidf = tv.fit_transform(df_anger['clean_text'])
ros = RandomOverSampler(random_state=777)
# FIX: fit_sample was deprecated in imbalanced-learn 0.4 and removed in 0.8;
# fit_resample is the supported name and behaves identically.
X_ROS, y_ROS = ros.fit_resample(testing_tfidf, df_anger['label numerical'])
# df_labelnumerical_temp = df_anger['label numerical']
'''smt = SMOTE(random_state=777)#, k_neighbors=1)
X_SMOTE, y_SMOTE = smt.fit_sample(testing_tfidf, df_anger['label numerical'])
pd.DataFrame(X_SMOTE.todense(), columns=tv.get_feature_names())'''
###################################################################################
'''
loop through all elements of labels and count all positive and negative instances
in order to compare if their counts are balanced
'''
pos_count = 0
neg_count = 0
for y in list(y_ROS):
    if y == 1:
        pos_count += 1
    else:
        neg_count += 1
#plot the dataset after the oversampling
plt.figure(figsize=(8, 8))
plt.bar(['0', '1'], [neg_count, pos_count], color=('blue', 'orange'))
plt.ylabel('count')
plt.xlabel('label numerical')
plt.title('Balanced Classes')
plt.show()
###################################################################################
# Train/test split directly on the oversampled Tfidf matrix.
X_train_anger, X_test_anger, Y_train_anger, Y_test_anger = train_test_split(X_ROS,
                                                                            y_ROS,
                                                                            random_state=1)
#print('Number of rows in the total set for anger: {}'.format(normalized_anger_df.shape[0]))
print('Number of rows in the training set for anger: {}'.format(X_train_anger.shape[0]))
print('Number of rows in the test set for anger: {}'.format(X_test_anger.shape[0]))
###################################################################################
# Classifier - Algorithm - SVM
# fit the training dataset on the classifier
SVM_anger = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
SVM_anger.fit(X_train_anger,Y_train_anger)
# predict the labels on validation dataset
predictions_SVM_anger = SVM_anger.predict(X_test_anger)
###################################################################################
print('Classification report for SVM oversampled anger classification: ')
print('---------------------------------------------------------- ')
# FIX: target_names maps onto sorted labels [0, 1] -> label 0 is "none", label 1 is "anger".
print(classification_report(Y_test_anger, predictions_SVM_anger, target_names = ['none', 'anger']))
print('---------------------------------------------------------- ')
# FIX: sklearn metric signatures are (y_true, y_pred); predictions were passed
# first, which silently swapped the reported precision and recall.
print('Accuracy score: ', format(accuracy_score(Y_test_anger, predictions_SVM_anger)))
print('Precision score: ', format(precision_score(Y_test_anger, predictions_SVM_anger)))
print('Recall score: ', format(recall_score(Y_test_anger, predictions_SVM_anger)))
print('F1 score: ', format(f1_score(Y_test_anger, predictions_SVM_anger)))
print('---------------------------------------------------------- ')
# FIX: confusion_matrix rows/columns are ordered label 0 ("none") then 1 ("anger").
labels = ['none', 'anger']
cm = confusion_matrix(list(Y_test_anger), predictions_SVM_anger)
print("Confusion matrix SVM oversampled anger: \n")
print(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion Matrix SVM Oversampled Anger')
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.savefig('confusion_matrix_SVM_oversampled_anger.png')
plt.show()
TP_anger, FP_anger, TN_anger, FN_anger = perf_measure(list(Y_test_anger), list(predictions_SVM_anger))
sensitivity_anger = TP_anger / (TP_anger+FN_anger)
specificity_anger = TN_anger / (TN_anger + FP_anger)
print("Sensitivity Measure for SVM Anger Classification after oversampling: {sensitivity_anger}".format(sensitivity_anger=str(sensitivity_anger)))
print("Specificity Measure for SVM Anger Classification after oversampling: {specificity_anger}".format(specificity_anger=str(specificity_anger)))
# FIX: persist the metrics with the corrected (y_true, y_pred) argument order.
measures_dict_angry = {'accuracy': accuracy_score(Y_test_anger, predictions_SVM_anger),
                       'precision': precision_score(Y_test_anger, predictions_SVM_anger),
                       'recall': recall_score(Y_test_anger, predictions_SVM_anger),
                       'f1': f1_score(Y_test_anger, predictions_SVM_anger),
                       'sensitivity': sensitivity_anger,
                       'specificity': specificity_anger,
                       'name': 'Anger SVM Oversampled'}
anger_oversampled_SVM_measures = Measures('anger_oversampled_SVM_measures', **measures_dict_angry)
anger_oversampled_SVM_measures.write_to_file()
json_output['cases'].append(measures_dict_angry)
# -
# # Let's try under-sampling for fear classification
# +
# remove negative samples from overall set for fear
# this will ensure that the number of positive and negative samples are equal
pos_fear_df = df_fear.loc[df_fear['label numerical'] == 1]
neg_fear_df = df_fear.loc[df_fear['label numerical'] == 0].sample(n=317, random_state=42)
normalized_fear_df = pd.concat([pos_fear_df, neg_fear_df])
#plot the dataset after the undersampling
plt.figure(figsize=(8, 8))
sns.countplot('label numerical', data=normalized_fear_df)
plt.title('Balanced Classes')
plt.show()
# -
# # Repeat steps from above for fear classification again after undersamplilng
# +
# Train/test split on the balanced (undersampled) fear data.
X_train_fear, X_test_fear, Y_train_fear, Y_test_fear = train_test_split(normalized_fear_df['description'],
                                                                        normalized_fear_df['label numerical'],
                                                                        random_state=1)
# FIX: these messages previously said "anger" (copy/paste) although they report fear counts.
print('Number of rows in the total set for fear: {}'.format(normalized_fear_df.shape[0]))
print('Number of rows in the training set for fear: {}'.format(X_train_fear.shape[0]))
print('Number of rows in the test set for fear: {}'.format(X_test_fear.shape[0]))
###################################################################################
# instantiate Countvectorizer method
count_vector_fear = CountVectorizer()
# fit training data and return matrix
training_data_fear = count_vector_fear.fit_transform(X_train_fear)
# transform testing data and return matrix (vocabulary learned on train only)
testing_data_fear = count_vector_fear.transform(X_test_fear)
###################################################################################
# Classifier - Algorithm - SVM
# fit the training dataset on the classifier
SVM_fear = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
SVM_fear.fit(training_data_fear,Y_train_fear)
# predict the labels on validation dataset
predictions_SVM_fear = SVM_fear.predict(testing_data_fear)
###################################################################################
print('Classification report for SVM undersampled fear classification: ')
print('---------------------------------------------------------- ')
# FIX: target_names maps onto sorted labels [0, 1] -> label 0 is "none", label 1 is "fear".
print(classification_report(Y_test_fear, predictions_SVM_fear, target_names = ['none', 'fear']))
print('---------------------------------------------------------- ')
# FIX: sklearn metric signatures are (y_true, y_pred); predictions were passed
# first, which silently swapped the reported precision and recall.
print('Accuracy score: ', format(accuracy_score(Y_test_fear, predictions_SVM_fear)))
print('Precision score: ', format(precision_score(Y_test_fear, predictions_SVM_fear)))
print('Recall score: ', format(recall_score(Y_test_fear, predictions_SVM_fear)))
print('F1 score: ', format(f1_score(Y_test_fear, predictions_SVM_fear)))
print('---------------------------------------------------------- ')
# FIX: confusion_matrix rows/columns are ordered label 0 ("none") then 1 ("fear").
labels = ['none', 'fear']
cm = confusion_matrix(list(Y_test_fear), predictions_SVM_fear)
print("Confusion matrix SVM undersampled fear: \n")
print(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion Matrix SVM Undersampled Fear')
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.savefig('confusion_matrix_SVM_undersampled_fear.png')
plt.show()
TP_fear, FP_fear, TN_fear, FN_fear = perf_measure(list(Y_test_fear), list(predictions_SVM_fear))
sensitivity_fear = TP_fear / (TP_fear+FN_fear)
specificity_fear = TN_fear / (TN_fear + FP_fear)
print("Sensitivity Measure for SVM Fear Classification after undersampling: {sensitivity_fear}".format(sensitivity_fear=str(sensitivity_fear)))
print("Specificity Measure for SVM Fear Classification after undersampling: {specificity_fear}".format(specificity_fear=str(specificity_fear)))
# -
# # Create new column for df_fear called clean_text that contains each description cleaned up
df_fear['clean_text'] = [spacy_cleaner(t) for t in df_fear['description']]
# # Utilize RandomOverSampler in order to oversample the dataset and utilize NB again to compare metrics after oversampling
# - RandomOverSampler repeats some entries of the minority class to balance the data
# +
from imblearn.over_sampling import SMOTE, RandomOverSampler
from imblearn.pipeline import make_pipeline
from sklearn.model_selection import KFold
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
#tvec = TfidfVectorizer(stop_words=None, max_features=100000, ngram_range=(1, 3))
# lr = LogisticRegression()
# ROS_pipeline = make_pipeline(tvec, RandomOverSampler(random_state=777),lr)
# SMOTE_pipeline = make_pipeline(tvec, SMOTE(random_state=777), lr)
# Fit TfidfVectorizer and use Tfidf representation of texts to oversample
tv = TfidfVectorizer(stop_words=None, max_features=100000)
testing_tfidf = tv.fit_transform(df_fear['clean_text'])
ros = RandomOverSampler(random_state=777)
# FIX: fit_sample was deprecated in imbalanced-learn 0.4 and removed in 0.8;
# fit_resample is the supported name and behaves identically.
X_ROS, y_ROS = ros.fit_resample(testing_tfidf, df_fear['label numerical'])
# df_labelnumerical_temp = df_anger['label numerical']
'''smt = SMOTE(random_state=777)#, k_neighbors=1)
X_SMOTE, y_SMOTE = smt.fit_sample(testing_tfidf, df_anger['label numerical'])
pd.DataFrame(X_SMOTE.todense(), columns=tv.get_feature_names())'''
###################################################################################
'''
loop through all elements of labels and count all positive and negative instances
in order to compare if their counts are balanced
'''
pos_count = 0
neg_count = 0
for y in list(y_ROS):
    if y == 1:
        pos_count += 1
    else:
        neg_count += 1
#plot the dataset after the oversampling
plt.figure(figsize=(8, 8))
plt.bar(['0', '1'], [neg_count, pos_count], color=('blue', 'orange'))
plt.ylabel('count')
plt.xlabel('label numerical')
plt.title('Balanced Classes')
plt.show()
###################################################################################
# Train/test split directly on the oversampled Tfidf matrix.
X_train_fear, X_test_fear, Y_train_fear, Y_test_fear = train_test_split(X_ROS,
                                                                        y_ROS,
                                                                        random_state=1)
#print('Number of rows in the total set for fear: {}'.format(normalized_fear_df.shape[0]))
print('Number of rows in the training set for fear: {}'.format(X_train_fear.shape[0]))
print('Number of rows in the test set for fear: {}'.format(X_test_fear.shape[0]))
###################################################################################
# Classifier - Algorithm - SVM
# fit the training dataset on the classifier
SVM_fear = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
SVM_fear.fit(X_train_fear,Y_train_fear)
# predict the labels on validation dataset
predictions_SVM_fear = SVM_fear.predict(X_test_fear)
###################################################################################
print('Classification report for SVM oversampled fear classification: ')
print('---------------------------------------------------------- ')
# FIX: target_names maps onto sorted labels [0, 1] -> label 0 is "none", label 1 is "fear".
print(classification_report(Y_test_fear, predictions_SVM_fear, target_names = ['none', 'fear']))
print('---------------------------------------------------------- ')
# FIX: sklearn metric signatures are (y_true, y_pred); predictions were passed
# first, which silently swapped the reported precision and recall.
print('Accuracy score: ', format(accuracy_score(Y_test_fear, predictions_SVM_fear)))
print('Precision score: ', format(precision_score(Y_test_fear, predictions_SVM_fear)))
print('Recall score: ', format(recall_score(Y_test_fear, predictions_SVM_fear)))
print('F1 score: ', format(f1_score(Y_test_fear, predictions_SVM_fear)))
print('---------------------------------------------------------- ')
# FIX: confusion_matrix rows/columns are ordered label 0 ("none") then 1 ("fear").
labels = ['none', 'fear']
cm = confusion_matrix(list(Y_test_fear), predictions_SVM_fear)
print("Confusion matrix SVM oversampled fear: \n")
print(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion Matrix SVM Oversampled Fear')
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.savefig('confusion_matrix_SVM_oversampled_fear.png')
plt.show()
TP_fear, FP_fear, TN_fear, FN_fear = perf_measure(list(Y_test_fear), list(predictions_SVM_fear))
sensitivity_fear = TP_fear / (TP_fear+FN_fear)
specificity_fear = TN_fear / (TN_fear + FP_fear)
print("Sensitivity Measure for SVM Fear Classification after oversampling: {sensitivity_fear}".format(sensitivity_fear=str(sensitivity_fear)))
print("Specificity Measure for SVM Fear Classification after oversampling: {specificity_fear}".format(specificity_fear=str(specificity_fear)))
# FIX: persist the metrics with the corrected (y_true, y_pred) argument order.
measures_dict_fear = {'accuracy': accuracy_score(Y_test_fear, predictions_SVM_fear),
                      'precision': precision_score(Y_test_fear, predictions_SVM_fear),
                      'recall': recall_score(Y_test_fear, predictions_SVM_fear),
                      'f1': f1_score(Y_test_fear, predictions_SVM_fear),
                      'sensitivity': sensitivity_fear,
                      'specificity': specificity_fear,
                      'name': 'Fear SVM Oversampled'}
fear_oversampled_SVM_measures = Measures('fear_oversampled_SVM_measures', **measures_dict_fear)
fear_oversampled_SVM_measures.write_to_file()
json_output['cases'].append(measures_dict_fear)
# -
# # Let's try under-sampling for both classification
# +
# remove negative samples from overall set for the combined ("both") labels
# this will ensure that the number of positive and negative samples are equal
pos_both_df = df_both.loc[df_both['label numerical'] == 1]
neg_both_df = df_both.loc[df_both['label numerical'] == 0].sample(n=186, random_state=42)
normalized_both_df = pd.concat([pos_both_df, neg_both_df])
#plot the dataset after the undersampling
plt.figure(figsize=(8, 8))
sns.countplot('label numerical', data=normalized_both_df)
plt.title('Balanced Classes')
plt.show()
# -
# # Repeat steps from above for both classification again after undersamplilng
# +
# Train/test split on the balanced (undersampled) "both" data.
X_train_both, X_test_both, Y_train_both, Y_test_both = train_test_split(normalized_both_df['description'],
                                                                        normalized_both_df['label numerical'],
                                                                        random_state=1)
print('Number of rows in the total set for both: {}'.format(normalized_both_df.shape[0]))
print('Number of rows in the training set for both: {}'.format(X_train_both.shape[0]))
print('Number of rows in the test set for both: {}'.format(X_test_both.shape[0]))
###################################################################################
# instantiate Countvectorizer method
count_vector_both = CountVectorizer()
# fit training data and return matrix
training_data_both = count_vector_both.fit_transform(X_train_both)
# transform testing data and return matrix (vocabulary learned on train only)
testing_data_both = count_vector_both.transform(X_test_both)
###################################################################################
# Classifier - Algorithm - SVM
# fit the training dataset on the classifier
SVM_both = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
SVM_both.fit(training_data_both,Y_train_both)
# predict the labels on validation dataset
predictions_SVM_both = SVM_both.predict(testing_data_both)
###################################################################################
print('Classification report for SVM undersampled both classification: ')
print('---------------------------------------------------------- ')
# FIX: target_names maps onto sorted labels [0, 1] -> label 0 is "none", label 1 is "both".
print(classification_report(Y_test_both, predictions_SVM_both, target_names = ['none', 'both']))
print('---------------------------------------------------------- ')
# FIX: sklearn metric signatures are (y_true, y_pred); predictions were passed
# first, which silently swapped the reported precision and recall.
print('Accuracy score: ', format(accuracy_score(Y_test_both, predictions_SVM_both)))
print('Precision score: ', format(precision_score(Y_test_both, predictions_SVM_both)))
print('Recall score: ', format(recall_score(Y_test_both, predictions_SVM_both)))
print('F1 score: ', format(f1_score(Y_test_both, predictions_SVM_both)))
print('---------------------------------------------------------- ')
# FIX: confusion_matrix rows/columns are ordered label 0 ("none") then 1 ("both").
labels = ['none', 'both']
cm = confusion_matrix(list(Y_test_both), predictions_SVM_both)
print("Confusion matrix SVM undersampled both: \n")
print(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion Matrix SVM Undersampled Both')
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.savefig('confusion_matrix_SVM_undersampled_both.png')
plt.show()
TP_both, FP_both, TN_both, FN_both = perf_measure(list(Y_test_both), list(predictions_SVM_both))
sensitivity_both = TP_both / (TP_both+FN_both)
specificity_both = TN_both / (TN_both + FP_both)
print("Sensitivity Measure for SVM Both Classification after undersampling: {sensitivity_both}".format(sensitivity_both=str(sensitivity_both)))
print("Specificity Measure for SVM Both Classification after undersampling: {specificity_both}".format(specificity_both=str(specificity_both)))
# -
# # Create new column for df_both called clean_text that contains each description cleaned up
df_both['clean_text'] = [spacy_cleaner(t) for t in df_both['description']]
# # Utilize RandomOverSampler in order to oversample the dataset and utilize NB again to compare metrics after oversampling
# - RandomOverSampler repeats some entries of the minority class to balance the data
# +
from imblearn.over_sampling import SMOTE, RandomOverSampler
from imblearn.pipeline import make_pipeline
from sklearn.model_selection import KFold
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
#tvec = TfidfVectorizer(stop_words=None, max_features=100000, ngram_range=(1, 3))
# lr = LogisticRegression()
# ROS_pipeline = make_pipeline(tvec, RandomOverSampler(random_state=777),lr)
# SMOTE_pipeline = make_pipeline(tvec, SMOTE(random_state=777), lr)
# Fit TfidfVectorizer and use Tfidf representation of texts to oversample
tv = TfidfVectorizer(stop_words=None, max_features=100000)
testing_tfidf = tv.fit_transform(df_both['clean_text'])
ros = RandomOverSampler(random_state=777)
# FIX: fit_sample was deprecated in imbalanced-learn 0.4 and removed in 0.8;
# fit_resample is the supported name and behaves identically.
X_ROS, y_ROS = ros.fit_resample(testing_tfidf, df_both['label numerical'])
# df_labelnumerical_temp = df_anger['label numerical']
'''smt = SMOTE(random_state=777)#, k_neighbors=1)
X_SMOTE, y_SMOTE = smt.fit_sample(testing_tfidf, df_anger['label numerical'])
pd.DataFrame(X_SMOTE.todense(), columns=tv.get_feature_names())'''
###################################################################################
'''
loop through all elements of labels and count all positive and negative instances
in order to compare if their counts are balanced
'''
pos_count = 0
neg_count = 0
for y in list(y_ROS):
    if y == 1:
        pos_count += 1
    else:
        neg_count += 1
#plot the dataset after the oversampling
plt.figure(figsize=(8, 8))
plt.bar(['0', '1'], [neg_count, pos_count], color=('blue', 'orange'))
plt.ylabel('count')
plt.xlabel('label numerical')
plt.title('Balanced Classes')
plt.show()
###################################################################################
# Train/test split directly on the oversampled Tfidf matrix.
X_train_both, X_test_both, Y_train_both, Y_test_both = train_test_split(X_ROS,
                                                                        y_ROS,
                                                                        random_state=1)
#print('Number of rows in the total set for both: {}'.format(normalized_both_df.shape[0]))
print('Number of rows in the training set for both: {}'.format(X_train_both.shape[0]))
print('Number of rows in the test set for both: {}'.format(X_test_both.shape[0]))
###################################################################################
# Classifier - Algorithm - SVM
# fit the training dataset on the classifier
SVM_both = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
SVM_both.fit(X_train_both,Y_train_both)
# predict the labels on validation dataset
predictions_SVM_both = SVM_both.predict(X_test_both)
###################################################################################
print('Classification report for SVM oversampled both classification: ')
print('---------------------------------------------------------- ')
# FIX: target_names maps onto sorted labels [0, 1] -> label 0 is "none", label 1 is "both".
print(classification_report(Y_test_both, predictions_SVM_both, target_names = ['none', 'both']))
print('---------------------------------------------------------- ')
# FIX: sklearn metric signatures are (y_true, y_pred); predictions were passed
# first, which silently swapped the reported precision and recall.
print('Accuracy score: ', format(accuracy_score(Y_test_both, predictions_SVM_both)))
print('Precision score: ', format(precision_score(Y_test_both, predictions_SVM_both)))
print('Recall score: ', format(recall_score(Y_test_both, predictions_SVM_both)))
print('F1 score: ', format(f1_score(Y_test_both, predictions_SVM_both)))
print('---------------------------------------------------------- ')
# FIX: confusion_matrix rows/columns are ordered label 0 ("none") then 1 ("both").
labels = ['none', 'both']
cm = confusion_matrix(list(Y_test_both), predictions_SVM_both)
print("Confusion matrix SVM oversampled both: \n")
print(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion Matrix SVM Oversampled Both')
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.savefig('confusion_matrix_SVM_oversampled_both.png')
plt.show()
TP_both, FP_both, TN_both, FN_both = perf_measure(list(Y_test_both), list(predictions_SVM_both))
sensitivity_both = TP_both / (TP_both+FN_both)
specificity_both = TN_both / (TN_both + FP_both)
print("Sensitivity Measure for SVM Both Classification after oversampling: {sensitivity_both}".format(sensitivity_both=str(sensitivity_both)))
print("Specificity Measure for SVM Both Classification after oversampling: {specificity_both}".format(specificity_both=str(specificity_both)))
# FIX: persist the metrics with the corrected (y_true, y_pred) argument order.
measures_dict_both = {'accuracy': accuracy_score(Y_test_both, predictions_SVM_both),
                      'precision': precision_score(Y_test_both, predictions_SVM_both),
                      'recall': recall_score(Y_test_both, predictions_SVM_both),
                      'f1': f1_score(Y_test_both, predictions_SVM_both),
                      'sensitivity': sensitivity_both,
                      'specificity': specificity_both,
                      'name': 'Both SVM Oversampled'}
both_oversampled_SVM_measures = Measures('both_oversampled_SVM_measures', **measures_dict_both)
both_oversampled_SVM_measures.write_to_file()
json_output['cases'].append(measures_dict_both)
# -
import json
# Persist every accumulated measures dict to disk as JSON.
with open('SVM_measures.json', 'w') as outfile:
    json.dump(json_output, outfile)
# +
# Round-trip check: reload the file and show the first recorded case.
with open('SVM_measures.json') as json_file:
    data = json.load(json_file)
print(data['cases'][0])
| .ipynb_checkpoints/support_vector_machine-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Fixed seed so the synthetic sample (and the injected outliers) are reproducible.
np.random.seed(1)
mu, sigma = 50, 10
s = np.random.normal(mu, sigma, 100)
# Manually inject two extreme values so the |z| > 3 rule has something to find.
s[85] = 120
s[15] = -120
s
df = pd.DataFrame(s,columns=['Data'])
df.head()
# Population z-score (ddof=0 divides by N, not N-1).
df['col_zscore'] = (df['Data'] - df['Data'].mean())/df['Data'].std(ddof=0)
# Flag as outlier (1) any point more than 3 standard deviations from the mean.
df['outlier'] = (abs(df['col_zscore'])> 3).astype(int)
df.tail()
# NOTE(review): value_counts()[1] raises KeyError if no outlier is flagged —
# safe here only because two outliers were injected above.
print ("Print number of outliers = " +str(df.outlier.value_counts()[1]))
df.outlier.unique()
df[df['outlier'] == 1]
| z-score test/z-score test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# ## Load the MNIST digit dataset
# +
#import pylab as plt
from sklearn.datasets import fetch_openml
# Download the MNIST dataset
# NOTE(review): depending on the sklearn version, X may come back as a
# DataFrame rather than an ndarray (as_frame default changed) -- confirm.
X, y = fetch_openml(name='mnist_784', version=1, data_id=None, return_X_y=True)
# normalize the sample into [0, 1]
X *= 1. / 255
# -
# ## Testing BCM model on MNIST dataset
# +
from plasticity.model import BCM
from plasticity.model.optimizer import Adam, SGD
# NOTE(review): this cell passes Adam(lr=...) while later cells use
# Adam(learning_rate=...); one of the two spellings is likely stale.
model = BCM(outputs=100, num_epochs=10, batch_size=1000, activation='relu',
            optimizer=Adam(lr=4e-2), interaction_strength=-0.25, verbose=False)
model.fit(X)
# +
from plasticity.utils import view_weights
view_weights (model.weights, dims=(28, 28))
# -
# ## BCM inpainting
# +
import numpy as np
import pylab as plt
idx = 1
reference_data = X[idx].reshape(28, 28)
half_input = reference_data.copy()
# nullify the lower section of the image
half_input[14:, :] = 0.
# prediction with linear activation function
prediction = np.einsum('ij, kj -> ik', model.weights, half_input.reshape(1, -1), optimize=True).T
# select the neuron connections with the highest response
best_neuron = model.weights[np.argmax(prediction)].reshape(28, 28)
inpaint = half_input.copy()
# use the neuron connections to complete the image
inpaint[14:, :] = best_neuron[14:, :]
# +
# Side-by-side: raw digit, masked input, winning neuron, inpainted result.
nc = np.amax(np.abs(model.weights))
fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4, figsize=(12, 12))
ax1.set_title('Raw Image')
ax1.imshow(reference_data, cmap='gray'); ax1.axis('off')
ax2.set_title('Half Input')
ax2.imshow(half_input, cmap='gray'); ax2.axis('off')
ax3.set_title('BCM neuron activation')
ax3.imshow(best_neuron, cmap='bwr', vmin=-nc, vmax=nc); ax3.axis('off')
ax4.set_title('Inpainting')
ax4.imshow(inpaint, cmap='bwr', vmin=-nc, vmax=nc); ax4.axis('off')
ax4.imshow(inpaint[:14, :], cmap='gray')
ax4.hlines(14, 0, 27, color='r', linestyle='dashed')
# -
# ## BCM classifier
# +
from sklearn.preprocessing import OneHotEncoder
# One-hot encode the digit labels so they can be fed as supervision to BCM.
# NOTE(review): `sparse=` is deprecated in sklearn >= 1.2 (sparse_output).
y_categorical = y.astype('int').reshape(-1, 1)
y_categorical = OneHotEncoder(sparse=False).fit_transform(y_categorical)
model = BCM(outputs=200, num_epochs=10, optimizer=Adam(learning_rate=4e-2), interaction_strength=-0.45,
            activation='Relu', batch_size=1000, verbose=False)
model.fit(X, y=y_categorical)
# +
idx = 50
reference_data = X[idx].reshape(28, 28)
# set the corresponding labels to null
# NOTE(review): reference_data only has 28 rows, so the [28*28:] slice is
# empty and this assignment is a no-op -- it was presumably meant for the
# flattened (image + label) vector; confirm the intent.
reference_data[28*28 :] = 0
prediction = model.predict(X[idx].reshape(1, -1), y=y_categorical[idx].reshape(1, -1))
# select the neuron connections with the highest response
best_neuron = model.weights[np.argmax(prediction)][:28*28].reshape(28, 28)
# extract the predicted label for each neuron as argmax of the label-connections
labels = model.weights[:, 28*28:].argmax(axis=1)
# (removed: a byte-identical duplicate recomputation of best_neuron)
# +
from collections import Counter
# extract the top 10 labels as the top-ranking connections
top_10 = sorted(zip(prediction.ravel(), labels), key=lambda x : x[0], reverse=True)[:10]
top_10 = [x[1] for x in top_10]
counter_labels = Counter(top_10)
# +
nc = np.amax(np.abs(model.weights))
print('Top 10 labels: {}'.format(counter_labels.most_common()))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12, 12))
ax1.set_title('Raw Image')
ax1.imshow(X[idx].reshape(28, 28), cmap='gray'); ax1.axis('off')
ax2.set_title('BCM neuron activation')
im = ax2.imshow(best_neuron, cmap='bwr', vmin=-nc, vmax=nc); ax2.axis('off')
# -
# ## BCM classifier performances
# +
from sklearn.model_selection import train_test_split
# Hold out 1/6 of MNIST for accuracy evaluation of the supervised BCM.
x_train, x_test, y_train, y_test = train_test_split(X, y_categorical, test_size=1/6, random_state=42)
model = BCM(outputs=1000, num_epochs=20, optimizer=Adam(learning_rate=4e-2), interaction_strength=-0.005,
            activation='Relu', batch_size=4000, verbose=False)
model.fit(x_train, y_train)
# +
from sklearn.metrics import accuracy_score
# Predicted digit = argmax of the label-connections of the winning neuron.
predictions = model.predict(X=x_test, y=y_test)
predictions = [model.weights[np.argmax(x)][28*28:].argmax() for x in predictions]
y_true = y_test.argmax(axis=1)
y_pred = np.asarray(predictions)
print('Accuracy score on test set: {:.3f}'.format(accuracy_score(y_true, y_pred)))
# +
# Prediction using the Top 10 ranking agreement
predictions = model.predict(X=x_test, y=y_test)
labels = model.weights[:, 28*28:].argmax(axis=1)
top_10 = []
for x in predictions:
  # order the predictions
  top = sorted(zip(x, labels), key=lambda x : x[0], reverse=True)[:10]
  top = [x[1] for x in top]
  # select the label as the most common prediction of the ranking
  counter_labels = Counter(top).most_common()
  top_10.append(counter_labels[0][0])
print('Accuracy score on test set: {:.3f}'.format(accuracy_score(y_true, top_10)))
# -
# ## Testing Hopfield model on MNIST dataset
# +
from plasticity.model import Hopfield
model = Hopfield(outputs=100, num_epochs=10, verbose=False)
model.fit(X)
# +
from plasticity.utils import view_weights
view_weights (model.weights, dims=(28, 28))
# -
# ## Hopfield inpainting
# +
import numpy as np
import pylab as plt
idx = 1
reference_data = X[idx].reshape(28, 28)
half_input = reference_data.copy()
# nullify the lower section of the image
half_input[14:, :] = 0.
# prediction with linear activation function
prediction = np.einsum('ij, kj -> ik', model.weights, half_input.reshape(1, -1), optimize=True).T
# select the neuron connections with the highest response
best_neuron = model.weights[np.argmax(prediction)].reshape(28, 28)
inpaint = half_input.copy()
# use the neuron connections to complete the image
inpaint[14:, :] = best_neuron[14:, :]
# +
# Same four-panel visualization as the BCM inpainting cell above.
nc = np.amax(np.abs(model.weights))
fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1, ncols=4, figsize=(12, 12))
ax1.set_title('Raw Image')
ax1.imshow(reference_data, cmap='gray'); ax1.axis('off')
ax2.set_title('Half Input')
ax2.imshow(half_input, cmap='gray'); ax2.axis('off')
ax3.set_title('Hopfield neuron activation')
ax3.imshow(best_neuron, cmap='bwr', vmin=-nc, vmax=nc); ax3.axis('off')
ax4.set_title('Inpainting')
ax4.imshow(inpaint, cmap='bwr', vmin=-nc, vmax=nc); ax4.axis('off')
ax4.imshow(inpaint[:14, :], cmap='gray')
ax4.hlines(14, 0, 27, color='r', linestyle='dashed')
# -
# ## Hopfield classifier
# +
from sklearn.preprocessing import OneHotEncoder
# One-hot encode the digit labels for supervised Hopfield training.
# NOTE(review): `sparse=` is deprecated in sklearn >= 1.2 (sparse_output).
y_categorical = y.astype('int').reshape(-1, 1)
y_categorical = OneHotEncoder(sparse=False).fit_transform(y_categorical)
model = Hopfield(outputs=200, num_epochs=10, optimizer=Adam(learning_rate=4e-2),
                 batch_size=1000, verbose=False)
model.fit(X, y=y_categorical)
# +
idx = 50
reference_data = X[idx].reshape(28, 28)
# set the corresponding labels to null
# NOTE(review): no-op -- reference_data has 28 rows, so [28*28:] is empty;
# presumably meant for the flattened (image + label) vector. Confirm.
reference_data[28*28 :] = 0
prediction = model.predict(X[idx].reshape(1, -1), y=y_categorical[idx].reshape(1, -1))
# select the neuron connections with the highest response
best_neuron = model.weights[np.argmax(prediction)][:28*28].reshape(28, 28)
# extract the predicted label for each neuron as argmax of the label-connections
labels = model.weights[:, 28*28:].argmax(axis=1)
# (removed: a byte-identical duplicate recomputation of best_neuron)
# +
from collections import Counter
# extract the top 10 labels as the top-ranking connections
top_10 = sorted(zip(prediction.ravel(), labels), key=lambda x : x[0], reverse=True)[:10]
top_10 = [x[1] for x in top_10]
counter_labels = Counter(top_10)
# +
nc = np.amax(np.abs(model.weights))
print('Top 10 labels: {}'.format(counter_labels.most_common()))
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12, 12))
ax1.set_title('Raw Image')
ax1.imshow(X[idx].reshape(28, 28), cmap='gray'); ax1.axis('off')
ax2.set_title('Hopfield neuron activation')
im = ax2.imshow(best_neuron, cmap='bwr', vmin=-nc, vmax=nc); ax2.axis('off')
# -
# ## Hopfield classifier performances
# +
from sklearn.model_selection import train_test_split
# Same evaluation protocol as the BCM performance section above.
x_train, x_test, y_train, y_test = train_test_split(X, y_categorical, test_size=1/6, random_state=42)
model = Hopfield(outputs=1000, num_epochs=20, optimizer=Adam(learning_rate=4e-2),
                 batch_size=4000, verbose=False)
model.fit(x_train, y_train)
# +
from sklearn.metrics import accuracy_score
# Predicted digit = argmax of the label-connections of the winning neuron.
predictions = model.predict(X=x_test, y=y_test)
predictions = [model.weights[np.argmax(x)][28*28:].argmax() for x in predictions]
y_true = y_test.argmax(axis=1)
y_pred = np.asarray(predictions)
print('Accuracy score on test set: {:.3f}'.format(accuracy_score(y_true, y_pred)))
# +
# Prediction using the Top 10 ranking agreement
predictions = model.predict(X=x_test, y=y_test)
labels = model.weights[:, 28*28:].argmax(axis=1)
top_10 = []
for x in predictions:
  # order the predictions
  top = sorted(zip(x, labels), key=lambda x : x[0], reverse=True)[:10]
  top = [x[1] for x in top]
  # select the label as the most common prediction of the ranking
  counter_labels = Counter(top).most_common()
  top_10.append(counter_labels[0][0])
print('Accuracy score on test set: {:.3f}'.format(accuracy_score(y_true, top_10)))
# -
| example/python_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + active=""
# import cPickle
# import numpy as np
# import pandas as pd
# import datetime
# import datetime
# import numpy as np
# import pandas as pd
# import sklearn
# import pandas.io.data as web
# import seaborn as sns
#
# from sklearn.linear_model import LogisticRegression
# from sklearn.lda import LDA
# from sklearn.qda import QDA
# from sklearn import ensemble
# from sklearn import preprocessing
# from sklearn import cross_validation
# from sklearn import grid_search
# from datetime import datetime
# from sklearn.ensemble import RandomForestClassifier
# from sklearn import neighbors
# from sklearn.ensemble import AdaBoostClassifier
# from sklearn.ensemble import GradientBoostingClassifier
# from sklearn.svm import SVC
# import operator
# import re
# from dateutil import parser
# from datetime import datetime
# import matplotlib
# import matplotlib.pyplot as plt
# %matplotlib inline
# -
# # Stock Market Forecast
# Welcome to my project, where in general terms I try and predict the movements of the stock market through the S&P 500. Throughout the project I will look at ways to try and improve my probability, or "hit score" as called by others, of predicting an up or down day for the S&P500. This means that looking at the daily returns of the S&P500, i.e. negative daily return or positive daily return, I can trade accordingly. Below we will start with visualizations of the predictors.
# As you can see above the Nasdaq will be a very strong predictor of the movements of the S&P500. The Dow Jones will also be another strong predictor. However, if you look at the returns of the three indices you will see that while the overall graphs look to have similar movements with steady increases over time, the day to day returns vary with the DOW having an up day and the nasdaq having a down day. Looking at other indices will help accout for those days.
# counts the number NAN values
def count_NANs(dataframe):
    """
    count number of NaN in dataframe

    Total cells minus non-null cells (DataFrame.count skips NaN).
    """
    rows, cols = dataframe.shape
    return rows * cols - dataframe.count().sum()
# The function below grabs all the variabls/indices I chose to represent/predict the market. I decided to add a high yield treasure bond indice because there is a known belief that high correlation exists between high yield bonds and the S&P500. The rest represent major markets throughout the world that have close ties to the U.S. stock market. The function "create_lagged_series" creates the lag for those functions
def getWebData(symbol,start,end):
    """Download daily prices for `symbol` from Yahoo and keep only the
    adjusted close plus its daily percentage return, with the ticker
    appended to each column name.

    NOTE(review): relies on the long-removed pandas `DataReader` Yahoo
    backend -- this only runs on the legacy Python 2 / old-pandas kernel.
    """
    df= DataReader(name=symbol,data_source='yahoo',start=start,end=end)
    # rename the last column ("Adj Close") to a space-free name, then
    # namespace every column with the ticker symbol
    df.columns.values[-1] = 'AdjClose'
    df.columns = df.columns + '_' + symbol
    df['Return_%s' %symbol] = df['AdjClose_%s' %symbol].pct_change()
    df=df[['AdjClose_%s' %symbol,'Return_%s' %symbol]]
    return df
# Smoke test against the FTSE/London symbol.
getWebData('^MCX','2012-1-1','2015-12-31')
def getAllStockData(output):
    """Download the predictor set (world indices, sector ETFs, a junk-bond
    ETF) plus the `output` target symbol for 2006-2016.

    Returns a list of two-column frames (AdjClose_*, Return_*), with the
    target frame last.
    """
    start=parser.parse('2006-1-1')
    end=parser.parse('2016-1-1')
    nikkei=getWebData('^N225',start,end)
    nasdaq=getWebData('^IXIC',start,end)
    frankfurt=getWebData('^GDAXI',start,end)
    london=getWebData('^MCX',start,end)
    paris=getWebData('^FCHI',start,end)
    hkong=getWebData('^HSI',start,end)
    technology=getWebData('VGT',start,end)
    australia=getWebData('^AXJO',start,end)
    energy=getWebData('VDE',start,end)
    djia=getWebData('^DJI',start,end)
    bond=getWebData('JNK',start,end)
    out=getWebData(output,start,end)
    frames=[nikkei,nasdaq, frankfurt,london,paris,hkong,technology,australia,energy,djia,bond,out]
    return frames
# +
def addFeatures(dataframe, adjclose, returns, n):
    """
    operates on two columns of dataframe:
    - n >= 2
    - given Return_* computes the return of day i respect to day i-n.
    - given AdjClose_* computes its moving average on n days

    Adds both derived columns in place; names are built from the ticker
    suffix of `adjclose` ("AdjClose_<sym>") and `returns` ("Return_<sym>").
    """
    return_n = adjclose[9:] + "_MultiDay" + str(n)
    dataframe[return_n] = dataframe[adjclose].pct_change(n)
    roll_n = returns[7:] + "_RolMean" + str(n)
    # pd.rolling_mean() was removed from pandas; Series.rolling(n).mean()
    # is the supported equivalent (same window and min_periods defaults).
    dataframe[roll_n] = dataframe[returns].rolling(n).mean()
# -
def applyRollMeanDelayedReturns(datasets, delta):
    """
    applies rolling mean and delayed returns to each dataframe in the list

    Each frame is expected to hold (AdjClose_*, Return_*) as its first two
    columns; one pair of derived columns is added per window in `delta`.
    """
    for frame in datasets:
        adjclose, returns = frame.columns[0], frame.columns[1]
        for window in delta:
            addFeatures(frame, adjclose, returns, window)
    return datasets
def mergeDataframes(datasets, index):
    """Outer-join the trailing frames together, then left-join the result
    onto the first frame (so the first frame's dates drive the output)."""
    trimmed = [frame.iloc[:, index:] for frame in datasets]
    others = trimmed[1].join(trimmed[2:], how='outer')
    return trimmed[0].join(others, how='left')
def fit_model(name, model, X_train, y_train, X_test, pred):
    """Fits a classification model (for our purposes this is LR, LDA and QDA)
    using the training data, then makes a prediction and subsequent "hit rate"
    for the test data."""
    model.fit(X_train, y_train)
    pred[name] = model.predict(X_test)
    # Create a series with 1 being correct direction, 0 being wrong
    # and then calculate the hit rate based on the actual direction
    # (works because both the prediction and "Actual" columns are +/-1).
    pred["%s_Correct" % name] = (1.0+pred[name]*pred["Actual"])/2.0
    hit_rate = np.mean(pred["%s_Correct" % name])
    # Python 2 print statement -- this notebook runs on a python2 kernel.
    print "%s: %.3f" % (name, hit_rate)
# +
# Build the full feature matrix for the S&P500 direction model: all
# indices/ETFs with 1..19-day multi-day returns and rolling means.
dataStocks3=getAllStockData('^GSPC')
dataStocks3=applyRollMeanDelayedReturns(dataStocks3,range(1,20))
dataStocks3=mergeDataframes(dataStocks3,0)
dataStocks3=dataStocks3.reset_index()
dataStocks3['Weekday'] = dataStocks3['Date'].dt.dayofweek
dataStocks3=dataStocks3.set_index('Date')
cols=list(dataStocks3)
# Move the Weekday column to the front.
cols.insert(0, cols.pop(cols.index('Weekday')))
#cols
# NOTE(review): .ix was removed from modern pandas (py2 kernel only).
dataStocks3 = dataStocks3.ix[:, cols]
# +
cols=list(dataStocks3)
# Drop the raw AdjClose_* price levels; keep only derived return features.
cols_to_drop = filter(lambda x: x.startswith('Adj'), cols)
dataStocks3= dataStocks3.drop(cols_to_drop, axis=1)
# Fill holiday gaps by time interpolation, then any leftovers with the mean.
dataStocks3=dataStocks3.interpolate(method='time')
dataStocks3=dataStocks3.fillna(dataStocks3.mean())
dataStocks3["Direction"] = np.sign(dataStocks3["Return_^GSPC"])
#dataStocks3.head()
#count_NANs(dataStocks3)
# +
# Shift the label back one day so features at day t predict day t+1.
dataStocks3['Direction'] = dataStocks3['Direction'].shift(-1)
dataStocks3 = dataStocks3.head(-1)
# +
le = preprocessing.LabelEncoder()
# NOTE(review): mixed str/number comparison below works only on Python 2.
dataStocks3.Direction[dataStocks3.Direction >= 0] = 'Up'
dataStocks3.Direction[dataStocks3.Direction < 0] = 'Down'
dataStocks3.Direction = le.fit(dataStocks3.Direction).transform(dataStocks3.Direction)
#dataStocks3.tail()
#dataStocks3['Direction']
dataStocks3.shape
# +
features=dataStocks3.columns[0:469]
target='Direction'
#features
X=dataStocks3[features]
#direction prection
y=dataStocks3[target]
features
# +
#X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.4, random_state=None)
# Chronological split: train before Nov 2014, test from Nov 2014 on.
start_test=parser.parse('2014-11-1')
X_train = X[X.index < start_test]
X_test = X[X.index >= start_test]
y_train = y[y.index < start_test]
y_test = y[y.index >= start_test]
# +
# Instantiate model
rf = ensemble.RandomForestClassifier(n_estimators=500,random_state=None)
# Train model on training set
rf.fit(X_train, y_train)
# -
# Evaluate model on train set
print "Accuracy: %0.3f" % rf.score(X_train, y_train)
# Evaluate model on test set
print "Accuracy: %0.3f" % rf.score(X_test, y_test)
# +
# Set list of values to grid search over
l = range(1, 10)
d = [10, 20, 30]
params = {'min_samples_leaf': l, 'max_depth': d}
# Perform grid search using list of values
gs = grid_search.GridSearchCV(
    estimator=ensemble.RandomForestClassifier(n_estimators=20),
    param_grid=params)
gs.fit(X_train, y_train)
# Get best value to use
print "Best Params:"
print gs.best_params_
# Get improvement
print "Accuracy using best param: %0.3f" % gs.best_score_
# -
def sorted(s, num):
    """Return the `num` largest values of Series `s`, re-indexed 0..num-1.

    NOTE(review): deliberately keeps the original name even though it
    shadows the builtin `sorted` -- the plotting cell below relies on it.
    """
    # Series.order() was removed from pandas; sort_values() is the
    # documented replacement with identical semantics here.
    tmp = s.sort_values(ascending=False)[:num]
    tmp.index = range(num)
    return tmp
# +
# Plot importances for all features
features = X.columns
feature_importances = rf.feature_importances_
features_df = pd.DataFrame({'Features': features, 'Importance Score': feature_importances})
# NOTE(review): `sorted` here is the notebook-defined top-N helper above,
# not the builtin -- it keeps the 15 largest values per column.
features_df=features_df.apply(lambda x: sorted(x, 15))
#features_df.sort_values('Importance Score', inplace=True, ascending=False)
sns.barplot(y='Features', x='Importance Score', data=features_df)
# -
# NOTE(review): `jim1` is not defined in this chunk -- scratch from an
# earlier session, presumably; these two cells raise NameError as-is.
jim1.shift(-1).head()
jim1.shift(-1).head(-1)
# ?pd.DataFrame.shift()
# +
#move 'Direction' values up back one
dataStocks3.shape
# -
dataStocks3.groupby('Direction').count()
# +
# +
# -
y.mean()
set(y)
# +
# -
# +
# Set list of values to grid search over
n = [1, 2, 3, 10, 20, 30, 100, 200, 300]
params = {'n_estimators': n}
# Perform grid search using list of values
gs = grid_search.GridSearchCV(
    estimator=ensemble.RandomForestClassifier(random_state=None),
    param_grid=params)
gs.fit(X_train, y_train)
# Get best value to use
print "Best Params:"
print gs.best_params_
# Get improvement
print "Accuracy of current model: %0.3f" % rf.score(X_test, y_test)
print "Accuracy using best param: %0.3f" % gs.best_score_
# -
# +
#adding lag
#lags=range(4)
#cols=list(dataStocks3)
#columns = filter(lambda x: x.startswith('Return'), cols)
#for column in columns:
#for lag in lags:
# newcolumn = column + str(lag)
#dataStocks3[newcolumn] = dataStocks3[column].shift(lag)
#dataStocks3.head()
# -
# +
import datetime
import numpy as np
import pandas as pd
import sklearn
from pandas.io.data import DataReader
from sklearn.linear_model import LogisticRegression
from sklearn.lda import LDA
from sklearn.qda import QDA
def create_lagged_series(symbol, start_date, end_date, lags=5):
    """Build a frame of daily % returns for `symbol` plus `lags` lagged
    %-return columns, trimmed to [start_date, end_date].

    Python 2 only: uses xrange and the removed pandas DataReader backend.
    """
    # Obtain stock information from Yahoo Finance
    # (extra year of history so the first lags are defined at start_date)
    ts = DataReader(symbol, "yahoo", start_date-datetime.timedelta(days=365), end_date)
    # Create the new lagged DataFrame
    tslag = pd.DataFrame(index=ts.index)
    tslag["Today"] = ts["Adj Close"]
    tslag["Volume"] = ts["Volume"]
    # Create the shifted lag series of prior trading period close values
    for i in xrange(0,lags):
        tslag["Lag%s" % str(i+1)] = ts["Adj Close"].shift(i+1)
    # Create the returns DataFrame
    tsret = pd.DataFrame(index=tslag.index)
    tsret["Adj Close"]=ts["Adj Close"]
    tsret["Today"] = tslag["Today"].pct_change()*100.0
    # If any of the values of percentage returns equal zero, set them to
    # a small number (stops issues with QDA model in scikit-learn)
    for i,x in enumerate(tsret["Today"]):
        if (abs(x) < 0.0001):
            tsret["Today"][i] = 0.0001
    # Create the lagged percentage returns columns
    for i in xrange(0,lags):
        tsret["Lag%s" % str(i+1)] = tslag["Lag%s" % str(i+1)].pct_change()*100.0
    tsret = tsret[tsret.index >= start_date]
    tsret.columns = tsret.columns + '_' + symbol
    return tsret
# -
#test lag
snpret=create_lagged_series("UUP", datetime.datetime(2013,1,10), datetime.datetime(2014,12,31), lags=5)
snpret.head()
#Output
#dataframes
dataStocks=getAllStockData('^GSPC')
dataStocks
# As seen in the cell above, the function returns the dataframes as a list for each index with the corresponding 0 or +1 for up or down day. In the above function it takes the adjusted closing price and stores it "today". It then finds the returns of those prices and stores it over the adjusted close value. We don't need the adjusted close value because we are stricly looking at returns.You can also see that each lag number corresponds to how many days back the prices is lagged. The next couple of functions will add features to the dataset. These features will include rolling mean and multi-day return, which finds the difference in returns for different days. This will allow us to see how the mean is changing and how the returns are changing as well.
# +
def addFeatures(dataframe, adjclose, returns, n):
    """
    operates on two columns of dataframe:
    - n >= 2
    - given Return_* computes the return of day i respect to day i-n.
    - given AdjClose_* computes its moving average on n days

    NOTE(review): this re-definition duplicates the one earlier in the
    notebook and overrides it for all later cells.
    """
    return_n = adjclose[9:] + "_MultiDay" + str(n)
    dataframe[return_n] = dataframe[adjclose].pct_change(n)
    roll_n = returns[7:] + "_RolMean" + str(n)
    # pd.rolling_mean() was removed from pandas; Series.rolling(n).mean()
    # is the supported equivalent (same window and min_periods defaults).
    dataframe[roll_n] = dataframe[returns].rolling(n).mean()
# +
# NOTE(review): duplicate re-definition of applyRollMeanDelayedReturns
# from earlier in the notebook; it overrides the first copy.
def applyRollMeanDelayedReturns(datasets, delta):
    """
    applies rolling mean and delayed returns to each dataframe in the list
    """
    for dataset in datasets:
        columns = dataset.columns
        adjclose = columns[0]
        returns = columns[1]
        for n in delta:
            addFeatures(dataset, adjclose, returns, n)
    return datasets
# -
# Cell below will merge all of the dataframes together. I can see where issues will arise with holidays being different for various countries. I will need a way to adjust for that.
applyRollMeanDelayedReturns(dataStocks,range(1,2))
# +
#Merge datasets
# NOTE(review): duplicate re-definition of mergeDataframes from above.
def mergeDataframes(datasets, index):
    subset = []
    subset = [dataset.iloc[:, index:] for dataset in datasets[1:]]
    first = subset[0].join(subset[1:], how = 'outer')
    finance = datasets[0].iloc[:, index:].join(first, how = 'left')
    return finance
# +
dataStocks=getAllStockData('^GSPC')
dataStocks=applyRollMeanDelayedReturns(dataStocks,range(1,2))
dataStocks=mergeDataframes(dataStocks,0)
dataStocks['Today_^GSPC']=dataStocks['Today_^GSPC'].shift(-1)
dataStocks['Direction'] = np.sign(dataStocks['Today_^GSPC'])
start_date=datetime.datetime(2015,1,1)
dataStocks = dataStocks[dataStocks.index >= start_date]
dataStocks.head()
#cols = filter(lambda x: x.startswith('Adj'), dataStocks.columns)
#dataStocks=dataStocks.drop(dataStocks.columns[[cols]], axis=0, inplace=True)
#dataStocks=dataStocks[cols].drop(axis=1)
#dataStocks
# +
all_cols = list(dataStocks.columns)
print all_cols
# -
cols_to_drop = filter(lambda x: x.startswith('Adj'), all_cols)
cols_to_drop
# +
dataStocks= dataStocks.drop(cols_to_drop, axis=1)
dataStocks.head()
# -
dataStocks = dataStocks.interpolate(method='time')
#fill left overs with mean
dataStocks=dataStocks.fillna(dataStocks.mean())
#set features
count_NANs(dataStocks)
dataStocks=dataStocks.fillna(dataStocks.mean())
count_NANs(dataStocks)
# Below creates a test and train set, while also applying categorical variables to the returns of the S&P 500. Up for postive returns and down for negative returns. We will then go through the classification process to find the best features and model. These methods include: random forrest, k nearest neighbors, support vector machines, adaptive boosting, gradient tree boosting,and quadratic discriminant analysis. I may need to add more features to the data set to get a better representative of the movements. I may need to adjust my variables as well by picking indices that better represent the market movements.
# +
#from sklearn import preprocessing
#le = preprocessing.LabelEncoder()
#dataStocks['UpDown'] = dataStocks['Today_^GSPC']
#dataStocks.UpDown[dataStocks.UpDown >= 0] = 'Up'
#dataStocks.UpDown[dataStocks.UpDown < 0] = 'Down'
#dataStocks.UpDown = le.fit(dataStocks.UpDown).transform(dataStocks.UpDown)
#features = dataStocks.columns[1:65]
#target=dataStocks.columns[66]
#print features
3 target
#print count_NANs(dataStocks)
#print dataStocks.isnull().sum()
### splitting in train and test set
#index = int(np.floor(dataStocks.shape[0]*0.8))
#train, test = dataStocks[:index], dataStocks[index:]
#print 'Size of train set: ', train.shape
#print 'Size of test set: ', test.shape
# +
#from sklearn.ensemble import RandomForestClassifier
#forest = RandomForestClassifier(n_estimators=1000, n_jobs=-1)
#forest = forest.fit(train[features], train['UpDown'])
#print forest.score(test[features],test['UpDown'])
# -
def UpDownDay(dataset, start_test):
    """Label each day 'Up'/'Down' from Today_^GSPC, integer-encode the
    labels, and split features/target chronologically at `start_test`."""
    le = preprocessing.LabelEncoder()
    dataset['UpDown'] = dataset['Today_^GSPC']
    # NOTE(review): after the first assignment the column mixes 'Up'
    # strings with numbers, so the `< 0` comparison only works on the
    # Python 2 kernel; the chained indexing also triggers
    # SettingWithCopyWarning on newer pandas.
    dataset.UpDown[dataset.UpDown >= 0] = 'Up'
    dataset.UpDown[dataset.UpDown < 0] = 'Down'
    dataset.UpDown = le.fit(dataset.UpDown).transform(dataset.UpDown)
    # Column 0 is skipped (Today_^GSPC, the label source).
    features = dataset.columns[1:]
    X = dataset[features]
    y = dataset.UpDown
    X_train = X[X.index < start_test]
    y_train = y[y.index < start_test]
    X_test = X[X.index >= start_test]
    y_test = y[y.index >= start_test]
    return X_train, y_train, X_test, y_test
# +
def RandomForrest(X_train, y_train, X_test, y_test, parameters):
    """Fit a 1000-tree random forest and return accuracy on the test split."""
    model = RandomForestClassifier(n_estimators=1000, n_jobs=-1)
    model.fit(X_train, y_train)
    return model.score(X_test, y_test)
# -
# K-Nearest Neighbors
def KNN(X_train, y_train, X_test, y_test, parameters):
    """Fit a default k-nearest-neighbors classifier; return test accuracy."""
    model = neighbors.KNeighborsClassifier()
    model.fit(X_train, y_train)
    return model.score(X_test, y_test)
# +
#Support Vector Machines
def SVM(X_train, y_train, X_test, y_test, parameters):
    """Fit a default-parameter SVC and return accuracy on the test split.

    NOTE(review): c and g are unpacked but never passed to SVC; the
    original behaved the same way, so the defaults are kept.
    """
    c, g = parameters[0], parameters[1]
    model = SVC()
    model.fit(X_train, y_train)
    return model.score(X_test, y_test)
# +
#Adaptive Boosting
def AdaptBoostClass(X_train, y_train, X_test, y_test, parameters):
    """
    Ada Boosting binary Classification

    NOTE(review): n and l are read but never passed to the classifier;
    kept as-is to preserve the original behavior.
    """
    n, l = parameters[0], parameters[1]
    model = AdaBoostClassifier()
    model.fit(X_train, y_train)
    return model.score(X_test, y_test)
# -
#Gradient Tree Boosting
def GradientTreeBoost(X_train, y_train, X_test, y_test, parameters):
    """Fit a 100-stage gradient-boosted classifier; return test accuracy."""
    model = GradientBoostingClassifier(n_estimators=100)
    model.fit(X_train, y_train)
    return model.score(X_test, y_test)
# +
#Quadratic Discriminiatve Analysis
def GDA(X_train, y_train, X_test, y_test, parameters):
    """Quadratic discriminant analysis accuracy on the test split.

    `replaceTiny` is defined elsewhere in the notebook -- presumably it
    nudges near-zero returns to avoid QDA singularities; TODO confirm.
    """
    X_train = X_train.apply(replaceTiny)
    X_test = X_test.apply(replaceTiny)
    model = QDA()
    model.fit(X_train, y_train)
    return model.score(X_test, y_test)
# -
| Data Science Stocks (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
# Load the Play Store dataset, dropping rows with any missing values.
df = pd.read_csv("Dataset/googleplaystore.csv").dropna()
df.head()
# # Advanced Analysis
# Work on the raw ndarray; column order follows the CSV
# (0=App, 1=Category, 6=Type, 8=Content Rating -- confirm against file).
data = df.values
data.shape
data[2]
# +
# Print every free, Teen-rated ART_AND_DESIGN app name, then the count.
temp = 0
for row in data:
    if row[1] == "ART_AND_DESIGN" and row[6] == "Free" and row[8] == "Teen":
        print(row[0])
        temp += 1
print(temp)
# +
# Print every free, Teen-rated FAMILY app name, then the count.
temp = 0
for row in data:
    if row[1] == "FAMILY" and row[6] == "Free" and row[8] == "Teen":
        print(row[0])
        temp += 1
print(temp)
# -
# Ad-hoc inspection cells (display-only in the notebook).
data[2][6]
len(data)
df
| PlayStore Analysis - Advanced Analysis/PlayStore Analysis - Advanced Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="gTLMohVk5vEU" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 4}], "base_uri": "https://localhost:8080/", "height": 105} outputId="3c24cc9d-f8ca-42ca-a308-0d3969603e34" executionInfo={"status": "ok", "timestamp": 1521698206209, "user_tz": -180, "elapsed": 8674, "user": {"displayName": "\u0418\u043b\u044c\u0434\u0430\u0440 \u041a\u0430\u043c\u0430\u043b\u044c\u0434\u0438\u043d\u043e\u0432", "photoUrl": "//lh4.googleusercontent.com/-V561pRqasQg/AAAAAAAAAAI/AAAAAAAAAY8/SBtycgQVgsg/s50-c-k-no/photo.jpg", "userId": "112336361359478169776"}}
# Colab-only setup: install google-drive-ocamlfuse and authenticate so
# Google Drive can be FUSE-mounted (shell lines are notebook magics).
# !apt-get install -y -qq software-properties-common python-software-properties module-init-tools
# !add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null
# !apt-get update -qq 2>&1 > /dev/null
# !apt-get -y install -qq google-drive-ocamlfuse fuse
from google.colab import auth
auth.authenticate_user()
from oauth2client.client import GoogleCredentials
creds = GoogleCredentials.get_application_default()
import getpass
# !google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} < /dev/null 2>&1 | grep URL
# Paste the OAuth verification code interactively.
vcode = getpass.getpass()
# !echo {vcode} | google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret}
# + id="oO4PuyBm51A_" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# !mkdir -p drive
# !google-drive-ocamlfuse drive
# + id="jQghgS5K6E1E" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
from os import path
# Pick the right torch 0.3.1 wheel for this runtime (CPU vs CUDA 8.0).
# NOTE(review): wheel.pep425tags was removed from modern `wheel`; this
# cell only works on the era-appropriate Colab image.
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
accelerator = 'cu80' if path.exists('/opt/bin/nvidia-smi') else 'cpu'
# !pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.3.1-{platform}-linux_x86_64.whl torchvision
import torch
# + id="0ojDhHna6Ft3" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
import sys
# Make the project modules on the mounted Drive importable.
sys.path.append('drive/cyclegan_proj/')
# + id="t5xNfElk6Ha2" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
from inception import InceptionSham
from data_sampler import SequentialSampler, BatchSampler, RandomSampler
from loss import compute_loss
from loss import consensus_loss
from inception import inception_score
import networks as net
from data_sampler import data_sampler
from utl import *
import numpy as np
import pandas as pd
from math import ceil
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
import datetime as dt
from IPython.display import clear_output
# + id="o6eWMQ4eT9Wn" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Load the four CSV splits as raw arrays (no header row in the files).
train_pull = pd.read_csv("drive/cyclegan_proj/data/train_pull.csv",
                         header=None).values
train_top = pd.read_csv("drive/cyclegan_proj/data/train_top.csv",
                        header=None).values
test_pull = pd.read_csv("drive/cyclegan_proj/data/test_pull.csv",
                        header=None).values
test_top = pd.read_csv("drive/cyclegan_proj/data/test_top.csv",
                       header=None).values
train_n_obs = train_pull.shape[0]
test_n_obs = test_pull.shape[0]
# + id="R4bJ_Hh2XQwg" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Restore the pre-trained inception-style scorer used for evaluation.
inception_model = InceptionSham(num_classes=10, input_nc=1, dropout=0.5)
if torch.cuda.is_available():
    inception_model.cuda()
# NOTE(review): torch.load without map_location assumes the checkpoint's
# device is available -- confirm when running on CPU.
inception_state = torch.load("drive/cyclegan_proj/inception_sham_state.pth",)
inception_model.load_state_dict(inception_state)
# + id="NI7nvSus6Rm_" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# argparser or builder, where are you?
# Hard-coded hyper-parameters for the CycleGAN networks.
input_nc, output_nc = 1, 1          # single-channel (grayscale) in/out
discr_filters, max_power, n_layers = 8, 4, 2
norm_lay = nn.BatchNorm2d
start_size = 28                      # MNIST-sized images
gen_filters, dropout, n_blocks = 8, 0.5, 3
batch_size = 1
alpha = 10                           # loss weight -- role defined in loss.py; confirm
use_gpu = torch.cuda.is_available()
# + id="1jvB6s8j6T7k" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Two discriminators and two resnet generators (one pair per domain),
# built from the hyper-parameters defined in the config cell above.
discr_a = net.Discriminator(input_nc=input_nc,
                            discr_filters=discr_filters,
                            max_power=max_power,
                            n_layers=n_layers,
                            norm_lay=norm_lay,
                            start_size=start_size
                            )
discr_b = net.Discriminator(input_nc=input_nc,
                            discr_filters=discr_filters,
                            max_power=max_power,
                            n_layers=n_layers,
                            norm_lay=norm_lay,
                            start_size=start_size
                            )
gener_a = net.ResnetGenerator(
    input_nc = input_nc,
    output_nc = output_nc,
    gen_filters = gen_filters,
    norm_lay = norm_lay,
    dropout = dropout,
    n_blocks = n_blocks
)
gener_b = net.ResnetGenerator(
    input_nc = input_nc,
    output_nc = output_nc,
    gen_filters = gen_filters,
    norm_lay = norm_lay,
    dropout = dropout,
    n_blocks = n_blocks
)
if torch.cuda.is_available():
    discr_a.cuda()
    discr_b.cuda()
    gener_a.cuda()
    gener_b.cuda()
# + id="EfL4gx2G6U5K" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}], "base_uri": "https://localhost:8080/", "height": 34} outputId="f1d6e3fb-71d7-4ef1-ebfe-1dd2aedcd36f" executionInfo={"status": "ok", "timestamp": 1521706192921, "user_tz": -180, "elapsed": 558, "user": {"displayName": "\u0418\u043b\u044c\u0434\u0430\u0440 \u041a\u0430\u043c\u0430\u043b\u044c\u0434\u0438\u043d\u043e\u0432", "photoUrl": "//lh4.googleusercontent.com/-V561pRqasQg/AAAAAAAA<KEY>AY8/SBtycgQVgsg/s50-c-k-no/photo.jpg", "userId": "112336361359478169776"}}
# Apply the project's custom weight initializer (from utl) to all nets.
gener_a.apply(weights_init)
gener_b.apply(weights_init)
discr_a.apply(weights_init)
discr_b.apply(weights_init)
print("weights initialized")
# + id="HTue9PZc6XqZ" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
from itertools import chain
# All six optimizers share the same Adam hyper-parameters: two joint ones
# (both discriminators / both generators) and one per individual network.
adam_params = dict(lr=0.002, betas=(0.5, 0.999))

def _adam(params):
    # Small factory so the shared hyper-parameters live in one place.
    return torch.optim.Adam(params, **adam_params)

opt_discr = _adam(chain(discr_a.parameters(), discr_b.parameters()))
opt_gener = _adam(chain(gener_a.parameters(), gener_b.parameters()))
opt_gener_a = _adam(gener_a.parameters())
opt_gener_b = _adam(gener_b.parameters())
opt_discr_a = _adam(discr_a.parameters())
opt_discr_b = _adam(discr_b.parameters())
# + id="-Nu1BkVW7wd5" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Per-iteration training logs: losses, fooling rates, cycle losses,
# consensus penalties, gradient norms and inception scores.
discr_loss_log, gener_loss_log = [], []
da_loss_log, db_loss_log = [], []
ga_loss_log, gb_loss_log = [], []
ga_fool_log, gb_fool_log = [], []
gener_a_cyc_loss_log, gener_b_cyc_loss_log = [], []
consesus_a_log, consesus_b_log = [], []  # (sic: "consesus" spelling kept for compatibility)
da_grad_log, db_grad_log = [], []
ga_grad_log, gb_grad_log = [], []
incp_a_log, incp_b_log = [], []
# + id="nkbqYWJg4Cnt" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# consensus weight; None disables the consensus-optimisation branch of the loop below
clambda = None
# BUG FIX: inp_grad was only assigned inside the `clambda is not None` branch,
# but it is read unconditionally when wrapping the input batches in Variables,
# so the default clambda=None configuration crashed with a NameError.
inp_grad = False
if clambda is not None:
    inp_grad = True     # consensus penalty needs gradients w.r.t. the inputs
    cons_norm = 1/2     # averaging factor for the two consensus terms
# + id="uVY9_c2A92Gq" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}, {"item_id": 2}, {"item_id": 3}, {"item_id": 4}, {"item_id": 5}, {"item_id": 6}], "base_uri": "https://localhost:8080/", "height": 2156} outputId="1b73f907-ced9-48b5-94ac-447622502be8" executionInfo={"status": "error", "timestamp": 1521707396132, "user_tz": -180, "elapsed": 28214, "user": {"displayName": "\u0418\u043b\u044c\u0434\u0430\u0440 \u041a\u0430\u043c\u0430\u043b\u044c\u0434\u0438\u043d\u043e\u0432", "photoUrl": <KEY>", "userId": "112336361359478169776"}}
N_EPOCHS = 5
print("Start")
for epoch in range(N_EPOCHS):
    # Fresh random pairing of domain-A / domain-B samples every epoch.
    sampler = RandomSampler(train_n_obs)
    batch_sampler_a = BatchSampler(sampler, batch_size)
    batch_sampler_b = BatchSampler(sampler, batch_size)
    for i, batches in enumerate(zip(batch_sampler_a, batch_sampler_b)):
        # aduc presumably moves the tensor to the GPU when use_gpu — confirm.
        batch_a = Variable(
            aduc(
                torch.FloatTensor(train_pull[batches[0]]).view(-1, 1, 28, 28)),
            requires_grad=inp_grad)
        batch_b = Variable(
            aduc(torch.FloatTensor(train_top[batches[1]]).view(-1, 1, 28, 28)),
            requires_grad=inp_grad)
        train_stage(gener_a, gener_b, discr_a, discr_b)
        losses = compute_loss(
            gener_a, gener_b,
            discr_a, discr_b,
            batch_a, batch_b,
            alpha,
            discr_loss='mse',
            use_gpu=use_gpu)
        # compute_loss packs everything into one tuple; unpack it by position.
        discr_loss, gener_loss = losses[0:2]
        da_loss, db_loss = losses[2:4]
        ga_loss, gb_loss = losses[4:6]
        ga_fool, gb_fool = losses[6:8]
        gener_a_cyc_loss, gener_b_cyc_loss = losses[8:10]
        if clambda is None:
            # Plain alternating GAN updates: even iterations train the
            # discriminators, odd iterations train the generators.
            if i % 2 == 0:
                opt_discr.zero_grad()
                discr_loss.backward()
                # BUG FIX: this branch steps the discriminator optimizer, so
                # clip the *discriminator* gradients (the original clipped the
                # generators here, leaving discriminator gradients unclipped).
                torch.nn.utils.clip_grad_norm(discr_a.parameters(), 2)
                torch.nn.utils.clip_grad_norm(discr_b.parameters(), 2)
                opt_discr.step()
            else:
                opt_gener.zero_grad()
                gener_loss.backward()
                torch.nn.utils.clip_grad_norm(gener_a.parameters(), 2)
                torch.nn.utils.clip_grad_norm(gener_b.parameters(), 2)
                opt_gener.step()
        else:
            # Consensus optimisation: subtract a weighted consensus penalty
            # from each player's loss and update all four networks jointly.
            consesus_a = (consensus_loss(da_loss, discr_a) +
                          consensus_loss(ga_loss, gener_a)) * cons_norm * 1/2
            consesus_b = (consensus_loss(db_loss, discr_b) +
                          consensus_loss(gb_loss, gener_b)) * cons_norm * 1/2
            consesus_a_log.append(consesus_a.data[0])
            consesus_b_log.append(consesus_b.data[0])
            discr_a_mega_loss = da_loss - clambda * consesus_a
            discr_b_mega_loss = db_loss - clambda * consesus_b
            gener_a_mega_loss = ga_loss - clambda * consesus_a
            gener_b_mega_loss = gb_loss - clambda * consesus_b
            # BUG FIX: the combined loss dropped discr_a_mega_loss and counted
            # gener_b_mega_loss twice; sum each of the four terms exactly once.
            lossic = (discr_a_mega_loss + discr_b_mega_loss +
                      gener_a_mega_loss + gener_b_mega_loss)
            opt_gener_a.zero_grad()
            opt_gener_b.zero_grad()
            opt_discr_a.zero_grad()
            opt_discr_b.zero_grad()
            lossic.backward()
            # BUG FIX: the generators were clipped twice and the discriminators
            # never; clip each of the four networks exactly once.
            torch.nn.utils.clip_grad_norm(gener_a.parameters(), 5)
            torch.nn.utils.clip_grad_norm(gener_b.parameters(), 5)
            torch.nn.utils.clip_grad_norm(discr_a.parameters(), 5)
            torch.nn.utils.clip_grad_norm(discr_b.parameters(), 5)
            opt_gener_a.step()
            opt_gener_b.step()
            opt_discr_a.step()
            opt_discr_b.step()
        # Bookkeeping for the plots below. (.data[0] / clip_grad_norm are the
        # pre-0.5 torch APIs; this file consistently targets that era.)
        ga_grad_log.append(grad_norm(gener_a))
        gb_grad_log.append(grad_norm(gener_b))
        da_grad_log.append(grad_norm(discr_a))
        db_grad_log.append(grad_norm(discr_b))
        discr_loss_log.append(discr_loss.data[0])
        gener_loss_log.append(gener_loss.data[0])
        da_loss_log.append(da_loss.data[0])
        db_loss_log.append(db_loss.data[0])
        ga_loss_log.append(ga_loss.data[0])
        gb_loss_log.append(gb_loss.data[0])
        ga_fool_log.append(ga_fool.data[0])
        gb_fool_log.append(gb_fool.data[0])
        gener_a_cyc_loss_log.append(gener_a_cyc_loss.data[0])
        gener_b_cyc_loss_log.append(gener_b_cyc_loss.data[0])
        if i % 10 == 0:
            # Periodic quality probe on held-out data.
            incp_a, incp_b = inception_score(
                gener_a, gener_b,
                inception_model,
                test_pull, test_top,
                batch_size=100,
                splits=10)
            incp_a_log.append(incp_a)
            incp_b_log.append(incp_b)
        # visualize every 100 iterations (the original comment said "every
        # epoch", which did not match the condition below)
        if (i + 1) % 100 == 0:
            clear_output()
            print("Epoch {}".format(epoch + 1))
            plt.figure(figsize=(5, 3))
            plt.plot(incp_a_log, label="gener_a")
            plt.plot(incp_b_log, label="gener_b")
            plt.title('Inception score')
            plt.legend()
            plt.show()
            sample_a, sample_b = data_sampler(
                1, train_pull, train_top, use_gpu=use_gpu)
            visualize_loss(da_loss_log, db_loss_log,
                           ga_loss_log, gb_loss_log,
                           exp_window=None)
            plot_grad_norms(da_grad_log, db_grad_log,
                            ga_grad_log, gb_grad_log)
            plot_geners(sample_a, sample_b,
                        gener_a, gener_b)
# + id="7xR1Jk11Wz4k" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}, {"item_id": 2}], "base_uri": "https://localhost:8080/", "height": 364} outputId="341666e7-6f95-4dee-bd76-82d6a2685203" executionInfo={"status": "ok", "timestamp": 1521708032220, "user_tz": -180, "elapsed": 583, "user": {"displayName": "\u0418\u043b\u044c\u0434\u0430\u0440 \u041a\u0430\u043c\u0430\u043b\u044c\u0434\u0438\u043d\u043e\u0432", "photoUrl": "//lh4.googleusercontent.com/-V561pRqasQg/AAAAAAAAAAI/AAAAAAAAAY8/SBtycgQVgsg/s50-c-k-no/photo.jpg", "userId": "112336361359478169776"}}
# Consensus penalty for domain A over training iterations (empty unless the
# consensus branch ran, i.e. clambda was not None during training).
plt.plot(consesus_a_log)
# + id="qnKYAIc_LNS9" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}, {"item_id": 2}, {"item_id": 3}, {"item_id": 4}, {"item_id": 5}], "base_uri": "https://localhost:8080/", "height": 2077} outputId="19198f29-cff7-4ec3-b9d5-4143e86fd1af" executionInfo={"status": "ok", "timestamp": 1521669433306, "user_tz": -180, "elapsed": 4566, "user": {"displayName": "\u0418\u043b\u044c\u0434\u0430\u0440 \u041a\u0430\u043c\u0430\u043b\u044c\u0434\u0438\u043d\u043e\u0432", "photoUrl": "//lh4.googleusercontent.com/-<KEY>/<KEY>SBtycgQVgsg/s50-c-k-no/photo.jpg", "userId": "112336361359478169776"}}
# Show five random sample pairs translated by each generator.
# NOTE(review): use_gpu=True is hard-coded here; presumably it should be the
# use_gpu flag so this cell also runs on CPU-only machines — confirm.
for i in range(5):
    sample_a, sample_b = data_sampler(1, train_pull, train_top, use_gpu=True)
    plot_geners(sample_a, sample_b, gener_a, gener_b)
# + id="bp60nhbCh7Ws" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "output_extras": [{"item_id": 1}], "base_uri": "https://localhost:8080/", "height": 4879} outputId="3c89cdab-163c-4311-bf3c-f0e1010efa40" executionInfo={"status": "ok", "timestamp": 1521665407263, "user_tz": -180, "elapsed": 506, "user": {"displayName": "\u0418\u043b\u044c\u0434\u0430\u0440 \u041a\u0430\u043c\u0430\u043b\u044c\u0434\u0438\u043d\u043e\u0432", "photoUrl": "//lh4.googleusercontent.com/-V561pRqasQg/AAAAAAAAAAI/AAAAAAAAAY8/SBtycgQVgsg/s50-c-k-no/photo.jpg", "userId": "112336361359478169776"}}
# Bare expression: display the raw generator-B gradient-norm log in the notebook.
gb_grad_log
# + id="VBcmRYjqU3aF" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
| notebook/train_colab_cyclegan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 315} colab_type="code" executionInfo={"elapsed": 13218, "status": "ok", "timestamp": 1540890868505, "user": {"displayName": "\u5bae\u672c\u572d\u4e00\u90ce", "photoUrl": "https://lh5.googleusercontent.com/-5BLtx8oPSy8/AAAAAAAAAAI/AAAAAAAALtI/-tIwIsmAvCs/s64/photo.jpg", "userId": "00037817427736046144"}, "user_tz": -540} id="0dQutTXVUp-k" outputId="9d94a14e-95ea-47f6-bd57-8ec190b37865"
# #colabを使う方はこちらを使用ください。
# # !pip install torch==0.4.1
# # !pip install torchvision==0.2.1
# # !pip install numpy==1.14.6
# # !pip install matplotlib==2.1.2
# # !pip install pillow==5.0.0
# # !pip install opencv-python==3.4.3.18
# -
# # 第9章 torch.nnパッケージ
# + [markdown] colab_type="text" id="GpArAxspU3vq"
# # 9.11 損失関数
# + colab={} colab_type="code" id="hzyWSFwA2TF-"
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import torchvision.transforms as transforms
# %matplotlib inline
import matplotlib.pyplot as plt
# NOTE(review): numpy is imported twice in this cell; the second import is redundant.
import numpy as np
# Run on the GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# + [markdown] colab_type="text" id="QozIeAYB4MXZ"
# ## torch.nn.L1Loss
#
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" executionInfo={"elapsed": 474, "status": "ok", "timestamp": 1540897550489, "user": {"displayName": "\u5bae\u672c\u572d\u4e00\u90ce", "photoUrl": "https://lh5.googleusercontent.com/-5BLtx8oPSy8/AAAAAAAAAAI/AAAAAAAALtI/-tIwIsmAvCs/s64/photo.jpg", "userId": "00037817427736046144"}, "user_tz": -540} id="BsUG18rSzyE4" outputId="c8490f25-b77d-40b7-e151-47fe09f06b6a"
# Mean absolute error between two random 3x5 tensors; backward() populates
# input.grad since input was created with requires_grad=True.
input = torch.randn(3, 5, requires_grad=True)
target = torch.randn(3, 5)
loss = nn.L1Loss()
output = loss(input, target)
output.backward()
print(output)
# + [markdown] colab_type="text" id="CG6e1pv94jIs"
# ## torch.nn.MSELoss
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" executionInfo={"elapsed": 465, "status": "ok", "timestamp": 1540897561664, "user": {"displayName": "\u5bae\u672c\u572d\u4e00\u90ce", "photoUrl": "https://lh5.googleusercontent.com/-5BLtx8oPSy8/AAAAAAAAAAI/AAAAAAAALtI/-tIwIsmAvCs/s64/photo.jpg", "userId": "00037817427736046144"}, "user_tz": -540} id="sIDhCbTqzx_Q" outputId="41443b67-5fb0-4d5d-eb49-f635f1f8a8d6"
# Mean squared error between two random 3x5 tensors.
input = torch.randn(3, 5, requires_grad=True)
target = torch.randn(3, 5)
loss = nn.MSELoss()
output = loss(input, target)
output.backward()
print(output)
# + [markdown] colab_type="text" id="U7gP88eI46Hm"
# ## torch.nn.CrossEntropyLoss
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" executionInfo={"elapsed": 482, "status": "ok", "timestamp": 1540897578466, "user": {"displayName": "\u5bae\u672c\u572d\u4e00\u90ce", "photoUrl": "https://lh5.googleusercontent.com/-5BLtx8oPSy8/AAAAAAAAAAI/AAAAAAAALtI/-tIwIsmAvCs/s64/photo.jpg", "userId": "00037817427736046144"}, "user_tz": -540} id="gxhLaot94ov_" outputId="f9ce5641-cadf-487d-976f-3bb66ee71a2b"
# Cross-entropy over 5 classes for a batch of 3 random logit rows;
# targets are random class indices drawn from [0, 5).
input = torch.randn(3, 5, requires_grad=True)
target = torch.empty(3, dtype=torch.long).random_(5)
loss = nn.CrossEntropyLoss()
output = loss(input, target)
output.backward()
print(output)
# + [markdown] colab_type="text" id="Lmb-Pq-tRWE0"
# ## torch.nn.LogSoftmax
#
# + colab={"base_uri": "https://localhost:8080/", "height": 72} colab_type="code" executionInfo={"elapsed": 455, "status": "ok", "timestamp": 1540897618330, "user": {"displayName": "\u5bae\u672c\u572d\u4e00\u90ce", "photoUrl": "https://lh5.googleusercontent.com/-5BLtx8oPSy8/AAAAAAAAAAI/AAAAAAAALtI/-tIwIsmAvCs/s64/photo.jpg", "userId": "00037817427736046144"}, "user_tz": -540} id="_WsvalSK4pA6" outputId="04b789ae-251c-4889-8bbc-e19eca45d1e2"
# NLLLoss expects log-probabilities, so pair it with LogSoftmax.
# BUG FIX: nn.LogSoftmax() with an implicit dim is deprecated and warns at
# runtime; dim=1 is the class dimension and matches the legacy behaviour for
# this 2-D input.
m = nn.LogSoftmax(dim=1)
loss = nn.NLLLoss()
# Input has size N x C = 3 x 5.
input = torch.randn(3, 5, requires_grad=True)
# Each element of the target must satisfy 0 <= value < C.
target = torch.tensor([1, 0, 4])
output = loss(m(input), target)
output.backward()
# + [markdown] colab_type="text" id="vgTPcQd5fg9Q"
# ## torch.nn.BCELoss
#
# + colab={} colab_type="code" id="3Zcs2JGyU0Ot"
# Binary cross-entropy on probabilities: BCELoss needs inputs in [0, 1],
# so the raw logits are squashed with a sigmoid first.
m = nn.Sigmoid()
input = torch.randn(3, requires_grad=True)
target = torch.empty(3).random_(2)
loss = nn.BCELoss()
output = loss(m(input), target)
output.backward()
# + [markdown] colab_type="text" id="NT6ycVJhU3L_"
# ## torch.nn.BCEWithLogitsLoss
# + colab={} colab_type="code" id="j-ZfnPk1gEtW"
# BCEWithLogitsLoss fuses the sigmoid into the loss, so it must be fed raw
# logits. BUG FIX: the original passed m(input) — the sigmoid output left over
# from the previous cell — which applied the sigmoid twice.
loss = nn.BCEWithLogitsLoss()
input = torch.randn(3, requires_grad=True)
target = torch.empty(3).random_(2)
output = loss(input, target)
output.backward()
# + [markdown] colab_type="text" id="D8rpMA_6VJzz"
# ## torch.nn.MultiLabelSoftMarginLoss
#
# + colab={} colab_type="code" id="9UJZVuidurik"
# MultiLabelSoftMarginLoss also applies the sigmoid internally, so it takes
# raw logits. BUG FIX: the original passed m(input) (sigmoid output from an
# earlier cell), double-applying the sigmoid.
loss = nn.MultiLabelSoftMarginLoss()
input = torch.randn(3, requires_grad=True)
target = torch.empty(3).random_(2)
output = loss(input, target)
output.backward()
# -
| chapter9/section9_11.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Adult Dataset (LUX RESEARCH)
#
# ### About the notebook - This Notebook is just a demonstration of the LUX library, It does not hold the official data analysis, and official algorithms of the dataset, It is just to get the knowledge of the library LUX how we can use it.
#
# The notebook is made on the basis of how simply we can get the approach towards the EDA and the algorithms which we use in our day-to-day machine learning library life,
# as <NAME> told on the meeting held on March 7, that we can make two different pipelines of the same dataset; here's what the approach looks like.
#
# The one approach or the 1st Pipeline which started from In [1] to In [16] and then again started at In[17] and till the end, hence in between you may get another pipeline (the second one) named (THE LUX Pipeline) where you may find the, EDA done by the LUX Automatically
#
# ## Author - : <NAME>
#
# Here, We have a Dataset which is being named as Adult Dataset.
# Let's Explore Some important features of this dataset before proceeding further.
#
# We have total 14 attributes as we can see in this dataset below, We can see here a lot of attributes, So lets find the factors how we can show some relation with the different attributes.
#
# ## Before Starting let us load the important libraries!
# The Machine Learning Model we are going to use over here is
# Decision Tree, Logistic Regression, LinearDiscriminantAnalysis, KNN, GaussianNB
#
#
# Let's Explore but before that let's load the data and have a look !
#
# # PIPELINE 1 (Manual Pipeline)
# Standard data-science stack plus the scikit-learn models compared below.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
# Load the Adult (census income) dataset from a local CSV file.
adult = pd.read_csv(r'C:\Users\Adin\Desktop\Ignitus\adult.data.csv')
adult.head()
# NOTE(review): .sum() on a mixed-dtype frame concatenates the string columns;
# adult.describe() or adult.isnull().sum() was probably intended — confirm.
adult.sum()
# ### Getting more from the data
#
# Now as we have imported the dataset let us find out the size of the dataset using "df.shape" command, also we will get the information of the data and the data types by using "df.info" command.
# (rows, columns) of the loaded frame.
adult.shape
adult.info()
# Hence, we can see there are no null values, so we don't need any extra effort to treat them and we can save time
#
#
# ## Exploratory Data Analysis
#
# Let's start analysing the data with EDA with some graphs and see how the attributes relate to each other.
sns.countplot(x='salary', hue='sex', data=adult )
# By the above plot we can see that according to this dataset the males mostly earn more than females, based on the above-50K / below-50K split. Why only this much ???
# Let's explore more
sns.countplot(x ='salary',hue='race', data=adult)
# ### Well let me tell you first I am not a racist !!
#
# When I tried to find the salary based on the race what I got is this!
# According to this dataset the good-earning labourers are mostly comprised of whites!
# Next is blacks and later on others
# This was calculated on both bases, >= 50K and <= 50K
# Now, here we will check for outliers; if any are present we will remove them in order to get better accuracy
#
# Seaborn offers us the box plot to check the outliers; we will use it here for the numeric attribute
sns.boxplot(y=adult['age'],color='r',data=adult)
plt.show()
# The only numeric factor we could check was age in this data set; we find there are some outliers we will deal with now, so let's describe the age column of the data here
adult['age'].describe()
# NOTE(review): 48 is roughly the 75th percentile of age, not an outlier
# threshold — this replaces the whole upper quartile with the column mean.
# Consider an IQR-based rule (e.g. Q3 + 1.5*IQR) instead — confirm intent.
adult['age'] = np.where(adult['age']> 48, np.mean(adult['age']), adult['age'])
adult.head(2)
# Removing the outlier by replacing all age above 48 by its mean
sns.boxplot(y=adult['age'],color='r',data=adult).set_title('Box Plot Without Outliers')
plt.show()
# The above boxplot shows us that the outliers are completely removed
#
# ## Histograms
#
# Histograms are used to check the normal distribution, whether our data is normally distributed or not; let's grab this information using the histogram.
#
# If it forms a bell-shaped curve then we can say our data is well distributed.
sns.distplot(adult['age'], color='b',kde=True, bins=10)
# We can see it forms a bell shape, but slightly left-skewed; we will explore the kurtosis and skewness values of the ages below.
kurtosis = adult['age'].kurt()
skewness = adult['age'].skew()
print('Kurtosis -:',kurtosis)
print('Skewness -:',skewness)
# ## Count-Plot
# Now we will look more deeply at how the salary is distributed using the count plot offered by the seaborn library
sns.countplot(x='salary',data=adult,palette='Set1')
plt.show()
# We can see that there are more people who earn less than 50K than people who earn more than 50K.
# Let's compare the race again on the countplot and see what the data tells us about the race
sns.countplot(x='race',data=adult,palette='Set3')
plt.show()
# This data is composed of mostly whites and then blacks, as the countplot tells us.
# # LUX Library (LUX PIPELINE)
#
# Description -: Lux is a Python library that facilitates fast and easy data exploration by automating the visualization and data analysis process. By simply printing out a dataframe in a Jupyter notebook, Lux recommends a set of visualizations highlighting interesting trends and patterns in the dataset. Visualizations are displayed via an interactive widget that enables users to quickly browse through large collections of visualizations and make sense of their data.
#
# Let's import the library
# Importing lux registers its widget with pandas so printing a DataFrame
# shows the interactive visualization toggle.
import lux
import pandas as pd
# Here below you can see a Toggle button; it will help you to visualize data by just one click using LUX.
# I will be exploring lux more in the upcoming notebooks.
#
# Here we can see three different types of sections using the LUX API:
# Correlation
# Distribution
# Occurrence
#
# Hence by this library we can get easy hands-on practice with EDA
# Reload the same CSV into a fresh frame so LUX sees the raw (un-encoded) data.
df = pd.read_csv(r'C:\Users\Adin\Desktop\Ignitus\adult.data.csv')
df
# # Data Preprocessing
# Let's use some models in our dataset and measure the accuracy, but before that we much describe the data and then peform some encodings in our dataset as for example -
#
# We have the sex category, we can change the male or female to 0 or 1 by using label encoding or one-hot encoding, but in this case we will be using the label encodings from sklearn library.
from sklearn.preprocessing import LabelEncoder
label = LabelEncoder()
# What we will be using here is -
# For all the columns in the adult dataset, whichever is of object dtype we will encode to a numerical value.
#
# fit_transform() is a function which will map the categorical values to numerical ones
# Re-using one encoder across columns is fine here: fit_transform refits it per column.
for col in adult.columns:
    if adult[col].dtypes =='object':
        adult[col]=label.fit_transform(adult[col])
adult.head(2)
# We can see all the categorical values converted to numerical values.
# Now let's split the X and Y variables on which we will make the predictions
x = adult.drop(columns = 'salary')
x
y = adult['salary']
y
# Splitting the data into train and test variables using train_test_split()
# using test size = 30% and random state = 42
xtrain, xtest, ytrain, ytest = train_test_split(x,y,test_size=0.30, random_state=42)
# ## Algorithms
#
# Here we are going to create a empty list of model and going to store the algorithms in it,
# Candidate classifiers to compare, as (display name, estimator) pairs.
model = []
model.append(("LR",LogisticRegression()))
model.append(("LDA",LinearDiscriminantAnalysis()))
model.append(("KNN",KNeighborsClassifier()))
model.append(("DT/CART",DecisionTreeClassifier()))
model.append(("NB",GaussianNB()))
# Same way we will store the results and names in lists, shown below
result = []
names = []
# We run a for loop to select each model from the list and train it on the data.
#
# k-fold is a cross-validation scheme: we divide the dataset into k parts, keep
# one fold for testing and the rest for training, rotating the test fold.
#
# Here we divide our data into 10 parts or folds.
#
# Make sure you have imported model_selection from the sklearn library.
from sklearn import model_selection
for name,models in model:
    # BUG FIX: KFold with a random_state but shuffle=False (the default) is an
    # error in modern scikit-learn (the seed would have no effect); shuffle
    # must be enabled for the seed to be meaningful.
    kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state = 7)
    cv_result = model_selection.cross_val_score(models,xtrain,ytrain,cv=kfold, scoring='accuracy')
    result.append(cv_result)
    names.append(name)
    # Report mean accuracy with its standard deviation across the 10 folds.
    msg = "%s,%f(%f)"%(name,cv_result.mean(), cv_result.std())
    print(msg)
# So, Here we can see that the accuracy we had received by various algorithms are -:
#
# Logistic Regression - 78%
# Linear Discriminant Analysis - 81%
# KNN - 77%
# Desicion Tree - 81%
# Gaussian NB - 79%
#
#
# Let us now kill the process from os.
#
# So first we need to import os
import os
# WARNING: this SIGKILLs the current process — the Jupyter kernel dies
# immediately with no cleanup. On Windows os.kill supports only a limited set
# of signals, so passing 9 may not behave as expected there — confirm.
os.kill(os.getpid(), 9)
| Adult Data Auto Analysis - LUX/Adult Data-Analysis (Lux Lib ).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/skyprince999/100Days-of-ML-Code-AY2021/blob/main/Day_2_VERY_SIMPLE_tabular_model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="T-o4TgJYIKBs"
# ## Installing packages & dependencies
#
# To run this notebook. You have to upload the data files from the following Kaggle competition:
#
# https://www.kaggle.com/c/tabular-playground-series-mar-2021
#
# This notebook follows the steps provided by <NAME> in Chap 7 of his book - "Deep Learning"
# + colab={"base_uri": "https://localhost:8080/"} id="5q3A6csZIKBx" outputId="5cd8749c-6ae5-4eeb-9ee9-a716ebcd40cc"
# !pip install -Uqq fastbook kaggle waterfallcharts treeinterpreter dtreeviz
# + colab={"base_uri": "https://localhost:8080/"} id="N-5lDrfZIlpp" outputId="d3d6628b-05ba-47fa-eae8-7268dc99d2a8"
# !unzip train.csv.zip
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" id="aWFVHUclIKB2"
#hide
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
# List any Kaggle input files (a no-op when running on Colab, where
# /kaggle/input does not exist).
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
from fastbook import *
from pandas.api.types import is_string_dtype, is_numeric_dtype, is_categorical_dtype
from fastai.tabular.all import *
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_curve, auc, roc_auc_score, confusion_matrix
from dtreeviz.trees import *
from IPython.display import Image, display_svg, SVG
import matplotlib.pyplot as plt
# NOTE(review): numpy is imported twice in this cell; the second import is redundant.
import numpy as np
import itertools
# Keep DataFrame display compact in the notebook.
pd.options.display.max_rows = 20
pd.options.display.max_columns = 8
# + colab={"base_uri": "https://localhost:8080/"} id="wEgE6r5HJwTT" outputId="55768348-101c-4a0f-8daf-d3e92618b59c"
# !pwd
# + colab={"base_uri": "https://localhost:8080/"} id="266r_X7QIKB3" outputId="08f62ff5-1e6b-42e3-a55e-0a0a4cde72c2"
# Colab path; on Kaggle use the commented competition path instead.
path = Path('/content') # /kaggle/input/tabular-playground-series-mar-2021/
df = pd.read_csv(path/'train.csv', low_memory=False)
print(df.shape)
df.columns
# + [markdown] id="l-wOIeRVIKB4"
# ## Read the data
#
# There are a lot of columns (!!!) Both categorical as well as continous.
# Lets take one of the categorical variables and examine it
#
# But first lets print out all the unique values of each categorical variable
# + colab={"base_uri": "https://localhost:8080/"} id="GAS4dUW7IKB5" outputId="37d6eeb8-ef4f-4e79-bb14-aa12cc0e091e"
# Dump the unique levels of every categorical ("cat*") column, one per block.
cat_columns = [c for c in df.columns if c.startswith('cat')]
for col in cat_columns:
    print(f"{col} :: ", df[col].unique())
    print()
# + [markdown] id="ZSl_N1EaIKB6"
# We will do some pre-processing, by handling strings & missing data
#
# Lets first check how many NAs are there in each column
# + colab={"base_uri": "https://localhost:8080/"} id="pZ7jNs6qIKB6" outputId="1d40e5e4-08e0-4b17-fb32-54fb8c14b0c4"
# Total count of missing values across the whole frame.
df.isnull().sum().sum()
# + id="vK1W1VqjIKB7"
#Since no Nulls are there we don't need the FillMissing proc while pre-processing the data
procs = [Categorify] #, FillMissing]
# defining the target variable
dep_var = 'target'
# defining the splits - 80/20 :: train/valid
splits = RandomSplitter(valid_pct=0.2)(range_of(df))
# + id="at2d49vpIKB8"
# Identifying the continuous (cont) and categorical (cat) variables
cont,cat = cont_cat_split(df, 1, dep_var=dep_var)
# + id="iBjZM5AfIKB9"
# Wrap the frame so fastai applies the procs and split for us.
to = TabularPandas(df, procs, cat, cont, y_names=dep_var, splits=splits)
# + colab={"base_uri": "https://localhost:8080/"} id="wPFms2luIKB9" outputId="300db9dc-d537-4882-fc11-b1223f4286ce"
len(to.train),len(to.valid)
# + colab={"base_uri": "https://localhost:8080/", "height": 162} id="_iAlAdmdIKB-" outputId="2ae49632-1ca2-4724-ce1a-56542db69008"
#hide_output
to.show(3)
# + [markdown] id="PeOMC9lMIKB-"
# The conversion of categorical columns to numbers is done by simply replacing each unique level with a number. The numbers associated with the levels are chosen consecutively as they are seen in a column, so there's no particular meaning to the numbers in categorical columns after conversion. The exception is if you first convert a column to a Pandas ordered category (as we did for ProductSize earlier), in which case the ordering you chose is used. We can see the mapping by looking at the classes attribute:
# + colab={"base_uri": "https://localhost:8080/", "height": 171} id="y2qyucn9IKB_" outputId="ca8deb69-db9f-46fd-e149-efe9d2327f23"
#hide_output -
to.items.head(3)
# + colab={"base_uri": "https://localhost:8080/"} id="zMHIwVHLIKB_" outputId="99a04710-c302-40f6-e883-ed8be28cee7c"
# Level -> index mapping fastai built for one of the categorical columns.
to.classes['cat10']
# + [markdown] id="lizQbMLKIKCA"
# ## Training a DT & explaining model
#
# With max leaf nodes as 4 (*to reduce training time*)
# + colab={"base_uri": "https://localhost:8080/"} id="NNbwH8g4IKCB" outputId="cc530c64-cf63-4289-b7ef-f24c96f029ce"
# %%time
# Materialise the processed train/valid features and targets, then fit a tiny
# tree (4 leaves) so the resulting splits are easy to visualise.
xs,y = to.train.xs,to.train.y
valid_xs,valid_y = to.valid.xs,to.valid.y
m = DecisionTreeClassifier(max_leaf_nodes=4)
m.fit(xs, y);
# + colab={"base_uri": "https://localhost:8080/", "height": 590} id="JFMLcB34IKCB" outputId="aa156dac-e1df-4ad1-cbf6-92ae077267bb"
# Render the fitted 4-leaf tree with fastbook's graphviz helper.
draw_tree(m, xs, size=10, leaves_parallel=True, precision=2)
# + [markdown] id="1iyRMQKUIKCC"
# 1. The top node represents the *initial model* It's the simples model!
# 2. We do a greedy division of the data based on the cat16. If the value is less than or equal to 2.5 we move to the left if not then to right
#
# ... and so on to create the 4 leaf nodes
#
# + colab={"base_uri": "https://localhost:8080/"} id="sT-re1ABIKCD" outputId="8e6fe8f5-3786-47dc-a32e-c70d0b0d6c5e"
# Level -> index mapping for the column the root split used.
to.classes['cat16']
# + colab={"base_uri": "https://localhost:8080/"} id="JjMi9-VhIKCD" outputId="9c52edda-4375-49fb-8f0f-1437ef53ca13"
# Encoded values actually present for cat16 after Categorify.
to['cat16'].unique()
# + [markdown] id="WS5IxMzPIKCD"
# This shows a chart of the distribution of the data for each split point.
# + colab={"base_uri": "https://localhost:8080/", "height": 474} id="YWYamfC8IKCE" outputId="fb230336-4298-48ed-cb41-c240041467f8"
# Visualise the tree on a random 500-row sample (dtreeviz plots the class
# distribution at every split point).
samp_idx = np.random.permutation(len(y))[:500]
dtreeviz(m, xs.iloc[samp_idx], y.iloc[samp_idx], xs.columns, dep_var,
        fontname='DejaVu Sans', scale=1.6, label_fontsize=10,
        orientation='LR')
# + [markdown] id="25HzCpNpIKCE"
# Lets try to see how the model performs when the `max_leaf_node` limit is removed
# + id="rTJOwAg5IKCF"
# Refit with no leaf limit: sklearn's defaults grow the tree until leaves are
# pure, which is what produces the overfitting examined below.
m = DecisionTreeClassifier()
m.fit(xs, y);
# + [markdown] id="08_t0Ly_IKCF"
# ## Defining helper functions
#
# `get_auc_scores`
#
# `plot_roc_curve`
# + id="0vk-R-PhIKCG"
def get_auc_scores(clf, X_train, X_test, y_train, y_test):
    """Print train/test ROC-AUC for a fitted binary classifier.

    Uses the positive-class probabilities from ``clf.predict_proba`` and
    returns the scores on the test split for further plotting.
    """
    # Score both splits in one pass; unpacking evaluates train first, test second.
    y_train_score, y_test_score = (clf.predict_proba(split)[:, 1]
                                   for split in (X_train, X_test))
    print(f" Training AUC: {roc_auc_score(y_train, y_train_score)}, "
          f"Testing AUC: {roc_auc_score(y_test, y_test_score)}")
    return y_test_score
# + colab={"base_uri": "https://localhost:8080/"} id="oA2o0hSlIKCG" outputId="bf1b4ae2-bbdf-440a-9a0f-0eacf64b7725"
# Print train/test AUC for the unconstrained tree and keep the test scores.
y_test_score = get_auc_scores(m , xs, valid_xs, y, valid_y)
# + id="VJVVQfRnIKCH"
def plot_roc_curve(y_test, y_test_score):
    """Plot the ROC curve for the test split, with its AUC in the legend."""
    fpr, tpr, _ = roc_curve(y_test, y_test_score)
    area = auc(fpr, tpr)
    plt.figure()
    plt.plot(fpr, tpr, label="ROC curve (area = %0.2f)" % area)
    plt.plot([0, 1], [0, 1], "k-")  # chance diagonal for reference
    plt.axis([0.0, 1.0, 0.0, 1.05])
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.title("Receiver operating characteristic")
    plt.legend(loc="lower right")
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 302} id="qGkeK79eIKCH" outputId="76085268-ce1e-4244-aa6d-e660b21569fb"
# ROC for the fully-grown (overfitting) tree.
plot_roc_curve(valid_y, y_test_score)
# + [markdown] id="1aEri87sIKCI"
# There appears to be major overfitting during training!
#
# Lets check how many leaves are model is using vs the number of training samples
# + colab={"base_uri": "https://localhost:8080/"} id="ONd_QlcoIKCI" outputId="abda4ba0-06e6-4489-f843-0754a3ba8974"
# Leaf count vs number of training rows — a rough overfitting gauge.
m.get_n_leaves(), len(xs)
# + [markdown] id="YztR7AOnIKCJ"
# Though we have not got as many leaf nodes as the training data... there are still too many leaf nodes vs data points!
#
# That seems a little over-enthusiastic. Indeed, sklearn's default settings allow it to continue splitting nodes until there is only one item in each leaf node. Let's change the stopping rule to tell sklearn to ensure every leaf node contains at least 25 records:
# + colab={"base_uri": "https://localhost:8080/"} id="iwWLBTcKIKCJ" outputId="5f67d066-7575-4d79-fc22-224e503db9b6"
# Refit with at least 25 records per leaf to curb the overfitting, then
# re-score. (to.train.xs / to.train.y are the same data as xs / y above.)
m = DecisionTreeClassifier(min_samples_leaf=25)
m.fit(to.train.xs, to.train.y)
y_test_score = get_auc_scores(m, xs, valid_xs, y, valid_y)
# + colab={"base_uri": "https://localhost:8080/", "height": 302} id="--IhRYlJIKCK" outputId="1176d78b-b7a1-47de-eb4f-3fc37ad84850"
# ROC for the regularised tree.
plot_roc_curve(valid_y, y_test_score)
# + [markdown] id="hYkiksmlIKCK"
# This seems better!!! At least we are not overfitting the training set.
#
# And the AUC curve for the validation dataset is also better. This shows us that we are on the right track!
#
# Let's check how many leaf-nodes were used for training
# + colab={"base_uri": "https://localhost:8080/"} id="QthGN9lHIKCL" outputId="1d6b288f-aeac-40cd-9d60-0f475c6463ae"
# Leaf count after regularisation.
m.get_n_leaves()
# + [markdown] id="Kb50fbsLIKCL"
# Lets now define a RandomForest Classifier
# + id="Nl9ZiRj9IKCL"
def rf(xs, y, n_estimators=40, max_samples=200_000,
       max_features=0.5, min_samples_leaf=5, **kwargs):
    """Fit and return a RandomForestClassifier with sensible tabular defaults.

    Parameters
    ----------
    xs, y : training features and labels.
    n_estimators : number of trees in the forest.
    max_samples : rows drawn (with replacement) to train each tree.
    max_features : fraction of columns considered at each split.
    min_samples_leaf : minimum rows required in a leaf (regularization).
    **kwargs : extra keyword arguments forwarded to RandomForestClassifier
        (the original silently discarded these).

    Returns
    -------
    A fitted RandomForestClassifier (`oob_score=True` so `oob_score_` is set).
    """
    return RandomForestClassifier(n_jobs=-1, n_estimators=n_estimators,
        max_samples=max_samples, max_features=max_features,
        min_samples_leaf=min_samples_leaf, oob_score=True, **kwargs).fit(xs, y)
# + colab={"base_uri": "https://localhost:8080/"} id="UaiARfIJIKCM" outputId="f39090f3-6509-4764-9241-4458614bfadc"
# %%time
m = rf(xs, y);
# + colab={"base_uri": "https://localhost:8080/"} id="xdjxLbvAIKCM" outputId="8ce6c5af-e67f-49bf-fb2e-342a83c60347"
y_test_score_1 = get_auc_scores(m, xs, valid_xs, y, valid_y)
# + colab={"base_uri": "https://localhost:8080/", "height": 302} id="gxDYGw7aIKCN" outputId="803188d8-a8a0-4c97-ee1d-3056c2f09cc8"
plot_roc_curve(valid_y, y_test_score_1)
# + [markdown] id="5JpYjBRDIKCN"
# .....and yes, there is some improvement!
# + id="2P9qlwWRIKCO"
preds = np.stack([t.predict(valid_xs) for t in m.estimators_])
# + colab={"base_uri": "https://localhost:8080/"} id="BlDcB5M2IKCO" outputId="e957c00f-7d07-47d2-e413-e3e051fca20d"
auc = roc_auc_score(valid_y, preds.mean(0))
auc
# + [markdown] id="4Me2VoQIIKCO"
# As can be seen as we add more estimators, the AUC score keeps on increasing, but there is a tapering off after the 30th tree
# + colab={"base_uri": "https://localhost:8080/", "height": 268} id="ZAPWCUk8IKCP" outputId="1a232c81-4eab-4b70-d6f0-4eec305a8ecb"
plt.plot([roc_auc_score(valid_y, preds[:i+1].mean(0)) for i in range(40)]);
# + [markdown] id="Qz-xstbPIKCP"
# We saw how the model averages the individual tree's predictions to get an overall prediction—that is, an estimate of the value. But how can we know the confidence of the estimate? One simple way is to use the standard deviation of predictions across the trees, instead of just the mean. This tells us the relative confidence of predictions. In general, we would want to be more cautious of using the results for rows where trees give very different results (higher standard deviations), compared to cases where they are more consistent (lower standard deviations).
#
# In the earlier section on creating a random forest, we saw how to get predictions over the validation set, using a Python list comprehension to do this for each tree in the forest:
# + colab={"base_uri": "https://localhost:8080/"} id="DYz4zXwnIKCa" outputId="8046ffa6-dbbd-4d77-dd5a-40cc0b04924f"
preds.shape
# + [markdown] id="mbmKbUrnIKCa"
# Now we have a prediction for every tree and every data point (40 trees and 60k data points) in the validation set.
#
# Using this we can get the standard deviation of the predictions over all the trees, for each data point:
# + colab={"base_uri": "https://localhost:8080/"} id="Bd-OyDGvIKCb" outputId="eb1a106c-880a-4afe-b759-ee59cf1cac9f"
preds_std = preds.std(0)
preds_std[:5]
# + [markdown] id="chBO3i7tIKCb"
# ## Feature Importance
#
# It's not normally enough just to know that a model can make accurate predictions—we also want to know how it's making predictions. feature importance gives us insight into this. We can get these directly from sklearn's random forest by looking in the `feature_importances_` attribute. Here's a simple function we can use to pop them into a DataFrame and sort them:
# + id="u08OYeqpIKCb"
def rf_feat_importance(m, df):
    """Return a DataFrame of feature importances, most important first.

    Columns: 'cols' (feature names taken from `df`) and 'imp' (the fitted
    model's `feature_importances_`), sorted descending by importance.
    """
    importance = pd.DataFrame({'cols': df.columns, 'imp': m.feature_importances_})
    return importance.sort_values('imp', ascending=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="jD67-n2dIKCb" outputId="e629b6f0-6f2f-4a0a-a104-b2796ca85c09"
fi = rf_feat_importance(m, xs)
fi[:10]
# + colab={"base_uri": "https://localhost:8080/", "height": 431} id="PaIZa8hHIKCb" outputId="004d31ab-477a-468d-cb17-2e0ad5658dab"
def plot_fi(fi):
    """Horizontal bar chart of a feature-importance frame from `rf_feat_importance`.

    `kind` is passed by keyword: positional extra arguments to
    `DataFrame.plot` were deprecated in pandas 1.1 and removed in 2.0,
    so the original `fi.plot('cols', 'imp', 'barh', ...)` breaks there.
    """
    return fi.plot('cols', 'imp', kind='barh', figsize=(12,7), legend=False)
plot_fi(fi[:32]);
# + [markdown] id="hQ51X25WIKCc"
# The way these importances are calculated is quite simple yet elegant. The feature importance algorithm loops through each tree, and then recursively explores each branch. At each branch, it looks to see what feature was used for that split, and how much the model improves as a result of that split. The improvement (weighted by the number of rows in that group) is added to the importance score for that feature. This is summed across all branches of all trees, and finally the scores are normalized such that they add to 1.
# + [markdown] id="lHu5KEfsIKCc"
# We select the features which give normalized importance scores of > 0.005
# + colab={"base_uri": "https://localhost:8080/"} id="7bq4v11dIKCc" outputId="1c02b795-9411-420d-a5ce-0aa1a380efe0"
# Keeping only the important variables
to_keep = fi[fi.imp>0.005].cols
len(to_keep)
# + colab={"base_uri": "https://localhost:8080/"} id="2R_3wax4IKCc" outputId="9ab69e6b-fe06-4559-ef65-cf28aa89e8e1"
# %%time
xs_imp = xs[to_keep]
valid_xs_imp = valid_xs[to_keep]
m = rf(xs_imp, y)
y_test_score = get_auc_scores(m, xs_imp, valid_xs_imp, y, valid_y)
# + [markdown] id="r3iIi4o8IKCc"
# Though the AUC score has not improved, we have reduced the number of columns.
#
# + colab={"base_uri": "https://localhost:8080/"} id="EpeQVtJhIKCc" outputId="18432ab8-2733-498c-a642-c81041853e90"
len(xs.columns), len(xs_imp.columns)
# + colab={"base_uri": "https://localhost:8080/", "height": 431} id="TrohsvUaIKCd" outputId="47a623ed-4a2c-4137-cad7-00b6130b0848"
plot_fi(rf_feat_importance(m, xs_imp));
# + [markdown] id="snqW-Ip8IKCd"
# ## Removing redundant features
# + colab={"base_uri": "https://localhost:8080/", "height": 377} id="1xXrpM-OIKCd" outputId="9b27176a-85cd-457e-e7ee-ffa7a468b001"
cluster_columns(xs_imp)
# + [markdown] id="lW8oE4vEIKCd"
# * In this chart, the pairs of columns that are most similar are the ones that were merged together early, far from the "root" of the tree at the left.
# * These might be so closely correlated they are practically synonyms for each other
# * Let's try removing some of these closely related features to see if the model can be simplified without impacting the accuracy.
# * First, we create a function that quickly trains a random forest and returns the `oob_score_` score
# * we're just going to use it to compare different models, based on removing some of the possibly redundant columns
#
# + id="jgXOsvoQIKCd"
def get_oob(df, target=None):
    """Train a quick random forest on `df` and return its out-of-bag score.

    Used to cheaply compare feature subsets: a similar `oob_score_` after
    dropping a column suggests the column was redundant.

    Parameters
    ----------
    df : feature DataFrame to train on.
    target : label vector; defaults to the notebook-global `y` for
        backward compatibility with the original single-argument form.
    """
    if target is None:
        target = y  # fall back to the notebook-global labels
    m = RandomForestClassifier(n_estimators=40, min_samples_leaf=15,
        max_samples=50000, max_features=0.5, n_jobs=-1, oob_score=True)
    m.fit(df, target)
    return m.oob_score_
# + colab={"base_uri": "https://localhost:8080/"} id="x0hJ1TMGIKCd" outputId="a6564ddf-3f65-4838-8086-47de3384fa48"
get_oob(xs_imp) # This is our reference point
# + colab={"base_uri": "https://localhost:8080/"} id="jNrz93H6IKCe" outputId="2f09f022-1c43-4c22-df74-ba9ee0ba0abe"
# %%time
{c:get_oob(xs_imp.drop(c, axis=1)) for c in (
'cont2', 'cont1', 'cont0', 'cont10', 'cat11', 'cat2',
'cont6', 'cont5', 'cat18', 'cat15', 'cat0', 'cat16')}
# + [markdown] id="nOjhMeFQIKCe"
# * It seems that there is not much difference when you drop the variables (except for `cat16` ...which we have seen to have high feature importance)
# * Now let's try dropping multiple variables. We'll drop one from each of the tightly aligned pairs we noticed earlier. We will also use the result from the above
#
# And see what it does to our OOB scores:
# + colab={"base_uri": "https://localhost:8080/"} id="8OwDaEnXIKCe" outputId="cb4f11bb-409c-4581-a277-7b568ad2b9cb"
# %%time
to_drop = ['cont1', 'cont10', 'cat2', 'cont6', 'cat0'] # The selection of these variables takes some trial & error
# I checked the feature importance plot & the above oob_scores to
# select the best set
get_oob(xs_imp.drop(to_drop, axis=1))
# + [markdown] id="j6WObl0hIKCe"
# Wow!! There is a slight drop in oob_scores but seems okay...and we can go ahead with dropping these columns
# + colab={"base_uri": "https://localhost:8080/"} id="i37HMX7rIKCe" outputId="ad15b38c-2b56-45e4-f9f2-14126fbc9406"
xs_final = xs_imp.drop(to_drop, axis=1)
valid_xs_final = valid_xs_imp.drop(to_drop, axis=1)
m = rf(xs_final, y)
y_test_score = get_auc_scores(m, xs_final, valid_xs_final, y, valid_y)
# + colab={"base_uri": "https://localhost:8080/"} id="ZN_vEfGPIKCe" outputId="fdc4f523-a52d-492a-81e7-661bb4999413"
fi = rf_feat_importance(m, xs_final)
print(fi[:10])
# + colab={"base_uri": "https://localhost:8080/", "height": 448} id="KkyttR9WIKCf" outputId="1aa40d76-b001-4efe-d0ac-c626db285285"
plot_fi(fi)
# + [markdown] id="zrS2nCm1IKCf"
# ## Partial Dependence Plots
#
# The top 5 variables as per the feature importance are.
# We will see if there are any relationships between them and the target variable:
#
# * cat16
# * cat15
# * cont5
# * cat18
# * cont2
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="zhcTvrDCIKCg" outputId="a2ac9009-91d0-43d4-a61c-ad41fb8ba70f"
figure, axis = plt.subplots(1, 3)
cat_columns_interest = ['cat15', 'cat16', 'cat18']
for idx in range(3) :
p = valid_xs_final[cat_columns_interest[idx]].value_counts(sort=False).plot.barh(ax = axis[idx])
c = to.classes[cat_columns_interest[idx]]
#plt.yticks(range(len(c)), c)
axis[idx].set_title(f"{cat_columns_interest[idx]}");
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="EcPJ7YLbIKCg" outputId="fdbd845a-4438-4ef5-c968-e69214898e85"
figure, axis = plt.subplots(1, 2)
cont_columns_interest = ['cont2', 'cont5']
for idx in range(2) :
p = valid_xs_final[cont_columns_interest[idx]].plot.hist(ax = axis[idx])
#plt.yticks(range(len(c)), c)
axis[idx].set_title(f"{cont_columns_interest[idx]}");
# + [markdown] id="QAzmTjBRIKCg"
# Partial dependence plots try to answer the question: if a row varied on nothing other than the feature in question, how would it impact the dependent variable?
#
# For example, how does a change in the `cat16` value impact the `target` variable?
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="51UBdVdPIKCg" outputId="5caf1808-8209-4085-d12f-6890fd9802f8"
from sklearn.inspection import plot_partial_dependence
fig,ax = plt.subplots(figsize=(12, 4))
plot_partial_dependence(m, valid_xs_final, ['cat15','cat16'],
grid_resolution=20, ax=ax);
# + [markdown] id="OoGy0RhRIKCg"
# The above plot shows that for larger values for `cat15` >>> its likely to move the probability closer to 1.0
#
# However for `cat16` notice the peak & valley at 2.0 & 4.0 respectively
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="wtMnAm1pIKCh" outputId="4da720d5-9736-4ec6-ac6b-ccdf70e2772d"
from sklearn.inspection import plot_partial_dependence
fig,ax = plt.subplots(figsize=(12, 4))
plot_partial_dependence(m, valid_xs_final, ['cat16','cat18'],
grid_resolution=20, ax=ax);
# + [markdown] id="R6lLNcH9IKCh"
# ## Tree Interpretation
# + id="Xt40rlZVIKCh"
#hide
import warnings
warnings.simplefilter('ignore', FutureWarning)
from treeinterpreter import treeinterpreter
from waterfall_chart import plot as waterfall
# + id="o0raPG0mIKCh"
row = valid_xs_final.iloc[:5]
prediction,bias,contributions = treeinterpreter.predict(m, row.values)
# + colab={"base_uri": "https://localhost:8080/"} id="yJhOx4HrIKCi" outputId="2279d3a0-eef6-4c2e-827d-540aa7f32c78"
prediction[0], bias[0], contributions[0].sum()
# + colab={"base_uri": "https://localhost:8080/"} id="i0SE-mZjIKCi" outputId="5014a306-401a-4c3f-b8b2-520859ffa6d8"
# Needed this code block to understand splicing of 3D arrays
contributions[0, :, 0].shape
# + colab={"base_uri": "https://localhost:8080/"} id="2jEWh0yRIKCi" outputId="a50da35f-c6d5-4d0f-b710-cf90097d7e93"
valid_xs_final.columns
# + colab={"base_uri": "https://localhost:8080/", "height": 293} id="yo80G_LpIKCi" outputId="543beef4-d4b4-4546-af36-ce4e3628f0e7"
#This is causing an error while saving
waterfall(valid_xs_final.columns, contributions[0,:,1], threshold=0.08,
rotation_value=45, formatting='{:,.3f}');
# + [markdown] id="AMqP-I1cIKCi"
# **Note**: Prediction for the first data point was 0
#
# ## Training a Neural Network
#
#
# ...to do....
# + id="FsWK-APHIKCi"
| Day_2_VERY_SIMPLE_tabular_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.11 64-bit (''base'': conda)'
# name: python3
# ---
# +
import torch
from torch import nn
import torch.nn.functional as F
from torchvision.io import read_image, ImageReadMode
import torchvision.transforms as T
from random import randint
import numpy as np
import pylab as pl
from importlib import reload
from src import *
# -
N_CHANNELS = 16 # Number of CA state channels
TARGET_PADDING = 8 # Number of pixels used to pad the target image border
TARGET_SIZE = 40 # Size of the target emoji
IMAGE_SIZE = TARGET_PADDING+TARGET_SIZE
BATCH_SIZE = 4
N_ITERS = 45 # Iterations before applying the loss
POOL_SIZE = 512
CELL_FIRE_RATE = 0.5
# Starting state
pool = SamplePool(POOL_SIZE, N_CHANNELS, IMAGE_SIZE)
imshow(pool[0])
# + tags=[]
# Imports the target emoji
target = read_image("images/firework.png", ImageReadMode.RGB_ALPHA).float()
target = T.Resize((TARGET_SIZE, TARGET_SIZE))(target)
target = RGBAtoFloat(target)
imshow(target)
# -
target = target.cpu()
loss_fn(pad(target, TARGET_PADDING), torch.nn.MSELoss)(pool.sample(16)[0])
# +
# Define the model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
target = target.to(device)
model = CAModel().to(device)
# -
randint(*a)
# +
a = (1,2)
print(*a)
# -
# Train the model
torch.backends.cudnn.benchmark = True # Speeds up training
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = loss_fn(pad(target, TARGET_PADDING), torch.nn.MSELoss)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[40,80], gamma=0.3)
model.train_CA(optimizer, criterion, pool, n_epochs=300, scheduler=scheduler, kind="growing", square_side=20)
imshow(model.evolve(pool.sample(1)[0].cuda(), 50)[0])
pl.plot(model.losses)
pl.xlabel("Epochs")
pl.ylabel("Loss")
pl.show()
pl.savefig("growing_losses.png", dpi=200)
# Make evolution video
model.make_video(video_size=IMAGE_SIZE, n_iters=N_ITERS*2, fps=10, rescaling=8)
# A damaged image
imshow(pool[3][0])
# After N_ITERS*5 the CA perfectly corrects it
x, _ = pool[3:4]
x = x.to(device)
x = model.evolve(x, N_ITERS*5)
print(criterion(x))
imshow(x[0])
| pytorch_ca/growingCA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <span style="color:darkblue">Using the Geostreams API</span>
# As of January 2020 gltg has 32,122,836 datapoints.<br>
# <span style="color:red">Please don't fetch all of them at once.</span>
# <hr>
#
#
import requests
import json
import csv
from csv import DictWriter
# ### Set general parameters
# <span style="color:red">Go to the api_server route to create an account!</span><br>
# <span style="color:green">Set server, user credentials, and location for downloads</span>
# +
api_server = r"https://greatlakestogulf.org/geostreams"
user = {'identifier': '******', 'password': '******'}
output_directory = r"downloads"
# -
# ### Authenticate
r = requests.post(api_server + '/api/authenticate', data=json.dumps(user), headers={'Content-Type': 'application/json'})
print("Authentication status:", r.status_code, "for", api_server)
headers = {"x-auth-token": r.headers["x-auth-token"], "Content-Encoding": "application/json"}
# <hr>
#
# ## <span style="color:darkblue">Sensor routes</span>
# ### Get CSV of all sensors with parameters
# +
# Fetch every sensor record from the Geostreams API and flatten the fields
# we care about into a one-row-per-sensor CSV summary.
sensors = requests.get(api_server + "/api/sensors",headers=headers).json()["sensors"]
print(output_directory + '/gltg_sensors.csv')
with open(output_directory + '/gltg_sensors.csv', 'w') as f:
    fieldnames = [
        'source','name','location', 'longitude', 'latitude', 'max_end_time', 'min_start_time',
        'parameters' , 'huc8', 'huc_name', 'online_status'
    ]
    writer = DictWriter(f, fieldnames=fieldnames)
    writer.writeheader()
    n_sensors = 0       # total sensors seen (informational)
    n_sensors_pos = 0   # sensors skipped because they expose no usable parameters
    for sensor in sensors:
        n_sensors += 1
        parameters_list = []
        for param in sensor['parameters']:
            # Drop bookkeeping fields and quality-control ("-qc") parameters.
            if param in ['owner','source','unit_code']:
                continue
            if param[-3:] != "-qc":
                parameters_list.append(param + ',\n')
        parameters = "".join(parameters_list)
        huc8 = None
        # huc8 may be either a dict holding a 'code' key or a bare value —
        # NOTE(review): inferred from this branch; confirm against the API schema.
        if 'code' in sensor['properties']['huc']['huc8']:
            huc8 = sensor['properties']['huc']['huc8']['code']
        else:
            huc8 = sensor['properties']['huc']['huc8']
        # Skip sensors with no remaining parameters rather than writing empty rows.
        if len(parameters) == 0:
            n_sensors_pos += 1
            continue
        writer.writerow({
            "source": sensor['properties']['type']['title'],
            "name": sensor['name'],
            "location": sensor['properties'].get('location', ""),
            'longitude': str(sensor['geometry']['coordinates'][0]),
            'latitude': str(sensor['geometry']['coordinates'][1]),
            'max_end_time': sensor.get('max_end_time',''),
            'min_start_time': sensor.get('min_start_time',''),
            'parameters': parameters,
            'huc8': huc8,
            'huc_name': sensor['properties']['huc'].get('huc_name',''),
            'online_status': sensor['properties'].get('online_status',"")
        })
    print("Sensors skipped " + str(n_sensors_pos) + " of Sensors total " + str(len(sensors)))
# -
# <hr>
#
# ## <span style="color:darkblue">Datapoint Routes</span>
# ### Download JSON of all datapoints by Sensor ID
# <span style="color:green">Set the Sensor ID!</span>
sensor_id = 22
# +
route = api_server + "/api/datapoints?sensor_id=" + str(sensor_id)
r = requests.get(route, headers=headers)
with open(output_directory + '/datapoints_sensor_' + str(sensor_id) + '.json', 'w') as f:
f.write(json.dumps(r.json(), indent=2))
print("Route: " + route)
print("Request Status:", str(r.status_code))
print("Number of datapoints:", len(r.json()))
print("Datapoint JSON saved to " + output_directory + '/datapoints_sensor_' + str(sensor_id) + '.json')
# -
# ### Download CSV of datapoints by sensor ID
# <span style="color:green">Set the Sensor ID!</span>
sensor_id = 22
# +
route = api_server + "/api/datapoints?sensor_id=" + str(sensor_id) + "&format=csv"
r = requests.get(route, headers=headers)
with open(output_directory + '/datapoints_sensor_' + str(sensor_id) + '.csv', 'w') as f:
f.write(r.text)
print("Route: " + route)
print("Request Status:", str(r.status_code))
print("Datapoint JSON saved to " + output_directory + '/datapoints_sensor_' + str(sensor_id) + '.csv')
# -
| pygeostreams/jupyter_notebooks/get_sensors_datapoints.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# %ls C:\Users\pje17\Desktop\Lyricsis\M5_Idol_lyrics\Data
song = pd.read_csv('C:/Users/pje17/Desktop/Lyricsis/M5_Idol_lyrics/SongTidy/FinalTidy/tidydata/lyricist_tidy03.csv')
song.head(3)
song.shape
song[song.artist == '방탄소년단']
artist = pd.read_csv('M5_Idol_lyrics/Data/Data20180921/artist_info_combined_ver04.csv', encoding='euc-kr')
artist.head(3)
artist[artist.artist == '마마무']
com = pd.merge(song, artist, on='artist', left_index=False)
com = com.drop(com.columns[0], axis=1)
com[com.artist == '마마무']
# ### 아이돌 멤버가 작곡, 작사, 편곡에 참여했는지 여부 판단하여 남여 비율 비교하기
# +
# 멤버 스플릿해주기. 작곡가, 작사가, 편곡가는 str.contains를 이용해서 보는 것으로!
# com['artist_m'] = com['artist_m'].str.replace(' ', '').str.split(',')
# com['lyricist'] = com['lyricist'].str.replace(' ', '')
# com['composer'] = com['composer'].str.replace(' ', '')
# com['arranger'] = com['arranger'].str.replace(' ', '')
com['artist_m'] = com['artist_m'].str.replace(' ', '').str.replace(r'\(.*?\)','').str.replace(')', '', regex=False).str.split(',')
com['lyricist'] = com['lyricist'].str.replace(' ', '').str.replace(r'\(.*?\)','').str.replace(')', '', regex=False)
com['composer'] = com['composer'].str.replace(' ', '').str.replace(r'\(.*?\)','').str.replace(')', '', regex=False)
com['arranger'] = com['arranger'].str.replace(' ', '').str.replace(r'\(.*?\)','').str.replace(')', '', regex=False)
# 괄호를 날려야함(마틸다의 경우 이상하게 이름이 적혀있어서 괄호가 중복임 ㅡㅡ;;)
# com['artist_m'] = com['artist_m'].str.split(', ')
# .str.split(',')
com[com.artist == '방탄소년단']
# -
# 스플릿을 통해 모두 개개인으로 분리시켰다.
#
# 이제 분석을 용이하게 하기 위해 우선 불필요한 칼럼을 드랍하자.
#
# 불필요한 칼럼 = album, release_date, song_genre, is_title, like, member_num, idol_id, agency_old, debut_t
# +
# # colulmn 드랍하기
# columns = ['album', 'release_date', 'song_genre', 'is_title', 'like', 'member_num', 'idol_id', 'agency_old', 'debut_t', 'creator']
# com2 = com.drop(com[columns], axis=1)
# com2[com2.artist == '방탄소년단']
# -
# 아티스트 멤버의 리스트의 인덱스를 돌면서 각각이 lyricist, composer, arranger의 str에 contain되어있는지를 확인한다.
#
# 담겨있다면 멤버 리스트에서 체크하는 데 사용하고 있던 이름을 빈 리스트에 append하고 새 칼럼에 넣어준다.
#
# 없다면 ''로 채우기.
#
#
com_f = com[com.artist_m.notnull()].reset_index(drop=True)
com_f.head()
com_f[com_f.artist == '마마무']
# +
# com_f.loc[7615].lyricist
com_f.lyricist.str.findall('|'.join(com_f.artist_m.loc[1348]))[1348]
# com_f['lyricist'].str.findall('|'.join(com_f.artist_m.loc[i]))[i]
# com3['lyricist_m'].loc[i] = com_f['lyricist'].str.findall('|'.join(com_f.artist_m.loc[i]))[i]
# -
len(com_f)
com_f.artist_m.loc[0]
com_f['lyricist_m'] = ''
com_f['composer_m'] = ''
com_f['arranger_m'] = ''
# for문 전에 복사해두기
com3 = com_f.copy()
len(com3)
com3.loc[1317]
# +
# artist_m 이 없는 건 드롭하고 시작하기
# lyricist, composer, arranger 셋 모두 Nan이면 드롭하기
com3.artist_m.isnull().sum()
# +
# True False여부로 str.contains인지 판단하여 값을 넣어주는 포문을 시도
# df2 = df[df['Column'].str.contains('|'.join(['M111', 'M222']))]
# com_f['lyricist'].str.contains('|'.join(com_f.artist_m.loc[0]))
# for i in range(len(com_f)-1):
# com_f['lyricist_m'].loc[i] = com_f['lyricist'].str.contains('|'.join(com_f.artist_m.loc[i]))[i]
# com_f.head()
for i in range(len(com_f)):
com3['lyricist_m'].loc[i] = com_f['lyricist'].str.findall('|'.join(com_f.artist_m.loc[i]))[i]
com3['composer_m'].loc[i] = com_f['composer'].str.findall('|'.join(com_f.artist_m.loc[i]))[i]
com3['arranger_m'].loc[i] = com_f['arranger'].str.findall('|'.join(com_f.artist_m.loc[i]))[i]
print(i)
# -
com3.tail()
com3[com3.artist == '동방신기']
# ### 정리한 파일로 분석하기
com4 = com3.copy()
com4.to_csv('artist_m_extracted.csv')
# +
# com4 = pd.read_csv('artist_m_extracted.csv')
# com4.shape
# -
# 작곡 여부만 판단하는 데이터프레임 뽑기. 멤버가 참여하지 않은 곡은 모두 제외.
compose = com4[(com4['composer_m'].str.len() != 0)]
compose = compose[compose.composer_m.notnull()]
compose.tail()
len(compose)
compose.gender.value_counts()
# 작사 여부만 판단하는 데이터프레임 뽑기. 멤버가 참여하지 않은 곡은 모두 제외.
# com4['lyricist_m'] = com4['lyricist_m'].str.replace("'", "", regex=False)
lyricist = com4[(com4['lyricist_m'].str.len() != 0)]
lyricist = lyricist[lyricist.lyricist_m.notnull()]
lyricist[lyricist.artist == '레드벨벳']
len(lyricist)
lyricist.gender.value_counts()
# 편곡 여부만 판단하는 데이터프레임 뽑기. 멤버가 참여하지 않은 곡은 모두 제외.
arrange = com4[(com4['arranger_m'].str.len() != 0)]
arrange = arrange[arrange.arranger_m.notnull()]
arrange.head()
len(arrange)
arrange.gender.value_counts()
arrange[arrange.gender == '남']
len(arrange[arrange.artist == '서태지와 아이들'])
lyricist[lyricist.artist == '동방신기']
compose[compose.gender == '혼']
# ### 작사 남녀 비율 그래프로 비교해보기
# 여성 곡 전체 갯수
len(com[com.gender == '여'])
# 남성 곡 전체 갯수
len(com[com.gender == '남'])
# ### 특정 멤버별로 보기
# 멤버가 작사에 참여한 곡 수 top 20.
test = pd.DataFrame(lyricist.groupby(['gender', 'artist']).lyricist_m.count().sort_values(ascending=False))
test.head(20)
# 멤버가 작곡에 참여한 곡 수 top 20.
test = pd.DataFrame(compose.groupby(['gender', 'artist']).composer_m.count().sort_values(ascending=False))
test.head(20)
# 멤버가 편곡에 참여한 곡 수 top 20.
test = pd.DataFrame(arrange.groupby(['gender', 'artist']).arranger_m.count().sort_values(ascending=False))
test.head(20)
# ### 기본데이터
| EDA/member_Participation_dataframe_SoHyun.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import Models
import numpy as np
import random as rn
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
rn.seed(1)
np.random.seed(1)
tf.random.set_seed(1)
tf.config.threading.set_inter_op_parallelism_threads(1)
tf.config.threading.set_intra_op_parallelism_threads(1)
# Load Data
X_brca_mRNA_data = pd.read_csv('./BRCA_mRNA.txt', sep=',', index_col=0) # Dataset has Donor ID as first column
X_brca_CNA_data = pd.read_csv('./BRCA_CNA.txt', sep=',' ,index_col=0) # Dataset has Donor ID as first column
y_brca_data = pd.read_csv('./BRCA_Clinical.txt', sep=',', index_col=0, names=["Label"]) # Dataset has Donor ID on first column and Label on second column.
X_brca_mRNA_filtered = X_brca_mRNA_data.drop(["TCGA-AO-A12C-01","TCGA-AR-A1AT-01","TCGA-BH-A18V-06"], axis=1)
X_brca_CNA_filtered = X_brca_CNA_data.drop(["TCGA-AR-A0U1-01"], axis=1)
y_brca_filtered = y_brca_data.drop(["TCGA-AO-A12C-01","TCGA-AR-A1AT-01","TCGA-BH-A18V-06"], axis=0)
X_train_first_norm, X_train_second_norm, X_swapped_first_norm, X_swapped_second_norm, X_test_first_norm, X_test_second_norm, X_train_concat, X_swapped_concat, X_test_concat, y_train, y_test, y_train_oh, y_test_oh \
= Models.prepare_datasets(X_brca_mRNA_filtered.T,X_brca_CNA_filtered.T, y_brca_filtered, test_size=0.2, swap_noise=0.15)
# CONCAT TRAIN AND TEST DATASET TO TRANSFORM THE ENTIRE DATA (FOR KPCA)
X_first = pd.concat([X_train_first_norm, X_test_first_norm], axis=0)
X_second = pd.concat([X_train_second_norm, X_test_second_norm], axis=0)
y = pd.concat([y_train, y_test], axis=0)
# -
import importlib
importlib.reload(Models)
# +
## Build and Train Multi Autoencoder
autoencoder, encoder, decoder, loss = Models.build_and_train_multi_autoencoder([X_train_first_norm,X_train_second_norm],
[X_train_first_norm,X_train_second_norm],
encoding_dim=100,
regularizer=tf.keras.regularizers.l1_l2(0.00005,0.000003),
dropout=0.25,
epochs=200,
mu=0.1)
## Encode datasets
X_latent_multi_ae = Models.encode_dataset([X_train_first_norm,X_train_second_norm], encoder)
X_latent_test_multi_ae = Models.encode_dataset([X_test_first_norm,X_test_second_norm], encoder)
## Build and Train Autoencoder
autoencoder, encoder, decoder, loss = Models.build_and_train_autoencoder(X_train_concat,
X_train_concat,
encoding_dim=100,
regularizer=tf.keras.regularizers.l1_l2(0.000005,0.000005),
dropout=0.2,
epochs=200,
validation_data=None)
## Encode datasets
X_latent_ae = Models.encode_dataset(X_train_concat, encoder)
X_latent_test_ae = Models.encode_dataset(X_test_concat, encoder)
## Build and Train Multi Denoising Autoencoder
autoencoder, encoder, decoder, loss = Models.build_and_train_multi_autoencoder([X_swapped_first_norm,X_swapped_second_norm],
[X_train_first_norm,X_train_second_norm],
encoding_dim=100,
regularizer=tf.keras.regularizers.l1_l2(0.000005,0.000005),
dropout=0,
epochs=200,
mu=0.5)
## Encode datasets
X_latent_multi_dae = Models.encode_dataset([X_train_first_norm,X_train_second_norm], encoder)
X_latent_test_multi_dae = Models.encode_dataset([X_test_first_norm,X_test_second_norm], encoder)
## Build and Train Denoising Autoencoder
autoencoder, encoder, decoder, loss = Models.build_and_train_autoencoder(X_swapped_concat,
X_train_concat,
encoding_dim=100,
regularizer=tf.keras.regularizers.l1_l2(0.000005,0.000005),
dropout=0.2,
epochs=200)
## Encode datasets
X_latent_dae = Models.encode_dataset(X_train_concat, encoder)
X_latent_test_dae = Models.encode_dataset(X_test_concat, encoder)
# Perform rbf kernel to divided datasets
X_kpca, _ = Models.perform_multi_KPCA(X_first, X_second, y, gamma=0.008, mu=0.8)
# Split into train and test sets
X_latent_kpca, X_latent_test_kpca, y_train_kpca, y_test_kpca = train_test_split(X_kpca, y, test_size=0.2, random_state=1)
# -
### CLASSIFICATION ###
# We use the reduced datasets to train a classifier and compare them.
y_train = np.ravel(y_train)
y_test = np.ravel(y_test)
# Original Concatenated Dataset Classification
original_classify = Models.classify_with_cv(X_train_concat, X_test_concat, y_train, y_test, model_type="Original")
# Multi-AE Classification
multi_ae_classify = Models.classify_with_cv(X_latent_multi_ae, X_latent_test_multi_ae, y_train, y_test, model_type="Multi-AE")
# AE Classification
ae_classify = Models.classify_with_cv(X_latent_ae, X_latent_test_ae, y_train, y_test, model_type="AE")
# Multi-DAE Classification
multi_dae_classify = Models.classify_with_cv(X_latent_multi_dae, X_latent_test_multi_dae, y_train, y_test, model_type="Multi-DAE")
# DAE Classification
dae_classify = Models.classify_with_cv(X_latent_dae, X_latent_test_dae, y_train, y_test, model_type="DAE")
# KPCA Classification
kpca_classify = Models.classify_with_cv(X_latent_kpca, X_latent_test_kpca, y_train, y_test, model_type="KPCA")
###### CLUSTERING ###
original_clustering = Models.cluster(X_train_concat, y_train, model_type="Original")
# Multi-AE Clustering
multi_ae_clustering = Models.cluster(X_latent_multi_ae, y_train, model_type="AE")
# AE Clustering
ae_clustering = Models.cluster(X_latent_ae, y_train, model_type="AE")
# Multi-DAE Clustering
multi_dae_clustering = Models.cluster(X_latent_multi_dae, y_train, model_type="AE")
# DAE Clustering
dae_clustering = Models.cluster(X_latent_dae, y_train, model_type="AE")
# KPCA Clustering
kpca_clustering = Models.cluster(X_latent_kpca, y_train, model_type="AE")
results = [original_classify + original_clustering, multi_ae_classify + multi_ae_clustering, ae_classify + ae_clustering, multi_dae_classify + multi_dae_clustering, dae_classify + dae_clustering, kpca_classify + kpca_clustering]
scores = pd.DataFrame(results, index=["Original","Multi-AE","AE","Multi-DAE","DAE","KPCA"],
columns=["LR accuracy","SVM accuracy", "RF accuracy", "LR ROC-AUC", "SVM ROC-AUC", "RF ROC-AUC","KMeans Silhouette","Spectral Silhouette","Hierarchical Silhouette","KMeans MI","Spectral MI","Hierarchical MI"])
scores
# +
data_acc = scores[["LR accuracy","SVM accuracy","RF accuracy"]]
sns.set()
ax = sns.lineplot(data=data_acc)
plt.legend(fontsize='small')
ax.set_xlabel(f"model")
ax.set_ylabel(f"accuracy")
ax.figure.set_size_inches(10 , 5)
ax.set_title(f"model vs accuracy")
plt.show()
# -
| Multiomic Dataset/Multiomic_Model_Compare.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tf2]
# language: python
# name: conda-env-tf2-py
# ---
from glob import glob
import os
import sys
from tqdm import tqdm
data_path = r'E:\m_0.65_0.85'
fileNames = glob(data_path+'\\*0.txt')
fileNames=fileNames
# dump
def dump(sr=None, size=512):
    """Batch-convert the raw tab-separated simulation files into chunked .npy/.npz files.

    Each input file holds three signal columns (I1, I2, DOP) of length `sr`
    and encodes the physical parameters m and alpha in its file name
    ("m=<m>,alpha=<alpha>.txt").  Files are processed in chunks of roughly
    `size` MB so the whole dataset never has to fit in memory at once.

    Parameters
    ----------
    sr : int, optional
        Samples per signal.  If None it is read from the experiment's
        parameter file in `data_path`.
    size : int, optional
        Approximate chunk size in MB per output file set.

    Returns
    -------
    str
        The output directory the chunk files were written to.
    """
    if sr is None:  # read the sample count from the experiment's config file
        with open(data_path + '\\输入参数文本.txt') as _f:
            _paras = _f.readlines()
        sr = int(_paras[2].split('\t')[0]) + 1
    # ---- work out how many files fit into one `size`-MB chunk ----
    with open(fileNames[0]) as _f:
        simple_size = sys.getsizeof(_f.read()) / 1024 / 1024  # MB of one sample file
    num = int(size / simple_size) + 1   # files per chunk
    N = int(len(fileNames) // num)      # number of full chunks
    last_num = len(fileNames) % num     # files in the final, partial chunk
    begin = 0
    print('size,simple_size,num,N,last_num\n', size, simple_size, num, N, last_num)
    out_path = r'E:\out'
    for c_i in tqdm(range(N + 1)):
        if c_i == N:
            num = last_num
        last = begin + num
        filename = fileNames[begin:last]
        # Reset the parameter lists per chunk so M_ALPHA{c}.npz stays aligned
        # with the rows of i1{c}.npy (the original accumulated them across
        # chunks, making each .npz grow out of sync with its signal arrays).
        M, ALPHA = [], []
        i1 = np.zeros((len(filename), sr))
        i2 = np.zeros((len(filename), sr))
        dop = np.zeros((len(filename), sr))
        for idx, i in enumerate(filename):
            baseName = os.path.basename(i)
            m, alpha = baseName.split(',')
            alpha = '.'.join(alpha.split('.')[:-1])  # strip the ".txt" extension
            m = m.split('=')[-1]
            alpha = alpha.split('=')[-1]
            M.append(m)
            ALPHA.append(alpha)
            # Parse the file ONCE and slice out the three columns
            # (the original re-read the same file three times).
            table = pd.read_table(i, encoding='gb2312', sep='\t', index_col=None)
            i1[idx] = table.I1.to_numpy().astype(np.float32)
            i2[idx] = table.I2.to_numpy().astype(np.float32)
            dop[idx] = table.DOP.to_numpy().astype(np.float32)
        np.save(os.path.join(out_path, f'i1{c_i}.npy'), i1)
        np.save(os.path.join(out_path, f'i2{c_i}.npy'), i2)
        np.save(os.path.join(out_path, f'dop_{c_i}.npy'), dop)
        np.savez(os.path.join(out_path, f'M_ALPHA{c_i}.npz'), M=M, ALPHA=ALPHA)
        begin = last
    return out_path
target_path = dump()
a=np.ones((10,5))
b=np.zeros((3,6))
np.savez('ab.npz',a=a,b=b)
res = np.load('ab.npz')
res['a']
# load
def load_data(path):
    """Endlessly cycle over the chunk files dumped into `path`.

    Yields
    ------
    tuple
        (i1, i2, dop, M, ALPHA) for one chunk; wraps around after the last
        chunk, so the generator never terminates.
    """
    fileNames = glob(os.path.join(path, '*'))
    # Sort each file family: glob returns files in arbitrary, platform-
    # dependent order, but the c-th i1/i2/dop/M_ALPHA entries must all come
    # from the same chunk.  The families share one chunk-index naming scheme,
    # so lexicographic order pairs them consistently.
    name_i1 = sorted(i for i in fileNames if 'i1' in i)
    name_i2 = sorted(i for i in fileNames if 'i2' in i)
    name_dop = sorted(i for i in fileNames if 'dop' in i)
    name_m_alpha = sorted(i for i in fileNames if 'M_ALPHA' in i)
    L = len(name_dop)  # the three/four file families all have the same length
    c = 0
    while True:
        i1 = np.load(name_i1[c % L])
        i2 = np.load(name_i2[c % L])
        dop = np.load(name_dop[c % L])
        res = np.load(name_m_alpha[c % L])
        M, ALPHA = res['M'], res['ALPHA']
        yield i1, i2, dop, M, ALPHA
        c += 1
# Stream the dumped chunks back; `data` is an endless generator.
data = load_data(target_path)
# %%time
# NOTE(review): despite the name, `dop` here is the whole (i1, i2, dop, M,
# ALPHA) tuple yielded by the generator, not just the dop array.
dop=next(data)
dop[0].shape
| Physics/readBigData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="V0BnsL8yBVSz" outputId="03916bc5-9958-40b7-b551-cff7a5b5d586"
#Using Google Colab : mounting Google Drive
from google.colab import drive
drive.mount('/content/drive')
# + id="qlZt9ClgBR8v"
# Import modules
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import PolynomialFeatures
import warnings
warnings.filterwarnings('ignore')
# + id="aoCHvOJuu4cp"
pd.options.display.float_format = '{:.2f}'.format
# + [markdown] id="9SlyguUWBq4W"
# ##Problem Statement
# ```
# To predict the final price of each home in Melbourne.
# ```
# + colab={"base_uri": "https://localhost:8080/", "height": 551} id="kwxwAPgDCJYU" outputId="c46b9f66-d545-4f74-aad0-290c33e0e348"
#Feature Description
feat_desc= pd.read_excel('/content/drive/MyDrive/Move to Melbourne/RegularizationDataDictionary.xlsx')
feat_desc
# + [markdown] id="hzIcBTkLBR8_"
# ### Load the dataset
#
# + id="3oKCEiAKBR9B"
# Read the dataset and extract the test and train data separately
df_train=pd.read_csv('/content/drive/MyDrive/Move to Melbourne/train.csv')
df_test=pd.read_csv('/content/drive/MyDrive/Move to Melbourne/test.csv')
# Dropping Ids
train_id = df_train['Id']
df_train.drop(['Id'], axis=1, inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 226} id="oxyVaf0wCsga" outputId="e867eafc-d2b6-473d-e198-210f116db7f2"
# Examine first 5 rows
df_train.head()
# + colab={"base_uri": "https://localhost:8080/"} id="Jq2PCaU6C6DY" outputId="7cd0b3c5-7afc-4602-ad20-1e2bac957208"
# No. of rows and columns
print(df_train.shape)
# Examine the column names
print(df_train.columns)
# + colab={"base_uri": "https://localhost:8080/"} id="Z2IMnYOvDJOZ" outputId="0d54596a-fab9-425e-ff36-1297958165b2"
# Examine no. of non null entries, and column datatype
df_train.info()
#No null values
# + colab={"base_uri": "https://localhost:8080/", "height": 320} id="b_OWgBWwDSzj" outputId="d4bfeded-1519-4d2c-b7aa-e27e43535bd5"
#Calculate Summary Statistics
df_train.describe()
# + [markdown] id="S4cfcEk6Dy8R"
# ### Visualize the data
# + colab={"base_uri": "https://localhost:8080/", "height": 879} id="wNCy5T9jpWDX" outputId="0a3f59e8-c779-4319-de22-ef4fd13e325b"
# Data distribution
data_distribution = df_train.hist(figsize=(15,15))
plt.show();
# Obs:
#Right Skewed - Landsize, BuildingArea, Target Variable - Price
# + colab={"base_uri": "https://localhost:8080/", "height": 307} id="znNcuu7fSLgw" outputId="73a4a50d-c39b-405a-ec62-4a0d9809d2fb"
#Price vs BuildingArea
sns.regplot(x="BuildingArea", y="Price", data=df_train) #order=2 for poly reg
# + colab={"base_uri": "https://localhost:8080/", "height": 307} id="sbE2JiMEnyla" outputId="15bb83da-8ce7-4795-ea9c-0d3e73ce41eb"
#Price vs Landsize
sns.regplot(x="Landsize", y="Price", data=df_train)
# + id="Sf1y-T1Wnlsy" colab={"base_uri": "https://localhost:8080/", "height": 307} outputId="1c4101de-5500-4904-843d-a55be8eee4bb"
#Price vs Distance
sns.regplot(x="Distance", y="Price", data=df_train) #order=2 for poly reg
# + id="9IALRU4_79Fv"
# Skewed data (log transformation)
df_train['Price']=np.log1p(df_train['Price'])
df_train['BuildingArea']=np.log1p(df_train['BuildingArea'])
df_train['Landsize']=np.log1p(df_train['Landsize'])
# + colab={"base_uri": "https://localhost:8080/", "height": 776} id="N070PEtxwfYX" outputId="7e94edb1-077e-43e1-9612-d11690938cf6"
# correlation
corr = df_train.corr()
plt.figure(figsize=(15,12))
plt.title('Correlation Matrix')
sns.heatmap(corr,square=True,annot = True)
plt.show();
# + [markdown] id="77M9xKY6BR9D"
# ## Model building
#
# + colab={"base_uri": "https://localhost:8080/"} id="DwvAuMZWBR9F" outputId="81a50da8-d033-4390-952d-64d120852118"
#Splitting training set
X=df_train[['Rooms', 'Type', 'Distance', 'Postcode', 'Bathroom', 'Car', 'Landsize', 'BuildingArea', 'YearBuilt', 'CouncilArea', 'Longtitude']]
y=df_train['Price']
print(X.head())
print(y.head())
#we split 75% of the data to training set while 25% of the data to validation
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.25, random_state=0)
#X_train, X_valid shape
print(X_train.shape)
print(X_valid.shape)
# + id="wAEy-sGXMfiu"
# Fitting Polynomial Feature to the dataset
poly = PolynomialFeatures(degree = 3)
X_train_poly = poly.fit_transform(X_train)
X_valid_poly=poly.transform(X_valid)
# + id="MaKyYr3MjBCu"
# regularization parameters for grid search
ridge_lambdas = [0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1, 3, 6, 10, 30, 60]
lasso_lambdas = [0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006, 0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1]
# + colab={"base_uri": "https://localhost:8080/"} id="ajc6b7M7OwJe" outputId="bfae3e3a-918c-4cef-8fec-2e22fd464ad9"
# Ridge model
ridge_model=Ridge()
# grid search on lasso and ridge
ridge_grid=GridSearchCV(estimator=ridge_model,param_grid=dict(alpha=ridge_lambdas))
ridge_grid.fit(X_train_poly,y_train)
# make predictions
ridge_pred=ridge_grid.predict(X_valid_poly)
Error=np.sqrt(mean_squared_error(y_valid,ridge_pred))
print(Error)
#R-squared calculation Ridge
rsquared = r2_score(y_valid,ridge_pred)
print(rsquared)
# + colab={"base_uri": "https://localhost:8080/"} id="kzdBdG6YEtGm" outputId="cc8b0758-f3a7-490a-aed4-842a15e56337"
# lasso model
lasso_model=Lasso()
# grid search on lasso
lasso_grid=GridSearchCV(estimator=lasso_model,param_grid=dict(alpha=lasso_lambdas))
lasso_grid.fit(X_train_poly,y_train)
# make predictions
lasso_pred=lasso_grid.predict(X_valid_poly)
Error=np.sqrt(mean_squared_error(y_valid,lasso_pred))
print(Error)
#R-squared calculation Lasso
rsquared = r2_score(y_valid,lasso_pred)
print(rsquared)
# + colab={"base_uri": "https://localhost:8080/"} id="PHvQ28dVF3UJ" outputId="28e50c94-3b4a-40c7-f9cb-68065f274ef2"
#LR
model=LinearRegression()
model.fit(X_train_poly, y_train)
pred=model.predict(X_valid_poly)
Error=np.sqrt(mean_squared_error(y_valid,pred))
print(Error)
# R-squared calculation LR
rsquared = r2_score(y_valid,pred)
print(rsquared)
# + [markdown] id="ADhynCtcBR9H"
# ### Prediction on the test data and creating the sample submission file.
#
# + colab={"base_uri": "https://localhost:8080/"} id="qppm_a4uBR9K" outputId="6af7f4e6-0bb7-4e27-a80a-6561fe528831"
#the Id column in a separate variable : test_id
test_id = df_test['Id']
df_test.drop(['Id'], axis=1, inplace=True)
#Test data shape and columns names
print(df_test.shape)
print(df_test.columns)
df_test['BuildingArea']=np.log1p(df_test['BuildingArea'])
df_test['Landsize']=np.log1p(df_test['Landsize'])
# + colab={"base_uri": "https://localhost:8080/"} id="6qaRkyO4FSVa" outputId="ac6cd3d0-e7b4-46cc-88f5-bf9552751f59"
#Features selected from test data
#X_test = df_test
X_test=df_test[['Rooms', 'Type', 'Distance', 'Postcode', 'Bathroom', 'Car', 'Landsize', 'BuildingArea', 'YearBuilt', 'CouncilArea', 'Longtitude']]
X_test_poly = poly.transform(X_test)
#make prediction : Value (M)
ytest_pred=ridge_grid.predict(X_test_poly)
#Making df for submission np.expm1()
subm=pd.DataFrame({"Id": test_id, "Price": np.expm1(ytest_pred)})
print(subm.head())
# + id="n8zK-6GWFifJ" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="bc4b36d5-a611-40ca-aa83-b1d386a0b888"
# To CSV for submission
#subm.to_csv('submission_1.csv',index=False)
#from google.colab import files
#files.download('submission_1.csv')
| Move_to_Melbourne/MelbourneHousing_Price_Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## How to load a SXI fits file into sunpy
# +
from __future__ import print_function, division
from datetime import timedelta
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
# could use the sunpy colormaps instead
import sunpy.cm as scm
from matplotlib.colors import LogNorm
from pylab import figure, cm
# %matplotlib inline
import astropy.time
from astropy.io import fits
from astropy import units as u
from sunpy import sun
import sunpy.map
# +
#### Need to do it manually as sunpy map doesn't understand the original fits file
infile='SXI_20150901_033615143_BA_15.FTS'
# Direct loading of the fits doesn't work
# sxi_map = sunpy.map.Map(infile)
# So manually load in the file and set the data and header
fitsin = fits.open(infile)
data=fitsin[0].data
hdr=fitsin[0].header
obs_time=hdr['date_obs']
# will also have to rotate so note the angle
rotang=hdr['crota1']*u.deg
# Have I got everything?
header = {'cdelt1': hdr['cdelt1'],'cdelt2': hdr['cdelt2'],
'instrume':'SXI','detector':'SXI',
'xcen':hdr['xcen'],'ycen':hdr['ycen'],'telescop':hdr['telescop'],
'exptime':hdr['exptime'],'date_obs':hdr['date_obs'],'wavelnth':12,
'crpix1':hdr['crpix1'],'crpix2':hdr['crpix2'],
'HGLT_OBS': 0,'HGLN_OBS': 0,
'RSUN_OBS': sun.solar_semidiameter_angular_size(obs_time).value,
'RSUN_REF': sun.constants.radius.value,
'DSUN_OBS': sun.sunearth_distance(obs_time).value*u.astrophys.au}
sxi_map = sunpy.map.Map(data, header)
sxi_map.plot_settings['cmap'] = scm.get_cmap('stereohi1')
sxi_map.plot_settings['norm'] = colors.PowerNorm(gamma=0.2,vmax=5e2)#,vmin=5,vmax=1000)
# sxi_map.plot_settings['norm'] = colors.LogNorm(2,5e2)# sxi_map.max())
matplotlib.rcParams['font.size'] = 20
fig = plt.figure(figsize=(10, 8))
ax = plt.subplot()
sxi_map.plot()
sxi_map.draw_limb(color='white',linewidth=2,linestyle='dashed')
sxi_map.draw_grid(color='white',linewidth=2,linestyle='dotted')
# %S (upper case) is the zero-padded seconds field; the original %s is a
# platform-specific epoch-seconds code, so the 19-char truncation displayed
# epoch digits in place of the seconds.  The fixed format is exactly 19 chars.
title_obsdate='{:%Y-%m-%d %H:%M:%S}'.format(sxi_map.date)
ax.set_title('GOES/SXI Be12A '+title_obsdate)
ax.set_ylabel('y [arcsec]')
ax.set_xlabel('x [arcsec]')
plt.colorbar(fraction=0.04, pad=0.1,label='DN')
plt.show()
# +
# Now lets rotate the image and see what it looks like
rsxi_map = sxi_map.rotate(rotang)
fig = plt.figure(figsize=(10, 8))
ax = plt.subplot()
rsxi_map.plot()
rsxi_map.draw_limb(color='white',linewidth=2,linestyle='dashed')
rsxi_map.draw_grid(color='white',linewidth=1,linestyle='dashed')
# %S (upper case) is the zero-padded seconds field; the original %s is a
# platform-specific epoch-seconds code that garbled the displayed seconds.
title_obsdate='{:%Y-%m-%d %H:%M:%S}'.format(rsxi_map.date)
ax.set_title('GOES/SXI Be12A '+title_obsdate)
ax.set_ylabel('y [arcsec]')
ax.set_xlabel('x [arcsec]')
plt.colorbar(fraction=0.04, pad=0.1,label='DN')
plt.show()
# +
# Now plot our submap location of the rotated map
rangex = u.Quantity([-800*u.arcsec, -200 * u.arcsec])
rangey = u.Quantity([-1500 * u.arcsec, -800 * u.arcsec])
srsxi_map = rsxi_map.submap(rangex, rangey)
fig = plt.figure(figsize=(10, 8))
ax = plt.subplot()
srsxi_map.plot()
srsxi_map.draw_limb(color='white',linewidth=2,linestyle='dashed')
srsxi_map.draw_grid(color='white',linewidth=2,linestyle='dotted')
# %S (upper case) is the zero-padded seconds field; the original %s is a
# platform-specific epoch-seconds code that garbled the displayed seconds.
title_obsdate='{:%Y-%m-%d %H:%M:%S}'.format(srsxi_map.date)
ax.set_title('GOES/SXI Be12A '+title_obsdate)
ax.set_ylabel('y [arcsec]')
ax.set_xlabel('x [arcsec]')
plt.colorbar(fraction=0.04, pad=0.1,label='DN')
plt.show()
# -
# #### well that is not correct ;(
| maps/map_test/.ipynb_checkpoints/sxi_test_map-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="EN3BM2a0JLJR"
# ##### Copyright 2020 Google
# + cellView="form" colab={} colab_type="code" id="sVv2bPc0JMdM"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="yaieLbziJTX5"
# # Hardware grid circuits
# + [markdown] colab_type="text" id="P2-jS0d9KI4r"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.example.org/cirq/research/qaoa/hardware_grid_circuits"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on QuantumLib</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/quantumlib/ReCirq/blob/master/docs/qaoa/hardware_grid_circuits.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/quantumlib/ReCirq/blob/master/docs/qaoa/hardware_grid_circuits.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/ReCirq/docs/qaoa/hardware_grid_circuits.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="p7FRu0xXIfBW"
# The "hardware grid" problem is defined by a Hamiltonian whose topology matches the hardware graph natively. This permits a simple compilation ("routing") with circuit depth per p-step going like $O(1)$.
# + [markdown] colab_type="text" id="JgZdtr7hKaFJ"
# ## Setup
#
# Install the ReCirq package:
# + colab={} colab_type="code" id="NN9a0rDMKa5G"
# !pip install git+https://github.com/quantumlib/ReCirq
# + [markdown] colab_type="text" id="H9heQyxwKkGG"
# Now import Cirq, ReCirq and the module dependencies:
# + colab={} colab_type="code" id="kVH-4o_bKoH_"
import cirq
import recirq
import networkx as nx
import numpy as np
from cirq.contrib.svg import SVGCircuit, circuit_to_svg
from recirq.qaoa.classical_angle_optimization import OptimizationResult
from recirq.qaoa.problems import get_all_hardware_grid_problems
# + [markdown] colab_type="text" id="_BlXR0TaKtZO"
# Set the theme colors:
# + colab={} colab_type="code" id="gpkMGhaDIfBY"
QBLUE = '#1967d2'
QRED = '#ea4335ff'
QGOLD = '#fbbc05ff'
# + [markdown] colab_type="text" id="vaaEu_xiLCav"
# ## Create a grid
# + [markdown] colab_type="text" id="LwpereqKIfBe"
# Here, we'll generate a 3x3 grid with arbitrarily chosen (fake!) beta, gamma parameters.
# + colab={} colab_type="code" id="Von9l7KmIfBf"
fake_device_graph = nx.grid_2d_graph(3, 3)
fake_device_graph = nx.relabel_nodes(
fake_device_graph, mapping={(r, c): cirq.GridQubit(r, c)
for r, c in fake_device_graph.nodes})
problems = get_all_hardware_grid_problems(fake_device_graph, central_qubit=cirq.GridQubit(1, 1),
n_instances=10, rs=np.random.RandomState(52))
n_qubits = 9
instance_i = 0
problem = problems[n_qubits, instance_i]
optimum = OptimizationResult(p=1, f_val=None, gammas=[0.123], betas=[0.456], min_c=None, max_c=None)
nx.draw_networkx(problem.graph,
pos={i: problem.coordinates[i] for i in range(problem.graph.number_of_nodes())},
node_color=QBLUE)
# + [markdown] colab_type="text" id="xEgQ3mcrIfBj"
# If, however, you've been following along, we can load in the results of `HardwareGridProblemGenerationTask`s for which we've actually pre-computed the optimal angles. TODO: enable.
# + [markdown] colab_type="text" id="cQlwHhzLIfBk"
# ```
# from recirq.qaoa.experiments.problem_generation_tasks import HardwareGridProblemGenerationTask
# from recirq.qaoa.experiments.angle_precomputation_tasks import AnglePrecomputationTask
#
# gen_task = HardwareGridProblemGenerationTask(
# dataset_id = '2020-03-19',
# device_name = 'Sycamore23',
# instance_i = 0,
# n_qubits = 5,
# )
#
# pre_task = AnglePrecomputationTask(
# dataset_id = '2020-03-23',
# generation_task = gen_task,
# p = 1,
# )
# print(gen_task)
# print(pre_task)
# ```
# + [markdown] colab_type="text" id="ZuiLBEqgIfBl"
# ```
# from recirq.qaoa.experiments.problem_generation_tasks import DEFAULT_BASE_DIR as PGEN_BASE_DIR
# from recirq.qaoa.experiments.angle_precomputation_tasks import DEFAULT_BASE_DIR as APRE_BASE_DIR
#
# gen_data = recirq.load(gen_task, base_dir=PGEN_BASE_DIR)
# pre_data = recirq.load(pre_task, base_dir=APRE_BASE_DIR)
# problem = gen_data['problem']
# optimum = pre_data['optimum']
# print(optimum)
# nx.draw_networkx(problem.graph,
# pos={i: problem.coordinates[i] for i in range(problem.graph.number_of_nodes())},
# node_color=QBLUE
# )
# ```
# + [markdown] colab_type="text" id="KuvSKzhdIfBm"
# ## Ansatz
#
# As always, the circuit ansatz involves $|+\rangle$ initialization followed by alternating applications of the problem and driver unitaries. We first construct a highly abstracted circuit with these multi-qubit operations.
# + colab={} colab_type="code" id="9lQgwJfpIfBn"
from recirq.qaoa.gates_and_compilation import ProblemUnitary, DriverUnitary
qubits = cirq.LineQubit.range(problem.graph.number_of_nodes())
circuit = cirq.Circuit(
cirq.H.on_each(qubits),
ProblemUnitary(problem.graph, gamma=optimum.gammas[0]).on(*qubits),
DriverUnitary(len(qubits), beta=optimum.betas[0]).on(*qubits)
)
SVGCircuit(circuit)
# + [markdown] colab_type="text" id="HrL9lTEaIfBr"
# ## Hardware topology
#
# We can enact the problem unitary with four entangling layers per p-step.
#
# 1. Horizontal links from even columns
# 2. Horizontal links from odd columns
# 3. Vertical links from even rows
# 4. Vertical links from odd rows
#
# To help the algorithm, we must specify `coordinates` to the compilation routine. This maps from bit indices $\in \{0, 1, \dots n\}$ to `(row, column)` coordinates so the compilation routine can categorize the various links into the above four categories. This is a little roundabout since we'll be mapping to `GridQubit`s, but I'm trying to emphasize the distinction between the problem (which is not related to quantum computing) and the implementation (which is).
#
# As always, the driver unitary is nothing more than single-qubit X rotations.
# + colab={} colab_type="code" id="98OkZWSiIfBs"
from recirq.qaoa.gates_and_compilation import compile_problem_unitary_to_hardware_graph, \
compile_driver_unitary_to_rx
circuit = compile_problem_unitary_to_hardware_graph(circuit, problem.coordinates)
circuit = compile_driver_unitary_to_rx(circuit)
SVGCircuit(circuit)
# + [markdown] colab_type="text" id="0XCHWxQwIfBw"
# ## Compilation
#
# To compile $e^{i \gamma w_{ij} Z_i Z_j}$, express the `ZZ` interaction as three rounds of `SYC` gates. We take a brief aside to look at this compilation.
# + colab={} colab_type="code" id="RDDbNl50IfBx"
import numpy as np
zz = cirq.Circuit(cirq.ZZ(*qubits[:2])**(2*0.345/np.pi))
SVGCircuit(zz)
# + colab={} colab_type="code" id="2QvEh_uFIfB0"
from recirq.qaoa.gates_and_compilation import compile_to_syc
zz = compile_to_syc(zz)
SVGCircuit(zz)
# + [markdown] colab_type="text" id="xkudCJcDIfB4"
# ### Function `zz_as_syc` is included for convenience
# + colab={} colab_type="code" id="Fjl17Tq_IfB4"
from recirq.qaoa.gates_and_compilation import zz_as_syc
zz = zz_as_syc(0.345, *qubits[:2])
SVGCircuit(zz)
# + colab={} colab_type="code" id="4wYev76RIfB7"
cirq.testing.assert_allclose_up_to_global_phase(
cirq.Circuit(cirq.ZZ(*qubits[:2])**(2*0.345/np.pi)).unitary(),
zz_as_syc(0.345, *qubits[:2]).unitary(),
atol=1e-8
)
# + colab={} colab_type="code" id="xvunS9GlIfB_"
cirq.testing.assert_allclose_up_to_global_phase(
compile_to_syc(cirq.Circuit(cirq.ZZ(*qubits[:2])**(2*0.345/np.pi))).unitary(),
zz_as_syc(0.345, *qubits[:2]).unitary(),
atol=1e-8
)
# + [markdown] colab_type="text" id="rapp3N1IIfCB"
# ### Structure the gates
#
# Make sure all the gates are well-structured. This means each layer is composed of homogeneous operations which are native to the device.
# + colab={} colab_type="code" id="Ya-8Lq1EIfCC"
from recirq.qaoa.circuit_structure import validate_well_structured
_, stats = validate_well_structured(zz)
stats
# + [markdown] colab_type="text" id="fV0OaST0IfCG"
# ## Compiling to native operations
#
# We use the above compilation of `ZZ` to compile our circuit to native operations. Because our compilation produces well-structured gates and our starting circuit was structured, the resulting circuit is well-structured.
# + colab={} colab_type="code" id="WmPRXvXbIfCH"
from recirq.qaoa.gates_and_compilation import compile_to_syc
circuit = compile_to_syc(circuit)
SVGCircuit(circuit)
# + colab={} colab_type="code" id="e5S5y1qnIfCK"
_, stats = validate_well_structured(circuit)
stats
# + [markdown] colab_type="text" id="zVOUhlpOIfCN"
# ## Append measurement
# + colab={} colab_type="code" id="xrGIYwGSIfCO"
mcircuit = circuit + cirq.measure(*qubits, key='z')
SVGCircuit(mcircuit)
# + colab={} colab_type="code" id="PTr54AsaIfCR"
_, stats = validate_well_structured(mcircuit)
stats
# + [markdown] colab_type="text" id="s7YGb2OVIfCU"
# ## Compile out Z's
# Z gates commute through SYC so we can remove them. This step is not necessary: the quantum operating system will track the virtual Zs if we don't remove them.
# + colab={} colab_type="code" id="rMcZ3yOmIfCV"
from recirq.qaoa.gates_and_compilation import compile_out_virtual_z
mcircuit = compile_out_virtual_z(mcircuit)
SVGCircuit(mcircuit)
# + [markdown] colab_type="text" id="AdKNvgbhIfCZ"
# ## Compile out negligible gates
#
# We've left several `PhX^0` to keep our circuits structured. As the very last compilation step, we can drop these.
# + colab={} colab_type="code" id="ejJTtjj9IfCZ"
from recirq.qaoa.gates_and_compilation import compile_to_non_negligible
mcircuit = compile_to_non_negligible(mcircuit)
SVGCircuit(mcircuit)
# + colab={} colab_type="code" id="bFz9dhyfIfCc"
_, stats = validate_well_structured(mcircuit)
stats
# + [markdown] colab_type="text" id="PRR0w98_IfCf"
# ## Place on device
#
# - Our problem has integer nodes because it should be specified independently of a quantum implementation
# - Our circuit has LineQubit qubits to emphasize the fact that we can place this circuit in multiple locations on a device
# - Our `coordinates` list was used only as a helper for the compilation
#
# We now place the compiled circuit onto a compatible part of the device. Here, we use networkx's subgraph isomorphism routine to find all the possibilities.
# + colab={} colab_type="code" id="5p5qjKgAIfCg"
from cirq.contrib.routing import xmon_device_to_graph
device_graph = xmon_device_to_graph(recirq.get_device_obj_by_name('Sycamore23'))
nx.draw_networkx(device_graph, pos={q: (q.row, q.col) for q in device_graph.nodes}, node_color=QRED)
# + colab={} colab_type="code" id="pxRXrpCvIfCp"
from matplotlib import pyplot as plt
from cirq.contrib.routing import xmon_device_to_graph
device_graph = xmon_device_to_graph(recirq.get_device_obj_by_name('Sycamore23'))
matcher = nx.algorithms.isomorphism.GraphMatcher(device_graph, problem.graph)
# There's a "rotational" freedom which we remove here:
each_set_of_qubits_only_one_subgraph = {}
for q_to_i in matcher.subgraph_isomorphisms_iter():
each_set_of_qubits_only_one_subgraph[frozenset(q_to_i.keys())] = q_to_i
for q_to_i in each_set_of_qubits_only_one_subgraph.values():
nx.draw_networkx(device_graph, pos={q: (q.row, q.col) for q in device_graph.nodes},
node_color=[QRED if q in q_to_i else QBLUE for q in device_graph.nodes])
plt.show()
# + colab={} colab_type="code" id="arUdjiqHIfCs"
# `q_to_i` is whichever device-placement mapping the subgraph-isomorphism loop
# above left bound last; invert it to index device qubits by problem node.
i_to_q = {i: q for q, i in q_to_i.items()}
# Since our nodes are contiguous integers starting from 0, we can flatten into a list
device_qubits = [i_to_q[i] for i in range(len(i_to_q))]
del i_to_q
def _mapq(q):
    # LineQubit index -> the GridQubit that node was placed on.
    return device_qubits[q.x]
mcircuit = mcircuit.transform_qubits(_mapq)
SVGCircuit(mcircuit)
# + [markdown] colab_type="text" id="swSxqi6jIfCu"
# ## Problem circuit functions
# + colab={} colab_type="code" id="SgGS1rpjIfCv"
from recirq.qaoa.problem_circuits import get_generic_qaoa_circuit
circuit = get_generic_qaoa_circuit(
problem_graph=problem.graph,
qubits=qubits,
gammas=[0.123],
betas=[0.456],
)
SVGCircuit(circuit)
# + colab={} colab_type="code" id="pBd8nsknIfCx"
from recirq.qaoa.problem_circuits import get_routed_hardware_grid_circuit
circuit = get_routed_hardware_grid_circuit(
problem_graph=problem.graph,
qubits=qubits,
coordinates=problem.coordinates,
gammas=[0.123],
betas=[0.456],
)
SVGCircuit(circuit)
# + colab={} colab_type="code" id="loLXlfMEIfC0"
from recirq.qaoa.problem_circuits import get_compiled_hardware_grid_circuit
circuit, qubits = get_compiled_hardware_grid_circuit(
problem=problem,
qubits=device_qubits,
gammas=[0.123],
betas=[0.456],
)
SVGCircuit(circuit)
| docs/qaoa/hardware_grid_circuits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### Correlation Analysis Module
# author: <NAME>
# +
# These first two lines configure the notebook to embed any plots graphed
# in the body of the notebook
# %matplotlib inline
# %config InlineBackend.figure_formats=['svg']
# Standard csv and file io python libraries
import csv
import io
import os
# Library for loading data from a webpage (Python 2 and 3 compatible)
from future.standard_library import install_aliases
install_aliases()
from urllib.request import urlopen, Request
# Main python library for mathematical calculations
import numpy as np
# Plotting related python libraries
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
# Python libraries for manipulating dates and times as objects
import time
import datetime as dt
from matplotlib.dates import date2num
# -
def getDataInTimeRange(data, times, tstart, tstop):
    """Return the entries of `data` whose matching `times` fall in [tstart, tstop].

    `data` and `times` are parallel sequences; `tstart`/`tstop` are datetime
    objects and the window is inclusive at both ends.  The selection is
    returned as a plain Python list.
    """
    stamps = np.array([moment.timestamp() for moment in times])
    in_window = (stamps >= tstart.timestamp()) & (stamps <= tstop.timestamp())
    return np.array(data)[in_window].tolist()
| Programming Lesson Modules/Correlation Analysis Module.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# +
import csv
import matplotlib.pylab as plt
# %matplotlib inline
def showWeekly(x_axis,y_axis,y_range,start):
    """Plot one line per week of `y_axis` values against day-of-week.

    x_axis : name of the CSV column holding each row's unix timestamp.
    y_axis : name of the CSV column to plot.
    y_range : number of extra weekly series to allocate (len(y) == y_range+1).
    start : unix timestamp of the first sample.
    """
    # 518400 s = 6 days: the span from the first to the last of 7 samples —
    # presumably one row per day; TODO confirm the sampling interval.
    weekDiffer = 518400
    nextWeek = start + weekDiffer
    labels = []
    with open('./Datasets/household_power_consumption.csv', 'r') as csvFile:
        reader = csv.reader(csvFile)
        header=next(reader)
        # Locate the column indices j (x) and k (y) from the header row.
        for i in range(len(header)):
            if (header[i] == x_axis):
                j=i
            if (header[i] == y_axis):
                k=i
        # One x position per weekday; each y[i] collects one week of values.
        x = [0,1,2,3,4,5,6]
        y=[[]]
        for i in range(y_range):
            y.append([])
        # i now tracks which weekly bucket the current row belongs to
        i=0
        for row in reader:
            # Rows at or before `nextWeek` belong to the current week ...
            if (int(row[j]) <= nextWeek):
                y[i].append(row[k])
            # ... the first row past it opens the next week's bucket.
            if (int(row[j]) > nextWeek):
                i = i + 1
                y[i].append(row[k])
                nextWeek = int(row[j]) + weekDiffer
        for i in range(len(y)):
            labels.append(i)
        # NOTE(review): plt.plot(x, y_arr) needs every bucket to hold exactly
        # 7 values; a short final week would raise a dimension mismatch.
        for y_arr, label in zip(y, labels):
            plt.plot(x, y_arr, label=label)
        plt.title('Week Analysis')
        plt.ylabel(y_axis)
        plt.xlabel(x_axis)
        plt.legend()
        plt.show()
# Count the data rows (header excluded) to size the weekly series.
with open('./Datasets/household_power_consumption.csv', 'r') as csvFile:
    reader = csv.reader(csvFile)
    header = next(reader)
    row_count = sum(1 for row in reader)
# One series per 7 rows; -1 because showWeekly allocates y_range+1 buckets.
y_range = int((row_count / 7) - 1)
# Unix timestamp of the first sample in the file.
start = 1164997800
# show weekly analysis graph: timestamp vs global active power
showWeekly("timestamp","Global_active_power",y_range,start)
# -
| sliding window mechanism+matplotlib/matplotlibWeeklyChart.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Ruhi-Sharmin-1/C-code/blob/main/XGBoost(tuned%2Bvalidated).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="zXIcboe3imv9"
from numpy import mean
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.svm import SVC
from scipy.io import savemat
from scipy.io import loadmat
import timeit
import numpy as np
# + id="D99sWb_ZSzG8" colab={"base_uri": "https://localhost:8080/"} outputId="588fcca1-9d29-406f-a695-33d19330f2d6"
from google.colab import drive
#drive.mount('/content/gdrive')
drive.mount("/content/gdrive", force_remount=True)
# + id="szchBRRiS096"
loaddir_data=F"/content/gdrive/My Drive/ml-ruhi/"
data = loadmat(loaddir_data + 'challenge_training2017_cases_normal_ecg_corr_metrics.mat',squeeze_me=True)
training_normal_features = data['all_corr_metrics'][:,:]
n_training_normal = np.shape(training_normal_features)[0]
data = loadmat(loaddir_data + 'challenge_training2017_cases_afib_ecg_corr_metrics.mat',squeeze_me=True)
training_afib_features = data['all_corr_metrics'][:,:]
n_training_afib = np.shape(training_afib_features)[0]
data = loadmat(loaddir_data + 'challenge_training2017_cases_noisy_ecg_corr_metrics.mat',squeeze_me=True)
training_noisy_features = data['all_corr_metrics'][:,:]
n_training_noisy = np.shape(training_noisy_features)[0]
data = loadmat(loaddir_data + 'challenge_training2017_cases_other_ecg_corr_metrics.mat',squeeze_me=True)
training_other_features = data['all_corr_metrics'][:,:]
n_training_other = np.shape(training_other_features)[0]
# + id="AJ51tLRAS3zE" colab={"base_uri": "https://localhost:8080/"} outputId="e64337bf-dcad-471a-b013-a2d62e054e8a"
np.shape(training_normal_features)[0]
# + id="LTAmbO76S5fS"
# append the training datasets and learning datasets
training_features = np.concatenate((training_normal_features,training_afib_features,training_noisy_features,training_other_features),axis=0)
training_labels = np.concatenate((np.zeros(n_training_normal),np.ones(n_training_afib),2*(np.ones(n_training_noisy)),3*(np.ones(n_training_other))))
# + id="5m0KtkhBS9Xc"
def nanremove(x, y):
    """Drop every row of `x` containing a NaN, along with the matching `y` entry.

    Parameters
    ----------
    x : ndarray
        2-D feature matrix.
    y : ndarray
        1-D label vector aligned with the rows of `x`.

    Returns
    -------
    tuple
        (x, y) with all NaN-containing rows removed.  The original arrays are
        returned untouched when no NaN is present.
    """
    if np.argwhere(np.isnan(x)).shape[0] == 0:
        return x, y
    # Build one boolean row mask instead of deleting rows one at a time: the
    # old index-shifting np.delete loop removed the wrong rows whenever a
    # single row contained more than one NaN.
    keep = ~np.isnan(x).any(axis=1)
    return x[keep], y[keep]
# + id="VdiR1jrqS_MM"
x,y=nanremove(training_features, training_labels)
# + id="9wSFFhcCTA7U"
training_all = np.concatenate((x, y.reshape((-1,1))),axis=1)
#np.random.shuffle(training_all) #adds randomness
training_features = training_all[:,:-1]
training_labels = training_all[:,-1]
# + id="PmjMHit-TDUM"
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
test_size = 0.2 # from 0.01 to 0.1
seed = 4 #change from 4 to 5
x_train, x_test, y_train, y_test = model_selection.train_test_split(training_features, training_labels, test_size=test_size, random_state=seed)
# + id="57mrOdonTFQD"
import xgboost as xgb
max_depth = 10
n_estimators = 110
# NOTE(review): the original passed `slient=True` -- a typo for the long-
# deprecated `silent` flag; unknown kwargs are rejected by recent xgboost,
# so it is dropped here.
bst = xgb.XGBClassifier(max_depth=max_depth, learning_rate=0.0001, n_estimators=n_estimators,
                        min_child_weight=1, objective='multi:softmax', gamma=0,
                        reg_alpha=0, reg_lambda=1)
# Fit on the TRAINING split only.  The original fit on `training_features`,
# which contains the rows of x_test as well -- the held-out data leaked into
# training and inflated every metric computed below.
bst.fit(x_train, y_train) #bst=model
# Predict on the held-out split
xgb_pred = bst.predict(x_test)
# extracting most confident predictions
best_preds = np.round(xgb_pred)
# + id="JEG4s1R7TIn1" colab={"base_uri": "https://localhost:8080/"} outputId="4fba0b04-0eb7-4e79-fc7d-d6442f0f68b5"
# NOTE(review): the split above produced lower-case x_test / y_test; the
# upper-case X_test / Y_test referenced originally were never defined and
# raised NameError.
xgb_pred_proba = bst.predict_proba(x_test)
print(y_test)
print(xgb_pred_proba)
import pandas as pd
pd.DataFrame(y_test).to_csv(F"/content/gdrive/My Drive/ml-ruhi/XGBoost-Y-true-4class.csv")
pd.DataFrame(xgb_pred_proba).to_csv(F"/content/gdrive/My Drive/ml-ruhi/XGBoost-Y-pred-4class.csv")
# + id="F2be03zFV27D"
# Per-feature importance scores from the fitted booster (one per column).
fimp=bst.feature_importances_
# + id="qnAWUdbvV5WU"
# Importance values in descending order (values only -- column indices lost).
sorted_fimp=sorted(fimp, reverse=True)
# + id="jgM87U9NV6CT" colab={"base_uri": "https://localhost:8080/"} outputId="6b144410-1bfb-4acd-c8d7-08efcf30975a"
sorted_fimp
# + id="dDPVnzMqV7_T"
# Placeholder for the column index of each ranked importance value,
# filled in by the matching loop in the next cell.
feature_index=np.zeros(len(fimp))
# + id="6HnGkVaMV90N" colab={"base_uri": "https://localhost:8080/"} outputId="f6984b5a-9d23-46af-c120-da46ac7c98cf"
len(fimp)
# + id="lPbW1RxYWBVN"
range(len(fimp))
# + id="zjoTOAJ1WDAt"
# Rank features by importance: feature_index[t] is the column index holding
# the t-th largest importance value.  np.argsort replaces the original
# O(n^2) nested scan, which also mis-assigned ranks whenever two features
# shared the same importance (all tied ranks received the *last* matching
# column index).
feature_index = np.argsort(fimp)[::-1].astype(float)  # float, matching the np.zeros init above
# + id="sKxnLFyLWFNr" colab={"base_uri": "https://localhost:8080/"} outputId="a0f07c7a-e357-4077-906e-2079fafed86d"
feature_index
# + id="PXoTbpQkWJ9T" colab={"base_uri": "https://localhost:8080/"} outputId="2049a925-1e7b-43fb-f606-da3ee29ed065"
# Hard-coded ranking copied from an earlier run's printed output ("fixed
# through observation"); it goes stale if the model or features change --
# TODO recompute instead of hard-coding.
feature_index_fixed = [ 0., 1., 4., 42., 21., 3., 26., 11., 9., 28., 6., 43., 12.,
       31., 7., 25., 23., 5., 32., 44., 19., 29., 13., 33., 10., 27.,
       45., 17., 39., 8., 30., 47., 35., 14., 16., 46., 34., 24., 18.,
       15., 37., 22., 2., 20., 40., 41., 38., 36.] #fixed through observation
feature_index_fixed
# + id="-YFys0f6WM3L" colab={"base_uri": "https://localhost:8080/", "height": 165} outputId="df1cd929-26de-4f65-cf6d-c0bceaaca189"
# NOTE(review): `pyplot` is presumably imported in an earlier cell -- confirm.
pyplot.barh(feature_index, sorted_fimp) #did not match well maybe
# + id="7np8FkzOWObz" colab={"base_uri": "https://localhost:8080/", "height": 234} outputId="ab3cc1ac-7ec4-4654-bb17-2ff40a9dc997"
#does not match with the for loop code and previous graphs
# xgboost's built-in importance plot (`plot_importance` presumably imported
# in an earlier cell -- confirm).
ax = plot_importance(bst)
ax.figure.set_size_inches(10,10)
pyplot.title('XGBoost Feature Importance Bar Chart')
pyplot.xticks(fontsize=10)
pyplot.yticks(fontsize=10)
pyplot.show()
# + id="M006N2ZeWO7N" colab={"base_uri": "https://localhost:8080/"} outputId="cbdf25db-d36a-47ad-82c5-6bb8fdc117a1"
from sklearn.metrics import f1_score
# NOTE(review): the split above produced lower-case y_test; the upper-case
# Y_test referenced originally was never defined and raised NameError.
# Weighted F1 over all four classes on the held-out split.
score = f1_score(y_test, best_preds, average='weighted')
print('F-Measure: %.3f' % score)
# + id="QTUc6oaJWz3C" colab={"base_uri": "https://localhost:8080/"} outputId="eb011419-3fc7-41d1-cde6-40f82d6cdac9"
from sklearn.metrics import accuracy_score
# Overall accuracy as a percentage.
accuracy = accuracy_score(y_test, best_preds)
accuracy * 100.0
# + id="JMOSIaiGW1gZ" colab={"base_uri": "https://localhost:8080/"} outputId="edca0b7e-f219-499b-bdf0-1a559da72a9e"
best_preds.shape[0]
# + id="huCY1vY2W3ax"
# Per-class metrics for the NORMAL class (label 0).
# NOTE(review): the split above produced lower-case y_test; the upper-case
# Y_test used originally was never defined (NameError).  The element-wise
# Python filtering loop is replaced by a boolean mask.
normal_mask = (y_test == 0)
Y_test_normal = y_test[normal_mask]
best_preds_normal = best_preds[normal_mask]
# Accuracy restricted to the true-normal rows -- i.e. recall of class 0 (as %).
accuracy = accuracy_score(Y_test_normal, best_preds_normal)
accuracy * 100.0
# + colab={"base_uri": "https://localhost:8080/"} id="QbNuLxkewH8r" outputId="b1f02b11-076e-45ff-f6d3-6c80f04dafc1"
from sklearn.metrics import f1_score
score = f1_score(Y_test_normal, best_preds_normal, average='weighted')
print('F-Measure: %.3f' % score)
# + id="POO48i_yXCEZ" colab={"base_uri": "https://localhost:8080/"} outputId="c08b8a8b-f0c1-44d3-d63e-e06b7b0ccaa0"
# Per-class metrics for the AFIB class (label 1).
# NOTE(review): lower-case y_test is the variable defined by the split; the
# upper-case Y_test used originally was never defined (NameError).  The
# filtering loop is replaced by a boolean mask.
afib_mask = (y_test == 1)
Y_test_afib = y_test[afib_mask]
best_preds_afib = best_preds[afib_mask]
# Accuracy restricted to the true-afib rows -- i.e. recall of class 1 (as %).
accuracy = accuracy_score(Y_test_afib, best_preds_afib)
acc = accuracy * 100.0
print('Acc: %.3f' % acc)
from sklearn.metrics import f1_score
score = f1_score(Y_test_afib, best_preds_afib, average='weighted')
print('F-Measure: %.3f' % score)
# + id="6-KR1zzlXEHB" colab={"base_uri": "https://localhost:8080/"} outputId="1b539f81-193b-425e-b391-ef7903a78284"
# Per-class metrics for the NOISY class (label 2).
# NOTE(review): lower-case y_test is the variable defined by the split; the
# upper-case Y_test used originally was never defined (NameError).  The
# filtering loop is replaced by a boolean mask.
noisy_mask = (y_test == 2)
Y_test_noisy = y_test[noisy_mask]
best_preds_noisy = best_preds[noisy_mask]
# Accuracy restricted to the true-noisy rows -- i.e. recall of class 2 (as %).
accuracy = accuracy_score(Y_test_noisy, best_preds_noisy)
acc = accuracy * 100.0
print('Acc: %.3f' % acc)
from sklearn.metrics import f1_score
score = f1_score(Y_test_noisy, best_preds_noisy, average='weighted')
print('F-Measure: %.3f' % score)
# + id="W9zXnXrjXGJp" colab={"base_uri": "https://localhost:8080/"} outputId="9ab1a06f-b5f8-47de-84c8-1954b136e34c"
# Per-class metrics for the OTHER class (label 3).
# NOTE(review): lower-case y_test is the variable defined by the split; the
# upper-case Y_test used originally was never defined (NameError).  The
# filtering loop is replaced by a boolean mask.
other_mask = (y_test == 3)
Y_test_other = y_test[other_mask]
best_preds_other = best_preds[other_mask]
# Accuracy restricted to the true-other rows -- i.e. recall of class 3 (as %).
accuracy = accuracy_score(Y_test_other, best_preds_other)
acc = accuracy * 100.0
print('Acc: %.3f' % acc)
from sklearn.metrics import f1_score
score = f1_score(Y_test_other, best_preds_other, average='weighted')
print('F-Measure: %.3f' % score)
# + id="dab7eYdHXG7h"
# NOTE(review): `mean` was undefined here (NameError), and `score` holds the
# weighted F-measure of the "other" class from the cell above -- it is not a
# ROC AUC.  Print it under its real name; mean() of a scalar was a no-op anyway.
print('F-Measure (other class): %.3f' % score)
# + id="6f5Bn0jHXI8R"
# avg F-measure for all 3 types except noise data
from sklearn.metrics import f1_score
# NOTE(review): lower-case y_test is the variable defined by the split; the
# upper-case Y_test used originally was never defined (NameError).
# Per-class, macro-averaged and support-weighted F1 scores.
score = f1_score(y_test, best_preds, average=None)
print(score)
score = f1_score(y_test, best_preds, average='macro')
print(score)
score = f1_score(y_test, best_preds, average='weighted')
print(score)
# + id="dqw8ZjaYXMGM"
from sklearn.metrics import confusion_matrix,plot_confusion_matrix
# Raw-count confusion matrix, plotted by plot_conf_mat below.
cm=confusion_matrix(y_test, best_preds)
# + id="N6KlVGEIXOMZ"
# code from https://stackoverflow.com/questions/39033880/plot-confusion-matrix-sklearn-with-multiple-labels
def plot_conf_mat(cm, target_names, title='Confusion matrix', cmap=None, normalize=True):
    """Render a confusion matrix as a colour-mapped image.

    cm           -- confusion matrix of raw counts (as from sklearn)
    target_names -- class labels for the axis ticks, or None to omit them
    title        -- figure title
    cmap         -- matplotlib colormap (defaults to 'Blues')
    normalize    -- if True, display per-row proportions instead of counts
    """
    import matplotlib.pyplot as plt
    import numpy as np
    import itertools

    # Overall accuracy / misclassification come from the *raw* counts,
    # before any normalisation below.
    accuracy = np.trace(cm) / float(np.sum(cm))
    misclass = 1 - accuracy

    cmap = plt.get_cmap('Blues') if cmap is None else cmap

    plt.figure(figsize=(8, 6))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()

    if target_names is not None:
        ticks = np.arange(len(target_names))
        plt.xticks(ticks, target_names, rotation=45)
        plt.yticks(ticks, target_names)

    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    # Cell text flips to white on dark cells.
    thresh = cm.max() / 1.5 if normalize else cm.max() / 2
    fmt = "{:0.4f}" if normalize else "{:,}"
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, fmt.format(cm[i, j]),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
    plt.show()
# + id="Y9oG35DxXQQc"
target_names=['Normal', 'afib', 'noisy', 'other']
# + id="kD04_w9lXSd6"
plot_conf_mat(cm, target_names)
# + id="JOe95sNTXUA8"
#cm=([[0.9715, 0, 0, 0.0286], [0.0049, 0.9323, 0.0206, 0.0422], [0.3077, 0.0769, 0.3462, 0.2692], [0.4999, 0.0556, 0.0556, 0.3889]])
#https://stackoverflow.com/questions/43691380/how-to-save-load-xgboost-model
import pickle
# NOTE(review): the original passed the *directory* path straight to open(),
# which raises IsADirectoryError; join it with an actual file name, and use
# context managers so the file handles are always closed.
file_name = "XGBoost+NN (RAW code).pkl"
file_path = F"/content/gdrive/My Drive/ml-ruhi/" + file_name
# save
with open(file_path, "wb") as fh:
    pickle.dump(bst, fh)
# load
with open(file_path, "rb") as fh:
    bst_loaded = pickle.load(fh)
# + id="Kw-dBnkqXaQD"
| XGBoost(tuned+validated).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import IPython
from IPython.display import HTML
from IPython.display import Markdown
from IPython.display import display
import numpy as np
# + [markdown] slideshow={"slide_type": "slide"}
# ## The theoretical density of MZG ($Mn_{0.5}Zn_{0.5}Fe_{1.9}Gd_{0.1}O_{4}$)
#
# The theoretical density of MZG ($Mn_{0.5}Zn_{0.5}Fe_{1.9}Gd_{0.1}O_{4}$) was calculated taking the
# molecular weight of MZG to be 245.84 $g$. Spinel ferrites
# have eight formula units per unit cell. Therefore, the molecular
# weight of one cell is $8 \times (245.84)$ = 1966.68 $g$.
#
# The volume of a cube of side, length $a$ is $a^3$. The volume of a cell is therefore
# $N \times a^{3}$ where $N$ is Avogadro’s number. The unit cell edge $a_0$ ($\mathring{A}$) of
# MZG = 8.3366 $\mathring{A}$ therefore $a^{3}$ = 579.38 $\mathring{A}^{3}$. As 1 $\mathring{A}^3$ = $10^{-24}$ $cm^{3}$,
# $N \times a^{3}$ is therefore: $[6.022\cdot10^{23}][579.38\cdot10^{-24}] = 348.96$ $cm^{3}$
#
#
# Density = mass/volume = 1966.68/348.96 = 5.6358 $g/cm^{3}$, that is, $\rho_{MZG}=5.6358$ $g/cm^{3}$ [[1](https://www4.dcu.ie/sites/default/files/apt/Ceramic%20CoFe_Sintering_2008.pdf)]
# + [markdown] slideshow={"slide_type": "slide"}
# ## Relation between volume fraction or mass of nanoparticles
#
# Let us assume that you wish to prepare a volume ($V_{f}$) in ml of some nanofluid such MZG nanoparticles with base fluid as kerosene.
# We know that the density ($\rho$) of MZG is 5.6358 $g/cm^{3}$ and density of kerosene is 0,8190 $g/cm^3$.
#
# Therefore, volume concentration ($\phi$ in $\%$), is given by:
#
# $$\phi=\left[ \frac{\frac{W_{np}}{\rho_{np}}}{(\frac{W_{np}}{\rho_{np}}+\frac{W_{bf}}{\rho_{bf}})} \right]\times 100$$
#
# Therefore if you fix $\phi$ as 0.5%, then, $W_{MZG}$ = 0.63 g approximately
#
#
# Hence $W_{MZG}$ = 0.63 g is to be mixed in the base fluid (kerosene) to obtain the MZG/kerosene nanofluid with 0.5% volume fraction.
# -
def volume_fraction(wnp, pnp, wbf, pbf):
    """Volume fraction of nanoparticles suspended in a base fluid.

    wnp, pnp -- mass (g) and density (g/cm^3) of the nanoparticles
    wbf, pbf -- mass (g) and density (g/cm^3) of the base fluid

    Returns the particle volume divided by the total volume -- a fraction,
    not a percentage.
    """
    v_particles = wnp / pnp   # nanoparticle volume
    v_fluid = wbf / pbf       # base-fluid volume
    return v_particles / (v_particles + v_fluid)
# Sanity-check the volume-fraction formula from the markdown cell above.
phi=[]
# Base-fluid masses: each entry is a volume multiplied by the kerosene
# density 0.819 -- presumably masses for 1, 10, 45, 100 and 1000 ml of
# kerosene (units not stated here; TODO confirm).
a=[0.001*0.819,0.01*0.819,0.045*0.819,0.1*0.819,1*0.819]
b=0.819
#nwbf=b*a
# Volume fraction for a fixed 0.2607 g of MZG (density 5.6358 g/cm^3)
# against each base-fluid mass in `a`.
phi=[volume_fraction(0.2607,5.6358,i,0.819) for i in a ]
phi[2]
#nwbf
| Volumetric Fraction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# Pandas is a high-performance library for doing data analysis in Python... if you use it correctly. Today we'll go through some common performance traps people fall into, and we'll see how to stay on the fast path.
# + [markdown] heading_collapsed="true" tags=[]
# ## Measure twice, cut once
# + [markdown] heading_collapsed="true" tags=[]
# We know the dangers of [premature optimization](https://en.wikipedia.org/wiki/Program_optimization#When_to_optimize), so before you spend time speeding up some code, it's worth verifying that the code in question is *actually* slow, and identify exactly where it's slow. For this, I prefer tools like
#
# * [snakeviz](https://jiffyclub.github.io/snakeviz/) for function-level profiling
# * [line-profiler](https://github.com/pyutils/line_profiler) for measuring specific functions line-by-line.
#
# See https://tomaugspurger.github.io/maintaing-performance.html for more on how to use these tools to identify slow sections of code. From here on out, we'll assume you've already verified that some code needs optimizing.
# + [markdown] heading_collapsed="true" tags=[]
# ## Storage formats and I/O
# + [markdown] heading_collapsed="true" tags=[]
# Your first interaction with pandas is often one of the `pd.read_<format>` functions. While pandas supports reading from many different formats, some are higher-performance than others. In particular, we'll compare two specific formats
#
# * CSV
# * Parquet
#
# If your workload is IO-bound and if you're lucky enough to choose your storage format, switching your storage format can have a big speedup. Let's generate some data for comparison:
# +
# # %load utils.py
# A helper to generate some dummy data
import pandas as pd
import numpy as np
names = ["Alice", "Bob", "Charlie", "Dan", "Edith", "Frank", "George", "Hannah", "Ingrid", "Jerry", "Kevin", "Laura", "Michael", "Norbert", "Oliver", "Patricia", "Quinn", "Ray", "Sarah", "Tim", "Ursula", "Victor", "Wendy", "Xavier", "Yvonne", "Zelda",]
def make_float(n, rstate):
    """Return `n` uniform floats drawn from [-1, 1)."""
    samples = rstate.rand(n)
    return samples * 2 - 1
def make_int(n, rstate, lam=1000):
    """Return `n` Poisson-distributed integers with mean `lam`."""
    draws = rstate.poisson(lam, size=n)
    return draws
def make_string(n, rstate):
    # Sample `n` entries, with replacement, from the module-level `names` list.
    return rstate.choice(names, size=n)
def make_categorical(n, rstate):
    # Draw random integer codes into the module-level `names` list and wrap
    # them as a pandas Categorical (one shared copy of the category labels).
    return pd.Categorical.from_codes(rstate.randint(0, len(names), size=n), names)
# Dispatch table: requested dtype -> generator function.
# `str` and `object` deliberately map to the same string generator.
make = {
    float: make_float,
    int: make_int,
    str: make_string,
    object: make_string,
    "category": make_categorical,
}
def make_timeseries_part(
    start="2000-01-01",
    end="2000-01-31",
    dtypes=None,
    freq="10s",
    random_state=None,
    kwargs=None
):
    """Generate one DataFrame of random timeseries data.

    Parameters
    ----------
    start, end : datetime-like strings bounding the (end-exclusive) range
    dtypes : dict mapping column name -> type; each type must be a key of
        the module-level `make` dispatch table.  Defaults to
        {"name": "category", "id": int, "x": float, "y": float}.
    freq : pandas frequency string for the index
    random_state : seed for the per-partition RandomState
    kwargs : optional dict of "<column>_<arg>" keywords forwarded to the
        per-column generator functions.

    NOTE(review): the original used a mutable dict as the `dtypes` default
    argument -- harmless here (it is only iterated), but a classic trap;
    replaced with the None-sentinel idiom.
    """
    if dtypes is None:
        dtypes = {"name": "category", "id": int, "x": float, "y": float}
    kwargs = kwargs or {}
    index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
    state = np.random.RandomState(random_state)
    columns = {}
    for k, dt in dtypes.items():
        # Pick out kwargs of the form "<column>_<arg>" destined for column k.
        kws = {
            kk.rsplit("_", 1)[1]: v
            for kk, v in kwargs.items()
            if kk.rsplit("_", 1)[0] == k
        }
        columns[k] = make[dt](len(index), state, **kws)
    df = pd.DataFrame(columns, index=index, columns=sorted(columns))
    # pd.date_range is end-inclusive; drop the final row so consecutive
    # partitions can be concatenated without a duplicate timestamp.
    if df.index[-1] == end:
        df = df.iloc[:-1]
    return df
def make_timeseries(
    start="2000-01-01",
    end="2000-12-31",
    dtypes={"name": str, "id": int, "x": float, "y": float},
    freq="10s",
    partition_freq="1M",
    seed=None,
    **kwargs
):
    """Create timeseries dataframe with random data
    Parameters
    ----------
    start: datetime (or datetime-like string)
        Start of time series
    end: datetime (or datetime-like string)
        End of time series
    dtypes: dict
        Mapping of column names to types.
        Valid types include {float, int, str, 'category'}
    freq: string
        String like '2s' or '1H' or '12W' for the time series frequency
    partition_freq: string
        String like '1M' or '2Y' to divide the dataframe into partitions
    seed: int (optional)
        Randomstate seed
    kwargs:
        Keywords to pass down to individual column creation functions.
        Keywords should be prefixed by the column name and then an underscore.
    Examples
    --------
    >>> import dask.dataframe as dd
    >>> df = dd.demo.make_timeseries('2000', '2010',
    ...                              {'value': float, 'name': str, 'id': int},
    ...                              freq='2H', partition_freq='1D', seed=1)
    >>> df.head()  # doctest: +SKIP
                          id      name     value
    2000-01-01 00:00:00  969     Jerry -0.309014
    2000-01-01 02:00:00 1010       Ray -0.760675
    2000-01-01 04:00:00 1016  Patricia -0.063261
    2000-01-01 06:00:00  960   Charlie  0.788245
    2000-01-01 08:00:00 1031     Kevin  0.466002
    """
    # NOTE(review): copied from dask.dataframe demo code -- random_state_data,
    # tokenize and DataFrame are dask names that are NOT imported in this
    # file, so calling this function as-is raises NameError.
    divisions = list(pd.date_range(start=start, end=end, freq=partition_freq))
    state_data = random_state_data(len(divisions) - 1, seed)
    # Deterministic task-graph name derived from the arguments.
    name = "make-timeseries-" + tokenize(
        start, end, dtypes, freq, partition_freq, state_data
    )
    # One deferred make_timeseries_part call per partition interval.
    dsk = {
        (name, i): (
            make_timeseries_part,
            divisions[i],
            divisions[i + 1],
            dtypes,
            freq,
            state_data[i],
            kwargs,
        )
        for i in range(len(divisions) - 1)
    }
    # Tiny prototype frame used as the collection's metadata ("head").
    head = make_timeseries_part("2000", "2000", dtypes, "1H", state_data[0], kwargs)
    return DataFrame(dsk, name, head, divisions)
# +
import pandas as pd
import numpy as np
# import utils
ts = make_timeseries_part()
ts
# -
# We'll write it to the two formats, first CSV and then parquet.
# %time ts.to_csv("data.csv")
# %time ts.to_parquet("data.parquet")
# Already we see that parquet can be faster at writing, at least for this dataset (it may be slower for small dataframes). But the difference is even more striking when you try to read that data back in.
#
# CSV is a plaintext format. This can be nice if you want to visually inspect the file. However, it's often slower and (at least for CSV) lacks any way to store data types in the file itself. Let's read them back in with the default arguments.
# %time csv = pd.read_csv("data.csv")
# %time parquet = pd.read_parquet("data.parquet")
# Parquet is about 5x faster. But more importantly, the data read from CSV doesn't exactly match what was written. The original dtypes are
ts.dtypes
# CSV read them back in as
csv.dtypes
# And parquet as
parquet.dtypes
# When reading CSVs, pandas has to infer the dtypes. This is slow (especially for datetimes, so pandas doesn't infer datetimes by default) or impossible for more exotic types like Categorical. You'd need to store these types separately and provide them explicitly.
# %%time
dtype = {"name": pd.CategoricalDtype(names)}
csv = pd.read_csv(
"data.csv",
parse_dates=["timestamp"],
dtype=dtype,
index_col="timestamp"
)
# So in addition to being faster than CSVs (at least beyond small datasets), parquet can better
# preserve the data types.
# + [markdown] heading_collapsed="true" tags=[]
# ## Reading Parts of the file
# + [markdown] heading_collapsed="true" tags=[]
# When you're optimizing some piece of code, the fastest way to do something is to not do it at all. Some readers (including both `read_csv` and `read_parquet`) have support for selecting subsets of the data for reading. Both `read_csv` and `read_parquet` let you select a subset of the columns to read in. By not having to read / parse other parts of the dataset, you speed up the reading of the parts you care about.
# -
# %time _ = pd.read_parquet("data.parquet", columns=["x", "y"])
# `read_parquet` also supports selecting a subset of *rows* to read. For maximum performance, you'll want to partition the dataset on disk according to your access pattern. For example, if we want to select a subset of the names, we'd partition on `name`.
ts.to_parquet("data-split.parquet", partition_cols=["name"])
# %%time
ts_frank = pd.read_parquet(
"data-split.parquet",
columns=["x", "y"],
filters=[("name", "=", "Frank")]
)
# We've gone from about 300ms to read the full dataset with CSV to about 14ms to read this subset with Parquet.
# + [markdown] heading_collapsed="true" tags=[]
# ## Constructing DataFrames
# + [markdown] heading_collapsed="true" tags=[]
# A common pattern is to store a full dataset as a bunch of files on disk with the same structure. Suppose we have a directory of parquet files that are generated by some batch process that runs at the end of the month.
# -
# Month-end boundaries spanning 2000-2010; each iteration writes one
# month-long partition (5-minute frequency) to its own parquet file.
months = list(pd.date_range(start="2000-01-01", end="2010-12-31", freq="1M"))
for i in range(len(months) - 1):
    start, end = months[i], months[i + 1]
    df = make_timeseries_part(start, end, freq="5T")
    df.to_parquet(f"../data/{start}.parquet")
# ls ../data/ | head
# And let's suppose we wanted to go from disk to a single pandas dataframe. We have two ways we could get there
#
# 1. Initialize one DataFrame and append to that.
# 2. Make many smaller DataFrames and concatenate them together at the end.
#
# If you were using Python data structures (lists, dictionaries, sets) you'd probably use the first way. In pandas (and NumPy) the second route is faster. Let's compare.
import pathlib
files = list(pathlib.Path("../data").glob("*.parquet"))
# This is the first method: creating an empty DataFrame and appending to it. We'll see that it's relatively slow.
# +
# %%time
# Deliberately slow pattern (shown for comparison): start from an empty,
# correctly-typed frame and repeatedly append.  Every .append copies the
# entire accumulated frame, giving quadratic total work.
result = pd.DataFrame({
    "id": np.array([], dtype="int64"),
    "name": pd.Categorical([], categories=names),
    "x": np.array([], dtype="float64"),
    "y": np.array([], dtype="float64")
}, index=pd.DatetimeIndex([], name='timestamp'))
for file in files:
    df_part = pd.read_parquet(file)
    result = result.append(df_part)
# -
# The second method.
# %%time
parts = [pd.read_parquet(file) for file in files]
ts_full = pd.concat(parts)
# So we have something like a 2X speedup, by simply reading first and then concatenating. Why is that?
#
# pandas' `DataFrame.append` is modeled after Python's `list.append`, but memory-wise they're very different. Recall that the columns inside a pandas DataFrame are typically NumPy arrays, and these cannot be expanded inplace. Expanding a NumPy array really means copying the whole thing. Therefore, every append means copying the old df and then add the new df rows.
#
# So repeatedly calling DataFrame.append means repeatedly copying a whole bunch of NumPy arrays.
# + [markdown] heading_collapsed="true" tags=[]
# ## Data Types
# + [markdown] heading_collapsed="true" tags=[]
# Recent versions of pandas feature *nullable data types*. In addition to being more sound with the types of data, they can offer performance improvements. Because Pandas cast ints with Nans to floats and boolean with Nans to object dtypes.
#
# For example, let's generate some boolean data with missing values.
# -
s1 = pd.Series(
np.array([True, False, np.nan], dtype=object)
).repeat(10000)
s1
# Notice that `s1.dtype` is `object`. That's because NumPy doesn't have a boolean dtype that can store missing values. So pandas falls back to an object-dtype ndarray of *Python* objects, which don't benefit from NumPy's typically optimizations. The memory usage will be higher, and operations will be slower.
#
# We can use pandas' nullable boolean dtype by calling `pd.array()` or by specifying `dtype="boolean"`.
s2 = pd.Series(
pd.array([True, False, pd.NA], dtype="boolean")
).repeat(10000)
s2
# When there are missing values, pandas' nullable boolean type takes less memory than the object dtype.
s1.memory_usage(deep=True) / s2.memory_usage(deep=True)
# And operations (like reductions, comparisons, arithmetic, logical operations) take less time.
# %timeit s1 | s1
# %timeit s2 | s2
# Apparently something is being cached, but ignoring that we're still much faster using pandas' nullable type.
#
# Likewise for reductions like `sum` or `mean`.
# %timeit s1.sum()
# %timeit s2.sum()
# Spend some time ensuring that your dtypes look correct. You'll want to avoid `object` dtype whenever possible. Pandas is gradually adding new extension dtypes for more types of data, so object dtype should become rarer.
#
# It's worth mentioning pandas' `Categorical` dtype. This is a "dictionary encoded" type, where we store the unique set of allowed values once (`.categories`) and the specific value for a row as a compressed integer (`.codes`). This gives lower memory usage and (sometimes) faster operations.
#
# For example, `name` is a Categorical storing strings. Let's compare operations on it with an `object` dtype version.
name = ts['name']
name_obj = name.astype(object)
display(name)
# First of all, the object-dtype version uses more memory.
name_obj.memory_usage(deep=True) / name.memory_usage(deep=True)
# Operations like `.value_counts()` are faster on the categorical version.
# %timeit name_obj.value_counts()
# %timeit name.value_counts()
# Be warned that `Categorical` isn't purely an optimization. It does change the semantics of some operations (especially around ordering and "unobserved" categories).
# Try as much as possible to spend as much time as possible in the C code to get the best performance, i.e. use vectorized operations and dtypes other than object because object dtype is numpy array of Python objects. Therefore, any operation on them means go to Python code which is known to be slow.
# + [markdown] heading_collapsed="true" tags=[]
# ## Iteration, Apply, and Vectorization
# -
# One of the keys to achieving high-performance in Pandas (and Python, genenerally) is to avoid doing too much in Python. We want to push the computationally expensive pieces down to compiled languages like C.
#
# Let's suppose we have some data on airports, and wanted to compute the pairwise distances between each.
airports = pd.read_csv(
"https://vega.github.io/vega-datasets/data/airports.csv",
index_col="iata",
nrows=500,
)
airports
# We'll do a bit of renaming and reindexing to generate the DataFrame of pairs.
# +
columns = ["longitude", "latitude"]
idx = pd.MultiIndex.from_product([airports.index, airports.index],
names=['orig', 'dest'])
pairs = pd.concat([
airports[columns]
.add_suffix('_orig')
.reindex(idx, level='orig'),
airports[columns]
.add_suffix('_dest')
.reindex(idx, level='dest')
], axis="columns"
)
pairs
# -
# And now let's consider two implementations of the great circle distance computation.
#
# The first will use pure Python, and computes the distance between two points.
# +
import math
def gcd_py(lat1, lng1, lat2, lng2):
    '''
    Calculate great circle distance between two points.
    https://www.johndcook.com/blog/python_longitude_latitude/
    Parameters
    ----------
    lat1, lng1, lat2, lng2: float
    Returns
    -------
    distance:
      distance from ``(lat1, lng1)`` to ``(lat2, lng2)`` in kilometers.
    '''
    deg2rad = math.pi / 180.0
    # Colatitudes and longitudes converted to radians.
    phi1 = (90 - lat1) * deg2rad
    phi2 = (90 - lat2) * deg2rad
    theta1 = lng1 * deg2rad
    theta2 = lng2 * deg2rad
    # Spherical law of cosines.
    cos_angle = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2)
                 + math.cos(phi1) * math.cos(phi2))
    # round to avoid precision issues on identical points causing ValueErrors
    return math.acos(round(cos_angle, 8)) * 6373  # radius of earth, in kilometers
# -
# The second uses NumPy, and computes the distances between *arrays* of points.
# Notice how similar the two implementations are.
def gcd_vec(lat1, lng1, lat2, lng2):
    '''
    Calculate great circle distance.
    https://www.johndcook.com/blog/python_longitude_latitude/
    Parameters
    ----------
    lat1, lng1, lat2, lng2: float or array of float
    Returns
    -------
    distance:
      distance from ``(lat1, lng1)`` to ``(lat2, lng2)`` in kilometers.
    '''
    # Colatitudes / longitudes in radians; every operation broadcasts
    # elementwise over array inputs.
    phi1, phi2 = np.deg2rad(90 - lat1), np.deg2rad(90 - lat2)
    theta1, theta2 = np.deg2rad(lng1), np.deg2rad(lng2)
    # Spherical law of cosines.
    cos_angle = (np.sin(phi1) * np.sin(phi2) * np.cos(theta1 - theta2)
                 + np.cos(phi1) * np.cos(phi2))
    # round to avoid precision issues on identical points causing warnings
    return np.arccos(np.round(cos_angle, 8)) * 6373  # radius of earth, in kilometers
# And now let's use these functions in a few different ways.
#
# 1. Pass `gcd_py` to `DataFrame.apply`
# 2. Manually iterate over the DataFrame, calling `gcd_py` on each row
# 3. Call `gcd_vec`.
# %%time
# gcd_py with DataFrame.apply
r = pairs.apply(
lambda x: gcd_py(x['latitude_orig'],
x['longitude_orig'],
x['latitude_dest'],
x['longitude_dest']),
axis="columns"
);
# %%time
# gcd_py with manual iteration
_ = pd.Series([gcd_py(*x) for x in pairs.itertuples(index=False)],
index=pairs.index)
# %%time
# gcd_vec
r = gcd_vec(pairs['latitude_orig'], pairs['longitude_orig'],
pairs['latitude_dest'], pairs['longitude_dest'])
r.head()
# Performance-wise, it's clear that the vectorized version is best. And, in my opinion, the code is clearer.
#
# DataFrame.apply is the clear loser here. It can be useful for quickly writing some little transformation. But `DataFrame.apply(..., axis=1)` generally should be avoided, especially for performance-sensitive code. It does much more work than the other forms we showed.
#
# Not every problem can be solved with vecorization though. Some problems are difficult or impossible to express using just Numpy. For those, we fortunately have Numba.
# + [markdown] heading_collapsed="true" tags=[]
# ## Using Numba
# + [markdown] heading_collapsed="true" tags=[]
# Recent versions of pandas optionally make extensive use of Numba to speed up certain operations. This is helpful when you have some custom user-defined function that you're passing to one of pandas' `.apply`, `.agg`, or `.transform` methods (in a rolling or groupby context).
#
# Consider something like a `df.rolling(n).apply(func)`. At a high level, that operation requires
#
# 1. Splitting the input into groups
# 2. Applying `func` to each group
# 3. Collecting the results into an output group
#
#
# <img src="https://docs.google.com/drawings/d/e/2PACX-1vSpZlYnXg8MfRHlRjm8JDcxkCjrQfI2XoS06JikaoRCuZiQUUgyo5yjWASU-ynNcucK2-eumooIty1-/pub?w=960&h=720">
#
# Now let's suppose we wanted to speed that up with Numba. As a user, you could `@numba.jit` your function. Depending on what your user defined function is doing, that could lead to a nice speedup. But there would still be a bunch of overhead *around* your function that would be relatively slow. Pandas would need to slice into the array (from Python), call your fast function (now in fast machine code), and jump back to Python to form the output array.
#
# <img src="https://docs.google.com/drawings/d/e/2PACX-1vRwvBtrV51LU2qfOxXUrggJ7h0-bTeSSozatQ7AECyhSOxEdO0ivfoXNhwWM5Q-lZvRBxmPMeAX5hzf/pub?w=960&h=540">
#
# When you use the `engine="numba"` keyword, pandas and Numba able to JIT compile a lot more than just your function. We're able to JIT the entire splitting, function application, and result combination so that the whole things stays in fast machine code.
#
# <img src="https://docs.google.com/drawings/d/e/2PACX-1vRYpI3MI4LKZQSz2VUAxQrxiN6wAlnmTCLOF2VcYTDtF5dJEbSE6IY1MgFH8w8GH84Q2Suu9ngjgYD0/pub?w=960&h=540">
# -
# For example, let's compute the mean absolute deviation. Pandas doesn't have a builtin version.
def mad(x):
    """Mean absolute deviation of `x` about its mean."""
    deviations = x - x.mean()
    return np.fabs(deviations).mean()
# The original dataset `ts` is 10-second frequency. We'll do a rolling mean absolute deviation at 1-minute frequency. But, the naive version is too slow to do on the full dataset.
# %%time
# For speed, limit to 10,000 rows
ts[["x", "y"]].head(10_000).rolling("T").apply(mad, raw=True)
# Now let's try this with `engine="numba"`. At first, things don't look great.
# %%time
ts[["x", "y"]].head(10_000).rolling("T").apply(
mad, engine="numba", raw=True
)
# So the operation is a bit slower. But that's because Numba is a just-in-time compiler. It observes what your code is doing and compiles some machine code tailored to the work being done. That compilation takes time, so it's cached and reused. We can call it again and see that things are even faster on subsequent calls.
# %timeit ts[["x", "y"]].head(10_000).rolling("T").apply(mad, engine="numba", raw=True)
# Indeed, it's fast enough that we can call it on the whole thing now.
# %%timeit
_ = ts[["x", "y"]].rolling("T").apply(
mad, engine="numba", raw=True
)
# Using numba for user-defined functions passed to pandas' apply, agg, and transform is extremely powerful. In the currently released version of pandas (1.1) numba-accelerated operations are available in
#
# * GroupBy.aggregate
# * GroupBy.transform
# * Rolling/Expanding.apply
# * Rolling/Expanding.aggregate
#
# ## Summary
#
# Today we've seen a few strategies for writing high-performance pandas code
#
# 1. Choose the best file format for your needs
#
# File formats like Parquet can offer higher performance, especially if your workload only needs to read in subsets of the data
#
# 2. Avoid reapeatedly expanding DataFrames along the rows
#
# We saw that repeatedly calling DataFrame.append was slower than building many dataframes and concatentating them at the end.
#
# 3. Use the right data type
#
# We saw that using pandas' new nullable types can avoid slow `object`-dtypes and cut down on memory usage.
#
# 4. Avoid iteration and apply
#
# We implemented two versions of the great circle distance computation. In pandas, the vectorized version using NumPy was faster than the pure-Python version.
#
# 5. Use Numba for user-defined functions
#
# Pandas may not always have a built-in version of the method you need.
| notebooks/Pandas-Performance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.5.2
# language: julia
# name: julia-0.5
# ---
using PyPlot
using Dierckx
using PiecewiseInterpolation
# Define function with first order discontinuity.
# \begin{equation}
# f(t) = \mathrm{e}^{-10|t|}
# \end{equation}
f = t -> exp(-10abs(t))  # derivative jumps at t = 0
# Dense grid for plotting the exact function.
t = linspace(-1, 1, 1000)
figure(figsize=(4, 3))
plot(t, f.(t))
# Coarse sample of f used as the interpolation input.
times = collect(linspace(-1, 1, 30))
values = f.(times)
plot(times, values, "o", label="input data")
xlabel(L"t")
ylabel(L"f(t)")
# Create naive and piecewise spline (with a single discontinuity at $t=0$):
# naive: one cubic spline fitted straight through the kink;
# piecewise: spline told about the breakpoint at t = 0.
ff = Spline1D(times, values, k=3)
p = PiecewiseSpline1D(times, values, [0.0])
;
# Comparison of exact function, input data, and both interpolations.
figure(figsize=(4, 3))
plot(times, values, "o", label="input data")
new_times = linspace(-1, 1, 1001)
plot(new_times, f.(new_times), label="exact")
plot(new_times, ff.(new_times), label="naive")
plot(new_times, p.(new_times), "--", label="piecewise")
legend()
# Absolute errors of both interpolations (vs exact function).
figure(figsize=(4, 3))
plot(new_times, ff.(new_times) - f.(new_times), label="naive")
plot(new_times, p.(new_times) - f.(new_times), label="piecewise")
ylabel("absolute error")
legend()
axhline(0.0, color="k", lw=0.5)
# Relative errors of both interpolations (vs exact function).
figure(figsize=(4, 3))
plot(new_times, (ff.(new_times) - f.(new_times)) ./ f.(new_times), label="naive")
plot(new_times, (p.(new_times) - f.(new_times)) ./ f.(new_times), label="piecewise")
ylabel("relative error")
legend()
axhline(0.0, color="k", lw=0.5)
| demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:px4]
# language: python
# name: conda-env-px4-py
# ---
# %load_ext autoreload
# %autoreload 2
# %pylab inline
# +
import px4tools.ulog
import pandas
import os
import pickle
import scipy.interpolate
import px4tools.version
# Thicker default line width for all plots in this notebook.
rcParams['lines.linewidth'] = 2
# -
# Record library versions for reproducibility of the analysis.
pandas.__version__
px4tools.version.git_revision
# +
# Resample the raw gyro topic of the first log to 1 s means. The result is
# cached to a pickle so re-running the notebook skips the slow ULog parse
# (force_processing=False reuses an existing cache file).
d_gyro = px4tools.ulog.cached_log_processing(
    log='/home/jgoppert/logs/01-18-17-gyro-bias.ulg',
    msg_filter='sensor_gyro',
    processing_func=lambda x: x['sensor_gyro_0'].resample('1 s').agg('mean'),
    save_path='./logs/01-18-17-gyro-bias-sensor_gyro_0.pkl',
    force_processing=False)
# Load every message topic from the same log (empty filter), also cached.
d_comb = px4tools.ulog.cached_log_processing(
    log='/home/jgoppert/logs/01-18-17-gyro-bias.ulg',
    msg_filter='',
    processing_func=lambda x: x,
    save_path='/home/jgoppert/logs/01-18-17-gyro-bias-comb.pkl',
    force_processing=False)
# Merge all topics onto a common 1 s index, fill gaps forward/backward, and
# keep the 2 min - 40 min window (presumably to skip startup transients --
# confirm against the flight log).
d_gyro_bias = d_comb.concat(dt=1).ffill().bfill()['2 min': '40 m']
# +
# Same processing as above for the second gyro-bias log.
d_gyro2 = px4tools.ulog.cached_log_processing(
    log='/home/jgoppert/logs/01-18-17-gyro-bias2.ulg',
    msg_filter='sensor_gyro',
    processing_func=lambda x: x['sensor_gyro_0'].resample('1 s').agg('mean'),
    save_path='./logs/01-18-17-gyro-bias2-sensor_gyro_0.pkl',
    force_processing=False)
d_comb2 = px4tools.ulog.cached_log_processing(
    log='/home/jgoppert/logs/01-18-17-gyro-bias2.ulg',
    msg_filter='',
    processing_func=lambda x: x,
    save_path='/home/jgoppert/logs/01-18-17-gyro-bias2-comb.pkl',
    force_processing=False)
# Merge, gap-fill, and trim to the same 2 min - 40 min analysis window.
d_gyro2_bias = d_comb2.concat(dt=1).ffill().bfill()['2 min': '40 m']
# +
def plot_gyro_bias(d):
    """Plot estimator bias states 7-9 against the measured gyro x/y/z axes.

    Dashed lines are the estimates, solid faded lines the measurements.
    Relies on pylab globals (`plt`, `legend`) imported earlier in the notebook.
    """
    channels = [(7, 'x', 'r'), (8, 'y', 'g'), (9, 'z', 'b')]
    for state_idx, axis, color in channels:
        estimated = getattr(d, 't_estimator_status_0__f_states_{:d}_'.format(state_idx))
        estimated.plot(label=axis + ' est', style=color + '--')
        measured = getattr(d, 't_sensor_gyro_0__f_{:s}'.format(axis))
        measured.plot(label=axis, style=color, alpha=0.5)
    plt.gcf().autofmt_xdate()
    legend(ncol=3, loc='best')
    plt.ylabel('rad/s')
    plt.title('gyro bias')
def plot_gyro_bias_error(d):
    """Plot per-axis residuals: estimated gyro bias minus measured gyro."""
    for state_idx, axis, color in [(7, 'x', 'r'), (8, 'y', 'g'), (9, 'z', 'b')]:
        estimated = getattr(d, 't_estimator_status_0__f_states_{:d}_'.format(state_idx))
        measured = getattr(d, 't_sensor_gyro_0__f_{:s}'.format(axis))
        (estimated - measured).plot(label=axis, style=color)
    plt.gcf().autofmt_xdate()
    legend(ncol=3, loc='best')
    plt.title('gyro bias error')
# +
def plot_accel_bias(d):
    """Plot estimator bias states 10-12 against the measured accel x/y/z axes.

    The measured z axis is offset by +9.8 before plotting (presumably to
    remove gravity so it is comparable to the bias estimate -- confirm).
    """
    channels = [(10, 'x', 'r'), (11, 'y', 'g'), (12, 'z', 'b')]
    for state_idx, axis, color in channels:
        estimated = getattr(d, 't_estimator_status_0__f_states_{:d}_'.format(state_idx))
        estimated.plot(label=axis + ' est', style=color + '--')
        measured = getattr(d, 't_sensor_accel_0__f_{:s}'.format(axis))
        if axis == 'z':
            measured = pandas.Series(measured + 9.8)
        measured.plot(label=axis, style=color, alpha=0.5)
    plt.ylabel('m/s^2')
    plt.gcf().autofmt_xdate()
    legend(ncol=3, loc='best')
    plt.title('accel bias')
def plot_accel_bias_error(d):
    """Plot per-axis residuals: estimated accel bias minus measured accel.

    The z residual subtracts 9.8, mirroring the gravity offset applied in
    plot_accel_bias. Date-axis formatting, legend, and title are added here
    for consistency with plot_gyro_bias_error, which the original omitted.
    """
    (d.t_estimator_status_0__f_states_10_ - d.t_sensor_accel_0__f_x).plot(label='x', style='r')
    (d.t_estimator_status_0__f_states_11_ - d.t_sensor_accel_0__f_y).plot(label='y', style='g')
    (d.t_estimator_status_0__f_states_12_ - d.t_sensor_accel_0__f_z - 9.8).plot(label='z', style='b')
    plt.gcf().autofmt_xdate()
    legend(ncol=3, loc='best')
    plt.title('accel bias error')
# +
# Estimated vs. measured gyro bias and its residual for the first log,
# with fixed y-limits for comparison.
figure()
plot_gyro_bias(d_gyro_bias)
gca().set_ylim(-0.01, 0.04)
figure()
plot_gyro_bias_error(d_gyro_bias)
gca().set_ylim(-0.02, 0.02)
# +
# Same pair of plots for the second log (wider limits).
figure()
plot_gyro_bias(d_gyro2_bias)
gca().set_ylim(-0.1, 0.1)
figure()
plot_gyro_bias_error(d_gyro2_bias)
gca().set_ylim(-0.1, 0.1)
# -
# Accelerometer bias for both logs; manual y-limit overrides left disabled.
plot_accel_bias(d_gyro2_bias)
#gca().set_ylim(-1, 0.4)
plot_accel_bias(d_gyro_bias)
#gca().set_ylim(-1, 0.4)
# Estimator status topic of the first log; used by all std.-dev. plots below.
est_status = d_comb['estimator_status_0']
# +
def plot_rotation_std_dev(d):
    """Plot rotation-state standard deviations (covariance indices 0-2) in degrees."""
    for state_idx in (0, 1, 2):
        cov = getattr(d, 't_estimator_status_0__f_covariances_{:d}_'.format(state_idx))
        np.rad2deg(sqrt(cov)).plot()
    plt.ylabel('deg')
    plt.title('rotation std. dev.')
    plt.grid()

# Only the first two minutes, where convergence happens.
plot_rotation_std_dev(est_status[:'2 m'])
# +
def plot_velocity_std_dev(d):
    """Plot velocity-state standard deviations (covariance indices 3-5) in m/s."""
    for state_idx in (3, 4, 5):
        cov = getattr(d, 't_estimator_status_0__f_covariances_{:d}_'.format(state_idx))
        sqrt(cov).plot()
    plt.ylabel('m/s')
    plt.title('velocity std. dev.')
    plt.grid()

# First two minutes only.
plot_velocity_std_dev(est_status[:'2 m'])
# -
# Display the IEKF state-index -> name mapping for reference.
px4tools.IEKF_STATES
# +
def plot_gyro_bias_std_dev(d):
    """Plot gyro-bias-state standard deviations (covariance indices 6-8) in degrees."""
    for state_idx in (6, 7, 8):
        cov = getattr(d, 't_estimator_status_0__f_covariances_{:d}_'.format(state_idx))
        np.rad2deg(sqrt(cov)).plot()
    plt.ylabel('deg')
    plt.title('gyro bias std. dev.')
    plt.grid()

# First two minutes only.
plot_gyro_bias_std_dev(est_status[:'2 m'])
# +
def plot_accel_bias_std_dev(d):
    """Plot accel-bias-state standard deviations (covariance indices 9-11) in m/s^2."""
    for state_idx in (9, 10, 11):
        cov = getattr(d, 't_estimator_status_0__f_covariances_{:d}_'.format(state_idx))
        sqrt(cov).plot()
    plt.ylabel('m/s^2')
    plt.title('accel bias std. dev.')
    plt.grid()

# First two minutes only.
plot_accel_bias_std_dev(est_status[:'2 m'])
# +
def plot_pos_std_dev(d):
    """Plot position-state standard deviations (covariance indices 12-14) in metres."""
    for state_idx in (12, 13, 14):
        cov = getattr(d, 't_estimator_status_0__f_covariances_{:d}_'.format(state_idx))
        sqrt(cov).plot()
    plt.ylabel('m')
    plt.title('pos std. dev.')
    plt.grid()

# First two minutes only.
plot_pos_std_dev(est_status[:'2 m'])
# +
# NOTE(review): the original re-defined plot_pos_std_dev here, byte-identical
# to the definition above; the redundant definition is dropped and only the
# repeated call is kept.
plot_pos_std_dev(est_status[:'2 m'])
# -
# Display the IEKF error-state index -> name mapping, for interpreting the
# covariance column indices used below.
px4tools.ulog.IEKF_ERROR_STATES
# Plot the covariance diagonal entries for error states 6-17 (bias and
# position terms) over the first two minutes. The original repeated the same
# line twelve times with the index changed; a loop is equivalent for script
# execution (only the per-cell axes repr output in the notebook is lost).
for cov_idx in range(6, 18):
    col = 't_estimator_status_0__f_covariances_{:d}_'.format(cov_idx)
    getattr(est_status, col)[:'2 m'].plot()
| Gyro bias analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import requests
import json
import pandas as pd
import os
# Read the stored auth tokens and use the newest entry as the bearer value
# for the Authorization header on every API request below.
with open('../data/general/auth.json') as auth_json:
    authes = pd.read_json(auth_json)
# NOTE(review): each row's 'token' cell appears to itself be a mapping with a
# 'token' key, and the last row is taken as most recent -- confirm file schema.
auth = authes['token'].iloc[-1]['token']
headers = {'Authorization': auth}
# Match list; its 'id' column drives all of the download loops below.
with open('../data/general/matches_2017_2018_v1.json') as matches_json:
    matches = pd.read_json(matches_json)
# -
# The seven dataset downloads below all followed the same copy-pasted pattern,
# so they are factored into a single helper. The original also shadowed the
# Python 2 builtins `id` and `file`; both are renamed here.
def fetch_and_save(endpoint, prefix, url_suffix=''):
    """Download one Sentio dataset for every match and save it as JSON.

    endpoint   -- API path segment after /v2/, e.g. 'Passes' or 'TaggerData'
    prefix     -- file-name prefix for the saved JSON, e.g. 'pass_data'
    url_suffix -- optional trailing path appended after the match id
                  (used for the CurrData/<id>/Players endpoint)

    Relies on the module-level `matches` frame and `headers` auth dict.
    Writes ../data/match_<id>/<prefix>_<id>.json, creating the directory
    if needed.
    """
    for match_id in matches['id']:
        url = 'https://api.sentiocloud.net/v2/' + endpoint + '/' + str(match_id) + url_suffix
        response = requests.get(url=url, headers=headers)
        data = json.loads(response.content)
        directory = '../data/match_' + str(match_id)
        if not os.path.exists(directory):
            os.makedirs(directory)
        out_path = directory + '/' + prefix + '_' + str(match_id) + '.json'
        with open(out_path, 'w') as out_file:
            json.dump(data, out_file, indent=4, sort_keys=True)

# Get pass data
fetch_and_save('Passes', 'pass_data')
# Get per-second data
fetch_and_save('PersecData', 'per_sec_data')
# Get match data
fetch_and_save('CurrData', 'match_data')
# Get roster data
fetch_and_save('Rosters', 'roster_data')
# Get players data
fetch_and_save('CurrData', 'players_data', url_suffix='/Players')
# Get tagger data
fetch_and_save('TaggerData', 'tagger_data')
# Get heatmap position data
fetch_and_save('HeatMapData', 'heatmap_data')
| src/data.ipynb |