repo_name stringlengths 6 77 | path stringlengths 8 215 | license stringclasses 15
values | content stringlengths 335 154k |
|---|---|---|---|
twosigma/beakerx | doc/python/ChartingAPI.ipynb | apache-2.0 | from beakerx import *
import pandas as pd
tableRows = pd.read_csv('../resources/data/interest-rates.csv')
Plot(title="Title",
xLabel="Horizontal",
yLabel="Vertical",
initWidth=500,
initHeight=200)
"""
Explanation: Python API to BeakerX Interactive Plotting
You can access Beaker's native interactive plotting library from Python.
Plot with simple properties
Python plots have syntax very similar to Groovy plots. Property names are the same.
End of explanation
"""
x = [1, 4, 6, 8, 10]
y = [3, 6, 4, 5, 9]
pp = Plot(title='Bars, Lines, Points and 2nd yAxis',
xLabel="xLabel",
yLabel="yLabel",
legendLayout=LegendLayout.HORIZONTAL,
legendPosition=LegendPosition.RIGHT,
omitCheckboxes=True)
pp.add(YAxis(label="Right yAxis"))
pp.add(Bars(displayName="Bar",
x=[1,3,5,7,10],
y=[100, 120,90,100,80],
width=1))
pp.add(Line(displayName="Line",
x=x,
y=y,
width=6,
yAxis="Right yAxis"))
pp.add(Points(x=x,
y=y,
size=10,
shape=ShapeType.DIAMOND,
yAxis="Right yAxis"))
plot = Plot(title= "Setting line properties")
ys = [0, 1, 6, 5, 2, 8]
ys2 = [0, 2, 7, 6, 3, 8]
plot.add(Line(y= ys, width= 10, color= Color.red))
plot.add(Line(y= ys, width= 3, color= Color.yellow))
plot.add(Line(y= ys, width= 4, color= Color(33, 87, 141), style= StrokeType.DASH, interpolation= 0))
plot.add(Line(y= ys2, width= 2, color= Color(212, 57, 59), style= StrokeType.DOT))
plot.add(Line(y= [5, 0], x= [0, 5], style= StrokeType.LONGDASH))
plot.add(Line(y= [4, 0], x= [0, 5], style= StrokeType.DASHDOT))
plot = Plot(title= "Changing Point Size, Color, Shape")
y1 = [6, 7, 12, 11, 8, 14]
y2 = [4, 5, 10, 9, 6, 12]
y3 = [2, 3, 8, 7, 4, 10]
y4 = [0, 1, 6, 5, 2, 8]
plot.add(Points(y= y1))
plot.add(Points(y= y2, shape= ShapeType.CIRCLE))
plot.add(Points(y= y3, size= 8.0, shape= ShapeType.DIAMOND))
plot.add(Points(y= y4, size= 12.0, color= Color.orange, outlineColor= Color.red))
plot = Plot(title= "Changing point properties with list")
cs = [Color.black, Color.red, Color.orange, Color.green, Color.blue, Color.pink]
ss = [6.0, 9.0, 12.0, 15.0, 18.0, 21.0]
fs = [False, False, False, True, False, False]
plot.add(Points(y= [5] * 6, size= 12.0, color= cs))
plot.add(Points(y= [4] * 6, size= 12.0, color= Color.gray, outlineColor= cs))
plot.add(Points(y= [3] * 6, size= ss, color= Color.red))
plot.add(Points(y= [2] * 6, size= 12.0, color= Color.black, fill= fs, outlineColor= Color.black))
plot = Plot()
y1 = [1.5, 1, 6, 5, 2, 8]
cs = [Color.black, Color.red, Color.gray, Color.green, Color.blue, Color.pink]
ss = [StrokeType.SOLID, StrokeType.SOLID, StrokeType.DASH, StrokeType.DOT, StrokeType.DASHDOT, StrokeType.LONGDASH]
plot.add(Stems(y= y1, color= cs, style= ss, width= 5))
plot = Plot(title= "Setting the base of Stems")
ys = [3, 5, 2, 3, 7]
y2s = [2.5, -1.0, 3.5, 2.0, 3.0]
plot.add(Stems(y= ys, width= 2, base= y2s))
plot.add(Points(y= ys))
plot = Plot(title= "Bars")
cs = [Color(255, 0, 0, 128)] * 5 # transparent bars
cs[3] = Color.red # set color of a single bar, solid colored bar
plot.add(Bars(x= [1, 2, 3, 4, 5], y= [3, 5, 2, 3, 7], color= cs, outlineColor= Color.black, width= 0.3))
"""
Explanation: Plot items
Lines, Bars, Points and Right yAxis
End of explanation
"""
plot = Plot(title= "Pandas line")
plot.add(Line(y= tableRows.y1, width= 2, color= Color(216, 154, 54)))
plot.add(Line(y= tableRows.y10, width= 2, color= Color.lightGray))
plot
plot = Plot(title= "Pandas Series")
plot.add(Line(y= pd.Series([0, 6, 1, 5, 2, 4, 3]), width=2))
plot = Plot(title= "Bars")
cs = [Color(255, 0, 0, 128)] * 7 # transparent bars
cs[3] = Color.red # set color of a single bar, solid colored bar
plot.add(Bars(pd.Series([0, 6, 1, 5, 2, 4, 3]), color= cs, outlineColor= Color.black, width= 0.3))
"""
Explanation: Lines, Points with Pandas
End of explanation
"""
ch = Crosshair(color=Color.black, width=2, style=StrokeType.DOT)
plot = Plot(crosshair=ch)
y1 = [4, 8, 16, 20, 32]
base = [2, 4, 8, 10, 16]
cs = [Color.black, Color.orange, Color.gray, Color.yellow, Color.pink]
ss = [StrokeType.SOLID,
StrokeType.SOLID,
StrokeType.DASH,
StrokeType.DOT,
StrokeType.DASHDOT,
StrokeType.LONGDASH]
plot.add(Area(y=y1, base=base, color=Color(255, 0, 0, 50)))
plot.add(Stems(y=y1, base=base, color=cs, style=ss, width=5))
plot = Plot()
y = [3, 5, 2, 3]
x0 = [0, 1, 2, 3]
x1 = [3, 4, 5, 8]
plot.add(Area(x= x0, y= y))
plot.add(Area(x= x1, y= y, color= Color(128, 128, 128, 50), interpolation= 0))
p = Plot()
p.add(Line(y= [3, 6, 12, 24], displayName= "Median"))
p.add(Area(y= [4, 8, 16, 32], base= [2, 4, 8, 16],
color= Color(255, 0, 0, 50), displayName= "Q1 to Q3"))
ch = Crosshair(color= Color(255, 128, 5), width= 2, style= StrokeType.DOT)
pp = Plot(crosshair= ch, omitCheckboxes= True,
legendLayout= LegendLayout.HORIZONTAL, legendPosition= LegendPosition.TOP)
x = [1, 4, 6, 8, 10]
y = [3, 6, 4, 5, 9]
pp.add(Line(displayName= "Line", x= x, y= y, width= 3))
pp.add(Bars(displayName= "Bar", x= [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], y= [2, 2, 4, 4, 2, 2, 0, 2, 2, 4], width= 0.5))
pp.add(Points(x= x, y= y, size= 10))
"""
Explanation: Areas, Stems and Crosshair
End of explanation
"""
p = Plot ()
p.add(Line(y=[-1, 1]))
p.add(ConstantLine(x=0.65, style=StrokeType.DOT, color=Color.blue))
p.add(ConstantLine(y=0.1, style=StrokeType.DASHDOT, color=Color.blue))
p.add(ConstantLine(x=0.3, y=0.4, color=Color.gray, width=5, showLabel=True))
Plot().add(Line(y=[-3, 1, 3, 4, 5])).add(ConstantBand(x=[1, 2], y=[1, 3]))
p = Plot()
p.add(Line(x= [-3, 1, 2, 4, 5], y= [4, 2, 6, 1, 5]))
p.add(ConstantBand(x= ['-Infinity', 1], color= Color(128, 128, 128, 50)))
p.add(ConstantBand(x= [1, 2]))
p.add(ConstantBand(x= [4, 'Infinity']))
from decimal import Decimal
pos_inf = Decimal('Infinity')
neg_inf = Decimal('-Infinity')
print (pos_inf)
print (neg_inf)
from beakerx.plot import Text as BeakerxText
plot = Plot()
xs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
ys = [8.6, 6.1, 7.4, 2.5, 0.4, 0.0, 0.5, 1.7, 8.4, 1]
def label(i, values=None):
    """Classify point i of *values* relative to its immediate neighbours.

    Returns "max"/"min" for local extrema, "rising"/"falling" for
    monotonic steps, and "" when the point equals its left neighbour.

    Arguments:
        i (int): index to classify; callers must guarantee
            0 < i < len(values) - 1 (the loop below guards this).
        values (sequence, optional): series to inspect. Defaults to the
            module-level ``ys`` for backward compatibility.
    """
    if values is None:
        values = ys
    if values[i] > values[i+1] and values[i] > values[i-1]:
        return "max"
    if values[i] < values[i+1] and values[i] < values[i-1]:
        return "min"
    # not an extremum: compare against the left neighbour only
    if values[i] > values[i-1]:
        return "rising"
    if values[i] < values[i-1]:
        return "falling"
    return ""
for i in xs:
i = i - 1
if i > 0 and i < len(xs)-1:
plot.add(BeakerxText(x= xs[i], y= ys[i], text= label(i), pointerAngle= -i/3.0))
plot.add(Line(x= xs, y= ys))
plot.add(Points(x= xs, y= ys))
plot = Plot(title= "Setting 2nd Axis bounds")
ys = [0, 2, 4, 6, 15, 10]
ys2 = [-40, 50, 6, 4, 2, 0]
ys3 = [3, 6, 3, 6, 70, 6]
plot.add(YAxis(label="Spread"))
plot.add(Line(y= ys))
plot.add(Line(y= ys2, yAxis="Spread"))
plot.setXBound([-2, 10])
#plot.setYBound(1, 5)
plot.getYAxes()[0].setBound(1,5)
plot.getYAxes()[1].setBound(3,6)
plot
plot = Plot(title= "Setting 2nd Axis bounds")
ys = [0, 2, 4, 6, 15, 10]
ys2 = [-40, 50, 6, 4, 2, 0]
ys3 = [3, 6, 3, 6, 70, 6]
plot.add(YAxis(label="Spread"))
plot.add(Line(y= ys))
plot.add(Line(y= ys2, yAxis="Spread"))
plot.setXBound([-2, 10])
plot.setYBound(1, 5)
plot
"""
Explanation: Constant Lines, Constant Bands
End of explanation
"""
import time
millis = current_milli_time()
hour = round(1000 * 60 * 60)
xs = []
ys = []
for i in range(11):
xs.append(millis + hour * i)
ys.append(i)
plot = TimePlot(timeZone="America/New_York")
# list of milliseconds
plot.add(Points(x=xs, y=ys, size=10, displayName="milliseconds"))
plot = TimePlot()
plot.add(Line(x=tableRows['time'], y=tableRows['m3']))
"""
Explanation: TimePlot
End of explanation
"""
y = pd.Series([7.5, 7.9, 7, 8.7, 8, 8.5])
dates = [np.datetime64('2015-02-01'),
np.datetime64('2015-02-02'),
np.datetime64('2015-02-03'),
np.datetime64('2015-02-04'),
np.datetime64('2015-02-05'),
np.datetime64('2015-02-06')]
plot = TimePlot()
plot.add(Line(x=dates, y=y))
"""
Explanation: numpy datetime64
End of explanation
"""
y = pd.Series([7.5, 7.9, 7, 8.7, 8, 8.5])
dates = pd.Series(['2015-02-01',
'2015-02-02',
'2015-02-03',
'2015-02-04',
'2015-02-05',
'2015-02-06']
, dtype='datetime64[ns]')
plot = TimePlot()
plot.add(Line(x=dates, y=y))
"""
Explanation: Timestamp
End of explanation
"""
import datetime
y = pd.Series([7.5, 7.9, 7, 8.7, 8, 8.5])
dates = [datetime.date(2015, 2, 1),
datetime.date(2015, 2, 2),
datetime.date(2015, 2, 3),
datetime.date(2015, 2, 4),
datetime.date(2015, 2, 5),
datetime.date(2015, 2, 6)]
plot = TimePlot()
plot.add(Line(x=dates, y=y))
import datetime
y = pd.Series([7.5, 7.9, 7, 8.7, 8, 8.5])
dates = [datetime.datetime(2015, 2, 1),
datetime.datetime(2015, 2, 2),
datetime.datetime(2015, 2, 3),
datetime.datetime(2015, 2, 4),
datetime.datetime(2015, 2, 5),
datetime.datetime(2015, 2, 6)]
plot = TimePlot()
plot.add(Line(x=dates, y=y))
"""
Explanation: Datetime and date
End of explanation
"""
millis = current_milli_time()
nanos = millis * 1000 * 1000
xs = []
ys = []
for i in range(11):
xs.append(nanos + 7 * i)
ys.append(i)
nanoplot = NanoPlot()
nanoplot.add(Points(x=xs, y=ys))
"""
Explanation: NanoPlot
End of explanation
"""
y1 = [1,5,3,2,3]
y2 = [7,2,4,1,3]
p = Plot(title='Plot with XYStacker', initHeight=200)
a1 = Area(y=y1, displayName='y1')
a2 = Area(y=y2, displayName='y2')
stacker = XYStacker()
p.add(stacker.stack([a1, a2]))
"""
Explanation: Stacking
End of explanation
"""
SimpleTimePlot(tableRows, ["y1", "y10"], # column names
               timeColumn="time", # "time" is the default value for timeColumn
               yLabel="Price",
               displayNames=["1 Year", "10 Year"],
               colors = [[216, 154, 54], Color.lightGray],
               displayLines=True, # show lines (True by default)
               displayPoints=False) # hide points (False by default)
#time column base on DataFrame index
tableRows.index = tableRows['time']
SimpleTimePlot(tableRows, ['m3'])
rng = pd.date_range('1/1/2011', periods=72, freq='H')
ts = pd.Series(np.random.randn(len(rng)), index=rng)
df = pd.DataFrame(ts, columns=['y'])
SimpleTimePlot(df, ['y'])
"""
Explanation: SimpleTime Plot
End of explanation
"""
p = TimePlot(xLabel= "Time", yLabel= "Interest Rates")
p.add(YAxis(label= "Spread", upperMargin= 4))
p.add(Area(x= tableRows.time, y= tableRows.spread, displayName= "Spread",
yAxis= "Spread", color= Color(180, 50, 50, 128)))
p.add(Line(x= tableRows.time, y= tableRows.m3, displayName= "3 Month"))
p.add(Line(x= tableRows.time, y= tableRows.y10, displayName= "10 Year"))
"""
Explanation: Second Y Axis
The plot can have two y-axes. Just add a YAxis to the plot object, and specify its label.
Then for data that should be scaled according to this second axis,
specify the property yAxis with a value that coincides with the label given.
You can use upperMargin and lowerMargin to restrict the range of the data leaving more white, perhaps for the data on the other axis.
End of explanation
"""
import math
points = 100
logBase = 10
expys = []
xs = []
for i in range(0, points):
xs.append(i / 15.0)
expys.append(math.exp(xs[i]))
cplot = CombinedPlot(xLabel= "Linear")
logYPlot = Plot(title= "Linear x, Log y", yLabel= "Log", logY= True, yLogBase= logBase)
logYPlot.add(Line(x= xs, y= expys, displayName= "f(x) = exp(x)"))
logYPlot.add(Line(x= xs, y= xs, displayName= "g(x) = x"))
cplot.add(logYPlot, 4)
linearYPlot = Plot(title= "Linear x, Linear y", yLabel= "Linear")
linearYPlot.add(Line(x= xs, y= expys, displayName= "f(x) = exp(x)"))
linearYPlot.add(Line(x= xs, y= xs, displayName= "g(x) = x"))
cplot.add(linearYPlot,4)
cplot
plot = Plot(title= "Log x, Log y", xLabel= "Log", yLabel= "Log",
logX= True, xLogBase= logBase, logY= True, yLogBase= logBase)
plot.add(Line(x= xs, y= expys, displayName= "f(x) = exp(x)"))
plot.add(Line(x= xs, y= xs, displayName= "f(x) = x"))
plot
"""
Explanation: Combined Plot
End of explanation
"""
|
bioinformatica-corso/lezioni | laboratorio/lezione17-09dic21/esercizio2-biopython.ipynb | cc0-1.0 | import Bio
"""
Explanation: Biopython - Esercizio2
Prendere in input un entry in formato embl di una sequenza nucleotidica di mRNA e, senza conoscere la proteina effettivamente annotata nel file ma solo sulla base della sequenza nucleotidica del trascritto, trovare tutte le proteine di oltre 1000 amminoacidi che il trascritto potrebbe esprimere.
In questo esercizio definiamo come proteina potenzialmente esprimibile dal trascritto la proteina che si ottiene dalla traduzione di una qualsiasi sottostringa che inizia con lo start codon atg e finisce con uno degli stop codon (tale che non comprenda stop codon in mezzo).
Installare il package Bio di Biopython.
Importare il package Bio.
End of explanation
"""
from Bio import SeqIO
"""
Explanation: Importare il package SeqIO.
End of explanation
"""
embl_record = SeqIO.read('./M10051.txt', 'embl')
embl_record
"""
Explanation: Ottenere il record del file di input
End of explanation
"""
readings = [embl_record[f:] for f in [0,1,2]]
readings = [r[:len(r)-len(r)%3] for r in readings]
readings
"""
Explanation: Ottenere la lista dei 3 frame di lettura della sequenza (come oggetti di tipo SeqRecord).
Primo frame di lettura: il più lungo prefisso la cui lunghezza è multiplo di 3.
Secondo frame di lettura: la più lunga sottostringa che inizia alla posizione 1 la cui lunghezza è multiplo di 3.
Terzo frame di lettura: la più lunga sottostringa che inizia alla posizione 2 la cui lunghezza è multiplo di 3.
End of explanation
"""
reading_translations = [str(r.translate().seq) for r in readings]
reading_translations
"""
Explanation: Ottenere la lista delle tre traduzioni dei frame di lettura (come oggetti di tipo str).
End of explanation
"""
stop_chunk_list = [t.split('*') for t in reading_translations]
stop_chunk_list
"""
Explanation: Separare ognuna delle 3 traduzioni tramite il simbolo *, che rappresenta lo stop codon, e produrre la lista delle liste delle singole parti separate, chiamate nel seguito stop chunks.
End of explanation
"""
stop_chunk_list = [chunk for list_of_list in stop_chunk_list for chunk in list_of_list]
stop_chunk_list
"""
Explanation: Trasformare la lista precedente da lista di liste di stringhe a lista di stringhe (lista di stop chunks).
End of explanation
"""
import re
# Use 'M[^M]*' (not 'M[^M]+') so that a chunk consisting of a single 'M'
# immediately followed by another 'M' is still captured: per the spec, each
# M chunk runs from an M up to (but excluding) the next M, and may therefore
# be a single residue.
m_chunk_list = [re.findall('(M[^M]*)', stop_chunk) for stop_chunk in stop_chunk_list]
m_chunk_list
"""
Explanation: Da ognuno degli stop chunks della lista precedente estrarre le sottostringhe consecutive che iniziano con M (e terminano una posizione prima della M successiva), che chiameremo M chunks.
Produrre una lista di liste ciascuna contenenti gli M chunks per un dato stop chunk.
Esempio di stop chunk con spazio inserito prima di ogni M:
QCLPWRGRAGVPI MAFLWFESLWK MQDSHDST MSSGVQRSFLY MSVHLKVDSFGYQFN
M chunks dello stop chunk di esempio:
MAFLWFESLWK MQDSHDST MSSGVQRSFLY MSVHLKVDSFGYQFN
End of explanation
"""
protein_list = [''.join(m_chunks[i:]) for m_chunks in m_chunk_list for i in range(len(m_chunks))]
protein_list
"""
Explanation: Per ognuna delle liste di M chunks, effettuare le concatenazioni degli M chunks: prima concatenare tutte le stringhe dalla prima all'ultima, poi concatenare tutte le stringhe dalla seconda all'ultima, etc. per ottenere le proteine potenzialmente esprimibili dal trascritto.
Ad esempio per questa lista degli M chunks di uno stop chunk:
MAFLWFESLWK MQDSHDST MSSGVQRSFLY MSVHLKVDSFGYQFN
si devono effettuare quattro concatenazioni e ottenere quindi quattro proteine potenziali:
1. Concatenazione di: MAFLWFESLWK MQDSHDST MSSGVQRSFLY MSVHLKVDSFGYQFN
2. Concatenazione di: MQDSHDST MSSGVQRSFLY MSVHLKVDSFGYQFN
3. Concatenazione di: MSSGVQRSFLY MSVHLKVDSFGYQFN
4. Concatenazione di: MSVHLKVDSFGYQFN
Produrre quindi la lista delle proteine relative a tutti gli stop chunks.
End of explanation
"""
protein_list = [protein for protein in protein_list if len(protein) > 1000]
protein_list
"""
Explanation: Eliminare le proteine che non superano una lunghezza di 1000 amminoacidi.
End of explanation
"""
|
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v2.512/resnet-tpuv2-512/code/resnet/model/tpu/tools/colab/Classification_Iris_data_with_Keras.ipynb | apache-2.0 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An Example of a classification model using Keras for the Iris dataset."""
import json
import os
import pandas as pd
import pprint
import tensorflow as tf
import time
import numpy as np
from tensorflow import keras
print(tf.__version__)
"""
Explanation: A simple classification model using Keras with Cloud TPUs
This notebook demonstrates using Cloud TPUs in colab to build a simple classification model using iris dataset to predict the species of the flower. This model is using 4 input features (SepalLength, SepalWidth, PetalLength, PetalWidth) to determine one of these flower species (Setosa, Versicolor, Virginica).
Advantages:
* GCP account is not compulsory which is a must pre-requisite for the models using TPUEstimator()
* This tutorial gives a way to take your own data instead of using already loaded data into Keras.
NOTE: This tutorial is just for learning how to write a simple model using Keras. It should not be used for comparison with training on CPUs because we have very little data in this iris_data example.
Imports
End of explanation
"""
use_tpu = True #@param {type:"boolean"}
if use_tpu:
assert 'COLAB_TPU_ADDR' in os.environ, 'Missing TPU; did you request a TPU in Notebook Settings?'
if 'COLAB_TPU_ADDR' in os.environ:
TF_MASTER = 'grpc://{}'.format(os.environ['COLAB_TPU_ADDR'])
else:
TF_MASTER=''
with tf.Session(TF_MASTER) as session:
print ('List of devices:')
pprint.pprint(session.list_devices())
"""
Explanation: Resolve TPU Address
End of explanation
"""
# Model specific parameters
# TPU address
tpu_address = TF_MASTER
# Number of epochs
epochs = 50
# Number of steps_per_epoch
steps_per_epoch = 20
# NOTE: Total number of training steps = Number of epochs * Number of steps_per_epochs
# Total number of evaluation steps. If '0', evaluation after training is skipped
eval_steps = 50
"""
Explanation: FLAGS used as model params
End of explanation
"""
TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth',
'PetalLength', 'PetalWidth', 'Species']
SPECIES = ['Setosa', 'Versicolor', 'Virginica']
PREDICTION_INPUT_DATA = {
'SepalLength': [6.9, 5.1, 5.9, 6.0, 5.5, 6.2, 5.5, 6.3],
'SepalWidth': [3.1, 3.3, 3.0, 3.4, 2.5, 2.9, 4.2, 2.8],
'PetalLength': [5.4, 1.7, 4.2, 4.5, 4.0, 4.3, 1.4, 5.1],
'PetalWidth': [2.1, 0.5, 1.5, 1.6, 1.3, 1.3, 0.2, 1.5],
}
PREDICTION_OUTPUT_DATA = ['Virginica', 'Setosa', 'Versicolor', 'Versicolor', 'Versicolor', 'Versicolor', 'Setosa', 'Virginica']
def maybe_download():
    """Download the iris train/test CSVs if absent and return their local paths.

    tf.keras.utils.get_file caches the files (skipping re-download on
    subsequent calls); the basename of each URL is used as the cache key.
    """
    train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL)
    test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)
    return train_path, test_path
def load_data(y_name='Species'):
    """Returns the iris dataset as (train_x, train_y), (test_x, test_y).

    Downloads the CSVs via maybe_download(), reads both with explicit
    column dtypes, and pops the label column *y_name* out of each frame.
    """
    train_path, test_path = maybe_download()
    # pd.np was deprecated (and removed in pandas >= 2.0); use numpy directly.
    # Shared dtype mapping avoids duplicating it for train and test.
    col_dtypes = {'SepalLength': np.float32, 'SepalWidth': np.float32,
                  'PetalLength': np.float32, 'PetalWidth': np.float32,
                  'Species': np.int32}
    train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0,
                        dtype=col_dtypes)
    train_x, train_y = train, train.pop(y_name)
    test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0,
                       dtype=col_dtypes)
    test_x, test_y = test, test.pop(y_name)
    return (train_x, train_y), (test_x, test_y)
"""
Explanation: Download training input data and define prediction input & output
End of explanation
"""
def get_model():
    """Build the iris classifier: 4 inputs -> two 10-unit ReLU hidden
    layers -> 3-way logits -> softmax over the 3 species."""
    model = keras.Sequential()
    model.add(keras.layers.Dense(10, input_shape=(4,),
                                 activation=tf.nn.relu, name="Dense_1"))
    model.add(keras.layers.Dense(10, activation=tf.nn.relu, name="Dense_2"))
    model.add(keras.layers.Dense(3, activation=None, name="logits"))
    model.add(keras.layers.Dense(3, activation=tf.nn.softmax, name="softmax"))
    return model
dnn_model = get_model()
dnn_model.compile(optimizer=tf.train.AdagradOptimizer(learning_rate=0.1),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_crossentropy'])
dnn_model.summary()
"""
Explanation: Define model (2 hidden layers with 10 neurons in each)
End of explanation
"""
tpu_model = tf.contrib.tpu.keras_to_tpu_model(
dnn_model,
strategy=tf.contrib.tpu.TPUDistributionStrategy(
tf.contrib.cluster_resolver.TPUClusterResolver(TF_MASTER)))
tpu_model.summary()
"""
Explanation: Creating a TPU model from a Keras Model
End of explanation
"""
# Fetch the data
(train_x, train_y), (test_x, test_y) = load_data()
# Train the model
tpu_model.fit(
train_x, train_y,
steps_per_epoch = steps_per_epoch,
epochs=epochs,
)
"""
Explanation: Training of the model on TPU
End of explanation
"""
tpu_model.evaluate(test_x, test_y,
steps = eval_steps)
"""
Explanation: Evaluation of the model
End of explanation
"""
tpu_model.save_weights('./DNN_TPU_1024.h5', overwrite=True)
"""
Explanation: Save the model
End of explanation
"""
COLUMNS_NAME=['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth']
data = pd.DataFrame(PREDICTION_INPUT_DATA, columns=COLUMNS_NAME)
print(data)
"""
Explanation: Prediction
Prediction data
End of explanation
"""
predictions = tpu_model.predict(data)
template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')
for pred_dict, expec in zip(predictions, PREDICTION_OUTPUT_DATA):
class_index = np.argmax(pred_dict)
class_probability = np.max(pred_dict)
print(template.format(SPECIES[class_index], 100*class_probability, expec))
"""
Explanation: Prediction on TPU
End of explanation
"""
cpu_model = tpu_model.sync_to_cpu()
cpu_predictions = cpu_model.predict(data)
template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')
for pred_dict, expec in zip(cpu_predictions, PREDICTION_OUTPUT_DATA):
class_index = np.argmax(pred_dict)
class_probability = np.max(pred_dict)
print(template.format(SPECIES[class_index], 100*class_probability, expec))
"""
Explanation: Prediction on CPU
End of explanation
"""
|
NervanaSystems/neon_course | answers/04 Writing a custom layer-ANSWER_KEY.ipynb | apache-2.0 | import neon
print neon.__version__
# use a GPU backend
from neon.backends import gen_backend
be = gen_backend('gpu', batch_size=128)
# load data
from neon.data import MNIST
mnist = MNIST(path='../data/')
train_set = mnist.train_iter
test_set = mnist.valid_iter
"""
Explanation: Building a new layer
This notebook will guide you through implementing a custom layer in neon, as well as a custom activation function. You will learn
* general interface for defining new layers
* using the nervana backend functions
Preamble
The first step is to set up our compute backend, and initialize our dataset.
End of explanation
"""
from neon.layers.layer import ParameterLayer, interpret_in_shape
# Subclass from ParameterLayer, which handles the allocation
# of memory buffers for the output activations, weights, and
# bprop deltas.
class MyLinear(ParameterLayer):
    """Fully-connected (linear) layer computing outputs = W * inputs.

    Subclasses ParameterLayer, which handles the allocation of the memory
    buffers for the output activations, the weights W, and the bprop deltas.
    """

    def __init__(self, nout, init, name=None):
        super(MyLinear, self).__init__(init, name, "Disabled")
        self.nout = nout  # number of output units
        # required attributes
        self.inputs = None  # fprop input, cached so bprop can compute dW
        self.in_shape = None # shape of the inputs to this layer
        self.out_shape = None # shape of the outputs from this layer

    def __str__(self):
        # NOTE(review): self.nin is only set by configure(); calling str()
        # before configuration would raise AttributeError -- confirm intent.
        return "Linear Layer '%s': %d inputs, %d outputs" % (
            self.name, self.nin, self.nout)

    def configure(self, in_obj):
        """
        Configure the layer's input shape and output shape attributes. This is
        required for allocating the output buffers.
        """
        super(MyLinear, self).configure(in_obj)
        # shape of the input is in (# input features, batch_size)
        (self.nin, self.nsteps) = interpret_in_shape(self.in_shape)
        # shape of the output is (# output units, batch_size)
        self.out_shape = (self.nout, self.nsteps)
        # if the shape of the weights has not been set yet, this layer's W
        # is a tensor of shape (# outputs, # inputs).
        if self.weight_shape is None:
            self.weight_shape = (self.nout, self.nin)
        return self

    # We use the superclass' allocate() method.
    # For a general layer, where you may have other memory allocations
    # needed for computations, you can implement allocate() with
    # your own variables.
    #
    # def allocate(self)

    # fprop function
    # * inference flag can be used to not store activations that may be unneeded
    # * beta scales any prior contents of the output buffer when accumulating
    #   (only honoured by the compound_dot variant below -- TODO confirm)
    def fprop(self, inputs, inference=False, beta=0.0):
        self.inputs = inputs
        # here we compute y = W*X inefficiently (element-wise, one output cell
        # at a time) using the backend functions.  Try substituting this with
        # the backend `compound_dot` function to see the speed-up from using
        # a custom kernel!
        for r in range(self.outputs.shape[0]):
            for c in range(self.outputs.shape[1]):
                self.outputs[r,c] = self.be.sum(self.be.multiply(self.W[r], self.inputs[:,c].T))
        # self.be.compound_dot(A=self.W, B=self.inputs, C=self.outputs, beta=beta)
        return self.outputs

    def bprop(self, error, alpha=1.0, beta=0.0):
        # to save you headache, we use the backend compound_dot function here
        # to compute the back-propagated deltas = W^T * error, then the
        # weight gradient dW = error * inputs^T.
        if self.deltas:
            self.be.compound_dot(A=self.W.T, B=error, C=self.deltas, alpha=alpha, beta=beta)
        self.be.compound_dot(A=error, B=self.inputs.T, C=self.dW)
        return self.deltas
"""
Explanation: Build your own layer
Instead of importing the neon supplied Affine Layer, we will instead build our own.
Note: Affine is actually a compound layer; it bundles a linear layer with a bias transform and an activation function. The Linear layer is what implements a fully connected layer.
First, lets build our own linear layer, called MyLinear, and then we will wrap that layer in a compound layer MyAffine.
There are several important components to a layer in neon:
* configure: during model initialization, this layer will receive the previous layer's object and use it to set this model's in_shape and out_shape attributes.
* allocate: after each layer's shape is configured, this layer's shape information will be used to allocate memory for the output activations from fprop.
* fprop: forward propagation. Should return a tensor with shape equal to the layer's out_shape attribute.
* bprop: backward propagation.
In the implementation below, fprop is implemented using element-wise operations. It will be very slow. Try replacing it with the neon backend implementation of compound_dot, such as in the bprop function.
End of explanation
"""
from neon.layers.layer import CompoundLayer
class MyAffine(CompoundLayer):
    """Compound layer bundling a MyLinear layer with an optional bias and
    activation function (mirrors neon's built-in Affine container).
    """

    def __init__(self, nout, init, bias=None,
                 batch_norm=False, activation=None, name=None):
        # NOTE(review): batch_norm is accepted but never forwarded to the
        # superclass or used below -- confirm whether it should be passed on.
        super(MyAffine, self).__init__(bias=bias, activation=activation, name=name)
        self.append(MyLinear(nout, init, name=name))
        # append the bias/activation layers registered with the superclass
        self.add_postfilter_layers()
"""
Explanation: Wrap the above layer in a container, which bundles an activation and batch normalization.
End of explanation
"""
from neon.transforms.transform import Transform
class MySoftmax(Transform):
    """
    SoftMax activation function. Ensures that the activation output sums to 1.
    """

    def __init__(self, name=None, epsilon=2**-23):
        """
        Class constructor.

        Arguments:
            name (string, optional): Name (default: none)
            epsilon (float, optional): Not used.
        """
        super(MySoftmax, self).__init__(name)
        self.epsilon = epsilon

    def __call__(self, x):
        """
        Implement the softmax function. The input has shape
        (# features, batch_size) and the desired output is
        (# features, batch_size), but where the features sum to 1.
        We use the numerically stable formula:

            f(x) = e^(x - max(x)) / sum(e^(x - max(x)))
        """
        # Build the shifted exponentials once and reuse the op-tree, instead
        # of recomputing max() and exp() twice as two separate expressions.
        shifted_exp = self.be.exp(x - self.be.max(x, axis=0))
        return self.be.reciprocal(self.be.sum(shifted_exp, axis=0)) * shifted_exp

    def bprop(self, x):
        """
        We take a shortcut here: the softmax derivative cancels out with the
        CrossEntropy term, so bprop is the identity factor 1.
        """
        return 1
"""
Explanation: Defining an activation function (transform)
We can understand more the backend functions by implementing our own softmax function.
End of explanation
"""
from neon.initializers import Gaussian
from neon.models import Model
from neon.transforms.activation import Rectlin
init_norm = Gaussian(loc=0.0, scale=0.01)
# assemble all of the pieces
layers = []
layers.append(MyAffine(nout=100, init=init_norm, activation=Rectlin()))
layers.append(MyAffine(nout=10, init=init_norm, activation=MySoftmax()))
# initialize model object
mlp = Model(layers=layers)
"""
Explanation: Putting together all of the pieces
The architecture here is the same as in the mnist_mlp.py example, instead here we use our own MyAffine layer and MySoftmax activation function.
End of explanation
"""
from neon.layers import GeneralizedCost
from neon.transforms import CrossEntropyMulti
from neon.optimizers import GradientDescentMomentum
from neon.callbacks.callbacks import Callbacks
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
optimizer = GradientDescentMomentum(0.1, momentum_coef=0.9)
callbacks = Callbacks(mlp, eval_set=test_set)
mlp.fit(train_set, optimizer=optimizer, num_epochs=10, cost=cost,
callbacks=callbacks)
"""
Explanation: Fit
Using Cross Entropy loss and Gradient Descent optimizer, train the model. This will be slow, because our fprop is inefficient. Replace the fprop function using the backend's compound_dot method!
End of explanation
"""
|
aborgher/Main-useful-functions-for-ML | Python_jupyter_utilities/pyHDF5.ipynb | gpl-3.0 | import h5py
import numpy as np
!rm mytestfile.hdf5
# create a new hdf5 file
f = h5py.File("mytestfile.hdf5", "w")
f.filename, f.name
"""
Explanation: h5py package to create HDF5 file
Link: http://docs.h5py.org/en/latest/mpi.html
An HDF5 file is a container for two kinds of objects:
- datasets, which are array-like collections of data,
- groups, which are folder-like containers that hold datasets and other groups.
where: Groups work like dictionaries, and datasets work like NumPy arrays
End of explanation
"""
# you can create group or dataset into the file
# Using existing data:
data = np.random.logistic(size=100)
dsetdata = f.create_dataset("dsetdata", data=data)
print(dsetdata)
# Creating the dataset without data
dsetname = f.create_dataset(
"dsetname", shape=(100, ), dtype='i') # i, f, etc..
print(dsetname)
dsetdata.dtype, dsetdata.shape, dsetdata.size
# check data in filename
for i in f.items():
print(i)
# support array-style slicing, example of read/write in this cell
dsetname[0] = 5
dsetname[5:15] = np.random.uniform(size=10,low=0, high=5)
dsetname[10:20:2]
dsetname.value, dsetname.name, dsetname.fillvalue, dsetname.shuffle
"""
Explanation: dataset
End of explanation
"""
# “HDF” stands for “Hierarchical Data Format”.
# Every object in an HDF5 file has a name, and they’re arranged in a POSIX-style hierarchy with /-separators
print(f.name, dsetname.name, dsetdata.name)
# create subgroup
grp = f.create_group("subgroup")
# create dataset from the group variable
dset2 = grp.create_dataset("another_dataset", (50, ), dtype='f')
dset2.name
# giving a full path, each group will be created
dset3 = f.create_dataset('subgroup2/dataset_three', (10,), dtype='i')
dset3.name
# get dataset using full path
dataset_three = f['subgroup2/dataset_three']
dataset_three
"subgroup/another_dataset" in f
# wrong way to check the entire tree structure
for name in f:
print(name)
# iterate over the file or group using visit() or visititems() which both take a callable
# Callback for h5py's Group.visit(): receives each member's name and prints it.
def printname(x):
    print(x)
f.visit(printname)
print('')
grp.visit(printname)
"""
Explanation: groups
End of explanation
"""
# you can store metadata right next to the data (groups and datasets) it describes (in a dictionary interface)
dsetname.attrs['descrizione'] = 'dati a caso'
dsetname.attrs['data'] = '04/04/2014'
dsetname.attrs['pippo'] = 150
'data' in dsetname.attrs
# see all metadata
for i in dsetname.attrs.items():
print(i)
f.close()
"""
Explanation: metadata
End of explanation
"""
!rm iris.hdf5
import pandas as pd
df = pd.read_csv('iris.csv')
df.to_hdf(path_or_buf='iris.hdf5', key='iris_', mode='w', format='fixed')
# fixed format: Fast writing/reading. Not appendable, nor searchable
# table format: Write as a PyTables Table structure which may perform worse
# but allows more flexible operations like searching or selecting subsets of the data
# Reopen the file with h5py and inspect how pandas stored the columns etc.
newf = h5py.File('iris.hdf5')
newf.visit(printname)
# First pass: print the dataset objects two levels deep
for i in newf.keys():
    for j in newf[i].keys():
        print(newf[i + '/' + j])
# Second pass: print each dataset's full path and its contents
for i in newf.keys():
    for j in newf[i].keys():
        print(newf[i + '/' + j].name)
        print(newf[i + '/' + j].value, end='\n\n')
newf.close()
# Read an hdf5 file straight into a pandas dataframe
df_hdf5 = pd.read_hdf('iris.hdf5')
df_hdf5.head()
## Append data to the previous data on the hdf5
df.to_hdf('iris.hdf5', 'data', append=True, format='table')
df.to_hdf('iris.hdf5', 'data', append=True, format='table') # since the format is table you can append/stack data
# To retrieve the data you have to use the key (here 'data' or 'iris_')
df_hdf5 = pd.read_hdf('iris.hdf5',key='data')
print(len(df_hdf5))
df_hdf5
"""
Explanation: From pandas dataframe to hdf5 and viceversa
End of explanation
"""
|
romil93/SentimentAnalysis-CSCI544-Fall2016 | romil/logistic_regression-with-imdb.ipynb | apache-2.0 | test_data_df.head()
"""
Explanation: And the test data.
End of explanation
"""
# Class balance: number of training rows per sentiment label
train_data_df.Sentiment.value_counts()
"""
Explanation: Let's count how many labels do we have for each sentiment class.
End of explanation
"""
import numpy as np
# Average number of words per sentence across the training texts
np.mean([len(s.split(" ")) for s in train_data_df.Text])
"""
Explanation: Finally, let's calculate the average number of words per sentence. We could do the following using a list comprehension with the number of words per sentence.
End of explanation
"""
import re, nltk
from sklearn.feature_extraction.text import CountVectorizer
from nltk.stem.porter import PorterStemmer
# Single shared Porter stemmer instance, reused by the tokenizer below
stemmer = PorterStemmer()
def stem_tokens(tokens, stemmer):
    """Return the stem of every token, preserving input order."""
    return [stemmer.stem(token) for token in tokens]
def tokenize(text):
    """Tokenizer for CountVectorizer: keep letters only, split into NLTK
    word tokens, then reduce each token to its Porter stem."""
    letters_only = re.sub("[^a-zA-Z]", " ", text)
    word_tokens = nltk.word_tokenize(letters_only)
    return stem_tokens(word_tokens, stemmer)
########
# Bag-of-words vectorizer: lowercases, strips English stop words, applies the
# stemming tokenizer above, and keeps only the 100 most frequent terms.
vectorizer = CountVectorizer(
    analyzer = 'word',
    tokenizer = tokenize,
    lowercase = True,
    stop_words = 'english',
    max_features = 100
)
# Fit on train + test together so both share the same vocabulary/columns
corpus_data_features = vectorizer.fit_transform(train_data_df.Text.tolist() + test_data_df.Text.tolist())
"""
Explanation: First we need to init the vectorizer. We need to remove puntuations, lowercase, remove stop words, and stem words. All these steps can be directly performed by CountVectorizer if we pass the right parameter values. We can do as follows.
End of explanation
"""
# Densify the sparse matrix; fine here because the vocabulary is capped at 100
corpus_data_features_nd = corpus_data_features.toarray()
corpus_data_features_nd.shape
# The learned vocabulary, one entry per feature column
vocab = vectorizer.get_feature_names()
print(vocab)
"""
Explanation: Numpy arrays are easy to work with, so convert the result to an array.
End of explanation
"""
# Total occurrences of each vocabulary term over the whole corpus
dist = np.sum(corpus_data_features_nd, axis=0)
for tag, count in zip(vocab, dist):
    print(count, tag)
"""
Explanation: We can also print the counts of each word in the vocabulary as follows.
End of explanation
"""
# NOTE: sklearn.cross_validation was deprecated in scikit-learn 0.18 and
# removed in 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
# remember that corpus_data_features_nd contains all of our original train and test data, so we need to exclude
# the unlabeled test entries
X_train, X_test, y_train, y_test = train_test_split(
    corpus_data_features_nd[0:len(train_data_df)],
    train_data_df.Sentiment,
    train_size=0.85,
    random_state=1234)
print(X_test[0])
"""
Explanation: A bag-of-words linear classifier
End of explanation
"""
from sklearn.linear_model import LogisticRegression
# Train a logistic-regression classifier on the bag-of-words features
log_model = LogisticRegression()
log_model = log_model.fit(X=X_train, y=y_train)
"""
Explanation: Now we are ready to train our classifier.
End of explanation
"""
# Predict hard class labels for the held-out evaluation split
y_pred = log_model.predict(X_test)
from sklearn.metrics import classification_report
# classification_report expects (y_true, y_pred) in that order
print(classification_report(y_test, y_pred))
"""
Explanation: Now we use the classifier to label our evaluation set. We can use either predict for classes or predict_proba for probabilities.
End of explanation
"""
# Re-train on ALL labeled training rows, then score the original test set.
log_model = LogisticRegression()
log_model = log_model.fit(X=corpus_data_features_nd[0:len(train_data_df)], y=train_data_df.Sentiment)
test_pred = log_model.predict(corpus_data_features_nd[len(train_data_df):])
actual_pred = test_data_df["Sentiment"].tolist()
# BUG FIX: classification_report takes (y_true, y_pred) in that order — the
# ground-truth labels must come first, as done in the earlier evaluation cell;
# swapping them transposes precision and recall per class.
print(classification_report(actual_pred, test_pred))
"""
Explanation: Finally, we can re-train our model with all the training data and use it for sentiment classification with the original (unlabeled) test set.
End of explanation
"""
|
intel-analytics/BigDL | python/nano/notebooks/pytorch/cifar10/nano-trainer-example.ipynb | apache-2.0 | from time import time
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
from pytorch_lightning import LightningModule, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from torch.optim.lr_scheduler import OneCycleLR
from torchmetrics.functional import accuracy
from bigdl.nano.pytorch.trainer import Trainer
from bigdl.nano.pytorch.vision import transforms
"""
Explanation: BigDL-Nano Resnet example on CIFAR10 dataset
This example illustrates how to apply bigdl-nano optimizations to an image recognition case based on the pytorch-lightning framework. The basic image recognition module is implemented with Lightning and trained on the CIFAR10 image recognition benchmark dataset.
End of explanation
"""
def prepare_data(data_path, batch_size, num_workers):
    """Build a CIFAR10 LightningDataModule.

    Training uses augmentation (random 32x32 crop with padding 4 and a
    horizontal flip) followed by normalization; validation and test use
    plain normalized tensors.
    """
    augment = transforms.Compose([
        transforms.RandomCrop(32, 4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        cifar10_normalization(),
    ])
    plain = transforms.Compose([
        transforms.ToTensor(),
        cifar10_normalization(),
    ])
    return CIFAR10DataModule(
        data_dir=data_path,
        batch_size=batch_size,
        num_workers=num_workers,
        train_transforms=augment,
        test_transforms=plain,
        val_transforms=plain,
    )
"""
Explanation: CIFAR10 Data Module
Import the existing data module from bolts and modify the train and test transforms.
You could access CIFAR10 for a view of the whole dataset.
End of explanation
"""
def create_model():
    """ResNet-18 adapted for CIFAR10: 10 output classes, a 3x3 stride-1
    first convolution, and the initial max-pool replaced by Identity
    (inputs are 32x32, not ImageNet's 224x224)."""
    net = torchvision.models.resnet18(pretrained=False, num_classes=10)
    net.conv1 = nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
    net.maxpool = nn.Identity()
    return net
"""
Explanation: Resnet
Modify the pre-existing Resnet architecture from TorchVision. The pre-existing architecture is based on ImageNet images (224x224) as input. So we need to modify it for CIFAR10 images (32x32).
End of explanation
"""
class LitResnet(LightningModule):
    """LightningModule wrapping the CIFAR-adapted ResNet-18.

    Trains with SGD + OneCycleLR and logs loss (and accuracy for eval
    stages) under train/val/test prefixes.
    """

    def __init__(self, learning_rate=0.05, steps_per_epoch=45000, batch_size=32):
        super().__init__()
        # Records learning_rate / steps_per_epoch / batch_size on self.hparams
        self.save_hyperparameters()
        self.model = create_model()

    def forward(self, x):
        # Log-probabilities over the 10 classes, for use with nll_loss
        return F.log_softmax(self.model(x), dim=1)

    def training_step(self, batch, batch_idx):
        images, labels = batch
        loss = F.nll_loss(self(images), labels)
        self.log("train_loss", loss)
        return loss

    def evaluate(self, batch, stage=None):
        """Shared val/test step: compute loss and accuracy, log under `stage`."""
        images, labels = batch
        log_probs = self(images)
        loss = F.nll_loss(log_probs, labels)
        predictions = torch.argmax(log_probs, dim=1)
        acc = accuracy(predictions, labels)
        if stage:
            self.log(f"{stage}_loss", loss, prog_bar=True)
            self.log(f"{stage}_acc", acc, prog_bar=True)

    def validation_step(self, batch, batch_idx):
        self.evaluate(batch, "val")

    def test_step(self, batch, batch_idx):
        self.evaluate(batch, "test")

    def configure_optimizers(self):
        sgd = torch.optim.SGD(
            self.parameters(),
            lr=self.hparams.learning_rate,
            momentum=0.9,
            weight_decay=5e-4,
        )
        # Optimizer steps per epoch = samples per epoch // batch size
        batches_per_epoch = self.hparams.steps_per_epoch // self.hparams.batch_size
        one_cycle = OneCycleLR(
            sgd,
            0.1,
            epochs=self.trainer.max_epochs,
            steps_per_epoch=batches_per_epoch,
        )
        # Step the LR scheduler every optimizer step, not every epoch
        return {
            "optimizer": sgd,
            "lr_scheduler": {"scheduler": one_cycle, "interval": "step"},
        }
# Fix all RNG seeds for reproducible runs
seed_everything(7)
PATH_DATASETS = os.environ.get("PATH_DATASETS", ".")
BATCH_SIZE = 32
NUM_WORKERS = 0
data_module = prepare_data(PATH_DATASETS, BATCH_SIZE, NUM_WORKERS)
# Environment overrides let CI shrink the run (fractions of batches / epochs)
LIMIT_TRAIN_BATCHES = float(os.environ.get('LIMIT_TRAIN_BATCHES', 1.0))
LIMIT_VAL_BATCHES = float(os.environ.get('LIMIT_VAL_BATCHES', 1.0))
EPOCHS = int(os.environ.get('FIT_EPOCHS', 30))
"""
Explanation: Lightning Module
Check out the configure_optimizers method to use custom Learning Rate schedulers. The OneCycleLR with SGD will get you to around 92-93% accuracy in 20-30 epochs and 93-94% accuracy in 40-50 epochs. Feel free to experiment with different LR schedules from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
End of explanation
"""
model = LitResnet(learning_rate=0.05)
model.datamodule = data_module
# Keep only the best checkpoint, measured by validation loss
checkpoint_callback = ModelCheckpoint(dirpath="checkpoints/", save_top_k=1, monitor="val_loss", filename="renet18_single_none")
# Baseline configuration: a single process with IPEX disabled
basic_trainer = Trainer(num_processes = 1,
                    use_ipex = False,
                    progress_bar_refresh_rate=10,
                    max_epochs=EPOCHS,
                    limit_train_batches=LIMIT_TRAIN_BATCHES,
                    limit_val_batches=LIMIT_VAL_BATCHES,
                    logger=TensorBoardLogger("lightning_logs/", name="basic"),
                    callbacks=[LearningRateMonitor(logging_interval="step"), checkpoint_callback])
start = time()
basic_trainer.fit(model, datamodule=data_module)
basic_fit_time = time() - start  # wall-clock training time for the summary table
outputs = basic_trainer.test(model, datamodule=data_module)
basic_acc = outputs[0]['test_acc'] * 100  # accuracy as a percentage
basic_trainer.save_checkpoint("checkpoints/model.ckpt")
"""
Explanation: Train
Use Trainer from bigdl.nano.pytorch.trainer for BigDL-Nano pytorch.
This Trainer extends PyTorch Lightning Trainer by adding various options to accelerate pytorch training.
:param num_processes: number of processes in distributed training. default: 4.
:param use_ipex: whether we use ipex as accelerator for trainer. default: True.
:param cpu_for_each_process: A list of length `num_processes`, each containing a list of
indices of cpus each process will be using. default: None, and the cpu will be
automatically and evenly distributed among processes.
The next few cells show examples of different parameters.
Single Process
End of explanation
"""
model = LitResnet(learning_rate=0.05)
model.datamodule = data_module
# save_weights_only keeps the checkpoint small (no optimizer state)
checkpoint_callback = ModelCheckpoint(dirpath="checkpoints/", save_top_k=1, monitor="val_loss", filename="renet18_single_ipex", save_weights_only=True)
# Single process accelerated with IPEX (Intel Extension for PyTorch)
single_ipex_trainer = Trainer(num_processes=1,
                    use_ipex = True,
                    distributed_backend="subprocess",
                    progress_bar_refresh_rate=10,
                    max_epochs=EPOCHS,
                    limit_train_batches=LIMIT_TRAIN_BATCHES,
                    limit_val_batches=LIMIT_VAL_BATCHES,
                    logger=TensorBoardLogger("lightning_logs/", name="single_ipex"),
                    callbacks=[LearningRateMonitor(logging_interval="step"), checkpoint_callback])
start = time()
single_ipex_trainer.fit(model, datamodule=data_module)
single_ipex_fit_time = time() - start  # wall-clock training time for the summary table
outputs = single_ipex_trainer.test(model, datamodule=data_module)
single_ipex_acc = outputs[0]['test_acc'] * 100  # accuracy as a percentage
"""
Explanation: Single Process with IPEX
End of explanation
"""
# Two processes: larger effective batch, so learning_rate/batch_size are doubled
model = LitResnet(learning_rate=0.1, batch_size=64)
model.datamodule = data_module
checkpoint_callback = ModelCheckpoint(dirpath="checkpoints/", save_top_k=1, monitor="val_loss", filename="renet18_multi_ipex", save_weights_only=True)
multi_ipex_trainer = Trainer(num_processes=2,
                    use_ipex=True,
                    distributed_backend="subprocess",
                    progress_bar_refresh_rate=10,
                    max_epochs=EPOCHS,
                    limit_train_batches=LIMIT_TRAIN_BATCHES,
                    limit_val_batches=LIMIT_VAL_BATCHES,
                    logger=TensorBoardLogger("lightning_logs/", name="multi_ipx"),
                    callbacks=[LearningRateMonitor(logging_interval="step"), checkpoint_callback])
start = time()
multi_ipex_trainer.fit(model, datamodule=data_module)
multi_ipex_fit_time = time() - start  # wall-clock training time for the summary table
outputs = multi_ipex_trainer.test(model, datamodule=data_module)
multi_ipex_acc = outputs[0]['test_acc'] * 100  # accuracy as a percentage
# Markdown-style comparison table of fit time and accuracy per configuration
template = """
| Precision | Fit Time(s) | Accuracy(%) |
| Basic | {:5.2f} | {:5.2f} |
| Single With Ipex | {:5.2f} | {:5.2f} |
| Multiple With Ipex| {:5.2f} | {:5.2f} |
"""
summary = template.format(
    basic_fit_time, basic_acc,
    single_ipex_fit_time, single_ipex_acc,
    multi_ipex_fit_time, multi_ipex_acc
)
print(summary)
"""
Explanation: Multiple Processes with IPEX
End of explanation
"""
|
toddstrader/deep-learning | tv-script-generation/dlnd_tv_script_generation.ipynb | mit | """
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
# (drops the first 81 characters of the file)
text = text[81:]
"""
Explanation: TV Script Generation
In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.
Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
End of explanation
"""
# (start, end) line indices of the raw text to preview below
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
# Scenes are separated by blank lines, sentences by single newlines
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    # Sort the vocabulary so the word -> id assignment is deterministic
    vocab = sorted(set(text))
    vocab_to_int = {word: idx for idx, word in enumerate(vocab)}
    # Inverse mapping: id -> word
    int_to_vocab = {idx: word for word, idx in vocab_to_int.items()}
    return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
"""
Explanation: Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call vocab_to_int
- Dictionary to go from the id to word, we'll call int_to_vocab
Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab)
End of explanation
"""
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
    # Tokens are wrapped in '||' so they can never be confused with real words
    return {
        '.': '||Period||',
        ',': '||Comma||',
        '"': '||Quotation_Mark||',
        ';': '||Semicolon||',
        '!': '||Exclamation_Mark||',
        '?': '||Question_Mark||',
        '(': '||Left_Parentheses||',
        ')': '||Right_Parentheses||',
        '--': '||Dash||',
        '\n': '||Return||',
    }
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
"""
Explanation: Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( . )
- Comma ( , )
- Quotation Mark ( " )
- Semicolon ( ; )
- Exclamation mark ( ! )
- Question mark ( ? )
- Left Parentheses ( ( )
- Right Parentheses ( ) )
- Dash ( -- )
- Return ( \n )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around it. This separates each symbol as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||".
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
# Restore the preprocessed corpus and lookup tables saved earlier
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version (this notebook targets the TF 1.x API)
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU — training still works on CPU, only slower
if not tf.test.gpu_device_name():
    warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below:
- get_inputs
- get_init_cell
- get_embed
- build_rnn
- build_nn
- get_batches
Check the Version of TensorFlow and Access to GPU
End of explanation
"""
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
    # Batch size and sequence length stay dynamic (None) so the same graph
    # serves both training and generation; the names are required so the
    # tensors can be recovered by name after the graph is reloaded.
    inputs = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    return inputs, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
"""
Explanation: Input
Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the TF Placeholder name parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple (Input, Targets, LearningRate)
End of explanation
"""
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
    num_layers = 2  # two stacked LSTM layers
    cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(num_layers)])
    # Name the initial state so generation code can fetch it by name
    initial_state = tf.identity(cell.zero_state(batch_size, tf.float32), name='initial_state')
    return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
"""
Explanation: Build RNN Cell and Initialize
Stack one or more BasicLSTMCells in a MultiRNNCell.
- The Rnn size should be set using rnn_size
- Initalize Cell State using the MultiRNNCell's zero_state() function
- Apply the name "initial_state" to the initial state using tf.identity()
Return the cell and initial state in the following tuple (Cell, InitialState)
End of explanation
"""
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
    # Trainable embedding matrix, initialized uniformly in [-1, 1)
    embedding = tf.Variable(tf.random_uniform([vocab_size, embed_dim], -1, 1))
    # Replace each word id with its embedding vector
    return tf.nn.embedding_lookup(embedding, input_data)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
"""
Explanation: Word Embedding
Apply embedding to input_data using TensorFlow. Return the embedded sequence.
End of explanation
"""
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    # Name the final state so the generation loop can fetch it by name
    final_state = tf.identity(state, name='final_state')
    return outputs, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
"""
Explanation: Build RNN
You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN.
- Build the RNN using the tf.nn.dynamic_rnn()
- Apply the name "final_state" to the final state using tf.identity()
Return the outputs and final_state state in the following tuple (Outputs, FinalState)
End of explanation
"""
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
    embed = get_embed(input_data, vocab_size, embed_dim)
    outputs, final_state = build_rnn(cell, embed)
    # Fully connected projection onto the vocabulary with a LINEAR activation
    # (softmax is applied later when computing probabilities/loss)
    logits = tf.layers.dense(outputs, vocab_size, activation=None)
    return logits, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.
- Build RNN using cell and your build_rnn(cell, inputs) function.
- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState)
End of explanation
"""
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array with shape
             (number of batches, 2, batch size, sequence length)
    """
    # Keep only as many words as fill complete batches; drop the remainder.
    n_batches = len(int_text) // (batch_size * seq_length)
    if n_batches == 0:
        return np.array([])
    n_words = n_batches * batch_size * seq_length
    inputs = np.array(int_text[:n_words])
    # Targets are the inputs shifted left by one word.
    targets = np.roll(inputs, -1)
    if n_words < len(int_text):
        # Use the first dropped word instead of wrapping around to int_text[0].
        targets[-1] = int_text[n_words]
    # Lay the words out as batch_size parallel streams, then cut each stream
    # into n_batches chunks of seq_length words.
    x_batches = np.split(inputs.reshape(batch_size, -1), n_batches, axis=1)
    y_batches = np.split(targets.reshape(batch_size, -1), n_batches, axis=1)
    return np.array(list(zip(x_batches, y_batches)))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
"""
Explanation: Batches
Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements:
- The first element is a single batch of input with the shape [batch size, sequence length]
- The second element is a single batch of targets with the shape [batch size, sequence length]
If you can't fill the last batch with enough data, drop the last batch.
For exmple, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3) would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2 3], [ 7 8 9]],
# Batch of targets
[[ 2 3 4], [ 8 9 10]]
],
# Second Batch
[
# Batch of Input
[[ 4 5 6], [10 11 12]],
# Batch of targets
[[ 5 6 7], [11 12 13]]
]
]
```
End of explanation
"""
# Hyperparameters were left as None placeholders; the graph-building and
# training cells below fail until they are set. Reasonable defaults:
# Number of Epochs
num_epochs = 50
# Batch Size
batch_size = 128
# RNN Size (hidden units per LSTM layer)
rnn_size = 256
# Embedding Dimension Size
embed_dim = 300
# Sequence Length (words fed to the RNN per step)
seq_length = 16
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 50
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set num_epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set embed_dim to the size of the embedding.
Set seq_length to the length of sequence.
Set learning_rate to the learning rate.
Set show_every_n_batches to the number of batches the neural network should print progress.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
    vocab_size = len(int_to_vocab)
    input_text, targets, lr = get_inputs()
    # Runtime shape of the input: [batch_size, seq_length]
    input_data_shape = tf.shape(input_text)
    cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
    logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
    # Probabilities for generating words
    probs = tf.nn.softmax(logits, name='probs')
    # Loss function: cross-entropy averaged over all timesteps, equal weights
    cost = seq2seq.sequence_loss(
        logits,
        targets,
        tf.ones([input_data_shape[0], input_data_shape[1]]))
    # Optimizer
    optimizer = tf.train.AdamOptimizer(lr)
    # Gradient Clipping to [-1, 1] to avoid exploding gradients in the RNN
    gradients = optimizer.compute_gradients(cost)
    capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
    train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())
    for epoch_i in range(num_epochs):
        # Reset the RNN state at the start of every epoch
        state = sess.run(initial_state, {input_text: batches[0][0]})
        for batch_i, (x, y) in enumerate(batches):
            feed = {
                input_text: x,
                targets: y,
                initial_state: state,
                lr: learning_rate}
            # Carry the final state over to the next batch within the epoch
            train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
            # Show every <show_every_n_batches> batches
            if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
                print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
                    epoch_i,
                    batch_i,
                    len(batches),
                    train_loss))
    # Save Model
    saver = tf.train.Saver()
    saver.save(sess, save_dir)
    print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
# (seq_length and the model save directory are needed again at generation time)
helper.save_params((seq_length, save_dir))
"""
Explanation: Save Parameters
Save seq_length and save_dir for generating a new TV script.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
# Reload the vocabulary mappings and the saved (seq_length, save_dir) pair
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    # The ':0' suffix selects the first output of each named op.
    input_tensor = loaded_graph.get_tensor_by_name('input:0')
    initial_state_tensor = loaded_graph.get_tensor_by_name('initial_state:0')
    final_state_tensor = loaded_graph.get_tensor_by_name('final_state:0')
    probs_tensor = loaded_graph.get_tensor_by_name('probs:0')
    return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
"""
Explanation: Implement Generate Functions
Get Tensors
Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
End of explanation
"""
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilites of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
    # Sample from the distribution instead of taking the argmax, so the
    # generated script does not repeat the single most likely word forever.
    # Assumes int_to_vocab keys 0..len-1 align with probability indices.
    word_ids = list(int_to_vocab.keys())
    chosen = np.random.choice(word_ids, p=probabilities)
    return int_to_vocab[chosen]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
"""
Explanation: Choose Word
Implement the pick_word() function to select the next word using probabilities.
End of explanation
"""
# Number of words to generate
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)
    # Get Tensors from loaded model
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
    # Sentences generation setup
    gen_sentences = [prime_word + ':']
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
    # Generate sentences
    for n in range(gen_length):
        # Dynamic Input: feed at most the last seq_length generated words
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])
        # Get Prediction, carrying the RNN state across iterations
        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})
        pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
        gen_sentences.append(pred_word)
    # Remove tokens: turn '||period||' etc. back into real punctuation
    tv_script = ' '.join(gen_sentences)
    for key, token in token_dict.items():
        # NOTE(review): `ending` is computed but never used below
        ending = ' ' if key in ['\n', '(', '"'] else ''
        tv_script = tv_script.replace(' ' + token.lower(), key)
    tv_script = tv_script.replace('\n ', '\n')
    tv_script = tv_script.replace('( ', '(')
    print(tv_script)
"""
Explanation: Generate TV Script
This will generate the TV script for you. Set gen_length to the length of TV script you want to generate.
End of explanation
"""
|
tensorflow/docs | site/en/r1/tutorials/distribute/training_loops.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2019 The TensorFlow Authors.
End of explanation
"""
# Import TensorFlow (v1 compatibility API)
import tensorflow.compat.v1 as tf
# Helper libraries
import numpy as np
import os
# Report the TensorFlow version actually in use
print(tf.__version__)
"""
Explanation: tf.distribute.Strategy with Training Loops
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r1/tutorials/distribute/training_loops.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r1/tutorials/distribute/training_loops.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
Note: This is an archived TF1 notebook. These are configured
to run in TF2's
compatibility mode
but will run in TF1 as well. To use TF1 in Colab, use the
%tensorflow_version 1.x
magic.
This tutorial demonstrates how to use tf.distribute.Strategy with custom training loops. We will train a simple CNN model on the fashion MNIST dataset. The fashion MNIST dataset contains 60000 train images of size 28 x 28 and 10000 test images of size 28 x 28.
We are using custom training loops to train our model because they give us flexibility and a greater control on training. Moreover, it is easier to debug the model and the training loop.
End of explanation
"""
# Download Fashion-MNIST: 60k train / 10k test grayscale 28x28 images.
fashion_mnist = tf.keras.datasets.fashion_mnist

(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

# Append a trailing channel axis -> shape (28, 28, 1). The first layer of
# the model is a convolution, which needs (height, width, channels) per
# example; the batch dimension is added later by the input pipeline.
train_images = np.expand_dims(train_images, axis=-1)
test_images = np.expand_dims(test_images, axis=-1)

# Rescale pixel values from [0, 255] into [0, 1] as float32.
train_images = train_images / np.float32(255)
test_images = test_images / np.float32(255)

# Integer class labels, as expected by the sparse-softmax loss.
train_labels = train_labels.astype(np.int64)
test_labels = test_labels.astype(np.int64)
"""
Explanation: Download the fashion mnist dataset
End of explanation
"""
# If the list of devices is not specified in the
# `tf.distribute.MirroredStrategy` constructor, it will be auto-detected.
# Each detected device becomes one replica holding a mirrored copy of the
# variables.
strategy = tf.distribute.MirroredStrategy()
print ('Number of devices: {}'.format(strategy.num_replicas_in_sync))
"""
Explanation: Create a strategy to distribute the variables and the graph
How does tf.distribute.MirroredStrategy strategy work?
All the variables and the model graph is replicated on the replicas.
Input is evenly distributed across the replicas.
Each replica calculates the loss and gradients for the input it received.
The gradients are synced across all the replicas by summing them.
After the sync, the same update is made to the copies of the variables on each replica.
Note: You can put all the code below inside a single scope. We are dividing it into several code cells for illustration purposes.
End of explanation
"""
BUFFER_SIZE = len(train_images)  # shuffle over the entire training set
BATCH_SIZE_PER_REPLICA = 64
# Global batch size scales with the number of replicas running in sync.
BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
EPOCHS = 10
"""
Explanation: Setup input pipeline
When training a model with multiple GPUs, you can use the extra computing power effectively by increasing the batch size. In general, use the largest batch size that fits the GPU memory, and tune the learning rate accordingly.
End of explanation
"""
# Shuffle and batch the training data, then let the strategy split every
# global batch evenly across the replicas.
train_dataset = tf.data.Dataset.from_tensor_slices(
    (train_images, train_labels)).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
train_ds = strategy.experimental_distribute_dataset(train_dataset)

# Test data is batched but not shuffled.
test_dataset = tf.data.Dataset.from_tensor_slices(
    (test_images, test_labels)).batch(BATCH_SIZE)
test_ds = strategy.experimental_distribute_dataset(test_dataset)
"""
Explanation: tf.distribute.Strategy.experimental_distribute_dataset evenly distributes the dataset across all the replicas.
End of explanation
"""
# Create the model and optimizer inside the strategy scope so their
# variables are mirrored across all replicas.
with strategy.scope():
  model = tf.keras.Sequential([
      tf.keras.layers.Conv2D(32, 3, activation='relu',
                             input_shape=(28, 28, 1)),
      tf.keras.layers.MaxPooling2D(),
      tf.keras.layers.Conv2D(64, 3, activation='relu'),
      tf.keras.layers.MaxPooling2D(),
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(64, activation='relu'),
      tf.keras.layers.Dense(10, activation='softmax')
  ])

  optimizer = tf.train.GradientDescentOptimizer(0.001)
"""
Explanation: Model Creation
Create a model using tf.keras.Sequential. You can also use the Model Subclassing API to do this.
End of explanation
"""
with strategy.scope():
  def train_step(dist_inputs):
    """Run one distributed training step and return the combined loss."""
    def step_fn(inputs):
      # Per-replica work: forward pass, per-example loss averaged over the
      # GLOBAL batch size (not the per-replica size -- see explanation
      # above), then one optimizer update.
      images, labels = inputs
      logits = model(images)
      cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=logits, labels=labels)
      loss = tf.nn.compute_average_loss(cross_entropy, global_batch_size=BATCH_SIZE)
      train_op = optimizer.minimize(loss)
      # Tie the returned loss to the update op so that fetching the loss
      # also applies the gradients (TF1 graph mode).
      with tf.control_dependencies([train_op]):
        return tf.identity(loss)

    per_replica_losses = strategy.run(
        step_fn, args=(dist_inputs,))
    # Summing the per-replica losses gives the global-batch average,
    # because each replica already divided by the global batch size.
    mean_loss = strategy.reduce(
        tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
    return mean_loss
with strategy.scope():
  # Build the graph pieces once: an initializable iterator over the
  # distributed dataset, initializer ops, and the training/loss op.
  train_iterator = train_ds.make_initializable_iterator()
  iterator_init = train_iterator.initializer
  var_init = tf.global_variables_initializer()
  loss = train_step(next(train_iterator))

with tf.Session() as sess:
  sess.run([var_init])
  for epoch in range(EPOCHS):
    # Re-initialize the iterator so each epoch restarts the dataset.
    sess.run([iterator_init])
    for step in range(10000):
      # NOTE(review): sess.run(loss) -- the op that actually trains -- is
      # only executed on the printed steps (every 1000th); confirm this
      # sparse execution is intended rather than training every step.
      if step % 1000 == 0:
        print('Epoch {} Step {} Loss {:.4f}'.format(epoch+1,
                                                    step,
                                                    sess.run(loss)))
"""
Explanation: Define the loss function
Normally, on a single machine with 1 GPU/CPU, loss is divided by the number of examples in the batch of input.
So, how should the loss be calculated when using a tf.distribute.Strategy?
For an example, let's say you have 4 GPU's and a batch size of 64. One batch of input is distributed
across the replicas (4 GPUs), each replica getting an input of size 16.
The model on each replica does a forward pass with its respective input and calculates the loss. Now, instead of dividing the loss by the number of examples in its respective input (BATCH_SIZE_PER_REPLICA = 16), the loss should be divided by the GLOBAL_BATCH_SIZE (64).
Why do this?
This needs to be done because after the gradients are calculated on each replica, they are synced across the replicas by summing them.
How to do this in TensorFlow?
* If you're writing a custom training loop, as in this tutorial, you should sum the per example losses and divide the sum by the GLOBAL_BATCH_SIZE:
scale_loss = tf.reduce_sum(loss) * (1. / GLOBAL_BATCH_SIZE) or you can use tf.nn.compute_average_loss which takes the per example loss,
optional sample weights, and GLOBAL_BATCH_SIZE as arguments and returns the scaled loss.
If you are using regularization losses in your model then you need to scale
the loss value by number of replicas. You can do this by using the tf.nn.scale_regularization_loss function.
Using tf.reduce_mean is not recommended. Doing so divides the loss by actual per replica batch size which may vary step to step.
This reduction and scaling is done automatically in keras model.compile and model.fit
If using tf.keras.losses classes, the loss reduction needs to be explicitly specified to be one of NONE or SUM. AUTO and SUM_OVER_BATCH_SIZE are disallowed when used with tf.distribute.Strategy. AUTO is disallowed because the user should explicitly think about what reduction they want to make sure it is correct in the distributed case. SUM_OVER_BATCH_SIZE is disallowed because currently it would only divide by per replica batch size, and leave the dividing by number of replicas to the user, which might be easy to miss. So instead we ask the user do the reduction themselves explicitly.
Training loop
End of explanation
"""
|
tensorflow/docs-l10n | site/zh-cn/agents/tutorials/2_environments_tutorial.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2021 The TF-Agents Authors.
End of explanation
"""
!pip install "gym>=0.21.0"
!pip install tf-agents
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf
import numpy as np
from tf_agents.environments import py_environment
from tf_agents.environments import tf_environment
from tf_agents.environments import tf_py_environment
from tf_agents.environments import utils
from tf_agents.specs import array_spec
from tf_agents.environments import wrappers
from tf_agents.environments import suite_gym
from tf_agents.trajectories import time_step as ts
"""
Explanation: 环境
<table class="tfo-notebook-buttons" align="left">
<td> <a target="_blank" href="https://tensorflow.google.cn/agents/tutorials/2_environments_tutorial"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png">在 tensorflow.google.cn 上查看</a>
</td>
<td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/agents/tutorials/2_environments_tutorial.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png">在 Google Colab 运行</a>
</td>
<td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/agents/tutorials/2_environments_tutorial.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png">在 Github 上查看源代码</a>
</td>
<td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/agents/tutorials/2_environments_tutorial.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png">下载笔记本</a> </td>
</table>
简介
强化学习 (RL) 的目标是设计可通过与环境交互进行学习的代理。在标准 RL 设置中,代理在每个时间步骤都会收到一个观测值并选择一个操作。该操作将应用于环境,而环境会返回奖励和新的观测值。代理会训练策略以选择合适的操作,旨在使奖励总和(即回报)最大化。
在 TF-Agents 中,可以使用 Python 或 TensorFlow 实现环境。Python 环境通常更易于实现、理解和调试,但 TensorFlow 环境则更为高效并且支持自然并行化。最常见的工作流是在 Python 中实现环境,然后使用我们的包装器之一将其自动转换为 TensorFlow。
让我们首先看一下 Python 环境。TensorFlow 环境采用非常相似的 API。
设置
如果尚未安装 TF-Agents 或 Gym,请运行以下命令:
End of explanation
"""
class PyEnvironment(object):
  """Illustrative sketch of the TF-Agents Python environment interface."""

  def reset(self):
    """Return initial_time_step."""
    self._current_time_step = self._reset()
    return self._current_time_step

  def step(self, action):
    """Apply action and return new time_step."""
    # First call implicitly resets the environment.
    if self._current_time_step is None:
      return self.reset()

    self._current_time_step = self._step(action)
    return self._current_time_step

  def current_time_step(self):
    # Cached TimeStep produced by the last reset()/step().
    return self._current_time_step

  def time_step_spec(self):
    """Return time_step_spec."""

  # Subclasses override the hooks below, not reset()/step() themselves.
  @abc.abstractmethod
  def observation_spec(self):
    """Return observation_spec."""

  @abc.abstractmethod
  def action_spec(self):
    """Return action_spec."""

  @abc.abstractmethod
  def _reset(self):
    """Return initial_time_step."""

  @abc.abstractmethod
  def _step(self, action):
    """Apply action and return new time_step."""
"""
Explanation: Python 环境
Python 环境的 step(action) -> next_time_step 方法可将操作应用于环境,并返回有关下一步的以下信息:
observation:此为环境状态的一部分,可供代理观测以选择下一步的操作。
reward:代理会进行学习,目标是实现多个步骤奖励总和的最大化。
step_type:与环境的交互通常是序列/片段的一部分。例如,下象棋时多次移动棋子。step_type 可以是 FIRST、MID 或 LAST 之一,分别指示该时间步骤是序列中的第一步、中间步或最后一步。
discount:此为一个浮点数,表示下一个时间步骤的奖励相对于当前时间步骤的奖励的权重。
它们被分组到一个命名元组 TimeStep(step_type, reward, discount, observation)。
environments/py_environment.PyEnvironment 内包含了所有 python 环境必须实现的接口。主要方法为:
End of explanation
"""
# Load CartPole through the Gym suite wrapper and inspect its specs.
environment = suite_gym.load('CartPole-v0')
print('action_spec:', environment.action_spec())
print('time_step_spec.observation:', environment.time_step_spec().observation)
print('time_step_spec.step_type:', environment.time_step_spec().step_type)
print('time_step_spec.discount:', environment.time_step_spec().discount)
print('time_step_spec.reward:', environment.time_step_spec().reward)
"""
Explanation: 除了 step() 方法外,环境还提供了一个 reset() 方法,该方法可以启动新的序列并提供初始 TimeStep。不必显式调用 reset 方法。我们假定在片段结束或首次调用 step() 时环境均会自动重置。
请注意,子类不会直接实现 step() 或 reset()。相反,它们会重写 _step() 和 _reset() 方法。这些方法返回的时间步骤将通过 current_time_step() 缓存和公开。
observation_spec 和 action_spec 方法会返回一组 (Bounded)ArraySpecs 嵌套,分别描述观测值和操作的名称、形状、数据类型和范围。
我们在 TF-Agents 中反复提及嵌套,其定义为由列表、元组、命名元组或字典组成的任何树状结构。这些内容可以任意组合以保持观测值和操作的结构。我们发现,对于包含许多观测值和操作的更复杂环境而言,这种结构非常实用。
使用标准环境
TF Agents 针对许多标准环境(如 OpenAI Gym、DeepMind-control 和 Atari)内置了包装器,因此它们支持我们的 py_environment.PyEnvironment 接口。这些包装的环境可以使用我们的环境套件轻松加载。让我们通过 OpenAI Gym 加载 CartPole 环境,并查看操作和 time_step_spec。
End of explanation
"""
# Apply the same action (1) at every step until the episode terminates.
action = np.array(1, dtype=np.int32)

time_step = environment.reset()
print(time_step)

while not time_step.is_last():
  time_step = environment.step(action)
  print(time_step)
"""
Explanation: 可以看到, 环境所预期的操作类型为 [0, 1] 区间内的 int64,当观测值为长度等于 4 的 float32 向量且折扣因子为 [0.0, 1.0] 区间内的 float32 时会返回 TimeSteps。现在,让我们尝试对整个片段采取固定操作 (1,)。
End of explanation
"""
class CardGameEnv(py_environment.PyEnvironment):
  """A simplified blackjack-style environment.

  Action 0 draws a card valued 1..10; action 1 ends the round. The
  observation is the running card total. The terminal reward is
  ``total - 21`` when the total did not bust (<= 21), otherwise ``-21``.
  """

  def __init__(self):
    self._action_spec = array_spec.BoundedArraySpec(
        shape=(), dtype=np.int32, minimum=0, maximum=1, name='action')
    self._observation_spec = array_spec.BoundedArraySpec(
        shape=(1,), dtype=np.int32, minimum=0, name='observation')
    self._state = 0
    self._episode_ended = False

  def action_spec(self):
    return self._action_spec

  def observation_spec(self):
    return self._observation_spec

  def _reset(self):
    self._state = 0
    self._episode_ended = False
    return ts.restart(np.array([self._state], dtype=np.int32))

  def _step(self, action):
    # A step taken after the episode ended ignores the action and simply
    # begins a new episode.
    if self._episode_ended:
      return self.reset()

    if action == 1:
      # The agent chose to stop: terminate this round.
      self._episode_ended = True
    elif action == 0:
      # Draw one card and add it to the running total.
      self._state += np.random.randint(1, 11)
    else:
      raise ValueError('`action` should be 0 or 1.')

    if not self._episode_ended and self._state < 21:
      # Round continues: no reward yet, undiscounted transition.
      return ts.transition(
          np.array([self._state], dtype=np.int32), reward=0.0, discount=1.0)

    # Round is over (agent stopped or total reached 21+): score the hand.
    reward = self._state - 21 if self._state <= 21 else -21
    return ts.termination(np.array([self._state], dtype=np.int32), reward)
"""
Explanation: 创建自己的 Python 环境
对于许多客户而言,一个常见用例是采用 TF-Agents 中的一个标准代理(请参见 agents/)解决他们的问题。为此,客户需要将问题视为环境。那么,让我们看一下如何在 Python 中实现环境。
假设我们要训练一个代理来玩以下纸牌游戏(受 21 点玩法启发):
使用无限张数字为 1 到 10 的纸牌进行游戏。
代理每个回合可以做两件事:随机抽取一张新的纸牌,或者停止当前回合。
目标是在回合结束时使您的纸牌上数字的总和尽可能接近 21,但不大于 21。
代表游戏的环境可能如下所示:
操作:有 2 个操作。操作 0 为抽取一张新的纸牌;操作 1 为终止当前回合。
观测值:当前回合的纸牌上数字的总和。
奖励:目标是尽可能接近 21 但不超过 21,因此我们可以在回合结束时使用以下奖励实现这一目标:sum_of_cards - 21 if sum_of_cards <= 21, else -21
End of explanation
"""
environment = CardGameEnv()
# Drive the environment with random actions for 5 episodes; raises if any
# TimeStep violates the declared specs.
utils.validate_py_environment(environment, episodes=5)
"""
Explanation: 让我们确保已正确地定义了上述环境。创建自己的环境时,您必须确保生成的观测值和 time_step 符合规范中定义的正确形状和类型。这些内容用于生成 TensorFlow 计算图,因此如有差错,可能会造成难以调试的问题。
为了验证我们的环境,我们将使用随机策略来生成操作,并将迭代 5 个片段以确保按预期进行。如果我们收到的 time_step 不符合环境规范,则会提示错误。
End of explanation
"""
get_new_card_action = np.array(0, dtype=np.int32)
end_round_action = np.array(1, dtype=np.int32)

environment = CardGameEnv()
time_step = environment.reset()
print(time_step)
cumulative_reward = time_step.reward

# Fixed policy: draw three cards...
for _ in range(3):
  time_step = environment.step(get_new_card_action)
  print(time_step)
  cumulative_reward += time_step.reward

# ...then end the round and collect the terminal reward.
time_step = environment.step(end_round_action)
print(time_step)
cumulative_reward += time_step.reward
print('Final Reward = ', cumulative_reward)
"""
Explanation: 现在我们可以确定环境正在按预期工作,让我们使用固定策略运行此环境:抽取 3 张纸牌,然后结束该回合。
End of explanation
"""
env = suite_gym.load('Pendulum-v1')
print('Action Spec:', env.action_spec())

# Quantize the continuous action space into 5 evenly spaced discrete actions.
discrete_action_env = wrappers.ActionDiscretizeWrapper(env, num_actions=5)
print('Discretized Action Spec:', discrete_action_env.action_spec())
"""
Explanation: 环境包装器
环境包装器使用 python 环境,并返回该环境的修改版本。原始环境和修改后的环境均为 py_environment.PyEnvironment 的实例,并且可以将多个包装器链接在一起。
可以在 environments/wrappers.py 中找到一些常用的包装器。例如:
ActionDiscretizeWrapper:将连续操作空间转换成离散操作空间。
RunStats:捕获环境的运行统计信息,例如采用的步数、完成的片段数等。
TimeLimit:在固定步数后终止片段。
示例 1:操作离散化包装器
InvertedPendulum 是一个接受 [-2, 2] 区间内连续操作的 PyBullet 环境。如果要在此环境中训练离散操作代理(例如 DQN),则必须离散化(量化)操作空间。这正是 ActionDiscretizeWrapper 的工作。请对比包装前后的 action_spec:
End of explanation
"""
class TFEnvironment(object):
  """Illustrative sketch of the TF-Agents TensorFlow environment interface.

  Same contract as the Python environment, but the methods consume and
  produce tensors (with a batch dimension) instead of numpy arrays.
  """

  def time_step_spec(self):
    """Describes the `TimeStep` tensors returned by `step()`."""

  def observation_spec(self):
    """Defines the `TensorSpec` of observations provided by the environment."""

  def action_spec(self):
    """Describes the TensorSpecs of the action expected by `step(action)`."""

  def reset(self):
    """Returns the current `TimeStep` after resetting the Environment."""
    return self._reset()

  def current_time_step(self):
    """Returns the current `TimeStep`."""
    return self._current_time_step()

  def step(self, action):
    """Applies the action and returns the new `TimeStep`."""
    return self._step(action)

  # Subclasses implement the private hooks below.
  @abc.abstractmethod
  def _reset(self):
    """Returns the current `TimeStep` after resetting the Environment."""

  @abc.abstractmethod
  def _current_time_step(self):
    """Returns the current `TimeStep`."""

  @abc.abstractmethod
  def _step(self, action):
    """Applies the action and returns the new `TimeStep`."""
"""
Explanation: 包装后的 discrete_action_env 为 py_environment.PyEnvironment 的实例,可视为常规 python 环境。
TensorFlow 环境
TF 环境的接口在 environments/tf_environment.TFEnvironment 中定义,其与 Python 环境非常相似。TF 环境与 python 环境在以下两个方面有所不同:
TF 环境生成张量对象而非数组
与规范相比,TF 环境会为生成的张量添加批次维度。
将 python 环境转换为 TF 环境可以使 tensorflow 支持并行化运算。例如,用户可以定义 collect_experience_op 从环境中收集数据并添加到 replay_buffer,并定义 train_op 从 replay_buffer 中读取数据并训练代理,然后在 TensorFlow 中自然地并行运行二者。
End of explanation
"""
# Wrap a Python environment into a TensorFlow environment.
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)

print(isinstance(tf_env, tf_environment.TFEnvironment))
print("TimeStep Specs:", tf_env.time_step_spec())
print("Action Specs:", tf_env.action_spec())
"""
Explanation: current_time_step() 方法会返回当前 time_step 并在需要时初始化环境。
reset() 方法会在环境中强制执行重置并返回 current_step。
如果 action 不依赖于上一个 time_step,则在 Graph 模式下将需要 tf.control_dependency。
现在,让我们看看如何创建 TFEnvironments。
创建自己的 TensorFlow 环境
此操作比在 Python 中创建环境复杂得多,因此,我们将不会在本 Colab 中进行介绍。此处提供了一个示例。更常见的用例是在 Python 中实现您的环境,并使用我们的 TFPyEnvironment 包装器将其包装为 TensorFlow 环境(请参见下文)。
将 Python 环境包装为 TensorFlow 环境
我们可以使用 TFPyEnvironment 包装器将任何 Python 环境轻松包装为 TensorFlow 环境。
End of explanation
"""
env = suite_gym.load('CartPole-v0')

tf_env = tf_py_environment.TFPyEnvironment(env)
# reset() creates the initial time_step after resetting the environment.
time_step = tf_env.reset()
num_steps = 3
transitions = []
reward = 0
for i in range(num_steps):
  # Alternate between the two discrete actions: 0, 1, 0, ...
  action = tf.constant([i % 2])
  # applies the action and returns the new TimeStep.
  next_time_step = tf_env.step(action)
  transitions.append([time_step, action, next_time_step])
  reward += next_time_step.reward
  time_step = next_time_step

# Convert the collected tensors to numpy arrays for printing.
np_transitions = tf.nest.map_structure(lambda x: x.numpy(), transitions)
print('\n'.join(map(str, np_transitions)))
print('Total reward:', reward.numpy())
"""
Explanation: 请注意,规范的类型现在为:(Bounded)TensorSpec。
用法示例
简单示例
End of explanation
"""
env = suite_gym.load('CartPole-v0')

tf_env = tf_py_environment.TFPyEnvironment(env)

time_step = tf_env.reset()
rewards = []
steps = []
num_episodes = 5

for _ in range(num_episodes):
  episode_reward = 0
  episode_steps = 0
  # Run one full episode under a uniform-random policy over actions {0, 1}.
  while not time_step.is_last():
    action = tf.random.uniform([1], 0, 2, dtype=tf.int32)
    time_step = tf_env.step(action)
    episode_steps += 1
    episode_reward += time_step.reward.numpy()
  rewards.append(episode_reward)
  steps.append(episode_steps)
  # Reset for the next episode.
  time_step = tf_env.reset()

num_steps = np.sum(steps)
avg_length = np.mean(steps)
avg_reward = np.mean(rewards)

print('num_episodes:', num_episodes, 'num_steps:', num_steps)
print('avg_length', avg_length, 'avg_reward:', avg_reward)
"""
Explanation: 整个片段
End of explanation
"""
|
Capepy/scipy_2015_sklearn_tutorial | notebooks/05.1 In Depth - Linear Models.ipynb | cc0-1.0 | rng = np.random.RandomState(4)
X = rng.normal(size=(1000, 50))
beta = rng.normal(size=50)
y = np.dot(X, beta) + 4 * rng.normal(size=1000)
from sklearn.utils import shuffle
X, y = shuffle(X, y)
from sklearn import linear_model, cross_validation
from sklearn.learning_curve import learning_curve
def plot_learning_curve(est, X, y):
    """Plot mean train and test scores of *est* versus training-set size."""
    sizes, tr_scores, te_scores = learning_curve(
        est, X, y, train_sizes=np.linspace(.1, 1, 30))
    name = est.__class__.__name__
    # Dashed line for training scores; reuse its color for the test curve.
    handles = plt.plot(sizes, tr_scores.mean(axis=1), '--',
                       label="training scores " + name)
    plt.plot(sizes, te_scores.mean(axis=1), '-',
             label="test scores " + name, c=handles[0].get_color())
    plt.xlabel('Training set size')
    plt.legend(loc='best')

plot_learning_curve(linear_model.LinearRegression(), X, y)
"""
Explanation: In depth with linear models
Linear models are useful when little data is available or for very large feature spaces, as in text classification. In addition, they form a good case study for regularization.
Linear models for regression
All linear models for regression learn a coefficient parameter coef_ and an offset intercept_ to make predictions using a linear combination of features:
y_pred = x_test[0] * coef_[0] + ... + x_test[n_features-1] * coef_[n_features-1] + intercept_
The difference between the linear models for regression is what kind of restrictions are put on coef_ and intercept_ (known as regularization), in addition to fitting the training data well.
The most standard linear model is the 'ordinary least squares regression', often simply called 'linear regression'. It doesn't put any additional restrictions on coef_, so when the number of features is large, it becomes ill-posed and the model overfits.
Let us generate a simple simulation, to see the behavior of these models.
End of explanation
"""
# Compare unregularized OLS against l2-regularized Ridge on the same data.
plot_learning_curve(linear_model.LinearRegression(), X, y)
plot_learning_curve(linear_model.Ridge(alpha=20), X, y)
"""
Explanation: As we can see, the ordinary linear regression is not defined if there are less training samples than features. In the presence of noise, it does poorly as long as the number of samples is not several times the number of features.
The LinearRegression is then overfitting: fitting noise. We need to regularize.
The Ridge estimator is a simple regularization (called l2 penalty) of the ordinary LinearRegression. In particular, it has the benefit of being not computationally more expensive than the ordinary least square estimate.
End of explanation
"""
# RidgeCV selects the regularization strength alpha by cross-validation.
plot_learning_curve(linear_model.LinearRegression(), X, y)
plot_learning_curve(linear_model.Ridge(), X, y)
plot_learning_curve(linear_model.RidgeCV(), X, y)
"""
Explanation: We can see that in the low-sample limit, the Ridge estimator performs much better than the unregularized model.
The regularization of the Ridge is a shrinkage: the coefficients learned are biased towards zero. Too much bias is not beneficial, but with very few samples, we will need more bias.
The amount of regularization is set via the alpha parameter of the Ridge. Tuning it is critical for performance. We can set it automatically by cross-validation using the RidgeCV estimator:
End of explanation
"""
# Make the ground truth sparse: only the first 10 features are relevant.
beta[10:] = 0
y = np.dot(X, beta) + 4*rng.normal(size=1000)

plot_learning_curve(linear_model.Ridge(), X, y)
plot_learning_curve(linear_model.Lasso(), X, y)
"""
Explanation: The Lasso estimator is useful to impose sparsity on the coefficient. In other words, it is to be prefered if we believe that many of the features are not relevant. This is done via the so-called l1 penalty.
Let us create such a situation with a new simulation where only 10 out of the 50 features are relevant:
End of explanation
"""
plot_learning_curve(linear_model.RidgeCV(), X, y)
plot_learning_curve(linear_model.LassoCV(n_alphas=20), X, y)
"""
Explanation: We can see that the Lasso estimator performs, in our case, better than the Ridge when there are a small number of training observations. However when there are a lot of observations the Lasso under-performs. Indeed, the variance-reducing effect of the regularization is less critical in these situations, and the bias becomes too detrimental.
As with any estimator, we should tune the regularization parameter to get the best prediction. For this purpose, we can use the LassoCV object. Note that it is a significantly more computationally costly operation than the RidgeCV. To speed it up, we reduce the number of values explored for the alpha parameter.
End of explanation
"""
plt.figure(figsize=(10, 5))
# ElasticNet interpolates between Ridge (l1_ratio=0) and Lasso (l1_ratio=1).
plot_learning_curve(linear_model.RidgeCV(), X, y)
plot_learning_curve(linear_model.ElasticNetCV(l1_ratio=.6, n_alphas=20), X, y)
plot_learning_curve(linear_model.LassoCV(n_alphas=20), X, y)
"""
Explanation: ElasticNet sits in between Lasso and Ridge. It has a tuning parameter, l1_ratio, that controls this behavior: when set to 0 (only l2 penalty), ElasticNet is a Ridge, when set to 1 (only l1 penalty), it is a Lasso. It is useful when your coefficients are not that sparse. The sparser the coefficients, the higher we should set l1_ratio. Note that l1_ratio can also be set by cross-validation, although we won't do it here to limit computational cost.
End of explanation
"""
# Visualize how the C parameter changes a linear SVM's decision boundary.
from figures import plot_linear_svc_regularization
plot_linear_svc_regularization()
"""
Explanation: Exercise
Find the best linear regression prediction on the diabetes dataset, that is available in the scikit-learn datasets.
Linear models for classification
All linear models for classification learn a coefficient parameter coef_ and an offset intercept_ to make predictions using a linear combination of features:
y_pred = x_test[0] * coef_[0] + ... + x_test[n_features-1] * coef_[n_features-1] + intercept_ > 0
As you can see, this is very similar to regression, only that a threshold at zero is applied.
Again, the difference between the linear models for classification is what kind of regularization is put on coef_ and intercept_, but there are also minor differences in how the fit to the training set is measured (the so-called loss function).
The two most common models for linear classification are the linear SVM as implemented in LinearSVC and LogisticRegression.
Regularization: the linear classifiers can suffer from over-fit in the presence of many features and must be regularized. The 'C' parameter controls that regularization: large C values give unregularized model, while small C give strongly regularized models.
A good intuition for regularization of linear classifiers is that with high regularization, it is enough if most of the points are classified correctly. But with less regularization, more importance is given to each individual data point.
This is illustrated using an linear SVM with different values of C below.
End of explanation
"""
|
ragavvenkatesan/Convolutional-Neural-Networks | pantry/tutorials/notebooks/Multi-layer Neural Network.ipynb | mit | from yann.network import network
from yann.special.datasets import cook_mnist
# Cook (download + preprocess) MNIST into yann's dataset format, then
# create a network with an input layer bound to that dataset.
data = cook_mnist()
dataset_params = { "dataset": data.dataset_location(), "id": 'mnist', "n_classes" : 10 }

net = network()
net.add_layer(type = "input", id ="input", dataset_init_args = dataset_params)
"""
Explanation: Multi-layer Neural Network
By virtue of being here, it is assumed that you have gone through the Quick Start. To recap the Quick Start tutorial, we imported the MNIST dataset and trained a Logistic Regression, which produces a linear classification boundary. It is impossible to learn complex functions like XOR with a linear classification boundary.
A Neural Network is a function approximator consisting of several neurons organized in a layered fashion. Each neuron takes input from previous layer, performs some mathematical calculation and sends output to next layer. A neuron produces output only if the result of the calculation it performs is greater than some threshold. This threshold function is called activation function. Depending on the type of the task different activation functions can be used. Some of the most commonly used activation functions are sigmoid, tanh, ReLu and maxout. It is inspired from the functioning of human brain where one neuron sends signal to other neuron only if the electical signal in the first neuron is greater than some threshold.
A Feed Forward Neural network/ multi-layer perceptron has an input layer, an output layer and some hidden layers. The actual magic of the neural networks happens in the hidden layers and they represent the function the network is trying to approximate. Output layer is generally a softmax function that converts the inputs into probabilities. Let us look at the mathematical representation of the hidden layer and output layer
Hidden layer:
let $[a_{i-1}^1], a_{i-1}^2, a_{i-1}^3 ........ a_{i-1}^n]$ be the activations of the previous layer $i-1$
$$h_i = w_i^0 + w_i^1a_{i-1}^1 + w_i^2a_{i-1}^2 + ...... + w_i^na_{i-1}^n$$
$$a_i = act(h_i)$$
Where i is the layer number,
$[w_i^1, w_i^2, w_i^3, ......... w_i^n]$ be the parameters between the $i^{th}$ and $(i-1)^{th}$ layer, $w_i^0$ is the bias which is the input when there is no activation from the previous layer,
1,2....n are the dimensions of the layer,
$a_i$ is the activation at the layer, and $act()$ is the activation function for that layer.
Output layer:
let our network has l layers
$$z = w_i^0 + w_i^1a_{i-1}^1 + w_i^2a_{i-1}^2 + ...... + w_i^na_{i-1}^n$$
$$a = softmax(z)$$
$$correct class = argmax(a)$$
Where a represents the output probabilities, z represents the weighted activations of the previous layer.
Neural Network training:-
Neural Networks have a lot of parameters to learn. Consider a neural network with 2 layers of 100 neurons each, input dimension of 1024 and 10 outputs. Then the number of parameters to learn is 1024 * 100 + 100 * 100 + 100 * 10, i.e., 113400 parameters. Learning this many parameters is a complex task because for each parameter we need to calculate the gradient of the error function and update the parameter with that gradient. The computational instability of this process is the reason neural networks quickly lost their charm. There is a technique called Back Propagation that solved this problem. The following section gives a brief insight into the backpropagation technique.
Back Propagation:
YANN handles the Back propagation by itself. But, it does not hurt to know how it works. A neural network can be represented mathematically as $$O = f_1(W_l(f_2(W_{l-1}f_3(..f_n(WX)..)))$$ where $f_1, f_2, f_3$ are activation functions.
An Error function can be represented as $$E(f_1(W_l(f_2(W_{l-1}f_3(..f_n(WX)..))))$$ where $E()$ is some error function. The gradient of $W_l$ is given by:
$$g_l = \frac{\partial E(f_1(W_lf_2(W_{l-1}f_3(..f_n(WX)..))))}{\partial W_l} $$
Applying chain rule:
$$g_l = \frac{\partial E(f_1())}{\partial f_1}\frac{\partial f_1}{\partial W_l}
$$
The gradient of error w.r.t $W_{l-1}$ after applying chain rule:
$$g_l = \frac{\partial E(f_1())}{\partial f_1}\frac{\partial f_1(W_lf_2())}{\partial f_2}\frac{\partial f_2()}{\partial W_2}
$$
In the above equations the first term $\frac{\partial E(f_1())}{\partial f_1}$ remains same for both gradients. Similarly for rest of the parameters we reuse the terms from the previous gradient calculation. This process drastically reduces the number of calculations in Neural Network training.
Let us take this one step further and create a neural network with two hidden layers. We begin as usual by importing the network class and creating the input layer.
End of explanation
"""
# Two fully-connected ("dot_product") hidden layers of 800 ReLU neurons each.
# `origin` wires a layer to the layer whose output it consumes, and
# `regularize = True` opts the layer's weights into the optimizer's
# regularization terms (coefficients are supplied later via the optimizer).
net.add_layer (type = "dot_product",
               origin ="input",
               id = "dot_product_1",
               num_neurons = 800,
               regularize = True,
               activation ='relu')
net.add_layer (type = "dot_product",
               origin ="dot_product_1",
               id = "dot_product_2",
               num_neurons = 800,
               regularize = True,
               activation ='relu')
"""
Explanation: Instead of connecting this to a classifier as we saw in the Quick Start, let us add a couple of fully connected hidden layers. Hidden layers can be created using layer type = dot_product.
End of explanation
"""
# Softmax classifier over the 10 MNIST digit classes, fed by the second
# hidden layer, followed by a negative-log-likelihood objective layer that
# defines the loss the optimizer will minimize.
net.add_layer ( type = "classifier",
                id = "softmax",
                origin = "dot_product_2",
                num_classes = 10,
                activation = 'softmax',
                )
net.add_layer ( type = "objective",
                id = "nll",
                origin = "softmax",
                )
"""
Explanation: Notice the parameters passed. num_neurons is the number of nodes in the layer. Notice also how we modularized the layers by using the id parameter. origin represents which layer will be the input to the new layer. By default yann assumes all layers are input serially and chooses the last added layer to be the input. Using origin, one can create various types of architectures. Infact any directed acyclic graphs (DAGs) that could be hand-drawn could be implemented. Let us now add a classifier and an objective layer to this.
End of explanation
"""
# Custom optimizer module: Polyak momentum (starting at 0.9, ending at 0.95,
# saturating at epoch 30) combined with RMSProp updates; the regularization
# tuple supplies the (L1, L2) coefficients applied to layers created with
# regularize = True.  The `id` lets net.cook() refer to this optimizer.
optimizer_params =  {
            "momentum_type"       : 'polyak',
            "momentum_params"     : (0.9, 0.95, 30),
            "regularization"      : (0.0001, 0.0002),
            "optimizer_type"      : 'rmsprop',
            "id"                  : 'polyak-rms'
                    }
net.add_module ( type = 'optimizer', params = optimizer_params )
"""
Explanation: The following block is something we did not use in the Quick Start tutorial. We are adding an optimizer and optimizer parameters to the network. Let us create our own optimizer module this time instead of using the yann default. For any module in yann, the initialization can be done using the add_module method. The add_module method typically takes an input type, which in this case is optimizer, and a set of initialization parameters, which in our case is params = optimizer_params. Any module's params, which in this case is the optimizer_params, is a dictionary of relevant options. If you are not familiar with the optimizers in neural networks, I would suggest you go through the Optimizers for Neural Networks series of tutorials to get familiar with the effect of different optimizers on a Neural Network.
A typical optimizer setup is:
End of explanation
"""
# (annealing coefficient, era-1 learning rate, era-2 learning rate)
learning_rates = (0.05, 0.01, 0.001)
"""
Explanation: We have now successfully added Polyak momentum with RMSProp back propagation, with momentum and regularization coefficients that will be applied to the layers for which we passed the argument regularize = True. For more options and parameters of the optimizer, refer to the optimizer documentation. This optimizer will therefore solve the following error:
where $e$ is the error, $\sigma$ is the sigmoid layer and $a_i$ is the $i^{th}$ layer of the network.
End of explanation
"""
# Wire everything together: which optimizer, loss layer, data stream and
# classifier the network should use.
net.cook( optimizer = 'polyak-rms',
          objective_layer = 'nll',
          datastream = 'mnist',
          classifier = 'softmax',
          )
# Two eras of 20 epochs each (see learning_rates above); validates every
# 2 epochs, also reports training accuracy, and stops early when the
# early-termination criterion is met.
net.train( epochs = (20, 20),
           validate_after_epochs = 2,
           training_accuracy = True,
           learning_rates = learning_rates,
           show_progress = True,
           early_terminate = True)
"""
Explanation: The learning_rates value supplied here is a tuple. The first element indicates an annealing of a linear rate, the second is the initial learning rate of the first era, and the third value is the learning rate of the second era. Accordingly, epochs takes a tuple with the number of epochs for each era.
Now we can cook, train and test as usual:
End of explanation
"""
net.test()
"""
Explanation: This time, let us not let it run the forty epochs, let us cancel in the middle after some epochs by hitting ^c. Once it stops lets immediately test and demonstrate that the net retains the parameters as updated as possible.
Some new arguments are introduced here and they are for the most part easy to understand in context. epoch represents a tuple which is the number of epochs of training and number of epochs of fine tuning epochs after that. There could be several of these stages of finer tuning. Yann uses the term ‘era’ to represent each set of epochs running with one learning rate. show_progress will print a progress bar for each epoch. validate_after_epochs will perform validation after such many epochs on a different validation dataset.
Once done, lets run net.test():-
End of explanation
"""
# Run the packaged end-to-end version of this tutorial from the yann pantry.
# NOTE(review): `data` must already be defined (dataset cooked earlier in
# the notebook) for dataset_location() to resolve.
from yann.pantry.tutorials.mlp import mlp
mlp(dataset = data.dataset_location())
"""
Explanation: The full code for this tutorial with additional commentary can be found in the file pantry.tutorials.mlp.py. If you have toolbox cloned or downloaded or just the tutorials downloaded, Run the code as,
End of explanation
"""
|
miroli/veclib | Playground.ipynb | mit | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from IPython.display import display_png
%matplotlib inline
plt.style.use('seaborn-whitegrid')
"""
Explanation: VecLib
A Python library for playing with and visualizing vectors in Jupyter notebooks. For personal learning purposes.
End of explanation
"""
class Vector():
    """A small vector wrapper for exploring and visualizing 1D/2D/3D vectors.

    Wraps a numpy array (``_arr``) plus a per-axis ``base`` used by the 2D
    plot to illustrate a change of basis (components are scaled by the base
    before drawing).
    """

    def __init__(self, arr, base=None):
        """
        Args:
            arr: array-like of 1 to 3 components.
            base: optional per-axis basis scaling used when plotting in 2D.
                Defaults to the standard basis (1, 1).
        """
        self._arr = arr
        # Bug fix: the original signature used `base=np.array([1, 1])`,
        # a mutable default argument shared by every instance. Build a
        # fresh default per instance instead.
        self.base = np.array([1, 1]) if base is None else base

    def dot(self, other):
        """Return the scalar (dot) product with another Vector."""
        return np.dot(self._arr, other._arr)

    def cross(self, other):
        """Return the cross product as a new Vector.

        NOTE(review): the result carries the default base, not either
        operand's base — confirm this is intended.
        """
        return Vector(np.cross(self._arr, other._arr))

    def plot(self, ax=None):
        """Draw the vector from the origin; dispatches on dimensionality.

        Args:
            ax: optional matplotlib axes to draw on; a new figure (3D
                projection for 3-component vectors) is created when omitted.

        Raises:
            Exception: if the vector has more than 3 components.
        """
        dims = len(self._arr)
        if dims > 3:
            raise Exception('Cannot plot over 3 dimensions')
        if not ax:
            fig = plt.figure()
            proj = '3d' if dims == 3 else None
            ax = fig.add_subplot(111, projection=proj)
        if dims == 1:
            self._plot1d(ax)
        elif dims == 2:
            self._plot2d(ax)
        elif dims == 3:
            self._plot3d(ax)

    def _plot1d(self, ax):
        """Draw a 1-component vector along the x axis.

        Bug fix: `plot` dispatched to `_plot1d`, but the method was never
        defined, so plotting a 1-D vector raised AttributeError.
        """
        x = self._arr[0]
        ax.plot([0, x], [0, 0])

    def _plot2d(self, ax):
        """Draw a 2-component vector, scaled by the instance's base."""
        x, y = self._arr * self.base
        ax.plot([0, x], [0, y])
        # Square-ish viewport that always includes the origin.
        min_, max_ = min(x, y), max(x, y)
        ax.set_xlim([min(0, min_), max_])
        ax.set_ylim([min(0, min_), max_])

    def _plot2d_quiver(self, ax):
        """Work in progress: arrow-style 2D rendering via ax.quiver."""
        x, y = self._arr
        ax.quiver(0, 0, x, y, angles='xy', scale_units='xy', scale=1)
        # Clamp each axis so the arrow and the origin are both visible.
        xmin = 0 if x >= 0 else x
        xmax = 0 if x <= 0 else x
        ymin = 0 if y >= 0 else y
        ymax = 0 if y <= 0 else y
        ax.set_xlim([xmin, xmax])
        ax.set_ylim([ymin, ymax])
        return ax

    def _plot3d(self, ax):
        """Draw a 3-component vector on a 3D axes."""
        x, y, z = self._arr
        ax.plot([0, x], [0, y], [0, z])

    def __add__(self, other):
        """Component-wise vector addition; returns a new Vector."""
        return Vector(self._arr + other._arr)

    def __sub__(self, other):
        """Component-wise vector subtraction; returns a new Vector."""
        return Vector(self._arr - other._arr)

    def __mul__(self, scalar):
        """Scale by a scalar.

        NOTE(review): returns a raw numpy array, unlike __add__/__sub__
        which return Vector — kept as-is for backward compatibility.
        """
        return self._arr * scalar

    def __eq__(self, other):
        """Two vectors are equal when all components match."""
        return np.all(self._arr == other._arr)

    def _repr_png_(self):
        # NOTE(review): plot() returns None, so display_png receives None
        # here — confirm the intended rich-display behavior.
        return display_png(self.plot())

    def __repr__(self):
        return 'vector({})'.format([x for x in self._arr])
"""
Explanation: Roadmap
<s>Addition and subtraction</s>
<s>Scaling (multiplication)</s>
<s>Visualizing in 2D</s>
<s>Visualizing in 3D</s>
Visualization legends
Visualize dot products as projections
Compute determinant
Cross products
End of explanation
"""
v1 = Vector([2, 2, 3])
v1
"""
Explanation: Basic plots
Plotting single vector in 2D
End of explanation
"""
fig, ax = plt.subplots()
v1 = Vector(np.array([1,2]))
v2 = Vector(np.array([5,-1]))
v1.plot(ax)
v2.plot(ax)
"""
Explanation: Plotting multiple vectors in 2D
End of explanation
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
v1 = Vector(np.array([5,3,4]))
v2 = Vector(np.array([1,-2,5]))
v1.plot(ax)
v2.plot(ax)
"""
Explanation: Plotting vectors in 3D
End of explanation
"""
v1 = Vector(np.array([1,3]))
v2 = Vector(np.array([2,1]))
v1.dot(v2)
"""
Explanation: Operations
Dot product
End of explanation
"""
v1 = Vector(np.array([1,0,0]))
v2 = Vector(np.array([0,1,0]))
v3 = v1.cross(v2)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
v1.plot(ax)
v2.plot(ax)
v3.plot(ax)
"""
Explanation: Cross product
End of explanation
"""
fig, ax = plt.subplots()
v1 = Vector(np.array([1, 1]), base=np.array([5, 2]))
v2 = Vector(np.array([1, 1]), base=np.array([-2, 3]))
v1.plot(ax)
v2.plot(ax)
"""
Explanation: Other
Changing basis
End of explanation
"""
|
gench/rec2 | RecipeRecommender_ZulkufGenc.ipynb | gpl-3.0 | import json
import numpy as np
from numpy import ma
import io
import re
import itertools
import random
from bokeh.charts import Histogram
import networkx as nx
from nltk.stem import WordNetLemmatizer
wnl = WordNetLemmatizer()
from sklearn.feature_extraction import DictVectorizer
from collections import Counter
from sklearn.feature_extraction.text import CountVectorizer
from bokeh.plotting import figure, output_notebook, show
from bokeh.models import NumeralTickFormatter,ColumnDataSource, LabelSet
output_notebook()
import copy
%matplotlib inline
def get_matrix_form(ingredients_list):
    """Vectorize a list of ingredient lists into a dense count matrix.

    Each inner list is converted into a token-count mapping and fed
    through a DictVectorizer, yielding one row per recipe and one
    column per distinct ingredient.

    Args:
        ingredients_list: list of ingredient lists (one per recipe).

    Returns:
        Tuple of (dense count matrix, numpy array of column names).
    """
    counts_per_recipe = []
    for recipe_ingredients in ingredients_list:
        counts_per_recipe.append(Counter(recipe_ingredients))
    vectorizer = DictVectorizer()
    sparse_counts = vectorizer.fit_transform(counts_per_recipe)
    # NOTE(review): get_feature_names() is deprecated in newer sklearn
    # (use get_feature_names_out()); kept for behavioral parity.
    return sparse_counts.toarray(), np.array(vectorizer.get_feature_names())
"""
Explanation: Recipe Recommender Capstone Project
Task: Given a recipe from the existing data set that you have, suggest another recipe by replacing n (random) ingredients. The new recipe should:
- “make sense” according to certain standards of taste
- not be a subset of ingredients of an existing recipe in the dataset
Data Cleaning
The data has been scraped from http://allrecipes.com/. It consists of over 7K recipes and 2.5M reviews.
End of explanation
"""
#filter out the words that are not ingredients
unrelated = ["","a","an",'teaspoon','oz','cup','tablespoon','gram','kilogram','kg','pound',
'milliliter','ml','envelope','substitute','chip',
'ounce','tsp','tbl','tb','tbsp','pint','pt','lb','liter','fluid',"halves","inch",
"skinless", "boneless","pounded","thickness","thick","cooking","cubed","instant",
"to","from","unsalted","pinch","chopped",'fresh', 'flat', 'leaf',"packages",
"minced","cloves","pinched","sliced","tablespoons","taste","all","purpose",
'teaspoons', 'beaten', 'cups', 'for', 'frying', 'or', 'as', 'needed' ,"ground",
"large","small","big","cut","half","finely","peeled","cooked","lightly",
"in","and","or","kosher","halal","ounces","ounces","can","freshly","crushed","dried",
"slices","slice","diced",'into', 'cubes','divided',"melted","frozen","deveined",
"optional","cans","canned","grated","pounds","lean","packed","mashed","overripe",
'quarters', 'vertically','package','tops', 'removed', 'seeded','softened','extra',
"bottle","condensed","of","style","heavy","torn","piece","shredded","pieces",
"stewed","chunks","chunk","mix","whole","clove","light","assorted","s","with",
"food","jar","spray","toasted",'favorite', 'chop','bite',
'chuck','chop','cleaned','container','covered','degree','delicious','double',
'medium','prepared',
'preserve','quick','refrigerated','rinsed','roast','rolled','room','stew',
'temperature','plus','packet','pack',
'trimmed','unwrapped','warm','flavored','link','sized','bulk','low',
'high','sifted','','square','thinly','drained','halved',
'cube','concentrate','crumb','crumbled','warmed','partially',
'portion','dissolved','halve','skinned','thin','deboned','boiled',
'butterflied','cooled','more','defrosted','size','quartered'
]
def is_numeric(c):
    """
    Check if a given string is numeric or alphanumeric.

    Args:
        c (str): string to check.

    Returns:
        True when the string parses as a float (integers, decimals and
        scientific notation all qualify), False otherwise.
    """
    try:
        float(c)
        return True
    except ValueError:
        return False
def clean_ingredients(ingredients):
    """Normalize one ingredient line into a list of ingredient tokens.

    Splits the line on non-word characters, lower-cases every token,
    discards empty tokens, quantity/descriptor words (the module-level
    `unrelated` list) and pure numbers, and lemmatizes what survives
    with the module-level WordNet lemmatizer `wnl`.

    Args:
        ingredients (str): one line from a recipe's ingredient list.

    Returns:
        List of lemmatized ingredient tokens (possibly empty).
    """
    tokens = [token.lower() for token in re.split("\W+", ingredients)]
    kept = []
    for token in tokens:
        if len(token) < 1:
            continue
        if token in unrelated or is_numeric(token):
            continue
        kept.append(wnl.lemmatize(token.strip()))
    return kept
#read the recipes
all_ingredients = []
recipe_dic = dict()
reviews_dic = dict()
names = []
# One JSON object per line.  After the loop, recipe_dic maps recipe name
# -> de-duplicated ingredient strings and reviews_dic maps name -> rating.
with open("data/recipes.data","r") as f:
    for line in f:
        recipe = json.load(io.StringIO(line))
        name = recipe['name']
        # num_rating / categories / rid are parsed but currently unused.
        num_rating = recipe['num_ratings']
        categories = recipe['categories']
        rid = recipe['id']
        rating = recipe['rating']
        # Normalize every ingredient line (lowercase, lemmatize, drop
        # quantities/descriptor words).
        cleaned_ingredients = [clean_ingredients(_ingredients) for _ingredients in recipe['ingredients'] ]
        # Keeps only ingredient lines that reduced to fewer than 4 tokens —
        # presumably to discard over-described lines; TODO confirm threshold.
        ingredients_filtered = [_ingredient for _ingredient in cleaned_ingredients if len(_ingredient) < 4]
        if ingredients_filtered and len(ingredients_filtered) > 0:
            # Join multi-token ingredients into single strings, de-duplicated.
            recipe_dic[name] = list(set([" ".join(x) for x in ingredients_filtered]))
            reviews_dic[name] = rating
"""
Explanation: Removing the Unrelated Words from Ingredients
Quantifiers and descriptive words are filtered out from the ingredients.
Each word is lemmatized
End of explanation
"""
len(recipe_dic.values())
#flatten the ingredients of each recipe for vectorization
all_ingredients = [ingredient for ingredient in recipe_dic.values()]
ingredient_matrix,vocabulary = get_matrix_form(all_ingredients)
word_counts = ingredient_matrix.sum(axis=0)
sorted_indicies = np.argsort(word_counts)[::-1]
word_counts = np.array(list(map(lambda x: int(x),word_counts)))
"""
Explanation: Ingredient Analysis
Total number of recipes:
End of explanation
"""
ingredient_matrix.shape[1]
"""
Explanation: Number of unique ingredients:
End of explanation
"""
ingredient_frequency = list(zip(vocabulary[sorted_indicies],word_counts[sorted_indicies]))[:1000]
#ingredient_frequency = list(zip(vocabulary[sorted_indicies],word_counts[sorted_indicies]))
#sorted(vec.vocabulary_)
#divide number of occurance of each ingredient to the total number of recipes
popular_ingredients = list(map(lambda x: ( x[0],float( "{0:.2f}".format(x[1] / len(recipe_dic.values())) ) ), ingredient_frequency[:30]))
#separate the ingredient names and frequencies for plotting
ings = list(map(lambda x: x[0], popular_ingredients))[::-1]
freq = list(map(lambda x: x[1] , popular_ingredients))[::-1]
"""
Explanation: Find out which ingredients are most frequently used in the recipes
End of explanation
"""
p = figure(title="Most Popular 30 Ingredients", y_range=ings, x_range=[0,1])
p.segment(0, ings, freq, ings, line_width=2, line_color="green", )
p.circle(freq, ings, size=15, fill_color="orange", line_color="green", line_width=3, )
p.xaxis[0].formatter = NumeralTickFormatter(format="0%")
source = ColumnDataSource(data=dict(height=ings,
weight=freq,
names=list(map(lambda x: "{0}%".format(int(x * 100)),freq))))
labels = LabelSet(x='weight', y='height', text='names', level='glyph',text_font_size="7pt",
x_offset=10, y_offset=-1, source=source, render_mode='canvas')
p.add_layout(labels)
show(p)
"""
Explanation: Most Popular 30 Ingredients
End of explanation
"""
# NOTE(review): the comment, the variable name and the code disagree — the
# threshold actually applied is "appears in at least 5 recipes" (and names
# longer than 2 characters).
#get the ingredients occuring at least in 3 recipes
min_10recp_ingredients = [x[0] for x in ingredient_frequency if x[1] >= 5 and len(x[0]) > 2 ]
#calculate stats about the frequent and rare ingredients
nof_eliminated = 0
total_eliminated = 0
updated_recipe_dic = {}
for recipe, ingredients in recipe_dic.items():
    # Keep only the frequent ingredients of each recipe.
    selected_ingredients = set(ingredients).intersection(set(min_10recp_ingredients))
    if len(ingredients) - len(selected_ingredients) != 0:
        # Track how many recipes lost ingredients and how many in total.
        nof_eliminated += 1
        total_eliminated += len(ingredients) - len(selected_ingredients)
    updated_recipe_dic[recipe] = selected_ingredients
"""
Explanation: Filter out Rare Ingredients
End of explanation
"""
round(nof_eliminated / len(recipe_dic.items()),2)
"""
Explanation: Percentage of recipes having a non-standard ingredient:
End of explanation
"""
round(total_eliminated / nof_eliminated ,2)
"""
Explanation: Average number of ingredients dropped from recipes:
End of explanation
"""
recps_with_essential_ingredients = [ingredient for ingredient in updated_recipe_dic.values()]
"""
Explanation: Ingredient Assosication
Get the flatten list of essential ingredients after filtering the rare ones
End of explanation
"""
ingredient_matrix, vocabulary = get_matrix_form(recps_with_essential_ingredients)
word_counts = ingredient_matrix.sum(axis=0)
sorted_indicies = np.argsort(word_counts)[::-1]
word_counts = np.array(list(map(lambda x: int(x),word_counts)))
len(recps_with_essential_ingredients)
ingredient_matrix.shape
#get the reviews in the same order with recipes
reviews = [round(reviews_dic[recipe],2) for recipe in updated_recipe_dic.keys()]
#np.sum(np.isneginf([x[1] for x in reviews_dic.items()]))
p_reviews = Histogram(reviews, bins=30,
title="Reviews Distribution",xlabel="reviews", ylabel="count",
plot_height=300,plot_width=300)
show(p_reviews)
"""
Explanation: Re-calculate the ingredient occurance matrix
End of explanation
"""
def get_joint_probabilities(cooccurrence_matrix, indicies, log_probability=True):
    """Score every vocabulary entry by how likely it is to co-occur with
    the ingredients at `indicies`.

    Args:
        cooccurrence_matrix: normalized ingredient co-occurrence matrix.
        indicies: row indices of the ingredients to condition on.
        log_probability: when True (default), return the column-wise sum
            of log2 probabilities, clamping log2(0) to -500 so a single
            missing pairing does not produce -inf; when False, return the
            plain column-wise product of probabilities.

    Returns:
        1-D array with one score per vocabulary entry.
    """
    selected_rows = cooccurrence_matrix[indicies]
    if not log_probability:
        return np.prod(selected_rows, axis=0)
    with np.errstate(divide='ignore'):
        clamped_logs = ma.log2(selected_rows).filled(-500)
    return np.sum(clamped_logs, axis=0)
def select_ingredients(indicies,kept_indicies,cooccurrence_matrix,surprise_factor = 1):
    """
    Greedily select complementary ingredients likely to occur together
    with the kept ones.

    Args:
        indicies: vocabulary indices of all ingredients in the original recipe.
        kept_indicies: subset of `indicies` that stays in the recipe.
        cooccurrence_matrix: normalized ingredient co-occurrence matrix.
        surprise_factor: size of the candidate pool per pick. With 1 the
            most likely ingredient is always chosen; with e.g. 3 the pick
            is made uniformly at random among the top 3 candidates.

    Returns:
        Dict mapping pick order (0, 1, ...) to the chosen vocabulary index.
    """
    new_ingredients = {}
    # One new ingredient is picked for every ingredient that was dropped.
    nof_new_ingredients = len(indicies) - len(kept_indicies)
    # Deep copies so the caller's lists are not mutated by the appends below.
    excluded_ingredients = copy.deepcopy(indicies)
    indicies_selected = copy.deepcopy(kept_indicies)
    assert surprise_factor > 0, "The surprise factor must be greater than 0"
    for i in range(nof_new_ingredients):
        # Re-score all ingredients against everything selected so far, so
        # each pick conditions on the previous picks as well.
        con_probabilities = get_joint_probabilities(cooccurrence_matrix,indicies_selected)
        sorted_indicies = np.argsort(con_probabilities)[::-1]
        #exclude the ingredients of the recipe from the ingredient list
        ingredients_to_choose = ma.masked_where(np.in1d(sorted_indicies,
                                                np.array(excluded_ingredients) ),
                                                sorted_indicies)
        # Choose randomly among the `surprise_factor` most likely remaining
        # candidates (unmasked entries keep the descending-probability order).
        new_ingredients[i] = random.choice(ingredients_to_choose[~ingredients_to_choose.mask][:surprise_factor].data)
        excluded_ingredients.append(new_ingredients[i])
        indicies_selected.append(new_ingredients[i])
    return new_ingredients
def calculate_normalized_cooccurrence_matrix(ingredient_matrix, rankings = None):
    """
    Calculate the row-normalized co-occurrence matrix of ingredients.

    Args:
        ingredient_matrix: (n_recipes, n_ingredients) count matrix.
        rankings: optional per-recipe weights (e.g. average ratings).
            When given, each recipe row is scaled by its weight before
            the co-occurrences are accumulated.

    Returns:
        Square (n_ingredients, n_ingredients) matrix where entry [i, j]
        approximates P(ingredient j | ingredient i); rows whose diagonal
        is zero are filled with 0 instead of NaN/inf.
    """
    if rankings:
        # Bug fix: the original referenced the module-level `reviews`
        # list here instead of the `rankings` parameter, so any other
        # weighting passed in was silently ignored.
        weighted_matrix = ingredient_matrix * np.array(rankings)[:, None]
        cooccurrence_matrix = np.dot(weighted_matrix.transpose(),
                                     weighted_matrix)
    else:
        cooccurrence_matrix = np.dot(ingredient_matrix.transpose(), ingredient_matrix)
    cooccurrence_matrix_diagonal = np.diagonal(cooccurrence_matrix)
    # Divide each row by its diagonal (self-count); 0/0 becomes 0.
    with np.errstate(divide='ignore', invalid='ignore'):
        cooccurrence_matrix_normalized = np.nan_to_num(np.true_divide(cooccurrence_matrix, cooccurrence_matrix_diagonal[:, None]))
    return cooccurrence_matrix_normalized
def bulk_select_ingredients(cooccurrence_matrix, indicies, to_replace_indicies):
    """Pick replacement ingredients in a single pass.

    Ranks every vocabulary entry by its joint (product) probability of
    co-occurring with the recipe's current ingredients, discards the
    ingredients already present, and returns as many ingredient names
    (looked up in the module-level `vocabulary`) as there are
    ingredients to replace.
    """
    scores = get_joint_probabilities(cooccurrence_matrix, indicies, False)
    # Highest-scoring vocabulary indices first.
    ranked = np.argsort(scores)[::-1]
    candidates = []
    for candidate in ranked:
        if candidate not in indicies:
            candidates.append(candidate)
    how_many = len(to_replace_indicies)
    return vocabulary[candidates[:how_many]]
def print_recipe(ingredients, to_replace, new_ingredients):
    """Pretty-print the original recipe, the dropped ingredients (red),
    the added ingredients (green), and the resulting new recipe."""
    red, green, reset = "\x1b[31m", "\x1b[32m", "\x1b[0m"
    kept = set(ingredients).difference( set(to_replace))
    original_line = " , ".join(ingredients)
    dropped_line = " - ".join(to_replace)
    added_line = " - ".join(new_ingredients)
    combined_line = " ** ".join(new_ingredients) + " ** " + " ** ".join(kept)
    print("\nOriginal Recipe: \n" + original_line)
    print("\nIngredients dropped: \n" + red + dropped_line + reset)
    print("\nNew Ingredients added: \n" + green + added_line + reset)
    print("\nNew recipe: \n" + combined_line)
"""
Explanation: Ingredient Recommendation
End of explanation
"""
# Pick a random recipe and a random subset of its ingredients to replace.
recipe, ingredients = random.choice(list(updated_recipe_dic.items()))
print ("\nRecipe: " + recipe)
# randint(low=1, high=2) always yields 1, so exactly one ingredient is
# dropped here; raise `high` to replace more at once.
to_replace = np.random.choice(list(ingredients), np.random.randint(low=1, high=2),replace=False)
# Map ingredient names to their vocabulary (column) indices.
indicies = [int(np.where(ingredient == vocabulary)[0]) for ingredient in ingredients]
to_replace_indicies = [int(np.where(ingredient == vocabulary)[0]) for ingredient in to_replace]
to_keep_indicies = [ing_index for ing_index in indicies if not ing_index in to_replace_indicies]
nof_new_ingredients = len(to_replace_indicies)
# Score all candidates against the full original recipe and print the result.
cooccurrence_matrix = calculate_normalized_cooccurrence_matrix(ingredient_matrix)
new_ingredients = bulk_select_ingredients(cooccurrence_matrix,indicies,to_replace_indicies)
print_recipe(ingredients,to_replace,new_ingredients)
"""
Explanation: Randomly select a recipe and ingredients to replace
End of explanation
"""
selected_indicies = select_ingredients(indicies,to_keep_indicies,cooccurrence_matrix,3)
new_selected_ingredients = vocabulary[list(selected_indicies.values())]
print_recipe(ingredients,to_replace,new_selected_ingredients)
"""
Explanation: Find the Best Fitting Ingredients with a Suprise Factor
End of explanation
"""
rw_cooccurrence_matrix_normalized = calculate_normalized_cooccurrence_matrix(ingredient_matrix,reviews)
selected_indicies = select_ingredients(indicies,to_keep_indicies,rw_cooccurrence_matrix_normalized,3)
new_selected_ingredients = vocabulary[list(selected_indicies.values())]
print_recipe(ingredients,to_replace,new_selected_ingredients)
"""
Explanation: Find the Best Fitting Ingredients Weighted by Ratings
Here, I also consider the average ratings of the recipes where the ingredients are taken from. Normally, occurance of each ingredient is incremented by one when it found in a recipe. Here, I tried to increment it by its average rating which is scaled in the range of [0,1].
Calculate rank-weighted co-occurance matrix
End of explanation
"""
# Build an ingredient co-occurrence graph: nodes are ingredients and an
# edge connects every pair that appears together in some recipe.
G=nx.Graph()
for ingredients in updated_recipe_dic.values():
    G.add_nodes_from(ingredients)
    G.add_edges_from(list(itertools.combinations(list(ingredients), 2)))
import matplotlib.pyplot as plt
plt.style.use('seaborn-paper')
# Rank ingredients by degree (number of distinct co-occurring ingredients).
d = nx.degree(G)
import operator
sorted_x = sorted(d.items(), key=operator.itemgetter(1))[::-1]
plt.axis('off')
# Long tail (rank 21 and below): small green nodes on a random layout,
# node size proportional to degree.
nlist = [x[0] for x in sorted_x[20:]]
ndegree = [x[1] for x in sorted_x[20:]]
pos = nx.random_layout(G)
nx.draw_networkx_nodes(G,pos,nodelist=nlist, node_color="green",with_labels=False,node_size=ndegree, alpha=0.3)
nx.draw_networkx_labels(G,pos,dict(zip(nlist,nlist)),font_size=6, font_color="white")
nx.draw_networkx_edges(G, pos,nodelist=nlist,edge_color="white");
# Top 10 hubs: large black nodes on a shell layout.
pos = nx.shell_layout(G)
nlist = [x[0] for x in sorted_x[:10]]
ndegree = [x[1] * 10 for x in sorted_x[:10]]
nx.draw_networkx_nodes(G,pos,nodelist=nlist, node_color="black",with_labels=False,node_size=ndegree, alpha=0.9)
nx.draw_networkx_labels(G,pos,dict(zip(nlist,nlist)),font_size=10, font_color="white")
nx.draw_networkx_edges(G, pos,nodelist=nlist,edge_color="white");
# Ranks 11-20: medium red nodes on the same shell layout.
nlist = [x[0] for x in sorted_x[10:20]]
ndegree = [x[1] * 3 for x in sorted_x[10:20]]
nx.draw_networkx_nodes(G,pos,nodelist=nlist, node_color="red",with_labels=False,node_size=ndegree, alpha=0.7)
nx.draw_networkx_labels(G,pos,dict(zip(nlist,nlist)),font_size=10, font_color="white")
nx.draw_networkx_edges(G, pos,nodelist=nlist,edge_color="white");
"""
Explanation: Network Analysis
Create a graph
End of explanation
"""
|
xaibeing/cn-deep-learning | tutorials/intro-to-tflearn/TFLearn_Sentiment_Analysis_Solution.ipynb | mit | import pandas as pd
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical
"""
Explanation: Sentiment analysis with TFLearn
In this notebook, we'll continue Andrew Trask's work by building a network for sentiment analysis on the movie review data. Instead of a network written with Numpy, we'll be using TFLearn, a high-level library built on top of TensorFlow. TFLearn makes it simpler to build networks just by defining the layers. It takes care of most of the details for you.
We'll start off by importing all the modules we'll need, then load and prepare the data.
End of explanation
"""
reviews = pd.read_csv('reviews.txt', header=None)
labels = pd.read_csv('labels.txt', header=None)
"""
Explanation: Preparing the data
Following along with Andrew, our goal here is to convert our reviews into word vectors. The word vectors will have elements representing words in the total vocabulary. If the second position represents the word 'the', for each review we'll count up the number of times 'the' appears in the text and set the second position to that count. I'll show you examples as we build the input data from the reviews data. Check out Andrew's notebook and video for more about this.
Read the data
Use the pandas library to read the reviews and postive/negative labels from comma-separated files. The data we're using has already been preprocessed a bit and we know it uses only lower case characters. If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like The, the, and THE, all the same way.
End of explanation
"""
from collections import Counter
# Bag of words over the whole corpus: count how often every token appears
# across all reviews (reviews are pre-lowercased; tokens split on ' ').
total_counts = Counter()
for _, row in reviews.iterrows():
    total_counts.update(row[0].split(' '))
print("Total words in data set: ", len(total_counts))
"""
Explanation: Counting word frequency
To start off we'll need to count how often each word appears in the data. We'll use this count to create a vocabulary we'll use to encode the review data. This resulting count is known as a bag of words. We'll use it to select our vocabulary and build the word vectors. You should have seen how to do this in Andrew's lesson. Try to implement it here using the Counter class.
Exercise: Create the bag of words from the reviews data and assign it to total_counts. The reviews are stores in the reviews Pandas DataFrame. If you want the reviews as a Numpy array, use reviews.values. You can iterate through the rows in the DataFrame with for idx, row in reviews.iterrows(): (documentation). When you break up the reviews into words, use .split(' ') instead of .split() so your results match ours.
End of explanation
"""
vocab = sorted(total_counts, key=total_counts.get, reverse=True)[:10000]
print(vocab[:60])
"""
Explanation: Let's keep the first 10000 most frequent words. As Andrew noted, most of the words in the vocabulary are rarely used so they will have little effect on our predictions. Below, we'll sort vocab by the count value and keep the 10000 most frequent words.
End of explanation
"""
print(vocab[-1], ': ', total_counts[vocab[-1]])
"""
Explanation: What's the last word in our vocabulary? We can use this to judge if 10000 is too few. If the last word is pretty common, we probably need to keep more words.
End of explanation
"""
word2idx = {word: i for i, word in enumerate(vocab)}
"""
Explanation: The last word in our vocabulary shows up in 30 reviews out of 25000. I think it's fair to say this is a tiny proportion of reviews. We are probably fine with this number of words.
Note: When you run, you may see a different word from the one shown above, but it will also have the value 30. That's because there are many words tied for that number of counts, and the Counter class does not guarantee which one will be returned in the case of a tie.
Now for each review in the data, we'll make a word vector. First we need to make a mapping of word to index, pretty easy to do with a dictionary comprehension.
Exercise: Create a dictionary called word2idx that maps each word in the vocabulary to an index. The first word in vocab has index 0, the second word has index 1, and so on.
End of explanation
"""
def text_to_vector(text):
    """Convert a string into a bag-of-words count vector over `vocab`.

    Relies on the module-level `vocab` (word list) and `word2idx`
    (word -> column index) defined above.

    text: string of space-separated words.
    returns: 1-D numpy int array of length len(vocab) with per-word counts.
    """
    word_vector = np.zeros(len(vocab), dtype=np.int_)
    for word in text.split(' '):
        idx = word2idx.get(word)
        # Words outside the vocabulary are simply ignored.
        if idx is not None:
            word_vector[idx] += 1
    # word_vector is already an ndarray; the original wrapped it in
    # np.array(), which made a redundant copy on every call.
    return word_vector
"""
Explanation: Text to vector function
Now we can write a function that converts some text to a word vector. The function will take a string of words as input and return a vector with the words counted up. Here's the general algorithm to do this:
Initialize the word vector with np.zeros, it should be the length of the vocabulary.
Split the input string of text into a list of words with .split(' '). Again, if you call .split() instead, you'll get slightly different results than what we show here.
For each word in that list, increment the element in the index associated with that word, which you get from word2idx.
Note: Since all words aren't in the vocab dictionary, you'll get a key error if you run into one of those words. You can use the .get method of the word2idx dictionary to specify a default returned value when you make a key error. For example, word2idx.get(word, None) returns None if word doesn't exist in the dictionary.
End of explanation
"""
text_to_vector('The tea is for a party to celebrate '
'the movie so she has no time for a cake')[:65]
"""
Explanation: If you do this right, the following code should return
```
text_to_vector('The tea is for a party to celebrate '
'the movie so she has no time for a cake')[:65]
array([0, 1, 0, 0, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0])
```
End of explanation
"""
# Encode every review as a bag-of-words count vector (one row per review).
word_vectors = np.zeros((len(reviews), len(vocab)), dtype=np.int_)
for ii, (_, text) in enumerate(reviews.iterrows()):
    word_vectors[ii] = text_to_vector(text[0])
# Printing out the first 5 word vectors
word_vectors[:5, :23]
"""
Explanation: Now, run through our entire review data set and convert each review to a word vector.
End of explanation
"""
# 1 for 'positive' labels, 0 otherwise.
Y = (labels=='positive').astype(np.int_)
records = len(labels)
# Random permutation of row indices for the train/test split.
shuffle = np.arange(records)
np.random.shuffle(shuffle)
# NOTE(review): despite the name, this is the *train* fraction -- 90% of the
# rows go to training and the remaining 10% to test.
test_fraction = 0.9
train_split, test_split = shuffle[:int(records*test_fraction)], shuffle[int(records*test_fraction):]
# to_categorical one-hot encodes the 0/1 targets into two columns.
trainX, trainY = word_vectors[train_split,:], to_categorical(Y.values[train_split], 2)
testX, testY = word_vectors[test_split,:], to_categorical(Y.values[test_split], 2)
trainY
"""
Explanation: Train, Validation, Test sets
Now that we have the word_vectors, we're ready to split our data into train, validation, and test sets. Remember that we train on the train data, use the validation data to set the hyperparameters, and at the very end measure the network performance on the test data. Here we're using the function to_categorical from TFLearn to reshape the target data so that we'll have two output units and can classify with a softmax activation function. We actually won't be creating the validation set here, TFLearn will do that for us later.
End of explanation
"""
# Network building
def build_model():
    """Construct the TFLearn sentiment classifier and return the model."""
    # Wipe any graph state left over from a previous run.
    tf.reset_default_graph()
    # 10000-element bag-of-words input; batch size left unspecified (None).
    network = tflearn.input_data([None, 10000])
    # Two fully connected hidden layers.
    network = tflearn.fully_connected(network, 200, activation='ReLU')
    network = tflearn.fully_connected(network, 25, activation='ReLU')
    # Softmax output over the two sentiment classes.
    network = tflearn.fully_connected(network, 2, activation='softmax')
    # SGD with categorical cross-entropy, matching the two-class softmax.
    network = tflearn.regression(network, optimizer='sgd',
                                 learning_rate=0.1,
                                 loss='categorical_crossentropy')
    return tflearn.DNN(network)
"""
Explanation: Building the network
TFLearn lets you build the network by defining the layers.
Input layer
For the input layer, you just need to tell it how many units you have. For example,
net = tflearn.input_data([None, 100])
would create a network with 100 input units. The first element in the list, None in this case, sets the batch size. Setting it to None here leaves it at the default batch size.
The number of inputs to your network needs to match the size of your data. For this example, we're using 10000 element long vectors to encode our input data, so we need 10000 input units.
Adding layers
To add new hidden layers, you use
net = tflearn.fully_connected(net, n_units, activation='ReLU')
This adds a fully connected layer where every unit in the previous layer is connected to every unit in this layer. The first argument net is the network you created in the tflearn.input_data call. It's telling the network to use the output of the previous layer as the input to this layer. You can set the number of units in the layer with n_units, and set the activation function with the activation keyword. You can keep adding layers to your network by repeated calling net = tflearn.fully_connected(net, n_units).
Output layer
The last layer you add is used as the output layer. There for, you need to set the number of units to match the target data. In this case we are predicting two classes, positive or negative sentiment. You also need to set the activation function so it's appropriate for your model. Again, we're trying to predict if some input data belongs to one of two classes, so we should use softmax.
net = tflearn.fully_connected(net, 2, activation='softmax')
Training
To set how you train the network, use
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')
Again, this is passing in the network you've been building. The keywords:
optimizer sets the training method, here stochastic gradient descent
learning_rate is the learning rate
loss determines how the network error is calculated. In this example, with the categorical cross-entropy.
Finally you put all this together to create the model with tflearn.DNN(net). So it ends up looking something like
net = tflearn.input_data([None, 10]) # Input
net = tflearn.fully_connected(net, 5, activation='ReLU') # Hidden
net = tflearn.fully_connected(net, 2, activation='softmax') # Output
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')
model = tflearn.DNN(net)
Exercise: Below in the build_model() function, you'll put together the network using TFLearn. You get to choose how many layers to use, how many hidden units, etc.
End of explanation
"""
# Build the TFLearn model defined above (also resets the TF graph).
model = build_model()
"""
Explanation: Initializing the model
Next we need to call the build_model() function to actually build the model. In my solution I haven't included any arguments to the function, but you can add arguments so you can change parameters in the model if you want.
Note: You might get a bunch of warnings here. TFLearn uses a lot of deprecated code in TensorFlow. Hopefully it gets updated to the new TensorFlow version soon.
End of explanation
"""
# Training
# validation_set=0.1 holds out 10% of the training data for validation.
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128, n_epoch=100)
"""
Explanation: Training the network
Now that we've constructed the network, saved as the variable model, we can fit it to the data. Here we use the model.fit method. You pass in the training features trainX and the training targets trainY. Below I set validation_set=0.1 which reserves 10% of the data set as the validation set. You can also set the batch size and number of epochs with the batch_size and n_epoch keywords, respectively. Below is the code to fit our the network to our word vectors.
You can rerun model.fit to train the network further if you think you can increase the validation accuracy. Remember, all hyperparameter adjustments must be done using the validation set. Only use the test set after you're completely done training the network.
End of explanation
"""
# Column 0 of the softmax output is the probability of the first class, so
# threshold it at 0.5 and compare against the matching target column.
predictions = (np.array(model.predict(testX))[:,0] >= 0.5).astype(np.int_)
test_accuracy = np.mean(predictions == testY[:,0], axis=0)
print("Test accuracy: ", test_accuracy)
"""
Explanation: Testing
Explanation: Testing
After you're satisfied with your hyperparameters, you can run the network on the test set to measure its performance. Remember, only do this after finalizing the hyperparameters.
End of explanation
"""
# Helper function that uses your model to predict sentiment
def test_sentence(sentence):
    """Print the model's positive-sentiment probability for `sentence`."""
    # Index [0][1] picks the positive-class probability of the single sample.
    prob = model.predict([text_to_vector(sentence.lower())])[0][1]
    verdict = 'Positive' if prob > 0.5 else 'Negative'
    print('Sentence: {}'.format(sentence))
    print('P(positive) = {:.3f} :'.format(prob), verdict)

sentence = "Moonlight is by far the best movie of 2016."
test_sentence(sentence)

sentence = "It's amazing anyone could be talented enough to make something this spectacularly awful"
test_sentence(sentence)
"""
Explanation: Try out your own text!
End of explanation
"""
|
sueiras/training | tensorflow_old/03-text_use_cases/02_sentiment_model/01_Model6_CNN.ipynb | gpl-3.0 | #Imports
from __future__ import print_function
import numpy as np
import tensorflow as tf
print(tf.__version__)
data_path='/home/ubuntu/data/training/keras/aclImdb/'
"""
Explanation: Sentiment model with CNNs
Use Convolutions to create a sentiment model.
Based on: http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/
End of explanation
"""
# Generator of list of files in a folder and subfolders
import os
import fnmatch
def gen_find(filefilter, top):
    """Yield paths of files under `top` (recursively) matching `filefilter`.

    filefilter: fnmatch-style pattern, e.g. "*.txt".
    top: root directory to walk.
    """
    for dirpath, _dirnames, filenames in os.walk(top):
        matches = fnmatch.filter(filenames, filefilter)
        yield from (os.path.join(dirpath, fname) for fname in matches)
def read_sentences(path):
    """Read every *.txt file under `path` and return their first lines.

    Each IMDB review file holds one review on a single line, so only the
    first line of each file is kept.

    path: root directory to search recursively.
    returns: list of review strings (stripped).
    """
    sentences = []
    sentences_list = gen_find("*.txt", path)
    for ff in sentences_list:
        # The IMDB corpus is UTF-8; use an explicit encoding instead of the
        # platform default, which can raise UnicodeDecodeError on some OSes.
        with open(ff, 'r', encoding='utf-8') as f:
            sentences.append(f.readline().strip())
    return sentences
# Read train sentences and create train target
sentences_trn_pos = read_sentences(data_path+'train/pos/')
sentences_trn_neg = read_sentences(data_path+'train/neg/')
sentences_trn_ini = sentences_trn_pos + sentences_trn_neg
print('max_document_length trn: ', max([len(x.split(" ")) for x in sentences_trn_ini]))
# One-hot targets: [1,0] = positive, [0,1] = negative.
y_trn_ini = np.array([[1.,0.]]*len(sentences_trn_pos) + [[0.,1.]]*len(sentences_trn_neg), dtype=np.float32)
print(y_trn_ini.shape)
print(y_trn_ini)
# Shuffle train data
# sklearn's shuffle permutes both arrays with the same random order.
from sklearn.utils import shuffle
sentences_trn, y_trn = shuffle(sentences_trn_ini, y_trn_ini)
print(y_trn)
# Read test sentences and create test target
sentences_tst_pos = read_sentences(data_path+'test/pos/')
sentences_tst_neg = read_sentences(data_path+'test/neg/')
sentences_tst = sentences_tst_pos + sentences_tst_neg
print('max_document_length tst: ', max([len(x.split(" ")) for x in sentences_tst]))
# Same one-hot convention as the training targets: [1,0]=pos, [0,1]=neg.
y_tst = np.array([[1.,0.]]*len(sentences_tst_pos) + [[0.,1.]]*len(sentences_tst_neg), dtype=np.float32)
print(y_tst.shape)
# Build vocabulary and transform sentences
from tensorflow.contrib import learn
# Reviews are truncated/padded to a fixed length of 100 tokens.
sequence_length =100
# Train vocab and apply to train
# Words seen fewer than 10 times are dropped from the vocabulary.
vocab_processor = learn.preprocessing.VocabularyProcessor(sequence_length, min_frequency=10)
X_trn = np.array(list(vocab_processor.fit_transform(sentences_trn)))
# Apply trained vocab to test
X_tst = np.array(list(vocab_processor.transform(sentences_tst)))
# Size vocabulary
vocab_size = len(vocab_processor.vocabulary_)
# Check results
print('Vocab size: ', vocab_size)
print('X trn shape: ', X_trn.shape)
print('X tst shape: ', X_tst.shape)
print('First sentence: ', X_trn[0])
print('house id: ', vocab_processor.vocabulary_.get('house'))
"""
Explanation: Read data and create sequences
End of explanation
"""
# Model parameters
embedding_size = 128
num_filters = 32
# Parallel convolution branches covering 3-, 6- and 12-word windows.
filter_sizes = [3, 6, 12]
# Start an interactive session
# allow_growth avoids grabbing all GPU memory up front.
gpu_options = tf.GPUOptions(allow_growth = True)
sess = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=True))
# Inputs
# input_x: word-id sequences; input_y: one-hot sentiment targets.
input_x = tf.placeholder(tf.int32, shape=[None, sequence_length], name="input_x")
print(input_x)
input_y = tf.placeholder(tf.int32, shape=[None, 2], name="input_y")
print(input_y)
dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
# Embedding layer
with tf.name_scope("embedding"):
    W_embedding = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0), name="W_embedding")
    embedded_chars = tf.nn.embedding_lookup(W_embedding, input_x)
    print(embedded_chars)
    # Add an aditional dimension to match to the convolution requirements:
    # conv2d expects a 4-D [batch, height, width, channels] tensor.
    embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)
    print(embedded_chars_expanded)
# Create a convolution + maxpool layer for each filter size
def conv_layer(x, size_x=2, size_y=2, input_channels=1, output_channels=32):
    """Convolution (VALID padding) + ReLU + max-pool over the full sequence.

    x: 4-D input tensor [batch, sequence_length, embedding_size, 1].
    size_x / size_y: filter height (words covered) and width (embedding dim).
    returns: pooled tensor of shape [batch, 1, 1, output_channels].

    Uses the module-level `sequence_length` to size the pooling window.
    """
    W_conv = tf.Variable(tf.truncated_normal([size_x, size_y, input_channels, output_channels], stddev=0.1), name='W')
    b_conv = tf.Variable(tf.constant(0.1, shape=[output_channels]), name='b')
    conv_out = tf.nn.relu(tf.nn.conv2d(x, W_conv, strides=[1, 1, 1, 1], padding='VALID') + b_conv, name='conv')
    # BUG FIX: the pooling window must depend on this layer's own filter
    # height (size_x). The original read the global loop variable
    # `filter_size`, which only worked because every call happened to pass
    # size_x=filter_size; calling conv_layer on its own would pool with an
    # undefined or stale window size.
    pooled = tf.nn.max_pool(conv_out,
                            ksize=[1, sequence_length - size_x + 1, 1, 1],
                            strides=[1, 1, 1, 1],
                            padding='VALID',
                            name="pool")
    return pooled
# Build one conv+pool branch per filter size; each branch pools down to a
# single [batch, 1, 1, num_filters] tensor.
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
    with tf.name_scope("conv-maxpool-%s" % filter_size):
        pooled = conv_layer(embedded_chars_expanded, size_x=filter_size, size_y=embedding_size, input_channels=1, output_channels=num_filters)
        pooled_outputs.append(pooled)
print(pooled_outputs)
# Combine all the pooled features
h_pool = tf.concat(pooled_outputs, 3)
print(h_pool)
# Flatten the concatenated pooled features into [batch, num_filters_total].
num_filters_total = num_filters * len(filter_sizes)
h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])
print(h_pool_flat)
# Add dropout
with tf.name_scope("dropout"):
    h_drop = tf.nn.dropout(h_pool_flat, dropout_keep_prob)
# Final (unnormalized) scores and predictions
with tf.name_scope("output"):
    W = tf.get_variable("W", shape=[num_filters_total, 2], initializer=tf.contrib.layers.xavier_initializer())
    b = tf.Variable(tf.constant(0.1, shape=[2]), name="b")
    # scores = h_drop * W + b
    scores = tf.nn.xw_plus_b(h_drop, W, b, name="scores")
    print(scores)
    # predictions: position of the max value of scores
    predictions = tf.argmax(scores, 1, name="predictions")
    print(predictions)
# Calculate the Mean of the cross-entropy loss in the batch
with tf.name_scope("loss"):
    # softmax_cross_entropy_with_logits expects the unnormalized scores.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=scores, labels=input_y), name='loss')
# Accuracy: percent of correct predictions
with tf.name_scope("accuracy"):
    accuracy = tf.reduce_mean(tf.cast(tf.equal(predictions, tf.argmax(input_y, 1)), "float"), name="accuracy")
#Optimizer
with tf.name_scope("train") as scope:
    train_step = tf.train.AdamOptimizer(1e-3).minimize(loss)
"""
Explanation: The model
Declare placeholders
Embedding layers
Convolutional and max pooling layers
Merge convolutions oputput
Dense layer to predictions
End of explanation
"""
def batch_iter(X, y, batch_size):
    """Yield (inputs, targets) minibatches of size batch_size.

    The data is shuffled once per call with a single shared permutation, so
    X and y stay aligned; the final batch may be smaller than batch_size.
    """
    n_samples = len(X)
    # One permutation applied to both arrays keeps each (X, y) pair together.
    order = np.random.permutation(np.arange(n_samples))
    X_shuf = X[order]
    y_shuf = y[order]
    # Ceiling division: the last batch holds the leftover samples.
    n_batches = int((n_samples - 1) / batch_size) + 1
    for b in range(n_batches):
        lo = b * batch_size
        hi = min(lo + batch_size, n_samples)
        yield X_shuf[lo:hi], y_shuf[lo:hi]
# Test the generator function
#b_iter= batch_iter(X_trn, y_trn, 2)
#print(b_iter.next()) #python2
#print(next(b_iter)) # python3
# BUG FIX: the stray call `b_iter()` raised a NameError -- b_iter is only
# defined in the commented-out lines above, and even then it would be a
# generator object, not a callable. Commented out.
#b_iter()
# Initialization.
sess.run(tf.global_variables_initializer())
# Train process parameters
num_epochs = 15
batch_size = 128
# Per-epoch history, collected for plotting later.
loss_trn_epoch = []
loss_tst_epoch = []
acc_trn_epoch = []
acc_tst_epoch = []
print('e- LssTrn - AccTrn - LssTst - AccTst' )
for epoch in range(num_epochs):
    loss_trn = []
    acc_trn = []
    loss_tst = []
    acc_tst = []
    # Train step
    for x_batch, y_batch in batch_iter(X_trn, y_trn, batch_size):
        # Dropout (keep_prob=0.5) is active only for the gradient update...
        train_step.run(feed_dict={input_x: x_batch, input_y: y_batch, dropout_keep_prob: 0.5})
        # ...metrics are evaluated with dropout disabled (keep_prob=1).
        loss_step, acc_step = sess.run([loss, accuracy],
                                       feed_dict={input_x: x_batch, input_y: y_batch, dropout_keep_prob: 1})
        loss_trn += [loss_step]
        acc_trn += [acc_step]
    # Validation step
    for x_batch_test, y_batch_test in batch_iter(X_tst, y_tst, batch_size):
        loss_step, acc_step = sess.run([loss, accuracy],
                                       feed_dict={input_x: x_batch_test, input_y: y_batch_test, dropout_keep_prob: 1})
        loss_tst += [loss_step]
        acc_tst += [acc_step]
    # Summary
    # Mean over batch metrics approximates the epoch loss/accuracy.
    print(epoch, np.mean(loss_trn), np.mean(acc_trn), np.mean(loss_tst), np.mean(acc_tst))
    loss_trn_epoch += [np.mean(loss_trn)]
    loss_tst_epoch += [np.mean(loss_tst)]
    acc_trn_epoch += [np.mean(acc_trn)]
    acc_tst_epoch += [np.mean(acc_tst)]
"""
Explanation: Training process
Create a generator to create the batches of inputs and targets. Train & test.
Iterate over the data to train the model
Each iteration over all the data is an epoch
For each epoch iterate overt the batches
End of explanation
"""
# Plot loss
import matplotlib.pyplot as plt
%matplotlib inline
# Train vs. test loss per epoch.
plt.plot(loss_trn_epoch)
plt.plot(loss_tst_epoch)
plt.show()
# Plot accuracy
# Train vs. test accuracy per epoch.
plt.plot(acc_trn_epoch)
plt.plot(acc_tst_epoch)
plt.show()
"""
Explanation: Results
End of explanation
"""
|
NathanYee/ThinkBayes2 | code/chap05.ipynb | gpl-2.0 | from __future__ import print_function, division
% matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import numpy as np
from thinkbayes2 import Pmf, Cdf, Suite, Beta
import thinkplot
"""
Explanation: Think Bayes: Chapter 5
This notebook presents code and exercises from Think Bayes, second edition.
Copyright 2016 Allen B. Downey
MIT License: https://opensource.org/licenses/MIT
End of explanation
"""
def Odds(p):
    """Convert a probability to odds in favor.

    p: probability, 0 <= p <= 1
    returns: odds p / (1-p); p == 1 maps to infinite odds instead of
             raising ZeroDivisionError.
    """
    if p == 1:
        return float('inf')
    return p / (1-p)
"""
Explanation: Odds
The following function converts from probabilities to odds.
End of explanation
"""
def Probability(o):
    """Convert odds in favor to a probability.

    o: odds, o >= 0; float('inf') maps to probability 1.0 (the original
    returned NaN for infinite odds).
    returns: probability o / (o+1)
    """
    if o == float('inf'):
        return 1.0
    return o / (o+1)
"""
Explanation: And this function converts from odds to probabilities.
End of explanation
"""
# 20% probability corresponds to odds of 1:4, i.e. 0.25.
p = 0.2
Odds(p)
"""
Explanation: If 20% of bettors think my horse will win, that corresponds to odds of 1:4, or 0.25.
End of explanation
"""
# Odds of 1:5 correspond to a probability of 1/6.
o = 1/5
Probability(o)
"""
Explanation: If the odds against my horse are 1:5, that corresponds to a probability of 1/6.
End of explanation
"""
# Cookie problem in odds form: even prior odds; likelihood ratio of a
# vanilla cookie under Bowl 1 vs Bowl 2 is 0.75/0.5.
prior_odds = 1
likelihood_ratio = 0.75 / 0.5
post_odds = prior_odds * likelihood_ratio
post_odds
"""
Explanation: We can use the odds form of Bayes's theorem to solve the cookie problem:
End of explanation
"""
# Convert the posterior odds back to a probability.
post_prob = Probability(post_odds)
post_prob
"""
Explanation: And then we can compute the posterior probability, if desired.
End of explanation
"""
# Second draw is chocolate: likelihood ratio 0.25/0.5, applied in place.
likelihood_ratio = 0.25 / 0.5
post_odds *= likelihood_ratio
post_odds
"""
Explanation: If we draw another cookie and it's chocolate, we can do another update:
End of explanation
"""
# Posterior probability after both updates.
post_prob = Probability(post_odds)
post_prob
"""
Explanation: And convert back to probability.
End of explanation
"""
# P(data | Oliver present): the 'AB' trace comes from a random person (1%).
like1 = 0.01
# P(data | Oliver absent): two random people leave 'O' and 'AB', in either order.
like2 = 2 * 0.6 * 0.01
likelihood_ratio = like1 / like2
likelihood_ratio
"""
Explanation: Oliver's blood
The likelihood ratio is also useful for talking about the strength of evidence without getting bogged down talking about priors.
As an example, we'll solve this problem from MacKay's {\it Information Theory, Inference, and Learning Algorithms}:
Two people have left traces of their own blood at the scene of a crime. A suspect, Oliver, is tested and found to have type 'O' blood. The blood groups of the two traces are found to be of type 'O' (a common type in the local population, having frequency 60%) and of type 'AB' (a rare type, with frequency 1%). Do these data [the traces found at the scene] give evidence in favor of the proposition that Oliver was one of the people [who left blood at the scene]?
If Oliver is
one of the people who left blood at the crime scene, then he
accounts for the 'O' sample, so the probability of the data
is just the probability that a random member of the population
has type 'AB' blood, which is 1%.
If Oliver did not leave blood at the scene, then we have two
samples to account for. If we choose two random people from
the population, what is the chance of finding one with type 'O'
and one with type 'AB'? Well, there are two ways it might happen:
the first person we choose might have type 'O' and the second
'AB', or the other way around. So the total probability is
$2 (0.6) (0.01) = 1.2$%.
So the likelihood ratio is:
End of explanation
"""
# Starting from even prior odds, apply the likelihood ratio.
post_odds = 1 * like1 / like2
Probability(post_odds)
"""
Explanation: Since the ratio is less than 1, it is evidence against the hypothesis that Oliver left blood at the scene.
But it is weak evidence. For example, if the prior odds were 1 (that is, 50% probability), the posterior odds would be 0.83, which corresponds to a probability of:
End of explanation
"""
# Solution goes here
# Solution goes here
"""
Explanation: So this evidence doesn't "move the needle" very much.
Exercise: Suppose other evidence had made you 90% confident of Oliver's guilt. How much would this exculpatory evidence change your beliefs? What if you initially thought there was only a 10% chance of his guilt?
Notice that evidence with the same strength has a different effect on probability, depending on where you started.
End of explanation
"""
# Uniform Beta(1, 1) priors, updated with (hits, misses) for each shooter.
rhode = Beta(1, 1, label='Rhode')
rhode.Update((22, 11))
wei = Beta(1, 1, label='Wei')
wei.Update((21, 12))
"""
Explanation: Comparing distributions
Let's get back to the Kim Rhode problem from Chapter 4:
At the 2016 Summer Olympics in the Women's Skeet event, Kim Rhode faced Wei Meng in the bronze medal match. They each hit 15 of 25 targets, sending the match into sudden death. In the first round, both hit 1 of 2 targets. In the next two rounds, they each hit 2 targets. Finally, in the fourth round, Rhode hit 2 and Wei hit 1, so Rhode won the bronze medal, making her the first Summer Olympian to win an individual medal at six consecutive summer games.
But after all that shooting, what is the probability that Rhode is actually a better shooter than Wei? If the same match were held again, what is the probability that Rhode would win?
I'll start with a uniform distribution for x, the probability of hitting a target, but we should check whether the results are sensitive to that choice.
First I create a Beta distribution for each of the competitors, and update it with the results.
End of explanation
"""
# Posterior distributions of x (per-target hit probability) for each shooter.
thinkplot.Pdf(rhode.MakePmf())
thinkplot.Pdf(wei.MakePmf())
thinkplot.Config(xlabel='x', ylabel='Probability')
"""
Explanation: Based on the data, the distribution for Rhode is slightly farther right than the distribution for Wei, but there is a lot of overlap.
End of explanation
"""
# Monte Carlo estimate of P(x_rhode > x_wei): draw one x from each
# posterior per trial and count how often Rhode's is higher.
iters = 1000
count = sum(rhode.Random() > wei.Random() for _ in range(iters))
count / iters
"""
Explanation: To compute the probability that Rhode actually has a higher value of p, there are two options:
Sampling: we could draw random samples from the posterior distributions and compare them.
Enumeration: we could enumerate all possible pairs of values and add up the "probability of superiority".
I'll start with sampling. The Beta object provides a method that draws a random value from a Beta distribution:
End of explanation
"""
# Vectorized version: sample both posteriors and compare elementwise.
rhode_sample = rhode.Sample(iters)
wei_sample = wei.Sample(iters)
np.mean(rhode_sample > wei_sample)
"""
Explanation: Beta also provides Sample, which returns a NumPy array, so we can perform the comparisons using array operations:
End of explanation
"""
def ProbGreater(pmf1, pmf2):
    """Probability that a draw from pmf1 exceeds a draw from pmf2.

    Enumerates every pair of values from the two (independent) PMFs and
    accumulates the joint probability of the pairs where x1 > x2.
    """
    return sum(p1 * p2
               for x1, p1 in pmf1.Items()
               for x2, p2 in pmf2.Items()
               if x1 > x2)
# Discrete approximations of the two posteriors on a 1001-point grid.
pmf1 = rhode.MakePmf(1001)
pmf2 = wei.MakePmf(1001)
ProbGreater(pmf1, pmf2)
# Pmf provides equivalent built-in methods.
pmf1.ProbGreater(pmf2)
pmf1.ProbLess(pmf2)
"""
Explanation: The other option is to make Pmf objects that approximate the Beta distributions, and enumerate pairs of values:
End of explanation
"""
import random

def flip(p):
    """Return True with probability p (consumes one random.random() draw)."""
    draw = random.random()
    return draw < p
"""
Explanation: Exercise: Run this analysis again with a different prior and see how much effect it has on the results.
Simulation
To make predictions about a rematch, we have two options again:
Sampling. For each simulated match, we draw a random value of x for each contestant, then simulate 25 shots and count hits.
Computing a mixture. If we knew x exactly, the distribution of hits, k, would be binomial. Since we don't know x, the distribution of k is a mixture of binomials with different values of x.
I'll do it by sampling first.
End of explanation
"""
# Simulate rematches: sample x for each shooter, then flip 25 shots apiece.
# Ties count as neither a win nor a loss.
iters = 1000
wins = 0
losses = 0
for _ in range(iters):
    x1 = rhode.Random()
    x2 = wei.Random()
    count1 = count2 = 0
    for _ in range(25):
        if flip(x1):
            count1 += 1
        if flip(x2):
            count2 += 1
    if count1 > count2:
        wins += 1
    if count1 < count2:
        losses += 1
wins/iters, losses/iters
"""
Explanation: flip returns True with probability p and False with probability 1-p
Now we can simulate 1000 rematches and count wins and losses.
End of explanation
"""
# Same simulation using NumPy: one binomial draw per sampled x.
rhode_rematch = np.random.binomial(25, rhode_sample)
thinkplot.Hist(Pmf(rhode_rematch))
wei_rematch = np.random.binomial(25, wei_sample)
np.mean(rhode_rematch > wei_rematch)
np.mean(rhode_rematch < wei_rematch)
"""
Explanation: Or, realizing that the distribution of k is binomial, we can simplify the code using NumPy:
End of explanation
"""
from thinkbayes2 import MakeBinomialPmf
def MakeBinomialMix(pmf, label=''):
    """Mixture of Binomial(25, x) distributions weighted by pmf over x.

    pmf: Pmf of x, the per-target hit probability.
    label: label for the resulting Pmf.
    returns: Pmf of k, the number of hits out of 25.
    """
    mixture = Pmf(label=label)
    for x, weight in pmf.Items():
        binomial = MakeBinomialPmf(n=25, p=x)
        for k, pk in binomial.Items():
            mixture[k] += weight * pk
    return mixture
# Mixture distributions of hits for each shooter, and P(Rhode wins/loses).
rhode_rematch = MakeBinomialMix(rhode.MakePmf(), label='Rhode')
wei_rematch = MakeBinomialMix(wei.MakePmf(), label='Wei')
thinkplot.Pdf(rhode_rematch)
thinkplot.Pdf(wei_rematch)
thinkplot.Config(xlabel='hits')
rhode_rematch.ProbGreater(wei_rematch), rhode_rematch.ProbLess(wei_rematch)
"""
Explanation: Alternatively, we can make a mixture that represents the distribution of k, taking into account our uncertainty about x:
End of explanation
"""
from thinkbayes2 import MakeMixture
def MakeBinomialMix2(pmf):
    """Mixture of Binomial(25, x) distributions, built with MakeMixture.

    Makes a meta-Pmf mapping each Binomial(25, x) Pmf to its weight, then
    collapses it with MakeMixture.
    """
    meta = Pmf()
    for x, weight in pmf.Items():
        meta[MakeBinomialPmf(n=25, p=x)] = weight
    return MakeMixture(meta)
"""
Explanation: Alternatively, we could use MakeMixture:
End of explanation
"""
# Same result via MakeMixture.
rhode_rematch = MakeBinomialMix2(rhode.MakePmf())
wei_rematch = MakeBinomialMix2(wei.MakePmf())
rhode_rematch.ProbGreater(wei_rematch), rhode_rematch.ProbLess(wei_rematch)
"""
Explanation: Here's how we use it.
End of explanation
"""
# Distribution of the total hits in a rematch, by sampling.
iters = 1000
pmf = Pmf()
for _ in range(iters):
    k = rhode_rematch.Random() + wei_rematch.Random()
    pmf[k] += 1
pmf.Normalize()
thinkplot.Hist(pmf)
"""
Explanation: Exercise: Run this analysis again with a different prior and see how much effect it has on the results.
Distributions of sums and differences
Suppose we want to know the total number of targets the two contestants will hit in a rematch. There are two ways we might compute the distribution of this sum:
Sampling: We can draw samples from the distributions and add them up.
Enumeration: We can enumerate all possible pairs of values.
I'll start with sampling:
End of explanation
"""
# Same thing with array sampling.
ks = rhode_rematch.Sample(iters) + wei_rematch.Sample(iters)
pmf = Pmf(ks)
thinkplot.Hist(pmf)
"""
Explanation: Or we could use Sample and NumPy:
End of explanation
"""
def AddPmfs(pmf1, pmf2):
    """Distribution of the sum of independent draws from pmf1 and pmf2.

    Enumerates all value pairs; each sum v1+v2 accumulates the joint
    probability p1*p2.
    """
    total = Pmf()
    for v1, p1 in pmf1.Items():
        for v2, p2 in pmf2.Items():
            total[v1 + v2] += p1 * p2
    return total
"""
Explanation: Alternatively, we could compute the distribution of the sum by enumeration:
End of explanation
"""
# Exact distribution of the sum by enumeration.
pmf = AddPmfs(rhode_rematch, wei_rematch)
thinkplot.Pdf(pmf)
"""
Explanation: Here's how it's used:
End of explanation
"""
# Pmf overloads + to do the same enumeration.
pmf = rhode_rematch + wei_rematch
thinkplot.Pdf(pmf)
"""
Explanation: The Pmf class provides a + operator that does the same thing.
End of explanation
"""
# Solution goes here
# Solution goes here
# Solution goes here
"""
Explanation: Exercise: The Pmf class also provides the - operator, which computes the distribution of the difference in values from two distributions. Use the distributions from the previous section to compute the distribution of the differential between Rhode and Wei in a rematch. On average, how many clays should we expect Rhode to win by? What is the probability that Rhode wins by 10 or more?
End of explanation
"""
# Distribution of the best (max) of six rematches, by sampling.
iters = 1000
pmf = Pmf()
for _ in range(iters):
    ks = rhode_rematch.Sample(6)
    pmf[max(ks)] += 1
pmf.Normalize()
thinkplot.Hist(pmf)
"""
Explanation: Distribution of maximum
Suppose Kim Rhode continues to compete in six more Olympics. What should we expect her best result to be?
Once again, there are two ways we can compute the distribution of the maximum:
Sampling.
Analysis of the CDF.
Here's a simple version by sampling:
End of explanation
"""
# 6 x iters array: each column is one simulated set of six rematches.
iters = 1000
ks = rhode_rematch.Sample((6, iters))
ks
"""
Explanation: And here's a version using NumPy. I'll generate an array with 6 rows and 10 columns:
End of explanation
"""
# Maximum over the six rematches in each column.
maxes = np.max(ks, axis=0)
maxes[:10]
"""
Explanation: Compute the maximum in each column:
End of explanation
"""
# Empirical distribution of the maxima.
pmf = Pmf(maxes)
thinkplot.Hist(pmf)
"""
Explanation: And then plot the distribution of maximums:
End of explanation
"""
# Analytic version: CDF_max(x) = CDF(x)**6, via Pmf.Max.
pmf = rhode_rematch.Max(6).MakePmf()
thinkplot.Hist(pmf)
"""
Explanation: Or we can figure it out analytically. If the maximum is less-than-or-equal-to some value k, all 6 random selections must be less-than-or-equal-to k, so:
$ CDF_{max}(x) = CDF(x)^6 $
Pmf provides a method that computes and returns this Cdf, so we can compute the distribution of the maximum like this:
End of explanation
"""
def Min(pmf, k):
    """CDF of the minimum of k independent draws from pmf.

    Uses CDF_min(x) = 1 - (1 - CDF(x))**k: the minimum is <= x unless all
    k draws exceed x.
    """
    cdf = pmf.MakeCdf()
    survival = (1 - cdf.ps) ** k
    cdf.ps = 1 - survival
    return cdf
# Distribution of Rhode's worst score over six competitions.
pmf = Min(rhode_rematch, 6).MakePmf()
thinkplot.Hist(pmf)
"""
Explanation: Exercise: Here's how Pmf.Max works:
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.MakeCdf()
cdf.ps **= k
return cdf
Write a function that takes a Pmf and an integer k and returns a Pmf that represents the distribution of the minimum of k values drawn from the given Pmf. Use your function to compute the distribution of the minimum score Kim Rhode would be expected to shoot in six competitions.
End of explanation
"""
# Solution goes here
# Solution goes here
"""
Explanation: Exercises
Exercise: Suppose you are having a dinner party with 10 guests and 4 of them are allergic to cats. Because you have cats, you expect 50% of the allergic guests to sneeze during dinner. At the same time, you expect 10% of the non-allergic guests to sneeze. What is the distribution of the total number of guests who sneeze?
End of explanation
"""
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
"""
Explanation: Exercise This study from 2015 showed that many subjects diagnosed with non-celiac gluten sensitivity (NCGS) were not able to distinguish gluten flour from non-gluten flour in a blind challenge.
Here is a description of the study:
"We studied 35 non-CD subjects (31 females) that were on a gluten-free diet (GFD), in a double-blind challenge study. Participants were randomised to receive either gluten-containing flour or gluten-free flour for 10 days, followed by a 2-week washout period and were then crossed over. The main outcome measure was their ability to identify which flour contained gluten.
"The gluten-containing flour was correctly identified by 12 participants (34%)..."
Since 12 out of 35 participants were able to identify the gluten flour, the authors conclude "Double-blind gluten challenge induces symptom recurrence in just one-third of patients fulfilling the clinical diagnostic criteria for non-coeliac gluten sensitivity."
This conclusion seems odd to me, because if none of the patients were sensitive to gluten, we would expect some of them to identify the gluten flour by chance. So the results are consistent with the hypothesis that none of the subjects are actually gluten sensitive.
We can use a Bayesian approach to interpret the results more precisely. But first we have to make some modeling decisions.
Of the 35 subjects, 12 identified the gluten flour based on resumption of symptoms while they were eating it. Another 17 subjects wrongly identified the gluten-free flour based on their symptoms, and 6 subjects were unable to distinguish. So each subject gave one of three responses. To keep things simple I follow the authors of the study and lump together the second two groups; that is, I consider two groups: those who identified the gluten flour and those who did not.
I assume (1) people who are actually gluten sensitive have a 95% chance of correctly identifying gluten flour under the challenge conditions, and (2) subjects who are not gluten sensitive have only a 40% chance of identifying the gluten flour by chance (and a 60% chance of either choosing the other flour or failing to distinguish).
Using this model, estimate the number of study participants who are sensitive to gluten. What is the most likely number? What is the 95% credible interval?
End of explanation
"""
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
"""
Explanation: Exercise Coming soon: the space invaders problem.
End of explanation
"""
|
rkastilani/PowerOutagePredictor | PowerOutagePredictor/Tree/TreeClassifier_Example.ipynb | mit | df = pd.DataFrame()
weather = df.append({"Day_length_hr": 11,
"Avg_Temp_F": 38,
"Avg_humidity_percent": 85,
"Max_windspeed_mph": 16,
"Avg_windspeed_mph": 7,
"Max_windgust_mph": 20,
"Precipitation_in": 0.33},
ignore_index=True)
weather
"""
Explanation: input weather data following the package instruction
End of explanation
"""
tree.predictOutageProba(weather,'dt')
"""
Explanation: predict the possibility of there classes (normal, bad, extreme) using decision tree
End of explanation
"""
tree.predictOutage(weather,'rf')
"""
Explanation: predict the class of outage status (0 for normal, 1 for bad, 2 for extreme) for a given day using random forest
End of explanation
"""
|
vvolkl/kalman-samples | constant_velocity.ipynb | gpl-2.0 | # allow use of python3 syntax
from __future__ import division, print_function, absolute_import
import numpy as np
# local script with often used
import kalman as k
# contents of local file kalman.py
# %load kalman.py
import numpy as np
import matplotlib.pyplot as plt
def kalman_predict( A, # transition matrix
                    r, # measurement error (covariance) matrix
                    H, # transformation matrix from state vector to measurement
                    p, # initial covariance of the prediction
                    xkal, # estimated state vectors, one column per step (column 0 holds the initial estimate)
                    xpredict, # predicted state vectors, one column per step
                    xmeas): # measurements, one column per step
    """Run a linear Kalman filter over the columns of ``xmeas``.

    ``xkal``, ``xpredict`` and ``xmeas`` are matrices with one column per
    time step; column 0 must be pre-filled with the initial state.
    Returns the updated ``(xkal, xpredict)`` pair (modified in place).
    """
    # iterate over time steps, i.e. the COLUMNS of the state history.
    # Fixed: the original looped over xkal.shape[0] (the number of state
    # components), which processed only a single step for a 2-state filter.
    for i in range(1, xkal.shape[1]):
        # prediction: propagate the previous estimate through the model
        xpredict[:, i] = np.dot(A, xkal[:, i - 1])
        # propagate the covariance of the prediction
        p = np.dot(np.dot(A, p), A.T)
        # Kalman gain: higher gain means more trust in the measurement,
        # lower gain means more trust in the prediction
        K = np.dot(np.dot(p, H.T), np.linalg.inv(np.dot(np.dot(H, p), H.T) + r))
        # combine prediction and measurement into the new estimate.
        # Fixed: the original referenced the module-level global
        # ``xpredict3`` here instead of the ``xpredict`` parameter.
        xkal[:, i] = xpredict[:, i] + np.dot(K, (xmeas[:, i] - H * xpredict[:, i]))
        # update covariance with the gain
        p = np.dot(np.identity(K.shape[0]) - K, p)
    return xkal, xpredict
def plot_results(xkal, xpredict, xmeas, xtrue):
    """Plot position (figure 1) and velocity (figure 2) traces.

    Expects 2-row state histories: row 0 is position, row 1 is velocity.
    NOTE(review): relies on the module-level global ``v`` for the velocity
    reference line -- consider passing it in explicitly. TODO confirm.
    Returns ``[[fig1, fig2], [ax1, ax2]]``.
    """
    # --- figure 1: position over iterations ---
    fig1 = plt.figure()
    ax1 = plt.axes()
    plt.plot(xtrue, 'b-', label = 'True')
    # NOTE(review): label text 'Measuement' is misspelled in the original;
    # no legend is ever drawn, so the labels are currently unused anyway.
    plt.plot(xmeas[0].T, 'rx', label = 'Measuement')
    plt.plot(xpredict[0].T, 'g.', label = 'Prediction')
    plt.plot(xkal[0].T, 'ko', label = 'Kalman')
    plt.xlabel('Iteration')
    plt.ylabel('X')
    # --- figure 2: velocity over iterations ---
    fig2 = plt.figure()
    ax2 = plt.axes()
    # reference lines: true velocity (global v) and mean measured velocity
    plt.axhline(v)
    plt.axhline(np.mean(xmeas[1]))
    plt.plot(xpredict[1].T, 'g.', label = 'Prediction')
    plt.plot(xmeas[1].T, 'rx', label = 'Measurement')
    plt.plot(xkal[1].T, 'ko', label = 'Kalman')
    plt.xlabel('Iteration')
    plt.ylabel('Velocity')
    return [[fig1, fig2], [ax1, ax2]]
"""
Explanation: 1. Kalman Filter applied to 1D Movement with constant velocity
End of explanation
"""
# --- Simulation parameters for the constant-velocity track ---
# number of measurements
N = 10
# time step between measurements (arbitrary units)
dt = 1.
# final time
T = N * dt
# true (constant) velocity of the object
v = -10.
"""
Explanation: Parameters
The system under consideration is an object traveling under constant velocity.
Its motion (in both time and space) can be parametrized as a straight line with
intercept $x_0$ and inclination $v$. The position is measured $N$ times at time
intervals $dt$, or alternatively at some fixed positions given by $k$ surfaces.
End of explanation
"""
# starting position of the object
x0 = 100.
# positions of a constant-velocity object at N evenly spaced sample times.
# NOTE(review): np.linspace(0, T, N) spaces points by T/(N-1), not by dt --
# confirm this is the intended sampling grid.
times = np.linspace(0, T, N)
xtrue = x0 + v * times
print(xtrue)
"""
Explanation: True trajectory
End of explanation
"""
# standard deviation of the (Gaussian) measurement noise
sigma = 10
noise = np.random.normal(loc=0, scale=sigma, size=xtrue.shape)
# noisy observations of the true trajectory
xmeas = xtrue + noise
print(xmeas)
"""
Explanation: The measurement is noisy and the results are normally distributed with variance
$\sigma^2$.
Measured trajectory
End of explanation
"""
# estimated track parameters at times k
xkal = np.zeros(xmeas.shape)
# prediction for new track parameters based on previous ones
xpredict = np.zeros(xmeas.shape)
# covariance matrices (here only numbers) of the measurements
p = np.zeros(xmeas.shape)
# Kalman gain matrices
K = np.zeros(xmeas.shape)
# initial position
xpredict[0] = xkal[0] = xmeas[0]
# initial variance on prediction
p[0] = 20
# measurement error
r = sigma**2
# transformation matrix (from state to measurement)
H = 1
for i in range(1, N):
# prediction: recursive formula
xpredict[i] = xkal[i - 1] + v * dt
p[i] = p[i - 1]
# constructing Kalman gain matrix
# in this case, the gain shrinks with each recursion
# makes sense, as one outlier should not influence a prediction based on many points
K[i] = p[i] / (p[i] + r)
# final estimate of local track paramters based on prediction and
# measurement
xkal[i] = xpredict[i] + K[i] * (xmeas[i] - H * xpredict[i])
# update covariance
p[i] = (1 - K[i]) * p[i]
"""
Explanation: Kalman Filter
System equation
In this simplest case the state vector $\mathbf{p}_k = [x_0, v]$ at surface $k$
is left unchanged by the time evolution of the system. An alternative
parametrization is given by the The deterministic function $\mathbf{f}_k$
(which has a linear approximation $\mathbf{F}_k$ that describes how the track
parameter would change from one surface to another is just the identity.
Additionaly, future track parameters are affected by process noise
$\mathbf{\delta}_k$. Usually only a subset of the track parameters are affected
by process noise. This is expressed by multiplying the matrix representation of
process noise with a projection matrix $\mathbf{P}_k$.
The covariance matrix of $\mathbf{\delta}_k$ is denoted $\mathbf{Q}_k$.
Measurement equation
The deterministic function $\mathbf{h}_k$ with linear expansion $\mathbf{H}_k$
maps the track parameters $\mathbf{p}_k$ to measurable quantities (p.ex. space
time points). The covariance of the measurement noise is denoted $\mathbf{V}_k$
Noattion:
[1] Frühwirth, Rudolf, and Meinhard Regler. Data analysis techniques for high-
energy physics. Vol. 11. Cambridge University Press, 2000.
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plot
plot.plot(xtrue, 'b-', label = 'True')
plot.plot(xmeas, 'rx', label = 'Measuement')
plot.plot(xpredict, 'g.', label = 'Prediction')
plot.plot(xkal, 'ko', label = 'Kalman')
plot.xlabel('Iteration')
plot.ylabel('X')
plot.legend()
plot.show()
plot.subplot(3,1,1)
plot.plot(p,'o')
plot.ylabel('Prediction cov')
plot.subplot(3,1,2)
plot.plot(K,'o')
plot.ylabel('Kalman gain')
plot.xlabel('Iteration')
plot.show()
"""
Explanation: Plot results
End of explanation
"""
# --- 2-state Kalman filter: position measured, velocity estimated ---
xpredict2 = np.matrix(np.linspace(0, 10, N * 2).reshape((2, N)))
xkal2 = np.matrix(np.linspace(0, 10, N * 2).reshape((2, N)))
# initial state: first measured position plus a noisy guess of the velocity
xpredict2[:, 0] = xkal2[:, 0] = np.array([[xmeas[0]], [np.random.normal(v, 1.5)]])
# initial variance on prediction
p2 = np.matrix([[20, 0],
                [0, 20]])
# measurement error (variance of the position measurement).
# Fixed: ``sigma^2`` is bitwise XOR in Python (10 ^ 2 == 8); the intended
# value is sigma**2 == 100, matching the scalar filter above.
r = np.matrix([[sigma**2]])
# prediction (state-transition) matrix for constant-velocity motion
A = np.matrix([[1, dt],
               [0, 1]])
# transformation matrix (from state vector to measurement: only position is seen)
H = np.matrix([[1, 0]])
for i in range(1, N):
    # prediction: propagate the previous estimate through the model
    xpredict2[:, i] = np.dot(A, xkal2[:, i - 1])
    p2 = A * p2 * A.T
    # Kalman gain from the predicted covariance and measurement error
    K2 = np.dot(p2 * H.T, np.linalg.inv(H * p2 * H.T + r))
    # combine prediction and measurement into the new estimate
    xkal2[:, i] = xpredict2[:, i] + K2 * (xmeas[i] - H * xpredict2[:, i])
    # update covariance with the gain
    p2 = (np.identity(2) - K2) * p2
plot.plot(xtrue, 'b-', label = 'True')
plot.plot(xmeas, 'rx', label = 'Measuement')
plot.plot(xpredict2[0].T, 'g.', label = 'Prediction')
plot.plot(xkal2[0].T, 'ko', label = 'Kalman')
plot.xlabel('Iteration')
plot.ylabel('X')
plot.show()
plot.axhline(v)
plot.plot(xpredict2[1].T, 'g.', label = 'Prediction')
plot.plot(xkal2[1].T, 'ko', label = 'Kalman')
plot.xlabel('Iteration')
plot.ylabel('Velocity')
plot.show()
"""
Explanation: 2. Same problem but with unknown velocity
End of explanation
"""
# --- 2-state filter where BOTH position and velocity are measured ---
xmeas3 = np.matrix (np.linspace(0,10,N*2).reshape((2, N)))
# standard deviation of the velocity measurement noise
sigma3 = 1
for i in range(0,N):
    # row 0: noisy position measurements, row 1: noisy velocity measurements
    xmeas3[0,i] = np.random.normal(xtrue[i], sigma)
    xmeas3[1,i] = np.random.normal(v, sigma3)
print(xmeas3.T)
xpredict3 = np.matrix (np.linspace(0,10,N*2).reshape((2, N)))
xkal3 = np.matrix (np.linspace(0,10,N*2).reshape((2, N)))
# initial state: first measured position and velocity
xpredict3[:,0] = xkal3[:,0] = np.array ( [[xmeas3[0,0]], [xmeas3[1,0]] ] )
# initial variance on prediction
p2 = np.matrix ( [[20, 0],
                  [0, 20]] )
# measurement error covariance.
# NOTE(review): the 0.001 factor shrinks the stated measurement variances by
# 1000x, making the filter trust the measurements very strongly -- confirm
# whether this scaling is intentional.
r3 = np.matrix([[0.001*sigma*sigma, 0],
               [0 , 0.001*sigma3*sigma3]])
# prediction (state-transition) matrix
A = np.matrix ( [[1, dt],
                 [0, 1]] )
# transformation matrix (from state vector to measurement; identity, since
# both state components are measured directly)
H3 = np.matrix ( [[1 , 0],
                  [0, 1]] )
xkal3, xpredict3 = k.kalman_predict(A, r3, H3, p2, xkal3, xpredict3, xmeas3)
figs = plot_results(xkal3, xpredict3, xmeas3, xtrue)
plt.show()
"""
Explanation: 3. Same problem but with unknown velocity that is also measured
In principle this should be better than section 2 -- why isn't it? The additional
measurement (of the velocity) should improve the Kalman estimate.
However, the filter already knows about the velocity through the transition matrix A
and through the initial value we give to xkal and xpredict.
End of explanation
"""
|
jdhp-docs/python-notebooks | opendata_observatoires_des_loyers_fr.ipynb | mit | %matplotlib inline
#%matplotlib notebook
import matplotlib
matplotlib.rcParams['figure.figsize'] = (9, 9)
import pandas as pd
url = "https://www.data.gouv.fr/fr/datasets/r/1fee314d-c278-424f-a029-a74d877eb185"
# The three yearly extracts share the same CSV dialect (Latin-1,
# semicolon-separated, comma as decimal mark).
_csv_opts = dict(encoding='iso-8859-1', sep=';', decimal=',')

df2016 = pd.read_csv("https://www.data.gouv.fr/fr/datasets/r/1fee314d-c278-424f-a029-a74d877eb185",
                     **_csv_opts)
df2015 = pd.read_csv("https://www.data.gouv.fr/fr/datasets/r/15d902ed-4dc3-457d-9c5d-bfe1151cb573",
                     **_csv_opts)
df2014 = pd.read_csv("https://www.data.gouv.fr/fr/datasets/r/42aaf838-46c9-4434-95a9-00173c6d4627",
                     **_csv_opts)

# stack the three yearly tables into one frame with a fresh index
frames = [df2014, df2015, df2016]
df = pd.concat(frames, ignore_index=True)
"""
Explanation: Observatoire des loyers
Données de l'observatoire des loyers (https://www.observatoires-des-loyers.org/):
- source de données utilisée (fichiers CSV en open data): https://www.data.gouv.fr/fr/datasets/resultats-nationaux-des-observatoires-locaux-des-loyers/
- exemple d'utilisation: https://www.miximum.fr/blog/premiers-tests-avec-le-machine-learning/
Remarque
Ce jeu de données a un gros défaut: les statistiques au niveau des communes (ou des arrondissements) sont disponible que sur https://www.observatoires-des-loyers.org/ mais ont été retirées des fichiers CSV...
Récupération des données
End of explanation
"""
df.shape
df.columns
"""
Explanation: Informations utiles sur les données
End of explanation
"""
#df.Observatory.value_counts().plot.barh()
df.agglomeration.value_counts().plot.barh()
"""
Explanation: Données par agglomération
End of explanation
"""
# Restrict to Paris proper and compare housing-type mix and rent/surface
# by survey year.
# NOTE(review): ``plt`` is not imported in this notebook's visible cells
# (only ``matplotlib`` is) -- confirm a pyplot import exists elsewhere.
dfp = df[df.agglomeration == "Paris intra-muros"]
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(10, 5))
dfp[dfp.Data_year == 2014].Type_habitat.value_counts().plot.pie(y="Type_habitat", ax=ax1)
dfp[dfp.Data_year == 2015].Type_habitat.value_counts().plot.pie(y="Type_habitat", ax=ax2)
#dfp[dfp.Data_year == 2016].Type_habitat.value_counts().plot.pie(y="Type_habitat", ax=ax3)
# Scatter of mean monthly rent vs. mean surface; marker size encodes the
# observation count. NOTE: the column name "nombre_obsservations" is
# misspelled in the source data itself -- keep as-is.
dfp[dfp.Data_year == 2014].plot.scatter(x="moyenne_loyer_mensuel",
                                        y="surface_moyenne",
                                        s=dfp[dfp.Data_year == 2014].nombre_obsservations,
                                        alpha=0.1)
dfp[dfp.Data_year == 2015].plot.scatter(x="moyenne_loyer_mensuel",
                                        y="surface_moyenne",
                                        #s=dfp[dfp.Data_year == 2015].nombre_obsservations,
                                        #c="blue",
                                        alpha=0.5)
"""
Explanation: Paris intra-muros
End of explanation
"""
dfp = df[df.agglomeration == "Agglomération parisienne"]
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(10, 5))
dfp[dfp.Data_year == 2014].Type_habitat.value_counts().plot.pie(y="Type_habitat", ax=ax1)
dfp[dfp.Data_year == 2015].Type_habitat.value_counts().plot.pie(y="Type_habitat", ax=ax2)
dfp[dfp.Data_year == 2014].plot.scatter(x="moyenne_loyer_mensuel",
y="surface_moyenne",
#c=dfp[dfp.Data_year == 2014].nombre_obsservations,
alpha=0.5)
#cmap="Blues")
dfp[dfp.Data_year == 2015].plot.scatter(x="moyenne_loyer_mensuel",
y="surface_moyenne",
#s=dfp[dfp.Data_year == 2015].nombre_obsservations,
#c="blue",
alpha=0.5)
"""
Explanation: Remarque: pourquoi cette grosse différence avec l'année précédente ?
Région Parisienne
End of explanation
"""
dfp = df[df.agglomeration == "Agglomération parisienne (hors Paris)"]
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(10, 5))
dfp[dfp.Data_year == 2014].Type_habitat.value_counts().plot.pie(y="Type_habitat", ax=ax1)
dfp[dfp.Data_year == 2015].Type_habitat.value_counts().plot.pie(y="Type_habitat", ax=ax2)
dfp[dfp.Data_year == 2014].plot.scatter(x="moyenne_loyer_mensuel",
y="surface_moyenne",
#c=dfp[dfp.Data_year == 2014].nombre_obsservations,
alpha=0.5)
#cmap="Blues")
dfp[dfp.Data_year == 2015].plot.scatter(x="moyenne_loyer_mensuel",
y="surface_moyenne",
#s=dfp[dfp.Data_year == 2015].nombre_obsservations,
#c="blue",
alpha=0.5)
"""
Explanation: Région Parisienne
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.15/_downloads/plot_ica_from_raw.ipynb | bsd-3-clause | # Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.preprocessing import ICA
from mne.preprocessing import create_ecg_epochs, create_eog_epochs
from mne.datasets import sample
"""
Explanation: Compute ICA on MEG data and remove artifacts
ICA is fit to MEG raw data.
The sources matching the ECG and EOG are automatically found and displayed.
Subsequently, artifact detection and rejection quality are assessed.
End of explanation
"""
# Load the MNE sample recording and band-pass filter it for ICA.
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# 1-45 Hz band-pass; ICA works best without slow drifts and line noise
raw.filter(1, 45, n_jobs=1, l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
           filter_length='10s', phase='zero-double', fir_design='firwin2')
# NOTE(review): the next two lines annotate 1-11 s as BAD and open a
# blocking interactive plot; they look like leftover debugging, since the
# annotation is overwritten below -- confirm whether they should be removed.
raw.annotations = mne.Annotations([1], [10], 'BAD')
raw.plot(block=True)
# For the sake of example we annotate first 10 seconds of the recording as
# 'BAD'. This part of data is excluded from the ICA decomposition by default.
# To turn this behavior off, pass ``reject_by_annotation=False`` to
# :meth:`mne.preprocessing.ICA.fit`.
raw.annotations = mne.Annotations([0], [10], 'BAD')
"""
Explanation: Setup paths and prepare raw data.
End of explanation
"""
# Other available choices are `infomax` or `extended-infomax`
# We pass a float value between 0 and 1 to select n_components based on the
# percentage of variance explained by the PCA components.
ica = ICA(n_components=0.95, method='fastica')
# restrict the decomposition to MEG channels, excluding channels marked bad
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
                       stim=False, exclude='bads')
# decim=3 subsamples the data for speed; reject drops segments whose
# magnetometer/gradiometer amplitudes exceed the given thresholds
ica.fit(raw, picks=picks, decim=3, reject=dict(mag=4e-12, grad=4000e-13))
# maximum number of components to reject
n_max_ecg, n_max_eog = 3, 1  # here we don't expect horizontal EOG components
"""
Explanation: 1) Fit ICA model using the FastICA algorithm.
End of explanation
"""
title = 'Sources related to %s artifacts (red)'
# generate ECG epochs use detection via phase statistics
ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5, picks=picks)
ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, method='ctps')
ica.plot_scores(scores, exclude=ecg_inds, title=title % 'ecg', labels='ecg')
show_picks = np.abs(scores).argsort()[::-1][:5]
ica.plot_sources(raw, show_picks, exclude=ecg_inds, title=title % 'ecg')
ica.plot_components(ecg_inds, title=title % 'ecg', colorbar=True)
ecg_inds = ecg_inds[:n_max_ecg]
ica.exclude += ecg_inds
# detect EOG by correlation
eog_inds, scores = ica.find_bads_eog(raw)
ica.plot_scores(scores, exclude=eog_inds, title=title % 'eog', labels='eog')
show_picks = np.abs(scores).argsort()[::-1][:5]
ica.plot_sources(raw, show_picks, exclude=eog_inds, title=title % 'eog')
ica.plot_components(eog_inds, title=title % 'eog', colorbar=True)
eog_inds = eog_inds[:n_max_eog]
ica.exclude += eog_inds
"""
Explanation: 2) identify bad components by analyzing latent sources.
End of explanation
"""
# estimate average artifact
ecg_evoked = ecg_epochs.average()
ica.plot_sources(ecg_evoked, exclude=ecg_inds) # plot ECG sources + selection
ica.plot_overlay(ecg_evoked, exclude=ecg_inds) # plot ECG cleaning
eog_evoked = create_eog_epochs(raw, tmin=-.5, tmax=.5, picks=picks).average()
ica.plot_sources(eog_evoked, exclude=eog_inds) # plot EOG sources + selection
ica.plot_overlay(eog_evoked, exclude=eog_inds) # plot EOG cleaning
# check the amplitudes do not change
ica.plot_overlay(raw) # EOG artifacts remain
# To save an ICA solution you can say:
# ica.save('my_ica.fif')
# You can later load the solution by saying:
# from mne.preprocessing import read_ica
# read_ica('my_ica.fif')
# Apply the solution to Raw, Epochs or Evoked like this:
# ica.apply(epochs)
"""
Explanation: 3) Assess component selection and unmixing quality.
End of explanation
"""
|
eggie5/UCSD-MAS-DSE220 | hmwk1/Hmwk 1.ipynb | mit | import pandas as pd
%pylab inline
"""
Explanation: Hmwk #1
End of explanation
"""
df = pd.read_csv("weather.csv", header=0, index_col=0)
df
"""
Explanation: Represent the following table using a data structure of your choice
End of explanation
"""
mean_temp = df["temperature"].mean()
mean_temp
mean_humidity = df["humidity"].mean()
mean_humidity
"""
Explanation: Calculate the mean temperature and mean humidity
End of explanation
"""
temp_selector = df['temperature'] > mean_temp
df[temp_selector][["outlook", "play"]]
"""
Explanation: Print outlook and play for those days where the temperature is greater than the average temperature
End of explanation
"""
humidity_selector = df['humidity'] > mean_humidity
df[humidity_selector][["outlook", "play"]]
"""
Explanation: Print outlook and play for those days where the humidity is greater than the average humidity
End of explanation
"""
df["temp_C"] = ( df["temperature"] - 32 ) * (5/9.0)
df
"""
Explanation: Convert the temperature to Celsius and add a new column therefore in the table.
End of explanation
"""
play_selector = df["play"]=="yes"
play_days = df[play_selector]
len(play_days)
"""
Explanation: #1
How often do you play tennis independent of the other attributes?
From the output we can see that we played tennis 9 days, with a probability of 9/14
End of explanation
"""
sunny_selector = df["outlook"]=="sunny"
sunny_play_days = df[sunny_selector & play_selector]
len(sunny_play_days)
"""
Explanation: #2
How often do you play tennis when it is "sunny"?
From the output we can see that we played when it was sunny on 2 days or 2/14
End of explanation
"""
print play_days["temperature"].mean()
print play_days["temperature"].min()
print play_days["temperature"].max()
"""
Explanation: #3
*Compare the average, minimum and maximum temperature when you play tennis? *
End of explanation
"""
print play_days["humidity"].mean()
print play_days["humidity"].min()
print play_days["humidity"].max()
"""
Explanation: #4
Compare the average, minimum and maximum humidity when you play tennis?
End of explanation
"""
pyplot.ylabel('Temperature')
pyplot.xlabel("Humidity")
pyplot.scatter(x=play_days["humidity"], y=play_days["temperature"], c='green')
no_play_days = df[df["play"]=="no"]
pyplot.scatter(x=no_play_days["humidity"], y=no_play_days["temperature"], c='red', marker="x")
pyplot.legend(['Play', "No Play"])
"""
Explanation: #5
Plot the an scatter plot (x,y diagramm) of humidity (x) and temperature (y) when you play tennis compared to when you do not play tennis.
End of explanation
"""
# The census files store populations in units of thousands; these first
# seven decade files share one fixed-width format.
df1 = pd.read_fwf("processed/st0009ts.txt", header=0, index_col=0, thousands=",").transpose()
df2 = pd.read_fwf("processed/st1019ts.txt", header=0, index_col=0, thousands=",").transpose()
df3 = pd.read_fwf("processed/st2029ts.txt", header=0, index_col=0, thousands=",").transpose()
df4 = pd.read_fwf("processed/st3039ts.txt", header=0, index_col=0, thousands=",").transpose()
df5 = pd.read_fwf("processed/st4049ts.txt", header=0, index_col=0, thousands=",").transpose()
df6 = pd.read_fwf("processed/st5060ts.txt", header=0, index_col=0, thousands=",").transpose()
df7 = pd.read_fwf("processed/st6070ts.txt", header=0, index_col=0, thousands=",").transpose()
df = pd.concat([df1, df2, df3, df4, df5, df6, df7])
# scale from thousands to individuals (vectorized; the original used
# df.apply(lambda x: x*1000), which is an element-wise multiply anyway)
df = df * 1000
# for some reason, this dataset format uses '.'s in U.S. but doesn't for
# anything else; normalize the column name here
df[["U.S."]]
df.rename(columns={'U.S.': 'US'}, inplace=True)
#the file format changes here
transform = lambda x: "19"+x[2:4]
df_9 = pd.read_fwf("processed/st7080ts.txt", header=0, index_col=0, thousands=",").transpose()
df_9.index = df_9.index.map(transform)
df_10 = pd.read_fwf("processed/st8090ts.txt", header=0, index_col=0, thousands=",").transpose()
df_10.index = df_10.index.map(transform)
df_10
df_2 = pd.concat([df_9, df_10])
"""
Explanation: The only inferences I can make from the scatter plot above, is that you always play when the humidity is between 70 and 85. Temperature seems to play no part of the decision process when you go out to play as from teh scatter plot the plays and no play poionts are evenly distributed across the y axis (Temperature).
#2
We have a set of 8 files where the first 7 and the last 2 have a different format. First I removed the header information from the files and removed any superfolous line breaks, I then read them into pandas in two respective groups. I then had to normalize the dates of teh second dataset to match the dates of the first. Also I had to normalize the values of the first dataset b/c they were in units of 1000, so I made it in units of 1.
Cleaning & Normalization
End of explanation
"""
# now merge the two together to get the compleete mergered df
df = pd.concat([df, df_2])
df=df.sort_index() #sort
"""
Explanation: Merging
End of explanation
"""
df[["CA", "AK"]].plot()
"""
Explanation: Plot CA vs AK
End of explanation
"""
df["New England"] = df[["CT", "ME", "MA", "NH", "RI", "VT"]].sum(axis=1)
df["Southwest"] = df[["AZ", "CA", "CO", "NV", "NM", "TX", "UT"]].sum(axis=1)
df[["New England", "Southwest"]].plot()
"""
Explanation: New England vs Southwest
In order to plot these values I have to do some feature engineering to create columns for the respective regions that are not in the original dataset.
For New England I used: CT, ME, MA, NH, RI, VT
For the Southwest, I used: AZ, CA, CO, NV, NM, TX, UT
Feature Engineering
End of explanation
"""
# remove a few composite columns so only individual states remain:
df.drop('US', axis=1, inplace=True)
df.drop('Southwest', axis=1, inplace=True)
df.drop('New England', axis=1, inplace=True)
# absolute and relative (%) population change per state
# NOTE(review): .iloc[50] assumes row 50 of the sorted index is the year
# 1950 -- confirm against the concatenated index.
delta = {}
rel_delta = {}
for state in df.columns:
    first = df[state].iloc[50]
    last = df[state].iloc[-1]
    delta[state] = last - first
    rel_delta[state] = (last - first) / first * 100
ddf = pd.DataFrame(delta, index=["delta"]).transpose()
# DataFrame.sort was removed in pandas 0.20; sort_values is the replacement
ddf = ddf.sort_values("delta", ascending=False)
ddf.head()
"""
Explanation: Greatest Change in Population
We can quantify population growth in direct terms or relativly using percentages:
Magnitude Delta
We don't have measurements for Alaska until 1950, so if we compare growth from 1950 in terms of pure magnitude, the top states are shown below:
End of explanation
"""
# rank states by relative (%) population growth
ddp = pd.DataFrame(rel_delta, index=["% change"]).transpose()
# DataFrame.sort was removed in pandas 0.20; sort_values is the replacement
ddp = ddp.sort_values("% change", ascending=False)
ddp.head()
"""
Explanation: As you can see from teh table above, CA had the largest growth in terms of raw numbers for the time period. However, we can gain additional insites by looking at percentatge growth.
Relative Growth
We can also measure growth as a percenagte difference: As you can see Nevada had the largest percent growth from 1950 to 1900
End of explanation
"""
ddp.tail(n=10)
"""
Explanation: Some states had no net growth and some had negative growth:
End of explanation
"""
from sklearn import tree
import numpy as np
wine = np.loadtxt("wine.data", delimiter=',')
#Get the targets (first column of file)
Y = wine[:, 0]
#Remove targets from input data
X = wine[:, 1:]
"""
Explanation: 3
4
We will use a Decision Tree to build a model that will allow us to classify wines into one of three categories.
End of explanation
"""
# let's split into a test and training set (75% / 25%)
# sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=9)
"""
Explanation: Test/Train Split
Split the dat set into 75% for training and 25% for testing
End of explanation
"""
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, Y_train)
clf.score(X_test, Y_test)
"""
Explanation: Train Model
I used the defaults for the DecisionTreeClasifier package in scikit
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues,
                          classes=("1", "2", "3")):
    """Render a confusion matrix as a heat map with labelled axes.

    Parameters
    ----------
    cm : array-like of shape (n_classes, n_classes)
        Confusion matrix, e.g. from sklearn.metrics.confusion_matrix.
    title : str
        Figure title.
    cmap : matplotlib colormap
        Colormap for the heat map.
    classes : sequence of str
        Tick labels for the classes. Defaults to the three wine classes,
        so existing calls are unchanged; pass a different sequence to
        reuse the function for other problems.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    # one tick per class, on both axes
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, list(classes), rotation=45)
    plt.yticks(tick_marks, list(classes))
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Compute confusion matrix
from sklearn.metrics import confusion_matrix
y_true = Y_test
y_pred = clf.predict(X_test)
cm = confusion_matrix(y_true, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
plt.show()
"""
Explanation: Evaluation
Evaluate based on confusion matrix how well the model performed on training vs. testing.
End of explanation
"""
odf = pd.read_csv("hmwk_1_data/AHW_1.csv")
odf.head()
"""
Explanation: As you can see from the confusion matrix, inputs of Class 1 & 2 were perfectly classified. There were only 2 mistakes on Class 3.
5
End of explanation
"""
odf["Age"].plot(kind="hist")
odf["Age"].describe()
odf["Weight"].plot(kind="hist")
odf["Weight"].describe()
odf["Height"].plot(kind="hist")
odf["Height"].describe()
"""
Explanation: What are the statistical distributions of variables using no class?
End of explanation
"""
odf.isnull().sum()
"""
Explanation: How much missing data is there?
End of explanation
"""
male = odf["Sex"]=="M"
female = odf["Sex"]=="F"
odf[male]["Age"].plot(kind="hist")
odf[female]["Age"].plot(kind="hist")
odf[male]["Weight"].plot(kind="hist")
odf[female]["Weight"].plot(kind="hist")
odf[male]["Height"].plot(kind="hist")
odf[female]["Height"].plot(kind="hist")
"""
Explanation: How do distributions differ by each gender?
End of explanation
"""
odf.describe()
"""
Explanation: Describe summary statistics for each attribute.
End of explanation
"""
from pandas.tools.plotting import scatter_matrix
pd.scatter_matrix(odf, alpha=0.2, figsize=(10, 10), diagonal='kde')
"""
Explanation: Visualize potential difference via the scatter plots.
Are there any ‘high’ correlations between variables?
We can see a correlation between height and weight
End of explanation
"""
odf["lbs"] = odf["Weight"] * 2.20462
odf.head()
pd.scatter_matrix(odf, alpha=0.2, figsize=(10, 10), diagonal='kde')
"""
Explanation: Create a new variable for the weight in lbs
Creating a new column for pounds obviously does not create any new correlations b/c it simply a linear combination w/ the kg weight.
End of explanation
"""
odf["w+h"] = odf["Weight"] + odf["Height"]
odf.drop('lbs', axis=1, inplace=True)
odf.head()
pd.scatter_matrix(odf, alpha=0.2, figsize=(10, 10), diagonal='kde')
"""
Explanation: Add new variable weight + height.
End of explanation
"""
odf["BMI"] = odf["Weight"] / ((odf["Height"]*0.01)**2)
odf.head()
odf[male]["BMI"].plot(kind="hist")
odf[female]["BMI"].plot(kind="hist")
print odf[male]["BMI"].describe()
print
print odf[female]["BMI"].describe()
"""
Explanation: BMI
There appears to be obese males and females in the dataset
End of explanation
"""
# unique sports present in the data.
# NOTE(review): set iteration order is not stable across interpreter runs,
# so the list order -- and therefore the random sample below -- is not
# reproducible even with a fixed random seed.
sports = list(set(odf["Sport"]))
sports
# choose 3 random sports
sports
import random
random_sports = random.sample(sports, 3)
for sport in random_sports:
    # height-vs-weight scatter for each sampled sport
    sport_selector = odf["Sport"] == sport
    odf[sport_selector].plot(kind="scatter", x="Height", y="Weight", marker='x')
"""
Explanation: Split Data By Sport
End of explanation
"""
|
FlyRanch/figurefirst | examples/regenerate/regenerate_notebook.ipynb | mit | import numpy as np
import figurefirst
fifi = figurefirst  # short alias used throughout the notebook
from IPython.display import display,SVG,Markdown
# Load the SVG template and create a matplotlib figure for each layout axis.
layout = fifi.FigureLayout('figure_template.svg', hide_layers=['template'])
layout.make_mplfigures(hide=True)
"""
Explanation: Saving figure source data
Many scientific journals are (for good reason) requiring that authors upload the source data for their figures. For complex analysis pipelines this can be complicated and frustrating. FigureFirst to the rescue!
This notebook demonstrates how you can use FigureFirst to save the source data for your figures automatically. Source data here means the data, function calls, and the arguments to those functions.
With the FigureFirst formatted source data in hand, you can easily rebuild the figure, or output the data to a human readable CSV (Markdown) file.
End of explanation
"""
# Reset any previously saved source data for every axis in the layout.
for key, axis in layout.axes.items():
    print (key)
    # note, you can use the data filename, or the layout filename (as long as you use the defaults)
    fifi.regenerate.clear_fifidata('figure_template.svg', key)
"""
Explanation: Clear out the data for all the figures and axes
End of explanation
"""
ax = layout.axes[('figure_1', 'axis_a')]
ax.breadcrumb
"""
Explanation: All the magic happens through the FFAxis class, which is a wrapper for the MatPlotLib Axis
To keep track of which axis you're working with you can use the breadcrumb
End of explanation
"""
ax = layout.axes[('figure_1', 'axis_a')]
# make some fake data
x = np.linspace(0,10,100)
y = np.sin(x)
# call matplotlib's plot function with the figurefirst wrapper, which saves the data
title = 'Sine wave for ' + ax.breadcrumb['layout_key'][1]
argument_1 = 'Time'
argument_2 = 'Response'
ax._plot([title, argument_1, argument_2], x, y, color='blue')
"""
Explanation: Use matplotlib to plot some data on figure_1 axis_a
End of explanation
"""
ax = layout.axes[('figure_1', 'axis_b')]
# use figurefirst wrapper for adding a patch
# note: matplotlib function add_artist does not work in regeneration step (artists cannot be pickled)
ax._add_mpl_patch(['This is a rectangle'], 'Rectangle', (3, 0), 1.5, 1, fill=False, color='red', linewidth=1)
# Generally we recommend using '_' notation, and including a title and description of the arguments
# However, for quick formatting calls, there is a faster notation. This does not work with custom functions.
# First set record to True
ax.record = True
# Then make your function calls as usual
# matplotlib functions
ax.set_xlim(0,5)
ax.set_ylim(-1,1)
# figurefirst.mpl_functions
ax.adjust_spines(['left', 'bottom'])
ax.set_fontsize(6)
"""
Explanation: Use some functions in figurefirst.mpl_functions on figure_1 axis_b
End of explanation
"""
ax = layout.axes[('figure_2', 'axis_a')]
def foo(ax, x, list_of_noisy_ys, color='green'):
    """Plot the element-wise mean of several noisy traces as a bold line,
    then draw each individual trace faintly underneath (same color, low alpha).
    """
    # Bold line: mean across all traces at each x position.
    ax.plot(x, np.mean(list_of_noisy_ys, axis=0), color=color, linewidth=3)
    # Faint lines: every individual noisy trace.
    for trace in list_of_noisy_ys:
        ax.plot(x, trace, color=color, linewidth=1, alpha=0.2)
# save a custom plotting function in the data file
# Build six noisy copies of the sine wave y defined earlier.
list_of_noisy_ys = []
for i in range(6):
    noisy_y = y + np.random.uniform(-0.5, 0.5, len(y))
    list_of_noisy_ys.append(noisy_y)
ax._custom(['Plot line and dots', 'Time', 'List of y values'], foo, x, list_of_noisy_ys, color='magenta')
"""
Explanation: Use a pickle-able user defined plotting function on figure_2 axis_a
End of explanation
"""
# Push the rendered matplotlib figures back into the SVG layers and save.
layout.append_figure_to_layer(layout.figures['figure_1'], 'figure_1', cleartarget=True)
layout.append_figure_to_layer(layout.figures['figure_2'], 'figure_2', cleartarget=True)
svg = 'figure_output.svg'
layout.write_svg(svg)
SVG(svg)
"""
Explanation: Use a custom function from another package:
ax._custom(['Title', 'Arg description], 'package.module.function', args, *kwargs)
Save figures to layout and write svg
End of explanation
"""
# first clear anything that is there
fifi.regenerate.clear_fifidata(layout.data_filename, layout_key='Supplemental Data')
a = [np.random.random(10) for i in range(5)]
b = [1,2,3,4]
layout.write_fifidata(['Title of Arbitrary Data', 'Description of Data A', 'Description of Data B'],
                      a, b)
"""
Explanation: You can also write arbitrary data to the file using a similar interface through the layout object
End of explanation
"""
# Rebuild the figure purely from the saved source data.
fifi.regenerate.replot('figure_template.svg', output_filename='new_figure_output.svg')
svg = 'new_figure_output.svg'
layout.set_layer_visibility('Layer 1',False)
layout.write_svg(svg)
SVG(svg)
"""
Explanation: Regenerate the figure from the saved data
End of explanation
"""
data = fifi.regenerate.load_data_file('figure_template.svg') # you can either use the layout or data filename
"""
Explanation: Take a look at the data file
End of explanation
"""
data[('figure_1', 'axis_a')]
"""
Explanation: Here are all the plotting actions, data, and settings called for figure_1 axis_a
End of explanation
"""
# This is optional, but helps to connect the data in the markdown / csv file to the actual panel names you have
# If left blank (None), the Panel names will just be the layout_keys reformatted, e.g. 'figure_1_axis_b')
panel_id_to_layout_keys = {'a': [('figure_1', 'axis_a'), ('figure_1', 'axis_b')],
                           'b': [('figure_2', 'axis_a')]}
# Define a figure number
figure_number = 1
# Header, optional
header = '# This file contains the data needed for generating figure 1\n### FigureFirst example by Floris van Breugel'
fifi.regenerate.write_to_csv('figure_template_data.dillpickle', figure_number, \
                             panel_id_to_layout_keys, header=header)
# Take a look at the file. If you need a ".csv" file, just change the extension.
# Markdown files can be displayed nicely in Chrome:
# https://stackoverflow.com/questions/9843609/view-markdown-files-offline
with open('figure_template_data_summary.md', 'r') as fh:
    content = fh.read()
display(Markdown(content))
"""
Explanation: If you need a more standard and human readable format, you can convert the data file into a markdown / csv file
Only data that has argument descriptions associated with it will be saved. This prevents clogging the file with tick marks, etc. The titles and descriptions are drawn from the data file, so use descriptive titles when writing the code!
Because we did not provide descriptions for the arguments to the rectangle call, it's data is not saved.
End of explanation
"""
|
pysg/pyther | Modelo de impregnacion/modelo2/Activité 10_1.ipynb | mit | import numpy as np
from scipy import integrate
from matplotlib.pylab import *
def tank(t, y):
    """
    Dynamic balance for a CSTR.

    C_A = y[0] is the concentration of A in the tank [mol/L].
    Returns dC_A/dt = F/V*(C_A,in - C_A) - k*C_A**2 as a column vector
    (n rows, 1 column), the shape scipy.integrate.ode expects.
    """
    # Fixed operating conditions of the reactor.
    flow = 20.1        # feed rate, L/min
    feed_conc = 2.5    # inlet concentration, mol/L
    volume = 100       # tank volume, L
    rate_const = 0.15  # reaction rate constant, L/(mol.min)

    conc = y[0]
    # Column-vector output; len(y) == 1 since this is a single ODE.
    derivs = np.zeros((len(y), 1))
    derivs[0] = flow/volume*(feed_conc - conc) - rate_const*conc**2
    return derivs
# The ``driver`` that will integrate the ODE(s):
#if __name__ == '__main__':
# Start by specifying the integrator:
# use ``vode`` with "backward differentiation formula"
r = integrate.ode(tank).set_integrator('vode', method='bdf')
# Set the time range
t_start = 0.0
t_final = 5.0
delta_t = 0.1
# Number of time steps: 1 extra for initial condition.
# FIX: cast to int -- np.floor() returns a float, and a float num_steps makes
# np.zeros((num_steps, 1)) below raise a TypeError on NumPy >= 1.12 (array
# shapes must be integers). The printed value becomes 51 instead of 51.0.
num_steps = int(np.floor((t_final - t_start)/delta_t)) + 1
print(num_steps)
# Set initial condition(s): for integrating variable and time!
CA_t_zero = 0.5
r.set_initial_value([CA_t_zero], t_start)
# Additional Python step: create vectors to store trajectories
t = np.zeros((num_steps, 1))
CA = np.zeros((num_steps, 1))
t[0] = t_start
CA[0] = CA_t_zero
# Integrate the ODE(s) across each delta_t timestep
k = 1
while r.successful() and k < num_steps:
    r.integrate(r.t + delta_t)
    # Store the results to plot later
    t[k] = r.t
    CA[k] = r.y[0]
    k += 1
# All done! Plot the trajectories:
plot(t, CA)
grid('on')
xlabel('Time [minutes]')
ylabel('Concentration [mol/L]')
"""
Explanation: Evaluation des modèles pour l'extraction supercritique
L'extraction supercritique est de plus en plus utilisée afin de retirer des matières organiques de différents liquides ou matrices solides. Cela est dû au fait que les fluides supercritiques ont des avantages non négligeables par rapport aux autres solvants, ils ont des caractéristiques comprises entre celles des gaz et celles des solides. En changeant la température et la pression ils peuvent capter des composés différents, ils sont donc très efficaces.
Le méchanisme de l'extraction supercritique est le suivant :
- Transport du fluide vers la particule, en premier lieu sur sa surface et en deuxième lieu a l'intérieur de la particule par diffusion
- Dissolution du soluté avec le fluide supercritique
- Transport du solvant de l'intérieur vers la surface de la particule
- Transport du solvant et des solutés de la surface de la particule vers la masse du solvant
A - Le modèle de Reverchon :
Afin d'utiliser ce modèle, définissons les variables qui vont y être admises, ci-dessous la nomenclature du modèle :
Le modèle :
Il est basé sur l'intégration des bilans de masses différentielles tout le long de l'extraction, avec les hypothèses suivants :
- L'écoulement piston existe à l'intérieur du lit, comme le montre le schéma ci-contre :
- La dispersion axiale du lit est négligeable
- Le débit, la température et la pression sont constants
Cela nous permet d'obtenir les équations suivantes :
- $uV.\frac{\partial c_{c}}{\partial t}+eV.\frac{\partial c_{c}}{\partial t}+ AK(q-q) = 0$
- $(1-e).V.uV\frac{\partial c_{q}}{\partial t}= -AK(q-q*)$
Les conditions initiales sont les suivantes : C = 0, q=q0 à t = 0 et c(0,t) à h=0
La phase d'équilibre est : c = k.q*
Sachant que le fluide et la phase sont uniformes à chaque stage, nous pouvons définir le modèle en utilisant les équations différentielles ordinaires (2n). Les équations sont les suivantes :
- $(\frac{W}{p}).(Cn- Cn-1) + e (\frac{v}{n}).(\frac{dcn}{dt})+(1-e).(\frac{v}{n}).(\frac{dcn}{dt}) = 0$
- $(\frac{dqn}{dt} = - (\frac{1}{ti})(qn-qn*)$
- Les conditions initiales sont : cn = 0, qn = q0 à t = 0
Ejemplo ODE
End of explanation
"""
import numpy as np
from scipy import integrate
import matplotlib.pyplot as plt
def vdp1(t, y):
    """Right-hand side of the Van der Pol oscillator with mu = 1:
    dy0/dt = y1,  dy1/dt = (1 - y0**2) * y1 - y0.
    """
    pos, vel = y[0], y[1]
    return np.array([vel, (1 - pos ** 2) * vel - pos])
t0, t1 = 0, 20  # start and end
t = np.linspace(t0, t1, 100)  # the points of evaluation of solution
y0 = [2, 0]  # initial value
y = np.zeros((len(t), len(y0)))  # array for solution
y[0, :] = y0
r = integrate.ode(vdp1).set_integrator("dopri5")  # choice of method
r.set_initial_value(y0, t0)  # initial values
# Step the integrator to each requested time point, storing the state.
for i in range(1, t.size):
    y[i, :] = r.integrate(t[i])  # get one more value, add it to the array
    if not r.successful():
        raise RuntimeError("Could not integrate")
plt.plot(t, y)
plt.show()
"""
Explanation: Ejemplo 2 funciona
End of explanation
"""
|
mbakker7/ttim | pumpingtest_benchmarks/13_multiwell_slug_test-.ipynb | mit | %matplotlib inline
from ttim import *
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
"""
Explanation: Slug Test for Confined Aquifer
This test is taken from examples of AQTESOLV.
End of explanation
"""
H0 = 2.798 #initial displacement in m
b = -6.1 #aquifer thickness
rw1 = 0.102 #well radius of Ln-2 Well
rw2 = 0.071 #well radius of observation Ln-3 Well
rc1 = 0.051 #casing radius of Ln-2 Well
rc2 = 0.025 #casing radius of Ln-3 Well
r = 6.45 #distance from observation well to test well
"""
Explanation: Set background parameters:
End of explanation
"""
Q = np.pi * rc1 ** 2 * H0
print('Slug:', round(Q, 5), 'm^3')
"""
Explanation: Slug:
End of explanation
"""
data1 = np.loadtxt('data/ln-2.txt')
t1 = data1[:, 0] / 60 / 60 / 24 #convert time from seconds to days
h1 = data1[:, 1]
data2 = np.loadtxt('data/ln-3.txt')
t2 = data2[:, 0] / 60 / 60 / 24
h2 = data2[:, 1]
"""
Explanation: Load data:
End of explanation
"""
ml_0 = ModelMaq(kaq=10, z=[0, b], Saq=1e-4, \
tmin=1e-5, tmax=0.01)
w_0 = Well(ml_0, xw=0, yw=0, rw=rw1, rc=rc1, tsandQ=[(0, -Q)], layers=0, wbstype='slug')
ml_0.solve()
"""
Explanation: Create single layer conceptual model:
End of explanation
"""
#unknown parameters: kaq, Saq
ca_0 = Calibrate(ml_0)
ca_0.set_parameter(name='kaq0', initial=10)
ca_0.set_parameter(name='Saq0', initial=1e-4)
ca_0.series(name='Ln-2', x=0, y=0, layer=0, t=t1, h=h1)
ca_0.series(name='Ln-3', x=r, y=0, layer=0, t=t2, h=h2)
ca_0.fit(report=True)
display(ca_0.parameters)
print('RMSE:', ca_0.rmse())
hm1_0 = ml_0.head(0, 0, t1, layers=0)
hm2_0 = ml_0.head(r, 0, t2, layers=0)
plt.figure(figsize=(8, 5))
plt.semilogx(t1, h1/H0, '.', label='obs ln-2')
plt.semilogx(t1, hm1_0[0]/H0, label='ttim ln-2')
plt.semilogx(t2, h2/H0, '.', label='obs ln-3')
plt.semilogx(t2, hm2_0[0]/H0, label='ttim ln-3')
plt.xlabel('time(d)')
plt.ylabel('h/H0')
plt.legend();
"""
Explanation: Calibrate with two datasets simultaneously:
End of explanation
"""
# Variant model ml_1: same as ml_0 but with well skin resistance (res) free.
ml_1 = ModelMaq(kaq=10, z=[0, b], Saq=1e-4, \
                tmin=1e-5, tmax=0.01)
w_1 = Well(ml_1, xw=0, yw=0, rw=rw1, res=0, rc=rc1, tsandQ=[(0, -Q)], layers=0, wbstype='slug')
ml_1.solve()
#unknown parameters: kaq, Saq, res
ca_1 = Calibrate(ml_1)
ca_1.set_parameter(name='kaq0', initial=10)
ca_1.set_parameter(name='Saq0', initial=1e-4)
ca_1.set_parameter_by_reference(name='res', parameter=w_1.res, initial=0)
ca_1.series(name='Ln-2', x=0, y=0, layer=0, t=t1, h=h1)
ca_1.series(name='Ln-3', x=r, y=0, layer=0, t=t2, h=h2)
ca_1.fit(report=True)
display(ca_1.parameters)
print('RMSE:', ca_1.rmse())
hm1_1 = ml_1.head(0, 0, t1, layers=0)
hm2_1 = ml_1.head(r, 0, t2, layers=0)
plt.figure(figsize=(8, 5))
plt.semilogx(t1, h1/H0, '.', label='obs ln-2')
plt.semilogx(t1, hm1_1[0]/H0, label='ttim ln-2')
plt.semilogx(t2, h2/H0, '.', label='obs ln-3')
plt.semilogx(t2, hm2_1[0]/H0, label='ttim ln-3')
plt.xlabel('time(d)')
plt.ylabel('h/H0')
plt.legend();
"""
Explanation: Try adding well skin resistance res:
End of explanation
"""
#Determine elevations of each layer.
#Thickness of each layer is set to be 0.5 m.
z = np.arange(0, b, -0.5)
zlay = np.append(z, b)
nlay = len(zlay) - 1
Saq_2 = 1e-4 * np.ones(nlay)
n = np.arange(0, 13,1)
# Multilayer (3D) model: the well is screened across all 13 sub-layers.
ml_2 = Model3D(kaq=10, z=zlay, Saq=Saq_2, kzoverkh=1, tmin=1e-5, tmax=0.01, \
               phreatictop=True)
w_2 = Well(ml_2, xw=0, yw=0, rw=rw1, tsandQ=[(0, -Q)], layers=n, rc=rc1, \
           wbstype='slug')
ml_2.solve()
"""
Explanation: Adding well screen resistance does not improve the performance obviously. While the AIC value increases. Thus, res should be removed from the model.
Try multilayer conceptual model:
End of explanation
"""
ca_2 = Calibrate(ml_2)
ca_2.set_parameter(name='kaq0_12', initial=10)
ca_2.set_parameter(name='Saq0_12', initial=1e-4, pmin=0)
ca_2.series(name='Ln-2', x=0, y=0, layer=n, t=t1, h=h1)
ca_2.series(name='Ln-3', x=r, y=0, layer=n, t=t2, h=h2)
ca_2.fit(report=True)
display(ca_2.parameters)
print('RMSE:', ca_2.rmse())
hm1_2 = ml_2.head(0, 0, t1, layers=n)
hm2_2 = ml_2.head(r, 0, t2, layers=n)
plt.figure(figsize=(8, 5))
plt.semilogx(t1, h1/H0, '.', label='obs ln-2')
plt.semilogx(t1, hm1_2[0]/H0, label='ttim ln-2')
plt.semilogx(t2, h2/H0, '.', label='obs ln-3')
plt.semilogx(t2, hm2_2[0]/H0, label='ttim ln-3')
plt.xlabel('time(d)')
plt.ylabel('h/H0')
plt.legend();
"""
Explanation: Calibrate with two datasets simultaneously:
End of explanation
"""
# Summary table comparing calibrated k and Ss across methods.
t = pd.DataFrame(columns=['k [m/d]', 'Ss [1/m]'], \
                 index=['MLU', 'AQTESOLV', 'ttim-single', 'ttim-multi'])
t.loc['AQTESOLV'] = [1.166, 9.368E-06]
t.loc['MLU'] = [1.311, 8.197E-06]
t.loc['ttim-single'] = ca_0.parameters['optimal'].values
t.loc['ttim-multi'] = ca_2.parameters['optimal'].values
# NOTE(review): the 'ttim-multi' row uses ca_1.rmse() (the rejected skin-
# resistance model) while its parameters come from ca_2 -- ca_2.rmse()
# looks intended; confirm before relying on this table.
t['RMSE'] = [0.010373, 0.009151, ca_0.rmse(), ca_1.rmse()]
t
"""
Explanation: Summary of values presented by AQTESOLV & MLU
End of explanation
"""
|
flohorovicic/pynoddy | docs/notebooks/3-Events-Copy1.ipynb | gpl-2.0 | from IPython.core.display import HTML
# NOTE(review): this notebook is Python 2 (print statements, builtin reload).
css_file = 'pynoddy.css'
HTML(open(css_file, "r").read())
%matplotlib inline
"""
Explanation: Geological events in pynoddy: organisation and adpatiation
We will here describe how the single geological events of a Noddy history are organised within pynoddy. We will then evaluate in some more detail how aspects of events can be adapted and their effect evaluated.
End of explanation
"""
import sys, os
import matplotlib.pyplot as plt
# adjust some settings for matplotlib
from matplotlib import rcParams
# print rcParams
rcParams['font.size'] = 15
# determine path of repository to set paths correctly below
repo_path = os.path.realpath('../..')
import pynoddy
import pynoddy.history
import pynoddy.events
import pynoddy.output
reload(pynoddy)
# Change to sandbox directory to store results
os.chdir(os.path.join(repo_path, 'sandbox'))
# Path to example directory in this repository
example_directory = os.path.join(repo_path,'examples')
# Compute noddy model for history file
history = 'simple_two_faults.his'
history_ori = os.path.join(example_directory, history)
output_name = 'noddy_out'
reload(pynoddy.history)
reload(pynoddy.events)
H1 = pynoddy.history.NoddyHistory(history_ori)
# Before we do anything else, let's actually define the cube size here to
# adjust the resolution for all subsequent examples
H1.change_cube_size(100)
# compute model - note: not strictly required, here just to ensure changed cube size
H1.write_history(history)
pynoddy.compute_model(history, output_name)
"""
Explanation: Loading events from a Noddy history
In the current set-up of pynoddy, we always start with a pre-defined Noddy history loaded from a file, and then change aspects of the history and the single events. The first step is therefore to load the history file and to extract the single geological events. This is done automatically as default when loading the history file into the History object:
End of explanation
"""
H1.events
"""
Explanation: Events are stored in the object dictionary "events" (who would have thought), where the key corresponds to the position in the timeline:
End of explanation
"""
H1.events[3].properties
"""
Explanation: We can see here that three events are defined in the history. Events are organised as objects themselves, containing all the relevant properties and information about the events. For example, the second fault event is defined as:
End of explanation
"""
H1 = pynoddy.history.NoddyHistory(history_ori)
# get the original dip of the fault
dip_ori = H1.events[3].properties['Dip']
# add 10 degrees to dip
add_dip = -10
dip_new = dip_ori + add_dip
# and assign back to properties dictionary:
H1.events[3].properties['Dip'] = dip_new
# H1.events[2].properties['Dip'] = dip_new1
new_history = "dip_changed"
new_output = "dip_changed_out"
H1.write_history(new_history)
pynoddy.compute_model(new_history, new_output)
# load output from both models
NO1 = pynoddy.output.NoddyOutput(output_name)
NO2 = pynoddy.output.NoddyOutput(new_output)
# create basic figure layout
fig = plt.figure(figsize = (15,5))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
NO1.plot_section('y', position=0, ax = ax1, colorbar=False, title="Dip = %.0f" % dip_ori, savefig=True, fig_filename ="tmp.eps")
NO2.plot_section('y', position=1, ax = ax2, colorbar=False, title="Dip = %.0f" % dip_new)
plt.show()
"""
Explanation: Changing aspects of geological events
So what we now want to do, of course, is to change aspects of these events and to evaluate the effect on the resulting geological model. Parameters can directly be updated in the properties dictionary:
End of explanation
"""
H1 = pynoddy.history.NoddyHistory(history_ori)
# The names of the two fault events defined in the history file are:
print H1.events[2].name
print H1.events[3].name
"""
Explanation: Changing the order of geological events
The geological history is parameterised as single events in a timeline. Changing the order of events can be performed with two basic methods:
Swapping two events with a simple command
Adjusting the entire timeline with a complete remapping of events
The first method is probably the most useful to test how a simple change in the order of events will effect the final geological model. We will use it here with our example to test how the model would change if the timing of the faults is swapped.
The method to swap two geological events is defined on the level of the history object:
End of explanation
"""
# Now: swap the events:
H1.swap_events(2,3)
# And let's check if this is correctly reflected in the events order now:
print H1.events[2].name
print H1.events[3].name
"""
Explanation: We now swap the position of two events in the kinematic history. For this purpose, a high-level function can directly be used:
End of explanation
"""
new_history = "faults_changed_order.his"
new_output = "faults_out"
H1.write_history(new_history)
pynoddy.compute_model(new_history, new_output)
reload(pynoddy.output)
# Load and compare both models
NO1 = pynoddy.output.NoddyOutput(output_name)
NO2 = pynoddy.output.NoddyOutput(new_output)
# create basic figure layout
fig = plt.figure(figsize = (15,5))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
NO1.plot_section('y', ax = ax1, colorbar=False, title="Model 1")
NO2.plot_section('y', ax = ax2, colorbar=False, title="Model 2")
plt.show()
"""
Explanation: Now let's create a new history file and evaluate the effect of the changed order in a cross section view:
End of explanation
"""
# Voxel-wise difference of the two geology blocks; non-zero where the
# changed fault order altered the stratigraphy.
diff = (NO2.block - NO1.block)
"""
Explanation: Determining the stratigraphic difference between two models
Just as another quick example of a possible application of pynoddy to evaluate aspects that are not simply possible with, for example, the GUI version of Noddy itself. In the last example with the changed order of the faults, we might be interested to determine where in space this change had an effect. We can test this quite simply using the NoddyOutput objects.
The geology data is stored in the NoddyOutput.block attribute. To evaluate the difference between two models, we can therefore simply compute:
End of explanation
"""
fig = plt.figure(figsize = (5,3))
ax = fig.add_subplot(111)
ax.imshow(diff[:,10,:].transpose(), interpolation='nearest',
          cmap = "RdBu", origin = 'lower left')
"""
Explanation: And create a simple visualisation of the difference in a slice plot with:
End of explanation
"""
NO1.export_to_vtk(vtk_filename = "model_diff", data = diff)
"""
Explanation: (Adding a meaningful title and axis labels to the plot is left to the reader as simple excercise :-) Future versions of pynoddy might provide an automatic implementation for this step...)
Again, we may want to visualise results in 3-D. We can use the export_to_vtk-function as before, but now assing the data array to be exported as the calulcated differnce field:
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.20/_downloads/f760cc2f1a5d6c625b1e14a0b05176dd/plot_ecog.ipynb | bsd-3-clause | # Authors: Eric Larson <larson.eric.d@gmail.com>
#          Chris Holdgraf <choldgraf@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
import mne
from mne.viz import plot_alignment, snapshot_brain_montage
print(__doc__)
"""
Explanation: Working with ECoG data
MNE supports working with more than just MEG and EEG data. Here we show some
of the functions that can be used to facilitate working with
electrocorticography (ECoG) data.
End of explanation
"""
# Load electrode names and positions from the sample .mat file.
mat = loadmat(mne.datasets.misc.data_path() + '/ecog/sample_ecog.mat')
ch_names = mat['ch_names'].tolist()
elec = mat['elec']  # electrode positions given in meters
# Now we make a montage stating that the sEEG contacts are in head
# coordinate system (although they are in MRI). This is compensated
# by the fact that below we do not specify a trans file so the Head<->MRI
# transform is the identity.
montage = mne.channels.make_dig_montage(ch_pos=dict(zip(ch_names, elec)),
                                        coord_frame='head')
print('Created %s channel positions' % len(ch_names))
"""
Explanation: Let's load some ECoG electrode locations and names, and turn them into
a :class:mne.channels.DigMontage class.
End of explanation
"""
info = mne.create_info(ch_names, 1000., 'ecog').set_montage(montage)
"""
Explanation: Now that we have our electrode positions in MRI coordinates, we can create
our measurement info structure.
End of explanation
"""
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
fig = plot_alignment(info, subject='sample', subjects_dir=subjects_dir,
                     surfaces=['pial'])
mne.viz.set_3d_view(fig, 200, 70)
"""
Explanation: We can then plot the locations of our electrodes on our subject's brain.
<div class="alert alert-info"><h4>Note</h4><p>These are not real electrodes for this subject, so they
do not align to the cortical surface perfectly.</p></div>
End of explanation
"""
# We'll once again plot the surface, then take a snapshot.
fig_scatter = plot_alignment(info, subject='sample', subjects_dir=subjects_dir,
                             surfaces='pial')
mne.viz.set_3d_view(fig_scatter, 200, 70)
xy, im = snapshot_brain_montage(fig_scatter, montage)
# Convert from a dictionary to array to plot
xy_pts = np.vstack([xy[ch] for ch in info['ch_names']])
# Define an arbitrary "activity" pattern for viz
activity = np.linspace(100, 200, xy_pts.shape[0])
# This allows us to use matplotlib to create arbitrary 2d scatterplots
_, ax = plt.subplots(figsize=(10, 10))
ax.imshow(im)
ax.scatter(*xy_pts.T, c=activity, s=200, cmap='coolwarm')
ax.set_axis_off()
plt.show()
"""
Explanation: Sometimes it is useful to make a scatterplot for the current figure view.
This is best accomplished with matplotlib. We can capture an image of the
current mayavi view, along with the xy position of each electrode, with the
snapshot_brain_montage function.
End of explanation
"""
|
max-ionov/rucoref | notebooks/singletons.ipynb | lgpl-3.0 | %cd '/Users/max/Projects/Coreference/'
%cd 'rucoref'
from anaphoralib.corpora import rueval
from anaphoralib.tagsets import multeast
from anaphoralib.experiments.base import BaseClassifier
from anaphoralib import utils
from anaphoralib.experiments import utils as exp_utils
%cd '..'
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from imblearn.over_sampling import BorderlineSMOTE
import numpy as np
%matplotlib inline
# Paths to the word lists and the RuCor corpus files (tokens + groups).
lists_dir = 'CLLS-2016/wordlists'
texts_dir = 'Corpus-2015/Tokens.txt'
gs_dir = 'Corpus-2015/Groups.txt'
tagset = multeast
random_state = 42  # fixed seed for reproducibility of the experiment
"""
Explanation: Experiment for the paper "Identification of singleton mentions in Russian"
Replication of CLLS-2016 paper (Ionov and Toldova 2016)
To reproduce this experiment you will need:
1. RuCor corpus (from 2015-10-29)
2. Python modules:
 * scikit-learn (v. 0.22.1)
 * imbalanced-learn (v. 0.6.2)
 * matplotlib (v. 3.1.3)
2. anaphoralib Python module
Since anaphoralib is in an early stage of development, there is no way to install it yet, so in order to import it, you should cd to the folder with the module. Paths to the corpus should be updated accordingly.
End of explanation
"""
# Load the corpus and its gold-standard coreference annotation.
rucoref = rueval.RuCorefCorpus(multeast, rueval)
exp_utils.load_corpus(rucoref, texts_dir, gs_dir)
rucoref.groups[0][:10]
rucoref.print_stats()
rucoref.create_indices()
"""
Explanation: Reading the texts from GS and matching them to actual texts
Loading chains and GS
End of explanation
"""
import codecs
def load_list(filename):
    """Read a UTF-8 word list, one entry per line, into a set of strings.

    Carriage-return and newline characters are stripped from both ends
    of every entry.
    """
    with codecs.open(filename, encoding='utf-8') as word_file:
        return {entry.strip('\r\n') for entry in word_file}
import os
# Load every *.txt word list in lists_dir, keyed by its base filename.
wordlists = {}
for filename in os.listdir(lists_dir):
    wordlists[filename.replace('.txt', '')] = load_list(os.path.join(lists_dir, filename))
print(wordlists.keys())
"""
Explanation: Loading special lists
Special lists load from the directory stored in lists_dir
End of explanation
"""
import collections
# Per-text inverted indices: lemma string -> set of character offsets,
# for single words (word_index) and for whole groups/NPs (group_index).
word_index = []
group_index = []
for i, text in enumerate(rucoref.texts):
    word_index.append(collections.defaultdict(set))
    group_index.append(collections.defaultdict(set))
    for word in text:
        word_index[-1]['_'.join(word.lemma)].add(word.offset)
    for group in rucoref.groups[i]:
        for g in group.iter_groups():
            group_index[-1]['_'.join(g.lemma)].add(g.offset)
print('\n'.join(list(group_index[0].keys())[:15]))
"""
Explanation: Building indices and dictionaries
Building additional indices (of all words and all groups):
End of explanation
"""
import re
class SingletonClassifier(BaseClassifier):
def __init__(self):
super(SingletonClassifier, self).__init__()
self.feat_zones_ = ('struct', 'string', 'lists', 'synt')
self.stats = {'str_matches_before', 'head_matches_before', 'n_adj', 'len_np', 'is_genitive'}
self.stats.update('in_list_{}'.format(l) for l in wordlists)
self.rx_lat = re.compile('[A-Za-z]')
self.pronouns = {u"его", u"ее", u"её", u"ей", u"ему", u"ею", u"им", u"ими", u"их", u"которая",
u"которого", u"которое", u"которой", u"котором", u"которому", u"которую", u"которые",
u"который", u"которым", u"которыми", u"которых", u"него", u"нее", u"неё", u"ней", u"нем",
u"нём", u"нему", u"нею", u"ним", u"ними", u"них", u"он", u"она", u"они", u"оно", u"свое",
u"своё", u"своего", u"своей", u"своем", u"своём", u"своему", u"своею", u"свой", u"свои",
u"своим", u"своими", u"своих", u"свою", u"своя", u"себе", u"себя", u"собой", u"собою"}
self.clear_stats()
def get_feature_vector(self, corpus, group, i_text, save_feature_names=False):
if save_feature_names:
self.feature_names_ = []
vctr = []
group_lemma = '_'.join(group.lemma)
group_occurrences = group_index[i_text][group_lemma] if group_lemma in group_index[i_text] else []
head_index = group.head
head_lemma = group.lemma[group.head]
head_occurrences = word_index[i_text][head_lemma] if head_lemma in word_index[i_text] else []
head_offset = group.head_offset
group_words = group.words if group.type != 'word' else [group]
str_matches_before = sum(1 for occ in group_occurrences if occ < group.offset)
head_matches_before = sum(1 for occ in head_occurrences if occ < group.offset)
adj_in_group = [word for word in group_words[:head_index+1] if tagset.pos_filters['adj'](word)]
self.stats['str_matches_before'].append(str_matches_before)
self.stats['head_matches_before'].append(head_matches_before)
self.stats['n_adj'].append(len(adj_in_group))
self.stats['len_np'].append(len(group_words))
if 'string' in self.feat_zones_:
vctr.append(('str_match_before=0', str_matches_before == 0))
vctr.append(('head_match_before=0', head_matches_before == 0))
#vctr.append(('uppercase', all(word.isupper() and len(word) > 1 for word in group.wordform)))
#vctr.append(('capitalized', any(word[0].isupper() and len(group.wordform) > 1 for word in group.wordform[1:])))
vctr.append(('latin', any(self.rx_lat.search(word) for word in group.wordform)))
vctr.append(('is_proper_noun', corpus.tagset.pos_filters['properNoun'](group)))
vctr.append(('is_animate', corpus.tagset.extract_feature('animate', group) == u'y'))
vctr.append(('is_pronoun', group.wordform[0] in self.pronouns and len(group_words) == 1))
i_word = corpus.words_index[i_text][group.offset]
left_word = corpus.texts[i_text][i_word - 1] if i_word > 0 else None
right_word = corpus.texts[i_text][i_word + len(group.wordform) + 1] \
if i_word + len(group.wordform) + 1 < len(corpus.texts[i_text]) else None
if 'struct' in self.feat_zones_:
#vctr.append(('conj', bool((left_word and corpus.tagset.pos_filters['conj'](left_word))
# or (right_word and corpus.tagset.pos_filters['conj'](right_word)))))
vctr.append(('len_np==1', len(group.tags) == 1))
vctr.append(('1<len_np<4', 1 < len(group.tags) < 4))
vctr.append(('len_np>=4', 1 < len(group.tags) >= 4))
vctr.append(('n_adj=0', len(adj_in_group) == 0))
#vctr.append(('n_adj>1', len(adj_in_group) > 1))
vctr.append(('n_adj>2', len(adj_in_group) > 2))
vctr.append(('is_genitive', corpus.tagset.extract_feature('case', group) == u'g'))
self.stats['is_genitive'].append(vctr[-1][1])
sent_begin = left_word is None or left_word.tag == 'SENT'
sent_end = right_word is None or right_word.tag == 'SENT'
nomin = corpus.tagset.extract_feature('case', group) == u'n'
accus = corpus.tagset.extract_feature('case', group) == u'a'
if 'synt' in self.feat_zones_:
vctr.append(('is_subject', nomin or sent_begin))
#vctr.append(('is_object', accus or sent_end))
if 'lists' in self.feat_zones_:
for l in wordlists:
feat_name = 'in_list_{}'.format(l)
vctr.append((feat_name, any(lemma in wordlists[l] for lemma in group.lemma[:head_index+1])))
self.stats[feat_name].append(vctr[-1][1])
if save_feature_names:
self.feature_names_ = [feat[0] for feat in vctr]
return [int(feat[1]) for feat in vctr]
def prepare_data(self, corpus, random_state=42, test_size=0.3, feature_zones=None):
    """Build the singleton-classification dataset from *corpus*.

    Fills self.x_data / self.y_data / self.groups and the per-feature
    stats, then delegates the train/test split to the parent class and
    records the class-imbalance ratio in self.ratio.
    """
    if feature_zones:
        self.feat_zones_ = feature_zones
    self.groups = []
    self.x_data = []
    self.y_data = []
    self.stats['class'] = []
    self.cur_data_ = 'Singletons'
    self.class_names_ = ('non-singleton', 'singleton')
    save_features = True
    # Russian function words that are occasionally mis-tagged as nouns; skip them.
    exceptions = {u'и', u'в', u'а', u'к', u'у', u'по', u'где', u'ведь', u'с'}
    for i_text, text in enumerate(corpus.texts):
        for i, mention in enumerate(corpus.mentions[i_text]):
            group = corpus.heads_index[i_text][mention.offset]
            if group.lemma[0] in exceptions and group.tags[0].startswith('N'):
                continue
            # A mention absent from the gold-standard index is a singleton.
            # NOTE(review): reads the global `rucoref` rather than the
            # `corpus` argument — presumably the same object; confirm.
            if i not in rucoref.gs_index[i_text]:
                self.y_data.append(self.class_names_.index('singleton'))
            else:
                self.y_data.append(self.class_names_.index('non-singleton'))
            self.x_data.append(self.get_feature_vector(corpus, group, i_text, save_features))
            self.groups.append(group)
            self.stats['class'].append(self.class_names_[self.y_data[-1]])
            # Feature names only need to be recorded for the first vector.
            save_features = False
            #pronoun_index = self.feature_names_.index('is_pronoun')
            #if self.x_data[-1][pronoun_index]:
            #    self.x_data.pop()
            #    self.y_data.pop()
            #    continue
            #del self.x_data[-1][pronoun_index]
    super(SingletonClassifier, self).prepare_data(corpus, random_state, test_size)
    #del self.feature_names_[pronoun_index]
    # Class-imbalance ratio: |minority class| / |majority class|.
    class_numbers = [sum(1 for item in self.y_data if item == cur_class) for cur_class in range(len(self.class_names_))]
    self.ratio = float(min(class_numbers) / float(max(class_numbers)))
"""
Explanation: Creating a classifier
End of explanation
"""
singleton_clf = SingletonClassifier()
singleton_clf.prepare_data(rucoref, random_state=random_state)
"""
Explanation: Training and testing
End of explanation
"""
def baseline_predict(data):
    """Baseline: label a row 1.0 iff its first two features are both 1.

    Mirrors the baseline condition "singleton if no exact string / head
    match occurs earlier in the text"; returns a float array of 0.0/1.0.
    """
    return np.array([float(row[0] == 1 and row[1] == 1) for row in data])
# Evaluate the no-learning baseline on the held-out test set.
singleton_clf.test(y_pred=baseline_predict(singleton_clf.x_data_test), test_name='baseline')
"""
Explanation: Baseline
Baseline condition: NP is a singleton if there is no such exact string or its head in the text before
End of explanation
"""
# Each experiment below re-fits a RandomForest (with borderline-SMOTE
# oversampling) on a progressively larger feature-zone set.
singleton_clf.prepare_data(rucoref, random_state=random_state, feature_zones=('string',))
clf = RandomForestClassifier(n_estimators=200, random_state=random_state)
sampler = BorderlineSMOTE(sampling_strategy='auto', kind='borderline-1', random_state=random_state)
singleton_clf.fit(clf, sampler)
singleton_clf.print_stats()
len(singleton_clf.x_data_train)
singleton_clf.test(test_name='string features')
"""
Explanation: String features
End of explanation
"""
singleton_clf = SingletonClassifier()
singleton_clf.prepare_data(rucoref, random_state=random_state, feature_zones=('string', 'struct'))
clf = RandomForestClassifier(n_estimators=200, random_state=random_state)
sampler = BorderlineSMOTE(sampling_strategy='auto', kind='borderline-1', random_state=random_state)
singleton_clf.fit(clf, sampler)
singleton_clf.test(test_name='string+struct features')
"""
Explanation: String + Struct features
End of explanation
"""
singleton_clf = SingletonClassifier()
singleton_clf.prepare_data(rucoref, random_state=random_state, feature_zones=('string', 'struct', 'lists'))
clf = RandomForestClassifier(n_estimators=200, random_state=random_state)
sampler = BorderlineSMOTE(sampling_strategy='auto', kind='borderline-1', random_state=random_state)
singleton_clf.fit(clf, sampler)
singleton_clf.test(test_name='string+struct+lists')
"""
Explanation: String + Struct + List features
End of explanation
"""
singleton_clf = SingletonClassifier()
singleton_clf.prepare_data(rucoref, random_state=random_state, feature_zones=('string', 'struct', 'lists', 'synt'))
clf = RandomForestClassifier(n_estimators=200, random_state=random_state)
sampler = BorderlineSMOTE(sampling_strategy='auto', kind='borderline-1', random_state=random_state)
singleton_clf.fit(clf, sampler)
singleton_clf.test(test_name='all features')
# Print per-feature Gini importances from the fitted random forest.
for i, feat_val in enumerate(singleton_clf.clf_.feature_importances_):
    print('{}: {:.4f}'.format(singleton_clf.feature_names_[i], feat_val))
# Dump the training groups by gold class for manual inspection.
# (Originally the four file handles were never closed; `with` guarantees
# they are flushed and closed even if an exception occurs.)
with open('singletons.all.txt', 'w', encoding='utf-8') as out_singletons, \
        open('non-singletons.all.txt', 'w', encoding='utf-8') as out_non_singletons:
    for i, item in enumerate(singleton_clf.groups_train):
        if singleton_clf.y_data_train[i] == 1:
            out_singletons.write(str(item))
            out_singletons.write('\n')
        else:
            out_non_singletons.write(str(item))
            out_non_singletons.write('\n')
# Dump the test-set errors: false positives and false negatives.
y_pred = singleton_clf.clf_.predict(singleton_clf.x_data_test)
with open('singletons.fp.txt', 'w', encoding='utf-8') as out_fp, \
        open('singletons.fn.txt', 'w', encoding='utf-8') as out_fn:
    for i, item in enumerate(singleton_clf.groups_test):
        if singleton_clf.y_data_test[i] == 0 and y_pred[i] != singleton_clf.y_data_test[i]:
            out_fp.write(str(item))
            out_fp.write('\n')
        if singleton_clf.y_data_test[i] == 1 and y_pred[i] != singleton_clf.y_data_test[i]:
            out_fn.write(str(item))
            out_fn.write('\n')
"""
Explanation: All features
End of explanation
"""
regr = LogisticRegression(random_state=random_state, max_iter=200)
sampler = BorderlineSMOTE(sampling_strategy='auto', kind='borderline-1', random_state=random_state)
singleton_clf.fit(regr, sampler)
for i, feat_name in enumerate(singleton_clf.feature_names_):
print('{}: {:.4f}'.format(feat_name, regr.coef_[0,i]))
"""
Explanation: Calculating feature importances
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import anaphoralib.experiments.utils
singleton_clf.stats.keys()
singleton_clf = SingletonClassifier()
singleton_clf.prepare_data(rucoref, random_state=random_state, feature_zones=('string', 'struct', 'lists', 'synt'))
feature_distributions = {}
for feat_name in singleton_clf.stats:
feature_distributions[feat_name] = {cls: [] for cls in singleton_clf.class_names_ + ('total',)}
for i, elem in enumerate(singleton_clf.stats['class']):
feature_distributions[feat_name][elem].append(singleton_clf.stats[feat_name][i])
feature_distributions[feat_name]['total'].append(singleton_clf.stats[feat_name][i])
anaphoralib.experiments.utils.latexify()
def plot_feat_distribution(distribution, bins, class_names, x_label='Feature value', filename='plot.pdf'):
    """Plot normalized side-by-side histograms of one feature for two classes.

    distribution: dict mapping class name -> list of feature values.
    bins: histogram bin edges. (Fix: the parameter was previously dead —
          it was immediately shadowed by a hard-coded ``bins = range(7)``;
          it is now honored.)
    class_names: pair of keys into *distribution*; class_names[0] is drawn
          in dark grey, class_names[1] in light grey.
    Saves the figure to "<filename>.pdf" and shows it.
    """
    ax = plt.gca()
    ax.set_xlabel(x_label)
    ax.set_ylabel("Density")
    #ax.set_title("Distribution of feature")
    plt.tight_layout()
    format_axes(ax)  # NOTE(review): helper defined elsewhere in the notebook
    # Density-normalized histograms for both classes over the same bins.
    true_hist = np.histogram(distribution[class_names[1]], bins, density=True)
    false_hist = np.histogram(distribution[class_names[0]], bins, density=True)
    w = 0.3
    true_x = list(range(len(true_hist[0])))
    false_x = [item + w for item in true_x]
    ax.set_xticks([item + w for item in true_x])
    ax.set_xticklabels(true_x)
    rects1 = plt.bar(false_x, false_hist[0], w, color='0.3')
    rects2 = plt.bar(true_x, true_hist[0], w, color='0.7')
    plt.legend((rects1, rects2), class_names, loc='upper right')
    # NOTE(review): appends ".pdf" even when *filename* already ends in .pdf.
    plt.savefig("{}.pdf".format(filename))
    plt.show()
    plt.close()
import os
anaphoralib.experiments.utils.latexify(columns=2)
# Plot every feature's class-conditional distribution into the CLLS-2016 dir.
for feat_name in feature_distributions:
    if feat_name == 'class':
        continue
    anaphoralib.experiments.utils.plot_feature_distribution(feature_distributions[feat_name], range(7),
                                                            singleton_clf.class_names_,
                                                            x_label=feat_name.replace('_', '\\_'), filename=os.path.join('CLLS-2016', feat_name))
from sklearn.model_selection import learning_curve
from sklearn.metrics import make_scorer, f1_score
from sklearn.utils import shuffle
singleton_clf = SingletonClassifier()
singleton_clf.prepare_data(rucoref, random_state=random_state, feature_zones=('string', 'struct', 'lists', 'synt'))
clf = RandomForestClassifier(n_estimators=200, random_state=random_state)
# Shuffle once so the 3-fold CV splits are not order-biased.
shuffled_x_data, shuffled_y_data = shuffle(singleton_clf.x_data, singleton_clf.y_data, random_state=random_state)
# F1 scored on the non-singleton class (label 0).
train_sizes_abs, train_scores, test_scores = learning_curve(clf,
                                                            shuffled_x_data,
                                                            shuffled_y_data,
                                                            cv=3,
                                                            scoring=make_scorer(f1_score, pos_label=0))
anaphoralib.experiments.utils.latexify(columns=2)
anaphoralib.experiments.utils.plot_learning_curve(train_sizes_abs,
                                                  train_scores, test_scores,
                                                  score_name='f1',
                                                  filename=os.path.join('CLLS-2016', 'learning_curve_plot'))
"""
Explanation: Additional actions
Getting feature distributions
End of explanation
"""
|
martinjrobins/hobo | examples/sampling/transformed-parameters.ipynb | bsd-3-clause | import pints
import pints.toy as toy
import pints.plot
import numpy as np
import matplotlib.pyplot as plt
# Set some random seed so this notebook can be reproduced
np.random.seed(10)
# Load a forward model
model = toy.LogisticModel()
"""
Explanation: Sampling from a transformed parameter space
This example shows you how to run (and compare) Bayesian inference using a transformed parameter space.
Searching in a transformed space can improve performance (and robustness) of many sampling methods, and make some methods applicable to problems that cannot otherwise be tackled.
Unlike transforming error measures, for probability density functions (PDFs), in order to make sure the probability for any arbitrary interval within the PDFs conserves between parameter transformations, we cannot simply transform the model parameters by using a model wrapper.
We need what is called the Jacobian adjustment to 'correct' the transformed PDFs or rather to ensure this conservation (as explained in pints.TransformedLogPDF).
An example notebook here shows how things can go wrong with a naive wrapper without the Jacobian adjustment.
All of these (easy-to-miss) adjustments are done behind the scenes by our pints.Transformation and pints.MCMCController as shown in this example.
We start by loading a pints.Forwardmodel implementation, in this case a logistic model.
End of explanation
"""
# Create some toy data
real_parameters = [0.015, 500]
times = np.linspace(0, 1000, 1000)
org_values = model.simulate(real_parameters, times)
# Add noise
noise = 10
values = org_values + np.random.normal(0, noise, org_values.shape)
real_parameters = np.array(real_parameters + [noise])
# Get properties of the noise sample
noise_sample_mean = np.mean(values - org_values)
noise_sample_std = np.std(values - org_values)
# Create an object with links to the model and time series
problem = pints.SingleOutputProblem(model, times, values)
# Create a log-likelihood function (adds an extra parameter!)
log_likelihood = pints.GaussianLogLikelihood(problem)
# Create a uniform prior over both the parameters and the new noise variable
log_prior = pints.UniformLogPrior(
[0.001, 10, noise*0.1],
[1.0, 1000, noise*100]
)
# Create a posterior log-likelihood (log(likelihood * prior))
log_posterior = pints.LogPosterior(log_likelihood, log_prior)
"""
Explanation: We then define some parameters and set up the problem for the Bayesian inference.
End of explanation
"""
# Choose starting points for 3 mcmc chains
xs = [
    [0.7, 20, 2],
    [0.005, 900, 100],
    [0.01, 100, 500],
]
"""
Explanation: In this example, we will pick some considerably difficult starting points for the MCMC chains.
End of explanation
"""
# Create mcmc routine with three chains
mcmc = pints.MCMCController(log_posterior, 3, xs, method=pints.HaarioBardenetACMC)
# Add stopping criterion
mcmc.set_max_iterations(4000)
# Start adapting after 1000 iterations
mcmc.set_initial_phase_iterations(1000)
# Disable logging mode
mcmc.set_log_to_screen(False)
# Run!
print('Running...')
chains = mcmc.run()
print('Done!')
# Discard warm up (first 2000 of 4000 iterations)
chains = chains[:, 2000:, :]
# Look at distribution across all chains
pints.plot.pairwise(np.vstack(chains), kde=False, parameter_names=[r'$r$', r'$K$', r'$\sigma$'])
# Show graphs
plt.show()
"""
Explanation: Let's run an Adaptive Covariance MCMC without doing any parameter transformation to check its performance.
End of explanation
"""
results = pints.MCMCSummary(chains=chains, time=mcmc.time(), parameter_names=["r", "k", "sigma"])
print(results)
"""
Explanation: The MCMC samples are not ideal, because we've started the MCMC run from some difficult starting points.
We can use MCMCSummary to inspect the efficiency of the MCMC run.
End of explanation
"""
# Create parameter transformation
transformation = pints.LogTransformation(n_parameters=len(xs[0]))
# Create mcmc routine with four chains
mcmc = pints.MCMCController(log_posterior, 3, xs,
method=pints.HaarioBardenetACMC,
transformation=transformation)
# Add stopping criterion
mcmc.set_max_iterations(4000)
# Start adapting after 1000 iterations
mcmc.set_initial_phase_iterations(1000)
# Disable logging mode
mcmc.set_log_to_screen(False)
# Run!
print('Running...')
chains = mcmc.run()
print('Done!')
# Discard warm up
chains = chains[:, 2000:, :]
# Look at distribution across all chains
pints.plot.pairwise(np.vstack(chains), kde=False, parameter_names=[r'$r$', r'$K$', r'$\sigma$'])
# Show graphs
plt.show()
"""
Explanation: Now, we create a create a pints.Transformation object for log-transformation and re-run the MCMC to see if it makes any difference.
End of explanation
"""
results = pints.MCMCSummary(chains=chains, time=mcmc.time(), parameter_names=["r", "k", "sigma"])
print(results)
"""
Explanation: The MCMC samples using parameter transformation looks very similar to the one in another example notebook which we had some good starting points and without parameter transformation.
This is a good sign! It suggests the transformation did not mess anything up.
Now we check the efficiency again:
End of explanation
"""
|
Fifth-Cohort-Awesome/NightThree | Three_RSH.ipynb | mit | import csv
# Quick sanity read of the raw CSV with the csv module (pandas is used below).
datafile = open('/Users/kra7830/Desktop/MSDS_School/Info_Structures/dev/NightThree/tmdb_5000_movies.csv', 'r')
myreader = csv.reader(datafile)
#for i in myreader:
#print i
##### This prints lots of texts
import pandas as pd
# Read the CSV into a pandas data frame (df)
df = pd.read_csv('/Users/kra7830/Desktop/MSDS_School/Info_Structures/dev/NightThree/tmdb_5000_movies.csv', delimiter=','
)
df = pd.DataFrame(df)
"""
a budget
b genres -> embedded lists
c homepage
d id
e keywords -> embedded lists
f original_language
g original_title
h overview
i popularity
j production_companies -> embedded lists
k production_countries -> embedded lists
l release_date
m revenue
n runtime
o spoken_languages -> embedded lists
p status
q tagline
r title
s vote_average
t vote_count
"""
"""
Explanation: RSHolt - MSDS
Goal 1 - NightThree
End of explanation
"""
import json
import pandas as pd
# Each entry of 'genres' is a JSON string encoding a list of {id, name} dicts.
lst = df['genres'].values.tolist()
print lst[1]
print type(lst)
"""
Explanation: List test
I am testing the formation of a list from one of the embedded json variables...
End of explanation
"""
f1 = df[['budget','genres']]
f1.head()
"""
Explanation: Enumeration test
f1 = a dataframe that contains a non-json variable and an embedded json variable. The first test will be to enumerate through genres to unpack the json and loop over two lists simultaneously.
http://treyhunner.com/2016/04/how-to-loop-with-indexes-in-python/
End of explanation
"""
### GOAL 2 / TEST 1: Unpack Collapsed Variables
ids =[]
names =[]
mel =[]
dic = lst #list of lists
budget = list(f1['budget']) #corresponding list of budgets should match up to iter
# Flatten genres: one (budget, id, name) row per genre entry per movie.
# NOTE(review): the inner loop re-binds i and j, shadowing the outer loop
# variables; correct only because enumerate re-assigns them each iteration.
for i, d in enumerate(dic):
    d_lst = json.loads(dic[i]) #this json.loads data in usable format
    #dl = pd.DataFrame(d_lst) #this put id and name in dataframe
    budg = budget[i] #this was used to enumerate over the json.loads
    for i, j in enumerate(d_lst):
        f_ids = (d_lst[i]['id'])
        f2_names = (d_lst[i]['name'])
        mel.append(budg)
        ids.append(f_ids)
        names.append(f2_names)
gf = pd.DataFrame({'Budget':mel, 'ID': ids, 'name': names})
print gf.head()
"""
Explanation: Testing the unpacking of JSON with enumeration...
End of explanation
"""
###
"""
Explanation: Looks like it worked... and the variables are unpacked in an individual format.
End of explanation
"""
"""
a budget
b genres -> embedded lists
c homepage
d id
e keywords -> embedded lists
f original_language
g original_title
h overview
i popularity
j production_companies -> embedded lists
k production_countries -> embedded lists
l release_date
m revenue
n runtime
o spoken_languages -> embedded lists
p status
q tagline
r title
s vote_average
t vote_count
"""
df_new = df[['title','budget', 'homepage', 'id', 'original_language', 'original_title', 'overview', 'popularity',
             'release_date', 'revenue', 'runtime', 'status', 'tagline', 'vote_average', 'vote_count' ]]
#df_new is the dataframe that has all variables EXCLUDING the JSON data...
"""
Explanation: Build Out Compressed Data for Goal 2 & 3
Organizing a little bit...
End of explanation
"""
import numpy as np
# One raw JSON-string list per collapsed column.
b_list = df['genres'].values.tolist()
e_list = df['keywords'].values.tolist()
j_list = df['production_companies'].values.tolist()
k_list = df['production_countries'].values.tolist()
o_list = df['spoken_languages'].values.tolist()
##thought (enumerate to) join to movie title?? make that a primary key? Then left merge data frames, by title ?
def unpack_json(x):
    """Flatten one collapsed JSON column into a long (Title, ID, name) frame.

    *x* must be one of the module-level raw-string lists (e_list, b_list,
    j_list, k_list, o_list); the result is stored in the matching global
    dataframe (ef, bf, jf, kf, of), keyed by movie title.
    """
    global bf, ef, jf, kf, of #global dataframes to be saved for further use
    ids = []
    names = []
    movie_key = []
    r_list = list(df_new['title']) #list of titles will enumerate to collapsed data
    # Use the second row's dict keys as the two field names to extract.
    # NOTE(review): Python 2 dict .keys() order is arbitrary, so xn[0]/xn[1]
    # may come out as (name, id) instead of (id, name) — see the
    # "backwards" remark for the production-companies frame below.
    n = json.loads(x[1])
    xn = n[0].keys()
    #print xn
    for i, j in enumerate(x):
        movie_title = r_list[i] #movie title is the key
        js = json.loads(x[i]) #json loading
        #print js #test for working
        #es = json.loads(b_list[i])
        #print movie_title
        # NOTE(review): inner loop shadows the outer i, j; harmless here
        # because movie_title was captured before entering it.
        for i,j in enumerate(js):
            f = js[i][xn[0]]
            f2 = js[i][xn[1]]
            movie_key.append(movie_title)
            ids.append(f)
            names.append(f2)
    #logic to determine which list.append goes to which global variable
    if x == e_list: #keywords
        ef = pd.DataFrame({'Title': movie_key, 'Keyword_ID': ids, 'Keyword_name': names})
        #print gf.head()
        print 'Success'
    elif x == b_list: #genres
        bf = pd.DataFrame({'Title': movie_key, 'Genres_ID': ids, 'Genres_name': names})
        #print bf.head()
        print 'Success'
    elif x == j_list: #production_companies
        jf = pd.DataFrame({'Title': movie_key, 'ProdComp_ID': ids, 'ProcComp_name': names})
        #print jf.head() #Remember these are backwards
        print 'Success'
    elif x == k_list: #production_countries
        kf = pd.DataFrame({'Title': movie_key, 'ProdCty_ID': ids, 'ProdCty_name': names})
        #print kf.head()
        print 'Success'
    elif x == o_list: #spoken_languages
        of = pd.DataFrame({'Title': movie_key, 'Lang_ID': ids, 'Lang_name': names})
        print of.head()
        print 'Success'
    else:
        print "NOPE"
# Unpack all five collapsed columns into the global long-format frames.
unpack_json(e_list)
unpack_json(b_list)
unpack_json(j_list)
unpack_json(k_list)
unpack_json(o_list)
"""
Explanation: Making lists of all the json variables...
End of explanation
"""
### JOIN to make long vertical Data Set
#caller.join(other.set_index('key'), on='key')
# Successive left joins on title; each one-to-many join multiplies rows.
r = df_new.join(bf.set_index('Title'), on='title', how='left') #bf
r1 = r.join(ef.set_index('Title'), on='title', how='left') #ef
r2 = r1.join(jf.set_index('Title'), on='title', how='left') #jf
r3 = r2.join(kf.set_index('Title'), on='title', how='left') #kf
final_long_df = r3.join(of.set_index('Title'), on='title', how='left') #of = Last join, so final
print final_long_df.head(2)
#print final_long_df.count()
"""
Explanation: Goal 3 Making Long Data (Goal 2 below)
"Wide data sets are good for exploration, but 'long' data sets are good for training. Let's attempt to expand all the collapsed field vertically instead of horizontally. Does this result in data duplication? What do you think about that? Yes and No are both correct -- but what's the context?"
Yes, it duplicated a lot of values. Yes, it is good for increasing signal in some case models. However, wide data can be used in many statistical measures as well such as anything involving the logistic regression.
-- Now that I have the data unpacked, I will left join to the dataframe df_new on movie title. This should elongate the data vertically as each unique value is added.
End of explanation
"""
print str("#this is OG data with no JSON" "\n"), df_new.count()
print bf['Genres_name'].count()
print "\n"
print str("#this is generes unpacked with movie" "\n") , bf.head()
print "\n"
print str("#this is Genres unpacked with movie counts" "\n"), bf.count()
print "\n"
#print r.count()
pd.set_option('display.max_rows', 500)
"""
Explanation: Looking at counts...
End of explanation
"""
#Getting all the unique values of keys to store
def store_keys(x):
    """Collect the unique values of one collapsed column's second JSON field.

    The sorted unique values are stored as a one-column dataframe in the
    matching global (key_un, gen_un, prod_comp_un, prod_cty_un, lang_un);
    they later become the dummy-variable feature names.
    """
    global key_un, gen_un, prod_comp_un, prod_cty_un, lang_un
    ident = []
    # Field names taken from the second row; same Python 2 key-order
    # caveat as in unpack_json.
    n = json.loads(x[1])
    xn = n[0].keys()
    for i, j in enumerate(x):
        js = json.loads(x[i]) #json loading
        for i,j in enumerate(js):
            f = js[i][xn[0]]  # NOTE(review): f is computed but never used
            f2 = js[i][xn[1]]
            ident.append(f2)
    if x == e_list: #keywords
        key_un = pd.DataFrame(np.array(np.unique(ident)))
        print key_un.head()
        print 'Success'
    elif x == b_list: #genres
        gen_un = pd.DataFrame(np.array(np.unique(ident)))
        #print gen_un.head()
        print 'Success'
    elif x == j_list: #production_companies
        prod_comp_un = pd.DataFrame(np.array(np.unique(ident)))
        #print prod_comp_un.head()
        print 'Success'
    elif x == k_list: #production_countries
        prod_cty_un = pd.DataFrame(np.array(np.unique(ident)))
        #print prod_cty_un.head()
        print 'Success'
    elif x == o_list: #spoken_languages
        lang_un = pd.DataFrame(np.array(np.unique(ident)))
        #print lang_un.head()
        print 'Success'
    else:
        print "NOPE"
# Collect the unique key values for every collapsed column.
store_keys(e_list)
store_keys(b_list)
store_keys(j_list)
store_keys(k_list)
store_keys(o_list)
"""
Explanation: Goal 2: Making Wide Data
I want to collect only the key values from the compressed data.
End of explanation
"""
lang_un[0].head()
"""
Explanation: Testing to see if keys were captured... These should be eventually used as features in the wide data set.
End of explanation
"""
# One-hot encode each unpacked column (one 0/1 column per unique value).
df_genres = pd.get_dummies(bf['Genres_name'])
df_keyword = pd.get_dummies(ef['Keyword_name'])
df_prod_name = pd.get_dummies(jf['ProdComp_ID'])
df_prod_country = pd.get_dummies(kf['ProdCty_name'])
df_lang = pd.get_dummies(of['Lang_name'])
df_prod_name.head() #nice function... very simple
"""
Explanation: Making Dummy Values
First, make key values as features in a new data set. Using pandas.dummy to create values 1 or 0.
End of explanation
"""
# Attach the dummy columns back onto the long frames (column-wise concat).
genres_wide = pd.concat([bf, df_genres], axis=1)
keyword_wide = pd.concat([ef, df_keyword], axis=1)
prod_name_wide = pd.concat([jf, df_prod_name], axis=1)
prod_country_wide = pd.concat([kf, df_prod_country], axis=1)
lang_wide = pd.concat([of, df_lang], axis=1)
genres_wide.head()
"""
Explanation: Concat directly back to the unpacked dataframes...
End of explanation
"""
# Drop the original ID/name columns, leaving Title + dummies only.
df_gr = pd.DataFrame(genres_wide.drop(['Genres_name','Genres_ID'], 1))
df_ky = keyword_wide.drop(['Keyword_ID', 'Keyword_name'], 1)
df_pn = prod_name_wide.drop(['ProdComp_ID','ProcComp_name'], 1)
df_pc = prod_country_wide.drop(['ProdCty_name','ProdCty_ID'], 1)
df_ln = lang_wide.drop(['Lang_ID','Lang_name'], 1)
df_ln.head()
### create pivot tables as DFs as records then format title for simpler join...
def multi_table_creation(x):
    """Pivot one dummy frame to one row per Title (summing the dummies).

    Stores the pivoted result in the matching global table (gr_table,
    ky_table, pn_table, pc_table, ln_table) with the index flattened back
    to records and 'Title' renamed to lowercase 'title' for easier joins.
    """
    global gr_table, ky_table, pn_table, pc_table, ln_table
    if x is df_gr:
        gr_table = pd.DataFrame(pd.pivot_table(x, index=['Title'], aggfunc=np.sum))
        gr_table = pd.DataFrame(gr_table.to_records())
        gr_table = gr_table.rename(index=str, columns={"Title": "title"})
        print "OK"
    elif x is df_ky:
        ky_table = pd.pivot_table(x, index=['Title'], aggfunc=np.sum)
        ky_table = pd.DataFrame(ky_table.to_records())
        ky_table = ky_table.rename(index=str, columns={"Title": "title"})
    elif x is df_pn:
        pn_table = pd.pivot_table(x, index=['Title'], aggfunc=np.sum)
        pn_table = pd.DataFrame(pn_table.to_records())
        pn_table = pn_table.rename(index=str, columns={"Title": "title"})
    elif x is df_pc:
        pc_table = pd.pivot_table(x, index=['Title'], aggfunc=np.sum)
        pc_table = pd.DataFrame(pc_table.to_records())
        pc_table = pc_table.rename(index=str, columns={"Title": "title"})
    elif x is df_ln:
        ln_table = pd.pivot_table(x, index=['Title'], aggfunc=np.sum)
        ln_table = pd.DataFrame(ln_table.to_records())
        ln_table = ln_table.rename(index=str, columns={"Title": "title"})
# Pivot each dummy frame to one row per title.
multi_table_creation(df_gr)
multi_table_creation(df_ky)
multi_table_creation(df_pn)
multi_table_creation(df_pc)
multi_table_creation(df_ln)
ky_table.head() #title is now a column name, not a pivoted var...
"""
Explanation: Removing Names and ID...
End of explanation
"""
t = df_new.join(gr_table.set_index('title'), on='title', how='left') #bf
t.head()
"""
Explanation: Now we have tables with unique Titles of movies with wide set of features.
End of explanation
"""
t.iloc[:,15].head() ### Values changed to float64... see below for fix
"""
Explanation: Not sure why, but values changed to float...
End of explanation
"""
#gr_table, ky_table, pn_table, pc_table, ln_table
# Chain the five pivoted tables onto df_new; suffixes resolve any
# duplicate dummy-column names between tables.
t = df_new.join(gr_table.set_index('title'), on='title', how='left') #bf
ky_table = ky_table.sort_values(by = 'title')
t1 = t.join(ky_table.set_index('title'), on='title', how='left', lsuffix='_left', rsuffix='_right')
t2 = t1.join(pn_table.set_index('title'), on='title', how='left', lsuffix='_left', rsuffix='_right') #bf
t3 = t2.join(pc_table.set_index('title'), on='title', how='left', lsuffix='_left', rsuffix='_right') #bf
t4 = t3.join(ln_table.set_index('title'), on='title', how='left', lsuffix='_left', rsuffix='_right') #bf
t4.head()
"""
Explanation: Joining tables back to each other to create very very wide data...
After this, I will remove NANs and convert to type INT (See below).
End of explanation
"""
t4.fillna(0, inplace=True)
"""
Explanation: Filling NaNs before INT conversion...
End of explanation
"""
#t['History'] = t['History'].apply(int)
#key_un, gen_un, prod_comp_un, prod_cty_un, lang_un . #the keys I stored earlier
# Cast the genre dummy columns back to int (floats came from the NaN fill).
for i in gen_un[0]:
    t4[i] = t4[i].apply(int)
#for i in key_un[0]:
#t4[i] = t4[i].apply(int)
#for i in prod_comp_un[0]:
#    t4[i] = t4[i].apply(int)
#for i in prod_cty_un[0]:
#    t4[i] = t4[i].apply(int)
#for i in lang_un[0]:
#    t4[i] = t4[i].apply(int)
"""
Explanation: Final Stretch = converting to INT based on list of key values saved previously
Example: Only doing Genres as example... This needs optimization as it's very slow!!! But it works...
End of explanation
"""
f = pd.DataFrame(t4.iloc[0]) #for Ex. only selecting small data
pd.set_option('display.max_columns', 100) # expand to see more variables
f.T
"""
Explanation: Goal 2 fun... Example of Super Wide Data in Logistic Regression
End of explanation
"""
import scipy.stats as stats
import statsmodels.api as sm
import statsmodels.formula.api as smf
log_test = t4[['title','budget', 'original_language','runtime','vote_average','Action', 'Adventure',
'Comedy','Fantasy', 'Foreign', 'Family', 'History', 'Horror', 'Music', 'Thriller', 'War']]
results = smf.glm('Action ~ Adventure',
data=log_test, family=sm.families.Binomial()).fit()
#results = smf.glm('Action ~ Adventure + Comedy + Fantasy + Foreign + Family + History + Horror + Thriller + War',
# data=log_test, family=sm.families.Binomial()).fit()
print(results.summary())
results.params
"""
Explanation: Importing some stats packages...
End of explanation
"""
for i, j in enumerate(results.params):
if i == 0:
next
else:
print results.model.data.param_names[i],"is" , np.round(np.exp(j),1), "x likely to be also an Action movie."
"""
Explanation: Unadjusted Odds Ratio Example. Logistic regression is one technique that could be used
to predict / analyze patterns for all 15015 features in the wide data set.
End of explanation
"""
|
tmaila/autopilot | autopilot.ipynb | apache-2.0 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from IPython.display import display # Allows the use of display() for DataFrames
%matplotlib inline
# Import run log from CSV file
try:
    # Python log file
    df_py = pd.DataFrame.from_csv('results/run.py.csv')
except:
    print "Python dataset could not be loaded. Is the dataset missing?"
try:
    # C++ log file
    df_cc = pd.DataFrame.from_csv('results/run.cc.csv')
except:
    print "C++ dataset could not be loaded. Is the dataset missing?"
try:
    # Real-Time C++ log file
    df_rt = pd.DataFrame.from_csv('results/run.rt.csv')
except:
    print "Real-time C++ dataset could not be loaded. Is the dataset missing?"
# Remove first rows to remove initialization effects
# NOTE(review): this keeps rows with index > 0, which drops however many
# rows have index <= 0 — confirm that matches "two first rows".
df_py = df_py.loc[df_py.index > 0]
df_cc = df_cc.loc[df_cc.index > 0]
df_rt = df_rt.loc[df_rt.index > 0]
# Select time difference column
dt_py = df_py['Time_Diff']
dt_cc = df_cc['Time_Diff']
dt_rt = df_rt['Time_Diff']
# Select model output (steering) column
deg_py = df_py['Output']
deg_cc = df_cc['Output']
deg_rt = df_rt['Output']
"""
Explanation: Real-Time Deep Learning Inference for Self-Driving Cars with TensorFlow
Analysis of Execution Time Statistics of Python, C++ and C++ Real-Time Inferences
Background
Deep learning based self driving cars need to make steering and other control decisions at deterministic pace to be able to safely control the behavior of the vehicle in traffic. A car traveling 70 mph moves one foot every 10 ms. One foot can be the difference between successful corrective action and a fatal accident. In this project we analyze the inference execution time determinism and jitter. We compare Python, C++ and real-time optimized C++ deep learning inference implementations for self driving cars using TensorFlow. We expect some jitter and jitter of the order of 5-10 ms is probably acceptable for a self driving car but 50 ms delays probably would be too much for such a critical control system.
In this project we have created a TensorFlow implementation of the NVIDIA End-to-End Deep Learning for Self-Driving Cars self driving car model. Our implementation is based on the Python TensorFlow implementation by Sully Chen.
The project consists of a python based training script and a python and two C++ based inference implementations. At the end of the training the training script saves a graph definition where all weights are replaced with corresponding constant values. This saved graph is then loaded by the Python and both C++ inference implementations.
As the execution environment we used Ubuntu 16.10 with a custom Linux 4.9.13 kernel with a Preempt RT real-time patch. This patch adds hard real-time support to the operating system. The inference was performed on a NVIDIA GTX 1060 GPU.
In this notebook, we analyze and compare the time it takes to perform inference steps for the model using the Python and both C++ based inference implementations. All implementations execute the same TensorFlow graph but it is expected that the Python code has most jitter as it may get impacted by the garbage collection occasionally slowing down the inference for short periods of time. The regular C++ implementation is also expected to have some jitter, although less than the Python implementation as C++ doesn't get impacted by garbage collection. The real-time optimized C++ implementation is expected to have least jitter.
We start by loading the results datasets for inference timing from all of the inference implementations.
End of explanation
"""
display(df_py.describe())
"""
Explanation: Timing Statistics
Python
Let's first look at the timing statistics of of performing the inference steps. We start with the Python based inference.
End of explanation
"""
display(df_cc.describe())
"""
Explanation: C++
And below is the timing statistics for the C++ implementation.
End of explanation
"""
display(df_rt.describe())
"""
Explanation: Real-Time Optimized C++
And below is the timing statistics for the real-time C++ implementation on Ubuntu with a Preempt RT linux kernel patch.
End of explanation
"""
# Figure 1 - Time histograms
bins = np.linspace(0.0,0.008,100)
plt.figure(1)
dt_py.hist(bins=bins)
plt.title('Python')
plt.figure(2)
dt_cc.hist(bins=bins)
plt.title('C++')
plt.figure(3)
dt_rt.hist(bins=bins)
plt.title('Real-Time C++')
plt.figure(4)
dt_py.hist(bins=bins)
dt_cc.hist(bins=bins)
dt_rt.hist(bins=bins)
plt.title('All implementations')
"""
Explanation: The mean exectution time for the inference steps in the Time_Diff column is nearly identical for all three implementations. The maximum execution time is significantly lower for the real-time optimized C++ implementation than for the regular C++ and Python implementations.
Timing Histograms
To get a better understanding of the exection times let's look at the step execution time histograms for both implementations.
End of explanation
"""
# Step-execution-time series, one figure per implementation.
plt.figure(4)
dt_py.plot()
plt.title('Python')
plt.figure(5)
dt_cc.plot()
plt.title('C++')
plt.figure(6)
dt_rt.plot()
plt.title('Real-Time C++')
"""
Explanation: The three implementations have rather similar timing distributions. All distributions have two peaks, the secondary peak is most prominent in the real-time optimized C++ implementation and least prominent in the Python implementation. The second peak is most likely caused by undeterminism in reading images from disk and interfacing with the NVIDIA hardware, neither of which are time deterministic real-time operations.
Let's then compare the step execution time over the whole time span.
End of explanation
"""
# Steering output over time, one subplot per implementation.
plt.figure(6)
plt.title('Steering action over time')
plt.subplot(311)
deg_py.plot()
plt.title('Python')
plt.subplot(312)
deg_cc.plot()
plt.title('C++')
plt.subplot(313)
# Fix: this subplot previously plotted deg_cc again, so the panel labelled
# 'Real-Time C++' showed the regular C++ steering output.
deg_rt.plot()
plt.title('Real-Time C++')
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=1.0,
                    wspace=0.35)
"""
Explanation: The time series give a better picture of the worst case scenario. The real-time optimized C++ implementation outshines the C++ and Python implementations that have very similar performance. What is not visible in these plots but that was evident during some of the testing that both the Python and the regular C++ implementation suffer from occasional peaks in the time series that are much worse than any of the visible values. This is because the operating system can take control from either of these processes to process some other tasks. The real-time C++ implementation can only be interrupted by hardware and real-time kernel tasks but not by other tasks in the operating system.
Steering Decisions
The timing results are only comparable if all inference implementations result in the same steering decisions.
To analyze this let's plot the outputs of the inference model as function of the images. We should expect the functions to look identical.
End of explanation
"""
|
dennys-bd/Coursera-Machine-Learning-Specialization | Course 2 - ML, Regression/Overfitting_Demo_Ridge_Lasso.ipynb | mit | import graphlab
import math
import random
import numpy
from matplotlib import pyplot as plt
%matplotlib inline
"""
Explanation: Overfitting demo
Create a dataset based on a true sinusoidal relationship
Let's look at a synthetic dataset consisting of 30 points drawn from the sinusoid $y = \sin(4x)$:
End of explanation
"""
random.seed(98103)
n = 30
x = graphlab.SArray([random.random() for i in range(n)]).sort()
"""
Explanation: Create random values for x in interval [0,1)
End of explanation
"""
y = x.apply(lambda x: math.sin(4*x))
"""
Explanation: Compute y
End of explanation
"""
random.seed(1)
e = graphlab.SArray([random.gauss(0,1.0/3.0) for i in range(n)])
y = y + e
"""
Explanation: Add random Gaussian noise to y
End of explanation
"""
data = graphlab.SFrame({'X1':x,'Y':y})
data
"""
Explanation: Put data into an SFrame to manipulate later
End of explanation
"""
def plot_data(data):
    """Scatter-plot the observations (column 'X1' vs. column 'Y') as black dots."""
    xs, ys = data['X1'], data['Y']
    plt.plot(xs, ys, 'k.')
    plt.xlabel('x')
    plt.ylabel('y')
plot_data(data)
"""
Explanation: Create a function to plot the data, since we'll do it many times
End of explanation
"""
def polynomial_features(data, deg):
    """Return a copy of *data* augmented with power columns 'X2' .. 'X<deg>'.

    Column 'Xk' is the element-wise k-th power of 'X1', built incrementally
    (Xk = X(k-1) * X1); the original container is left untouched.
    """
    augmented = data.copy()
    for power in range(2, deg + 1):
        augmented['X' + str(power)] = augmented['X' + str(power - 1)] * augmented['X1']
    return augmented
"""
Explanation: Define some useful polynomial regression functions
Define a function to create our features for a polynomial regression model of any degree:
End of explanation
"""
def polynomial_regression(data, deg):
    """Fit an unpenalized least-squares polynomial of degree `deg` to `data`.

    Features X1..X<deg> are generated first; both the L1 and L2 penalties
    are set to zero so this is an ordinary (unregularized) fit.
    """
    model = graphlab.linear_regression.create(polynomial_features(data,deg),
                                              target='Y', l2_penalty=0.,l1_penalty=0.,
                                              validation_set=None,verbose=False)
    return model
"""
Explanation: Define a function to fit a polynomial linear regression model of degree "deg" to the data in "data":
End of explanation
"""
def plot_poly_predictions(data, model):
    """Overlay the model's fitted polynomial curve on a scatter of the raw data."""
    plot_data(data)
    # Get the degree of the polynomial
    # (number of learned coefficients minus one for the intercept term)
    deg = len(model.coefficients['value'])-1
    # Create 200 points in the x axis and compute the predicted value for each point
    # (an evenly spaced grid over [0, 1))
    x_pred = graphlab.SFrame({'X1':[i/200.0 for i in range(200)]})
    y_pred = model.predict(polynomial_features(x_pred,deg))
    # plot predictions
    plt.plot(x_pred['X1'], y_pred, 'g-', label='degree ' + str(deg) + ' fit')
    plt.legend(loc='upper left')
    plt.axis([0,1,-1.5,2])
"""
Explanation: Define function to plot data and predictions made, since we are going to use it many times.
End of explanation
"""
def print_coefficients(model):
    """Pretty-print the learned polynomial via numpy.poly1d."""
    # Get the degree of the polynomial
    deg = len(model.coefficients['value'])-1
    # Get learned parameters as a list
    w = list(model.coefficients['value'])
    # Numpy has a nifty function to print out polynomials in a pretty way
    # (We'll use it, but it needs the parameters in the reverse order)
    print 'Learned polynomial for degree ' + str(deg) + ':'
    w.reverse()  # poly1d expects the highest-order coefficient first
    print numpy.poly1d(w)
"""
Explanation: Create a function that prints the polynomial coefficients in a pretty way :)
End of explanation
"""
model = polynomial_regression(data, deg=2)
"""
Explanation: Fit a degree-2 polynomial
Fit our degree-2 polynomial to the data generated above:
End of explanation
"""
print_coefficients(model)
"""
Explanation: Inspect learned parameters
End of explanation
"""
plot_poly_predictions(data,model)
"""
Explanation: Form and plot our predictions along a grid of x values:
End of explanation
"""
model = polynomial_regression(data, deg=4)
print_coefficients(model)
plot_poly_predictions(data,model)
"""
Explanation: Fit a degree-4 polynomial
End of explanation
"""
model = polynomial_regression(data, deg=16)
print_coefficients(model)
"""
Explanation: Fit a degree-16 polynomial
End of explanation
"""
plot_poly_predictions(data,model)
"""
Explanation: Woah!!!! Those coefficients are crazy! On the order of 10^6.
End of explanation
"""
def polynomial_ridge_regression(data, deg, l2_penalty):
    """Fit a degree-`deg` polynomial with an L2 (ridge) penalty of strength `l2_penalty`."""
    features = polynomial_features(data, deg)
    return graphlab.linear_regression.create(features,
                                             target='Y',
                                             l2_penalty=l2_penalty,
                                             validation_set=None,
                                             verbose=False)
"""
Explanation: Above: Fit looks pretty wild, too. Here's a clear example of how overfitting is associated with very large magnitude estimated coefficients.
#
#
Ridge Regression
Ridge regression aims to avoid overfitting by adding a cost to the RSS term of standard least squares that depends on the 2-norm of the coefficients $\|w\|$. The result is penalizing fits with large coefficients. The strength of this penalty, and thus the fit vs. model complexity balance, is controled by a parameter lambda (here called "L2_penalty").
Define our function to solve the ridge objective for a polynomial regression model of any degree:
End of explanation
"""
model = polynomial_ridge_regression(data, deg=16, l2_penalty=1e-25)
print_coefficients(model)
plot_poly_predictions(data,model)
"""
Explanation: Perform a ridge fit of a degree-16 polynomial using a very small penalty strength
End of explanation
"""
model = polynomial_ridge_regression(data, deg=16, l2_penalty=100)
print_coefficients(model)
plot_poly_predictions(data,model)
"""
Explanation: Perform a ridge fit of a degree-16 polynomial using a very large penalty strength
End of explanation
"""
for l2_penalty in [1e-25, 1e-10, 1e-6, 1e-3, 1e2]:
model = polynomial_ridge_regression(data, deg=16, l2_penalty=l2_penalty)
print 'lambda = %.2e' % l2_penalty
print_coefficients(model)
print '\n'
plt.figure()
plot_poly_predictions(data,model)
plt.title('Ridge, lambda = %.2e' % l2_penalty)
data
"""
Explanation: Let's look at fits for a sequence of increasing lambda values
End of explanation
"""
# LOO cross validation -- return the average MSE
def loo(data, deg, l2_penalty_values):
    """Leave-one-out cross validation over a grid of ridge penalty strengths.

    Returns (l2_penalty_mse, best_l2_penalty): the average LOO squared error
    for each candidate penalty, and the penalty minimizing that error.
    """
    # Create polynomial features
    data = polynomial_features(data, deg)
    # Create as many folds for cross validation as number of data points
    num_folds = len(data)
    folds = graphlab.cross_validation.KFold(data,num_folds)
    # for each value of l2_penalty, fit a model for each fold and compute average MSE
    l2_penalty_mse = []
    min_mse = None
    best_l2_penalty = None
    for l2_penalty in l2_penalty_values:
        next_mse = 0.0
        for train_set, validation_set in folds:
            # train model
            model = graphlab.linear_regression.create(train_set,target='Y',
                                                      l2_penalty=l2_penalty,
                                                      validation_set=None,verbose=False)
            # predict on validation set
            y_test_predicted = model.predict(validation_set)
            # compute squared error
            next_mse += ((y_test_predicted-validation_set['Y'])**2).sum()
        # save squared error in list of MSE for each l2_penalty
        # (each LOO fold holds one point, so the sum / num_folds is the average MSE)
        next_mse = next_mse/num_folds
        l2_penalty_mse.append(next_mse)
        # keep track of the best penalty seen so far
        if min_mse is None or next_mse < min_mse:
            min_mse = next_mse
            best_l2_penalty = l2_penalty
    return l2_penalty_mse,best_l2_penalty
"""
Explanation: Perform a ridge fit of a degree-16 polynomial using a "good" penalty strength
We will learn about cross validation later in this course as a way to select a good value of the tuning parameter (penalty strength) lambda. Here, we consider "leave one out" (LOO) cross validation, which one can show approximates average mean square error (MSE). As a result, choosing lambda to minimize the LOO error is equivalent to choosing lambda to minimize an approximation to average MSE.
End of explanation
"""
l2_penalty_values = numpy.logspace(-4, 10, num=10)
l2_penalty_mse,best_l2_penalty = loo(data, 16, l2_penalty_values)
"""
Explanation: Run LOO cross validation for "num" values of lambda, on a log scale
End of explanation
"""
plt.plot(l2_penalty_values,l2_penalty_mse,'k-')
plt.xlabel('$\ell_2$ penalty')
plt.ylabel('LOO cross validation error')
plt.xscale('log')
plt.yscale('log')
"""
Explanation: Plot results of estimating LOO for each value of lambda
End of explanation
"""
best_l2_penalty
model = polynomial_ridge_regression(data, deg=16, l2_penalty=best_l2_penalty)
print_coefficients(model)
plot_poly_predictions(data,model)
"""
Explanation: Find the value of lambda, $\lambda_{\mathrm{CV}}$, that minimizes the LOO cross validation error, and plot resulting fit
End of explanation
"""
def polynomial_lasso_regression(data, deg, l1_penalty):
    """Fit a degree-`deg` polynomial with an L1 (lasso) penalty of strength `l1_penalty`.

    Uses the FISTA solver with many iterations and a tight convergence
    threshold so small coefficients are driven exactly to zero.
    """
    features = polynomial_features(data, deg)
    return graphlab.linear_regression.create(features,
                                             target='Y',
                                             l2_penalty=0.,
                                             l1_penalty=l1_penalty,
                                             validation_set=None,
                                             solver='fista',
                                             verbose=False,
                                             max_iterations=3000,
                                             convergence_threshold=1e-10)
"""
Explanation: Lasso Regression
Lasso regression jointly shrinks coefficients to avoid overfitting, and implicitly performs feature selection by setting some coefficients exactly to 0 for sufficiently large penalty strength lambda (here called "L1_penalty"). In particular, lasso takes the RSS term of standard least squares and adds a 1-norm cost of the coefficients $\|w\|$.
Define our function to solve the lasso objective for a polynomial regression model of any degree:
End of explanation
"""
for l1_penalty in [0.0001, 0.01, 0.1, 10]:
model = polynomial_lasso_regression(data, deg=16, l1_penalty=l1_penalty)
print 'l1_penalty = %e' % l1_penalty
print 'number of nonzeros = %d' % (model.coefficients['value']).nnz()
print_coefficients(model)
print '\n'
plt.figure()
plot_poly_predictions(data,model)
plt.title('LASSO, lambda = %.2e, # nonzeros = %d' % (l1_penalty, (model.coefficients['value']).nnz()))
"""
Explanation: Explore the lasso solution as a function of a few different penalty strengths
We refer to lambda in the lasso case below as "l1_penalty"
End of explanation
"""
|
boffi/boffi.github.io | dati_2015/ha03/06_3_DOF_System.ipynb | mit | bm = [[p(( 1, 0)), p(( 1, 1)), p(( 1, 2)), p(( 3, 0)), p(( 0, 0))],
[p(( 0, 0)), p(( 0, 0)), p(( 1, 0)), p(( 1, 0)), p(( 0, 0))],
[p(( 0, 0)), p(( 0,-1)), p(( 0,-1)), p((-1, 0)), p((-1, 0))]]
"""
Explanation: 3 DOF System
<img src="bending.svg" style="width:100%">
In the figure above
<ol type='a'>
<li> the system under investigation, with the two supported masses and
the dynamical degrees of freedom that describe the system deformation
(top left);
<li> the three diagrams of bending moment (in red positive bending moments,
in blue negative ones) that derive from application of external unit
forces, corresponding to each of the three degrees of freedom.
</ol>
The same bending moments are represented in the following data structure in terms of polynomials of first degree p((linear_coefficient, constant_coefficient)), each row corresponding to a load condition while the terms in each row are corresponding, the first 4 to the segments on length L on the horizontal part, from left to right (1,2,3) and from rigth to left (4), the fifth is corresponding to the vertical part, from top to bottom.
End of explanation
"""
F = np.mat([[sum(polyint(bm0[i]*bm1[i])(1) for i in range(5))
for bm1 in bm] for bm0 in bm])
print('F = 1/6 * L^3/EJ *')
print(F*6)
"""
Explanation: To compute the flexibilities we sum the integrals of the products of bending moments on each of the five spans of unit length that we are using and place the results in a 2D data structure that is eventually converted to a matrix by np.mat.
End of explanation
"""
K = F.I
print('K = 3/136 * EJ/L^3 *')
print(K*136/3)
"""
Explanation: we invert the flexibility matrix to obtain the stiffness matrix
End of explanation
"""
M = np.mat(np.eye(3)) ; M[2,2]=2
print('M = m *')
print (M)
evals, evecs = eigh(K,M)
print("Eigenvalues, w_0^2 *", evals)
for i in range(3):
if evecs[0,i]<0: evecs[:,i]*=-1
print("Matrix of mass normalized eigenvectors,")
print(evecs)
"""
Explanation: and eventually we define the mass matrix
End of explanation
"""
pi = np.pi
t1 = np.linspace(0,2*pi,601)
plt.plot(t1,1-np.cos(t1))
plt.xlabel(r'$\omega_0t$', size=20)
plt.ylabel(r'$p(t)\,\frac{L^3}{\delta\,EJ}$', size=20)
plt.xlim((0,2*pi))
plt.ylim((-0.05,2.05))
plt.xticks((0,pi/2,pi,pi*1.5,2*pi),
(r'$0$', r'$\pi/2$', r'$\pi$', r'$3\pi/2$', r'$2\pi$'), fontsize=20)
plt.title('The normalized load')
plt.show()
"""
Explanation: The Load
The load is $F_0\,\boldsymbol{r}\,f(t)$ with $F_0 = \delta EJ/L^3$, $\boldsymbol{r}=\begin{Bmatrix}1&0&0\end{Bmatrix}^T$ and
$f(t) = 2\sin^2(\omega_0t/2)=1-\cos(\omega_0t)$ for $0\le \omega_0 t\le 2\pi$ while $f(t)=0$ otherwise.
End of explanation
"""
r = np.array((1,0,0))
w = np.sqrt(evals)
C = np.dot(evecs.T,r)/evals
D = np.dot(evecs.T,r)/(1-evals)
display(Latex(r'\begin{align}' +
r'\\'.join(r"""
\frac{\xi_%d(t)}\delta &= %+g %+g \cos(\omega_0 t),
&& \text{for } 0 \le \omega_0 t \le 2\pi.
""" % (i+1,C[i],D[i]) for i in range(3)) +
r'\end{align}'))
for i in 0, 1, 2:
plt.plot(t1, C[i]+D[i]*np.cos(t1), label=r'$\xi_%d(t)$'%(i+1))
plt.xlabel(r'$\omega_0t$', size=20)
plt.ylabel(r'$\xi/\delta$', size=20)
plt.legend(loc=0, ncol=3)
plt.xlim((0,2*pi))
plt.xticks((0,pi/2,pi,pi*1.5,2*pi),
(r'$0$', r'$\pi/2$', r'$\pi$', r'$3\pi/2$', r'$2\pi$'))
plt.title('The particular integrals, mode by mode')
plt.show()
"""
Explanation: The Particular Integrals
For our load, each modal equation of motion can be written as
\begin{align}
m \ddot q_i + m \Lambda_i^2\omega_0^2 q_i &=
\delta\frac{EJ}{L^3}\boldsymbol\psi_i^T\boldsymbol{r}\,
(1-\cos(\omega_0t))\Rightarrow\\
\ddot q_i + \Lambda_i^2\omega_0^2 q_i &= G_i \delta\omega_0^2 \,
(1-\cos(\omega_0t))
\end{align}
with $G_i = \boldsymbol\psi_i^T\boldsymbol{r}.$
With $\xi_i = C_i + D_i \cos(\omega_0 t)$, substituting in the equation of motion and considering separately the constant terms and the cosine terms, with appropriate simplifications we have
\begin{align}
\Lambda_i^2\,C_i &= +G_i \, \delta\\
(\Lambda_i^2-1) \, D_i &= -G_i\,\delta
\end{align}
and consequently
$$ C_i = +\delta\,\frac{\boldsymbol\psi_i^T\boldsymbol{r}}{\Lambda^2_i},\qquad
D_i = -\delta\,\frac{\boldsymbol\psi_i^T\boldsymbol{r}}{\Lambda^2_i-1}.$$
End of explanation
"""
A = -C - D
L = np.sqrt(evals)
t1 = np.linspace(0,2*pi,601)
q1 = [A[i]*np.cos(L[i]*t1) + C[i] + D[i]*np.cos(t1) for i in (0,1,2)]
display(Latex(r'\begin{align}' +
r'\\'.join(r"""
\frac{q_%d(t)}\delta &= %+g %+g \cos(\omega_0 t) %+g \cos(%g\omega_0t), &&
\text{for } 0 \le \omega_0 t \le 2\pi.
""" % (i+1,C[i],D[i],A[i],L[i]) for i in range(3)) +
r'\end{align}'))
"""
Explanation: Modal Responses
With respect to the forced phase, the modal responses have the generic expression
\begin{align}
q_i(t) & = A_i\cos(\Lambda_i\omega_0t)
+ B_i\sin(\Lambda_i\omega_0t) + C_i + D_i\cos(\omega_0t),\
\dot q_i(t) & = \Lambda_i\omega_0 \left(
B_i\cos(\Lambda_i\omega_0t) - A_i\sin(\Lambda_i\omega_0t) \right) -
\omega_0 D_i \sin(\omega_0t),
\end{align}
and we can write, for the specified initial rest conditions, that
$$ A_i + C_i + D_i = 0, \qquad B_i = 0$$
hence
\begin{align}
q_i(t) & = (1-\cos(\Lambda_i\omega_0t)) C_i
+ (\cos(\omega_0t) - \cos(\Lambda_i\omega_0t)) D_i,\
{\dot q}_i(t) & = \Lambda_i\omega_0 (C_i+D_i) \sin(\Lambda_i\omega_0t) -
\omega_0 D_i \sin(\omega_0t).
\end{align}
End of explanation
"""
ct1 = np.cos(L*2*pi)
st1 = np.sin(L*2*pi)
q0t1 = C + D*np.cos(2*pi) + A*ct1
q1t1 = - D*np.sin(2*pi) - A*st1*L
print(q0t1, q1t1)
As = (q0t1*L*ct1 - q1t1*st1)/L
Bs = (q0t1*L*st1 + q1t1*ct1)/L
print(As*ct1+Bs*st1, L*(Bs*ct1-As*st1))
t2 = np.linspace(2*pi, 4*pi, 601)
q2 = [As[i]*np.cos(L[i]*t2) + Bs[i]*np.sin(L[i]*t2) for i in (0,1,2)]
display(Latex(r'\begin{align}' +
r'\\'.join(r"""
\frac{q^*_%d(t)}\delta &= %+g \cos(%g\omega_0 t) %+g \sin(%g\omega_0t), &&
\text{for } 2\pi \le \omega_0 t.
""" % (i+1, As[i], L[i], Bs[i], L[i]) for i in range(3)) +
r'\end{align}'))
"""
Explanation: With respect to the free response phase, $2\pi \le \omega_0t$, writing
$$
q^_i(t) = A^_i \cos(\Lambda_i\omega_0t) + B^*_i \sin(\Lambda_i\omega_0t)
$$
imposing the continuity of modal displacements and modal velocities we have
\begin{align}
q_i(t_1) &= A^_i \cos(\Lambda_i\omega_0t_1) + B^_i \sin(\Lambda_i\omega_0t_1)\
\dot q_i(t_1) &= \big(
B^_i \cos(\Lambda_i\omega_0t_1) - A^_i \sin(\Lambda_i\omega_0t_1)
\big) \Lambda_i\omega_0
\end{align}
that gives
\begin{align}
A^_i &= \frac{q_i(t_1)\Lambda_i\omega_0\cos(\Lambda_i\omega_0t_1) - \dot q_i(t_1)\sin(\Lambda_i\omega_0t_1)}{\Lambda_i\omega_0} \
B^_i &= \frac{q_i(t_1)\Lambda_i\omega_0\sin(\Lambda_i\omega_0t_1) + \dot q_i(t_1)\cos(\Lambda_i\omega_0t_1)}{\Lambda_i\omega_0} \
\end{align}
End of explanation
"""
for i in (0,1,2):
plt.plot(t1/pi,q1[i], color=l_colors[i],
label='$q_{%d}(t)$'%(i+1))
plt.plot(t2/pi,q2[i], color=l_colors[i])
plt.xlabel(r'$\omega_0t/\pi$', fontsize=18)
plt.ylabel(r'$q/\delta$', fontsize=18)
plt.legend(loc=0, fontsize=18)
plt.show()
"""
Explanation: Plotting the modal responses
Let's plot the modal responses, first one by one, to appreciate the details of the single modal response
End of explanation
"""
for i in (0,1,2):
plt.plot(t1/pi,q1[i], color=l_colors[i],
label='$q_{%d}(t)$'%(i+1))
plt.plot(t2/pi,q2[i], color=l_colors[i])
plt.xlabel(r'$\omega_0t/\pi$', fontsize=18)
plt.ylabel(r'$q/\delta$', fontsize=18)
plt.legend(loc=0, fontsize=18)
plt.show()
"""
Explanation: then all of them in a single plot, to appreciate the relative magnutudes of the different modal responses
End of explanation
"""
t = np.hstack((t1, t2))
q = np.hstack((q1, q2))
x = np.dot(evecs, q)
"""
Explanation: System Response in Natural Coordinates
We stack together the times and the modal responses for the forced and the free phases in two single vectors, then we compute the nodal response by premultiplying the modal response by the eigenvectors matrix
End of explanation
"""
for i in (0,1,2): plt.plot(t/pi,x[i],
label='$x_{%d}(t)$'%(i+1))
plt.xlabel(r'$\omega_0t/\pi$', fontsize=18)
plt.ylabel(r'$x/\delta$', fontsize=18)
plt.legend(loc=0, fontsize=18)
plt.show()
"""
Explanation: Plotting of the natural coordinate responses
All of them in a single plot, as they have the same order of magnitude
End of explanation
"""
ct2 = np.cos(L*4*pi)
st2 = np.sin(L*4*pi)
q0t2 = As*ct2+Bs*st2 ; q1t2 = L*(Bs*ct2-As*st2)
display(Latex(r"$\boldsymbol x(t_2) = \{"+
",".join("%10.6f"%x for x in np.dot(evecs,q0t2))+
"\}\,\delta$"))
display(Latex(r"$\boldsymbol v(t_2) = \{"+
",".join("%10.6f"%x for x in np.dot(evecs,q1t2))+
"\}\,\omega_0\,\delta$"))
"""
Explanation: Final Displacements and Final Velocities
Say that $t_2=4\pi/\omega_0$, we compute the vectors of sines and cosines with different frequencies at $t_2$, then we compute the modal displacements and velocities (note that the dimensional velocities are these adimensional velocities multiplied by $\omega_0\,\delta$) and eventually we compute the nodal quantities by premultiplication by the eigenvectors matrix.
End of explanation
"""
|
twosigma/beaker-notebook | doc/python/TableAPI.ipynb | apache-2.0 | import pandas as pd
from beakerx import *
from beakerx.object import beakerx
pd.read_csv('../resources/data/interest-rates.csv')
table = TableDisplay(pd.read_csv('../resources/data/interest-rates.csv'))
table.setAlignmentProviderForColumn('m3', TableDisplayAlignmentProvider.CENTER_ALIGNMENT)
table.setRendererForColumn("y10", TableDisplayCellRenderer.getDataBarsRenderer(False))
table.setRendererForType(ColumnType.Double, TableDisplayCellRenderer.getDataBarsRenderer(True))
table
df = pd.read_csv('../resources/data/interest-rates.csv')
df['time'] = df['time'].str.slice(0,19).astype('datetime64[ns]')
table = TableDisplay(df)
table.setStringFormatForTimes(TimeUnit.DAYS)
table.setStringFormatForType(ColumnType.Double, TableDisplayStringFormat.getDecimalFormat(4,6))
table.setStringFormatForColumn("m3", TableDisplayStringFormat.getDecimalFormat(0, 0))
table
table = TableDisplay(pd.read_csv('../resources/data/interest-rates.csv'))
table
#freeze a column
table.setColumnFrozen("y1", True)
#hide a column
table.setColumnVisible("y30", False)
table.setColumnOrder(["m3", "y1", "y5", "time", "y2"])
def config_tooltip(row, column, table):
    """Tooltip callback: describe the value of the hovered (row, column) cell."""
    cell = table.values[row][column]
    return "The value is: " + str(cell)
table.setToolTip(config_tooltip)
table.setDataFontSize(16)
table.setHeaderFontSize(18)
table
mapListColorProvider = [
{"a": 1, "b": 2, "c": 3},
{"a": 4, "b": 5, "c": 6},
{"a": 7, "b": 8, "c": 5}
]
tabledisplay = TableDisplay(mapListColorProvider)
colors = [
[Color.LIGHT_GRAY, Color.GRAY, Color.RED],
[Color.DARK_GREEN, Color.ORANGE, Color.RED],
[Color.MAGENTA, Color.BLUE, Color.BLACK]
]
def color_provider(row, column, table):
    # Font-color callback for the table widget: look up the Color for this
    # cell in the module-level `colors` grid (indexed [row][column]).
    # The `table` argument is unused but required by the callback signature.
    return colors[row][column]
tabledisplay.setFontColorProvider(color_provider)
tabledisplay
mapListFilter = [
{"a":1, "b":2, "c":3},
{"a":4, "b":5, "c":6},
{"a":7, "b":8, "c":5}
]
display = TableDisplay(mapListFilter)
def filter_row(row, model):
    """Row-filter callback: keep only rows whose second column equals 8."""
    second_column = model[row][1]
    return second_column == 8
display.setRowFilter(filter_row)
display
table = TableDisplay(pd.read_csv('../resources/data/interest-rates.csv'))
table.addCellHighlighter(TableDisplayCellHighlighter.getHeatmapHighlighter("m3", TableDisplayCellHighlighter.FULL_ROW))
table
"""
Explanation: Python API for Table Display
In addition to APIs for creating and formatting BeakerX's interactive table widget, the Python runtime configures pandas to display tables with the interactive widget instead of static HTML.
End of explanation
"""
beakerx.pandas_display_default()
pd.read_csv('../resources/data/interest-rates.csv')
"""
Explanation: Display mode: Pandas default
End of explanation
"""
beakerx.pandas_display_table()
pd.read_csv('../resources/data/interest-rates.csv')
"""
Explanation: Display mode: TableDisplay Widget
End of explanation
"""
TableDisplay([{'y1':4, 'm3':2, 'z2':1}, {'m3':4, 'z2':2}])
TableDisplay({"x" : 1, "y" : 2})
"""
Explanation: Recognized Formats
End of explanation
"""
mapList4 = [
{"a":1, "b":2, "c":3},
{"a":4, "b":5, "c":6},
{"a":7, "b":8, "c":5}
]
display = TableDisplay(mapList4)
def dclick(row, column, tabledisplay):
    """Double-click callback: replace the clicked cell with the sum of its row."""
    row_total = sum(int(v) for v in tabledisplay.values[row])
    tabledisplay.values[row][column] = row_total
display.setDoubleClickAction(dclick)
def negate(row, column, tabledisplay):
    """Context-menu action: flip the sign of the clicked cell."""
    current = int(tabledisplay.values[row][column])
    tabledisplay.values[row][column] = -current

def incr(row, column, tabledisplay):
    """Context-menu action: add one to the clicked cell."""
    current = int(tabledisplay.values[row][column])
    tabledisplay.values[row][column] = current + 1
display.addContextMenuItem("negate", negate)
display.addContextMenuItem("increment", incr)
display
mapList4 = [
{"a":1, "b":2, "c":3},
{"a":4, "b":5, "c":6},
{"a":7, "b":8, "c":5}
]
display = TableDisplay(mapList4)
#set what happens on a double click
display.setDoubleClickAction("runDoubleClick")
display
print("runDoubleClick fired")
print(display.details)
"""
Explanation: Programmable Table Actions
End of explanation
"""
df = pd.read_csv('../resources/data/interest-rates.csv')
df.set_index(['m3'])
df = pd.read_csv('../resources/data/interest-rates.csv')
df.index = df['time']
df
"""
Explanation: Set index to DataFrame
End of explanation
"""
dataToUpdate = [
{'a':1, 'b':2, 'c':3},
{'a':4, 'b':5, 'c':6},
{'a':7, 'b':8, 'c':9}
]
tableToUpdate = TableDisplay(dataToUpdate)
tableToUpdate
tableToUpdate.values[0][0] = 99
tableToUpdate.sendModel()
tableToUpdate.updateCell(2,"c",121)
tableToUpdate.sendModel()
"""
Explanation: Update cell
End of explanation
"""
table = TableDisplay({
'w': '$2 \\sigma$',
'x': '<em style="color:red">italic red</em>',
'y': '<b style="color:blue">bold blue</b>',
'z': 'strings without markup work fine too',
})
table.setStringFormatForColumn("Value", TableDisplayStringFormat.getHTMLFormat())
table
"""
Explanation: HTML format
HTML format allows markup and styling of the cell's content. Interactive JavaScript is not supported however.
End of explanation
"""
TableDisplay({'Two Sigma': 'http://twosigma.com', 'BeakerX': 'http://BeakerX.com'})
"""
Explanation: Auto linking of URLs
The normal string format automatically detects URLs and links them. An underline appears when the mouse hovers over such a string, and when you click it opens in a new window.
End of explanation
"""
|
patrickmineault/xcorr-notebooks | notebooks/Paired-sampling.ipynb | mit | %config InlineBackend.figure_format = 'retina'
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import plotnine
import seaborn as sns
sns.set(style="darkgrid")
class LNP:
    r"""A simple linear-nonlinear-Poisson (LNP) model neuron.

    The stimulus is projected onto a Gabor-like filter ``w``, passed through
    the softplus nonlinearity :math:`\log(1 + \exp(x - 0.5))`, scaled by
    ``rate_multiplier``, and the resulting rate drives a Poisson spike count.

    Raw docstrings are used here: the original non-raw docstring contained
    ``\log``/``\exp``, which are invalid string escape sequences in Python 3.
    """
    def __init__(self):
        rg = np.arange(-31.5, 32.5)
        # Gabor-like filter: cosine carrier under a Gaussian envelope,
        # scaled down so rates stay moderate.
        self.w = np.cos(rg / 20.0 * 2 * np.pi) * np.exp(-rg ** 2 / 2 / 10 ** 2)
        self.w /= 4
        self.input_size = len(self.w)
        self.rate_multiplier = 1

    def forward(self, X):
        r"""Return Poisson spike counts for stimuli ``X`` of shape (N, input_size).

        The firing rate is ``rate_multiplier * log(1 + exp(X @ w - 0.5))``;
        each rate is then sampled from a Poisson distribution, so the output
        is stochastic (non-negative integers of shape (N,)).
        """
        return np.random.poisson(self.rate_multiplier * np.log(1 + np.exp(X.dot(self.w) - .5)))
N = 10000
model = LNP()
plt.plot(model.w)
plt.title("Model weights")
X = np.random.randn(N, model.input_size)
Y = model.forward(X)
# Measure the convergence of the model as a function of N
plt.plot(Y)
"""
Explanation: Efficiency of different stimulus ensembles for systems identification
In reverse correlation, we approximate a nonlinear, stochastic function $y \sim f(X)$ locally by a linear approximation weighted by a gaussian window. We compute this approximation by computing the response $y$ of the system for a variety of normally distributed inputs $X \sim N(0, \sigma^2)$. An estimate of the response is given by a sum of the stimuli weighted by the responses, $\hat w = \frac{1}{N} X^T y$. When these responses are action potentials, or spikes measures from biological neurons, the estimate $\hat w$ is also called the spike-triggered average.
How good is this estimator? Not very good. It can, however, be significantly improved by careful consideration of the properties of this Monte Carlo estimator, namely by changing the input ensemble to use antithetic sampling, or by shifting the distribution of the response.
To demonstrate this, I use an example nonlinear function to be estimated via this black-box method. It consists of weighting the input with a windowed sinusoid, followed by an expansive nonlinearity that drives a Poisson process.
End of explanation
"""
def estimate_w_hat(X, Y):
    """Running reverse-correlation (spike-triggered average) estimate.

    Row k of the result is the estimate from the first k+1 stimulus/response
    pairs: ``(1/(k+1)) * sum_i Y_i * X_i``.  No mean correction is applied
    here; see ``estimate_w_hat_low_var`` for the mean-subtracted variant.
    """
    n_stimuli = X.shape[0]
    weighted = X * Y.reshape((-1, 1))
    running_sums = np.cumsum(weighted, axis=0)
    counts = np.arange(1, n_stimuli + 1).reshape((-1, 1))
    return running_sums / counts
def calculate_rho(w_hats, w):
    """Cosine similarity between each row of *w_hats* and the true weights *w*."""
    numerators = w_hats.dot(w)
    denominators = np.sqrt((w_hats ** 2).sum(1) * (w ** 2).sum())
    rhos = numerators / denominators
    assert rhos.shape == w_hats.shape[:1]  # sanity: one rho per estimate row
    return rhos
w_hat = estimate_w_hat(X, Y)
rho = calculate_rho(w_hat, model.w)
plt.plot(np.arange(1, N+1), rho)
plt.xlabel("Number of stimuli")
plt.ylim((0, 1))
plt.title("correlation between estimated weights and true weights")
"""
Explanation: Calculate estimates of the weights using reverse correlation. This is given by:
$$\frac{1}{N} X^T y$$
End of explanation
"""
results = []
X = np.random.randn(N, len(model.w))
y = model.forward(X)
# Center y for this demo.
y = y - y.mean()
for i in range(5):
w_hat = estimate_w_hat(X, y + i * 2)
rho = calculate_rho(w_hat, model.w)
results += [{'nstims': j + 1,
'rho': rho[j],
'offset': str(i*2)} for j in range(len(rho))]
df = pd.DataFrame(results)
sns.lineplot(x='nstims', y='rho', hue='offset', data=df, ci=None)
plt.legend((0, 2, 4, 6, 8))
"""
Explanation: Improving on the reverse correlation estimate
Our crude estimate works, but we can squeeze a bit more efficiency out of it.
One concern is that our estimate is unduly affected by shifts in the mean of the response. If we set:
$y \to y + a$
Then the reverse correlation estimate shifts to:
$$\hat w = \frac{1}{N} X^T(y + a) = \frac{1}{N} X^T y + \frac{a}{N} X^T \hat 1$$
Now imagine that our responses $y$ are centered around 0. As $a$ becomes larger and larger, the variance of $\hat w$ will grow, and our estimates will be worse and worse. Let's show this in a simulation:
End of explanation
"""
results = []
X = np.random.randn(N, len(model.w))
rg = np.floor(np.arange(N) / 2.0).astype(np.int)
alternating_sign = 2 * ((np.arange(N) % 2) - .5)
Xs = alternating_sign.reshape((-1, 1)) * X[rg, :]
y = model.forward(Xs)
# Center y for this demo.
y = y - y.mean()
for i in range(5):
w_hat = estimate_w_hat(Xs, y + i * 2)
rho = calculate_rho(w_hat, model.w)
results += [{'nstims': j + 1,
'rho': rho[j],
'offset': str(i*2)} for j in range(len(rho))]
df = pd.DataFrame(results)
sns.lineplot(x='nstims', y='rho', hue='offset', data=df, ci=None)
plt.legend((0, 2, 4, 6, 8))
plt.title("Estimate quality, antithetic sampling estimate")
"""
Explanation: Adding an offset increases the variance of the estimates and thus it takes more stimuli for the reverse correlation estimate to converge to the true underlying parameter. One solution is antithetic sampling. The idea is to generate stimuli in symmetric pairs $(x, -x)$. It follows that we have that $X^T \hat 1 = 0$, which reduces the variance of the estimate:
End of explanation
"""
def estimate_w_hat_low_var(X, Y):
    """Variance-stabilized running reverse-correlation estimate.

    Same running spike-triggered average as ``estimate_w_hat``, but the
    running mean of the responses is subtracted first (a control variate),
    which makes the estimate invariant to constant offsets added to Y.
    """
    n_stimuli = X.shape[0]
    counts = np.arange(1, n_stimuli + 1)
    weighted_sums = np.cumsum(X * Y.reshape((-1, 1)), axis=0)
    running_means = (np.cumsum(Y) / counts).reshape((-1, 1))
    correction = running_means * np.cumsum(X, axis=0)
    return (weighted_sums - correction) / counts.reshape((-1, 1))
results = []
X = np.random.randn(N, len(model.w))
y = model.forward(X)
for i in range(5):
w_hat = estimate_w_hat_low_var(X, y + i * 2)
rho = calculate_rho(w_hat, model.w)
results += [{'nstims': j + 1,
'rho': rho[j],
'offset': str(i*2)} for j in range(len(rho))]
df = pd.DataFrame(results)
sns.lineplot(x='nstims', y='rho', hue='offset', data=df, ci=None)
plt.legend((0, 2, 4, 6, 8))
plt.title("Estimate quality, variance-stabilized estimate")
"""
Explanation: Indeed, this renders the estimates largely immune to the offset. However, there's another way of reducing the variance: choose $a$ such that $Var(\hat w)$ is minimized. Note that the expectation of $X^T a$ is 0, so it cannot affect the mean of the estimator; however, it can nevertheless change its variance. Indeed, we can show that the minimum variance estimate for $\hat w$ occurs when $a = \bar y$.
This trick also pops up in other Monte Carlo estimates; see the Control Variates section of this excellent blog post for more references.
End of explanation
"""
N = 4000
results = []
for i in range(100):
X = np.random.randn(N, model.input_size)
Y = model.forward(X)
w_hat = estimate_w_hat_low_var(X, Y)
rho = calculate_rho(w_hat, model.w)
rg = np.floor(np.arange(N) / 2.0).astype(np.int)
alternating_sign = 2 * ((np.arange(N) % 2) - .5)
Xs = alternating_sign.reshape((-1, 1)) * X[rg, :]
Y = model.forward(Xs)
w_hat = estimate_w_hat_low_var(Xs, Y)
rho_p = calculate_rho(w_hat, model.w)
results += [{'run': i,
'nstims': j + 1,
'rho': rho[j],
'sampling_type': 'normal'} for j in range(len(rho))]
results += [{'run': i,
'nstims': j + 1,
'rho': rho_p[j],
'sampling_type': 'antithetical'} for j in range(len(rho_p))]
df = pd.DataFrame(results)
ax = plt.figure(figsize=(8, 6)).gca()
sns.lineplot(x='nstims', y='rho', hue='sampling_type', data=df[::100], ax=ax)
"""
Explanation: Which trick works best? For this scenario, it turns out that the variance-stabilized estimate works better than antithetic sampling:
End of explanation
"""
|
EtienneCmb/brainpipe | examples/f_Leave_p-subjects_out.ipynb | gpl-3.0 | import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
# u can use %matplotlib notebook, but there is some bugs with xticks and title
from brainpipe.classification import *
from brainpipe.visual import *
"""
Explanation: This notebook illustrate how to permform a Leave p-subjects out (usually, people working in EEG/MEG do a Leav 1-subject out). This notebook will use some notions introduce in the Classification.ipynb notebook
Documentation: https://etiennecmb.github.io/classification.html
Import librairies
End of explanation
"""
n_features = 5 # Number of features
"""
Explanation: Create a random dataset
We are going to create a random dataset for a 2 class problem, with n_feature for 4 subjects and a diffrent number of trials for each subject. The quality of decoding of features will be increasing with the ranking, meaning that the first feature is going to be a bad one, the second alittle bit better, the third..., the last, the best one.
Dataset settings
End of explanation
"""
def dataset_pear_subject(ntrials, nfeat=None):
    """Create a random two-class dataset for one subject.

    Parameters
    ----------
    ntrials : int
        Number of trials per class (returned arrays hold 2*ntrials rows).
    nfeat : int, optional
        Number of features. Defaults to the module-level ``n_features``,
        preserving the original behavior of existing callers.

    Returns
    -------
    x : ndarray, shape (2*ntrials, nfeat)
        Feature matrix. Class means drift apart linearly with the feature
        index, so the last feature is the easiest one to decode.
    y : ndarray, shape (2*ntrials,)
        Label vector: ntrials zeros followed by ntrials ones.
    """
    if nfeat is None:
        nfeat = n_features
    # Per-feature offset: 0 for the first feature, up to 0.7 for the last.
    spread = np.linspace(0, 0.7, nfeat)
    class1 = np.random.uniform(size=(ntrials, nfeat)) + spread
    class2 = np.random.uniform(size=(ntrials, nfeat)) - spread
    x = np.concatenate((class1, class2), axis=0)
    y = np.ravel([[k]*ntrials for k in np.arange(2)])
    return x, y
# Create a random dataset and a label vector for each subject
x_s1, y_s1 = dataset_pear_subject(20) # 20 trials for subject 1
x_s2, y_s2 = dataset_pear_subject(25) # 25 trials for subject 2
x_s3, y_s3 = dataset_pear_subject(18) # 18 trials for subject 3
x_s4, y_s4 = dataset_pear_subject(10) # 10 trials for subject 4
# Concatenate all datasets and vectors in list :
# (one list entry per subject; the LPSO object expects this layout)
x = [x_s1, x_s2, x_s3, x_s4]
y = [y_s1, y_s2, y_s3, y_s4]
# Plot each subject dataset:
# NOTE(review): `rmaxis` is a brainpipe.visual helper -- presumably it
# removes the listed axis spines; confirm against the brainpipe docs.
plt.figure(0, figsize=(12,6))
plt.boxplot(x);
rmaxis(plt.gca(), ['top', 'right']);
plt.xlabel('Subjects'), plt.ylabel('Values');
print(y)
"""
Explanation: Create datasets
End of explanation
"""
# Classification object : 4 subjects, leave pout=1 subject out per fold,
# linear-kernel SVM as the classifier.
lpso = LeavePSubjectOut(y, 4, pout=1, clf='svm', kern='linear') # Leave ONE-subject out (pout)
# Run classification : 20 label permutations for the significance test.
# Returns decoding accuracies, p-values and the permutation accuracies.
da, pvalue, daperm = lpso.fit(x, n_perm=20, method='label_rnd')
"""
Explanation: Classification
Define a classifier and the leave p-subject out cross-validation
End of explanation
"""
# Plot the decoding accuracy of every feature, overlaying the
# permutation-based chance level (`daplot` is a brainpipe helper).
plt.figure(1, figsize=(12,8))
lpso.daplot(da, daperm=daperm, chance_method='perm', rmax=['top', 'right'],
            dpax=['bottom', 'left'], cmap='viridis', ylim=[10,100], chance_unique=False,
            chance_color='darkgreen')
# Display information about the features :
lpso.info.featinfo
"""
Explanation: Plot your results
Plot decoding
End of explanation
"""
# Convert the permutation distributions into per-feature decoding-accuracy
# thresholds for several significance levels.
# p<0.05
dap05 = lpso.stat.perm_pvalue2da(daperm, p=0.05)
# p<0.01
dap01 = lpso.stat.perm_pvalue2da(daperm, p=0.01)
# p<0.01 with maximum statistics correction
dap_01corrected = lpso.stat.perm_pvalue2da(daperm, p=0.01, maxst=True)
# Plot the three thresholds as a function of the feature index.
plt.figure(2, figsize=(8, 6))
plt.plot(dap05, lw=2, color='darkblue', label='p<0.05')
plt.plot(dap01, lw=2, color='darkred', label='p<0.01')
plt.plot(dap_01corrected, lw=2, color='darkgreen', label='p<0.01 corrected')
plt.xlabel('Features'), plt.ylabel('Decoding accuracy')
plt.legend(loc=0)
"""
Explanation: A little bit of stat (not to much)
Ok, you have your true decoding accuracies, the pvalues and the decoding of all permutations. Inside the classification object lpso, there is some sub-methods for advanced statistics. So, take a look at lpso.stat.
As an example, say that you need to find the p=0.05 corresponding decoding accuracy in the permutation and for each feature:
End of explanation
"""
# Define the group parameter : one group label per feature.
# NOTE(review): this assumes 15 features (5 + 3 + 7), but the dataset above
# was created with `n_features` features -- confirm the two numbers agree.
grp = ['Group1: the bad one']*5 + ['Group2: the middle one']*3 + ['Group3: the best one']*7
# Re-run the fit with grouped features (50 permutations, centered data).
da2, pvalue2, daperm2 = lpso.fit(x, n_perm=50, grp=grp, method='label_rnd', center=True)
"""
Explanation: Grouping witth the Leave p-subject out
The group parameter and multi-features
Previously, we saw how to classify each feature separately. Now we are going to see how to group features. In this example, we defined 15 features. So, we are going to define 3 groups of features
- 'Group1: the bad one': the 5 first
- 'Group2: the middle one': the 3 following
- 'Group3: the best one': the last 7 features
End of explanation
"""
# Plot the decoding accuracy of the three feature groups.
plt.figure(3, figsize=(8, 6))
lpso.daplot(da2, cmap='Spectral_r', ylim=[45, 100], chance_method='perm',
            daperm=daperm2, chance_color='darkgreen');
lpso.info.featinfo
# Export tables to an Excel file:
lpso.info.to_excel('Leave_p-subject_Out.xlsx')
"""
Explanation: Plot the grouping decoding
End of explanation
"""
|
xtr33me/deep-learning | gan_mnist/Intro_to_GANs_Exercises.ipynb | mit | %matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
"""
Explanation: Generative Adversarial Network
In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!
GANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:
Pix2Pix
CycleGAN
A whole list
The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator, it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistiguishable from real data to the discriminator.
The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to contruct it's fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.
The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates a real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.
End of explanation
"""
def model_inputs(real_dim, z_dim):
    """Create the graph's two input placeholders.

    Parameters
    ----------
    real_dim : int
        Flattened size of a real image fed to the discriminator.
    z_dim : int
        Size of the latent vector fed to the generator.

    Returns
    -------
    Pair of float32 placeholders with shapes (None, real_dim) and
    (None, z_dim); the leading None leaves the batch size flexible.
    """
    real = tf.placeholder(tf.float32, (None, real_dim), name="inputs_real")
    latent = tf.placeholder(tf.float32, (None, z_dim), name="inputs_z")
    return real, latent
"""
Explanation: Model Inputs
First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.
Exercise: Finish the model_inputs function below. Create the placeholders for inputs_real and inputs_z using the input sizes real_dim and z_dim respectively.
End of explanation
"""
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
    ''' Build the generator network.

        Arguments
        ---------
        z : Input tensor for the generator (batch of latent vectors)
        out_dim : Size of the generator output (flattened image length)
        n_units : Number of units in the hidden layer
        reuse : Reuse the variables with tf.variable_scope
        alpha : Leak parameter for the leaky ReLU

        Returns
        -------
        out : tanh-activated output tensor with values in (-1, 1).
              Note that only the output is returned (no logits); callers
              use the generated images directly.
    '''
    with tf.variable_scope('generator', reuse=reuse):
        # Hidden layer (linear); the leaky ReLU is applied by hand since
        # this TF version has no built-in leaky-ReLU op.
        h1 = tf.layers.dense(z, n_units, activation=None)
        # Leaky ReLU: f(x) = max(alpha*x, x)
        h1 = tf.maximum(alpha * h1, h1)
        # Logits and tanh output
        logits = tf.layers.dense(h1, out_dim, activation=None)
        out = tf.tanh(logits)
        return out
"""
Explanation: Generator network
Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.
Variable Scope
Here we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.
We could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.
To use tf.variable_scope, you use a with statement:
python
with tf.variable_scope('scope_name', reuse=False):
# code here
Here's more from the TensorFlow documentation to get another look at using tf.variable_scope.
Leaky ReLU
TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one . For this you can just take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:
$$
f(x) = max(\alpha * x, x)
$$
Tanh Output
The generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.
Exercise: Implement the generator network in the function below. You'll need to return the tanh output. Make sure to wrap your code in a variable scope, with 'generator' as the scope name, and pass the reuse keyword argument from the function to tf.variable_scope.
End of explanation
"""
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
    ''' Build the discriminator network.

        Arguments
        ---------
        x : Input tensor (batch of flattened images, real or generated)
        n_units : Number of units in the hidden layer
        reuse : Reuse the variables with tf.variable_scope
        alpha : Leak parameter for the leaky ReLU

        Returns
        -------
        out : sigmoid probability that each input image is real
        logits : pre-sigmoid scores (consumed by the loss functions)
    '''
    with tf.variable_scope('discriminator', reuse=reuse):
        # Single hidden layer followed by a hand-rolled leaky ReLU,
        # f(x) = max(alpha*x, x).
        hidden = tf.layers.dense(x, n_units, activation=None)
        hidden = tf.maximum(alpha * hidden, hidden)
        # One logit per example; the sigmoid squashes it to [0, 1].
        logits = tf.layers.dense(hidden, 1, activation=None)
        out = tf.sigmoid(logits)
        return out, logits
"""
Explanation: Discriminator
The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.
Exercise: Implement the discriminator network in the function below. Same as above, you'll need to return both the logits and the sigmoid output. Make sure to wrap your code in a variable scope, with 'discriminator' as the scope name, and pass the reuse keyword argument from the function arguments to tf.variable_scope.
End of explanation
"""
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label smoothing: real labels become 1 - smooth = 0.9 to help D generalize
smooth = 0.1
"""
Explanation: Hyperparameters
End of explanation
"""
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Generator network (keyword args make the hyperparameter wiring explicit)
g_model = generator(input_z, input_size, n_units=g_hidden_size, reuse=False, alpha=alpha)
# g_model is the generator output
# Discriminator network: pass d_hidden_size / alpha explicitly so that
# changing the hyperparameters above actually takes effect (previously the
# defaults were silently used and only happened to match).
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, reuse=False, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, n_units=d_hidden_size, reuse=True, alpha=alpha)
"""
Explanation: Build network
Now we're building the network from the functions defined above.
First is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.
Then, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.
Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).
Exercise: Build the network from the functions you defined earlier.
End of explanation
"""
# Calculate losses
# Real images: target label 1, smoothed to (1 - smooth) so the
# discriminator generalizes better.
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
                                                                     labels=tf.ones_like(d_logits_real) * (1 - smooth)))
# Fake images: target label 0. Use the *fake* logits for the label shape
# (the original used d_logits_real, which only worked because both
# batches happen to have the same size).
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                                                     labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake
# Generator loss: G wants D to output 1 (real) for its fake images.
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                                                labels=tf.ones_like(d_logits_fake)))
"""
Explanation: Discriminator and Generator Losses
Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will be sigmoid cross-entropies, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like
python
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
For the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)
The discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.
Finally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants to discriminator to output ones for fake images.
Exercise: Calculate the losses for the discriminator and the generator. There are two discriminator losses, one for real images and one for fake images. For the real image loss, use the real logits and (smoothed) labels of ones. For the fake image loss, use the fake logits with labels of all zeros. The total discriminator loss is the sum of those two losses. Finally, the generator loss again uses the fake logits from the discriminator, but this time the labels are all ones because the generator wants to fool the discriminator.
End of explanation
"""
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
# `var_list` restricts each optimizer to its own network's variables, so a
# discriminator step never updates the generator's weights and vice versa.
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
"""
Explanation: Optimizers
We want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.
For the generator optimizer, we only want to generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables that start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance).
We can do something similar with the discriminator. All the variables in the discriminator start with discriminator.
Then, in the optimizer we pass the variable lists to the var_list keyword argument of the minimize method. This tells the optimizer to only update the listed variables. Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.
Exercise: Below, implement the optimizers for the generator and discriminator. First you'll need to get a list of trainable variables, then split that list into two lists, one for the generator variables and another for the discriminator variables. Finally, using AdamOptimizer, create an optimizer for each network that update the network variables separately.
End of explanation
"""
batch_size = 100
epochs = 100
samples = []  # one batch of generator outputs saved per epoch, for viewing
losses = []  # one (discriminator, generator) loss pair recorded per epoch
# Only the generator variables are checkpointed; that is all we need in
# order to sample new images after training.
saver = tf.train.Saver(var_list = g_vars)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for e in range(epochs):
        for ii in range(mnist.train.num_examples//batch_size):
            batch = mnist.train.next_batch(batch_size)
            # Get images, reshape and rescale to pass to D
            # (rescale [0, 1] -> [-1, 1] to match the generator's tanh range)
            batch_images = batch[0].reshape((batch_size, 784))
            batch_images = batch_images*2 - 1
            # Sample random noise for G
            batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
            # Run optimizers: one D step, then one G step per batch
            _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
            _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
        # At the end of each epoch, get the losses and print them out
        # (evaluated on the last batch only -- a noisy but cheap estimate)
        train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
        train_loss_g = g_loss.eval({input_z: batch_z})
        print("Epoch {}/{}...".format(e+1, epochs),
              "Discriminator Loss: {:.4f}...".format(train_loss_d),
              "Generator Loss: {:.4f}".format(train_loss_g))
        # Save losses to view after training
        losses.append((train_loss_d, train_loss_g))
        # Sample from generator as we're training for viewing afterwards
        sample_z = np.random.uniform(-1, 1, size=(16, z_size))
        gen_samples = sess.run(
                       generator(input_z, input_size, reuse=True),
                       feed_dict={input_z: sample_z})
        samples.append(gen_samples)
        saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
    pkl.dump(samples, f)
"""
Explanation: Training
End of explanation
"""
# Plot the per-epoch discriminator and generator losses recorded above.
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
"""
Explanation: Training loss
Here we'll check out the training losses for the generator and discriminator.
End of explanation
"""
def view_samples(epoch, samples):
    """Display a 4x4 grid of generated images for one saved epoch.

    `samples` is a list (one entry per saved epoch) of arrays of
    flattened 28x28 images; `epoch` indexes that list (-1 for the last
    epoch). Returns the matplotlib (fig, axes) pair.
    """
    fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
    for axis, image in zip(axes.flatten(), samples[epoch]):
        # Hide the tick marks/labels, then draw the image in grayscale.
        axis.xaxis.set_visible(False)
        axis.yaxis.set_visible(False)
        axis.imshow(image.reshape((28,28)), cmap='Greys_r')
    return fig, axes
# Load samples from generator taken while training
# (`samples` becomes a list with one batch of 16 images per epoch)
with open('train_samples.pkl', 'rb') as f:
    samples = pkl.load(f)
"""
Explanation: Generator samples from training
Here we can view samples of images from the generator. First we'll look at images taken while training.
End of explanation
"""
_ = view_samples(-1, samples)
"""
Explanation: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
End of explanation
"""
# Show a rows x cols grid: one row every len(samples)/rows saved epochs,
# one column every len(sample)/cols images within that epoch's batch.
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
    for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
        ax.imshow(img.reshape((28,28)), cmap='Greys_r')
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
"""
Explanation: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
End of explanation
"""
# Restore the trained generator from its checkpoint and draw brand-new
# samples from a fresh batch of latent vectors.
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    sample_z = np.random.uniform(-1, 1, size=(16, z_size))
    gen_samples = sess.run(
                   generator(input_z, input_size, reuse=True),
                   feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
"""
Explanation: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
Sampling from the generator
We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!
End of explanation
"""
|
SSDS-Croatia/SSDS-2017 | Day-1/First day - Introduction to Machine Learning with Tensorflow.ipynb | mit | import tensorflow as tf
"""
Explanation: Summer School of Data Science - Split '17
1. Introduction to Machine Learning with TensorFlow
This hands-on session serves as an introductory course for essential TensorFlow usage and basic machine learning with TensorFlow. This notebook is partly based on and follows the approach of chapter 6 of the book "Deep Learning" by Ian Goodfellow, Yoshua Bengio and Aaron Courville, available at: http://www.deeplearningbook.org/.
Other useful tutorials exist in the form of Jupyter notebooks, some of which are:
- https://github.com/udacity/deep-learning
- https://github.com/DataScienceUB/DeepLearningfromScratch
This notebook covers basic TensorFlow usage concepts, which are then applied to elementary machine learning models like linear and logistic regression, and finally a simple multilayer perceptron is built and trained using the established TensorFlow concepts.
Basic TensorFlow concepts
TensorFlow is an open source Python library which provides multiple APIs for buidling and evaluating computational graphs. These graphs can be used to represent any machine learning model, and TensorFlow provides methods for efficient optimization and evaluation of the models. The programmer's guide for TensorFlow can be found at https://www.tensorflow.org/programmers_guide/, and the full documentation is availale at https://www.tensorflow.org/api_docs/python/.
The import statement for TensorFlow programs is: import tensorflow as tf. This provides access to all TensorFlow APIs, classes, methods and symbols.
End of explanation
"""
# create a TensorFlow constant tensor
# create a TensorFlow constant of a specific data type and shape
"""
Explanation: Tensor
The basic concept behind TensorFlow is the tensor - an n-dimensional array of a base datatype. In TensorFlow it is represented by the tf.Tensor object which will produce a value when evaluated. A tf.Tensor object has a shape (which defines the structure of the elements) and a data type, shared by all the elements in the Tensor. The main types of tensors are:
- Constant
- Variable
- Placeholder
The tf.constant() method creates a constant tensor, populated with values of a data type, specified by arguments value, shape (optional), dtype (optional).
End of explanation
"""
# create a TensorFlow session and evaluate the created constant
"""
Explanation: However, any Tensor is only evaluated within a Session, which is the environment in which all tensors and operations are executed.
End of explanation
"""
# create a tensor of any shape populated with zeros and check within the session
# create a tensor of any shape populated with ones and check within the session
"""
Explanation: Other very common and useful methods for creating tensors of constant value are tf.zeros() and tf.ones().
End of explanation
"""
# create a random tensor containing values from a uniform distribution between 10 and 20
"""
Explanation: Tensors containing random values from various distribution can be created using a number of methods, with the most commonly used being tf.random_uniform() and tf.random_normal().
End of explanation
"""
# add a scalar to a tensor
# subtract two tensors
# divide two integer tensors
"""
Explanation: Simple algebraic operations such as +,-,/,and * can be used with tensors in this form, or by calling tf.add(), tf.subtract(), tf.divide(), or tf.multiply(). These are all element-wise, and defined for tensors of equal shapes and data-types. Tensors can be cast into a specific data type by calling tf.cast().
End of explanation
"""
# try out varied mathematical operations with various tensors
"""
Explanation: Other very useful operations include:
- Absolute value (modulus) - tf.abs()
- Exponentiation with $e$ - tf.exp()
- Square and other powers - tf.square() and tf.pow()
- Matrix multiplication - tf.matmul()
- Transpose - tf.transpose()
End of explanation
"""
# create a placeholder and feed it a value in a session
# create two placeholders and a tensor implementing matrix multiplication
"""
Explanation: Placeholders and Variables
Placeholders and Variables are special kinds of tensors which are the essential building blocks of more complex data and computation streams. These are the most commonly used types of tensors in TensorFlow.
A Placeholder is a tensor which acts like a "promise" to provide a value at the evaluation of the computational graph. Placeholders are mostly used as input points in the computational graph where data will be provided. It will produce an error when evaluated, unless the value is fed to the session.
End of explanation
"""
# create a variable, initialize it, and assign a new value within a session
"""
Explanation: A Variable is a tensor which allows the addition of trainable parameters to the computational graph. Constants are intialized when created, as opposed to variables, which need to be initialized within the session (and the initialization procedure must be defined). Variables can be "manually" assigned a new value using tf.assign, and their state is kept within the session object. This is mostly used for model training, during which variables are changed within the optimization process.
End of explanation
"""
#define placeholders for data
#define model parameters as variables
#create a tensor which calculates the model output
"""
Explanation: Linear regression in TensorFlow
Linear regression is one of the simplest and most commonly used regression models. The multivariate linear regression can be written as:
$$y = w^{T}x + b$$
where $y \in \mathbb{R}$ is the output, $w \in \mathbb{R}^{p}$ is a column vector containing $p$ weights for $p$ features in $x \in \mathbb{R}^{p}$, and $b \in \mathbb{R}$ is the bias. The parameters contained in $w$ and $b$ are also called coefficients and are trained by using a gradient descent algorithm.
Exercise:
Let us build a univariate linear regression model for a simple problem, using the previously introduced TensorFlow concepts:
- The model input $x$ is a placeholder for data
- The trainable model parameters $w$ and $b$ are defined as TensorFlow Variables
- The model output $\hat{y}$ is a Tensor
- The obesrved output $y$ is also a placeholder, where data will be provided for training purpose
End of explanation
"""
#define the loss function as the mean of all squared errors (MSE)
#create a gradient descent optimizer
#create a train operation
#generate data to train the regression
#initialize variables, run 100 epochs of training algorithm
"""
Explanation: To train a model built in TensorFlow, a loss function needs to be defined, most commonly as a reduction operation. An optimizer object needs to be defined, and the minimize() method called in order to update the variables defined within the model to minimize the selected loss function. When creating optimizer objects, choices about the learning rate have to be made - these, in combination with the number of training epochs, can greatly influence the model training process. With the appropriate learning rate, the optimization can quickly converge.
End of explanation
"""
#generate XOR training data
import numpy as np
x_train = np.array([[0,0],[0,1],[1,0],[1,1]])
y_train = np.array([[0],[1],[1],[0]])
#import matplotlib for visualization
%matplotlib inline
import matplotlib.pyplot as plt
#logical indices of data where the outputs are 1 and 0
t = np.where(y_train==1)[0]
f = np.where(y_train==0)[0]
#scatter plot of the data
plt.scatter(x_train[t,0],x_train[t,1],c='b',marker='x',s=70)
plt.scatter(x_train[f,0],x_train[f,1],c='r',marker='o',s=70)
"""
Explanation: Logistic Regression
Logistic regression is a very common and simple linear model for classification purposes, based on linear regression and the logistic function:
$$y = \frac{1}{1+e^{-(w^{T}x + b)}}$$
Due to the nature of the logistic function, it produces output values in the range $[0,1]$, thus providing a probability for each class given in the output. Similar to linear regression, the variables defined within the logistic regression model are parameters trainable by various optimization algorithms.
Let us build a logistic regression for the well-known XOR problem.
End of explanation
"""
#define placeholders for the data
#define variables for the trainable parameters of the model
#create a tensor to calculate the model output
#define the loss function, create the optimizer and the training operation
#train the model
"""
Explanation: Exercise:
The model input $x$ is a placeholder for a data
The trainable model parameters $w$ and $b$ are defined as TensorFlow Variables
The model output $\hat{y}$ is a Tensor
The obesrved output $y$ is also a placeholder, where output data will be provided in order to train the model
End of explanation
"""
|
albertfxwang/grizli | examples/Grizli Demo.ipynb | mit | flt = grizli.model.GrismFLT(grism_file='ibhj34h8q_flt.fits', direct_file='ibhj34h6q_flt.fits',
pad=200, ref_file=None, ref_ext=0, seg_file=None, shrink_segimage=False)
"""
Explanation: Initialize the GrismFLT object
The GrismFLT object takes as input grism FLT files and optionally direct image FLT files or reference images and segmentation maps. For this example, provide just a pair of grism (G141) and direct (F140W) FLT files (taken from a visit on the UDF field from 3D-HST).
Note that the two FITS files provided here have been astrometrically aligned and background subtracted externally to grizli in its current implementation. These are clearly critical and non-trivial steps of the data preparation process and this functionality will be incorporated into the full grizli release shortly.
End of explanation
"""
# Run photutils-based source detection on the direct image; fills in
# `flt.catalog` and the segmentation map `flt.seg`.
flt.photutils_detection(detect_thresh=2, grow_seg=5, gauss_fwhm=2.,
                        verbose=True, save_detection=False, data_ext='SCI')
print('Number of objects in `photutils` catalog: %d' %(len(flt.catalog)))
# Show the segmentation map, cropping away the padded border.
plt.imshow(flt.seg, cmap='gray_r', origin='lower')
plt.xlim(flt.pad, flt.direct.sh[1]-flt.pad)
plt.ylim(flt.pad, flt.direct.sh[0]-flt.pad)
# Find the object near detector (x,y) = (712, 52)
# (detector coordinates are offset by `pad` in the padded frame)
xi, yi = 712+flt.pad, 52+flt.pad # nice line
#xi, yi = 779, 722 # bright red
#xi, yi = 695, 949 # big extended H-alpha
#xi, yi = 586, 337 # fainter line, [OIII]?
#xi, yi = 421, 470 # fainter line
#xi, yi = 858, 408 # bright [OIII]
#xi, yi = 940, 478 # fainter line
# Nearest catalog entry to the requested position.
dr = np.sqrt((flt.catalog['x_flt']-xi)**2+(flt.catalog['y_flt']-yi)**2)
ix = np.argmin(dr)
# NOTE(review): `id` shadows the Python builtin of the same name.
id = flt.catalog['id'][ix]
mag = flt.catalog['mag'][ix]
x0 = flt.catalog['x_flt'][ix]+1
y0 = flt.catalog['y_flt'][ix]+1
print(' id=%d, (x,y)=(%.1f, %.1f), mag=%.2f' %(id, x0, y0, mag))
## Get properties of the object from the segmentation region alone
## regardless of whether you have the detection catalog
out = grizli.utils_c.disperse.compute_segmentation_limits(flt.seg, id,
                                                          flt.direct.data['SCI'],
                                                          flt.direct.sh)
ymin, ymax, yseg, xmin, xmax, xseg, area, segm_flux = out
print('Segment: (x,y)=(%.1f, %.1f) # zero index' %(xseg, yseg))
"""
Explanation: Catalog / segmentation image
Make a catalog/segmentation image from the direct FLT and make a full grism model for the first four grism orders for those detected objects. Uses photutils to generate the segmentation image/catalog.
End of explanation
"""
# x pixels from the center of the direct image
dx = np.arange(220)
# ytrace and wavelength at x=dx (first-order 'A' beam of the grism)
dy, lam = flt.conf.get_beam_trace(x=x0-flt.pad, y=y0-flt.pad, dx=dx, beam='A')
# it's fast
%timeit dy, lam = flt.conf.get_beam_trace(x=x0-flt.pad, y=y0-flt.pad, dx=dx, beam='A')
### Make a figure showing the trace in the FLT frame
fig = plt.figure(figsize=[8,2])
ax = fig.add_subplot(111)
ax.imshow(flt.grism.data['SCI'], cmap='gray_r', vmin=-0.05, vmax=0.2,
          interpolation='Nearest', aspect='auto')
ax.set_xlim(x0-10, x0+230); ax.set_ylim(y0-10, y0+10)
# plot the trace (the -1 converts 1-indexed positions back to 0-indexed)
ax.plot(x0+dx-1, y0+dy-1, color='red', linewidth=3, alpha=0.7)
## 0.1 micron tick marks along the trace as in the next figure
xint = np.interp(np.arange(1,1.81,0.1), lam/1.e4, dx)
yint = np.interp(np.arange(1,1.81,0.1), lam/1.e4, dy)
ax.scatter(x0+xint-1, y0+yint-1, marker='o', color='red', alpha=0.8)
ax.set_xlabel(r'$x$ (FLT)'); ax.set_ylabel(r'$y$ (FLT)')
fig.tight_layout(pad=0.1)
#fig.savefig('grizli_demo_0.pdf')
"""
Explanation: Basic trace parameters
Get basic trace parameters, conf is a grizli.grism.aXeConf object, here for G141 & F140W.
dx is simply the x pixel offset from the center of the direct image. Here it is integers, but it can be anything.
End of explanation
"""
# `collections` is referenced on the next line but was only imported in a
# *later* cell in the original; import it here so this cell is
# self-contained regardless of execution order (harmless if already done).
import collections

### Make sure these are initialized
flt.object_dispersers = collections.OrderedDict()
flt.model *= 0
### Compute model of a single object and catch the output, mag=-1 will force compute all orders
single_model = flt.compute_model_orders(id=id, compute_size=True, mag=-1, in_place=False)
### The other option is to store the model "in place" in the `flt.model` attribute.
status = flt.compute_model_orders(id=id, compute_size=True, mag=-1, in_place=True)
# Sanity check: the returned model and the in-place model should agree.
print('These should be the same: %.3f %.3f' %(single_model[1].sum(), flt.model.sum()))
## Show it (x10 stretch so the faint orders are visible)
plt.imshow(single_model[1]*10, interpolation='Nearest', vmin=-0.02, vmax=0.2,
           cmap='gray_r', origin='lower')
plt.scatter(xseg, yseg, marker='o', color='r') # position in direct image
# Trim the padded border so only the real detector area is shown.
plt.xlim(flt.pad, flt.direct.sh[1]-flt.pad)
plt.ylim(flt.pad, flt.direct.sh[0]-flt.pad)
### Now compute the full FLT model
import collections
import time
## Only fit objects brighter than 26th mag (AB)
keep = flt.catalog['mag'] < 26
## Reset: drop any cached per-object dispersers and zero the model image
flt.object_dispersers = collections.OrderedDict()
flt.model *= 0
## Helper function that loops over `self.compute_model_orders` for many objects
## Result is stored in the `self.model` attribute.
t0 = time.time()
flt.compute_full_model(ids=flt.catalog['id'][keep],
                       mags=flt.catalog['mag'][keep])
t1 = time.time()
print('Compute full model (%d objects): %.2f sec' %(keep.sum(), (t1-t0)*1))
"""
Explanation: Creating model spectra for contamination and fitting
This is really the fundamental kernel that allows the user to generate model grism spectra based on the morphology in the direct image. Below we demonstrate the technique for computing a single spectral model, which is placed directly into the FLT frame, and then show how to quickly generate a model of the full "scene" of the exposure.
Note that by default the model generation kernel arbitrarily assumes flat source spectra in units of $f_\lambda$, with a normalization given by the total flux density in the direct image integrated within the segmentation region.
The basic function here is compute_model_orders, which calculates the object extent based on the segmentation region and determines which orders to include based on the object brightness and the MMAG_EXTRACT_BEAMx parameters in the grism configuration file.
End of explanation
"""
# Second pass: the dispersers cached during the first call (see the note
# above this cell) make the re-computation substantially faster.
t0 = time.time()
flt.compute_full_model(ids=flt.catalog['id'][keep],
                       mags=flt.catalog['mag'][keep])
t1 = time.time()
print('Compute full model *again* (%d objects): %.2f sec' %(keep.sum(), (t1-t0)*1))
# Full model
plt.imshow(flt.model, interpolation='Nearest', vmin=-0.02, vmax=0.2,
           cmap='gray_r', origin='lower')
# Trim the padded border so only the real detector area is shown.
plt.xlim(flt.pad, flt.direct.sh[1]-flt.pad)
plt.ylim(flt.pad, flt.direct.sh[0]-flt.pad)
# residual = observed grism science frame minus the scene model
plt.imshow(flt.grism.data['SCI'] - flt.model, interpolation='Nearest', vmin=-0.02, vmax=0.2,
           cmap='gray_r', origin='lower')
plt.xlim(flt.pad, flt.direct.sh[1]-flt.pad)
plt.ylim(flt.pad, flt.direct.sh[0]-flt.pad)
# Note, some spectra on the left side of the image aren't modeled because they fall off of
# the direct image. This can be accounted for when using reference mosaics that cover areas
# larger than the FLT frames themselves.
# Also, this is just a crude model with simple (wrong) assumptions about the shapes of the object spectra!
"""
Explanation: When compute_model_orders is run for a given object with store=True (default), the code caches helper objects for each beam (i.e., spectral order) of that object in the dictionary attribute object_dispersers. Running compute_model_orders again will be faster by a significant factor as these don't have to be recalculated (though they do take up memory).
End of explanation
"""
### Re-run again to make sure beams are stored, in case didn't run the full loop as above
if id not in flt.object_dispersers:
    flt.compute_model_orders(id=id, compute_size=True, mag=-1, store=True)
### Get the beams/orders
beams = flt.object_dispersers[id]
print('Spectral orders: ', beams)
### Make a figure showing the model (top) and observed (bottom) spectra
### for the first and zeroth orders.
fig = plt.figure(figsize=[10,3])
gs = matplotlib.gridspec.GridSpec(2, 2,
                                  width_ratios=[1,3.4],
                                  height_ratios=[1,1])
# Per the caption above: beam 'B' is the zeroth order, 'A' the first.
for i, b in enumerate(['B','A']):
    beam = beams[b]
    print(beam.sh_beam)
    model = beam.compute_model(id=id, spectrum_1d=beam.spectrum_1d, in_place=False)
    vmax = model.max()  # shared stretch so model and data panels compare
    #ax = fig.add_subplot(221+i)
    ax = fig.add_subplot(gs[0,i])
    ax.imshow(model.reshape(beam.sh_beam), interpolation='Nearest', origin='lower', cmap='viridis_r',
              vmin=-0.1*vmax, vmax=vmax)
    ax.set_title('Beam %s' %(b))
    ### Cutout of observed data
    sci_cutout = beam.cutout_from_full_image(flt.grism.data['SCI'])
    ax = fig.add_subplot(gs[1,i]) #fig.add_subplot(223+i)
    ax.imshow(sci_cutout, interpolation='Nearest', origin='lower', cmap='viridis_r',
              vmin=-0.1*vmax, vmax=vmax)
fig.tight_layout(pad=0.1)
"""
Explanation: Spectral orders
The dispersion objects for each beam of each id are stored in the flt.object_dispersers attribute. We can show the observed and model spectra for each order with the following.
End of explanation
"""
# `reload` is not a builtin in Python 3 (this file uses py3-style print()),
# so import it explicitly; idempotent if an earlier cell already did.
from importlib import reload

reload(grizli.model)
print('Available computed beams/orders for id=%d: %s\n' %(id, flt.object_dispersers[id].keys()))
beam = flt.object_dispersers[id]['A'] # can choose other orders if available
beam.compute_model()
print('`beam` class: %s\n' %(beam.__class__))
### BeamCutout object
co = grizli.model.BeamCutout(flt, beam, conf=flt.conf)
print('`co` class: %s\n' %(co.__class__))
print('Object %d, ' %(co.id) +
      'total flux density within the segmentation region: %.3e erg/s/cm2/A'%(co.beam.total_flux))
### Show the direct image, masked to this object's segmentation region
plt.imshow(co.beam.direct*(co.beam.seg == id), interpolation='Nearest', cmap='gray_r', origin='lower')
### Can write the BeamCutout object to a normal FITS file
# NOTE(review): `clobber` is deprecated in recent astropy in favor of
# `overwrite`; kept as-is in case this grizli version still expects it.
co.write_fits(root='galaxy', clobber=True)
## The direct image extensions have EXTNAME=1 (e.g., ('SCI',1)) and
## the grism extensions have EXTNAME=2
im = pyfits.open('galaxy_%05d.g141.A.fits' %(id))
print(im[0].header.cards)
print(im.info())
## Can initialize a BeamCutout object from the FITS file
## independent of the `flt` and `beam` objects as above.
co2 = grizli.model.BeamCutout(fits_file='galaxy_%05d.g141.A.fits' %(id))
# test: round-tripping through FITS must preserve the object flux
print('Flux is the same?: %.2e %.2e' %(co.beam.total_flux, co2.beam.total_flux))
# Show the spectrum cutout
plt.imshow(co.grism.data['SCI'], interpolation='Nearest', vmin=-0.02, vmax=0.2,
           cmap='gray_r', origin='lower')
# Show the contamination model, which was cutout of `flt.model`
plt.imshow(co.contam, interpolation='Nearest', vmin=-0.02, vmax=0.2,
           cmap='gray_r', origin='lower')
"""
Explanation: The BeamCutout object
To interact more closely with an individual object, its information can be extracted from the full exposure with the BeamCutout class. This object will contain the high-level GrismDisperser object useful for generating the model spectra and it will also have tools for analyzing and fitting the observed spectra.
It also makes detailed cutouts of the parent direct and grism images preserving the native WCS information.
End of explanation
"""
##### quick demo of a cartoon "break" spectrum showing how compute_model works
xspec = np.arange(1.e4, 2.e4,10.)
yspec = (xspec > 1.3e4)*1. # zero at xspec < 1.3e4, unity above: a step "break"
dummy_spectrum = co.beam.compute_model(spectrum_1d=[xspec, yspec], in_place=False)
## Plot it (compute_model returns a flat array; reshape back to 2D)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(dummy_spectrum.reshape(co.beam.sh_beam), interpolation='Nearest', vmin=-0.02, vmax=0.2,
          cmap='gray_r', origin='lower')
## Helper functions for 2D plot axes (wavelength ticks in microns)
co.beam.twod_axis_labels(limits=[1.0, 1.81, 0.1], wscale=1.e4, mpl_axis=ax)
co.beam.twod_xlim(1.05, 1.75, wscale=1.e4, mpl_axis=ax)
ax.set_xlabel(r'$\lambda$ / $\mu$m')
# beam.compute_model is quite fast, if still rate-limiting for, e.g.,
# MCMC simulations with thousands of calls, though generating the
# model spectra and evaluating the likelihood is probably slower.
%timeit co.beam.compute_model(in_place=True)
# Can be a bit slower for high resolution template spectra since have
# to do some interpolation along the way
xspec = np.arange(1.e4, 2.e4,10.)
yspec = (xspec > 1.4e4)*1. # zero at xspec < 1.4e4
%timeit beam.compute_model(in_place=True, spectrum_1d=[xspec, yspec])
xspec = np.arange(1.e4, 2.e4,0.1) # high-res spectrum (100x finer), slower
yspec = (xspec > 1.4e4)*1. # zero at xspec < 1.4e4
%timeit beam.compute_model(in_place=True, spectrum_1d=[xspec, yspec])
"""
Explanation: BeamCutout.beam.compute_model
This is the high-level function for computing the 2D spectral model. By default (with no user input) it assumes a flat $f_\lambda$ spectrum normalized to the total flux within the segmentation region.
To compute a different arbitrary spectrum, provide a parameter spectrum_1d = [wave, flux].
End of explanation
"""
# flat continuum, by default `in_place=False` returns a flat array that needs to be reshaped
cmodel = co.beam.compute_model(in_place=False).reshape(co.beam.sh_beam)
# 1D optimal extraction (Horne 1986): profile-weighted collapse of the 2D
# spectrum.  Data, model and contamination are extracted identically so
# the resulting 1D curves can be compared directly.
xspec, yspec, yerr = co.beam.optimal_extract(co.grism.data['SCI'], bin=0, ivar=co.ivar) #data
xspecm, yspecm, yerrm = co.beam.optimal_extract(cmodel, bin=0, ivar=co.ivar) # continuum model
xspecc, yspecc, yerrc = co.beam.optimal_extract(co.contam, bin=0, ivar=co.ivar) # contamination model
# Wavelengths divided by 1e4: plot in microns rather than Angstroms.
eb = plt.errorbar(xspec/1.e4, yspec, yerr, linestyle='None', marker='o', markersize=3, color='black',
                  alpha=0.5, label='Data (id=%d)' %(co.beam.id))
pl = plt.plot(xspecm/1.e4, yspecm, color='red', linewidth=2, alpha=0.8,
              label=r'Flat $f_\lambda$ (%s)' %(co.direct.filter))
pl = plt.plot(xspecc/1.e4, yspecc, color='0.8', linewidth=2, alpha=0.8,
              label=r'Contamination')
plt.legend(loc='upper right', fontsize=10)
"""
Explanation: 1D "optimal" extractions
The BeamCutout class includes a method for computing a 1D optimal extraction of the object spectrum following Horne (1986), where "optimal" refers to the weighting that minimizes the variance in the resulting spectrum by scaling by the object profile along the spatial axis. Of course collapsing to 1D always throws away the spatial information that is needed to interpret the morphology of the spectrum, and this information is often scientifically useful, for example in the case of extracting spatially-resolved 2D emission line maps. Nevertheless, the 1D extractions are often useful for plotting, provided that the modeling is done on the full 2D spectrum and the model 1D spectrum is simply extracted in the same way as the data for plotting.
End of explanation
"""
#### Demo fitting function.
## Note that inverse variances are precomputed and stored in BeamCutout.ivar for chisq calculations
t0 = time.time()
# grid = [wave_min, wave_max, dwave, skip] (see the description above);
# fwhm=48 A is of order the G141 pixel, giving a smooth chi^2 curve.
out = co.simple_line_fit(fwhm=48, grid=[1.12e4, 1.65e4, 1, 10], poly_order=3)
line_centers, coeffs, chi2, ok_data, best_model, best_model_cont, best_line_center, best_line_flux = out
t1 = time.time()
print('Number of calls: %d, time=%.3f sec' %(len(line_centers), t1-t0))
print('min(chi_nu) = %.2f, nu~%d' %(chi2.min()/ok_data.sum(), ok_data.sum())) # non-masked pixels
# found a line!  Interpret the best-fit center as redshifted H-alpha (6563 A)
print('z_Halpha ~ %.3f (external z_spec = %.3f)' %(line_centers[np.argmin(chi2)]/6563.-1, 0.895))
# plot chisq as a function of line wavelength
plt.plot(line_centers, chi2); plt.xlabel('line wavelength'); plt.ylabel(r'$\chi^2$')
# Horizontal guides at min(chi2) + 1/4/9 (1-3 sigma for one parameter),
# drawn in progressively lighter grays
for dchi2 in [1,4,9]: plt.plot(line_centers, chi2*0+chi2.min()+dchi2, color='%s'%(dchi2/12.))
#### show best-fit models in 1D
print(best_model.shape, best_model_cont.shape)
xspec_line, yspec_line, yerr_line = co.beam.optimal_extract(best_model, bin=0, ivar=co.ivar) # line model
xspec_cont, yspec_cont, yerr_cont = co.beam.optimal_extract(best_model_cont, bin=0, ivar=co.ivar) # continuum model
# Overlay data, flat-continuum model, contamination and the best fit.
# (xspec/yspec/xspecm/... come from the extraction cell above.)
plt.errorbar(xspec/1.e4, yspec, yerr, linestyle='None', marker='o', markersize=3, color='black',
             alpha=0.5, label='Data (id=%d)' %(co.beam.id))
plt.plot(xspecm/1.e4, yspecm, color='red', linewidth=2, alpha=0.8,
         label=r'Flat $f_\lambda$ (%s)' %(co.direct.filter))
plt.plot(xspecc/1.e4, yspecc, color='0.8', linewidth=2, alpha=0.8,
         label=r'Contamination')
plt.plot(xspec_cont/1.e4, yspec_cont, color='orange', linewidth=2, alpha=0.8,
         label=r'Tilted continuum + Line')
plt.fill_between(xspec_cont/1.e4, yspec_line, yspec_cont, color='orange', alpha=0.3)
plt.legend(loc='upper right', fontsize=10)
### Function for making a nice figure of the fit
fig = co.show_simple_fit_results(out)
fig.savefig('test.pdf', dpi=300)
"""
Explanation: Fitting demo: continuum slope and a single emission line
The BeamCutout.simple_line_fit method demonstrates a fitting routine that fits for emission line strengths and the continuum shape, where the line centers are fit along a grid spanning the wavelength range of the G141 grism.
The function first computes the simple flat continuum 2D model spectrum, $C_{i,j}$, as shown earlier. Then at each line wavelength, $\lambda$, it computes an emission line only model spectrum where the line flux is normalized to unity, $L_{i,j}$. Scale factors ($\alpha_C$, $\alpha_L$) are then computed with standard least squares techniques (sklearn.linear_model) to fit the sum of these models to the observed 2D spectrum $S_{i,j}$, which has known (2D) variance, $\sigma^2_{i,j}$ taken directly from the ERR extensions of the FLT images.
We include an additional term for modifying the slope of the continuum model, $\alpha_m$, so the final coefficient equation is
$\alpha_C C_{i,j} + \alpha_m \hat\lambda_j C_{i,j} + \alpha_L L_{i,j} = S_{i,j}$,
where $\hat\lambda_j$ is the wavelength of pixel column $j$, suitably normalized so that $\alpha_m$ is of order unity.
For BeamCutout.simple_line_fit, the fwhm parameter specifies the width of the test emission line and grid is a list of parameters [wave_min, wave_max, dwave, skip]. To get smooth $\chi^2$ functions, choose fwhm of order or larger than the grism pixel size (e.g., 46 Å for WFC3/G141).
End of explanation
"""
|
UWashington-Astro300/Astro300-W17 | Intro_to_OO.ipynb | mit | import numpy as np
from astropy import units as u
class SpaceRock(object):
    """First cut of an asteroid container: it only stores its inputs."""
    def __init__(self, name=None, ab_mag=None, albedo=None):
        """Keep the name, absolute magnitude (H) and albedo as attributes."""
        self.name = name
        self.ab_mag = ab_mag
        self.albedo = albedo
# Create some fake data:
name = "Geralt of Rivia"
ab_mag = 5.13
albedo = 0.131
# Initialize a SpaceRock object:
new_asteroid = SpaceRock(name=name, ab_mag=ab_mag, albedo=albedo)
"""
Explanation: Object-oriented programming
There are many different ways to program in Python. In this class we will be (mostly) programming in what is
called the Procedural style.
However, at its heart, Python is an Object-Oriented programming language.
The Object-Oriented style of programming has many advantages, but is less straightforward than Procedural programming.
As you progress in your programming life, there will come a time when you find that moving to the Object-Oriented paradigm will make your life easier.
I would like to say I created this introduction to aid in your exploration of Object-Oriented Python, but I did not. I just shamelessly stole and modified Brett Morris' awesome work.
Example - Asteroids
In our introduction to Python, we used a dataset of main belt asteroids.
This dataset contained the following data for the asteroids:
Names
Absolute Magnitudes (H)
Albedo (A)
Let us look at this data from an Objected-Oriented point of view.
Defining a new object
To create a new object, you use the class command, rather than the def command that you would use for functions,
python
class SpaceRock(object):
We've named the new object SpaceRock - object names in python should be uppercase without underscores separating words (whereas functions are usually all lowercase and words are separated by underscores).
The __init__ method
Now we will define how you call the SpaceRock constructor (the call that creates new SpaceRock objects). Let's say you want to be able to create a asteroid like this...
python
new_asteroid = SpaceRock(name=name, ab_mag=ab_mag, albedo=albedo)
All Python objects get initialized with a function called __init__ defined within the class, like this:
python
class SpaceRock(object):
def __init__(self, name=None, ab_mag=None, albedo=None):
You define the __init__ function like all other functions, except that the first argument is always called self. This self is the shorthand variable that you use to refer to the SpaceRock object within the __init__ method.
Attributes
Objects have attributes, which are like variables stored on an object. We'll want to store the values above into the SpaceRock object, each with their own attribute, like this:
python
class SpaceRock(object):
def __init__(self, name=None, ab_mag=None, albedo=None):
self.name = name
self.ab_mag = ab_mag
self.albedo = albedo
Each attribute is defined by setting self.<attribute name> = <value>. All attributes should be defined within the __init__ method.
A Working Example
Let's now create an instance of the SpaceRock object, and see how it works:
End of explanation
"""
# Plain attribute access; in a notebook only the last expression is echoed.
new_asteroid.name
new_asteroid.albedo
"""
Explanation: We can see what values are stored in each attribute like this:
End of explanation
"""
class SpaceRock(object):
    """Asteroid container, now with a derived diameter."""
    def __init__(self, name=None, ab_mag=None, albedo=None):
        self.name = name
        self.ab_mag = ab_mag
        self.albedo = albedo
    def diameter(self):
        """Diameter in km via D = 1329 / sqrt(albedo) * 10**(-H/5)."""
        result = (1329.0 / np.sqrt(self.albedo)) * (10 ** (-0.2 * self.ab_mag))
        return result * u.km
new_asteroid = SpaceRock(name=name, ab_mag=ab_mag, albedo=albedo)
new_asteroid.diameter()
"""
Explanation: Methods
So far this just looks like another way to store your data. It becomes more powerful when you write methods for your object. Methods can be thought of as functions associated with an object.
You can now access the attributes of the object within methods by calling self.<attribute name>.
Let's make a simple method for the SpaceRock object, which determines the size of the asteroid.
End of explanation
"""
from astropy.table import QTable
# Load the short asteroid table: Name, H (absolute magnitude), A (albedo).
rock_table = QTable.read('MainBelt_small.csv', format='ascii.csv')
print(rock_table)
name = rock_table['Name']
ab_mag = rock_table['H']
albedo = rock_table['A']
# Whole table columns work too: np.sqrt and the arithmetic in diameter()
# operate element-wise on the columns.
rocks = SpaceRock(name=name, ab_mag=ab_mag, albedo=albedo)
rocks.diameter()
"""
Explanation: Real data
Lets use some real data. A short version of the MainBelt.csv dataset from last week
End of explanation
"""
class SpaceRock(object):
    """Asteroid container; methods can build on one another."""
    def __init__(self, name=None, ab_mag=None, albedo=None):
        self.name = name
        self.ab_mag = ab_mag
        self.albedo = albedo
    def diameter(self):
        """Diameter in km from H and albedo."""
        result = (1329.0 / np.sqrt(self.albedo)) * (10 ** (-0.2 * self.ab_mag))
        return result * u.km
    def two_diameter(self):
        """Twice the diameter -- reuses `self.diameter()`."""
        result = 2.0 * self.diameter()
        return result
rocks = SpaceRock(name=name, ab_mag=ab_mag, albedo=albedo)
rocks.two_diameter()
rocks.diameter()
"""
Explanation: One of the nice things about creating a Class is that all of the methods within the Class know about each other.
For example: I want to create a new method that uses the results of the diameter method I already defined.
Easy, just use the variable self.diameter() in my new method
End of explanation
"""
class SpaceRock(object):
    """Asteroid container extended with orbital elements."""
    def __init__(self, name = None, ab_mag = None, albedo = None,
                 semi_major= None, ecc = None):
        self.name = name
        self.ab_mag = ab_mag
        self.albedo = albedo
        self.semi_major = semi_major  # semi-major axis [AU]
        self.ecc = ecc                # orbital eccentricity
    def diameter(self):
        """Diameter in km from H and albedo."""
        result = (1329.0 / np.sqrt(self.albedo)) * (10 ** (-0.2 * self.ab_mag))
        return result * u.km
    def two_diameter(self):
        """Twice the diameter."""
        result = 2.0 * self.diameter()
        return result
    def find_perihelion(self):
        """Perihelion distance q = a * (1 - e), in AU."""
        result = self.semi_major * ( 1.0 - self.ecc )
        return result * u.AU
# Orbit data table -- presumably potentially hazardous asteroids, going by
# the PHA.csv filename; columns include a (semi-major axis) and ecc.
another_rock_table = QTable.read('PHA.csv', format='ascii.csv')
print(another_rock_table)
name = another_rock_table['Name']
ab_mag = another_rock_table['H']
albedo = another_rock_table['A']
semi_major = another_rock_table['a']
ecc = another_rock_table['ecc']
more_rocks = SpaceRock(name=name, ecc = ecc, semi_major=semi_major, ab_mag=ab_mag, albedo=albedo)
more_rocks.diameter()
more_rocks.find_perihelion()
# Report each asteroid's perihelion distance.
for Idx,Value in enumerate(more_rocks.find_perihelion()):
    rock_name = more_rocks.name[Idx]
    my_string = "The Asteroid {0} has a perihelion distance of {1:.2f}".format(rock_name, Value)
    print(my_string)
"""
Explanation: As you modify your Class all of the methods within the Class know about the modifications.
For example: Let us add some more attributes to our Asteroid data.
End of explanation
"""
class SpaceRock(object):
    """A simple bag of asteroid data with a few derived quantities."""
    def __init__(self, name = None, ab_mag = None, albedo = None,
                 semi_major= None, ecc = None):
        """
        Parameters
        ----------
        name : string
            Designation of each asteroid.
        ab_mag : array-like
            Absolute magnitude H of each asteroid.
        albedo : array-like
            Geometric albedo of each asteroid.
        semi_major : array-like
            Orbital semi-major axis of each asteroid, in AU.
        ecc : array-like
            Orbital eccentricity of each asteroid.
        """
        self.name = name
        self.ab_mag = ab_mag
        self.albedo = albedo
        self.semi_major = semi_major
        self.ecc = ecc
    def diameter(self):
        """
        Diameter of each asteroid in km: D = 1329 / sqrt(albedo) * 10**(-H/5).
        """
        size_km = (1329.0 / np.sqrt(self.albedo)) * (10 ** (-0.2 * self.ab_mag))
        return size_km * u.km
    def two_diameter(self):
        """
        Twice the diameter of each asteroid, in km.
        """
        doubled = 2.0 * self.diameter()
        return doubled
    def find_perihelion(self):
        """
        Perihelion distance of each asteroid, q = a * (1 - e), in AU.
        """
        q = self.semi_major * ( 1.0 - self.ecc )
        return q * u.AU
rocks = SpaceRock(name=name, ab_mag=ab_mag, albedo=albedo)
"""
Explanation: Documentation
If you want to share your code with collaborators or with your future self, you should include documentation. We've neglected that above, so let's add in some docstrings!
End of explanation
"""
rocks?
"""
Explanation: Now you can see the documentation on the module within the Notebooks by typing
rocks?
...you can see the documentation for each method by typing
rocks.diameter?
End of explanation
"""
|
dostrebel/working_place_ds_17 | 04 modules, requests/02 module - homework.ipynb | mit | import requests
import pandas as pd
"""
Explanation: Modules, Requests und arbeiten mit APIs
0. Importiere die Module requests und pandas, die verwenden würdest
End of explanation
"""
# Answer to question 1 -- example APIs (some need a key, e.g. NYT/Guardian;
# others such as postcodes.io or citybik.es are open).  The original cell
# listed these as bare URLs, which is a SyntaxError in Python; keep them
# as strings instead.
api_examples = [
    "http://rpc.geocoder.us/service",
    "http://api.nytimes.com/svc/search/v1",
    "http://www.openhazards.com/data/GetEarthquakeProbability",
    "http://postcodes.io",
    "http://open-platform.theguardian.com/access/",
    "https://api.citybik.es/v2/",
]
"""
Explanation: 1. Suche ein paar Beispiele von APIs. Von solchen, die mit und ohne Schlüssel, verwendet werden können und liste sie auf.
End of explanation
"""
# Question 2: call one of the APIs and inspect the raw response.
url1 = 'http://api.openhazards.com/GetEarthquakeProbability?q=San+Francisco,+CA&m=6.8&r=100'
response = requests.get(url1)
response
response.text
# NOTE(review): missing parentheses -- this only displays the bound method,
# not parsed data.  `response.json()` would parse the body, but verify the
# endpoint actually returns JSON first (it may return XML).
response.json
"""
Explanation: 2. Lese eine dieser APIs mit requests ein und rufe die Ergebnisse auf.
End of explanation
"""
# Question 3: number of quakes in August 2017.  The API's `endtime` is an
# upper bound, so use September 1st to cover all of August 31st (the
# original endtime=2017-08-31 cut the last day off), and display the count.
url_august = 'https://earthquake.usgs.gov/fdsnws/event/1/count?starttime=2017-08-01&endtime=2017-09-01'
response = requests.get(url_august)
response
response.text
"""
Explanation: 3. Zurück zum Erdbeben API. Wie viele Erdbeben wurden im August 2017 verzeichnet?
End of explanation
"""
# Question 4: July 2017.  The original cell built the URL but never issued
# the request, so `response` still held the August result; fetch explicitly.
# End on August 1st so the whole of July (31 days) is covered.
url_juli = 'https://earthquake.usgs.gov/fdsnws/event/1/count?starttime=2017-07-01&endtime=2017-08-01'
response = requests.get(url_juli)
response
response.text
"""
Explanation: 4. Und wie viele Erdbeben im Juli?
End of explanation
"""
# Question 5: earthquakes per month of 2017 (Jan-Sep), done in one loop
# instead of one copy-pasted cell per month.  The original cells had
# several bugs: a malformed `month` list (unbalanced quotes -> SyntaxError),
# a stray bare `url` name, one cell that never displayed its count, and one
# that re-displayed `response` without ever calling requests.get().
# Using the 1st of the *next* month as `endtime` also covers the final day
# of each month, which the hand-written end dates missed for 31-day months.
monthly_counts = {}
for month_num in range(1, 10):
    url = ('https://earthquake.usgs.gov/fdsnws/event/1/count'
           '?starttime=2017-%02d-01&endtime=2017-%02d-01' % (month_num, month_num + 1))
    monthly_counts[month_num] = int(requests.get(url).text)
monthly_counts
"""
Explanation: 5. In welchem Monat des aktuellen Jahres wurden am meisten Erdbeben verzeichnet?
End of explanation
"""
# Question 6: hand-collected monthly counts ('?' marks months whose cells
# above were broken and produced no number).
dct_lst = [{'Monat' : 'Januar', 'Erdbeben' : 11074}, {'Monat' : 'Februar', 'Erdbeben' : 7576}, {'Monat' : 'März', 'Erdbeben' : 8871}, {'Monat' : 'April', 'Erdbeben' : 10409}, {'Monat' : 'Mai', 'Erdbeben' : '?'}, {'Monat' : 'Juni', 'Erdbeben' : 9880}, {'Monat' : 'Juli', 'Erdbeben' : 11252}, {'Monat' : 'August', 'Erdbeben' : '?'}, {'Monat' : 'September', 'Erdbeben' : 9642}]
pd.DataFrame(dct_lst)
df = pd.DataFrame(dct_lst)
# Sorting by 'Monat' only ordered the German month names alphabetically
# (April, August, Februar, ...) -- neither chronological nor an answer to
# "which month had the most quakes".  Sort by the count itself instead;
# the '?' placeholders coerce to NaN and drop to the end.
df.sort_values(by='Erdbeben', key=lambda s: pd.to_numeric(s, errors='coerce'),
               ascending=False)
"""
Explanation: 6. Wandle das in einen Pandas Dataframe um
End of explanation
"""
df.to_csv('Monate.csv')
"""
Explanation: 7. Und speicher das Ergebnis ab
End of explanation
"""
|
minesh1291/Practicing-Kaggle | MNIST_2017/dump_/women_2018_gridsearchCV.ipynb | gpl-3.0 | #the seed information
# Tournament seeds for the 2018 sample tournament.
df_seeds = pd.read_csv('../input/WNCAATourneySeeds_SampleTourney2018.csv')
#tour information
# Regular-season compact results: one row per game with winner/loser IDs,
# scores, location and overtime count.
df_tour = pd.read_csv('../input/WRegularSeasonCompactResults_PrelimData2018.csv')
"""
Explanation: First we import some datasets of interest
End of explanation
"""
# Numeric seed from the 'Seed' string -- assumes entries look like 'W01'
# (region letter + two-digit seed), so characters 1:3 are the number.
df_seeds['seed_int'] = df_seeds['Seed'].apply( lambda x : int(x[1:3]) )
# Seed lookup tables keyed for the winning and the losing side respectively.
df_winseeds = df_seeds.loc[:, ['TeamID', 'Season', 'seed_int']].rename(columns={'TeamID':'WTeamID', 'seed_int':'WSeed'})
df_lossseeds = df_seeds.loc[:, ['TeamID', 'Season', 'seed_int']].rename(columns={'TeamID':'LTeamID', 'seed_int':'LSeed'})
# Attach the winner's seed, then the loser's seed, to every game row.
df_dummy = pd.merge(left=df_tour, right=df_winseeds, how='left', on=['Season', 'WTeamID'])
df_concat = pd.merge(left=df_dummy, right=df_lossseeds, on=['Season', 'LTeamID'])
"""
Explanation: Now we separate the winners from the losers and organize our dataset
End of explanation
"""
df_concat['DiffSeed'] = df_concat[['LSeed', 'WSeed']].apply(lambda x : 0 if x[0] == x[1] else 1, axis = 1)
"""
Explanation: Now we match the detailed results to the merge dataset above
End of explanation
"""
#prepares sample submission
df_sample_sub = pd.read_csv('../input/WSampleSubmissionStage2.csv')
df_sample_sub['Season'] = df_sample_sub['ID'].apply(lambda x : int(x.split('_')[0]) )
df_sample_sub['TeamID1'] = df_sample_sub['ID'].apply(lambda x : int(x.split('_')[1]) )
df_sample_sub['TeamID2'] = df_sample_sub['ID'].apply(lambda x : int(x.split('_')[2]) )
"""
Explanation: Here we get our submission info
End of explanation
"""
# Build a symmetric training set: each game appears twice, once from the
# winner's perspective (Result=1) and once from the loser's (Result=0).
winners = df_concat.rename( columns = { 'WTeamID' : 'TeamID1',
                                        'LTeamID' : 'TeamID2',
                                        'WScore' : 'Team1_Score',
                                        'LScore' : 'Team2_Score'}).drop(['WSeed', 'LSeed', 'WLoc'], axis = 1)
winners['Result'] = 1.0
losers = df_concat.rename( columns = { 'WTeamID' : 'TeamID2',
                                       'LTeamID' : 'TeamID1',
                                       'WScore' : 'Team2_Score',
                                       'LScore' : 'Team1_Score'}).drop(['WSeed', 'LSeed', 'WLoc'], axis = 1)
losers['Result'] = 0.0
train = pd.concat( [winners, losers], axis = 0).reset_index(drop = True)
# Derived score features, always from Team1's point of view.
train['Score_Ratio'] = train['Team1_Score'] / train['Team2_Score']
train['Score_Total'] = train['Team1_Score'] + train['Team2_Score']
train['Score_Pct'] = train['Team1_Score'] / train['Score_Total']
"""
Explanation: Training Data Creation
End of explanation
"""
df_sample_sub['Season'].unique()
"""
Explanation: We will only consider years relevant to our test submission
End of explanation
"""
# 2018 games that also appear as pairs in the submission file.
train_test_inner = pd.merge( train.loc[ train['Season'].isin([2018]), : ].reset_index(drop = True),
                             df_sample_sub.drop(['ID', 'Pred'], axis = 1),
                             on = ['Season', 'TeamID1', 'TeamID2'], how = 'inner' )
train_test_inner.head()
"""
Explanation: Now lets just look at TeamID2, or just the second team info.
End of explanation
"""
# Median number of overtimes seen with each team on the Team1/Team2 side.
team1d_num_ot = train_test_inner.groupby(['Season', 'TeamID1'])['NumOT'].median().reset_index()\
    .set_index('Season').rename(columns = {'NumOT' : 'NumOT1'})
team2d_num_ot = train_test_inner.groupby(['Season', 'TeamID2'])['NumOT'].median().reset_index()\
    .set_index('Season').rename(columns = {'NumOT' : 'NumOT2'})
num_ot = team1d_num_ot.join(team2d_num_ot).reset_index()
# NOTE(review): despite the original "subtract by one to prevent
# overcounting" remark, this only rounds the sum of the two medians --
# nothing is actually subtracted.  Confirm which behavior is intended.
num_ot['NumOT'] = num_ot[['NumOT1', 'NumOT2']].apply(lambda x : round( x.sum() ), axis = 1 )
num_ot.head()
"""
Explanation: From the inner join, we will create data per team id to estimate the parameters we are missing that are independent of the year. Essentially, we are trying to estimate the average behavior of the team across the year.
End of explanation
"""
# Per-team median score statistics, seen from the Team1 and Team2 side.
# For Team2 the comparable quantities are the inverse ratio and 1 - pct,
# which is why the combinations below flip x[1].
team1d_score_spread = train_test_inner.groupby(['Season', 'TeamID1'])[['Score_Ratio', 'Score_Pct']].median().reset_index()\
    .set_index('Season').rename(columns = {'Score_Ratio' : 'Score_Ratio1', 'Score_Pct' : 'Score_Pct1'})
team2d_score_spread = train_test_inner.groupby(['Season', 'TeamID2'])[['Score_Ratio', 'Score_Pct']].median().reset_index()\
    .set_index('Season').rename(columns = {'Score_Ratio' : 'Score_Ratio2', 'Score_Pct' : 'Score_Pct2'})
score_spread = team1d_score_spread.join(team2d_score_spread).reset_index()
#geometric mean of score ratio of team 1 and inverse of team 2
score_spread['Score_Ratio'] = score_spread[['Score_Ratio1', 'Score_Ratio2']].apply(lambda x : ( x[0] * ( x[1] ** -1.0) ), axis = 1 ) ** 0.5
#harmonic mean of team 1's score pct and (1 - team 2's score pct)
score_spread['Score_Pct'] = score_spread[['Score_Pct1', 'Score_Pct2']].apply(lambda x : 0.5*( x[0] ** -1.0 ) + 0.5*( 1.0 - x[1] ) ** -1.0, axis = 1 ) ** -1.0
score_spread.head()
"""
Explanation: Here we look at the comparable statistics. For the TeamID2 column, we would consider the inverse of the ratio, and 1 minus the score attempt percentage.
End of explanation
"""
# Features/labels for the submission pairs that have an actual result.
X_train = train_test_inner.loc[:, ['Season', 'NumOT', 'Score_Ratio', 'Score_Pct']]
train_labels = train_test_inner['Result']
# Outer merge to find submission pairs with no recorded result...
# NOTE(review): the seasons here (2014-2017) differ from the 2018 filter
# used for the inner merge above -- verify this is intentional.
train_test_outer = pd.merge( train.loc[ train['Season'].isin([2014, 2015, 2016, 2017]), : ].reset_index(drop = True),
                             df_sample_sub.drop(['ID', 'Pred'], axis = 1),
                             on = ['Season', 'TeamID1', 'TeamID2'], how = 'outer' )
train_test_outer = train_test_outer.loc[ train_test_outer['Result'].isnull(),
                                         ['TeamID1', 'TeamID2', 'Season']]
# ...and estimate their features from the per-team aggregates built above.
train_test_missing = pd.merge( pd.merge( score_spread.loc[:, ['TeamID1', 'TeamID2', 'Season', 'Score_Ratio', 'Score_Pct']],
                                         train_test_outer, on = ['TeamID1', 'TeamID2', 'Season']),
                               num_ot.loc[:, ['TeamID1', 'TeamID2', 'Season', 'NumOT']],
                               on = ['TeamID1', 'TeamID2', 'Season'])
"""
Explanation: Now lets create a model just solely based on the inner group and predict those probabilities.
We will get the teams with the missing result.
End of explanation
"""
X_test = train_test_missing.loc[:, ['Season', 'NumOT', 'Score_Ratio', 'Score_Pct']]
n = X_train.shape[0]  # remember where train ends inside the merged frame
# Concatenate train+test so both get identical dummy columns, then split.
train_test_merge = pd.concat( [X_train, X_test], axis = 0 ).reset_index(drop = True)
train_test_merge = pd.concat( [pd.get_dummies( train_test_merge['Season'].astype(object) ),
                               train_test_merge.drop('Season', axis = 1) ], axis = 1 )
train_test_merge = pd.concat( [pd.get_dummies( train_test_merge['NumOT'].astype(object) ),
                               train_test_merge.drop('NumOT', axis = 1) ], axis = 1 )
X_train = train_test_merge.loc[:(n - 1), :].reset_index(drop = True)
X_test = train_test_merge.loc[n:, :].reset_index(drop = True)
# Min-max scale both sets with the *training* statistics only; the tiny
# epsilon guards against division by zero for constant columns.
x_max = X_train.max()
x_min = X_train.min()
X_train = ( X_train - x_min ) / ( x_max - x_min + 1e-14)
X_test = ( X_test - x_min ) / ( x_max - x_min + 1e-14)
train_labels.value_counts()
X_train.head()
from sklearn.linear_model import LogisticRegressionCV
model = LogisticRegressionCV(cv=80,scoring="neg_log_loss",random_state=1
#,penalty="l1"
#,Cs= Cs_#list(np.arange(1e-7,1e-9,-0.5e-9)) # [0.5,0.1,0.01,0.001] #list(np.power(1, np.arange(-10, 10)))
#,max_iter=1000, tol=1e-11
#,solver="liblinear"
#,n_jobs=4
)
model.fit(X_train, train_labels)
#---
Cs = model.Cs_
list(np.power(10.0, np.arange(-10, 10)))
dir(model)
sco = model.scores_[1].mean(axis=0)
#---
import matplotlib.pyplot as plt
plt.plot(Cs
#np.log10(Cs)
,sco)
# plt.ylabel('some numbers')
plt.show()
sco.min()
Cs_= list(np.arange(1.1e-9 - 5e-11
,1.051e-9
,0.2e-13))
len(Cs_)
Cs_= list(np.arange(1e-11
,9.04e-11#1.0508e-9
,0.2e-12))
len(Cs_)
#Cs_= list(np.arange(5.6e-13 - ( (0.01e-13)*1)
# ,5.61e-13 - ( (0.01e-13)*1)#1.0508e-9
# ,0.2e-15))
#len(Cs_)
Cs_= list(np.arange(1e-11
,5.5e-11#1.0508e-9
,0.2e-12))
len(Cs_)
Cs_= list(np.arange(1e-14
,5.5e-11#1.0508e-9
,0.2e-12))
len(Cs_)#awsome
#Cs_= list(np.arange(1.5e-11
# ,2.53e-11#1.0508e-9
# ,0.2e-13)) #+[3.761e-11]
#len(Cs_)
#X_train.dtypes
Cs_= list(np.arange(1e-15
,0.51e-10 #1.0508e-9
,0.1e-12))
len(Cs_)#new again
Cs_= list(np.arange(9e-14
,10.1e-13 #1.0508e-9
,0.1e-14))
len(Cs_)#new again cont. lowerlevel
Cs_= list(np.arange(9e-14
,10.1e-13 #1.0508e-9
,0.1e-14))
len(Cs_)#new again cont. lowerlevel
#LogisticRegressionCV(Cs=10, class_weight=None, cv=107, dual=False,
# fit_intercept=True, intercept_scaling=1.0, max_iter=100,
# multi_class='ovr', n_jobs=1, penalty='l2', random_state=2,
# refit=True, scoring='neg_log_loss', solver='lbfgs', tol=0.0001,
# verbose=0) #-0.7
from sklearn.linear_model import LogisticRegression

# Refit a single model at the C value found by the CV search above.
# Fix: LogisticRegression (unlike LogisticRegressionCV) takes no `scoring`
# argument -- passing one raises TypeError, so it has been removed.
model = LogisticRegression(random_state=1,
                           C=8.129999999999969e-13,
                           max_iter=1000,
                           tol=1e-11,
                           n_jobs=4)
model.fit(X_train, train_labels)
#---
# NOTE(review): the attributes below (Cs_, scores_) only exist on
# LogisticRegressionCV, not on the plain LogisticRegression fitted in the
# previous cell, so this cell raises AttributeError unless `model` is
# still the CV estimator from earlier notebook state -- confirm before
# rerunning top-to-bottom.
Cs = model.Cs_
list(np.power(10.0, np.arange(-10, 10)))
dir(model)
# Mean CV score per C value, averaged over folds for class label 1.
sco = model.scores_[1].mean(axis=0)
#---
import matplotlib.pyplot as plt
plt.plot(Cs
        #np.log10(Cs)
         ,sco)
# plt.ylabel('some numbers')
plt.show()
Cs= list(np.linspace(9e-15
,10.1e-14 #1.0508e-9
,200))
len(Cs)#new again cont. lowerlevel
from sklearn import svm, grid_search, datasets
parameters = dict(C=Cs)
model = LogisticRegression(random_state=1
#,penalty="l1"
,C=8.129999999999969e-13#list(np.arange(1e-7,1e-9,-0.5e-9)) # [0.5,0.1,0.01,0.001] #list(np.power(1, np.arange(-10, 10)))
,max_iter=1000, tol=1e-11
,solver="lbfgs"
,n_jobs=1)
clf = grid_search.GridSearchCV(model, parameters,scoring="neg_log_loss",cv=80,n_jobs=8)
clf.fit(X_train, train_labels)
scores = [x[1] for x in clf.grid_scores_]
scores = np.array(scores).reshape(len(Cs))
plt.plot(Cs, scores)
plt.legend()
plt.xlabel('Cs')
plt.ylabel('Mean score')
plt.show()
print("C:",clf.best_estimator_.C," loss:",clf.best_score_)
clf.grid_scores_
scores = [x[1] for x in clf.grid_scores_]
scores = np.array(scores).reshape(len(Cs))
plt.plot(Cs, scores)
plt.legend()
plt.xlabel('Cs')
plt.ylabel('Mean score')
plt.show()
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(clf.grid_scores_)
# plt.ylabel('some numbers')
plt.show()
index_min = np.argmin(sco)
Cs_[index_min] #3.761e-11
sco.min()
#list(np.power(10.0, np.arange(-10, 10)))
#list(np.arange(0.5,1e-4,-0.05))
print(sco.max())
#-0.6931471779248422
print(sco.min() < -0.693270048530996)
print(sco.min()+0.693270048530996)
sco.min()
import matplotlib.pyplot as plt
plt.plot(model.scores_[1])
# plt.ylabel('some numbers')
plt.show()
"""
Explanation: We scale our data for our keras classifier, and make sure our categorical variables are properly processed.
End of explanation
"""
train_test_inner['Pred1'] = model.predict_proba(X_train)[:,1]
train_test_missing['Pred1'] = model.predict_proba(X_test)[:,1]
"""
Explanation: Here we store our probabilities
End of explanation
"""
sub = pd.merge(df_sample_sub,
pd.concat( [train_test_missing.loc[:, ['Season', 'TeamID1', 'TeamID2', 'Pred1']],
train_test_inner.loc[:, ['Season', 'TeamID1', 'TeamID2', 'Pred1']] ],
axis = 0).reset_index(drop = True),
on = ['Season', 'TeamID1', 'TeamID2'], how = 'outer')
"""
Explanation: We merge our predictions
End of explanation
"""
team1_probs = sub.groupby('TeamID1')['Pred1'].apply(lambda x : (x ** -1.0).mean() ** -1.0 ).fillna(0.5).to_dict()
team2_probs = sub.groupby('TeamID2')['Pred1'].apply(lambda x : (x ** -1.0).mean() ** -1.0 ).fillna(0.5).to_dict()
"""
Explanation: We get the 'average' probability of success for each team
End of explanation
"""
sub['Pred'] = sub[['TeamID1', 'TeamID2','Pred1']]\
.apply(lambda x : team1_probs.get(x[0]) * ( 1 - team2_probs.get(x[1]) ) if np.isnan(x[2]) else x[2],
axis = 1)
sub = sub.drop_duplicates(subset=["ID"], keep='first')
sub[['ID', 'Pred']].to_csv('sub.csv', index = False)
sub[['ID', 'Pred']].head(20)
"""
Explanation: Any missing value for the prediction will be imputed with the product of the probabilities calculated above. We assume these are independent events.
End of explanation
"""
|
yashdeeph709/Algorithms | PythonBootCamp/Complete-Python-Bootcamp-master/GUI/4 - Widget List.ipynb | apache-2.0 | import ipywidgets as widgets
# Show all available widgets!
widgets.Widget.widget_types.values()
"""
Explanation: Widget List
This lecture will serve as a reference for widgets, providing a list of the GUI widgets available!
Complete list
For a complete list of the GUI widgets available to you, you can list the registered widget types. Widget and DOMWidget, not listed below, are base classes.
End of explanation
"""
widgets.FloatSlider(
value=7.5,
min=5.0,
max=10.0,
step=0.1,
description='Test:',
)
"""
Explanation: Numeric widgets
There are 8 widgets distributed with IPython that are designed to display numeric values. Widgets exist for displaying integers and floats, both bounded and unbounded. The integer widgets share a similar naming scheme to their floating point counterparts. By replacing Float with Int in the widget name, you can find the Integer equivalent.
FloatSlider
End of explanation
"""
widgets.FloatSlider(
value=7.5,
min=5.0,
max=10.0,
step=0.1,
description='Test',
orientation='vertical',
)
"""
Explanation: Sliders can also be displayed vertically.
End of explanation
"""
widgets.FloatProgress(
value=7.5,
min=5.0,
max=10.0,
step=0.1,
description='Loading:',
)
"""
Explanation: FloatProgress
End of explanation
"""
widgets.BoundedFloatText(
value=7.5,
min=5.0,
max=10.0,
description='Text:',
)
"""
Explanation: BoundedFloatText
End of explanation
"""
widgets.FloatText(
value=7.5,
description='Any:',
)
"""
Explanation: FloatText
End of explanation
"""
widgets.ToggleButton(
description='Click me',
value=False,
)
"""
Explanation: Boolean widgets
There are three widgets that are designed to display a boolean value.
ToggleButton
End of explanation
"""
widgets.Checkbox(
description='Check me',
value=True,
)
"""
Explanation: Checkbox
End of explanation
"""
widgets.Valid(
value=True,
)
"""
Explanation: Valid
The valid widget provides a read-only indicator.
End of explanation
"""
from IPython.display import display
w = widgets.Dropdown(
options=['1', '2', '3'],
value='2',
description='Number:',
)
display(w)
# Show value
w.value
"""
Explanation: Selection widgets
There are four widgets that can be used to display single selection lists, and one that can be used to display multiple selection lists. All inherit from the same base class. You can specify the enumeration of selectable options by passing a list. You can also specify the enumeration as a dictionary, in which case the keys will be used as the item displayed in the list and the corresponding value will be returned when an item is selected.
Dropdown
End of explanation
"""
w = widgets.Dropdown(
options={'One': 1, 'Two': 2, 'Three': 3},
value=2,
description='Number:')
display(w)
w.value
"""
Explanation: The following is also valid:
End of explanation
"""
widgets.RadioButtons(
description='Pizza topping:',
options=['pepperoni', 'pineapple', 'anchovies'],
)
"""
Explanation: RadioButtons
End of explanation
"""
widgets.Select(
description='OS:',
options=['Linux', 'Windows', 'OSX'],
)
"""
Explanation: Select
End of explanation
"""
widgets.ToggleButtons(
description='Speed:',
options=['Slow', 'Regular', 'Fast'],
)
"""
Explanation: ToggleButtons
End of explanation
"""
w = widgets.SelectMultiple(
description="Fruits",
options=['Apples', 'Oranges', 'Pears'])
display(w)
w.value
"""
Explanation: SelectMultiple
Multiple values can be selected with <kbd>shift</kbd> and/or <kbd>ctrl</kbd> (or <kbd>command</kbd>) pressed and mouse clicks or arrow keys.
End of explanation
"""
widgets.Text(
description='String:',
value='Hello World',
)
"""
Explanation: String widgets
There are 4 widgets that can be used to display a string value. Of those, the Text and Textarea widgets accept input. The Latex and HTML widgets display the string as either Latex or HTML respectively, but do not accept input.
Text
End of explanation
"""
widgets.Textarea(
description='String:',
value='Hello World',
)
"""
Explanation: Textarea
End of explanation
"""
widgets.Latex(
value="$$\\frac{n!}{k!(n-k)!}$$",
)
"""
Explanation: Latex
End of explanation
"""
widgets.HTML(
value="Hello <b>World</b>"
)
"""
Explanation: HTML
End of explanation
"""
widgets.Button(description='Click me')
"""
Explanation: Button
End of explanation
"""
|
yingchi/fastai-notes | deeplearning1/nbs/mnist_yingchi.ipynb | apache-2.0 | from theano.sandbox import cuda
cuda.use('gpu1')
%matplotlib inline
from importlib import reload
import utils; reload(utils)
from utils import *
from __future__ import division, print_function
"""
Explanation: Model Building for MNIST
End of explanation
"""
batch_size = 64
from keras.datasets import mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
# Because MNIST is grey-scale images, it does not have the color column,
# Let's add one empty dim to the X data
X_test = np.expand_dims(X_test, 1)
X_train = np.expand_dims(X_train, 1)
X_train.shape
y_train[:5]
y_train = onehot(y_train)
y_test = onehot(y_test)
y_train[:5]
"""
Explanation: Setup
End of explanation
"""
# Global pixel mean and standard deviation of the training images, cast to
# float32 to match the network's input dtype.
mean_px = X_train.mean().astype(np.float32)
std_px = X_train.std().astype(np.float32)
# Standardise a batch of images to zero mean / unit variance using the
# training-set statistics; applied inside each model via a Lambda layer.
def norm_input(x): return (x-mean_px)/std_px
"""
Explanation: Now, let's normalize the inputs
End of explanation
"""
def get_lin_model():
    """Build a linear (softmax-regression) classifier over 1x28x28 inputs."""
    model = Sequential()
    model.add(Lambda(norm_input, input_shape=(1, 28, 28)))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    model.compile(Adam(), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
lm = get_lin_model()
gen = image.ImageDataGenerator()
batches = gen.flow(X_train, y_train, batch_size=64)
test_batches = gen.flow(X_test, y_test, batch_size=64)
lm.fit_generator(batches, batches.N, nb_epoch=1,
validation_data=test_batches, nb_val_samples=test_batches.N)
"""
Explanation: Linear model
Why not we just fine-tune the imagenet model?
Because imageNet is 214 x 214 and is full-color. Here we have 28 x 28 and greyscale.
So we need to start from scratch.
End of explanation
"""
lm.optimizer.lr = 0.1
lm.fit_generator(batches, batches.N, nb_epoch=3,
validation_data=test_batches, nb_val_samples=test_batches.N)
"""
Explanation: It's always recommended to start with epoch 1 and a low learning rate. The default is 0.0001.
End of explanation
"""
def get_fc_model():
    """Build a one-hidden-layer fully connected net over 1x28x28 inputs.

    Fix: the hidden layer previously used a softmax activation, which
    squashes the 512 hidden units onto a probability simplex and cripples
    learning; a hidden layer should use a non-saturating activation such
    as ReLU.  Only the output layer keeps softmax.
    """
    model = Sequential([
        Lambda(norm_input, input_shape=(1,28,28)),
        Flatten(),
        Dense(512, activation='relu'),  # was 'softmax' -- a bug
        Dense(10, activation='softmax')
        ])
    model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return model
fc = get_fc_model()
"""
Explanation: Single Dense Layer
End of explanation
"""
fc.fit_generator(batches, batches.N, nb_epoch=1,
validation_data=test_batches, nb_val_samples=test_batches.N)
fc.optimizer.lr=0.01
fc.fit_generator(batches, batches.N, nb_epoch=4,
validation_data=test_batches, nb_val_samples=test_batches.N)
"""
Explanation: As before, let's start with 1 epoch and a default low learning rate.
End of explanation
"""
def get_model():
    """Build a small VGG-style CNN: two conv blocks, then a dense head."""
    model = Sequential()
    model.add(Lambda(norm_input, input_shape=(1, 28, 28)))
    # Block 1: two 3x3 convolutions with 32 filters, then 2x2 max-pooling.
    model.add(Convolution2D(32, 3, 3, activation='relu'))
    model.add(Convolution2D(32, 3, 3, activation='relu'))
    model.add(MaxPooling2D())
    # Block 2: two 3x3 convolutions with 64 filters, then 2x2 max-pooling.
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D())
    # Classifier head.
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    model.compile(Adam(), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
model = get_model()
model.fit_generator(batches, batches.N, nb_epoch=1,
validation_data=test_batches, nb_val_samples=test_batches.N)
model.optimizer.lr=0.1
model.fit_generator(batches, batches.N, nb_epoch=1,
validation_data=test_batches, nb_val_samples=test_batches.N)
model.optimizer.lr=0.01
model.fit_generator(batches, batches.N, nb_epoch=8,
validation_data=test_batches, nb_val_samples=test_batches.N)
"""
Explanation: Basic 'VGG-style' CNN
End of explanation
"""
model = get_model()
# Now, we don't user the default settings for ImageDataGenerator
gen = image.ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3,
height_shift_range=0.08, zoom_range=0.08)
batches = gen.flow(X_train, y_train, batch_size=64)
test_batches = gen.flow(X_test, y_test, batch_size=64)
model.fit_generator(batches, batches.N, nb_epoch=1,
validation_data=test_batches, nb_val_samples=test_batches.N)
model.optimizer.lr=0.1
model.fit_generator(batches, batches.N, nb_epoch=4,
validation_data=test_batches, nb_val_samples=test_batches.N)
model.optimizer.lr=0.01
model.fit_generator(batches, batches.N, nb_epoch=8,
validation_data=test_batches, nb_val_samples=test_batches.N)
"""
Explanation: Data Augmentation
End of explanation
"""
def get_model_bn():
    """Build the VGG-style CNN with batch normalisation between stages.

    BatchNormalization uses axis=1 on the convolutional stages because the
    images are in channels-first (1, 28, 28) layout.
    """
    model = Sequential()
    model.add(Lambda(norm_input, input_shape=(1, 28, 28)))
    model.add(Convolution2D(32, 3, 3, activation='relu'))
    model.add(BatchNormalization(axis=1))
    model.add(Convolution2D(32, 3, 3, activation='relu'))
    model.add(MaxPooling2D())
    model.add(BatchNormalization(axis=1))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(BatchNormalization(axis=1))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D())
    model.add(Flatten())
    model.add(BatchNormalization())
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dense(10, activation='softmax'))
    model.compile(Adam(), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
model = get_model_bn()
model.fit_generator(batches, batches.N, nb_epoch=1,
validation_data=test_batches, nb_val_samples=test_batches.N)
model.optimizer.lr=0.1
model.fit_generator(batches, batches.N, nb_epoch=4,
validation_data=test_batches, nb_val_samples=test_batches.N)
model.optimizer.lr=0.001
model.fit_generator(batches, batches.N, nb_epoch=12,
validation_data=test_batches, nb_val_samples=test_batches.N)
"""
Explanation: Batchnorm + data augmentation
End of explanation
"""
def get_model_bn_do():
    """Build the batchnorm CNN with 50% dropout before the output layer.

    Identical to get_model_bn() except for the Dropout(0.5) inserted
    between the last BatchNormalization and the softmax output.
    """
    model = Sequential()
    model.add(Lambda(norm_input, input_shape=(1, 28, 28)))
    model.add(Convolution2D(32, 3, 3, activation='relu'))
    model.add(BatchNormalization(axis=1))
    model.add(Convolution2D(32, 3, 3, activation='relu'))
    model.add(MaxPooling2D())
    model.add(BatchNormalization(axis=1))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(BatchNormalization(axis=1))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D())
    model.add(Flatten())
    model.add(BatchNormalization())
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    model.compile(Adam(), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
model = get_model_bn_do()
model.optimizer.lr=0.01
model.fit_generator(batches, batches.N, nb_epoch=12,
validation_data=test_batches, nb_val_samples=test_batches.N)
"""
Explanation: Batchnorm + dropout + data augmentation
End of explanation
"""
def fit_model():
    """Train a fresh batchnorm+dropout CNN: one warm-up epoch, four epochs
    at a higher rate, then twelve at a lower rate; returns the model.

    NOTE(review): assigning to model.optimizer.lr after compile may not
    actually change the learning rate used by the compiled Keras 1.x
    optimizer (which keeps it in a backend variable) -- confirm, or use
    K.set_value(model.optimizer.lr, ...) instead.
    """
    model = get_model_bn_do()
    # Warm-up epoch at the optimizer's default learning rate.
    model.fit_generator(batches, batches.N, nb_epoch=1, verbose=0,
                        validation_data=test_batches, nb_val_samples=test_batches.N)
    model.optimizer.lr=0.1
    model.fit_generator(batches, batches.N, nb_epoch=4, verbose=0,
                        validation_data=test_batches, nb_val_samples=test_batches.N)
    model.optimizer.lr=0.01
    model.fit_generator(batches, batches.N, nb_epoch=12, verbose=0,
                        validation_data=test_batches, nb_val_samples=test_batches.N)
    # model.optimizer.lr=0.001
    # model.fit_generator(batches, batches.N, nb_epoch=18, verbose=0,
    #                     validation_data=test_batches, nb_val_samples=test_batches.N)
    return model
# Return a list of models
models = [fit_model() for i in range(6)]
path = 'data/mnist/'
model_path = path + 'models/'
for i, m in enumerate(models):
m.save_weights(model_path+'cnn-mnist23-'+str(i)+'.pkl')
evals = np.array([m.evaluate(X_test, y_test, batch_size=256) for m in models])
evals.mean(axis=0)
all_preds = np.stack([m.predict(X_test, batch_size=256) for m in models])
all_preds.shape
avg_preds = all_preds.mean(axis=0)
keras.metrics.categorical_accuracy(y_test, avg_preds).eval()
"""
Explanation: Ensembling
Ensembling is a way that can often improve your accuracy. It takes many models and combines them together.
End of explanation
"""
|
cliburn/sta-663-2017 | homework/01_Functions_Loops_Branching_Solutions.ipynb | mit | scores = [ 84, 76, 67, 23, 83, 23, 50, 100, 32, 84, 22, 41, 27,
29, 71, 85, 47, 77, 39, 25, 85, 69, 22, 66, 100, 92,
97, 46, 81, 88, 67, 20, 52, 62, 39, 36, 79, 54, 74,
64, 33, 68, 85, 69, 84, 30, 68, 100, 71, 33, 21, 95,
92, 72, 53, 50, 31, 82, 53, 68, 49, 37, 40, 21, 94,
30, 54, 58, 92, 95, 73, 80, 81, 56, 44, 22, 69, 70,
25, 50, 59, 32, 65, 79, 27, 62, 27, 31, 78, 88, 68,
53, 79, 69, 89, 38, 80, 55, 92, 55]
attendances = [17, 19, 21, 14, 10, 20, 14, 9, 6, 21, 5, 23, 21, 4, 5, 21, 20,
2, 14, 14, 21, 22, 3, 0, 11, 0, 0, 4, 20, 14, 23, 16, 24, 5,
12, 11, 22, 20, 15, 23, 0, 20, 20, 6, 4, 14, 6, 18, 17, 0, 18,
6, 3, 19, 24, 7, 9, 15, 18, 10, 2, 15, 21, 2, 9, 21, 20, 11,
24, 23, 14, 22, 4, 12, 7, 19, 6, 18, 23, 6, 14, 6, 1, 12, 7,
11, 22, 21, 7, 22, 24, 4, 10, 17, 21, 15, 0, 20, 3, 20]
# Your answer here
def grade(score, attendance):
    """Return the letter grade for a student.

    Bands: A = [90, 100], B = [80, 90), C = [65, 80), D = [0, 65).
    Attending 12 days or fewer lowers the grade one step (D stays D).
    """
    if attendance > 12:
        bands = ((90, 'A'), (80, 'B'), (65, 'C'))
    else:
        # Poor attendance: every band is shifted one letter down.
        bands = ((90, 'B'), (80, 'C'))
    for cutoff, letter in bands:
        if score >= cutoff:
            return letter
    return 'D'
# Tally how many students fall into each grade band.
counts = {}
for s, a in zip(scores, attendances):
    letter = grade(s, a)
    if letter in counts:
        counts[letter] += 1
    else:
        counts[letter] = 1
for letter in 'ABCD':
    print(letter, counts[letter])
"""
Explanation: Functions, loops and branching
The following exercises let you practice Python syntax. Do not use any packages not in the standard library except for matplotlib.pyplot which has been imported for you.
If you have not done much programming, these exercises will be challenging. Don't give up! For this first exercise, solutions are provided, but try not to refer to them unless you are desperate.
1. Grading (20 points)
Write a function to assign grades to a student such that
A = [90 - 100]
B = [80 - 90)
C = [65 - 80)
D = [0, 65)
where square brackets indicate inclusive boundaries and parentheses indicate exclusive boundaries. However, students whose attendance is 12 days or fewer get their grade reduced by one (A to B, B to C, C to D, and D stays D). The function should take a score and an attendance as an argument and return A, B, C or D as appropriate. (10 points)
- Count the number of students with each grade from the given scores. (10 points)
End of explanation
"""
# Your answer here
def henon(x, y, a=1.4, b=0.3):
    """One step of the Henon map: (x, y) -> (1 - a*x^2 + y, b*x)."""
    x_next = 1 - a * x * x + y
    y_next = b * x
    return (x_next, y_next)
henon(1, 1)
# Sweep the Henon parameter `a` from 1.00 to 1.40 in steps of 0.01;
# for each value iterate the map 1000 times from (1, 1) and keep the
# final 50 x-values (the attractor, after transients have died out).
n = 1000
n_store = 50
aa = [a / 100 for a in range(100, 141)]
xxs = []
for a in aa:
    x, y = 1, 1
    kept = []
    for step in range(n):
        x, y = henon(x, y, a=a)
        if step >= n - n_store:
            kept.append(x)
    xxs.append(kept)
%matplotlib inline
import matplotlib.pyplot as plt
for a, xs in zip(aa, xxs):
plt.scatter([a]*n_store, xs, s=1)
"""
Explanation: 2. The Henon map and chaos. (25 points)
The Henon map takes a pont $(x_n, y_n)$ in the plane and maps it to
$$
x_{n+1} = 1 - a x_n^2 + y_n \
y_{n+1} = b x_n
$$
Write a function for the Henon map. It should take the current (x, y) value and return a new pair of coordinates. Set a=1.4 and b=0.3 as default arguments. What is the output for x=1 and y=1? (5 points)
Using a for loop that increments the value of $a$ from 1.1 to 1.4 in steps of 0.01, save the last 50 $x$-terms in the iterated Henon map stopping at $x_{1000}$ for each value of $a$. Use $x_0 = 1$ and $y_0 = 1$ for each value of $a$, leaving fixed $b = 0.3$. (10 points)
Make a scatter plot of each $(a, x)$ value with $a$ on the horizontal axis and $x$ on the vertical axis. Use the plt.scatter function with s=1 to make the plot. (10 points)
End of explanation
"""
# Your answer here
def collatz(n):
    """Return the full Collatz sequence from n down to 1, inclusive."""
    seq = [n]
    while n != 1:
        # Halve even terms; map odd terms to 3n + 1.
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        seq.append(n)
    return seq
"""
Explanation: 3. Collatz numbers - Euler project problem 14. (25 points)
The following iterative sequence is defined for the set of positive integers:
n → n/2 (n is even)
n → 3n + 1 (n is odd)
Using the rule above and starting with 13, we generate the following sequence:
13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
It can be seen that this sequence (starting at 13 and finishing at 1) contains 10 terms. Although it has not been proved yet (Collatz Problem), it is thought that all starting numbers finish at 1.
Write a function to generate the iterative sequence described (15 points)
Which starting number, under one million, produces the longest chain? (10 points)
NOTE: Once the chain starts the terms are allowed to go above one million.
End of explanation
"""
def collatz_count(n):
    """Return the length of the Collatz sequence that starts at n.

    Equivalent to len(collatz(n)) but without building the list.
    """
    steps = 1
    while n != 1:
        if n % 2:
            n = 3 * n + 1
        else:
            n //= 2
        steps += 1
    return steps
%%time
best_n = 1
best_length = 1
for n in range(2, 1000000):
length = len(collatz(n))
if length > best_length:
best_length = length
best_n = n
print(best_n, best_length)
"""
Explanation: Generator version
End of explanation
"""
%%time
best_n = 1
best_length = 1
seen = set([])
for n in range(2, 1000000):
if n in seen:
continue
seq = collatz(n)
seen.update(seq)
length = len(seq)
if length > best_length:
best_length = length
best_n = n
print(best_n, best_length)
"""
Explanation: A simple optimization
Ignore starting numbers that have been previously generated since they cannot be longer than the generating sequence.
End of explanation
"""
# Your answer here
import urllib.request
response = urllib.request.urlopen('http://www.gutenberg.org/files/4300/4300-0.txt')
text = response.read().decode()
"""
Explanation: 4. Reading Ulysses. (30 points)
Write a program to download the text of Ulysses (5 points)
Open the downloaded file and read the entire sequence into a single string variable called text, discarding the header information (i.e. text should start with \n\n*** START OF THIS PROJECT GUTENBERG EBOOK ULYSSES ***\n\n\n\n\n). Also remove the footer information (i.e. text should not include anything from End of the Project Gutenberg EBook of Ulysses, by James Joyce). (10 points)
Find and report the starting index (counting from zero) and length of the longest word in text. For simplicity, a word is defined here to be any sequence of characters with no space between the characters (i.e. a word may include punctuation or numbers, just not spaces). If there are ties, report the starting index and length of the last word found. For example, in "the quick brow fox jumps over the lazy dog." the longest word is jumps which starts at index 19 and has length 5, and 'dog.' would be considered a 4-letter word (15 points).
End of explanation
"""
# Persist the downloaded text, then read it back (round-trip sanity check).
with open('ulysses.txt', 'w') as f:
    f.write(text)
with open('ulysses.txt') as f:
    text = f.read()
# Strip the Project Gutenberg header and footer so `text` holds only the
# body of the novel.
start_string = '\n\n*** START OF THIS PROJECT GUTENBERG EBOOK ULYSSES ***\n\n\n\n\n'
stop_string = 'End of the Project Gutenberg EBook of Ulysses, by James Joyce'
start_idx = text.find(start_string)
stop_idx = text.find(stop_string)
text = text[(start_idx + len(start_string)):stop_idx]
# Scan the distinct whitespace-separated tokens for the longest "word"
# (punctuation counts as part of the word, per the problem statement).
# NOTE(review): iterating a set makes the tie-break arbitrary; the rfind
# in the next cell then reports the last occurrence of whichever winner
# was chosen here.
best_len = 0
best_word = ''
for word in set(text.split()):
    if len(word) > best_len:
        best_len = len(word)
        best_word = word
best_word
"""
Explanation: ```python
Alternative version using requests library
Although not officially part of the standard libaray,
it is so widely used that the standard docs point to it
"The Requests package is recommended for a higher-level HTTP client interface."
import requests
url = 'http://www.gutenberg.org/files/4300/4300-0.txt'
text = requests.get(url).text
```
End of explanation
"""
idx = text.rfind(best_word,)
idx, best_len
text[idx:(idx+best_len)]
"""
Explanation: We are looking for the last word found, so search backwards from the end with rfind
End of explanation
"""
|
kkkddder/dmc | notebooks/week-6/02-using a pre-trained model with Keras.ipynb | apache-2.0 | import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
import sys
import re
import pickle
"""
Explanation: Lab 6.2 - Using a pre-trained model with Keras
In this section of the lab, we will load the model we trained in the previous section, along with the training data and mapping dictionaries, and use it to generate longer sequences of text.
Let's start by importing the libraries we will be using:
End of explanation
"""
pickle_file = '-basic_data.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
X = save['X']
y = save['y']
char_to_int = save['char_to_int']
int_to_char = save['int_to_char']
del save # hint to help gc free up memory
print('Training set', X.shape, y.shape)
"""
Explanation: Next, we will import the data we saved previously using the pickle library.
End of explanation
"""
# define the LSTM model
model = Sequential()
model.add(LSTM(128, return_sequences=False, input_shape=(X.shape[1], X.shape[2])))
# model.add(Dropout(0.50))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
"""
Explanation: Now we need to define the Keras model. Since we will be loading parameters from a pre-trained model, this needs to match exactly the definition from the previous lab section. The only difference is that we will comment out the dropout layer so that the model uses all the hidden neurons when doing the predictions.
End of explanation
"""
# load the parameters from the pretrained model
filename = "-basic_LSTM.hdf5"
model.load_weights(filename)
model.compile(loss='categorical_crossentropy', optimizer='adam')
"""
Explanation: Next we will load the parameters from the model we trained previously, and compile it with the same loss and optimizer function.
End of explanation
"""
def sample(preds, temperature=1.0):
    """Draw one class index from `preds`, reshaped by `temperature`.

    Lower temperatures sharpen the distribution (closer to argmax);
    higher temperatures flatten it (more random draws).
    """
    logits = np.log(np.asarray(preds).astype('float64')) / temperature
    weights = np.exp(logits)
    weights = weights / np.sum(weights)
    draw = np.random.multinomial(1, weights, 1)
    return np.argmax(draw)
def generate(sentence, sample_length=50, diversity=0.35):
    """Stream `sample_length` generated characters to stdout, seeded by
    `sentence`.

    Relies on module-level `model`, `X` (for the one-hot dimensions),
    `char_to_int` and `int_to_char`.  `sentence` should be exactly as long
    as the training sequences (100 chars here) -- shorter seeds leave the
    tail of x zero-padded; TODO confirm that behaves sensibly.
    """
    generated = sentence
    sys.stdout.write(generated)
    for i in range(sample_length):
        # One-hot encode the current character window.
        x = np.zeros((1, X.shape[1], X.shape[2]))
        for t, char in enumerate(sentence):
            x[0, t, char_to_int[char]] = 1.
        preds = model.predict(x, verbose=0)[0]
        # Sample the next character, then slide the window forward by one.
        next_index = sample(preds, diversity)
        next_char = int_to_char[next_index]
        generated += next_char
        sentence = sentence[1:] + next_char
        sys.stdout.write(next_char)
        sys.stdout.flush()
    # NOTE(review): bare `print` is a Python 2 statement (newline); under
    # Python 3 it is a no-op expression.
    print
"""
Explanation: We also need to rewrite the sample() and generate() helper functions so that we can use them in our code:
End of explanation
"""
prediction_length = 500
seed_from_text = "america has shown that progress is possible. last year, income gains were larger for households at t"
seed_original = "and as people around the world began to hear the tale of the lowly colonists who overthrew an empire"
for seed in [seed_from_text, seed_original]:
generate(seed, prediction_length, .50)
print "-" * 20
"""
Explanation: Now we can use the generate() function to generate text of any length based on our imported pre-trained model and a seed text of our choice. For best result, the length of the seed text should be the same as the length of training sequences (100 in the previous lab section).
In this case, we will test the overfitting of the model by supplying it two seeds:
one which comes verbatim from the training text, and
one which comes from another earlier speech by Obama
If the model has not overfit our training data, we should expect it to produce reasonable results for both seeds. If it has overfit, it might produce pretty good results for something coming directly from the training set, but perform poorly on a new seed. This means that it has learned to replicate our training text, but cannot generalize to produce text based on other inputs. Since the original article was very short, however, the entire vocabulary of the model might be very limited, which is why as input we use a part of another speech given by Obama, instead of completely random text.
Since we have not trained the model for that long, we will also use a lower temperature to get the model to generate more accurate if less diverse results. Try running the code a few times with different temperature settings to generate different results.
End of explanation
"""
|
ellisztamas/faps | docs/tutorials/.ipynb_checkpoints/08_data_cleaning_in_Amajus-checkpoint.ipynb | mit | import numpy as np
from pandas import DataFrame as df
import faps as fp
import matplotlib.pyplot as plt
%pylab inline
print("Created using FAPS version {}.".format(fp.__version__))
"""
Explanation: Data cleaning for Antirrhinum majus data set from 2012
End of explanation
"""
# Genotype data for the offspring (which carry their mother's ID) and for
# the candidate adults.
progeny = fp.read_genotypes('../../data/offspring_SNPs_2012.csv', mothers_col=1, genotype_col=2)
adults = fp.read_genotypes('../../data/parents_SNPs_2012.csv')
# Keep only those offspring whose mother was also genotyped as an adult.
keep_offspring = [mother in adults.names for mother in progeny.mothers.tolist()]
progeny = progeny.subset(keep_offspring)
"""
Explanation: Tom Ellis, June 2017
In this notebook we will examine an empirical dataset using the snapdragon Antirrhinum majus.
In 2012 we collected open-pollinated seed capsules from wild mothers and genotyped samples of the offspring. A single seed capsule contains up to several hundred seeds, sired by anywhere from one to many pollen donors. We also collected tissue and GPS positions for as many of the adult reproductive plants as we could find.
These data are those described and analysed by Ellis et al. (2018), and are available from the IST Austria data repository (DOI:10.15479/AT:ISTA:95).
Below, we will do an initial data inspection to weed out dubious loci and individuals. It can be argued that this process was overly conservative, and we threw out a lot of useful data, so you need not necessarily be so critical of your own data.
Data inspection
Import genotype data for the reproductive adults and offspring. The latter includes information on the ID of the maternal mother.
End of explanation
"""
all([progeny.markers[i] == adults.markers[i] for i in range(progeny.nloci)])
"""
Explanation: Genotype information
As a sanity check, confirm that the marker names really do match.
End of explanation
"""
# Worst-case genotype dropout rate in each dataset.
print(f"Adults: {adults.missing_data().max()} \nProgeny: {progeny.missing_data().max()}")
"""
Explanation: Tissue from the adults and progeny was dried in different ways. For the progeny, I didnt use enough silica gel to dry the tissue rapidly, and the DNA became degraded. Reflecting this, although genotype dropouts (the rate at which genotype information at a single locus fails to amplify) is respectable for the adults, but dire for the offspring.
End of explanation
"""
# Distribution of missing data per locus and per individual, for both the
# offspring and the adult datasets (2x2 grid of histograms).
fig = plt.figure(figsize=(10,10))
fig.subplots_adjust(wspace=0.2, hspace=0.2)
# One tuple per panel: dataset, grouping, panel title, y-axis label.
# The per-individual panels count individuals, so their y label differs from
# the per-locus panels (the original labelled every y axis "Number of loci"),
# and the "indiviudual" typo in the titles is corrected.
panels = [
    (progeny, 'marker',     'Per locus: offspring',      'Number of loci'),
    (progeny, 'individual', 'Per individual: offspring', 'Number of individuals'),
    (adults,  'marker',     'Per locus: adults',         'Number of loci'),
    (adults,  'individual', 'Per individual: adults',    'Number of individuals'),
]
for position, (genotypes, grouping, title, ylabel) in enumerate(panels, start=1):
    ax = fig.add_subplot(2, 2, position)
    ax.hist(genotypes.missing_data(grouping), bins=np.arange(0, 1, 0.05))
    ax.set_xlabel("Missing data")
    ax.set_ylabel(ylabel)
    ax.set_title(title)
"""
Explanation: Luckily a lot of this is driven by a small number of loci/individuals with very high dropout rates.
End of explanation
"""
# Worst per-individual dropout rate in each dataset. Both calls now use the
# keyword form by='individual' for consistency (the original mixed keyword
# and positional styles for the same argument).
print(
    "Adults:", adults.missing_data(by='individual').max(),
    "\nProgeny:", progeny.missing_data(by='individual').max()
)
"""
Explanation: Although overall per locus drop-out rates are low for the adults, there are some individuals with alarmingly high amounts of missing data. Candidates with very few loci typed can come out as being highly compatible with many offspring, just because there is insufficient information to exclude them.
End of explanation
"""
# Flag individuals with at least 5% missing data, report how many there are,
# then drop them from both datasets. Computing each mask once keeps the
# printed counts consistent with the subset: the original counted > 0.05 but
# removed >= 0.05, so an individual at exactly 5% was removed uncounted.
adults_high_md = adults.missing_data(1) >= 0.05
progeny_high_md = progeny.missing_data(1) >= 0.05
print(
    "Adults:", adults_high_md.sum(),
    "\nProgeny:", progeny_high_md.sum()
)
adults = adults.subset(individuals= ~adults_high_md)
progeny = progeny.subset( individuals= ~progeny_high_md)
"""
Explanation: Count, then remove individuals with >5% missing data.
End of explanation
"""
# Re-plot the missing-data distributions after removing high-dropout
# individuals (note the tighter x range, 0-0.7, than in the plot above).
fig = plt.figure(figsize=(10,10))
fig.subplots_adjust(wspace=0.2, hspace=0.2)
# Top row: offspring; bottom row: adults. Left column per locus, right per
# individual.
mdo = fig.add_subplot(2,2,1)
mdo.hist(progeny.missing_data('marker'), bins=np.arange(0, 0.7, 0.05))
mdo.set_xlabel("Missing data")
mdo.set_ylabel("Number of loci")
mdo.set_title('Per locus: offspring')
indo = fig.add_subplot(2,2,2)
indo.hist(progeny.missing_data(by='individual'), bins=np.arange(0, 0.7, 0.05))
indo.set_xlabel("Missing data")
# NOTE(review): the per-individual panels bin individuals, so "Number of
# loci" on the y axis (and the "indiviudual" typo in the titles) look like
# copy-paste slips from the per-locus panels.
indo.set_ylabel("Number of loci")
indo.set_title('Per indiviudual: offspring')
mda = fig.add_subplot(2,2,3)
mda.hist(adults.missing_data('marker'), bins=np.arange(0, 0.7, 0.05))
mda.set_xlabel("Missing data")
mda.set_ylabel("Number of loci")
mda.set_title('Per locus: adults')
inda = fig.add_subplot(2,2,4)
inda.hist(adults.missing_data(by='individual'), bins=np.arange(0, 0.7, 0.05))
inda.set_xlabel("Missing data")
inda.set_ylabel("Number of loci")
inda.set_title('Per indiviudual: adults')
"""
Explanation: Histograms look much better. It would still worth removing some of the dubious loci with high drop-out rates though.
End of explanation
"""
# Drop loci with >= 10% dropout in the offspring from BOTH datasets.
# The mask must be computed once, before subsetting: the original subset
# `progeny` first and then recomputed progeny.missing_data(0) on the
# already-reduced array, so the second mask no longer matched the loci
# of `adults`.
loci_keep = progeny.missing_data(0) < 0.1
print(np.array(progeny.markers)[~loci_keep])
progeny= progeny.subset(loci= loci_keep)
adults = adults.subset(loci = loci_keep)
"""
Explanation: Remove the loci with dropouts >10% from both the offspring and adult datasets.
End of explanation
"""
# Heterozygosity against allele frequency for the adult genotypes.
adult_freqs = adults.allele_freqs()
adult_hets = adults.heterozygosity(0)
plt.scatter(adult_freqs, adult_hets)
plt.xlabel('Allele frequency')
plt.ylabel('Heterozygosity')
plt.show()
"""
Explanation: Allele frequency and heterozygosity generally show the convex pattern one would expect. An exception is the locus with allele frequency at around 0.4, but heterozygosity >0.7, which is suspect, and indicative of a possible outlier.
End of explanation
"""
# Remove loci with extreme heterozygosity (< 0.2 or > 0.7) in the adults.
# Compute the heterozygosity vector once instead of four times.
het = adults.heterozygosity(0)
print(
    "Heterozygosity > 0.7:", adults.markers[het > 0.7],
    "\nHeterozygosity < 0.2:", progeny.markers[het < 0.2]
)
# `&` is the idiomatic elementwise AND for boolean arrays; the original
# multiplied the two masks, which gives the same result less readably.
het_ok = (het > 0.2) & (het < 0.7)
progeny = progeny.subset(loci=het_ok)
adults = adults.subset(loci=het_ok)
"""
Explanation: Loci with low heterozygosity are not dangerous in themselves; they might contribute some information, albeit little. To be on the safe side, let's remove loci with less than 0.2 heterozygosity, and the errant locus with high heterozygosity.
End of explanation
"""
# Allele frequency (top row) and per-locus heterozygosity (bottom row) for
# the adults (left) and offspring (right) after all locus filtering.
fig = plt.figure(figsize=(10,10))
fig.subplots_adjust(wspace=0.1, hspace=0.2)
afp = fig.add_subplot(2,2,1)
afp.hist(adults.allele_freqs())
afp.set_title('Adults')
afp.set_xlabel("Allele frequency")
afo = fig.add_subplot(2,2,2)
afo.hist(progeny.allele_freqs())
afo.set_title('Offspring')
afo.set_xlabel("Allele frequency")
hetp = fig.add_subplot(2,2,3)
hetp.hist(adults.heterozygosity(0))
hetp.set_xlabel("Heterozygosity")
heto = fig.add_subplot(2,2,4)
heto.hist(progeny.heterozygosity(0))
heto.set_xlabel("Heterozygosity")
"""
Explanation: Summary of genotype data
This leaves us with a dataset of 61 loci for which allele frequency and heterozygosity are highest around 0.5, which is what we would like. In particular, heterozygosity (and hence homozygosity) among the adults is humped around 0.5, which is a good sign that parents should be readily distinguishable. There is nevertheless substantial spread towards zero and one for the progeny data however, which is less than ideal.
End of explanation
"""
# Retain only offspring whose mother appears among the genotyped adults.
offspring_with_mother = [
    i for i, mother in enumerate(progeny.mothers) if mother in adults.names
]
progeny = progeny.subset(offspring_with_mother)
# Genotypes of the mothers themselves, one row per offspring.
mothers = adults.subset(progeny.parent_index('m', adults.names))
"""
Explanation: The effective number of loci can be seen as the number of loci at which one can compare the offspring, maternal and candidate paternal genotypes (i.e. those loci with no missing data). Given how high dropouts are in the offspring, it is worthwhile to check the effective number of loci for this dataset.
To calculate the effective number of loci, we need genotype data for the mothers.
End of explanation
"""
# Effective number of loci: loci with no missing data across each
# offspring-mother-candidate comparison.
neloci = fp.effective_nloci(progeny, mothers, adults)
bin_edges = np.arange(45.5, 63.5, 1)
plt.hist(neloci.flatten(), bins=bin_edges)
plt.show()
"""
Explanation: In fact, effective number of loci is good. The minimum number of valid loci to compare is 46, and in 99% of cases there are 57 or more loci.
End of explanation
"""
# Final summary of the cleaned genotype data: locus count, mean dropout
# rates, and the ranges of heterozygosity and minor-allele frequency.
print(
    "Total n. loci:", adults.nloci,
    "\nMean per-locus missing data in the adults:", adults.missing_data(by = 'marker').mean(),
    "\nMean per-locus missing data in the progeny:", progeny.missing_data(by = 'marker').mean(),
    "\nMinimum heterozygosity at any locus:", adults.heterozygosity(by = 'marker').min(),
    "\nMaximum heterozygosity at any locus:", adults.heterozygosity(by = 'marker').max(),
    "\nLowest minor-allele frequency:", adults.allele_freqs().min(),
    "\nHighest minor-allele frequency:", adults.allele_freqs().max()
)
"""
Explanation: Finally, print some summary statistics about the quality of the genotype information in the data set.
End of explanation
"""
# Split the offspring into maternal (half-sibling) families, keyed by mother.
prlist = progeny.split(progeny.mothers)
# Number of maternal families in the sample.
len(prlist)
"""
Explanation: Example family: L1872
Note from August 2021: this section includes some old and fairly ugly list comprehensions that aren't the clearest way to do things, which I hope to update at some point. If you want to apply what follows to your own work, perhaps don't worry too much about what they are doing, and just try to see which names you would need to substitute to apply it to your own data.
The progeny dataset consists of offspring from multiple families that were genotyped at the same time. It was convenient to consider them as one so far to tidy up the genotype data, but for subsequent analysis we need to split them up into their constituent full sib families. This is easy to do with split, which returns a list of genotypeArray objects.
End of explanation
"""
# Offspring of the example mother L1872, and her own genotype row.
ex_progeny = prlist["L1872"]
ex_mother = adults.subset(ex_progeny.parent_index('m', adults.names))
# Number of offspring sampled from this family.
ex_progeny.size
"""
Explanation: By way of a sanity check we will examine one of the largest families in detail. After the data filtering above, there are 20 offspring from mother L1872. Distributions of missing data, heterozygosity and allele frequency at each locus suggest no reason for alarm.
End of explanation
"""
allele_freqs = adults.allele_freqs() # population allele frequencies
# NOTE(review): allele_freqs is computed but not passed to paternity_array
# below — confirm whether it is actually needed here.
# mu is the per-locus genotyping error rate; missing_parents is the assumed
# proportion of true fathers absent from the candidate sample (see the text
# above for the 10% justification).
ex_patlik = fp.paternity_array(
    ex_progeny,
    ex_mother,
    adults,
    mu = 0.0015,
    missing_parents=0.1
)
# Cluster the offspring into full sibships. 1000 is presumably the number of
# Monte Carlo draws used by the clustering — TODO confirm against faps docs.
ex_sc = fp.sibship_clustering(ex_patlik, 1000)
"""
Explanation: Family structure
Cluster the family into sibships. I have set the proportion of missing parents to 0.1; we have removed 140 of the 2219 (6%) candidates logged as alive in 2012, and I allow for 10% of candidates having been missed. In fact the results do not depend on the parameter unless it is unrealistically high.
End of explanation
"""
# Hierarchical-clustering dendrogram of sibship relationships among the
# offspring, built from the paternity likelihoods.
from scipy.cluster.hierarchy import dendrogram

dendrogram(
    ex_sc.linkage_matrix,
    orientation='left',
    color_threshold=0,
    above_threshold_color='black',
)
plt.show()
"""
Explanation: We can first look at the dendrogram of relatedness between individuals derived from the array of paternity likelihoods.
End of explanation
"""
# The maximum-likelihood partition: each offspring gets an arbitrary integer
# family label, so np.unique gives the set of inferred full-sib families.
# prob_partitions holds log probabilities, hence the np.exp.
print(
    "Most-probable partition:", ex_sc.mlpartition,
    "\nUnique families:", np.unique(ex_sc.mlpartition),
    "\nPosterior probability of most-probable partition:", np.exp(ex_sc.prob_partitions.max())
)
"""
Explanation: We can compare this to the most-probable partition structure to get a rough idea of what is going on. This partition groups offspring into 6 full sibships and has a posterior probability of 0.74. The partition structure simply labels individuals 0 to 20 with a unique, arbitrary identifier. For example, individuals 2 and 3 are grouped into an especially large family labelled '1'.
End of explanation
"""
# Posterior probability of paternity for every candidate, per offspring
# (log scale, judging by the np.exp below).
postpat = ex_sc.posterior_paternity_matrix()
# Add a label for missing fathers to the end of the candidate names, so the
# extra "absent father" column of postpat can be named like the others.
adults.names = np.append(adults.names, "missing")
# Index of the most probable candidate for each offspring. argmax returns the
# first maximum in each row, exactly like the original
# np.where(row.max() == row)[0][0] construction, but in one vectorised call.
mx = postpat.argmax(axis=1)
# Summarise as a DataFrame: most probable candidate and its posterior probability.
df([adults.names[mx], np.exp(postpat.max(1))]).T
"""
Explanation: We can recover posterior probabilities of paternity for each candidate on each offspring using posterior_paternity_matrix. For most offspring, there is a single candidate with a probability of paternity close to one.
End of explanation
"""
# Posterior distributions for the number of full-sib families (left) and
# for family sizes (right) in the example array.
fig = plt.figure(figsize=(15,6))
nf = fig.add_subplot(1,2,1)
nf.plot(range(1,ex_progeny.size+1), ex_sc.nfamilies())
nf.set_xlabel('Number of families')
nf.set_ylabel('Probability')
fs = fig.add_subplot(1,2,2)
fs.plot(range(1,ex_progeny.size+1), ex_sc.family_size())
fs.set_xlabel('Family size')
plt.show()
"""
Explanation: Family sizes
Consistent with the results for many families (shown below), the posterior distributions for family size suggest many small families and a smaller number of larger families.
End of explanation
"""
# GPS coordinates (columns 3-4 of the CSV) and sample labels for the plants.
gps_pos = np.genfromtxt('../../data/amajus_GPS_2012.csv', delimiter=',', skip_header=1, usecols=[3,4]) # import CSV file
gps_lab = np.genfromtxt('../../data/amajus_GPS_2012.csv', delimiter=',', skip_header=1, usecols=0, dtype='str') # import CSV file
# Keep only positions for plants present in the genotype data.
genotyped = [i for i, label in enumerate(gps_lab) if label in adults.names]
gps_pos, gps_lab = gps_pos[genotyped], gps_lab[genotyped]
# Positions of the most probable candidate fathers identified above.
candidate_rows = [i for i, label in enumerate(gps_lab) if label in adults.names[mx]]
gps_cands = gps_pos[candidate_rows]
# Position of the focal mother, L1872.
gps_ex = gps_pos[gps_lab == "L1872"].squeeze()
"""
Explanation: Geographic positions
Intuitively, one would expect most pollen donors to be fairly close to the mother. Since the most probable partition had fairly strong support and identified a set of candidates with posterior probabilities close to one, it is reasonable to use these individuals to get an idea of where the pollen donors are to be found.
First, import GPS data and make sure sample IDs match genotype data.
End of explanation
"""
# Second-most-probable candidate for each offspring. np.sort is ascending,
# so the second-LARGEST probability is column -2; the original took column 1
# (the second-SMALLEST), which selects an essentially arbitrary
# low-probability candidate rather than the runner-up.
second = np.sort(postpat, 1)[:, -2]
sx = np.array([np.where(second[i] == postpat[i])[0][0] for i in range(ex_progeny.size)])
# NOTE(review): sx indexes columns of postpat (ordered like adults.names),
# but gps_pos rows follow the GPS-file order after subsetting — confirm the
# two orderings match before trusting this mapping.
gps_sec = gps_pos[np.unique(sx)]

# Map: all adults (green), second candidates (gold), top candidates (blue),
# and the focal mother L1872 (red).
fig = plt.figure(figsize=(16.9/2.54,6.75/2.54))
plt.xlabel('East-West position (m)')
plt.ylabel('North-South position (m)')
plt.xlim(-2500,2000)
plt.ylim(-500,1500)
plt.scatter(gps_pos[:,0], gps_pos[:,1], s=5, color='green', alpha=0.5)
plt.scatter(gps_sec[:,0], gps_sec[:,1], color='gold')
plt.scatter(gps_cands[:,0],gps_cands[:,1], color='blue')
plt.scatter(gps_ex[0], gps_ex[1], color='red', s=40, edgecolors='black')
plt.show()
"""
Explanation: The map below shows the spatial positions of all individuals in the sample in green. Overlaid are the mother in red, and top candidates in blue. The likley candidates are indeed found close to the mother along the lower (southern-most) road, with two individuals on the upper (northern) road. This gives us no cause to doubt the validity of the paternity results.
End of explanation
"""
# Euclidean distance from the mother to each top candidate father.
dx = gps_ex[0] - gps_cands[:, 0]
dy = gps_ex[1] - gps_cands[:, 1]
dists = np.sqrt(dx ** 2 + dy ** 2)
print("Mean dispersal of top candidates =", mean(dists), "metres")
plt.hist(dists, bins=np.arange(0, 650, 50))
plt.show()
"""
Explanation: We can use these data to get a very rough dispersal kernel. Most pollen comes from within 50m of the maternal plant.
End of explanation
"""
# Distance from the mother to each second-most-likely candidate, for
# comparison with the top candidates above.
dists2 = np.sqrt((gps_ex[0] - gps_sec[:,0])**2 + (gps_ex[1] - gps_sec[:,1])**2)
print("Mean dispersal of second candidates =",mean(dists2), "metres")
"""
Explanation: In contrast, the second-most-likely candidates are on average more than 800m from the maternal plant.
End of explanation
"""
# Distribution of the number of sampled offspring per maternal family.
offspring_per_family = [family.size for family in prlist.values()]
plt.hist(offspring_per_family, bins=np.arange(0, 25))
plt.show()
"""
Explanation: Multiple families
The code becomes more challenging because we will need to perform operations on every element in this list. Luckily this is straightforward in Python if we use list comprehensions. For example, we can pull out and plot the number of offspring in each half-sibling array:
End of explanation
"""
# Split genotype data into maternal families, then keep only the families
# with at least 17 offspring and take the first 17 offspring from each.
mlist = mothers.split(progeny.mothers)
prlist = progeny.split(progeny.mothers)
# Keys of families with 17 or more offspring, computed once and reused for
# both dicts. (The original comment said "20 or more", but the threshold
# actually used throughout is 17.)
big_families = [k for k in prlist.keys() if prlist[k].size >= 17]
prog17 = {k: prlist[k] for k in big_families}
mlist = {k: mlist[k] for k in big_families}
# Take the first 17 offspring of each family; order within a genotypeArray
# is arbitrary, so this is equivalent to sampling at random.
prog17 = {k: v.subset(range(17)) for k, v in prog17.items()}
mlist = {k: v.subset(range(17)) for k, v in mlist.items()}
"""
Explanation: All of these families are samples from much larger half sib arrays, so comparing full-sibship sizes and number is even more difficult if there are different numbers of offspring. For this reason we can pick out only those families with 17 or more offspring.
This cell splits genotype data into maternal families of 17 or more offspring, then picks 17 offspring at random (there is no meaning in the order of individuals in the genotypeArray object, so taking the first 17 is tantamount to choosing at random). This leaves us with 18 families of 17 offspring.
End of explanation
"""
allele_freqs = adults.allele_freqs() # population allele frequencies
# NOTE(review): allele_freqs is computed but never passed to paternity_array
# below — confirm whether it is still needed.
adults.names = adults.names[:-1] # Remove 'missing' from candidate names
from time import time
t0=time()
# NOTE(review): mu here is 0.0013, while the single-family analysis above
# used 0.0015 — confirm which genotyping error rate is intended.
patlik = fp.paternity_array(prog17, mlist, adults, mu=0.0013, missing_parents=0.1)
print("Completed in {} seconds.".format(time() - t0))
"""
Explanation: Calculate likelihoods of paternity for each family. This took 3 seconds on a 2010 Macbook Pro; your mileage may vary. In order to do so we also need population allele frequencies, and to remove the entry for missing fathers from the vector of candidate names that we added previously.
End of explanation
"""
# Cluster each maternal family into full sibships, timing the run.
start = time()
sc = fp.sibship_clustering(patlik)
print("Completed in {} seconds.".format(time() - start))
"""
Explanation: The next step is clustering each family into full sibships.
End of explanation
"""
# Stack the per-family posterior distributions into arrays, one row per
# maternal family.
nfamilies = np.array([clustering.nfamilies() for clustering in sc.values()])
famsize = np.array([clustering.family_size() for clustering in sc.values()])
"""
Explanation: Calculate probability distributions for family size and number of families for each array.
End of explanation
"""
# Pooled posterior distributions across all 18 families: number of families
# (left) and family size (right). The grey bars mark the 95% credible
# regions described in the text.
fig = plt.figure(figsize=(16.9/2.54, 6/2.54))
fig.subplots_adjust(wspace=0.3, hspace=0.1)
nf = fig.add_subplot(1,2,1)
nf.set_ylabel('Probability density')
nf.set_xlabel('Number of families')
nf.set_ylim(-0.005,0.2)
nf.set_xlim(0,18)
# White bars: full distribution; grey bars re-draw the credible region on top.
nf.bar(np.arange(0.5,17.5), nfamilies.sum(0)/nfamilies.sum(), color='1', width=1)
nf.bar(np.arange(3.5,16.5), (nfamilies.sum(0)/nfamilies.sum())[3:16], color='0.75', width=1)
fs = fig.add_subplot(1,2,2)
fs.set_xlabel('Family size')
#fs.set_ylabel('Probability density')
fs.set_ylim(-0.05,0.8)
fs.set_xlim(0,17)
fs.bar(np.arange(0.5,17.5), famsize.sum(0)/famsize.sum(), color='1', width=1)
fs.bar(np.arange(0.5,6.5), (famsize.sum(0)/famsize.sum())[:6], color='0.75', width=1)
plt.show()
"""
Explanation: Plots below show the probability distributions for the number and sizes of families. Grey bars show 95% credible intervals (see CDF plots below). Samples of 17 offspring are divided into between four and 16 full-sibling families consisting of between one and eight individuals. Most families seem to be small, with a smaller number of large families.
End of explanation
"""
# Cumulative posterior distributions. The dashed lines at 0.025 and 0.975
# show where the 95% credible intervals fall.
fig = plt.figure(figsize=(15, 6))
fig.subplots_adjust(wspace=0.3, hspace=0.1)
nf = fig.add_subplot(1,2,1)
nf.set_ylabel('Cumulative density')
nf.set_xlabel('Number of families')
nf.set_xlim(0,20)
nf.set_ylim(0,1.05)
nf.plot(np.arange(1,18), np.cumsum(nfamilies.sum(0)/nfamilies.sum()))
nf.axhline(0.975, 0.05, 0.95, linestyle='dashed')
nf.axhline(0.025, 0.05, 0.95, linestyle='dashed')
nf.grid()
fs = fig.add_subplot(1,2,2)
fs.set_ylabel('Cumulative density')
fs.set_xlabel('Family size')
fs.set_xlim(0,21)
fs.set_ylim(0,1.05)
fs.plot(np.arange(1,18), np.cumsum(famsize.sum(0)/famsize.sum()))
fs.axhline(0.975, 0.05, 0.95, linestyle='dashed')
fs.axhline(0.025, 0.05, 0.95, linestyle='dashed')
fs.grid()
"""
Explanation: Cumulative probability density plots demonstrate the credible intervals for family size and number.
End of explanation
"""
|
LucaCanali/Miscellaneous | Spark_Physics/Dimuon_mass_spectrum/Dimuon_mass_spectrum_histogram_Spark_DataFrame_Colab_version.ipynb | apache-2.0 | # Run this if you need to install Apache Spark (PySpark)
! pip install pyspark
# install sparkhistogram
! pip install sparkhistogram
"""
Explanation: Histogram of the Dimuon Mass Spectrum
This implements the dimuon mass spectrum analysis, a "Hello World!" example for data analysis in High Energy Physics. It is intended as a technology demonstrator for the use Apache Spark for High Energy Physics.
The workload and data:
- The input data is a series of candidate muon events.
- The job output is a histogram of the dimuon mass spectrum, where several peaks (resonances) can be identified corresponding to well-know particles (e.g. the Z boson at 91 Gev).
- The computation is based on https://root.cern.ch/doc/master/df102__NanoAODDimuonAnalysis_8C.html and CERN open data from the CMS collaboration linked there.
- See also https://github.com/LucaCanali/Miscellaneous/tree/master/Spark_Physics
Author and contact: Luca.Canali@cern.ch
January, 2022
Install Apache Spark
Make sure you have Spark installed, this is how you can do it:
simply run pip install pyspark
as an alternative download Spark from http://spark.apache.org/downloads.html
note: Spark version used for testing this notebook: Spark 3.2.1
End of explanation
"""
# Download the data (2 GB) if not yet available locally
! wget https://sparkdltrigger.web.cern.ch/sparkdltrigger/Run2012BC_DoubleMuParked_Muons.orc
"""
Explanation: Download the data
End of explanation
"""
# Create (or reuse) the local Spark session for the analysis.
from pyspark.sql import SparkSession

builder = SparkSession.builder
builder = builder.appName("dimuon mass")
# Run locally, using all available cores.
builder = builder.master("local[*]")
builder = builder.config("spark.driver.memory", "2g")
# Enable the vectorized ORC reader for the nested (array) columns.
builder = builder.config("spark.sql.orc.enableNestedColumnVectorizedReader", "true")
spark = builder.getOrCreate()
# Read data with the muon candidate events
# download data with wget as detailed above
# further details of the available datasets at
# https://github.com/LucaCanali/Miscellaneous/tree/master/Spark_Physics
path = "./"
df_muons = spark.read.orc(path + "Run2012BC_DoubleMuParked_Muons.orc")
# Inspect the schema (per-event arrays of per-muon quantities) and count events.
df_muons.printSchema()
print(f"Number of events: {df_muons.count()}")
# Apply filters to the input data
# - select only events with 2 muons
# - select only events where the 2 muons have opposite charge
df_muons = df_muons.filter("nMuon == 2").filter("Muon_charge[0] != Muon_charge[1]")
# This computes the 4-vectors sum for the 2 muon system (the invariant mass)
# using formulas from special relativity, in the limit E >> muons rest mass
# see also http://edu.itp.phys.ethz.ch/hs10/ppp1/2010_11_02.pdf
# and https://en.wikipedia.org/wiki/Invariant_mass
df_with_dimuonmass = df_muons.selectExpr("""
sqrt(2 * Muon_pt[0] * Muon_pt[1] *
( cosh(Muon_eta[0] - Muon_eta[1]) - cos(Muon_phi[0] - Muon_phi[1]) )
) as Dimuon_mass""")
# This defines the DataFrame transformation to compute the Dimuon mass spectrum
# The result is a histogram with (energy) bin values and event counts for each bin
# Requires sparkhistogram
# See https://github.com/LucaCanali/Miscellaneous/blob/master/Spark_Notes/Spark_DataFrame_Histograms.md
from sparkhistogram import computeHistogram
# histogram parameters: 30000 bins spanning the mass range 0.25-300 GeV
min_val = 0.25
max_val = 300
num_bins = 30000
# use the helper function computeHistogram in the package sparkhistogram
histogram_data = computeHistogram(df_with_dimuonmass, "Dimuon_mass", min_val, max_val, num_bins)
# The action toPandas() here triggers the computation.
# Histogram data is fetched into the driver as a Pandas Dataframe.
%time histogram_data_pandas=histogram_data.toPandas()
import matplotlib.pyplot as plt
# NOTE(review): the 'seaborn-darkgrid' style name was renamed to
# 'seaborn-v0_8-darkgrid' in matplotlib 3.6 — confirm the matplotlib version.
plt.style.use('seaborn-darkgrid')
plt.rcParams.update({'font.size': 20, 'figure.figsize': [14,10]})
f, ax = plt.subplots()
# Bin values and event counts from the Spark-computed histogram.
# (No bins are actually trimmed here, despite the original comment
# "cut the first and last bin".)
x = histogram_data_pandas["value"]
y = histogram_data_pandas["count"]
# line plot
ax.plot(x, y, '-')
# the plot is in log-log axis to better show the peaks
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlim(min_val, max_val)
ax.set_ylim(1, 6e5)
ax.set_xlabel('$m_{dimuon}$ (GeV)')
ax.set_ylabel('Number of Events')
ax.set_title("Distribution of the Dimuon Mass Spectrum")
# Label the resonance peaks; coordinates are in axes fractions (transAxes).
txt_opts = {'horizontalalignment': 'center',
'verticalalignment': 'center',
'transform': ax.transAxes}
plt.text(0.85, 0.75, 'Z', **txt_opts)
plt.text(0.55, 0.77, r"$\Upsilon$(1,2,3S)", **txt_opts)
plt.text(0.37, 0.95, r"J/$\Psi$", **txt_opts)
plt.text(0.40, 0.77, r"$\Psi$'", **txt_opts)
plt.text(0.22, 0.80, r"$\phi$", **txt_opts)
plt.text(0.16, 0.83, r"$\rho,\omega$", **txt_opts)
plt.text(0.11, 0.78, r"$\eta$", **txt_opts);
plt.show()
# Release the Spark resources now the analysis is complete.
spark.stop()
"""
Explanation: Dimuon mass spectrum calculation with Spark DataFrame API
End of explanation
"""
|
CAChemE/curso-python-datos | notebooks_vacios/060-ScikitLearn-Intro.ipynb | bsd-3-clause | # X_train, X_test, Y_train, Y_test =
# preserve
X_train.shape, Y_train.shape
# preserve
X_test.shape, Y_test.shape
"""
Explanation: Introducción al aprendizaje automático con scikit-learn
En los últimos tiempos habrás oído hablar de machine learning, deep learning, reinforcement learning, muchas más cosas que contienen la palabra learning y, por supuesto, Big Data. Con los avances en capacidad de cálculo de los últimos años y la popularización de lenguajes de alto nivel, hemos entrado de lleno en la fiebre de hacer que las máquinas aprendan. En esta clase veremos cómo utilizar el paquete scikit-learn de Python para poder crear modelos predictivos a partir de nuestros datos de una manera rápida y sencilla.
En primer lugar vamos a probar con un ejemplo muy sencillo: ajustar una recta a unos datos. Esto difícilmente se puede llamar machine learning, pero nos servirá para ver cómo es la forma de trabajar con scikit-learn, cómo se entrenan los modelos y cómo se calculan las predicciones.
En primer lugar fabricamos unos datos distribuidos a lo largo de una recta con un poco de ruido:
El proceso para usar scikit-learn es el siguiente:
Separar los datos en matriz de características features y variable a predecir y
Seleccionar el modelo
Elegir los hiperparámetros
Ajustar o entrenar el modelo (model.fit)
Predecir con datos nuevos (model.predict)
<div class="alert alert-info">Tenemos que hacer este `reshape` para transformar nuestro vector en una matriz de columnas. Rara vez tendremos que repetir este paso, puesto que en la práctica siempre tendremos varias variables.</div>
Para calcular el error, en el módulo sklearn.metrics tenemos varias funciones útiles:
Y ahora predecimos con datos nuevos:
¡Y ya está! Lo básico de scikit-learn está aquí. Lo próximo será usar diferentes tipos de modelos y examinar con rigor su rendimiento para poder seleccionar el que mejor funcione para nuestros datos.
Introducción rápida al aprendizaje automático
En aprendizaje automático tenemos dos tipos de problemas:
Aprendizaje supervisado, cuando tengo datos etiquetados, es decir: conozco la variable a predecir de un cierto número de observaciones. Pasándole esta información al algoritmo, este será capaz de predecir dicha variable cuando reciba observaciones nuevas. Dependiendo de la naturaleza de la variable a predecir, tendremos a su vez:
Regresión, si es continua (como el caso anterior), o
Clasificación, si es discreta o categórica (sí/no, color de ojos, etc)
Aprendizaje no supervisado, cuando no tenemos datos etiquetados y por tanto no tengo ninguna información a priori. En este caso usaremos los algoritmos para descubrir patrones en los datos y agruparlos, pero tendremos que manualmente inspeccionar el resultado después y ver qué sentido podemos darle a esos grupos.
En función de la naturaleza de nuestro problema, scikit-learn proporciona una gran variedad de algoritmos que podemos elegir.
Clasificación
En scikit-learn tenemos disponibles muchos datasets clásicos de ejemplo que podemos utilizar para practicar. Uno de ellos es el dataset MNIST, que consiste en imágenes escaneadas de números escritos a mano por funcionarios de los EEUU. Para cargarlo, importamos la función correspondiente de sklearn.datasets:
Ya tenemos los datos separados en matriz de características y vector de predicción. En este caso, tendré 64 = 8x8 características (un valor numérico por cada pixel de la imagen) y mi variable a predecir será el número en sí.
Siempre que se hace aprendizaje supervisado, se ha de dividir el dataset en una parte para entrenamiento y otra para test (incluso a veces hay una partición más para validación)
End of explanation
"""
# Inicializamos el modelo
# Lo entrenamos
"""
Explanation: Para visualizar estas imágenes tendremos que hacer un .reshape:
Ten en cuenta que nosotros sabemos qué número es cada imagen porque somos humanos y podemos leerlas. El ordenador lo sabe porque están etiquetadas, pero ¿qué pasa si viene una imagen nueva? Para eso tendremos que construir un modelo de clasificación. En este caso aplicaremos la regresión logística
End of explanation
"""
# Vemos los resultados para los datos de test
"""
Explanation: Y una vez que hemos ajustado el modelo, comprobemos cuáles son sus predicciones usando los mismos datos de entrenamiento:
End of explanation
"""
# preserve
# https://github.com/amueller/scipy-2016-sklearn/blob/master/notebooks/05%20Supervised%20Learning%20-%20Classification.ipynb
from sklearn.datasets import make_blobs
# preserve
# Two synthetic, partially overlapping clusters (fixed seed for reproducibility).
features, labels = make_blobs(centers=[[6, 0], [2, -1]], random_state=0)
features.shape
# preserve
# Scatter the points, coloured by their true cluster label.
plt.scatter(features[:, 0], features[:, 1], c=labels)
"""
Explanation: De nuevo usamos sklearn.metrics para medir la eficacia del algoritmo:
¡Parece que hemos acertado prácticamente todas! Más tarde volveremos sobre este porcentaje de éxito, que bien podría ser engañoso. De momento, representemos otra medida de éxito que es la matriz de confusión:
Clustering y reducción de dimensionalidad
Una vez que hemos visto los dos tipos de problemas supervisados, vamos a ver cómo se trabajan los problemas no supervisados. En primer lugar vamos a fabricar dos nubes de puntos usando la función make_blobs:
End of explanation
"""
# preserve
# Bounding box of the data, used to build a dense evaluation grid.
xmin, xmax = features[:, 0].min(), features[:, 0].max()
ymin, ymax = features[:, 1].min(), features[:, 1].max()
xx, yy = np.meshgrid(
np.linspace(xmin, xmax),
np.linspace(ymin, ymax)
)
# Flatten the grid into an (n_points, 2) array of (x, y) coordinates.
mesh = np.c_[xx.ravel(), yy.ravel()]
mesh
# http://pybonacci.org/2015/01/14/introduccion-a-machine-learning-con-python-parte-1/
"""
Explanation: Hemos creado dos grupos y algunos puntos se solapan, pero ¿qué pasaría si no tuviésemos esta información visual? Vamos a emplear un modelo de clustering para agrupar los datos: en este caso KMeans
Observa que por defecto tenemos 8 clusters. Veamos qué ocurre:
Ahora no pasamos la información de las etiquetas al algoritmo a la hora de entrenar. En la práctica por supuesto no la tendremos.
Y ahora preparamos el código para representar todas las regiones:
End of explanation
"""
# preseve
"""
Explanation: Si lo metemos todo en una función interactiva:
End of explanation
"""
# preserve
import pandas as pd
def load_iris_df():
    """Load the iris dataset as a pandas DataFrame.

    Returns a DataFrame with one column per feature (named after
    ``iris.feature_names``) plus a ``species`` column holding the target
    labels as a pandas Categorical.
    """
    from sklearn.datasets import load_iris
    iris = load_iris()
    df = pd.DataFrame(iris.data, columns=iris.feature_names)
    # Map the integer target codes straight to their species names.
    df["species"] = pd.Categorical.from_codes(iris.target, categories=iris.target_names)
    return df
iris_df = load_iris_df()
# preserve
iris_df.head()
# preserve
_ = pd.tools.plotting.scatter_matrix(iris_df, c=iris_df["species"].cat.codes, figsize=(10, 10))
"""
Explanation: Reducción de dimensionalidad
Vamos a rescatar nuestro dataset de los dígitos y tratar de visualizarlo en dos dimensiones, lo que se conoce como reducción de dimensionalidad.
Y ahora proyectamos los datos usando .transform:
Ejercicio
Visualiza el dataset de las flores (load_iris) utilizando las funciones que tienes más abajo. ¿Hay alguna forma clara de separar las tres especies de flores?
Separa el dataset en matriz de características features y vector de etiquetas labels. Conviértelos a arrays de NumPy usando .as_matrix().
Reduce la dimensionalidad del dataset a 2 usando sklearn.manifold.Isomap o sklearn.decomposition.PCA y usa un algoritmo de clustering con 3 clusters. ¿Se parecen los clusters que aparecen a los grupos originales?
Predice el tipo de flor usando un algoritmo de clasificación. Visualiza la matriz de confusión. ¿Cuál es el porcentaje de aciertos del algoritmo? ¿Es más certero en algún tipo de flor en concreto? ¿Concuerda esto con lo que pensaste en el apartado 1?
End of explanation
"""
|
dracolytch/ml-agents | python/PPO.ipynb | apache-2.0 | import numpy as np
import os
import tensorflow as tf
from ppo.history import *
from ppo.models import *
from ppo.trainer import Trainer
from unityagents import *
"""
Explanation: Unity ML Agents
Proximal Policy Optimization (PPO)
Contains an implementation of PPO as described here.
End of explanation
"""
### General parameters
max_steps = 5e5 # Set maximum number of steps to run environment.
run_path = "ppo" # The sub-directory name for model and summary statistics
load_model = False # Whether to load a saved model.
train_model = True # Whether to train the model.
summary_freq = 10000 # Frequency at which to save training statistics.
save_freq = 50000 # Frequency at which to save model.
env_name = "environment" # Name of the training environment file.
### Algorithm-specific parameters for tuning
gamma = 0.99 # Reward discount rate.
lambd = 0.95 # Lambda parameter for GAE.
time_horizon = 2048 # How many steps to collect per agent before adding to buffer.
beta = 1e-3 # Strength of entropy regularization
num_epoch = 5 # Number of gradient descent steps per batch of experiences.
epsilon = 0.2 # Acceptable threshold around ratio of old and new policy probabilities.
buffer_size = 2048 # How large the experience buffer should be before gradient descent.
learning_rate = 3e-4 # Model learning rate.
hidden_units = 64 # Number of units in hidden layer.
batch_size = 64 # How many experiences per gradient descent update step.
"""
Explanation: Hyperparameters
End of explanation
"""
env = UnityEnvironment(file_name=env_name)
print(str(env))
brain_name = env.brain_names[0]
"""
Explanation: Load the environment
End of explanation
"""
tf.reset_default_graph()

# Create the Tensorflow model graph: the PPO policy/value network sized for
# this environment's observation and action spaces.
ppo_model = create_agent_model(env, lr=learning_rate,
                               h_size=hidden_units, epsilon=epsilon,
                               beta=beta, max_step=max_steps)

# Brain capabilities: continuous vs. discrete actions, and whether camera
# observations and/or state vectors are provided by the environment.
is_continuous = (env.brains[brain_name].action_space_type == "continuous")
use_observations = (env.brains[brain_name].number_observations > 0)
use_states = (env.brains[brain_name].state_space_size > 0)

model_path = './models/{}'.format(run_path)
summary_path = './summaries/{}'.format(run_path)

if not os.path.exists(model_path):
    os.makedirs(model_path)

if not os.path.exists(summary_path):
    os.makedirs(summary_path)

init = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    # Instantiate model parameters: restore from a checkpoint if requested,
    # otherwise initialize fresh weights.
    if load_model:
        print('Loading Model...')
        ckpt = tf.train.get_checkpoint_state(model_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(init)
    steps = sess.run(ppo_model.global_step)
    summary_writer = tf.summary.FileWriter(summary_path)
    info = env.reset(train_mode=train_model)[brain_name]
    trainer = Trainer(ppo_model, sess, info, is_continuous, use_observations, use_states)
    # Main collect / train loop, bounded by max_steps environment steps.
    while steps <= max_steps:
        if env.global_done:
            info = env.reset(train_mode=train_model)[brain_name]
        # Decide and take an action
        new_info = trainer.take_action(info, env, brain_name)
        info = new_info
        # Compute advantages (GAE) and move finished trajectories into the buffer.
        trainer.process_experiences(info, time_horizon, gamma, lambd)
        if len(trainer.training_buffer['actions']) > buffer_size and train_model:
            # Perform gradient descent with experience buffer
            trainer.update_model(batch_size, num_epoch)
        if steps % summary_freq == 0 and steps != 0 and train_model:
            # Write training statistics to tensorboard.
            trainer.write_summary(summary_writer, steps)
        if steps % save_freq == 0 and steps != 0 and train_model:
            # Save Tensorflow model
            save_model(sess, model_path=model_path, steps=steps, saver=saver)
        steps += 1
        sess.run(ppo_model.increment_step)
    # Final save Tensorflow model
    if steps != 0 and train_model:
        save_model(sess, model_path=model_path, steps=steps, saver=saver)
env.close()
export_graph(model_path, env_name)
"""
Explanation: Train the Agent(s)
End of explanation
"""
export_graph(model_path, env_name)
"""
Explanation: Export the trained Tensorflow graph
Once the model has been trained and saved, we can export it as a .bytes file which Unity can embed.
End of explanation
"""
|
google/starthinker | colabs/dbm_to_bigquery.ipynb | apache-2.0 | !pip install git+https://github.com/google/starthinker
"""
Explanation: DV360 Report To BigQuery
Move existing DV360 reports into a BigQuery table.
License
Copyright 2020 Google LLC,
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Disclaimer
This is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team.
This code generated (see starthinker/scripts for possible source):
- Command: "python starthinker_ui/manage.py colab"
- Command: "python starthinker/tools/colab.py [JSON RECIPE]"
1. Install Dependencies
First install the libraries needed to execute recipes, this only needs to be done once, then click play.
End of explanation
"""
from starthinker.util.configuration import Configuration
CONFIG = Configuration(
project="",
client={},
service={},
user="/content/user.json",
verbose=True
)
"""
Explanation: 2. Set Configuration
This code is required to initialize the project. Fill in required fields and press play.
If the recipe uses a Google Cloud Project:
Set the configuration project value to the project identifier from these instructions.
If the recipe has auth set to user:
If you have user credentials:
Set the configuration user value to your user credentials JSON.
If you DO NOT have user credentials:
Set the configuration client value to downloaded client credentials.
If the recipe has auth set to service:
Set the configuration service value to downloaded service credentials.
End of explanation
"""
FIELDS = {
'auth_read':'user', # Credentials used for reading data.
'auth_write':'service', # Authorization used for writing data.
'dbm_report_id':'', # DV360 report ID given in UI, not needed if name used.
'dbm_report_name':'', # Name of report, not needed if ID used.
'dbm_dataset':'', # Existing BigQuery dataset.
'dbm_table':'', # Table to create from this report.
'dbm_schema':'', # Schema provided in JSON list format or empty value to auto detect.
'is_incremental_load':False, # Clear data in destination table during this report's time period, then append report data to destination table.
}
print("Parameters Set To: %s" % FIELDS)
"""
Explanation: 3. Enter DV360 Report To BigQuery Recipe Parameters
Specify either report name or report id to move a report.
A schema is recommended, if not provided it will be guessed.
The most recent valid file will be moved to the table.
Modify the values below for your use case, can be done multiple times, then click play.
End of explanation
"""
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
TASKS = [
{
'dbm':{
'auth':{'field':{'name':'auth_read','kind':'authentication','order':0,'default':'user','description':'Credentials used for reading data.'}},
'report':{
'report_id':{'field':{'name':'dbm_report_id','kind':'integer','order':2,'default':'','description':'DV360 report ID given in UI, not needed if name used.'}},
'name':{'field':{'name':'dbm_report_name','kind':'string','order':3,'default':'','description':'Name of report, not needed if ID used.'}}
},
'out':{
'bigquery':{
'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Authorization used for writing data.'}},
'dataset':{'field':{'name':'dbm_dataset','kind':'string','order':4,'default':'','description':'Existing BigQuery dataset.'}},
'table':{'field':{'name':'dbm_table','kind':'string','order':5,'default':'','description':'Table to create from this report.'}},
'schema':{'field':{'name':'dbm_schema','kind':'json','order':6,'description':'Schema provided in JSON list format or empty value to auto detect.'}},
'header':True,
'is_incremental_load':{'field':{'name':'is_incremental_load','kind':'boolean','order':7,'default':False,'description':"Clear data in destination table during this report's time period, then append report data to destination table."}}
}
}
}
}
]
json_set_fields(TASKS, FIELDS)
execute(CONFIG, TASKS, force=True)
"""
Explanation: 4. Execute DV360 Report To BigQuery
This does NOT need to be modified unless you are changing the recipe, click play.
End of explanation
"""
|
oresat/oresat-ground-station | eb-ground-station/structure/weather-station-augmented-design/loadAnalysis.ipynb | gpl-3.0 | import numpy as np
import sys
import matplotlib.pyplot as plt
import sympy as sym
import pandas as pd
import magnitude as mag
from magnitude import mg
mag.new_mag('lbm', mag.Magnitude(0.45359237, kg=1))
mag.new_mag('lbf', mg(4.4482216152605, 'N'))
mag.new_mag('mph', mg(0.44704, 'm/s'))
from IPython.display import display, Markdown, Latex
def printBig(*message):
    """Render the arguments as a Markdown level-2 heading — print() with header2 formatting."""
    text = ' '.join(str(part) for part in message)
    display(Markdown('## ' + text))
# drag force:
def dragf(rho, A, Cd, v): return(mg(1/2)*rho*A*Cd*v**2)
# second area moment of an annulus:
def I_annulus(OD, ID): return( mg(np.pi/4)*( (OD/mg(2))**4 - (ID/mg(2))**4 ) )
# area of an annulus:
def annularArea(ID, OD): return(mg(np.pi)*(OD**2/mg(4)-ID**2/mg(4)))
print(sys.version)
%matplotlib inline
sym.init_printing()
"""
Explanation: Wind Loading Analysis for the EB Ground Station
Intro
Explanations
Resources
Average daily wind data for Portland
Free historical weather data on Wunderground.
Note: It only displays the data one year at a time, at most... Anyone want to write a little scraping script?
NOAA report on the 2007 storm. (Costal data)
There's this city-data thread where someone claims that 116 mph winds were observed on the Morrison bridge during the Columbus Day storm. Sadly, they do not cite any sources...
TODO
get the missing parameters
get the drag coefficient for the antenna/station (with ice)
you can estimate this if you know the dimensions of the antenna. Just model it as a whole bunch of cylinders. The Cd of a cylinder is well known.
Slight annoyance: C_d is a function of Re. The smallest conductors on the antenna will have a different Re than the mast, for example. Buuuuut, a factor of 2 or 3 change in Re usually corresponds to a very small change in C_d. (should be confirmed)
get the maximum expected wind speed
get the frontal area for the antenna, with ice
It would also be neat if we could calculate the corresponding ice thickness.
get an acceptable factor of safety on the wind speed
get the actual value for the center of mass
get the mass of the blocks
get the mass of the tubes
get the mass of the antenna/equipment (with ice)
put things in terms of initial parameters
CoP in terms of the station dimensions
wind loading in terms of speed and Cd
make a non-shitty version of the loading diagram
Problem Description
Failure Mode: Tipping
We want to know how heavy the feet of the ground station need to be in order to keep it from tipping over in high winds.
We are assuming the worst-case is that antenna will be covered in ice during a wind storm. The criterion for tipping is when the moment due to wind is equal and opposite to the moment due to gravity.
I'm choosing the foot weight as the "trim" variable, since it's a lot easier to just swap out some larger blocks than to find out, say, that we need to redesign the ground station for a larger mast diameter.
Failure Mode: Bending
We also want to know the necessary size and material of the mast, so that it doesn't bend.
Stress analysis will be done in SolidWorks.
Solution
Design Process
choose arbitrary dimensions for the ground station
find the worst-case loading
determine if any part of the ground station will yield
If it does, go to 1.
determine if the ground station will tip over
If it will, add weight to the feet.
Information I/O:
Empirical C_d data: (profile of antenna conductors, typical conductor diameter, ripped plots from Munson book, wind speed) -> approximate C_d for the whole antenna (probably about 1.2)
Drag equation: (wind speed, air density, C_d, frontal area) -> drag on antenna
(same collection of variables as above) -> drag on mast, legs
(leg dimensions, material) -> leg CoM
2nd law for moments: (CoMs of all components, masses of all components, drag on antenna) -> necessary foot mass to prevent tipping
Setup
Imports
End of explanation
"""
FS= mg(1) # factor of safety (NOT SURE IF BUILDING CODES HAVE ONE BUILT IN)
"""
Explanation: Diagram
input parameters
End of explanation
"""
g= mg(9.81,'m/s2') # gravitational acceleration
rho_air= mg(1.225,'kg/m3') # density of air at 0 C
mu_air= mg(1.7e-5,'Pa s') # dynamic viscosity of air at 0 C
nu_air= mu_air/rho_air # kinematic viscosity of air at 0 C
rho_steel= mg(7.8e3,'kg/m3') # density of steel
rho_al= mg(2.7e3,'kg/m3') # density of aluminum
rho_fg= mg(1520,'kg/m3') # density of fiberglass
rho_concrete= mg(2300,'kg/m3') # density of concrete
sigma_y_al= mg(276,'MPa') # tensile yield strength of 6061-T6 aluminum
"""
Explanation: physical quantities
End of explanation
"""
Cd= mg(1.2) # Drag coefficient of a cylinder/antenna at relevant speeds, (THIS IS A GUESS)
# Fg= mg(300,'lbf') # weight of the ground station (THIS IS A GUESS)
v_wind_actual= mg(100,'mph') # max expexted wind speed (WHAT'S THE JUSTIFICATION HERE?)
v_wind= v_wind_actual*FS # max expexted wind speed (WHAT'S THE JUSTIFICATION HERE?)
# legs
Lxy_leg= mg(77,'inch') # length of a leg projected in the XY plane
L_leg= mg(78.75,'inch') # length of a leg
L_truss= mg(48.75,'inch') # length of the beam fixing a leg
# feet
Lxy_foot= mg(18.5,'inch') # side length of the foot's footprint
Lz_foot= mg(6,'inch') # height of the feet
# antennas
A_70cm= mg(4.0,'ft2') # frontal area of the 70 cm antenna
# A_70cm= mg(0,'ft2') # zero-area variant, useful for sensitivity checks
m_70cm= mg(7.5,'lbm') # mass of the 70 cm antenna
A_2m= mg(5.0,'ft2') # frontal area of the 2 m antenna
# A_2m= mg(0,'ft2') # zero-area variant, useful for sensitivity checks
m_2m= mg(9.5, 'lbm') # mass of the 2 m antenna
# mast
L_mast= mg(180,'inch') # length of the mast
OD_mast= mg(1.9,'inch') # outer diameter of the mast
ID_mast= mg(1.48,'inch') # inner diameter of the mast
# cross boom
OD_crossBoom= mg(2.0,'inch') # outer diameter of the cross boom
ID_crossBoom= mg(1.5,'inch') # inner diameter of the cross boom
L_crossBoom= mg(60+60+10.75,'inch') # length of the cross boom
rho_crossBoom= rho_fg # density of the cross boom
# legs
OD_leg= mg(1.0,'inch') # outer diameter of the legs
ID_leg= OD_leg-mg(2*0.2,'inch') # inner diameter of the legs (GUESSING A 0.2" wall)
"""
Explanation: ground station parameters
End of explanation
"""
# geometry
# L_leg^2 = Lz_leg^2 + Lxy_leg^2
Lz_leg= np.sqrt(L_leg**2 - Lxy_leg**2)
Lx_leg= Lxy_leg*mg(np.cos(np.pi/6)) # length of a leg projected in the X axis
Ly_leg= Lxy_leg*mg(np.cos(np.pi/3)) # length of a leg projected in the X axis
# frontal areas
A_mast= L_mast*OD_mast
A_crossBoom= L_crossBoom*OD_crossBoom
A_legs= mg(2)*(Lx_leg*OD_leg) +(Lz_leg*OD_leg)
A_total= A_70cm+A_2m+A_mast+A_crossBoom+A_legs
# masses
m_foot= rho_concrete*Lxy_foot**2*Lz_foot
m_mast= rho_al*L_mast*annularArea(ID=ID_mast, OD=OD_mast)
m_leg= rho_steel*L_leg*annularArea(ID=ID_leg, OD=OD_mast)
m_crossBoom= rho_fg*L_crossBoom*annularArea(ID=ID_crossBoom, OD=OD_crossBoom)
m_structure= m_mast+mg(3)*m_leg+m_crossBoom+m_70cm+m_2m
# centers of mass
Lz_CoM_70cm= L_mast
Lz_CoM_2m= L_mast
Lz_CoM_mast= L_mast/mg(2)
Lz_CoM_foot= Lz_foot/mg(2)
Lz_CoM_leg= Lz_foot+Lz_leg/mg(2)
Lz_CoM_crossBoom= L_mast
print(
'area contributions',
'70cm:', A_70cm.ounit('m2'),
'2m:', A_2m.ounit('m2'),
'mast:', A_mast,
'cross boom:', A_crossBoom,
'legs:', A_legs,
'total:', A_total,
sep='\n')
"""
Explanation: secondary parameters
End of explanation
"""
# center of pressure for the whole station
# assuming the CoP for each element is at the CoM (no lift on elem, constant density of elem)
Lz_CoP= (
A_mast*Lz_CoM_mast
+A_70cm*Lz_CoM_70cm
+A_2m*Lz_CoM_2m
+A_crossBoom*Lz_CoM_crossBoom
+A_legs*Lz_CoM_leg
)/A_total
# height of the CoM of the whole station
# (only useful for finding critical tipping angle)
Lz_CoM_total= (
Lz_CoM_mast*m_mast
+Lz_CoM_2m*m_2m
+Lz_CoM_70cm*m_70cm
+Lz_CoM_crossBoom*m_crossBoom
+Lz_CoM_leg*m_leg*mg(3)
)/m_structure
# drag on the station
#D_wind= mg(1/2)*rho_air*A_total*Cd*v_wind**2
D_wind= dragf(rho=rho_air, A=A_total, Cd=Cd, v=v_wind)
"""
Explanation: Calculations
I'm counting the mass of the feet separately, since they aren't rigidly attached to the rest of the ground station. I assume that if the station tips, it will rotate around the pivots on two of the feet. Meanwhile the third (upstream) foot would dangle from the pivot, keeping its CoM directly below the pivot. So, unlike all the other components, I don't assume that the CoM of the feet is centered on the mast. I take it to be the mass of a single foot on the upstream (-X) leg.
Tipping
Technically, I'm breaking the right hand rule. I'm using the -Y axis to be the direction of positive rotation. So, on the diagram, a positive rotation would be counter-clockwise. This is just because I'm used to thinking of positive rotation as "out-of-the-page", and the -Y axis is out-of-the-page on the diagram.
find drag, CoP, CoM
End of explanation
"""
# Moments exerted on the station
M_wind= -D_wind*(Lz_CoP-Lz_foot) # negative sign from CW direction
M_structure= m_structure*g*Lx_leg
M_foot= m_foot*g*(Lx_leg+Lxy_leg)
# 0 == M_wind + M_structure + M_foot + M_balast
M_ballast= -M_wind-M_structure-M_foot
# M_ballast == m_ballast*g*(Lx_leg+Lxy_leg)
m_ballast= M_ballast/g/(Lx_leg+Lxy_leg) # ballast on the foot
# m_ballast= M_ballast/g/(Lx_leg) # ballast on the mast
"""
Explanation: balance moments and find required ballast
End of explanation
"""
# # I= pi/4*(r_o^2 - r_i^2)
# I_mast= I_annulus(OD= OD_mast, ID= ID_mast)
# M_bending= D_wind*(Lz_CoP - Lz_leg)
# # sigma_max= M*y/I
# # max stress in the mast, assuming it's anchored at the height of the foot
# sigma_max_mast= M_bending*(OD_mast/mg(2))/I_mast
# print(
# 'tension from bending, if the mast is welded at the height of the feet:',
# sigma_max_mast.ounit('MPa')
# )
# print('tensile yield stress of the mast:', sigma_y_al.ounit('MPa'))
"""
Explanation: bending on a welded mast
(probably not relevant)
End of explanation
"""
print('m_ballast:', m_ballast.ounit('lbm'))
if (m_ballast.val <= 0):
print('No ballast needed. (Wind will not tip station)')
else:
print('mass of the required ballast per foot:', m_ballast.ounit('lbm'))
print('mass of a foot (for comparison):', m_foot.ounit('lbm '))
"""
Explanation: report results
End of explanation
"""
F_p_legsDrag= mg(1/2)*dragf(rho=rho_air, A=A_legs, Cd=Cd, v=v_wind)
F_p_boom= dragf(
rho=rho_air, Cd=Cd, v=v_wind,
A= A_70cm+A_2m+A_crossBoom
)
F_d_mast= dragf(rho=rho_air, Cd=Cd, A=A_mast, v=v_wind) # distributed over the mast
# about the base:
# sum(M) == 0 == (F_p_legsReact-F_p_legsDrag)*Lz_leg - F_p_boom*L_mast - F_d_mast*L_mast/2
# sum(F) == 0 == -F_p_legsReact + F_p_legsDrag + F_p_boom + F_p_baseReact + F_d_mast
F_p_legsReact= mg(1)/Lz_leg*( F_p_boom*L_mast + F_d_mast*L_mast/mg(2) ) + F_p_legsDrag
F_p_baseReact= F_p_legsReact - F_p_legsDrag - F_p_boom + F_d_mast
"""
Explanation: Bending
determine the loads applied to the mast
F_p is a point load. F_d is a distributed load.
Drag on the legs is beared equally by the feet and mast (by symmetry)
If I ever add in the calculation for the drag on the armpit beams,
I'll need to add half their drag to this number for the same reason.
And, I'll probably just assume that they have no reaction forces, since they
would make things statically indeterminate, which sucks.
Use 2nd law for forces and moments to get the reaction forces... Should probably make a diagram explaining where these loads are applied.
End of explanation
"""
# x = sym.symbols('x')
# print(F_p_boom.ounit('N'))
# # load per length, as a function of length along the mast:
# Fdist_expr_mast= F_d_mast/L_mast \
# + F_p_baseReact*mg(sym.DiracDelta(x),'/m') \
# + (-F_p_legsReact+F_p_legsDrag)*mg(sym.DiracDelta(x-Lz_leg.toval(ounit='m')),'/m') \
# + F_p_boom*mg(sym.DiracDelta(x-L_mast.toval(ounit='m')),'/m')
# print('load distribution expression:\n', Fdist_expr_mast.toval(ounit='N/m'), 'N/m')
# # shear load, as a function of length along the mast:
# F_expr_mast= mg(sym.integrate(Fdist_expr_mast.toval(ounit='N/m'), x),'N')
# print('shear load expression:\n', F_expr_mast.toval(ounit='N'), 'N')
# # bending moment, as a function of length along the mast:
# M_expr_mast= mg(sym.integrate(F_expr_mast.toval(ounit='N'),x), 'N m')
# print('bending load expression:\n', M_expr_mast.toval(ounit='N m'), 'N m')
x = sym.symbols('x')
# load per length, as a function of length along the mast:
Fdist_expr_mast= F_d_mast.toval('N')/L_mast.toval('m') \
+ F_p_baseReact.toval('N')*sym.DiracDelta(x) \
+ (-F_p_legsReact.toval('N')+F_p_legsDrag.toval('N'))*sym.DiracDelta(x-Lz_leg.toval(ounit='m')) \
+ F_p_boom.toval('N')*sym.DiracDelta(x-L_mast.toval(ounit='m'))
print('load distribution expression:\n', Fdist_expr_mast.toval(ounit='N/m'), 'N/m')
# shear load, as a function of length along the mast:
F_expr_mast= mg(sym.integrate(Fdist_expr_mast.toval(ounit='N/m'), x),'N')
print('shear load expression:\n', F_expr_mast.toval(ounit='N'), 'N')
# bending moment, as a function of length along the mast:
M_expr_mast= mg(sym.integrate(F_expr_mast.toval(ounit='N'),x), 'N m')
print('bending load expression:\n', M_expr_mast.toval(ounit='N m'), 'N m')
"""
Explanation: symbolically find loading on the mast
End of explanation
"""
xs= np.linspace(-1e-6, L_mast.toval(ounit='m')+1e-6, 300)
Fsf= sym.lambdify(x,F_expr_mast.toval(ounit='N'), ['numpy','sympy'])
Fs= mg(np.array([Fsf(x) for x in xs]),'N')
Vs= Fs/annularArea(OD=OD_mast, ID=ID_mast)
plt.plot(xs, Vs.toval(ounit='MPa'))
print(F_d_mast, F_p_baseReact, F_p_legsReact, F_p_legsDrag, F_p_boom, sep='\n')
Msf= sym.lambdify(x, M_expr_mast.toval(ounit='N m'), ['numpy', 'sympy'])
Ms= mg(np.array([Msf(x) for x in xs]),'N m')
plt.figure()
plt.plot(xs, Ms.toval(ounit='N m'))
sigmas= Ms*(OD_mast/mg(2))/I_mast
plt.figure()
plt.plot(xs, sigmas.toval(ounit='MPa'))
#OD_mast_new=
#ID_mast_new=
print(D_wind.ounit('lbf'))
print(Lz_CoP.ounit('ft'))
print(m_ballast.ounit('lbm'))
"""
Explanation:
End of explanation
"""
|
fastai/course-v3 | nbs/dl2/translation.ipynb | apache-2.0 | path = Config().data_path()/'giga-fren'
"""
Explanation: Reduce original dataset to questions
End of explanation
"""
#! wget https://s3.amazonaws.com/fast-ai-nlp/giga-fren.tgz -P {path}
#! tar xf {path}/giga-fren.tgz -C {path}
# with open(path/'giga-fren.release2.fixed.fr') as f:
# fr = f.read().split('\n')
# with open(path/'giga-fren.release2.fixed.en') as f:
# en = f.read().split('\n')
# re_eq = re.compile('^(Wh[^?.!]+\?)')
# re_fq = re.compile('^([^?.!]+\?)')
# en_fname = path/'giga-fren.release2.fixed.en'
# fr_fname = path/'giga-fren.release2.fixed.fr'
# lines = ((re_eq.search(eq), re_fq.search(fq))
# for eq, fq in zip(open(en_fname, encoding='utf-8'), open(fr_fname, encoding='utf-8')))
# qs = [(e.group(), f.group()) for e,f in lines if e and f]
# qs = [(q1,q2) for q1,q2 in qs]
# df = pd.DataFrame({'fr': [q[1] for q in qs], 'en': [q[0] for q in qs]}, columns = ['en', 'fr'])
# df.to_csv(path/'questions_easy.csv', index=False)
# del en, fr, lines, qs, df # free RAM or restart the nb
### fastText pre-trained word vectors https://fasttext.cc/docs/en/crawl-vectors.html
#! wget https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.fr.300.bin.gz -P {path}
#! wget https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.en.300.bin.gz -P {path}
#! gzip -d {path}/cc.fr.300.bin.gz
#! gzip -d {path}/cc.en.300.bin.gz
path.ls()
"""
Explanation: You only need to execute the setup cells once, uncomment to run. The dataset can be downloaded here.
End of explanation
"""
df = pd.read_csv(path/'questions_easy.csv')
df.head()
"""
Explanation: Put them in a DataBunch
Our questions look like this now:
End of explanation
"""
df['en'] = df['en'].apply(lambda x:x.lower())
df['fr'] = df['fr'].apply(lambda x:x.lower())
"""
Explanation: To make it simple, we lowercase everything.
End of explanation
"""
def seq2seq_collate(samples:BatchSamples, pad_idx:int=1, pad_first:bool=True, backwards:bool=False) -> Tuple[LongTensor, LongTensor]:
    "Function that collect samples and adds padding. Flips token order if needed"
    samples = to_data(samples)
    max_len_x = max(len(s[0]) for s in samples)
    max_len_y = max(len(s[1]) for s in samples)
    # Start from tensors entirely filled with the padding index, then copy
    # each sequence in at the front or the back.
    res_x = torch.zeros(len(samples), max_len_x).long() + pad_idx
    res_y = torch.zeros(len(samples), max_len_y).long() + pad_idx
    if backwards:
        pad_first = not pad_first
    for i, s in enumerate(samples):
        src, tgt = LongTensor(s[0]), LongTensor(s[1])
        if pad_first:
            res_x[i, -len(s[0]):] = src
            res_y[i, -len(s[1]):] = tgt
        else:
            res_x[i, :len(s[0])] = src
            res_y[i, :len(s[1])] = tgt
    if backwards:
        # Reverse the token order of every sequence in the batch.
        res_x, res_y = res_x.flip(1), res_y.flip(1)
    return res_x, res_y
"""
Explanation: The first thing is that we will need to collate inputs and targets in a batch: they have different lengths so we need to add padding to make the sequence length the same;
End of explanation
"""
class Seq2SeqDataBunch(TextDataBunch):
    "Create a `TextDataBunch` suitable for training an RNN classifier."
    @classmethod
    def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.', bs:int=32, val_bs:int=None, pad_idx=1,
               pad_first=False, device:torch.device=None, no_check:bool=False, backwards:bool=False, **dl_kwargs) -> DataBunch:
        "Function that transform the `datasets` in a `DataBunch` for classification. Passes `**dl_kwargs` on to `DataLoader()`"
        datasets = cls._init_ds(train_ds, valid_ds, test_ds)
        val_bs = ifnone(val_bs, bs)
        # Pad source and target sequences in each batch with `pad_idx`.
        collate_fn = partial(seq2seq_collate, pad_idx=pad_idx, pad_first=pad_first, backwards=backwards)
        # Training sampler: roughly sorts by source length (with some shuffling)
        # so batches contain similar-length sequences and need less padding.
        train_sampler = SortishSampler(datasets[0].x, key=lambda t: len(datasets[0][t][0].data), bs=bs//2)
        train_dl = DataLoader(datasets[0], batch_size=bs, sampler=train_sampler, drop_last=True, **dl_kwargs)
        dataloaders = [train_dl]
        # Validation/test loaders use a deterministic, fully length-sorted order.
        for ds in datasets[1:]:
            lengths = [len(t) for t in ds.x.items]
            sampler = SortSampler(ds.x, key=lengths.__getitem__)
            dataloaders.append(DataLoader(ds, batch_size=val_bs, sampler=sampler, **dl_kwargs))
        return cls(*dataloaders, path=path, device=device, collate_fn=collate_fn, no_check=no_check)
"""
Explanation: Then we create a special DataBunch that uses this collate function.
End of explanation
"""
class Seq2SeqTextList(TextList):
    # An item list of source texts whose labels are themselves texts:
    # `.databunch()` builds the seq2seq `DataBunch` defined above, and
    # labelling uses a plain `TextList` for the target sentences.
    _bunch = Seq2SeqDataBunch
    _label_cls = TextList
"""
Explanation: And a subclass of TextList that will use this DataBunch class in the call .databunch and will use TextList to label (since our targets are other texts).
End of explanation
"""
src = Seq2SeqTextList.from_df(df, path = path, cols='fr').split_by_rand_pct().label_from_df(cols='en', label_cls=TextList)
np.percentile([len(o) for o in src.train.x.items] + [len(o) for o in src.valid.x.items], 90)
np.percentile([len(o) for o in src.train.y.items] + [len(o) for o in src.valid.y.items], 90)
"""
Explanation: Thats all we need to use the data block API!
End of explanation
"""
src = src.filter_by_func(lambda x,y: len(x) > 30 or len(y) > 30)
len(src.train) + len(src.valid)
data = src.databunch()
data.save()
data = load_data(path)
data.show_batch()
"""
Explanation: We remove the items where one of the target is more than 30 tokens long.
End of explanation
"""
# Installation: https://github.com/facebookresearch/fastText#building-fasttext-for-python
import fastText as ft
fr_vecs = ft.load_model(str((path/'cc.fr.300.bin')))
en_vecs = ft.load_model(str((path/'cc.en.300.bin')))
"""
Explanation: Model
Pretrained embeddings
To install fastText:
$ git clone https://github.com/facebookresearch/fastText.git
$ cd fastText
$ pip install .
End of explanation
"""
def create_emb(vecs, itos, em_sz=300, mult=1.):
    """Create an `nn.Embedding` for vocab `itos`, initialized from pretrained vectors.

    Rows for words present in `vecs` are copied from the pretrained model;
    rows for missing words keep the random `nn.Embedding` initialization.

    Args:
        vecs: pretrained word-vector model exposing `get_words()` and
            `get_word_vector(word)` (fastText API).
        itos: list mapping vocab index -> token string.
        em_sz: embedding size; must match the pretrained vector length for a
            row to be copied.
        mult: unused, kept for interface compatibility.

    Returns:
        The initialized `nn.Embedding` (padding index 1).
    """
    emb = nn.Embedding(len(itos), em_sz, padding_idx=1)
    wgts = emb.weight.data
    # Membership set instead of materializing a vector for every word in the
    # pretrained vocabulary (which can be millions of 300-d vectors).
    known = set(vecs.get_words())
    miss = []
    for i, w in enumerate(itos):
        if w not in known:
            miss.append(w)
            continue
        try:
            wgts[i] = tensor(vecs.get_word_vector(w))
        except RuntimeError:
            # Pretrained vector length != em_sz: keep the random init.
            miss.append(w)
    if miss:
        print(f'{len(miss)} tokens not found in the pretrained vectors')
    return emb
emb_enc = create_emb(fr_vecs, data.x.vocab.itos)
emb_dec = create_emb(en_vecs, data.y.vocab.itos)
torch.save(emb_enc, path/'models'/'fr_emb.pth')
torch.save(emb_dec, path/'models'/'en_emb.pth')
"""
Explanation: We create an embedding module with the pretrained vectors and random data for the missing parts.
End of explanation
"""
del fr_vecs
del en_vecs
"""
Explanation: Free some RAM
End of explanation
"""
from fastai.text.models.qrnn import QRNN, QRNNLayer
"""
Explanation: QRNN seq2seq
Our model uses QRNNs at its base (you can use GRUs or LSTMs by adapting it a little). Using QRNNs requires a properly installed CUDA (a version that matches your PyTorch install).
End of explanation
"""
class Seq2SeqQRNN(nn.Module):
    """Sequence-to-sequence model: a QRNN encoder feeding a QRNN decoder.

    The encoder consumes the source sentence; its final hidden state is
    projected and handed to the decoder, which generates the translation
    greedily one token at a time, up to `max_len` tokens.
    """
    def __init__(self, emb_enc, emb_dec, n_hid, max_len, n_layers=2, p_inp:float=0.15, p_enc:float=0.25,
                 p_dec:float=0.1, p_out:float=0.35, p_hid:float=0.05, bos_idx:int=0, pad_idx:int=1):
        super().__init__()
        self.n_layers,self.n_hid,self.max_len,self.bos_idx,self.pad_idx = n_layers,n_hid,max_len,bos_idx,pad_idx
        self.emb_enc = emb_enc
        self.emb_enc_drop = nn.Dropout(p_inp)
        self.encoder = QRNN(emb_enc.weight.size(1), n_hid, n_layers=n_layers, dropout=p_enc)
        # Projects the encoder hidden state to the encoder embedding width.
        # NOTE(review): this feeds the decoder, so it relies on emb_enc and
        # emb_dec having the same embedding size (both 300 here) — confirm if reused.
        self.out_enc = nn.Linear(n_hid, emb_enc.weight.size(1), bias=False)
        self.hid_dp = nn.Dropout(p_hid)
        self.emb_dec = emb_dec
        self.decoder = QRNN(emb_dec.weight.size(1), emb_dec.weight.size(1), n_layers=n_layers, dropout=p_dec)
        self.out_drop = nn.Dropout(p_out)
        self.out = nn.Linear(emb_dec.weight.size(1), emb_dec.weight.size(0))
        # Weight tying: the output projection shares the decoder embedding weights.
        self.out.weight.data = self.emb_dec.weight.data

    def forward(self, inp):
        bs,sl = inp.size()
        self.encoder.reset()
        self.decoder.reset()
        hid = self.initHidden(bs)
        emb = self.emb_enc_drop(self.emb_enc(inp))
        enc_out, hid = self.encoder(emb, hid)
        hid = self.out_enc(self.hid_dp(hid))
        # Greedy decoding: start every sequence from the BOS token and feed
        # back the argmax prediction as the next input.
        dec_inp = inp.new_zeros(bs).long() + self.bos_idx
        outs = []
        for i in range(self.max_len):
            emb = self.emb_dec(dec_inp).unsqueeze(1)
            out, hid = self.decoder(emb, hid)
            out = self.out(self.out_drop(out[:,0]))
            outs.append(out)
            dec_inp = out.max(1)[1]
            # Stop early once every sequence in the batch has emitted padding.
            if (dec_inp==self.pad_idx).all(): break
        # (bs, generated_len, vocab_size) logits.
        return torch.stack(outs, dim=1)

    def initHidden(self, bs): return one_param(self).new_zeros(self.n_layers, bs, self.n_hid)
"""
Explanation: The model in itself consists in an encoder and a decoder
The encoder is a (quasi) recurrent neural net and we feed it our input sentence, producing an output (that we discard for now) and a hidden state. That hidden state is then given to the decoder (an other RNN) which uses it in conjunction with the outputs it predicts to get produce the translation. We loop until the decoder produces a padding token (or at 30 iterations to make sure it's not an infinite loop at the beginning of training).
End of explanation
"""
def seq2seq_loss(out, targ, pad_idx=1):
    """Cross entropy between predictions and targets of possibly different lengths.

    Whichever of the two sequences is shorter gets padded (with `pad_idx`)
    along the sequence dimension before the flattened cross entropy is taken.
    """
    seq_len_targ = targ.size(1)
    seq_len_out = out.size(1)
    diff = seq_len_targ - seq_len_out
    if diff > 0:
        # Pad the logits along the sequence dimension (dim 1 of (bs, sl, vs)).
        out = F.pad(out, (0, 0, 0, diff, 0, 0), value=pad_idx)
    elif diff < 0:
        # Pad the targets along their sequence dimension (dim 1 of (bs, sl)).
        targ = F.pad(targ, (0, -diff, 0, 0), value=pad_idx)
    return CrossEntropyFlat()(out, targ)
def seq2seq_acc(out, targ, pad_idx=1):
    """Token-level accuracy after aligning prediction and target lengths.

    The shorter of the two sequences is padded with `pad_idx` so that the
    argmax of the logits can be compared position-by-position with `targ`.
    """
    len_t = targ.size(1)
    len_o = out.size(1)
    if len_t > len_o:
        # Pad the logits along their sequence dimension (dim 1 of (bs, sl, vs)).
        out = F.pad(out, (0, 0, 0, len_t - len_o, 0, 0), value=pad_idx)
    elif len_o > len_t:
        # Pad the targets along their sequence dimension (dim 1 of (bs, sl)).
        targ = F.pad(targ, (0, len_o - len_t, 0, 0), value=pad_idx)
    preds = out.argmax(2)
    return (preds == targ).float().mean()
"""
Explanation: Loss function
The loss pads output and target so that they are of the same size before using the usual flattened version of cross entropy. We do the same for accuracy.
End of explanation
"""
class NGram():
    """Hashable n-gram wrapper so that `Counter` can count token subsequences.

    Two NGrams compare equal iff their token sequences are element-wise
    equal; the hash encodes the sequence in base `max_n` so that equal
    n-grams hash identically (assuming all token ids are < max_n).
    """
    def __init__(self, ngram, max_n=5000): self.ngram,self.max_n = ngram,max_n
    def __eq__(self, other):
        # Guard against comparison with non-NGram objects (e.g. the raw token
        # ids that unigram counting produces): previously `other.ngram` raised
        # AttributeError; returning NotImplemented lets Python fall back to
        # an ordinary inequality instead.
        if not isinstance(other, NGram): return NotImplemented
        if len(self.ngram) != len(other.ngram): return False
        return np.all(np.array(self.ngram) == np.array(other.ngram))
    # Base-`max_n` encoding of the token sequence.
    def __hash__(self): return int(sum([o * self.max_n**i for i,o in enumerate(self.ngram)]))
def get_grams(x, n, max_n=5000):
    """Return the list of `n`-grams of sequence `x` (unigrams stay raw tokens)."""
    if n == 1: return x
    return [NGram(x[i:i+n], max_n=max_n) for i in range(len(x)-n+1)]
def get_correct_ngrams(pred, targ, n, max_n=5000):
    """Count clipped n-gram matches between `pred` and `targ` (BLEU numerator).

    Returns `(n_correct, n_pred_grams)`: the number of predicted n-grams that
    also occur in the target (each clipped by its target count), and the total
    number of predicted n-grams.
    """
    pred_cnt = Counter(get_grams(pred, n, max_n=max_n))
    targ_cnt = Counter(get_grams(targ, n, max_n=max_n))
    correct = sum(min(cnt, targ_cnt[gram]) for gram, cnt in pred_cnt.items())
    return correct, sum(pred_cnt.values())
class CorpusBLEU(Callback):
    "fastai callback accumulating corpus-level BLEU (n=1..4) over an epoch."
    def __init__(self, vocab_sz):
        self.vocab_sz = vocab_sz
        self.name = 'bleu'
    def on_epoch_begin(self, **kwargs):
        # Running totals: token counts, plus correct/total n-grams for n=1..4.
        self.pred_len,self.targ_len,self.corrects,self.counts = 0,0,[0]*4,[0]*4
    def on_batch_end(self, last_output, last_target, **kwargs):
        # Greedy decode the logits, then tally n-gram matches per sentence.
        last_output = last_output.argmax(dim=-1)
        for pred,targ in zip(last_output.cpu().numpy(),last_target.cpu().numpy()):
            self.pred_len += len(pred)
            self.targ_len += len(targ)
            for i in range(4):
                c,t = get_correct_ngrams(pred, targ, i+1, max_n=self.vocab_sz)
                self.corrects[i] += c
                self.counts[i] += t
    def on_epoch_end(self, last_metrics, **kwargs):
        # NOTE(review): raises ZeroDivisionError if any counts[i] is 0 (empty
        # epoch or predictions shorter than 4 tokens) — assumed not to happen.
        precs = [c/t for c,t in zip(self.corrects,self.counts)]
        # Brevity penalty, applied when predictions are shorter than targets.
        # `exp` is presumably supplied by the notebook's star imports — confirm.
        len_penalty = exp(1 - self.targ_len/self.pred_len) if self.pred_len < self.targ_len else 1
        # Geometric mean of the four n-gram precisions.
        bleu = len_penalty * ((precs[0]*precs[1]*precs[2]*precs[3]) ** 0.25)
        return add_metrics(last_metrics, bleu)
"""
Explanation: Bleu metric (see dedicated notebook)
In translation, the metric usually used is BLEU, see the corresponding notebook for the details.
End of explanation
"""
# Load the pretrained French/English embedding matrices saved earlier.
emb_enc = torch.load(path/'models'/'fr_emb.pth')
emb_dec = torch.load(path/'models'/'en_emb.pth')
model = Seq2SeqQRNN(emb_enc, emb_dec, 256, 30, n_layers=2)
learn = Learner(data, model, loss_func=seq2seq_loss, metrics=[seq2seq_acc, CorpusBLEU(len(data.y.vocab.itos))])
# Pick a learning rate from the LR finder curve, then train for 8 epochs.
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(8, 1e-2)
"""
Explanation: We load our pretrained embeddings to create the model.
End of explanation
"""
def get_predictions(learn, ds_type=DatasetType.Valid):
    """Run the model over `ds_type` and detokenize inputs, targets and outputs.

    NOTE(review): reconstruction always goes through `train_ds` processors;
    this is fine as long as every split shares the same vocab — confirm
    before reusing with new data.
    """
    learn.model.eval()
    inputs, targets, outputs = [],[],[]
    with torch.no_grad():
        for xb,yb in progress_bar(learn.dl(ds_type)):
            out = learn.model(xb)
            for x,y,z in zip(xb,yb,out):
                inputs.append(learn.data.train_ds.x.reconstruct(x))
                targets.append(learn.data.train_ds.y.reconstruct(y))
                # Greedy decode: take the argmax token at each position.
                outputs.append(learn.data.train_ds.y.reconstruct(z.argmax(1)))
    return inputs, targets, outputs
inputs, targets, outputs = get_predictions(learn)
# Spot-check a few validation sentences (input, reference, prediction).
inputs[700], targets[700], outputs[700]
inputs[701], targets[701], outputs[701]
inputs[2513], targets[2513], outputs[2513]
inputs[4000], targets[4000], outputs[4000]
"""
Explanation: So how good is our model? Let's see a few predictions.
End of explanation
"""
class TeacherForcing(LearnerCallback):
    """Feed the ground-truth targets to the model during training.

    The model's `pr_force` (probability of teacher forcing) decays linearly
    from 1 at epoch 0 down to 0.5 at `end_epoch`.
    """
    def __init__(self, learn, end_epoch):
        super().__init__(learn)
        self.end_epoch = end_epoch
    def on_batch_begin(self, last_input, last_target, train, **kwargs):
        # At train time, hand the targets to `forward` alongside the inputs.
        if not train: return
        return {'last_input': [last_input, last_target]}
    def on_epoch_begin(self, epoch, **kwargs):
        # Linear decay of the forcing probability across training.
        decay = 0.5 * epoch / self.end_epoch
        self.learn.model.pr_force = 1 - decay
class Seq2SeqQRNN(nn.Module):
    """QRNN seq2seq model with support for teacher forcing during training."""
    def __init__(self, emb_enc, emb_dec, n_hid, max_len, n_layers=2, p_inp:float=0.15, p_enc:float=0.25,
                 p_dec:float=0.1, p_out:float=0.35, p_hid:float=0.05, bos_idx:int=0, pad_idx:int=1):
        super().__init__()
        self.n_layers,self.n_hid,self.max_len,self.bos_idx,self.pad_idx = n_layers,n_hid,max_len,bos_idx,pad_idx
        self.emb_enc = emb_enc
        self.emb_enc_drop = nn.Dropout(p_inp)
        self.encoder = QRNN(emb_enc.weight.size(1), n_hid, n_layers=n_layers, dropout=p_enc)
        # Project the final encoder state down to the decoder embedding size.
        self.out_enc = nn.Linear(n_hid, emb_enc.weight.size(1), bias=False)
        self.hid_dp = nn.Dropout(p_hid)
        self.emb_dec = emb_dec
        self.decoder = QRNN(emb_dec.weight.size(1), emb_dec.weight.size(1), n_layers=n_layers, dropout=p_dec)
        self.out_drop = nn.Dropout(p_out)
        self.out = nn.Linear(emb_dec.weight.size(1), emb_dec.weight.size(0))
        # Weight tying: output projection shares the decoder embedding matrix.
        self.out.weight.data = self.emb_dec.weight.data
        # Probability of teacher forcing; driven by the TeacherForcing callback.
        self.pr_force = 0.
    def forward(self, inp, targ=None):
        """Encode `inp`, then decode greedily, optionally forcing with `targ`."""
        bs,sl = inp.size()
        hid = self.initHidden(bs)
        emb = self.emb_enc_drop(self.emb_enc(inp))
        enc_out, hid = self.encoder(emb, hid)
        hid = self.out_enc(self.hid_dp(hid))
        # Start every sequence from the BOS token.
        dec_inp = inp.new_zeros(bs).long() + self.bos_idx
        res = []
        for i in range(self.max_len):
            emb = self.emb_dec(dec_inp).unsqueeze(1)
            outp, hid = self.decoder(emb, hid)
            outp = self.out(self.out_drop(outp[:,0]))
            res.append(outp)
            # Greedy choice; `.data` detaches it from the autograd graph.
            dec_inp = outp.data.max(1)[1]
            if (dec_inp==self.pad_idx).all(): break
            # With probability pr_force, feed the ground-truth token instead.
            if (targ is not None) and (random.random()<self.pr_force):
                if i>=targ.shape[1]: break
                dec_inp = targ[:,i]
        return torch.stack(res, dim=1)
    # Zero initial hidden state on the model's device/dtype.
    def initHidden(self, bs): return one_param(self).new_zeros(self.n_layers, bs, self.n_hid)
emb_enc = torch.load(path/'models'/'fr_emb.pth')
emb_dec = torch.load(path/'models'/'en_emb.pth')
model = Seq2SeqQRNN(emb_enc, emb_dec, 256, 30, n_layers=2)
# Same learner as before, plus the teacher-forcing schedule over 8 epochs.
learn = Learner(data, model, loss_func=seq2seq_loss, metrics=[seq2seq_acc, CorpusBLEU(len(data.y.vocab.itos))],
                callback_fns=partial(TeacherForcing, end_epoch=8))
learn.fit_one_cycle(8, 1e-2)
inputs, targets, outputs = get_predictions(learn)
inputs[700],targets[700],outputs[700]
inputs[2513], targets[2513], outputs[2513]
inputs[4000], targets[4000], outputs[4000]
#get_bleu(learn)
"""
Explanation: It usually starts well, but falls back on easy words at the end of the question.
Teacher forcing
One way to help training is to help the decoder by feeding it the real targets instead of its predictions (if it starts with wrong words, it's very unlikely to give us the right translation). We do that all the time at the beginning, then progressively reduce the amount of teacher forcing.
End of explanation
"""
class Seq2SeqQRNN(nn.Module):
    """Teacher-forcing seq2seq QRNN with a bidirectional encoder."""
    def __init__(self, emb_enc, emb_dec, n_hid, max_len, n_layers=2, p_inp:float=0.15, p_enc:float=0.25,
                 p_dec:float=0.1, p_out:float=0.35, p_hid:float=0.05, bos_idx:int=0, pad_idx:int=1):
        super().__init__()
        self.n_layers,self.n_hid,self.max_len,self.bos_idx,self.pad_idx = n_layers,n_hid,max_len,bos_idx,pad_idx
        self.emb_enc = emb_enc
        self.emb_enc_drop = nn.Dropout(p_inp)
        self.encoder = QRNN(emb_enc.weight.size(1), n_hid, n_layers=n_layers, dropout=p_enc, bidirectional=True)
        # Encoder states are 2*n_hid wide (forward + backward directions).
        self.out_enc = nn.Linear(2*n_hid, emb_enc.weight.size(1), bias=False)
        self.hid_dp = nn.Dropout(p_hid)
        self.emb_dec = emb_dec
        self.decoder = QRNN(emb_dec.weight.size(1), emb_dec.weight.size(1), n_layers=n_layers, dropout=p_dec)
        self.out_drop = nn.Dropout(p_out)
        self.out = nn.Linear(emb_dec.weight.size(1), emb_dec.weight.size(0))
        # Weight tying with the decoder embeddings.
        self.out.weight.data = self.emb_dec.weight.data
        self.pr_force = 0.
    def forward(self, inp, targ=None):
        bs,sl = inp.size()
        hid = self.initHidden(bs)
        emb = self.emb_enc_drop(self.emb_enc(inp))
        enc_out, hid = self.encoder(emb, hid)
        # Regroup directions: (2*n_layers, bs, n_hid) -> (n_layers, bs, 2*n_hid)
        # so forward and backward states of each layer sit side by side.
        hid = hid.view(2,self.n_layers, bs, self.n_hid).permute(1,2,0,3).contiguous()
        hid = self.out_enc(self.hid_dp(hid).view(self.n_layers, bs, 2*self.n_hid))
        dec_inp = inp.new_zeros(bs).long() + self.bos_idx
        res = []
        for i in range(self.max_len):
            emb = self.emb_dec(dec_inp).unsqueeze(1)
            outp, hid = self.decoder(emb, hid)
            outp = self.out(self.out_drop(outp[:,0]))
            res.append(outp)
            dec_inp = outp.data.max(1)[1]
            if (dec_inp==self.pad_idx).all(): break
            # Teacher forcing with probability pr_force (set by the callback).
            if (targ is not None) and (random.random()<self.pr_force):
                if i>=targ.shape[1]: break
                dec_inp = targ[:,i]
        return torch.stack(res, dim=1)
    # 2*n_layers initial states because the encoder is bidirectional.
    def initHidden(self, bs): return one_param(self).new_zeros(2*self.n_layers, bs, self.n_hid)
emb_enc = torch.load(path/'models'/'fr_emb.pth')
emb_dec = torch.load(path/'models'/'en_emb.pth')
model = Seq2SeqQRNN(emb_enc, emb_dec, 256, 30, n_layers=2)
# Train the bidirectional variant, still with scheduled teacher forcing.
learn = Learner(data, model, loss_func=seq2seq_loss, metrics=[seq2seq_acc, CorpusBLEU(len(data.y.vocab.itos))],
                callback_fns=partial(TeacherForcing, end_epoch=8))
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(8, 1e-2)
inputs, targets, outputs = get_predictions(learn)
inputs[700], targets[700], outputs[700]
inputs[701], targets[701], outputs[701]
inputs[4001], targets[4001], outputs[4001]
#get_bleu(learn)
"""
Explanation: Bidir
A second thing that might help is to use a bidirectional model for the encoder.
End of explanation
"""
def init_param(*sz):
    """Trainable parameter of shape `sz`, scaled by 1/sqrt of its first dim."""
    weight = torch.randn(sz) / math.sqrt(sz[0])
    return nn.Parameter(weight)
class Seq2SeqQRNN(nn.Module):
    """Teacher-forcing seq2seq QRNN with bidirectional encoder and attention."""
    def __init__(self, emb_enc, emb_dec, n_hid, max_len, n_layers=2, p_inp:float=0.15, p_enc:float=0.25,
                 p_dec:float=0.1, p_out:float=0.35, p_hid:float=0.05, bos_idx:int=0, pad_idx:int=1):
        super().__init__()
        self.n_layers,self.n_hid,self.max_len,self.bos_idx,self.pad_idx = n_layers,n_hid,max_len,bos_idx,pad_idx
        self.emb_enc = emb_enc
        self.emb_enc_drop = nn.Dropout(p_inp)
        self.encoder = QRNN(emb_enc.weight.size(1), n_hid, n_layers=n_layers, dropout=p_enc, bidirectional=True)
        self.out_enc = nn.Linear(2*n_hid, emb_enc.weight.size(1), bias=False)
        self.hid_dp = nn.Dropout(p_hid)
        self.emb_dec = emb_dec
        emb_sz = emb_dec.weight.size(1)
        # The decoder input is the token embedding concatenated with the
        # attention context vector, hence emb_sz + 2*n_hid.
        self.decoder = QRNN(emb_sz + 2*n_hid, emb_dec.weight.size(1), n_layers=n_layers, dropout=p_dec)
        self.out_drop = nn.Dropout(p_out)
        self.out = nn.Linear(emb_sz, emb_dec.weight.size(0))
        self.out.weight.data = self.emb_dec.weight.data #Try tying
        # Additive attention parameters: keys from the encoder outputs, query
        # from the top decoder hidden state, scored against the vector V.
        self.enc_att = nn.Linear(2*n_hid, emb_sz, bias=False)
        self.hid_att = nn.Linear(emb_sz, emb_sz)
        self.V = init_param(emb_sz)
        self.pr_force = 0.
    def forward(self, inp, targ=None):
        bs,sl = inp.size()
        hid = self.initHidden(bs)
        emb = self.emb_enc_drop(self.emb_enc(inp))
        enc_out, hid = self.encoder(emb, hid)
        # Regroup bidirectional states: (2*n_layers, bs, n_hid) ->
        # (n_layers, bs, 2*n_hid), then project to the decoder size.
        hid = hid.view(2,self.n_layers, bs, self.n_hid).permute(1,2,0,3).contiguous()
        hid = self.out_enc(self.hid_dp(hid).view(self.n_layers, bs, 2*self.n_hid))
        dec_inp = inp.new_zeros(bs).long() + self.bos_idx
        res = []
        # Attention keys depend only on the encoder outputs: compute them once.
        enc_att = self.enc_att(enc_out)
        for i in range(self.max_len):
            # Additive (Bahdanau-style) attention using the top decoder state.
            hid_att = self.hid_att(hid[-1])
            u = torch.tanh(enc_att + hid_att[:,None])
            attn_wgts = F.softmax(u @ self.V, 1)
            # Context vector: attention-weighted sum of the encoder outputs.
            ctx = (attn_wgts[...,None] * enc_out).sum(1)
            emb = self.emb_dec(dec_inp)
            outp, hid = self.decoder(torch.cat([emb, ctx], 1)[:,None], hid)
            outp = self.out(self.out_drop(outp[:,0]))
            res.append(outp)
            dec_inp = outp.data.max(1)[1]
            if (dec_inp==self.pad_idx).all(): break
            # Teacher forcing with probability pr_force (set by the callback).
            if (targ is not None) and (random.random()<self.pr_force):
                if i>=targ.shape[1]: break
                dec_inp = targ[:,i]
        return torch.stack(res, dim=1)
    def initHidden(self, bs): return one_param(self).new_zeros(2*self.n_layers, bs, self.n_hid)
emb_enc = torch.load(path/'models'/'fr_emb.pth')
emb_dec = torch.load(path/'models'/'en_emb.pth')
model = Seq2SeqQRNN(emb_enc, emb_dec, 256, 30, n_layers=2)
# Attention variant is trained with a lower peak learning rate (3e-3).
learn = Learner(data, model, loss_func=seq2seq_loss, metrics=[seq2seq_acc, CorpusBLEU(len(data.y.vocab.itos))],
                callback_fns=partial(TeacherForcing, end_epoch=8))
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(8, 3e-3)
inputs, targets, outputs = get_predictions(learn)
inputs[700], targets[700], outputs[700]
inputs[701], targets[701], outputs[701]
inputs[4002], targets[4002], outputs[4002]
"""
Explanation: Attention
Attention is a technique that uses the output of our encoder: instead of discarding it entirely, we use it with our hidden state to pay attention to specific words in the input sentence for the predictions in the output sentence. Specifically, we compute attention weights, then add to the input of the decoder the linear combination of the output of the encoder, with those attention weights.
End of explanation
"""
|
xboard/xboard.github.io | ipynb/IDH-Longevity.ipynb | mpl-2.0 | %matplotlib inline
import pandas as pd
import requests as req
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import ttest_ind, ttest_rel
from scipy.stats import gaussian_kde
from statsmodels.formula.api import ols, mixedlm, gee
from statsmodels.stats.outliers_influence import OLSInfluence
from statsmodels.regression.linear_model import OLSResults
from patsy import dmatrix
np.set_printoptions(precision=3)
"""
Explanation: IDH
End of explanation
"""
# HDI (IDH) per Brazilian state for 2000 and 2010, indexed by state name.
idhm_df = pd.read_csv("../data/brazil_states_idhl_2000_2010.csv", index_col=0)
idhm_df
"""
Explanation: Carregando dados de IDH-M da Wikipedia
Fontes
Estados: http://pt.wikipedia.org/wiki/Lista_de_unidades_federativas_do_Brasil_por_IDH
End of explanation
"""
idhm_df.describe()
f = plt.figure(14)
idhm_df[["I2000","I2010","Ratio"]].hist(bins=10)
plt.figure()
# Kernel-density estimates of the two HDI snapshots and their ratio.
sns.kdeplot(idhm_df["I2000"], shade=True);
sns.kdeplot(idhm_df["I2010"], shade=True);
sns.kdeplot(idhm_df["Ratio"], shade=True);
"""
Explanation: Análise
End of explanation
"""
# Paired t-test: is the 2000 -> 2010 HDI change statistically significant?
ttest_rel(idhm_df['I2000'], idhm_df['I2010'])
import scipy
import scikits.bootstrap as bootstrap
# compute 95% confidence intervals around the mean
CIs00 = bootstrap.ci(data=idhm_df["I2000"])
CIs10 = bootstrap.ci(data=idhm_df["I2010"])
CIsR = bootstrap.ci(data=idhm_df["Ratio"])
print("IDHM 2000 mean 95% confidence interval. Low={0:.3f}\tHigh={1:.3f}".format(*tuple(CIs00)))
print("IDHM 2010 mean 95% confidence interval. Low={0:.3f}\tHigh={1:.3f}".format(*tuple(CIs10)))
print("IDHM ratio mean 95% confidence interval. Low={0:.3f}\tHigh={1:.3f}".format(*tuple(CIsR)))
# Same intervals around the median. `np.median` replaces `scipy.median`,
# a deprecated NumPy re-export that was removed from modern SciPy; the
# computed value is identical.
CIs00 = bootstrap.ci(data=idhm_df["I2000"], statfunction=np.median)
CIs10 = bootstrap.ci(data=idhm_df["I2010"], statfunction=np.median)
CIsR = bootstrap.ci(data=idhm_df["Ratio"], statfunction=np.median)
print("IDHM 2000 median 95% confidence interval. Low={0:.3f}\tHigh={1:.3f}".format(*tuple(CIs00)))
print("IDHM 2010 median 95% confidence interval. Low={0:.3f}\tHigh={1:.3f}".format(*tuple(CIs10)))
print("IDHM ratio median 95% confidence interval. Low={0:.3f}\tHigh={1:.3f}".format(*tuple(CIsR)))
"""
Explanation: Testando hipótese
A diferença média entre os IDHs de 2000 e 2010 é estatisticamente significativa?
End of explanation
"""
# Share of the 2000-2010 decade each party governed every state, plus region.
state_parties_df = pd.read_csv("../data/brazil_states_parties_2000-2010.csv", index_col=0)
state_parties_df
state_regions_df = pd.read_csv("../data/brazil_states_regions.csv", index_col=0)
state_regions_df
# Join HDI, party shares and region into a single frame keyed by state.
df = idhm_df.merge(state_parties_df, on="Estado")
df = df.merge(state_regions_df, on="Estado")
df
sns.factorplot("idh_level_2000","Ratio",data=df, kind="box")
sns.factorplot("Regiao","Ratio",data=df, kind="box")
sns.set()
sns.pairplot(df, hue="idh_level_2000", size=2.5)
sns.coefplot("Ratio ~ PT + PSDB + Outros + C(idh_level_2000) - 1", df, palette="Set1");
sns.coefplot("Ratio ~ Outros==0 + Outros - 1", df, palette="Set1");
sns.set(style="whitegrid")
sns.residplot(df.Outros,df.Ratio, color="navy", lowess=True, order=1)
sns.coefplot("Ratio ~ PT==0 + PT - 1", df, palette="Set1");
sns.set(style="whitegrid")
# Residual plots restricted to states the party actually governed (>0 share).
sns.residplot(df[df.PT>0].PT, df[df.PT>0].Ratio, color="navy", order=1)
sns.coefplot("Ratio ~ PSDB==0 + PSDB + np.multiply(PSDB, PSDB) - 1", df, palette="Set1");
sns.set(style="whitegrid")
sns.residplot(df[df.PSDB>0].PSDB, df[df.PSDB>0].Ratio, color="navy", lowess=True, order=2)
"""
Explanation: A resposta de diversos testes, para um nível de 5% de significância, mostra que há fortes evidências que sim.
Montando percentual de impacto da administração de cada partido em cada Estado da Federação.
End of explanation
"""
sns.coefplot("Ratio ~ PT + PSDB + Outros + C(idh_level_2000) - 1", df, palette="Set1");
sns.coefplot("Ratio ~ PT + PSDB + C(idh_level_2000)", df, palette="Set1");
sns.coefplot("Ratio ~ PT + Outros + C(idh_level_2000)", df, palette="Set1");
sns.coefplot("Ratio ~ PSDB + Outros + C(idh_level_2000)", df, palette="Set1");
# OLS of the HDI ratio on party shares, controlling for starting HDI and region.
formula = "Ratio ~ PT + PSDB + C(idh_level_2000) + C(Regiao)"
model = ols(formula, df).fit()
model.summary()
"""
Explanation: Impacto por partido ou nível do IDH-M em 2000
End of explanation
"""
sns.lmplot("I2000", "I2010", data=df, legend=True, size=10, n_boot=10000, ci=95)
sns.jointplot("I2000", "I2010", data=df, kind='resid',color=sns.color_palette()[2], size=10)
sns.coefplot("I2010 ~ I2000", data=df, intercept=True)
sns.coefplot("I2010 ~ I2000", data=df, groupby="idh_level_2000", intercept=True)
sns.lmplot("I2000", "I2010", data=df, hue="idh_level_2000", col="idh_level_2000", legend=True, size=6, n_boot=10000, ci=99)
sns.lmplot("I2000", "I2010", data=df, hue="Regiao", col="Regiao", col_wrap=2, legend=True, size=6, n_boot=10000, ci=99)
md = ols("I2010 ~ I2000 + C(Regiao)", df).fit()
print(md.summary())
rrr = md.get_robustcov_results()
# Benjamini-Hochberg outlier test at a 10% false-discovery rate.
rrp = rrr.outlier_test("fdr_bh", 0.1)
idx = rrp[rrp["fdr_bh(p)"] <= 0.1].index
# NOTE(review): `df.ix` was removed in pandas 1.0 — migrate to `.loc` when
# upgrading pandas (confirm whether `idx` holds labels or positions first).
print("Estados fora da média:\n",df.ix[idx.values])
rrp[rrp["fdr_bh(p)"] <= 0.1]
"""
Explanation: Não foi possível observar diferença significantiva entre os partidos.
Quais estados possuem diferença significativa?
Comparando 2010 com 2000
End of explanation
"""
import statsmodels.api as sm
# Generalized estimating equations: states grouped by their 2000 HDI level,
# with an exchangeable within-group correlation structure.
md = gee("Ratio ~ PT + PSDB ", df.idh_level_2000, df, cov_struct=sm.cov_struct.Exchangeable())
mdf = md.fit()
print(mdf.summary())
print(mdf.cov_struct.summary())
# Residuals vs fitted values to eyeball model misspecification.
plt.plot(mdf.fittedvalues, mdf.resid, 'o', alpha=0.5)
plt.xlabel("Fitted values", size=17)
plt.ylabel("Residuals", size=17)
sns.jointplot(mdf.fittedvalues, mdf.resid, size=10, kind="kde")
"""
Explanation: GEE
End of explanation
"""
|
tschinz/iPython_Workspace | 01_Mine/MachineLearning/tensorflow-examples_nb/0_Prerequisite/mnist_dataset_intro.ipynb | gpl-2.0 | # Import MNIST
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Load data
X_train = mnist.train.images
Y_train = mnist.train.labels
X_test = mnist.test.images
Y_test = mnist.test.labels
"""
Explanation: MNIST Dataset Introduction
Most examples are using MNIST dataset of handwritten digits. The dataset contains 60,000 examples for training and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28x28 pixels) with values from 0 to 1. For simplicity, each image has been flatten and converted to a 1-D numpy array of 784 features (28*28).
Overview
Usage
In our examples, we are using TensorFlow input_data.py script to load that dataset.
It is quite useful for managing our data, and handle:
Dataset downloading
Loading the entire dataset into numpy array:
End of explanation
"""
# Get the next 64 images array and labels
# batch_X is flattened images, batch_Y one-hot labels (one_hot=True above).
batch_X, batch_Y = mnist.train.next_batch(64)
"""
Explanation: A next_batch function that can iterate over the whole dataset and return only the desired fraction of the dataset samples (in order to save memory and avoid loading the entire dataset).
End of explanation
"""
|
scikit-optimize/scikit-optimize.github.io | 0.8/notebooks/auto_examples/sampler/initial-sampling-method.ipynb | bsd-3-clause | print(__doc__)
import numpy as np
np.random.seed(123)
import matplotlib.pyplot as plt
from skopt.space import Space
from skopt.sampler import Sobol
from skopt.sampler import Lhs
from skopt.sampler import Halton
from skopt.sampler import Hammersly
from skopt.sampler import Grid
from scipy.spatial.distance import pdist
def plot_searchspace(x, title):
    """Scatter the 2-D sample set `x` on fixed axes with the given title."""
    pts = np.array(x)
    xs, ys = pts[:, 0], pts[:, 1]
    fig, ax = plt.subplots()
    # Small solid markers plus large translucent halos around each sample.
    plt.plot(xs, ys, 'bo', label='samples')
    plt.plot(xs, ys, 'bo', markersize=80, alpha=0.5)
    ax.set_xlabel("X1")
    ax.set_xlim([-5, 10])
    ax.set_ylabel("X2")
    ax.set_ylim([0, 15])
    plt.title(title)
n_samples = 10
# Two continuous dimensions: x1 in [-5, 10], x2 in [0, 15].
space = Space([(-5., 10.), (0., 15.)])
# space.set_transformer("normalize")
"""
Explanation: Comparing initial sampling methods
Holger Nahrstaedt 2020 Sigurd Carlsen October 2019
.. currentmodule:: skopt
When doing bayesian optimization we often want to reserve some of the
early part of the optimization to pure exploration. By default the
optimizer suggests purely random samples for the first n_initial_points
(10 by default). The downside to this is that there is no guarantee that
these samples are spread out evenly across all the dimensions.
Sampling methods as Latin hypercube, Sobol, Halton and Hammersly
take advantage of the fact that we know beforehand how many random
points we want to sample. Then these points can be "spread out" in
such a way that each dimension is explored.
See also the example on an integer space
sphx_glr_auto_examples_initial_sampling_method_integer.py
End of explanation
"""
x = space.rvs(n_samples)
plot_searchspace(x, "Random samples")
# Collect pairwise distances per method for the final boxplot comparison.
pdist_data = []
x_label = []
pdist_data.append(pdist(x).flatten())
x_label.append("random")
"""
Explanation: Random sampling
End of explanation
"""
# Quasi-random Sobol sequence.
sobol = Sobol()
x = sobol.generate(space.dimensions, n_samples)
plot_searchspace(x, 'Sobol')
pdist_data.append(pdist(x).flatten())
x_label.append("sobol")
"""
Explanation: Sobol
End of explanation
"""
# Classic Latin hypercube: one sample per row/column stratum.
lhs = Lhs(lhs_type="classic", criterion=None)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'classic LHS')
pdist_data.append(pdist(x).flatten())
x_label.append("lhs")
"""
Explanation: Classic Latin hypercube sampling
End of explanation
"""
# Centered Latin hypercube: samples at the center of each stratum.
lhs = Lhs(lhs_type="centered", criterion=None)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'centered LHS')
pdist_data.append(pdist(x).flatten())
x_label.append("center")
"""
Explanation: Centered Latin hypercube sampling
End of explanation
"""
# LHS optimized to maximize the minimum pairwise distance.
lhs = Lhs(criterion="maximin", iterations=10000)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'maximin LHS')
pdist_data.append(pdist(x).flatten())
x_label.append("maximin")
"""
Explanation: Maximin optimized hypercube sampling
End of explanation
"""
# LHS optimized to minimize correlation between dimensions.
lhs = Lhs(criterion="correlation", iterations=10000)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'correlation LHS')
pdist_data.append(pdist(x).flatten())
x_label.append("corr")
"""
Explanation: Correlation optimized hypercube sampling
End of explanation
"""
# LHS optimized on the ratio criterion.
lhs = Lhs(criterion="ratio", iterations=10000)
x = lhs.generate(space.dimensions, n_samples)
plot_searchspace(x, 'ratio LHS')
pdist_data.append(pdist(x).flatten())
x_label.append("ratio")
"""
Explanation: Ratio optimized hypercube sampling
End of explanation
"""
# Low-discrepancy Halton sequence.
halton = Halton()
x = halton.generate(space.dimensions, n_samples)
plot_searchspace(x, 'Halton')
pdist_data.append(pdist(x).flatten())
x_label.append("halton")
"""
Explanation: Halton sampling
End of explanation
"""
# Low-discrepancy Hammersly set.
hammersly = Hammersly()
x = hammersly.generate(space.dimensions, n_samples)
plot_searchspace(x, 'Hammersly')
pdist_data.append(pdist(x).flatten())
x_label.append("hammersly")
"""
Explanation: Hammersly sampling
End of explanation
"""
# Regular grid including the search-space borders.
grid = Grid(border="include", use_full_layout=False)
x = grid.generate(space.dimensions, n_samples)
plot_searchspace(x, 'Grid')
pdist_data.append(pdist(x).flatten())
x_label.append("grid")
"""
Explanation: Grid sampling
End of explanation
"""
# Compare methods by the distribution of pairwise distances they produce.
fig, ax = plt.subplots()
ax.boxplot(pdist_data)
plt.grid(True)
plt.ylabel("pdist")
_ = ax.set_ylim(0, 12)
_ = ax.set_xticklabels(x_label, rotation=45, fontsize=8)
"""
Explanation: Pdist boxplot of all methods
This boxplot shows the distance between all generated points using
Euclidian distance. The higher the value, the better the sampling method.
It can be seen that random has the worst performance
End of explanation
"""
|
jhillairet/scikit-rf | doc/source/tutorials/Connecting_Networks.ipynb | bsd-3-clause | import skrf as rf
"""
Explanation: Connecting Networks
scikit-rf supports the connection of arbitrary ports of N-port networks. It accomplishes this using an algorithm called sub-network growth[1], available through the function connect(). Note that this function takes into account port impedances. If two connected ports have different port impedances, an appropriate impedance mismatch is inserted. This capability is illustrated here with situations often encountered.
End of explanation
"""
line = rf.data.wr2p2_line # 2-port
short = rf.data.wr2p2_short # 1-port
# `**` cascades networks: port 1 of `line` is connected to port 0 of `short`.
delayshort = line ** short # --> 1-port Network
print(delayshort)
"""
Explanation: Cascading 2-port and 1-port Networks
A common problem is to connect two Networks one to the other, also known as cascading Networks, which creates a new Network. The figure below illustrates sile simple situations, where the port numbers are identified in gray:
<img src="figures/networks_connecting_2_2ports.svg" width="600">
or,
<img src="figures/networks_connecting_2port_1port.svg" width="600">
Let's illustrate this by connecting a transmission line (2-port Network) to a short-circuit (1-port Network) to create a delay short (1-port Network):
<img src="figures/networks_delay_short.svg" width="600">
Cascading Networks being a frequent operation, it can done conveniently through the ** operator or with the cascade function:
End of explanation
"""
# `cascade` is the functional equivalent of the ** operator.
delayshort2 = rf.cascade(line, short)
print(delayshort2 == delayshort) # the result is the same
"""
Explanation: or, equivalently using the cascade() function:
End of explanation
"""
# Explicit form: connect port 1 of `line` to port 0 of `short`.
delayshort3 = rf.connect(line, 1, short, 0)
print(delayshort3 == delayshort)
"""
Explanation: It is of course possible to connect two 2-port Networks together using the connect() function. The connect() function requires the Networks and the port numbers to connect together. In our example, the port 1 of the line is connected to the port 0 of the short:
End of explanation
"""
line1 = rf.data.wr2p2_line # 2-port
line2 = rf.data.wr2p2_line # 2-port
line3 = rf.data.wr2p2_line # 2-port
line4 = rf.data.wr2p2_line # 2-port
short = rf.data.wr2p2_short # 1-port
# Chained ** and cascade_list build the same resulting 1-port network.
chain1 = line1 ** line2 ** line3 ** line4 ** short
chain2 = rf.cascade_list([line1, line2, line3, line4, short])
print(chain1 == chain2)
"""
Explanation: One often needs to cascade a chain Networks together:
<img src="figures/networks_connecting_N_2ports.svg" width="700">
or,
<img src="figures/networks_connecting_N_2ports_1port.svg" width="700">
which can be realized using chained ** or the convenient function cascade_list:
End of explanation
"""
# Ideal 3-way splitter (3-port network).
tee = rf.data.tee
"""
Explanation: Cascacing 2N-port Networks
The cascading operator ** also works for to 2N-port Networks, width the following port scheme:
<img src="figures/networks_connecting_2_2Nports.svg" width="600">
It also works for multiple 2N-port Network. For example, assuming you want to cascade three 4-port Network ntw1, ntw2 and ntw3, you can use:
resulting_ntw = ntw1 ** ntw2 ** ntw3
This is illustrated in this example on balanced Networks.
Cascading Multi-port Networks
To make specific connections between multi-port Networks, two solutions are available, and the choice mostly depends on the complexity of the circuit one wants to build:
For reduced number of connection(s): the connect() function
For advanced connections between many arbitrary N-port Networks, the Circuit object is more relevant since it allows defining explicitly the connections between ports and Networks. For more information, please refer to the Circuit documentation.
As an example, terminating one of the port of an a 3-port Network, such as an ideal 3-way splitter:
<img src="figures/networks_connecting_3port_1port.svg" width="600">
can be done like:
End of explanation
"""
# Terminating port 1 of the 3-port tee with a 1-port leaves a 2-port network.
terminated_tee = rf.connect(tee, 1, delayshort, 0)
terminated_tee
"""
Explanation: To connect port 1 of the tee, to port 0 of the delay short,
End of explanation
"""
# Distinct port impedances make the port renumbering visible after connect.
tee.z0 = [1, 2, 3]
line.z0 = [10, 20]
# the resulting network is:
rf.connect(tee, 1, line, 0)
"""
Explanation: In the previous example, the port #2 of the 3-port Network tee becomes the port #1 of the resulting 2-port Network.
Multiple Connections of Multi-port Networks
Keeping track of the port numbering when using multiple time the connect function can be tedious (this is the reason why the Circuit object can be simpler to use).
Let's illustrate this with the following example: connecting the port #1 of a tee-junction (3-port) to the port #0 of a transmission line (2-port):
<img src="figures/networks_connecting_3port_2port.svg" width="600">
To keep track of the port scheme after the connection operation, let's change the port characteristic impedances (in red in the figure above):
End of explanation
"""
|
deepchem/deepchem | examples/tutorials/The_Basic_Tools_of_the_Deep_Life_Sciences.ipynb | mit | !pip install --pre deepchem[tensorflow]
"""
Explanation: The Basic Tools of the Deep Life Sciences
Welcome to DeepChem's introductory tutorial for the deep life sciences. This series of notebooks is a step-by-step guide for you to get to know the new tools and techniques needed to do deep learning for the life sciences. We'll start from the basics, assuming that you're new to machine learning and the life sciences, and build up a repertoire of tools and techniques that you can use to do meaningful work in the life sciences.
Scope: This tutorial will encompass both the machine learning and data handling needed to build systems for the deep life sciences.
Colab
This tutorial and the rest in the sequences are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link.
Why do the DeepChem Tutorial?
1) Career Advancement: Applying AI in the life sciences is a booming
industry at present. There are a host of newly funded startups and initiatives
at large pharmaceutical and biotech companies centered around AI. Learning and
mastering DeepChem will bring you to the forefront of this field and will
prepare you to enter a career in this field.
2) Humanitarian Considerations: Disease is the oldest cause of human
suffering. From the dawn of human civilization, humans have suffered from pathogens,
cancers, and neurological conditions. One of the greatest achievements of
the last few centuries has been the development of effective treatments for
many diseases. By mastering the skills in this tutorial, you will be able to
stand on the shoulders of the giants of the past to help develop new
medicine.
3) Lowering the Cost of Medicine: The art of developing new medicine is
currently an elite skill that can only be practiced by a small core of expert
practitioners. By enabling the growth of open source tools for drug discovery,
you can help democratize these skills and open up drug discovery to more
competition. Increased competition can help drive down the cost of medicine.
Getting Extra Credit
If you're excited about DeepChem and want to get more involved, there are some things that you can do right now:
Star DeepChem on GitHub! - https://github.com/deepchem/deepchem
Join the DeepChem forums and introduce yourself! - https://forum.deepchem.io
Say hi on the DeepChem gitter - https://gitter.im/deepchem/Lobby
Make a YouTube video teaching the contents of this notebook.
Prerequisites
This tutorial sequence will assume some basic familiarity with the Python data science ecosystem. We will assume that you have familiarity with libraries such as Numpy, Pandas, and TensorFlow. We'll provide some brief refreshers on basics through the tutorial so don't worry if you're not an expert.
Setup
The first step is to get DeepChem up and running. We recommend using Google Colab to work through this tutorial series. You'll also need to run the following commands to get DeepChem installed on your colab notebook. We are going to use a model based on tensorflow, because of that we've added [tensorflow] to the pip install command to ensure the necessary dependencies are also installed
End of explanation
"""
import deepchem as dc
dc.__version__
"""
Explanation: You can of course run this tutorial locally if you prefer. In this case, don't run the above cell since it will download and install Anaconda on your local machine. In either case, we can now import the deepchem package to play with.
End of explanation
"""
tasks, datasets, transformers = dc.molnet.load_delaney(featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = datasets
"""
Explanation: Training a Model with DeepChem: A First Example
Deep learning can be used to solve many sorts of problems, but the basic workflow is usually the same. Here are the typical steps you follow.
Select the data set you will train your model on (or create a new data set if there isn't an existing suitable one).
Create the model.
Train the model on the data.
Evaluate the model on an independent test set to see how well it works.
Use the model to make predictions about new data.
With DeepChem, each of these steps can be as little as one or two lines of Python code. In this tutorial we will walk through a basic example showing the complete workflow to solve a real world scientific problem.
The problem we will solve is predicting the solubility of small molecules given their chemical formulas. This is a very important property in drug development: if a proposed drug isn't soluble enough, you probably won't be able to get enough into the patient's bloodstream to have a therapeutic effect. The first thing we need is a data set of measured solubilities for real molecules. One of the core components of DeepChem is MoleculeNet, a diverse collection of chemical and molecular data sets. For this tutorial, we can use the Delaney solubility data set. The property of solubility in this data set is reported in log(solubility) where solubility is measured in moles/liter.
End of explanation
"""
model = dc.models.GraphConvModel(n_tasks=1, mode='regression', dropout=0.2)
"""
Explanation: I won't say too much about this code right now. We will see many similar examples in later tutorials. There are two details I do want to draw your attention to. First, notice the featurizer argument passed to the load_delaney() function. Molecules can be represented in many ways. We therefore tell it which representation we want to use, or in more technical language, how to "featurize" the data. Second, notice that we actually get three different data sets: a training set, a validation set, and a test set. Each of these serves a different function in the standard deep learning workflow.
Now that we have our data, the next step is to create a model. We will use a particular kind of model called a "graph convolutional network", or "graphconv" for short.
End of explanation
"""
model.fit(train_dataset, nb_epoch=100)
"""
Explanation: Here again I will not say much about the code. Later tutorials will give lots more information about GraphConvModel, as well as other types of models provided by DeepChem.
We now need to train the model on the data set. We simply give it the data set and tell it how many epochs of training to perform (that is, how many complete passes through the data to make).
End of explanation
"""
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
print("Training set score:", model.evaluate(train_dataset, [metric], transformers))
print("Test set score:", model.evaluate(test_dataset, [metric], transformers))
"""
Explanation: If everything has gone well, we should now have a fully trained model! But do we? To find out, we must evaluate the model on the test set. We do that by selecting an evaluation metric and calling evaluate() on the model. For this example, let's use the Pearson correlation, also known as r<sup>2</sup>, as our metric. We can evaluate it on both the training set and test set.
End of explanation
"""
solubilities = model.predict_on_batch(test_dataset.X[:10])
for molecule, solubility, test_solubility in zip(test_dataset.ids, solubilities, test_dataset.y):
print(solubility, test_solubility, molecule)
"""
Explanation: Notice that it has a higher score on the training set than the test set. Models usually perform better on the particular data they were trained on than they do on similar but independent data. This is called "overfitting", and it is the reason it is essential to evaluate your model on an independent test set.
Our model still has quite respectable performance on the test set. For comparison, a model that produced totally random outputs would have a correlation of 0, while one that made perfect predictions would have a correlation of 1. Our model does quite well, so now we can use it to make predictions about other molecules we care about.
Since this is just a tutorial and we don't have any other molecules we specifically want to predict, let's just use the first ten molecules from the test set. For each one we print out the chemical structure (represented as a SMILES string) and the predicted log(solubility). To put these predictions in
context, we print out the log(solubility) values from the test set as well.
End of explanation
"""
@manual{Intro1,
title={The Basic Tools of the Deep Life Sciences},
organization={DeepChem},
author={Ramsundar, Bharath},
howpublished = {\url{https://github.com/deepchem/deepchem/blob/master/examples/tutorials/The_Basic_Tools_of_the_Deep_Life_Sciences.ipynb}},
year={2021},
}
"""
Explanation: Congratulations! Time to join the Community!
Congratulations on completing this tutorial notebook! If you enjoyed working through the tutorial, and want to continue working with DeepChem, we encourage you to finish the rest of the tutorials in this series. You can also help the DeepChem community in the following ways:
Star DeepChem on GitHub
This helps build awareness of the DeepChem project and the tools for open source drug discovery that we're trying to build.
Join the DeepChem Gitter
The DeepChem Gitter hosts a number of scientists, developers, and enthusiasts interested in deep learning for the life sciences. Join the conversation!
Citing This Tutorial
If you found this tutorial useful please consider citing it using the provided BibTeX.
End of explanation
"""
|
Diyago/Machine-Learning-scripts | DEEP LEARNING/segmentation/Kaggle TGS Salt Identification Challenge/keras top solution.ipynb | apache-2.0 | import numpy as np
import pandas as pd
import gc
import keras
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
import seaborn as sns
sns.set_style("white")
from sklearn.model_selection import train_test_split
from skimage.transform import resize
import tensorflow as tf
import keras.backend as K
from keras.losses import binary_crossentropy
from keras.preprocessing.image import load_img
from keras import Model
from keras.callbacks import ModelCheckpoint
from keras.layers import Input, Conv2D, Conv2DTranspose, MaxPooling2D, concatenate, Dropout,BatchNormalization
from keras.layers import Conv2D, Concatenate, MaxPooling2D
from keras.layers import UpSampling2D, Dropout, BatchNormalization
from tqdm import tqdm_notebook
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.utils import conv_utils
from keras.utils.data_utils import get_file
from keras.engine.topology import get_source_inputs
from keras.engine import InputSpec
from keras import backend as K
from keras.layers import LeakyReLU
from keras.layers import ZeroPadding2D
from keras.losses import binary_crossentropy
import keras.callbacks as callbacks
from keras.callbacks import Callback
from keras.applications.xception import Xception
from keras.layers import multiply
from keras import optimizers
from keras.legacy import interfaces
from keras.utils.generic_utils import get_custom_objects
from keras.engine.topology import Input
from keras.engine.training import Model
from keras.layers.convolutional import Conv2D, UpSampling2D, Conv2DTranspose
from keras.layers.core import Activation, SpatialDropout2D
from keras.layers.merge import concatenate
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D
from keras.layers import Input,Dropout,BatchNormalization,Activation,Add
from keras.regularizers import l2
from keras.layers.core import Dense, Lambda
from keras.layers.merge import concatenate, add
from keras.layers import GlobalAveragePooling2D, Reshape, Dense, multiply, Permute
from keras.optimizers import SGD
"""
Explanation: This kernel gets 0.854 on Public LB and takes 16666 to run (both training and prediction). The model's performance can be definitely be improved by using some other tricks, one obvious way is to use KFold Cross Validation. You can train with lovasz loss to improve it further. I wanted to keep the kernel simple and run it within time limit, so it is a no frills models.I am really thankful to the kaggle community for sharing their insights
The major highlights of the kernel are:
1. The Encoder and Decoder Architecture
In the competition, everyone seems to be using ResNet34 encoder.
Keras doesn't provide pre-trained model weights for ResNet34. And kaggle didn't
support pytorch v0.4 until recently. So, i had to look for other ways..
I experimented with all the pretrained models with different decoder architectures.
After lots of interesting experiments, I found that pretrained Xception model with ResNet decoder works best.
2. Use of Pseudo-Labelling.
After reaching 0.83+, my models started overfitting on the training set.
So, i took multiple models using different encoder architecture and found
predictions in the test set that are common for every model.
For example, if three different models predict nearly the same mask on the
test set, its highly likely the predicted mask is correct.
In this way i generated two types of masks no-salt masks and some-salt
masks.I used these masks while training.
In the train set, nearly 39% of the images (a fraction of 0.39) don't have a mask. I maintained this ratio
while using these masks for training (0.39*6000=2340)
3. Using normalized Gradient optimizer
The basic idea is to normalize each layer of the mini-batch stochastic gradient.
It has been shown that the normalized gradient methods having constant step size with occasionally decay, such as SGD with momentum,
have better performance in the deep convolution neural networks, than optimizers with adaptive step sizes likt Adam.
This optimizer was very useful for training my network. Normal SGD takes a very long time to converge.
4. Stochastic Weight Averaging (SWA)
It has been shown in recent paper that SWA finds much broader optima than SGD. I got a boost of around 0.003 with SWA.
It is extremely easy to implement and has very little computational overhead !!
Loading Libraries
End of explanation
"""
img_size_ori = 101
img_size_target = 128
def upsample(img):
    """Resize an image from (img_size_ori, img_size_ori) up to
    (img_size_target, img_size_target); returns the input unchanged when the
    two module-level sizes already match. Uses skimage.transform.resize with
    preserve_range so pixel values are not rescaled.
    """
    if img_size_ori == img_size_target:
        return img
    return resize(img, (img_size_target, img_size_target), mode='constant', preserve_range=True)
def downsample(img):
    """Inverse of upsample: resize a (img_size_target, img_size_target) image
    back to (img_size_ori, img_size_ori); no-op when the sizes match.
    """
    if img_size_ori == img_size_target:
        return img
    return resize(img, (img_size_ori, img_size_ori), mode='constant', preserve_range=True)
"""
Explanation: Params and helpers
End of explanation
"""
train_df = pd.read_csv("../input/tgs-salt-identification-challenge/train.csv", index_col="id", usecols=[0])
val_ids = pd.read_csv('../input/validation-ids/validation_ids.csv')
train_df = train_df.drop(val_ids.iloc[:,0].values)
no_salt_ids = pd.read_csv("../input/pseudolabel-gen/no_salt_ids.csv", index_col="id", usecols=[0])
no_salt_ids = no_salt_ids.sample(2340)
some_salt_ids = pd.read_csv("../input/pseudolabel-gen/some_salt_ids.csv", index_col="id")
some_salt_ids = some_salt_ids.sample(3660)
depths_df = pd.read_csv("../input/tgs-salt-identification-challenge/depths.csv", index_col="id")
train_df = train_df.join(depths_df)
test_df = depths_df[~depths_df.index.isin(train_df.index)]
"""
Explanation: Loading of training/testing ids and depths
End of explanation
"""
train_df["images"] = [np.array(load_img("../input/tgs-salt-identification-challenge/train/images/{}.png".format(idx), grayscale=True)) / 255 for idx in tqdm_notebook(train_df.index)]
train_df["masks"] = [np.array(load_img("../input/tgs-salt-identification-challenge/train/masks/{}.png".format(idx), grayscale=True)) / 255 for idx in tqdm_notebook(train_df.index)]
def rle_decode(rle_mask):
    '''
    Decode a Kaggle run-length-encoded mask string into a 101x101 array.

    rle_mask: run-length encoding as a string of 1-based "start length"
              pairs. A missing mask (pandas NaN, None, or a blank string)
              decodes to an all-zero mask.
    Returns a (101, 101) uint8 numpy array, 1 - mask, 0 - background.

    NOTE: the run indices address the flattened image, and this reshape is
    row-major; callers needing Kaggle's column-major orientation must
    reorient the result (as done elsewhere in this notebook).
    '''
    # Treat any non-string (NaN/None) or blank value as "no mask".
    # The original compared str(rle_mask) == str(np.nan), which crashed on
    # None and returned a float64 array instead of uint8; keep the dtype
    # consistent with the main decoding path below.
    if not isinstance(rle_mask, str) or not rle_mask.strip():
        return np.zeros((101, 101), dtype=np.uint8)
    s = rle_mask.split()
    starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
    starts -= 1  # convert from 1-based to 0-based pixel indices
    ends = starts + lengths
    img = np.zeros(101 * 101, dtype=np.uint8)
    for lo, hi in zip(starts, ends):
        img[lo:hi] = 1
    return img.reshape(101, 101)
"""
Explanation: Read images and masks
Load the images and masks into the DataFrame and divide the pixel values by 255.
End of explanation
"""
train_df_temp = pd.DataFrame()
train_df_temp['id'] = no_salt_ids.index
train_df_temp = train_df_temp.set_index('id')
train_df_temp = train_df_temp.join(depths_df)
train_df_temp["images"] = [np.array(load_img("../input/tgs-salt-identification-challenge/test/images/{}.png".format(idx), grayscale=True)) / 255 for idx in tqdm_notebook(no_salt_ids.index)]
train_df_temp["masks"] = [np.zeros((img_size_ori,img_size_ori)) for idx in tqdm_notebook(no_salt_ids.index)]
train_df = train_df.append(train_df_temp)
del train_df_temp
"""
Explanation: Generating Pseudo Labels
No-Salt images
End of explanation
"""
train_df_temp = pd.DataFrame()
train_df_temp['id'] = some_salt_ids.index
train_df_temp = train_df_temp.set_index('id')
train_df_temp = train_df_temp.join(depths_df)
train_df_temp["images"] = [np.array(load_img("../input/tgs-salt-identification-challenge/test/images/{}.png".format(idx), grayscale=True)) / 255 for idx in tqdm_notebook(some_salt_ids.index)]
train_df_temp["masks"] = [np.fliplr(np.rot90(rle_decode(some_salt_ids.loc[idx,'rle_mask']),-1)) for idx in tqdm_notebook(some_salt_ids.index)]
train_df = train_df.append(train_df_temp)
del train_df_temp
gc.collect()
"""
Explanation: Some Salt images
End of explanation
"""
train_df["coverage"] = train_df.masks.map(np.sum) / pow(img_size_ori, 2)
def cov_to_class(val):
    """Map a salt-coverage fraction in [0, 1] to an integer class 0..10.

    The class is the smallest bucket index i with val * 10 <= i, i.e. a
    ceiling-style binning of the coverage into eleven classes.
    """
    threshold = val * 10
    for bucket in range(11):
        if threshold <= bucket:
            return bucket
train_df["coverage_class"] = train_df.coverage.map(cov_to_class)
fig, axs = plt.subplots(1, 2, figsize=(15,5))
sns.distplot(train_df.coverage, kde=False, ax=axs[0])
sns.distplot(train_df.coverage_class, bins=10, kde=False, ax=axs[1])
plt.suptitle("Salt coverage")
axs[0].set_xlabel("Coverage")
axs[1].set_xlabel("Coverage class")
"""
Explanation: Visualizing the class coverage
End of explanation
"""
ids_valid = val_ids.iloc[:,0].values
temp_df = pd.DataFrame()
temp_df["images"] = [np.array(load_img("../input/tgs-salt-identification-challenge/train/images/{}.png".format(idx), grayscale=True)) / 255 for idx in tqdm_notebook(ids_valid)]
temp_df["masks"] = [np.array(load_img("../input/tgs-salt-identification-challenge/train/masks/{}.png".format(idx), grayscale=True)) / 255 for idx in tqdm_notebook(ids_valid)]
x_valid = np.array(temp_df.images.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 1)
y_valid = np.array(temp_df.masks.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 1)
del temp_df
gc.collect()
ids_train,x_train,y_train = train_df.index.values,\
np.array(train_df.images.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 1), \
np.array(train_df.masks.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 1)
"""
Explanation: Loading Validation Set
I have fixed the validation set for all my experiments. This allows me to easily compare different model performance.
End of explanation
"""
# https://www.kaggle.com/cpmpml/fast-iou-metric-in-numpy-and-tensorflow
def get_iou_vector(A, B):
# Numpy version
batch_size = A.shape[0]
metric = 0.0
for batch in range(batch_size):
t, p = A[batch], B[batch]
true = np.sum(t)
pred = np.sum(p)
# deal with empty mask first
if true == 0:
metric += (pred == 0)
continue
# non empty mask case. Union is never empty
# hence it is safe to divide by its number of pixels
intersection = np.sum(t * p)
union = true + pred - intersection
iou = intersection / union
# iou metrric is a stepwise approximation of the real iou over 0.5
iou = np.floor(max(0, (iou - 0.45)*20)) / 10
metric += iou
# teake the average over all images in batch
metric /= batch_size
return metric
def my_iou_metric(label, pred):
    # Tensorflow version: wraps the numpy IoU approximation as a graph op so
    # it can be used as a Keras training metric. Predictions are binarized
    # at 0.5 before scoring. (tf.py_func is the TF 1.x API.)
    return tf.py_func(get_iou_vector, [label, pred > 0.5], tf.float64)
def dice_coef(y_true, y_pred):
    """Hard Dice coefficient: predictions are binarized at 0.5 before scoring.

    NOTE(review): there is no smoothing term, so if both masks are empty the
    denominator is zero -- confirm this is never evaluated on all-empty
    batches (dice_loss below does add smoothing).
    """
    y_true_f = K.flatten(y_true)
    y_pred = K.cast(y_pred, 'float32')
    y_pred_f = K.cast(K.greater(K.flatten(y_pred), 0.5), 'float32')
    intersection = y_true_f * y_pred_f
    score = 2. * K.sum(intersection) / (K.sum(y_true_f) + K.sum(y_pred_f))
    return score
def dice_loss(y_true, y_pred):
    """Soft (differentiable) Dice loss with additive smoothing.

    Returns 1 - Dice coefficient, computed on the raw (non-thresholded)
    predictions so gradients can flow; the smoothing term keeps the loss
    finite on empty masks.
    """
    smooth = 1.
    truth = K.flatten(y_true)
    prediction = K.flatten(y_pred)
    overlap = K.sum(truth * prediction)
    dice = (2. * overlap + smooth) / (K.sum(truth) + K.sum(prediction) + smooth)
    return 1. - dice
def bce_dice_loss(y_true, y_pred):
    """Combined loss: pixel-wise binary cross-entropy plus soft Dice loss."""
    bce = binary_crossentropy(y_true, y_pred)
    dice = dice_loss(y_true, y_pred)
    return bce + dice
def bce_logdice_loss(y_true, y_pred):
    """BCE minus the log of the soft Dice coefficient (1 - dice_loss);
    the log sharpens the penalty as the Dice score approaches zero."""
    soft_dice_coef = 1. - dice_loss(y_true, y_pred)
    return binary_crossentropy(y_true, y_pred) - K.log(soft_dice_coef)
class SnapshotCallbackBuilder:
    """Assembles the training callbacks: best-IoU checkpointing, SWA, and a
    cosine-annealing learning-rate schedule (snapshot-ensemble style).
    """
    def __init__(self, nb_epochs, nb_snapshots, init_lr=0.1):
        self.T = nb_epochs        # total number of training epochs
        self.M = nb_snapshots     # number of cosine-annealing cycles
        self.alpha_zero = init_lr # peak learning rate at the start of each cycle
    def get_callbacks(self, model_prefix='Model'):
        # NOTE(review): `model_prefix` is currently unused, and `swa` is a
        # module-level SWA callback instance that must be created before
        # this method is called -- confirm ordering at the call site.
        callback_list = [
            callbacks.ModelCheckpoint("./keras.model",monitor='val_my_iou_metric',
                                   mode = 'max', save_best_only=True, verbose=1),
            swa,
            callbacks.LearningRateScheduler(schedule=self._cosine_anneal_schedule)
        ]
        return callback_list
    def _cosine_anneal_schedule(self, t):
        # Cosine decay from alpha_zero towards 0 within each of the M cycles.
        cos_inner = np.pi * (t % (self.T // self.M)) # t - 1 is used when t has 1-based indexing.
        cos_inner /= self.T // self.M
        cos_out = np.cos(cos_inner) + 1
        return float(self.alpha_zero / 2 * cos_out)
"""
Explanation: Calculating IOU
End of explanation
"""
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
    """Conv2D -> BatchNorm -> optional LeakyReLU building block.

    Args:
        x: input tensor.
        filters: number of convolution filters.
        size: convolution kernel size.
        strides: convolution strides.
        padding: padding mode passed to Conv2D.
        activation: when truthy, append a LeakyReLU(alpha=0.1) activation.
    Returns:
        The output tensor of the block.
    """
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    x = BatchNormalization()(x)
    if activation:  # idiomatic truth test instead of `== True`
        x = LeakyReLU(alpha=0.1)(x)
    return x
def residual_block(blockInput, num_filters=16):
    """Pre-activation-style residual block: LeakyReLU + BatchNorm on the
    input, two 3x3 convolution blocks, then an additive skip connection.

    NOTE(review): BatchNormalization is applied separately to both the
    activated path and the raw skip path -- confirm this asymmetry is
    intentional (it differs from the standard ResNet-v2 layout).
    """
    x = LeakyReLU(alpha=0.1)(blockInput)
    x = BatchNormalization()(x)
    blockInput = BatchNormalization()(blockInput)
    x = convolution_block(x, num_filters, (3,3) )
    x = convolution_block(x, num_filters, (3,3), activation=False)
    x = Add()([x, blockInput])
    return x
"""
Explanation: Useful Model Blocks
End of explanation
"""
def UXception(input_shape=(None, None, 3)):
    """Build a U-Net-style segmentation model with a pretrained Xception
    encoder and a residual-block decoder.

    Skip connections are taken from intermediate Xception layers (indices
    121, 31, 21 and 11); the ZeroPadding2D calls realign those encoder
    feature maps with the upsampled decoder tensors before concatenation.
    NOTE(review): the layer indices and padding offsets appear tuned for a
    128x128 input -- confirm before using other input sizes.

    Returns a Keras Model mapping an RGB image to a single-channel sigmoid
    mask of the same spatial size.
    """
    backbone = Xception(input_shape=input_shape,weights='imagenet',include_top=False)
    input = backbone.input  # NOTE(review): shadows the builtin `input`
    start_neurons = 16
    # Deepest encoder feature map, also reused as a skip connection below
    conv4 = backbone.layers[121].output
    conv4 = LeakyReLU(alpha=0.1)(conv4)
    pool4 = MaxPooling2D((2, 2))(conv4)
    pool4 = Dropout(0.1)(pool4)
    # Middle (bottleneck) block
    convm = Conv2D(start_neurons * 32, (3, 3), activation=None, padding="same")(pool4)
    convm = residual_block(convm,start_neurons * 32)
    convm = residual_block(convm,start_neurons * 32)
    convm = LeakyReLU(alpha=0.1)(convm)
    # Decoder stage 4: upsample x2, fuse with the conv4 skip connection
    deconv4 = Conv2DTranspose(start_neurons * 16, (3, 3), strides=(2, 2), padding="same")(convm)
    uconv4 = concatenate([deconv4, conv4])
    uconv4 = Dropout(0.1)(uconv4)
    uconv4 = Conv2D(start_neurons * 16, (3, 3), activation=None, padding="same")(uconv4)
    uconv4 = residual_block(uconv4,start_neurons * 16)
    uconv4 = residual_block(uconv4,start_neurons * 16)
    uconv4 = LeakyReLU(alpha=0.1)(uconv4)
    # Decoder stage 3: upsample x2, fuse with the encoder layer-31 skip
    deconv3 = Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(uconv4)
    conv3 = backbone.layers[31].output
    uconv3 = concatenate([deconv3, conv3])
    uconv3 = Dropout(0.1)(uconv3)
    uconv3 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(uconv3)
    uconv3 = residual_block(uconv3,start_neurons * 8)
    uconv3 = residual_block(uconv3,start_neurons * 8)
    uconv3 = LeakyReLU(alpha=0.1)(uconv3)
    # Decoder stage 2: upsample x2, fuse with the (re-aligned) layer-21 skip
    deconv2 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv3)
    conv2 = backbone.layers[21].output
    conv2 = ZeroPadding2D(((1,0),(1,0)))(conv2)
    uconv2 = concatenate([deconv2, conv2])
    uconv2 = Dropout(0.1)(uconv2)
    uconv2 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv2)
    uconv2 = residual_block(uconv2,start_neurons * 4)
    uconv2 = residual_block(uconv2,start_neurons * 4)
    uconv2 = LeakyReLU(alpha=0.1)(uconv2)
    # Decoder stage 1: upsample x2, fuse with the (re-aligned) layer-11 skip
    deconv1 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv2)
    conv1 = backbone.layers[11].output
    conv1 = ZeroPadding2D(((3,0),(3,0)))(conv1)
    uconv1 = concatenate([deconv1, conv1])
    uconv1 = Dropout(0.1)(uconv1)
    uconv1 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv1)
    uconv1 = residual_block(uconv1,start_neurons * 2)
    uconv1 = residual_block(uconv1,start_neurons * 2)
    uconv1 = LeakyReLU(alpha=0.1)(uconv1)
    # Final stage: upsample x2 to full resolution (no skip connection)
    uconv0 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv1)
    uconv0 = Dropout(0.1)(uconv0)
    uconv0 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv0)
    uconv0 = residual_block(uconv0,start_neurons * 1)
    uconv0 = residual_block(uconv0,start_neurons * 1)
    uconv0 = LeakyReLU(alpha=0.1)(uconv0)
    uconv0 = Dropout(0.1/2)(uconv0)
    # Per-pixel sigmoid for the binary salt mask
    output_layer = Conv2D(1, (1,1), padding="same", activation="sigmoid")(uconv0)
    model = Model(input, output_layer)
    # NOTE(review): assigning model.name works on the Keras version this
    # kernel targets; newer Keras releases make `name` read-only.
    model.name = 'u-xception'
    return model
K.clear_session()
model = UXception(input_shape=(img_size_target,img_size_target,3))
"""
Explanation: Defining UXception Model
As mentioned above, this model uses pretrained Xception model as encoder. It uses Residual blocks in the decoder part,
End of explanation
"""
# https://github.com/titu1994/keras-normalized-optimizers
# Computes the L-2 norm of the gradient.
def l2_norm(grad):
    # L2 norm of a gradient tensor; K.epsilon() guards against division by
    # zero when the caller divides an all-zero gradient by this norm.
    norm = K.sqrt(K.sum(K.square(grad))) + K.epsilon()
    return norm
class OptimizerWrapper(optimizers.Optimizer):
    """Base wrapper that delegates to an inner Keras optimizer while letting
    subclasses intercept `get_gradients` (e.g. to normalize gradients).

    The wrapper temporarily monkey-patches the inner optimizer's
    `get_gradients` during `get_updates`, so the inner optimizer's own
    update rule runs on the (possibly transformed) gradients.
    """
    def __init__(self, optimizer):
        self.optimizer = optimizers.get(optimizer)
        # patch the `get_gradients` call: keep a reference to the inner
        # optimizer's original implementation so it can be restored later
        self._optimizer_get_gradients = self.optimizer.get_gradients
    def get_gradients(self, loss, params):
        # Default pass-through; subclasses override this to transform grads.
        grads = self._optimizer_get_gradients(loss, params)
        return grads
    @interfaces.legacy_get_updates_support
    def get_updates(self, loss, params):
        # monkey patch `get_gradients` so the inner optimizer calls ours
        self.optimizer.get_gradients = self.get_gradients
        # get the updates (stored on the inner optimizer's `updates` attr,
        # exposed through the `updates` property below)
        self.optimizer.get_updates(loss, params)
        # undo monkey patch
        self.optimizer.get_gradients = self._optimizer_get_gradients
        return self.updates
    def set_weights(self, weights):
        self.optimizer.set_weights(weights)
    def get_weights(self):
        return self.optimizer.get_weights()
    def get_config(self):
        # name of the wrapped optimizer class (consumed by from_config)
        config = {'optimizer_name': self.optimizer.__class__.__name__.lower()}
        # serialized config of the wrapped optimizer
        optimizer_config = {'optimizer_config': self.optimizer.get_config()}
        return dict(list(optimizer_config.items()) + list(config.items()))
    @property
    def weights(self):
        return self.optimizer.weights
    @property
    def updates(self):
        return self.optimizer.updates
    @classmethod
    def from_config(cls, config):
        # Abstract: only concrete subclasses know how to rebuild themselves.
        raise NotImplementedError
    @classmethod
    def set_normalization_function(cls, name, func):
        # Register a custom normalization function in the module registry.
        global _NORMS
        _NORMS[name] = func
    @classmethod
    def get_normalization_functions(cls):
        # Sorted names of all registered normalization functions.
        global _NORMS
        return sorted(list(_NORMS.keys()))
class NormalizedOptimizer(OptimizerWrapper):
    """Optimizer wrapper that divides each parameter's gradient by its norm
    (normalized-gradient descent) before handing it to the wrapped optimizer.

    `normalization` selects a function from the module-level _NORMS registry
    ('l2' by default).
    """
    def __init__(self, optimizer, normalization='l2'):
        super(NormalizedOptimizer, self).__init__(optimizer)
        if normalization not in _NORMS:
            raise ValueError('`normalization` must be one of %s.\n'
                             'Provided was "%s".' % (str(sorted(list(_NORMS.keys()))), normalization))
        self.normalization = normalization
        self.normalization_fn = _NORMS[normalization]
        self.lr = K.variable(1e-3, name='lr')
    def get_gradients(self, loss, params):
        # Normalize each gradient tensor independently by its own norm.
        grads = super(NormalizedOptimizer, self).get_gradients(loss, params)
        grads = [grad / self.normalization_fn(grad) for grad in grads]
        return grads
    def get_config(self):
        # properties of NormalizedOptimizer
        config = {'normalization': self.normalization}
        # optimizer config (includes the wrapped optimizer's serialization)
        base_config = super(NormalizedOptimizer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    @classmethod
    def from_config(cls, config):
        # Rebuild the wrapped optimizer from its serialized name + config,
        # then re-wrap it with the stored normalization.
        optimizer_config = {'class_name': config['optimizer_name'],
                            'config': config['optimizer_config']}
        optimizer = optimizers.get(optimizer_config)
        normalization = config['normalization']
        return cls(optimizer, normalization=normalization)
# Registry of gradient-normalization functions available to
# NormalizedOptimizer, keyed by the `normalization` argument value.
_NORMS = {
    'l2': l2_norm,
}
# register this optimizer to the global custom objects when it is imported
# (lets keras.models.load_model deserialize a NormalizedOptimizer by name)
get_custom_objects().update({'NormalizedOptimizer': NormalizedOptimizer})
class SWA(keras.callbacks.Callback):
    """Stochastic Weight Averaging callback.

    From `swa_epoch` onward, maintains a running average of the model
    weights; at the end of training the model is set to the averaged
    weights and they are saved to `filepath`.
    """
    def __init__(self, filepath, swa_epoch):
        super(SWA, self).__init__()
        self.filepath = filepath   # where the final averaged weights are saved
        self.swa_epoch = swa_epoch # first (0-based) epoch included in the average
    def on_train_begin(self, logs=None):
        self.nb_epoch = self.params['epochs']
        print('Stochastic weight averaging selected for last {} epochs.'
              .format(self.nb_epoch - self.swa_epoch))
    def on_epoch_end(self, epoch, logs=None):
        if epoch == self.swa_epoch:
            # First averaged sample: just snapshot the current weights.
            self.swa_weights = self.model.get_weights()
        elif epoch > self.swa_epoch:
            # Incremental running mean over (epoch - swa_epoch + 1) samples.
            # Fetch the current weights ONCE instead of calling
            # model.get_weights() inside the per-layer loop, which
            # re-serialized every layer's weights on each iteration.
            current_weights = self.model.get_weights()
            n_averaged = epoch - self.swa_epoch
            for i in range(len(self.swa_weights)):
                self.swa_weights[i] = (self.swa_weights[i] * n_averaged
                                       + current_weights[i]) / (n_averaged + 1)
    def on_train_end(self, logs=None):
        # Install the averaged weights on the model and persist them.
        self.model.set_weights(self.swa_weights)
        print('Final model parameters set to stochastic weight average.')
        self.model.save_weights(self.filepath)
        print('Final stochastic averaged weights saved to file.')
sgd = SGD(0.01, momentum=0.9, nesterov=True)
sgd = NormalizedOptimizer(sgd, normalization='l2')
model.compile(loss=bce_dice_loss, optimizer=sgd, metrics=[my_iou_metric])
"""
Explanation: Defining the Normalized Gradient SGD Optimizer
This leads to much faster convergence compared to normal SGD.
End of explanation
"""
x_train = np.append(x_train, [np.fliplr(x) for x in x_train[0:3200]], axis=0)
y_train = np.append(y_train, [np.fliplr(x) for x in y_train[0:3200]], axis=0)
x_train = np.repeat(x_train,3,axis=3)
x_valid = np.repeat(x_valid,3,axis=3)
"""
Explanation: Augmentation using fliplr
I only augment the images in training set. I repeat the image in to get 3 channels which is required for using pretrained imagenet models.
End of explanation
"""
epochs = 40
snapshot = SnapshotCallbackBuilder(nb_epochs=epochs,nb_snapshots=1,init_lr=1e-3)
batch_size = 32
swa = SWA('./keras_swa.model',35)
history = model.fit(x_train, y_train,
validation_data=[x_valid, y_valid],
epochs=epochs,
batch_size=batch_size,
callbacks=snapshot.get_callbacks(),shuffle=True,verbose=2)
plt.plot(history.history['my_iou_metric'][1:])
plt.plot(history.history['val_my_iou_metric'][1:])
plt.title('model iou metric')
plt.ylabel('val_my_iou_metric')
plt.xlabel('epoch')
plt.legend(['train','Validation'], loc='upper left')
plt.show()
del ids_train, x_train, y_train,some_salt_ids,depths_df
gc.collect()
# Load best model: prefer the SWA-averaged weights and fall back to the
# best ModelCheckpoint snapshot if the SWA weight file is unavailable.
try:
    model.load_weights('./keras_swa.model')
    print('using swa weight model')
except Exception:
    # Narrowed from a bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit); the success message is now printed
    # only after the SWA weights actually load.
    model.load_weights('./keras.model')
"""
Explanation: Training the Model
End of explanation
"""
def predict_result(model, x_test, img_size_target, batch_size):
    """Predict masks with horizontal-flip test-time augmentation.

    Runs the model on both the images and their left-right mirrors,
    un-mirrors the second set of predictions, and returns the average as an
    array of shape (n, img_size_target, img_size_target).
    """
    out_shape = (-1, img_size_target, img_size_target)
    mirrored = np.flip(np.asarray(x_test), axis=2)  # flip image columns
    preds_plain = model.predict([x_test], batch_size=batch_size).reshape(out_shape)
    preds_mirrored = model.predict([mirrored], batch_size=batch_size).reshape(out_shape)
    # Flip the mirrored predictions back before averaging with the plain ones.
    preds_restored = np.flip(preds_mirrored, axis=2)
    return (preds_plain + preds_restored) / 2
preds_valid = predict_result(model,x_valid,img_size_target,batch_size)
preds_valid = np.array([downsample(x) for x in preds_valid])
y_valid_ori = np.array([downsample(x) for x in y_valid])
max_images = 60
grid_width = 15
grid_height = int(max_images / grid_width)
fig, axs = plt.subplots(grid_height, grid_width, figsize=(grid_width, grid_height))
for i, idx in enumerate(ids_valid[:max_images]):
img = x_valid[i]
mask = y_valid[i].squeeze()
pred = preds_valid[i]
ax = axs[int(i / grid_width), i % grid_width]
ax.imshow(img, cmap="Greys")
ax.imshow(mask, alpha=0.3, cmap="Greens")
ax.imshow(pred, alpha=0.3, cmap="OrRd")
plt.suptitle("Green: salt, Red: prediction. Top-left: coverage class, top-right: salt coverage, bottom-left: depth")
"""
Explanation: Predict the validation set to do a sanity check
Again plot some sample images including the predictions.
End of explanation
"""
# src: https://www.kaggle.com/aglotero/another-iou-metric
def iou_metric(y_true_in, y_pred_in, print_table=False):
    """Approximate the Kaggle average-precision-over-IoU-thresholds metric
    for a single binary mask pair.

    Each mask is treated as at most one foreground "object" (the histograms
    use 2 bins: background and foreground); precision is averaged over the
    IoU thresholds 0.50, 0.55, ..., 0.95.
    """
    labels = y_true_in
    y_pred = y_pred_in
    true_objects = 2
    pred_objects = 2
    # Joint histogram: rows index ground-truth bins, columns prediction bins
    intersection = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0]
    # Compute areas (needed for finding the union between all objects)
    area_true = np.histogram(labels, bins = true_objects)[0]
    area_pred = np.histogram(y_pred, bins = pred_objects)[0]
    area_true = np.expand_dims(area_true, -1)
    area_pred = np.expand_dims(area_pred, 0)
    # Compute union
    union = area_true + area_pred - intersection
    # Exclude background from the analysis
    intersection = intersection[1:,1:]
    union = union[1:,1:]
    union[union == 0] = 1e-9
    # Compute the intersection over union
    iou = intersection / union
    # Precision helper function
    def precision_at(threshold, iou):
        matches = iou > threshold
        true_positives = np.sum(matches, axis=1) == 1   # Correctly matched objects
        false_positives = np.sum(matches, axis=0) == 0  # Extra predicted objects (no matching truth)
        false_negatives = np.sum(matches, axis=1) == 0  # Missed true objects (no matching prediction)
        tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)
        return tp, fp, fn
    # Loop over IoU thresholds
    prec = []
    if print_table:
        print("Thresh\tTP\tFP\tFN\tPrec.")
    for t in np.arange(0.5, 1.0, 0.05):
        tp, fp, fn = precision_at(t, iou)
        if (tp + fp + fn) > 0:
            p = tp / (tp + fp + fn)
        else:
            p = 0
        if print_table:
            print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p))
        prec.append(p)
    if print_table:
        print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec)))
    return np.mean(prec)
def iou_metric_batch(y_true_in, y_pred_in):
    """Average iou_metric over the leading (batch) dimension."""
    n_samples = y_true_in.shape[0]
    scores = [iou_metric(y_true_in[i], y_pred_in[i]) for i in range(n_samples)]
    return np.mean(scores)
## Scoring for last model
thresholds = np.linspace(0.3, 0.7, 31)
ious = np.array([iou_metric_batch(y_valid_ori, np.int32(preds_valid > threshold)) for threshold in tqdm_notebook(thresholds)])
threshold_best_index = np.argmax(ious)
iou_best = ious[threshold_best_index]
threshold_best = thresholds[threshold_best_index]
plt.plot(thresholds, ious)
plt.plot(threshold_best, iou_best, "xr", label="Best threshold")
plt.xlabel("Threshold")
plt.ylabel("IoU")
plt.title("Threshold vs IoU ({}, {})".format(threshold_best, iou_best))
plt.legend()
"""
Explanation: Scoring
Score the model and do a threshold optimization by the best IoU.
End of explanation
"""
max_images = 60
grid_width = 15
grid_height = int(max_images / grid_width)
fig, axs = plt.subplots(grid_height, grid_width, figsize=(grid_width, grid_height))
for i, idx in enumerate(ids_valid[:max_images]):
img = x_valid[i]
mask = y_valid[i].squeeze()
pred = preds_valid[i]
ax = axs[int(i / grid_width), i % grid_width]
ax.imshow(img, cmap="Greys")
ax.imshow(mask, alpha=0.3, cmap="Greens")
ax.imshow(np.array(np.round(pred > threshold_best), dtype=np.float32), alpha=0.3, cmap="OrRd")
plt.suptitle("Green: salt, Red: prediction. Top-left: coverage class, top-right: salt coverage, bottom-left: depth")
del x_valid, y_valid,preds_valid,y_valid_ori,train_df
gc.collect()
"""
Explanation: Another sanity check with adjusted threshold
Again some sample images with the adjusted threshold.
End of explanation
"""
train_df = pd.read_csv("../input/tgs-salt-identification-challenge/train.csv", index_col="id", usecols=[0])
depths_df = pd.read_csv("../input/tgs-salt-identification-challenge/depths.csv", index_col="id")
train_df = train_df.join(depths_df)
test_df = depths_df[~depths_df.index.isin(train_df.index)]
batch_size = 500
preds_test = []
i = 0
while i < test_df.shape[0]:
index_val = test_df.index[i:i+batch_size]
# depth_val = test_df.z[i:i+batch_size]
x_test = np.array([upsample(np.array(load_img("../input/tgs-salt-identification-challenge/test/images/{}.png".format(idx), grayscale=True))) / 255 for idx in (index_val)]).reshape(-1, img_size_target, img_size_target, 1)
x_test = np.repeat(x_test,3,axis=3)
preds_test_temp = predict_result(model,x_test,img_size_target,32)
if i==0:
preds_test = preds_test_temp
else:
preds_test = np.concatenate([preds_test,preds_test_temp],axis=0)
if i%2000==0:
print('Images Processed:',i)
i += batch_size
print('Done!')
"""
Explanation: Test Set Prediction
End of explanation
"""
max_images = 60
grid_width = 15
grid_height = int(max_images / grid_width)
fig, axs = plt.subplots(grid_height, grid_width, figsize=(grid_width, grid_height))
for i, idx in enumerate(index_val[:max_images]):
img = x_test[i]
pred = preds_test[i]
ax = axs[int(i / grid_width), i % grid_width]
ax.imshow(img, cmap="Greys")
ax.imshow(np.array(np.round(pred > threshold_best), dtype=np.float32), alpha=0.3, cmap="OrRd")
"""
used for converting the decoded image to rle mask
Fast compared to previous one
"""
def rle_encode(im):
    """Run-length encode a binary mask.

    im: numpy array where 1 marks mask pixels and 0 marks background.
    Returns the encoding as a space-separated string of alternating
    1-based start positions and run lengths, scanning the image in
    column-major (Fortran) order as the Kaggle submission format requires.
    """
    # Pad with zeros on both ends so every run has a detectable start and end.
    flat = np.concatenate([[0], im.flatten(order='F'), [0]])
    # 1-based positions where the value changes: run starts and run ends.
    changes = np.flatnonzero(flat[1:] != flat[:-1]) + 1
    # Turn every second entry (run end) into a run length.
    changes[1::2] -= changes[::2]
    return ' '.join(map(str, changes))
import time
t1 = time.time()
# RLE-encode every test prediction: downsample back to the original image
# resolution, binarise with the tuned threshold, then run-length encode.
pred_dict = {idx: rle_encode(np.round(downsample(preds_test[i]) > threshold_best)) for i, idx in enumerate(tqdm_notebook(test_df.index.values))}
t2 = time.time()
print(f"Usedtime = {t2-t1} s")
# Build the Kaggle submission file: one row per image id with its RLE mask.
sub = pd.DataFrame.from_dict(pred_dict,orient='index')
sub.index.names = ['id']
sub.columns = ['rle_mask']
sub.to_csv('swa_xce_submission.csv')
"""
Explanation: Some Test Set Predictions
End of explanation
"""
|
astro4dev/OAD-Data-Science-Toolkit | Teaching Materials/Machine Learning/ml-training-intro/notebooks/01 - Introduction to Scikit-learn.ipynb | gpl-3.0 | from sklearn.svm import LinearSVC
"""
Explanation: Really Simple API
0) Import your model class
End of explanation
"""
svm = LinearSVC()
"""
Explanation: 1) Instantiate an object and set the parameters
End of explanation
"""
svm.fit(X_train, y_train)
"""
Explanation: 2) Fit the model
End of explanation
"""
print(svm.predict(X_train))
print(y_train)
svm.score(X_train, y_train)
svm.score(X_test, y_test)
"""
Explanation: 3) Apply / evaluate
End of explanation
"""
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=50)
rf.fit(X_train, y_train)
rf.score(X_test, y_test)
!conda upgrade scikit-learn
!pip install -U scikit-learn
"""
Explanation: And again
End of explanation
"""
# %load solutions/train_iris.py
"""
Explanation: Exercises
Load the iris dataset from the sklearn.datasets module using the load_iris function.
Split it into training and test set using train_test_split.
Then train and evaluate a classifier of your choice. Try sklearn.neighbors.KNeighborsClassifier for example.
End of explanation
"""
|
ajkavanagh/pyne-sqlalchemy-2015-04 | notebook/Reflection.ipynb | gpl-3.0 | from sqlalchemy import create_engine
engine = create_engine('sqlite:////vagrant/utils/db.sqlite')
from sqlalchemy import Table, Column, MetaData
metadata = MetaData()
connection = engine.connect()
user_table = Table('user', metadata, autoload=True, autoload_with=connection)
purchase_table = Table('purchase', metadata, autoload=True, autoload_with=connection)
"""
Explanation: Attempting to reflect a database
This is a clever way of saying: let's see if we can read the database schema using SQLAlchemy?
End of explanation
"""
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class User(object):
    """Plain class mapped (classically, below) onto the reflected ``user`` table.

    ``id_user`` is not assigned here; SQLAlchemy populates it once the class
    is mapped and instances are loaded or flushed.
    """

    def __init__(self, title, first_name, surname):
        self.title, self.first_name, self.surname = title, first_name, surname

    def __repr__(self):
        fields = (self.id_user, self.title, self.first_name, self.surname)
        return "<User(id_user={}, {} {} {})>".format(*fields)
class Purchase(object):
    """Plain class mapped (classically, below) onto the reflected ``purchase`` table.

    ``id_purchase`` is not assigned here; SQLAlchemy populates it once the
    class is mapped and instances are loaded or flushed.
    """

    def __init__(self, id_user, category, item, date, price):
        self.id_user = id_user
        self.category = category
        self.item = item
        self.date = date
        self.price = price

    def __repr__(self):
        fields = (self.id_purchase, self.id_user, self.category,
                  self.item, self.date, self.price)
        return ("<Purchase(id_purchase={}, id_user={}, cat={}, item={}, date={}, price={})>"
                .format(*fields))
"""
Explanation: Let's hook this up to the ORM (rather than using Core)
We need to pull in declarative_base, build a Base class, and then map the tables we reflected.
End of explanation
"""
from sqlalchemy.orm import mapper, relationship, backref
# Classical (non-declarative) mapping of the plain classes onto the reflected
# tables. User gains a 'purchases' collection ordered by id_purchase, and
# Purchase gets a 'user' backref via the relationship.
mapper(Purchase, purchase_table)
mapper(User, user_table, properties={
    'purchases': relationship(Purchase, backref='user', order_by=purchase_table.c.id_purchase)
})
"""
Explanation: Now map the items together.
End of explanation
"""
from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind=engine)
session = Session()
# Simple query through the classical mapping: first ten users.
users = session.query(User).limit(10).all()
for u in users:
    print(u)
# The 'purchases' relationship configured in the mapper triggers a lazy load.
u1 = users[0]
print(u1.purchases)
from sqlalchemy import func
# Top-10 users by purchase count, restricted to users with more than 10
# purchases (GROUP BY + HAVING through the ORM).
results = (session.query(User, func.count(Purchase.id_purchase))
.join(Purchase)
.group_by(Purchase.id_user)
.having(func.count(Purchase.id_purchase) > 10)
.order_by(func.count(Purchase.id_purchase).desc())
.limit(10)
.all())
for (u, count) in results:
    print("{}, Num Purchase={}".format(u, count))
"""
Explanation: Let's grab a session and do some querying
End of explanation
"""
|
WNoxchi/Kaukasos | FAI_old/lesson1/Lesson1_recode.ipynb | mit | %matplotlib inline
from __future__ import division, print_function
import os, sys, json
# import keras as K
# os.environ['KERAS_BACKEND'] = 'theano'
sys.path.insert(1, os.path.join('../utils/'))
import utils; from utils import plots
import glob as glob
import numpy as np
np.set_printoptions(precision=4, linewidth=100)
from matplotlib import pyplot as plt
import vgg16;
from vgg16 import Vgg16
"""
Explanation: Using Convolutional Neural Networks
Wayne Nixalo - 2 Aug 2017
recreating some issues
Basic Setup
End of explanation
"""
from numpy.random import random, permutation
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.models import Sequential, Model
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers import Input
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, RMSprop
from keras.preprocessing import image
FILES_PATH = 'http://files.fast.ai/models/'; CLASS_FILE='imagenet_class_index.json'
# Keras' get_file () is a handy function that downloads files, and caches them for reuse later
fpath = get_file(CLASS_FILE, FILES_PATH+CLASS_FILE, cache_subdir='models')
with open(fpath) as f: class_dict = json.load(f)
# Convert dictionary with string indices into an array
classes = [class_dict[str(i)][1] for i in range(len(class_dict))]
classes[:5]
"""
Explanation: Create a VGG model from scratch in Keras
Model Setup
End of explanation
"""
def ConvBlock(layers, model, filters):
    """Append a VGG convolution block to `model` (mutates it in place).

    Adds `layers` repetitions of zero-padding + 3x3 convolution with `filters`
    output channels and ReLU activation, then a single 2x2 max-pool with
    stride 2 that halves the spatial resolution.
    """
    for i in range(layers):
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(filters, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2)))
def FCBlock(model):
    """Append one fully connected 4096-unit ReLU layer plus 50% dropout to `model`."""
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
# Per-channel means published by the VGG researchers, shaped (3, 1, 1) so they
# broadcast over (batch, channel, height, width) image tensors.
vgg_mean = np.array([123.68, 116.779, 103.939]).reshape((3, 1, 1))

def vgg_preprocess(x):
    """Center `x` by the VGG channel means, then reverse the channel order.

    The channel flip (axis 1) converts between RGB and BGR ordering, matching
    the convention the pretrained VGG weights expect.
    """
    centered = x - vgg_mean
    return centered[:, ::-1]
def VGG_16():
    """Build and return the VGG16 architecture as a Keras Sequential model.

    The input Lambda applies vgg_preprocess (mean subtraction + channel flip)
    to raw (3, 224, 224) images; five conv blocks are followed by two FC
    blocks and a 1000-way softmax (ImageNet classes). Weights are random —
    nothing pretrained is loaded here.
    """
    model = Sequential()
    model.add(Lambda(vgg_preprocess, input_shape=(3,224,224)))
    ConvBlock(2, model, 64)
    ConvBlock(2, model, 128)
    ConvBlock(3, model, 256)
    ConvBlock(3, model, 512)
    ConvBlock(3, model, 512)
    model.add(Flatten())
    FCBlock(model)
    FCBlock(model)
    model.add(Dense(1000, activation='softmax'))
    return model
model = VGG_16()
# NOTE: redefinition of ConvBlock from above (same behavior); this notebook
# deliberately re-runs the definitions while "recreating some issues".
def ConvBlock(layers, model, filters):
    """Append `layers` zero-padded 3x3 ReLU convolutions and a 2x2 max-pool to `model`."""
    for i in range(layers):
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(filters, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
def FCBlock(model):
    """Append one fully connected 4096-unit ReLU layer plus 50% dropout to `model`."""
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
def VGG_16():
    """Build and return the VGG16 architecture as a Keras Sequential model.

    Same topology as the earlier definition, but the Lambda layer also
    declares output_shape explicitly. Weight loading is left commented out.
    """
    model = Sequential()
    model.add(Lambda(vgg_preprocess, input_shape=(3,224,224), output_shape=(3,224,224)))
    ConvBlock(2, model, 64)
    ConvBlock(2, model, 128)
    ConvBlock(3, model, 256)
    ConvBlock(3, model, 512)
    ConvBlock(3, model, 512)
    model.add(Flatten())
    FCBlock(model)
    FCBlock(model)
    model.add(Dense(1000, activation='softmax'))
    # fname = 'vgg16.h5'
    # model.load_weights(get_file(fname, self.FILE_PATH+fname, cache_subdir='models'))
    # BUG FIX: the original definition fell off the end and implicitly
    # returned None, so `model = VGG_16()` below bound None to `model`.
    return model
model = VGG_16()
"""
Explanation: Model Creation
End of explanation
"""
|
jadelord/TomoKTH | examples/Tutorial_01-Image_loading.ipynb | gpl-3.0 | arr = io.imread('0mm_cam0.tif')
print 'Image has been loaded as a 2d numpy array with ', arr.shape, 'rows and columns. Datatype =', arr.dtype
"""
Explanation: Reading images into array
End of explanation
"""
io.implot('0mm_cam0.tif')
cd ../particle_images/
"""
Explanation: Plotting images
End of explanation
"""
io.implot('TomoImg_cam0_a00001.tif', cmap='jet')
"""
Explanation: One can also use different matplotlib colormaps while plotting the images as demonstrated below.
End of explanation
"""
io.imsave('raw_image_data.txt', arr)
"""
Explanation: Saving arrays as text files/images
End of explanation
"""
import scipy as sp
sparse_arr = sp.sparse.coo_matrix(arr)
print sparse_arr
"""
Explanation: The text file would contain many zeroes and few non-zero integers indicating the intensities at each pixel
End of explanation
"""
|
a-pagano/BigDive5 | DataScience/Day4_MongoDB.ipynb | mit | from pymongo import MongoClient
client = MongoClient('mongodb://localhost:27017/')
db = client.phonebook
print db.collection_names()
"""
Explanation: MongoDB
Schema Free
Document Based
Supports Indexing
Not Transactional
Does not support relations (no JOIN)
Supports Autosharding
Automatic Replication and Failover
Relies on System Memory Manager
Has an Aggregation Pipeline
Builtin support for MapReduce
On Python mongodb support is provided by PyMongo library, which can be installed using:
`
$ pip install pymongo
Installing MongoDB
Installing MongoDB is as simple as going to http://www.mongodb.org/downloads and downloading it.
Create a /data/db directory then start mongod inside the mongodb downloaded package:
$ curl -O 'https://fastdl.mongodb.org/osx/mongodb-osx-x86_64-3.0.4.tgz'
$ tar zxvf mongodb-osx-x86_64-3.0.4.tgz
$ cd mongodb-osx-x86_64-3.0.4
$ mkdir data
$ ./bin/mongod --dbpath=./data
Using MongoDB
a MongoClient instance provides connection to MongoDB Server, each server can host multiple databases which can be retrieved with connection.database_name which can then contain multiple collections with different documents.
End of explanation
"""
data = {'name': 'Alessandro', 'phone': '+39123456789'}
db.people.insert(data)
print db.collection_names()
"""
Explanation: Once the database is retrieved, collections can be accessed as attributes of the database itself.
A MongoDB document is actually just a Python Dictionary, inserting a document is as simple as telling pymongo to insert the dictionary into the collection. Each document can have its own structure, can contain different data and you are not required to declare and structure of the collection. Not existing collections will be automatically created on the insertion of the first document
End of explanation
"""
db.people.insert({'name': 'Puria', 'phone': '+39123456788', 'other_phone': '+3933332323'}, w=0)
try:
db.people.insert({'name': 'Puria', 'phone': '+39123456789'}, w=2)
except Exception as e:
print e
"""
Explanation: Each inserted document will receive an ObjectId which is a uniquue identifier of the document, the ObjectId is based on some data like the current timestamp, server identifier process id and other data that guarantees it to be unique across multiple servers.
Being designed to work in a distributed and multinode environment, MongoDB handles "write safety" by the number of servers that are expected to have saved the document before considering the insert command "completed".
This is handled by the w option, which indicates the number of servers that must have saved the document before the insert command returns. Setting it to 0 makes mongodb work in fire and forget mode, which is useful when inserting a lot of documents quickly. As most drivers will actually generate the ObjectId on client that performs the insertion you will receive an ObjectId even before the document has been written.
End of explanation
"""
db.people.find_one({'name': 'Alessandro'})
"""
Explanation: Fetching back inserted document can be done using find and find_one methods of collections. Both methods accept a query expression that filters the returned documents. Omitting it means retrieving all the documents (or in case of find_one the first document).
End of explanation
"""
from bson import ObjectId
db.people.find_one({'_id': {'$gt': ObjectId('55893a1d7ab71c669f4c149e')}})
"""
Explanation: Filters in mongodb are described by Documents themselves, so in case of PyMongo they are dictionaries too.
A filter can be specified in the form {'field': value}.
By default filtering is performed by equality comparison, this can be changed by specifying a query operator in place of the value.
Query operators by convention start with a $ sign and can be specified as {'field': {'operator': value}}.
Full list of query operators is available at http://docs.mongodb.org/manual/reference/operator/query/
For example if we want to find each person that has an object id greather than 53b30ff57ab71c051823b031 we can achieve that with:
End of explanation
"""
doc = db.people.find_one({'name': 'Alessandro'})
print '\nBefore Updated:', doc
db.people.update({'name': 'Alessandro'}, {'name': 'John Doe'})
doc = db.people.find_one({'name': 'John Doe'})
print '\nAfter Update:', doc
# Go back to previous state
db.people.update({'name': 'John Doe'}, {'$set': {'phone': '+39123456789'}})
print '\nAfter $set phone:', db.people.find_one({'name': 'John Doe'})
db.people.update({'name': 'John Doe'}, {'$set': {'name': 'Alessandro'}})
print '\nAfter $set name:', db.people.find_one({'name': 'Alessandro'})
"""
Explanation: Updating Documents
Updating documents in MongoDB can be performed with the update method of the collection. Updating is actually one of the major sources of issues for new users as it doesn't change values in document like it does on SQL based databases, but instead it replaces the document with a new one.
Also note that the update operation doesn't perform update on each document identified by the query, by default only the first document is updated. To apply it to multiple documents it is required to explicitly specify the multi=true option
What you usually want to do is actually using the $set operator which changes the existing document instead of replacing it with a new one.
End of explanation
"""
db.blog.insert({'title': 'MongoDB is great!',
'author': {'name': 'Alessandro',
'surname': 'Molina',
'avatar': 'http://www.gravatar.com/avatar/7a952cebb086d2114080b4b39ed83cad.png'},
'tags': ['mongodb', 'web', 'scaling']})
db.blog.find_one({'title': 'MongoDB is great!'})
db.blog.find_one({'tags': 'mongodb'})
db.blog.find_one({'author.name': 'Alessandro'})
TAGS = ['mongodb', 'web', 'scaling', 'cooking']
import random
for postnum in range(1, 5):
db.blog.insert({'title': 'Post %s' % postnum,
'author': {'name': 'Alessandro',
'surname': 'Molina',
'avatar': 'http://www.gravatar.com/avatar/7a952cebb086d2114080b4b39ed83cad.png'},
'tags': random.sample(TAGS, 2)})
for post in db.blog.find({'tags': {'$in': ['scaling', 'cooking']}}):
print post['title'], '->', ', '.join(post['tags'])
"""
Explanation: SubDocuments
The real power of mongodb is released when you use subdocuments.
As each mongodb document is a JSON object (actually BSON, but that doesn't change much for the user), it can contain any data which is valid in JSON. Including other documents and arrays. This replaces "relations" between collections in multiple use cases and it's heavily more efficient as it returns all the data in a single query instead of having to perform multiple queries to retrieve related data.
As MongoDB fully supports subdocuments it is also possible to query on sub document fields and even query on arrays using the dot notation.
For example if you want to store a blog post in mongodb you might actually store everything, including author data and tags inside the blogpost itself:
End of explanation
"""
db.blog.ensure_index([('tags', 1)])
"""
Explanation: Indexing
Indexing is actually the most important part of MongoDB.
MongoDB has great support for indexing, and it supports single key, multi key, compound and hashed indexes. Each index type has its specific use case and can be used both for querying and sorting.
Single Key -> Those are plain indexes on a field
Multi Key -> Those are indexes created on an array field
Compound -> Those are indexes that cover more than one field.
Hashed -> Those are indexes optimized for equality comparison, they actually store the hash of the indexed value and are usually used for sharding.
In case of compound indexes they can also be used when only a part of the query filter is present into the index, there is also a special case of indexes called covering indexes which happen when the fields you are asking for are all available into the index. In that case MongoDB won't even access the collection and will directly serve you the data from the index. An index cannot be both a multi key index and a covering index.
Indexes are also ordered, so they can be created ASCENDING or DESCENDING.
Creating indexes can be done using the ensure_index method
End of explanation
"""
db.blog.find({'tags': 'mongodb'}).explain()['queryPlanner']['winningPlan']
db.blog.find({'tags': 'mongodb'}).hint([('_id', 1)]).explain()['queryPlanner']['winningPlan']
db.blog.find({'title': 'Post 1'}).explain()['queryPlanner']['winningPlan']
db.blog.ensure_index([('author.name', 1), ('title', 1)])
db.blog.find({'author.name': 'Alessandro'}, {'title': True, '_id': False}).explain()['queryPlanner']['winningPlan']
"""
Explanation: Checking which index MongoDB is using to perform a query can be done using the explain method, forcing an index into a query can be done using the hint method.
As MongoDB uses a statistical optimizer, using hint in queries can actually provide a performance boost as it avoids the "best option" lookup cost of the optimizer.
End of explanation
"""
from pymongo import MongoClient
client = MongoClient('mongodb://localhost:27017/')
db = client.twitter
# How many professors wrote a tweet?
# $match filters by regex on the free-text user description; the count is
# taken client-side by materialising the cursor.
print len(list(db.tweets.aggregate([
{'$match': {'user.description': {'$regex': 'Professor'}}}
])))
# Count them using only the pipeline
# Same filter, but the counting happens server-side via a $group/$sum stage.
print db.tweets.aggregate([
{'$match': {'user.description': {'$regex': 'Professor'}}},
{'$group': {'_id': 'count', 'count': {'$sum': 1}}}
]).next()['count']
# Hashtags frequency
# $project keeps only the hashtag texts, $unwind emits one document per tag,
# $group counts occurrences, and $match keeps tags seen more than 20 times.
print list(db.tweets.aggregate([
{'$project': {'tags': '$entities.hashtags.text', '_id': 0}},
{'$unwind': '$tags'},
{'$group': {'_id': '$tags', 'count': {'$sum': 1}}},
{'$match': {'count': {'$gt': 20}}}
]))
"""
Explanation: Aggregation Pipeline
The aggregation pipeline provided by the aggregation framework is a powerful feature in MongoDB that permits performing complex data analysis by passing the documents through a pipeline of operations.
MongoDB was created with the core philosophy that you are going to store your documents depending on the way you are going to read them. So to properly design your schema you need to know how you are going to use the documents. While this approach provides great performance benefits and is more concrete in case of web applications, it might not always be feasible.
In case you need to perform some kind of analysis your documents are not optimized for, you can rely on the aggreation framework to create a pipeline that transforms them in a way more practical for the kind of analysis you need.
How it works
The aggregation pipeline is a list of operations that gets executed one after the other on the documents of the collections. The first operation will be performed on all the documents, while successive operations are performed on the result of the previous steps.
If steps are able to take advantage of indexes they will, that is the case for a match or sort operator, if it appears at the begin of the pipeline. All operators start with a <span><strong>$</strong></span> sign
Stage Operators
project Reshapes each document in the stream, such as by adding new fields or removing existing fields. For each input document, outputs one document.
match Filters the document stream to allow only matching documents to pass unmodified into the next pipeline stage. match uses standard MongoDB queries. For each input document, outputs either one document (a match) or zero documents (no match).
limit Passes the first n documents unmodified to the pipeline where n is the specified limit. For each input document, outputs either one document (for the first n documents) or zero documents (after the first n documents).
skip Skips the first n documents where n is the specified skip number and passes the remaining documents unmodified to the pipeline. For each input document, outputs either zero documents (for the first n documents) or one document (if after the first n documents).
unwind Deconstructs an array field from the input documents to output a document for each element. Each output document replaces the array with an element value. For each input document, outputs n documents where n is the number of array elements and can be zero for an empty array.
group Groups input documents by a specified identifier expression and applies the accumulator expression(s), if specified, to each group. Consumes all input documents and outputs one document per each distinct group. The output documents only contain the identifier field and, if specified, accumulated fields.
sort Reorders the document stream by a specified sort key. Only the order changes; the documents remain unmodified. For each input document, outputs one document.
geoNear Returns an ordered stream of documents based on the proximity to a geospatial point. Incorporates the functionality of match, sort, and limit for geospatial data. The output documents include an additional distance field and can include a location identifier field.
out Writes the resulting documents of the aggregation pipeline to a collection. To use the $out stage, it must be the last stage in the pipeline.
Expression Operators
Each stage operator can work with one or more expression operator which allow to perform actions during that stage, for a list of expression operators see http://docs.mongodb.org/manual/reference/operator/aggregation/#expression-operators
Pipeline Examples
Examples are based on twitter database from the same S3 bucket used in MrJob examples imported in mongodb using:
$ curl -O http://panisson-bigdive.s3.amazonaws.com/twitter/2011-02-11/2011-02-11.json.aa.gz
$ gunzip 2011-02-11.json.aa.gz
$ mongoimport --db twitter --collection tweets /Users/adrianopagano/Desktop/Big_Dive/BigDive5/Data/2011-02-11.json.aa
2015-06-21T17:18:06.908+0200 connected to: localhost
2015-06-21T17:18:09.896+0200 [#########...............] twitter.tweets 19.6 MB/50.0 MB (39.3%)
2015-06-21T17:18:12.900+0200 [###################.....] twitter.tweets 41.1 MB/50.0 MB (82.2%)
2015-06-21T17:18:13.720+0200 imported 20000 documents
End of explanation
"""
freqs = db.tweets.map_reduce(
map='''function() {
var tags = this.entities.hashtags;
for(var i=0; i<tags.length; i++)
emit(tags[i].text, 1);
}''',
reduce='''function(key, values) {
return Array.sum(values);
}''',
out='tagsfrequency'
)
print(list(
db.tagsfrequency.find({'value': {'$gt': 10}}).sort([('value', -1)])
))
print freqs
db.tweets.find_one()
freqs = db.tweets.map_reduce(
map='''function() {
var tags = this.user.screen_name;
emit(tags, 1);
}''',
reduce='''function(key, values) {
return Array.sum(values);
}''',
out='namefrequency'
)
print(list(
db.namefrequency.find().sort([('value', -1)]).limit(10)
))
"""
Explanation: MapReduce
MongoDB is powered by the V8 javascript engine, this means that each mongod node is able to run javascript code.
With an high enough number of mongod nodes, you actually end up with a powerful execution environment for distributed code that also copes with the major problem of data locality.
For this reason MongoDB exposes a mapreduce function which can be leveraged in shareded environments to run map reduce jobs.
Note that the Aggregation Pipeline is usually faster than the mapReduce feature, and it scales with the number of nodes as mapReduce, so you should rely on MapReduce only when the algorithm cannot be efficiently expressed with the Aggregation Pipeline.
End of explanation
"""
from pymongo import MongoClient
client = MongoClient('mongodb://localhost:27017/')
db = client.twitter
db.tweets.map_reduce(
map='''function() {
var tags = this.entities.hashtags;
for(var i=0; i<tags.length; i++)
emit(tags[i].text, 1);
}''',
reduce='''function(key, values) {
return Array.sum(values);
}''',
out='tagsfrequency'
)
print(list(
db.tagsfrequency.find({'value': {'$gt': 10}})
))
"""
Explanation: Exporting from MongoDB
There are cases you might want to export the results of a mongodb query to make possible to process them into another system, this might be the case for an EMR job which has to perform operations on data stored on MongoDB.
The most simple solution to export those data in a format recognized by EMR and MrJob is using the mongoexport tool provided with mongodb itself. The tool is able to export data in a format recognized by MrJob JSONValueProcotol so you can upload it to S3 and directly process it from EMR.
For example, exporting all the data for the web tag, can be easily done using:
$ ./mongoexport -d phonebook -c blog -o /tmp/data.json -q '{"tags": "web"}'
This will write the data to /tmp/data.json in a format recognized by JSONValueProtocol, full list of options can be seen using --help, in the previous example the following options were used:
-d -> to specify the database
-c -> to specify the collection in the database
-o -> to write output to /tmp/data.json
-q -> to filter output by the provided query
Sharding
Sharding, or horizontal scaling, divides the data set and distributes the data over multiple servers, or shards. Each shard is an independent database, and collectively, the shards make up a single logical database.
Chunk
The whole set of data is divided in Chunks, chunk are then distributed as equally as possible through all the nodes
Shard Key
The shard key is the Document property on which chunks are decided, the range of shard key possible values is divided in chunks and each chunk is assigned to a node. Document which near values for the shard key will end up being in the same chunk and so on the same node.
Shard
Each MongoDB node or ReplicaSet that contains part of the sharded data.
Router
The routers is the interface to the cluster, each query and operation will be performed against the router. The router is then in charge of forwarding the operation to one or multiple shards and gather the results.
Config Server
The config servers keep track of chunks distribution, they know which shard contains which chunk and which values are kept inside each chunk. Whenever the router has to perform an operation or split chunks that became too big it will read and write chunks distribution from the config servers.
Setting Up a Sharded Cluster
To properly setup a sharded environment at least 1 mongos, 2 shards and 1 config server are required. That's the minimum requirement for a test environment and is not suitable for production usage.
First we need to create the directories for each node:
```
$ mkdir /tmp/mongocluster
$ mkdir /tmp/mongocluster/n0
$ mkdir /tmp/mongocluster/n1
$ mkdir /tmp/mongocluster/n2
$ mkdir /tmp/mongocluster/c0
```
Then we need to start the shards:
$ mongod --port 27016 --dbpath /tmp/mongocluster/n0
$ mongod --port 27015 --dbpath /tmp/mongocluster/n1
$ mongod --port 27014 --dbpath /tmp/mongocluster/n2
Then we need to start at least a config server:
$ mongod --configsvr --dbpath /tmp/mongocluster/c0 --port 27019
Now that all the required nodes are up, we can finaly start the mongos router which is in charge of actually providing the sharding functionality:
$ mongos --configdb 127.0.0.1:27019
Now all the required nodes are up and running, but we still didn't configure any sharded environment.
The first step required to setup a sharding environment is to actually add the nodes to the cluster.
To do so we need to connect to the mongos and issue the sh.addShard command:
$ mongo
MongoDB shell version: 3.0.4
connecting to: test
mongos> sh.addShard('127.0.0.1:27016')
{ "shardAdded" : "shard0000", "ok" : 1 }
mongos> sh.addShard('127.0.0.1:27015')
{ "shardAdded" : "shard0001", "ok" : 1 }
mongos> sh.addShard('127.0.0.1:27014')
{ "shardAdded" : "shard0002", "ok" : 1 }
Now that our shards have been added to the cluster we can turn on sharding for databases and collections.
Only sharded collections will actually be sharded across the nodes.
We are going to shard our collection of tweets, so the first step is to enable sharding for the twitter database:
mongos> sh.enableSharding('twitter')
{ "ok" : 1 }
Now we need to provide the actual sharding key for our tweets collection. Until a sharding key is provided, no sharding happens. To ensure that tweets are properly distributed across nodes we are going to shard by the screen name of the author:
mongos> sh.shardCollection("twitter.tweets", {'user.screen_name': 1})
{ "collectionsharded" : "twitter.tweets", "ok" : 1 }
Now we can finally import our data and see that it gets distributed across the nodes:
$ mongoimport --db twitter --collection tweets 2011-02-11.json.aa
To check that our data has properly distributed across nodes:
mongos> use twitter
switched to db twitter
mongos> db.printShardingStatus()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("558fc718712e65efc2a378d9")
}
shards:
{ "_id" : "shard0000", "host" : "localhost:27016" }
{ "_id" : "shard0001", "host" : "localhost:27015" }
{ "_id" : "shard0002", "host" : "localhost:27014" }
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
4 : Success
databases:
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "test", "partitioned" : false, "primary" : "shard0002" }
{ "_id" : "twitter", "partitioned" : true, "primary" : "shard0002" }
twitter.tweets
shard key: { "user.screen_name" : 1 }
chunks:
shard0000 2
shard0001 2
shard0002 2
{ "user.screen_name" : { "$minKey" : 1 } } -->> { "user.screen_name" : "111111121111111" } on : shard0000 Timestamp(2, 0)
{ "user.screen_name" : "111111121111111" } -->> { "user.screen_name" : "YohannaCS" } on : shard0001 Timestamp(3, 0)
{ "user.screen_name" : "YohannaCS" } -->> { "user.screen_name" : "graciadelcielo" } on : shard0000 Timestamp(4, 0)
{ "user.screen_name" : "graciadelcielo" } -->> { "user.screen_name" : "nosso_surita" } on : shard0001 Timestamp(5, 0)
{ "user.screen_name" : "nosso_surita" } -->> { "user.screen_name" : "yuuki_gei" } on : shard0002 Timestamp(5, 1)
{ "user.screen_name" : "yuuki_gei" } -->> { "user.screen_name" : { "$maxKey" : 1 } } on : shard0002 Timestamp(1, 6)
NOTE: Splitting by a date or sequential values is usually not a good idea, as you end up enforcing all the workload on the primary node that contains the most recent data.
End of explanation
"""
sc
data = sc.parallelize([1,2,3,4,5,6])
data.first()
def multiply(v):
    """Double the given value (element mapper for the Spark RDD demo)."""
    return 2 * v
multdata = data.map(multiply)
print multdata
multdata.collect()
sc.defaultParallelism
values = range(20)
print values
rddvalues = sc.parallelize(values)
def sumall(i):
    """mapPartitions helper: yield one list containing every element of partition `i`.

    Materialising the iterator makes the per-partition grouping visible
    (one list per core/partition in the demo).
    """
    yield [element for element in i]
rddvalues.mapPartitions(sumall).collect()
def summap(v):
    """Map helper: increment a value by one."""
    return 1 + v
rddvalues.map(summap).collect()
def sumtwo(a, b):
    """Reduce helper: add two values together."""
    total = a + b
    return total
rddvalues.reduce(sumtwo)
"""
Explanation: You will notice that the map_reduce command has now been properly split across the nodes of our cluster. Our shards should report in their logs something like:
2015-06-28T12:31:58.231+0200 I COMMAND [conn4] command twitter.$cmd command: mapReduce { mapreduce: "tweets", map: "function() {
var tags = this.entities.hashtags;
for(var i=0; i<tags.length; i++)
emit(tags[i].text, 1);
}", reduce: "function(key, values) {
return Array.sum(values);
}", out: "tmp.mrs.tweets_1435487518_0", shardedFirstPass: true } ntoreturn:1 keyUpdates:0 writeConflicts:0 numYields:0 reslen:151 locks:{ Global: { acquireCount: { r: 2225, w: 1068, W: 3 } }, MMAPV1Journal: { acquireCount: { r: 575, w: 2130 } }, Database: { acquireCount: { r: 535, w: 1060, R: 42, W: 11 } }, Collection: { acquireCount: { R: 535, W: 1063 } }, Metadata: { acquireCount: { W: 8 } } } 102ms
Performances
Journal Performances
Write performance is reduced by 5-30%
For apps that are write-heavy (1000+ writes per server) there can be slowdown due to mix of journal and data flushes.
To avoid Journal Overhead save the journal on a separate DISK from data, it will lower the journal overhead down to 3%.
Fragmentation
Files can get fragmented over time if remove() and update() are issued.
* It gets worse if documents have varied sizes
* Fragmentation wastes disk space and RAM
* Also makes writes scattered and slower (have to lookup for an empty slot in extent)
* Fragmentation can be checked by comparing size to storageSize in the collection’s stats
* nmoved=1 in logs means document has been resized and moved to another extent
PowerOf2Allocation is default on 2.6, is more efficient in case of updates/remove as each record has a size in bytes that is a power of 2 (e.g. 32, 64, 128, 256, 512...) so when updating documents they probably have not need to be moved (if document was 200bytes it will have up to 56 more bytes before needing to be reallocated) and when deleted it will leave a slot that can be reused for another document as it will match for sure the same size being rounded to powers of 2.
https://github.com/10gen-labs/storage-viz helps debugging storage, RAM and fragmentation.
Replication Lag
Secondaries underspec’d vs primaries
Access patterns between primary and secondaries
Insufficient bandwidth (Estimate required bandwidth to sync: op/sec * docsize + 40%)
Foreground index builds on secondaries
https://github.com/rueckstiess/mtools helps debugging operations logs and slow replication
PySpark
=======
An RDD can be considered as huge list without any keys
Driver --> The node that asks for execution
Master --> The node that coordinates all nodes
Worker -> The node that actually performs computation
Transformation
RDD = [1,2,3]
map
when using map the final RDD is a list of list = [[1], [1,1], [1,1,1]]
flatmap
when using flatmap the final RDD is a flat list = [1,1,1,1,1,1]
filter
= [2]
union
= RDD1 + RDD2
intersect
= RDD1 & RDD2
reduceByKey
= [(key, value)]
Actions
RDD = [1,2,3]
reduce()
= 6
collect()
= [1,2,3]
take(1)
= [1]
How to download Spark
To use Spark you need Java
Download http://blackhole.test.axantweb.com/pyspark-1.6.1.tgz
To check if it worked: expand the directory, cd into it and run ./pyspark.sh.
If error Exception in thread "main" use this solution:
sudo rm /usr/bin/java
sudo ln -s /Library/Internet\ Plug-Ins/JavaAppletPlugin.plugin/Contents/Home/bin/java /usr/bin
To use Spark in Jupyter:
Activate virtual env where Jupyter Notebook is installed
IPYTHON_OPTS="notebook" ./pyspark.sh
if the notebook is in another directory: `IPYTHON_OPTS="notebook --notebook-dir=~/path/to/notebooks" ./pyspark.sh`
End of explanation
"""
# Two overlapping integer ranges to demonstrate set-style RDD operations.
low = sc.parallelize(range(1,10))
high = sc.parallelize(range(5,15))
low.union(high).collect()  # concatenation -- keeps duplicates
low.intersection(high).collect()
low.union(high).distinct().collect()  # union with duplicates removed
# Load a text file as an RDD of lines and count the lines.
text = sc.textFile('./Jungle_Book.txt')
print text
text.count()
def splitlines(line):
    """Break one line of text into whitespace-separated words."""
    words = line.split()
    return words
# flatMap flattens the per-line word lists into one flat RDD of words.
words = text.flatMap(splitlines)
words.count()
words.take(15)
def get_freq(word):
    """Pair a word with an initial count of 1 for word counting."""
    pair = (word, 1)
    return pair
def get_count(a, b):  # this will be used with reduceByKey
    """Add two partial counts for the same key."""
    combined = a + b
    return combined
def switch_tuple(t):  # this is needed to sort by Key (flips the passed tuple)
    """Flip a (key, value) pair to (value, key) so sortByKey sorts on value."""
    key, value = t
    return value, key
# Classic word count: (word, 1) pairs summed per key, flipped to (count, word)
# so sortByKey(0, 1) orders by descending frequency; show the top 20.
print text.flatMap(splitlines).map(get_freq).reduceByKey(get_count).map(switch_tuple).sortByKey(0, 1).collect()[:20]
"""
Explanation: When working on one value use map.
When working > 1 value use reduce.
End of explanation
"""
|
Qumulo/python-notebooks | notebooks/Auto provision a new user.ipynb | gpl-3.0 | cluster = 'XXXXX' # Qumulo cluster hostname or IP where you're setting up users
api_user = 'XXXXX' # Qumulo api user name
api_password = 'XXXXX' # Qumulo api password
base_dir = 'XXXXX' # the parent path where the users will be created.
user_name = 'XXXXX' # the new "user" to set up.
import os
import sys
import traceback
from qumulo.rest_client import RestClient
from qumulo.rest.nfs import NFSRestriction
# Absolute path of the new user's home directory.
full_path = '/'+ base_dir + '/' + user_name
# Authenticate against the cluster's REST API (port 8000).
rc = RestClient(cluster, 8000)
rc.login(api_user, api_password)
def create_dir(rc, name, dir_path='/'):
    """Create a directory on the cluster, reporting (not raising) failures.

    Parameters
    ----------
    rc : RestClient
        An authenticated Qumulo REST client.
    name : str
        Name of the directory to create.
    dir_path : str
        Parent path under which the directory is created (default: root).
    """
    try:
        rc.fs.create_directory(name = name, dir_path = dir_path)
    except Exception as exc:
        # Most likely "directory already exists"; print and continue so the
        # provisioning run is idempotent. Catching Exception (not a bare
        # except:) keeps KeyboardInterrupt/SystemExit propagating.
        print("Exception: %s" % exc)
# Create base user directory, if it doesn't already exist.
# create_dir prints (rather than raises) errors, so re-running is safe.
create_dir(rc, name=base_dir, dir_path='/')
"""
Explanation: Auto-provision a new user
Create a home directory
Create NFS export
Create SMB share
Create quota
Set up daily snapshots
Prerequisites
Install the qumulo api via pip install qumulo_api, or download it from your Qumulo cluster on the API & Tools page
set up all the variables in the cell below
End of explanation
"""
# Create the user's home directory and keep its file id for later API calls.
dir_res = rc.fs.create_directory(name=user_name, dir_path='/'+ base_dir)
print("Directory '%s' created with id: %s" % (full_path, dir_res['file_number']))
dir_id = dir_res['file_number']
"""
Explanation: Create directory
End of explanation
"""
# Cap the home directory at 20 GB (limit is expressed in bytes).
quota_res = rc.quota.create_quota(id_ = dir_id, limit_in_bytes = 20000000000)
"""
Explanation: Create 20GB Quota
End of explanation
"""
# Export the home directory over NFS: read-write, no host restrictions,
# no user id mapping (map_to_user_id is ignored with NFS_MAP_NONE).
nfs_res = rc.nfs.nfs_add_share(export_path = '/' + user_name,
                fs_path = full_path,
                description = "%s home directory" % user_name,
                restrictions = [NFSRestriction({
                    'read_only': False,
                    'host_restrictions': [],
                    'user_mapping': 'NFS_MAP_NONE',
                    'map_to_user_id': '0'})]
               )
print("NFS export created: %s with id %s" % (full_path, nfs_res['id']))
"""
Explanation: Create NFS export
End of explanation
"""
# Expose the same directory over SMB, named after the user.
smb_res = rc.smb.smb_add_share(share_name = user_name,
                 fs_path = full_path,
                 description = "%s home directory" % user_name
                )
print("SMB share created: %s with id %s" % (full_path, smb_res['id']))
"""
Explanation: Create SMB share
End of explanation
"""
# Daily snapshot at 02:15 America/Los_Angeles, retained for 7 days.
snap_res = rc.snapshot.create_policy(name = "User %s" % user_name,
                 schedule_info = {"creation_schedule":
                                    {"frequency":"SCHEDULE_DAILY_OR_WEEKLY",
                                    "hour":2,"minute":15,
                                    "on_days":["MON","TUE","WED","THU","FRI","SAT","SUN"],
                                    "timezone":"America/Los_Angeles"},
                                 "expiration_time_to_live":"7days"
                                },
                 directory_id = str(dir_id),
                 enabled = True)
print("Snapshot policy created with id %s" % snap_res['id'])
"""
Explanation: Set up snapshot policy
End of explanation
"""
# Tear down everything created above (quota, snapshot policy, shares),
# then recursively delete the home directory itself.
rc.quota.delete_quota(id_ = quota_res['id'])
rc.snapshot.delete_policy(policy_id = snap_res['id'])
rc.smb.smb_delete_share(id_ = smb_res['id'])
rc.nfs.nfs_delete_share(id_ = nfs_res['id'])
if full_path != '/': # small sanity check since tree delete is rather powerful.
    rc.fs.delete_tree(path = full_path)
print("Everything is cleaned up!")
"""
Explanation: Clean up everything
End of explanation
"""
|
PMEAL/OpenPNM | examples/reference/uncategorized/the_problem_with_domain_length_and_area.ipynb | mit | import matplotlib.pyplot as plt
import openpnm as op
%config InlineBackend.figure_formats = ['svg']
import numpy as np
np.random.seed(10)  # fixed seed so random networks below are reproducible
pn = op.network.Cubic(shape=[4, 4, 1])  # 4x4 single-layer cubic network
"""
Explanation: Problem with Domain Area and Length
In order to find network properties such as permeability using Darcy's law, it is necessary to know the domain length and area. At first glance this might seem as simple as finding the maxima and minima of the pore coordinates in the direction of interest; however it is a bit more subtle than that as will be explained in this notebook. Consider the simple cubic network:
End of explanation
"""
# NBVAL_IGNORE_OUTPUT
# Draw pore centers and the throats connecting them on one set of axes.
fig, ax = plt.subplots()
op.topotools.plot_coordinates(pn, markersize=500, ax=ax)
op.topotools.plot_connections(pn, ax=ax)
"""
Explanation: Now let's plot the coordinates and connections between them:
End of explanation
"""
# Coordinates of the leftmost pores -- note x starts at 0.5 (cell centers).
pn['pore.coords'][pn.pores('left'), :]
"""
Explanation: Let's say we'd like to know the length of the domain from left to right. The pore coordinates for the leftmost pores are:
End of explanation
"""
# Bounding-box length from pore centers; misses half a cell on each side.
L = pn['pore.coords'][:, 0].max() - pn['pore.coords'][:, 0].min()
print(L)
"""
Explanation: Note that the 'minimum' value is 0.5 rather than 0.0. This is because the pore coordinates point to the center of the region that belongs to the pore. In the case of the simple cubic with a spacing of 1, pore 0 lies at [0.5, 0.5, 0.5], while the bounding box around the pore goes from [0, 0, 0], to [1, 1, 1].
Therefore, if we use pore coordinates to find the domain length we'd do:
End of explanation
"""
# A random (Delaunay) network: 20 points in a unit square (2-D domain).
dn = op.network.Delaunay(points=20, shape=[1, 1, 0])
"""
Explanation: While in reality the domain is 4 pores long, so the correct answer should be 4.0. The extra 0.5 of the cells at the left and right edges is not included.
It may seem simple enough to automatically add a full lattice cell to the calculation, but this cannot be assumed since many networks are random. Consider the Delaunay network:
End of explanation
"""
# Remove the boundary pores so only the random interior remains, then plot.
op.topotools.trim(network=dn, pores=dn.pores(['left', 'right', 'front', 'back']))
# NBVAL_IGNORE_OUTPUT
fig, ax = plt.subplots()
op.topotools.plot_coordinates(dn, markersize=500, ax=ax)
op.topotools.plot_connections(dn, ax=ax)
"""
Explanation: Let's first remove the boundary pores to ensure the network is fully random:
End of explanation
"""
# Solve Stokes flow across a 4x4x4 network with a 100 kPa pressure drop
# imposed between the 'left' (200 kPa) and 'right' (100 kPa) faces.
pn = op.network.Cubic(shape=[4, 4, 4])
geo = op.geometry.SpheresAndCylinders(network=pn, pores=pn.Ps, throats=pn.Ts)
air = op.phases.Air(network=pn)
phys = op.physics.Standard(network=pn, phase=air, geometry=geo)
sf = op.algorithms.StokesFlow(network=pn, phase=air)
sf.set_value_BC(pores=pn.pores('left'), values=200000)
sf.set_value_BC(pores=pn.pores('right'), values=100000)
sf.run()
"""
Explanation: Now it's quite clear that finding the domain size by assuming a bounding box around the pore coordinates does not tell the whole story. How much 'extra' length should be added beyond the extreme pores? This is not possible to know based only on the pore coordinates, which is the only information OpenPNM has.
The end result of this scenario is that when computing anything that requires length and area, these values must be specified by the user. Consider the permeability coefficient:
End of explanation
"""
# Without explicit area/length OpenPNM estimates them from pore-coordinate
# extrema (the faulty logic discussed above) and issues a warning.
perm=op.metrics.AbsolutePermeability(network=pn)
K=perm.run()
print(K)
"""
Explanation: If you do not specify the domain area and length, OpenPNM will attempt to estimate these values using the faulty logic of minima and maxima of boundary pores, but will issue a warning so you know that errors are present.
End of explanation
"""
# Supply the true cross-sectional area (4x4 cells) and length (4 cells).
A = 16
L = 4
perm=op.metrics.AbsolutePermeability(network=pn)
# NOTE(review): _update is a private settings method -- verify against the
# public settings API of the installed OpenPNM version.
perm.settings._update({
    'area': A,
    'length':L})
K=perm.run()
print(K)
"""
Explanation: To get correct answers, it is necessary to specify the domain sizes as follows:
End of explanation
"""
# Darcy's law by hand: K = Q*mu*L / (A*dP), using the solved flow rate
# through the inlet face.
mu = air['pore.viscosity'].mean()
Q = sf.rate(pores=pn.pores('left'))
dP = 200000 - 100000
K = Q*mu*L/(A * dP)
print(K)
"""
Explanation: Of course, the most correct way to calculate K is to do it manually using Darcy's law:
End of explanation
"""
|
robertoalotufo/ia898 | deliver/Atividade_2_3.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import sys,os
# Make the local ia898 package importable regardless of the notebook's CWD.
ia898path = os.path.abspath('../../')
if ia898path not in sys.path:
    sys.path.append(ia898path)
import ia898.src as ia
!ls -l ../../ia898/data
# Load the retina image, display it, then plot its gray-level histogram.
f = mpimg.imread('../data/retina.tif')
plt.imshow(f,cmap='gray');
h = ia.histogram(f)
plt.plot(h);
"""
Explanation: Histograma e Estatística de uma imagem
Histograma de uma imagem
A imagem normalmente é composta de um grande número de pixels; hoje em dia,
celulares tiram fotografias com a resolução espacial que produz alguns milhões de pixels.
Uma das caracterizações ou "assinaturas" mais eficientes da imagem é seu histograma.
Estude o pequeno tutorial disponível em:
Tutorial - Histograma e Estatísticas
e após
isto, plote com adshow(iaplot()) o histograma da imagem a seguir:
End of explanation
"""
%%time
# Statistics computed directly on the full image (millions of pixels).
print('f.mean()=', f.mean())
print('np.median(f)=', np.median(f))
%%time
# Same statistics from the 256-bin histogram -- far less data to touch.
h = ia.histogram(f)
v = ia.h2stats(h)
print('média:',v[0])
print('mediano:',v[8])
"""
Explanation: Uso do matplotlib
Veja aqui neste breve tutorial como utilizar o matplotlib:
tutorial_matplotlib
Estatísticas de uma imagem
Estude a função ia636:iah2stats que calcula diversas estatísticas da imagem a partir de
seu histograma. Observe que estas estatísticas podem ser obtidas tanto diretamente da imagem como do histograma.
A grande vantagem de calcular a estatística a partir do histograma é que o histograma é tipicamente um vetor de
256 elementos enquanto que a imagem pode ser de milhões de pixels. Assim, quando se precisa calcular diversos
valores estatísticos de uma imagem, calcula-se seu histograma e posteriormente seus valores estatísticos, calculados
a partir do histogram.
Como ilustração, iremos medir o tempo para calcular a média (np.mean) e o valor mediano (np.median) fazendo o cálculo diretamente na imagem e comparar com o tempo de se calcular estas estatísticas a partir do histograma:
End of explanation
"""
plt.imshow(f,cmap='gray')
# ia.h2stats returns a fixed-order vector of statistics derived from the
# histogram; the indices are labeled by the print statements below.
h = ia.histogram(f)
v = ia.h2stats(h)
print('mean =',v[0])
print('variance =',v[1])
print('skewness =',v[2])
print('kurtosis = ',v[3])
print('entropy = ',v[4])
print('mode = ',v[5])
print('percentil 1% = ',v[6])
print('percentil 10% = ',v[7])
print('percentil 50% = ',v[8])
print('percentil 90% = ',v[9])
print('percentil 99% = ',v[10])
"""
Explanation: Procure utilizar a função iah2stats operando no histograma para calcular estes mesmos valores.
utilize ia.iah2stats para calcular média e mediana da imagem da retina.tif
End of explanation
"""
# Uniform random image: its histogram should be approximately flat.
f1 = np.random.randint(0,256, (128,256)).astype(np.uint8)
# NOTE(review): plt.imshow's second positional argument is cmap, not a
# title -- passing 'f1' here will raise; verify against the exercise intent.
plt.imshow( f1, 'f1')
h1 = ia.histogram(f1)
plt.bar( np.arange(h1.size), h1)
# NOTE(review): fig2 is never defined, and adshow / ia.iafig2img /
# ia.iahistogram are not defined or imported above -- this cell appears
# to be exercise scaffolding that does not run as-is.
plt.imshow( ia.iafig2img(fig2), 'histograma de f1')
f_2 = np.resize(np.arange(256, dtype=np.uint8), (128,256)) # ramp image
# Shuffle the ramp: every gray level appears equally often, so the
# histogram is exactly flat even though the image looks like noise.
f2 = np.ravel(f_2)
np.random.shuffle(f2)
adshow(f2.reshape(f_2.shape), 'f2')
h2 = ia.iahistogram(f2)
fig3 = plt.figure()
plt.bar( np.arange(h2.size), h2)
adshow( ia.iafig2img(fig3), 'histograma de f2')
# Gaussian noise normalized to uint8: histogram is bell-shaped.
f_3 = np.random.randn(128,256)
f3 = ia.ianormalize(f_3).astype(np.uint8)
adshow(f3, 'f3')
h3 = ia.iahistogram(f3)
fig4 = plt.figure()
plt.bar( np.arange(h3.size), h3)
adshow( ia.iafig2img(fig4), 'histograma de f3')
"""
Explanation: Histograma de imagens aleatórias
Calcule e plote os histogramas das imagens f1, f2 e f3 a seguir, dando uma justificativa para cada caso em relação ao formato do histograma.
Aproveite para estudar as funções de geração de dados aleatórios "randint", "shuffle" e "random.normal":
End of explanation
"""
|
SnowMasaya/Chainer-with-Neural-Networks-Language-model-Hands-on-Advance | .ipynb_checkpoints/chainer-natual-language-processing-checkpoint.ipynb | mit | import time
import math
import sys
import pickle
import copy
import os
import re
import numpy as np
from chainer import cuda, Variable, FunctionSet, optimizers
import chainer.functions as F
"""
Explanation: Introduction GPU
Chainer とはニューラルネットの実装を簡単にしたフレームワークです。
今回は言語の分野でニューラルネットを適用してみました。
今回は言語モデルを作成していただきます。
言語モデルとはある単語が来たときに次の単語に何が来やすいかを予測するものです。
言語モデルにはいくつか種類があるのでここでも紹介しておきます。
n-グラム言語モデル
単語の数を単純に数え挙げて作成されるモデル。考え方としてはデータにおけるある単語の頻度に近い
ニューラル言語モデル
単語の辞書ベクトルを潜在空間ベクトルに落とし込み、ニューラルネットで次の文字を学習させる手法
リカレントニューラル言語モデル
基本的なアルゴリズムはニューラル言語モデルと同一だが過去に使用した単語を入力に加えることによって文脈を考慮した言語モデルの学習が可能となる。ニューラル言語モデルとは異なり、より古い情報も取得可能
以下では、このChainerを利用しデータを準備するところから実際に言語モデルを構築し学習・評価を行うまでの手順を解説します。
各種ライブラリ導入
初期設定
データ入力
リカレントニューラル言語モデル設定
学習を始める前の設定
パラメータ更新方法(確率的勾配法)
言語の予測
もしGPUを使用したい方は、以下にまとめてあるのでご参考ください。
Chainer を用いてリカレントニューラル言語モデル作成のサンプルコードを解説してみた
1.各種ライブラリ導入
Chainerの言語処理では多数のライブラリを導入します。
End of explanation
"""
#-------------Explain7 in the Qiita-------------
n_epochs = 30  # number of passes over the training corpus
n_units = 641  # embedding / hidden-layer dimensionality
batchsize = 200  # parallel streams per minibatch
bprop_len = 40  # truncated-BPTT length (how far gradients flow back)
grad_clip = 0.3  # gradient clipping threshold
gpu_ID = 0  # GPU device id; set negative to run on CPU
data_dir = "data_hands_on"  # directory holding the training corpus
checkpoint_dir = "cv"  # where model snapshots are written
xp = cuda.cupy if gpu_ID >= 0 else np  # array module: cupy on GPU, numpy on CPU
#-------------Explain7 in the Qiita-------------
"""
Explanation: `導入するライブラリの代表例は下記です。
numpy: 行列計算などの複雑な計算を行なうライブラリ
chainer: Chainerの導入
2.初期設定
学習回数、ユニット数、確率的勾配法に使用するデータの数、学習に使用する文字列の長さ、勾配法で使用する敷居値、学習データの格納場所、モデルの出力場所を設定しています。
End of explanation
"""
# input data
#-------------Explain1 in the Qiita-------------
def source_to_words(source):
    """Tokenize JavaScript source text into a flat list of word tokens.

    Newlines/tabs are normalized to spaces, common punctuation tokens are
    padded with spaces so they become separate words, and the result is
    split on whitespace.
    """
    # Bug fix: the original replaced "¥n"/"¥t" (literal yen-sign sequences,
    # an artifact of a Japanese keyboard layout) which never match actual
    # newline/tab characters; use real escape sequences instead.
    line = source.replace("\n", " ").replace("\t", " ")
    for spacer in ["(", ")", "{", "}", "[", "]", ",", ";", ":", "++", "!", "$", '"', "'"]:
        # Surround each punctuation token with spaces so split() isolates it.
        line = line.replace(spacer, " " + spacer + " ")
    words = [w.strip() for w in line.split()]
    return words
def load_data():
    """Read the training corpus and encode each word as an integer id.

    Returns
    -------
    dataset : np.ndarray of int32
        The corpus with each word replaced by its vocabulary id.
    words : list of str
        The tokenized corpus, in order.
    vocab : dict
        Mapping word -> integer id (ids assigned in order of first sight).
    freq : dict
        Mapping word -> number of occurrences.
    """
    vocab = {}
    corpus_path = '%s/angular_full_remake.js' % data_dir
    # Report the actual file being read (the original printed 'angular.js',
    # which did not match the file opened below).
    print(corpus_path)
    # Use a context manager so the file handle is closed (original leaked it).
    with open(corpus_path, 'r') as f:
        source = f.read()
    words = source_to_words(source)
    freq = {}
    dataset = np.ndarray((len(words),), dtype=np.int32)
    for i, word in enumerate(words):
        if word not in vocab:
            vocab[word] = len(vocab)
            freq[word] = 0
        dataset[i] = vocab[word]
        freq[word] += 1
    print('corpus length:', len(words))
    print('vocab size:', len(vocab))
    return dataset, words, vocab, freq
#-------------Explain1 in the Qiita-------------
# Ensure the checkpoint directory exists, load the corpus, then show the
# 50 most and least frequent tokens as a sanity check on the tokenizer.
if not os.path.exists(checkpoint_dir):
    os.mkdir(checkpoint_dir)
train_data, words, vocab, freq = load_data()
for f in ["frequent", "rarely"]:
    print("{0} words".format(f))
    print(sorted(freq.items(), key=lambda i: i[1], reverse=True if f == "frequent" else False)[:50])
"""
Explanation: 3.データ入力
学習用にダウンロードしたファイルをプログラムに読ませる処理を関数化しています
学習データをバイナリ形式で読み込んでいます。
文字データを確保するための行列を定義しています。
データを単語をキー、長さを値とした辞書データにして行列データセットに登録しています。
学習データ、単語の長さ、語彙数を取得しています。
上記をそれぞれ行列データとして保持しています。
End of explanation
"""
#-------------Explain2 in the Qiita-------------
class CharRNN(FunctionSet):
    """Two-layer LSTM word-level language model (Chainer v1 API).

    The embedding maps word ids into an n_units-dimensional space. Each
    linear layer feeding an LSTM outputs 4*n_units values because an LSTM
    needs the input, forget and output gates plus the candidate cell input.
    """
    def __init__(self, n_vocab, n_units):
        super(CharRNN, self).__init__(
            embed = F.EmbedID(n_vocab, n_units),
            l1_x = F.Linear(n_units, 4*n_units),
            l1_h = F.Linear(n_units, 4*n_units),
            l2_h = F.Linear(n_units, 4*n_units),
            l2_x = F.Linear(n_units, 4*n_units),
            l3 = F.Linear(n_units, n_vocab),
        )
        # Uniform initialization in [-0.08, 0.08] for all parameters.
        for param in self.parameters:
            param[:] = np.random.uniform(-0.08, 0.08, param.shape)

    def forward_one_step(self, x_data, y_data, state, train=True, dropout_ratio=0.7):
        """Advance the RNN one step; return (new_state, softmax CE loss vs y)."""
        x = Variable(x_data, volatile=not train)
        t = Variable(y_data, volatile=not train)
        h0 = self.embed(x)
        # Layer 1: dropped-out input projection plus the recurrent term.
        h1_in = self.l1_x(F.dropout(h0, ratio=dropout_ratio, train=train)) + self.l1_h(state['h1'])
        c1, h1 = F.lstm(state['c1'], h1_in)
        # Layer 2 stacked on layer 1's output.
        h2_in = self.l2_x(F.dropout(h1, ratio=dropout_ratio, train=train)) + self.l2_h(state['h2'])
        c2, h2 = F.lstm(state['c2'], h2_in)
        y = self.l3(F.dropout(h2, ratio=dropout_ratio, train=train))
        state = {'c1': c1, 'h1': h1, 'c2': c2, 'h2': h2}
        return state, F.softmax_cross_entropy(y, t)

    def predict(self, x_data, state):
        """Advance one step without dropout; return (new_state, softmax probs)."""
        x = Variable(x_data, volatile=True)
        h0 = self.embed(x)
        h1_in = self.l1_x(h0) + self.l1_h(state['h1'])
        c1, h1 = F.lstm(state['c1'], h1_in)
        h2_in = self.l2_x(h1) + self.l2_h(state['h2'])
        c2, h2 = F.lstm(state['c2'], h2_in)
        y = self.l3(h2)
        state = {'c1': c1, 'h1': h1, 'c2': c2, 'h2': h2}
        return state, F.softmax(y)
def make_initial_state(n_units, batchsize=50, train=True):
    """Build zeroed LSTM cell/hidden states for both layers of the model."""
    state = {}
    for name in ('c1', 'h1', 'c2', 'h2'):
        zeros = np.zeros((batchsize, n_units), dtype=np.float32)
        state[name] = Variable(zeros, volatile=not train)
    return state
#-------------Explain2 in the Qiita-------------
"""
Explanation: 4.リカレントニューラル言語モデル設定
RNNLM(リカレントニューラル言語モデルの設定を行っています)
EmbedIDで行列変換を行い、疎なベクトルを密なベクトルに変換しています。
出力が4倍の理由は入力層、出力層、忘却層、前回の出力をLSTMでは入力に使用するためです。
隠れ層に前回保持した隠れ層の状態を入力することによってLSTMを実現しています。
ドロップアウトにより過学習するのを抑えています。
予測を行なうメソッドも実装しており、入力されたデータ、状態を元に次の文字列と状態を返すような関数になっています。
モデルの初期化を行なう関数もここで定義しています。
End of explanation
"""
# Prepare RNNLM model
model = CharRNN(len(vocab), n_units)
if gpu_ID >= 0:
    cuda.check_cuda_available()
    cuda.get_device(gpu_ID).use()
    model.to_gpu()
# RMSprop optimizer; setup() binds it to the model's parameters.
optimizer = optimizers.RMSprop(lr=2e-3, alpha=0.95, eps=1e-8)
optimizer.setup(model)
"""
Explanation: RNNLM(リカレントニューラル言語モデルの設定を行っています)
作成したリカレントニューラル言語モデルを導入しています。
最適化の手法はRMSpropを使用
http://qiita.com/skitaoka/items/e6afbe238cd69c899b2a
初期のパラメータを-0.1〜0.1の間で与えています。
End of explanation
"""
whole_len = train_data.shape[0]  # total number of tokens in the corpus
jump = whole_len // batchsize  # offset between the parallel batch streams
epoch = 0
start_at = time.time()
cur_at = start_at
state = make_initial_state(n_units, batchsize=batchsize)
cur_log_perp = 0  # accumulated log-loss, used for perplexity reporting
if gpu_ID >= 0:
    accum_loss = Variable(cuda.zeros(()))
    for key, value in state.items():
        value.data = cuda.to_gpu(value.data)  # move the LSTM state to GPU
else:
    accum_loss = Variable(xp.zeros((), dtype=np.float32))
"""
Explanation: 5.学習を始める前の設定
学習データのサイズを取得
ジャンプの幅を設定(順次学習しない)
パープレキシティを0で初期化
最初の時間情報を取得
初期状態を現在の状態に付与
状態の初期化
損失を0で初期化
End of explanation
"""
for i in range(int(jump * n_epochs)):
    #-------------Explain4 in the Qiita-------------
    # Build a minibatch: each of the `batchsize` streams reads the corpus
    # at an offset of `jump` words from its neighbor; y is x shifted by one.
    x_batch = np.array([train_data[(jump * j + i) % whole_len]
                        for j in range(batchsize)])
    y_batch = np.array([train_data[(jump * j + i + 1) % whole_len]
                        for j in range(batchsize)])
    if gpu_ID >= 0:
        x_batch = cuda.to_gpu(x_batch)
        y_batch = cuda.to_gpu(y_batch)
    # One forward step; accumulate the loss for truncated BPTT.
    state, loss_i = model.forward_one_step(x_batch, y_batch, state, dropout_ratio=0.7)
    accum_loss += loss_i
    cur_log_perp += loss_i.data
    if (i + 1) % bprop_len == 0:  # Run truncated BPTT
        now = time.time()
        cur_at = now
        # print('{}/{}, train_loss = {}, time = {:.2f}'.format((i + 1)/bprop_len, jump, accum_loss.data / bprop_len, now-cur_at))
        optimizer.zero_grads()
        accum_loss.backward()
        accum_loss.unchain_backward()  # truncate: drop history older than bprop_len
        # NOTE(review): this assignment is immediately superseded by the
        # if/else just below -- appears redundant.
        accum_loss = Variable(np.zeros((), dtype=np.float32))
        if gpu_ID >= 0:
            accum_loss = Variable(cuda.zeros(()))
        else:
            accum_loss = Variable(np.zeros((), dtype=np.float32))
        optimizer.clip_grads(grad_clip)
        optimizer.update()
    if (i + 1) % 10000 == 0:
        # Report training perplexity and checkpoint the model (on CPU).
        perp = math.exp(cuda.to_cpu(cur_log_perp) / 10000)
        print('iter {} training perplexity: {:.2f} '.format(i + 1, perp))
        fn = ('%s/charrnn_epoch_%i.chainermodel' % (checkpoint_dir, epoch))
        pickle.dump(copy.deepcopy(model).to_cpu(), open(fn, 'wb'))
        cur_log_perp = 0
    if (i + 1) % jump == 0:
        epoch += 1
    #-------------Explain4 in the Qiita-------------
    sys.stdout.flush()
"""
Explanation: 6.パラメータ更新方法(ミニバッチ)
確率的勾配法を用いて学習している。
一定のデータを選択し損失計算をしながらパラメータ更新をしている。
逐次尤度の計算も行っている。
適宜学習データのパープレキシティも計算している
バックプロパゲーションでパラメータを更新する。
truncateはどれだけ過去の履歴を見るかを表している。
optimizer.clip_gradsの部分でL2正則化をかけている。
過学習を抑えるために学習効率を徐々に下げている。
End of explanation
"""
# load model
#-------------Explain6 in the Qiita-------------
# NOTE(review): the file handle passed to pickle.load is never closed.
model = pickle.load(open("cv/charrnn_epoch_22.chainermodel", 'rb'))
#-------------Explain6 in the Qiita-------------
n_units = model.embed.W.shape[1]  # recover the unit count from the weights
if gpu_ID >= 0:
    cuda.check_cuda_available()
    cuda.get_device(gpu_ID).use()
    model.to_gpu()
# initialize generator: batch of 1, inference mode (no dropout/backprop)
state = make_initial_state(n_units, batchsize=1, train=False)
if gpu_ID >= 0:
    for key, value in state.items():
        value.data = cuda.to_gpu(value.data)
# build the inverse vocabulary: id -> word
ivocab = {}
ivocab = {v:k for k, v in vocab.items()}
"""
Explanation: 7.言語の予測
学習したモデルを取得
モデルからユニット数を取得
最初の空文字を設定
End of explanation
"""
# initialize generator
index = np.random.randint(0, len(vocab), 1)[0]  # random seed word id
sampling_range = 5  # used only by the commented-out top-k sampling line
prev_char = np.array([0], dtype=np.int32)
if gpu_ID >= 0:
    prev_char = cuda.to_gpu(prev_char)
for i in range(1000):
    # Pretty-print: start a new line after statement/block terminators.
    if ivocab[index] in ["}", ";"]:
        sys.stdout.write(ivocab[index] + "\n")
    else:
        sys.stdout.write(ivocab[index] + " ")
    #-------------Explain7 in the Qiita-------------
    # Feed the previous word back in; greedily pick the most likely next id.
    state, prob = model.predict(prev_char, state)
    index = np.argmax(cuda.to_cpu(prob.data))
    #index = np.random.choice(prob.data.argsort()[0,-sampling_range:][::-1], 1)[0]
    #-------------Explain7 in the Qiita-------------
    prev_char = np.array([index], dtype=np.int32)
    if gpu_ID >= 0:
        prev_char = cuda.to_gpu(prev_char)
print  # NOTE(review): bare `print` is a no-op in Python 3 (Python 2 leftover)
"""
Explanation: 学習したモデルを利用して文字の予測を行なう。
予測で出力された文字と状態を次の入力に使用する。
End of explanation
"""
|
samuelshaner/openmc | docs/source/pythonapi/examples/pandas-dataframes.ipynb | mit | %matplotlib inline
import glob
from IPython.display import Image
import matplotlib.pyplot as plt
import scipy.stats
import numpy as np
import pandas as pd
import openmc
"""
Explanation: This notebook demonstrates how systematic analysis of tally scores is possible using Pandas dataframes. A dataframe can be automatically generated using the Tally.get_pandas_dataframe(...) method. Furthermore, by linking the tally data in a statepoint file with geometry and material information from a summary file, the dataframe can be shown with user-supplied labels.
Note that this Notebook was created using the latest Pandas v0.16.1. Everything in the Notebook will run with older versions of Pandas, but the multi-indexing option in >v0.15.0 makes the tables look prettier.
End of explanation
"""
# Instantiate some Nuclides
# These are referenced by the material definitions in the next cell.
h1 = openmc.Nuclide('H1')
b10 = openmc.Nuclide('B10')
o16 = openmc.Nuclide('O16')
u235 = openmc.Nuclide('U235')
u238 = openmc.Nuclide('U238')
zr90 = openmc.Nuclide('Zr90')
"""
Explanation: Generate Input Files
First we need to define materials that will be used in the problem. Before defining a material, we must create nuclides that are used in the material.
End of explanation
"""
# 1.6 enriched fuel
# NOTE(review): the add_nuclide values look like atom densities in
# atom/b-cm rather than fractions -- confirm against the add_nuclide units.
fuel = openmc.Material(name='1.6% Fuel')
fuel.set_density('g/cm3', 10.31341)
fuel.add_nuclide(u235, 3.7503e-4)
fuel.add_nuclide(u238, 2.2625e-2)
fuel.add_nuclide(o16, 4.6007e-2)
# borated water
water = openmc.Material(name='Borated Water')
water.set_density('g/cm3', 0.740582)
water.add_nuclide(h1, 4.9457e-2)
water.add_nuclide(o16, 2.4732e-2)
water.add_nuclide(b10, 8.0042e-6)
# zircaloy
zircaloy = openmc.Material(name='Zircaloy')
zircaloy.set_density('g/cm3', 6.55)
zircaloy.add_nuclide(zr90, 7.2758e-3)
"""
Explanation: With the nuclides we defined, we will now create three materials for the fuel, water, and cladding of the fuel pin.
End of explanation
"""
# Instantiate a Materials collection holding all three materials
materials_file = openmc.Materials((fuel, water, zircaloy))
# Export to "materials.xml" (the input file OpenMC actually reads)
materials_file.export_to_xml()
"""
Explanation: With our three materials, we can now create a materials file object that can be exported to an actual XML file.
End of explanation
"""
# Create cylinders for the fuel and clad
fuel_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, R=0.39218)
clad_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, R=0.45720)
# Create boundary planes to surround the geometry
# Use both reflective and vacuum boundaries to make life interesting
min_x = openmc.XPlane(x0=-10.71, boundary_type='reflective')
max_x = openmc.XPlane(x0=+10.71, boundary_type='vacuum')
min_y = openmc.YPlane(y0=-10.71, boundary_type='vacuum')
max_y = openmc.YPlane(y0=+10.71, boundary_type='reflective')
min_z = openmc.ZPlane(z0=-10.71, boundary_type='reflective')
max_z = openmc.ZPlane(z0=+10.71, boundary_type='reflective')
"""
Explanation: Now let's move on to the geometry. This problem will be a square array of fuel pins for which we can use OpenMC's lattice/universe feature. The basic universe will have three regions for the fuel, the clad, and the surrounding coolant. The first step is to create the bounding surfaces for fuel and clad, as well as the outer bounding surfaces of the problem.
End of explanation
"""
# Create a Universe to encapsulate a fuel pin
pin_cell_universe = openmc.Universe(name='1.6% Fuel Pin')
# Create fuel Cell
fuel_cell = openmc.Cell(name='1.6% Fuel')
fuel_cell.fill = fuel
fuel_cell.region = -fuel_outer_radius
pin_cell_universe.add_cell(fuel_cell)
# Create a clad Cell
clad_cell = openmc.Cell(name='1.6% Clad')
clad_cell.fill = zircaloy
clad_cell.region = +fuel_outer_radius & -clad_outer_radius
pin_cell_universe.add_cell(clad_cell)
# Create a moderator Cell
moderator_cell = openmc.Cell(name='1.6% Moderator')
moderator_cell.fill = water
moderator_cell.region = +clad_outer_radius
pin_cell_universe.add_cell(moderator_cell)
"""
Explanation: With the surfaces defined, we can now construct a fuel pin cell from cells that are defined by intersections of half-spaces created by the surfaces.
End of explanation
"""
# Create fuel assembly Lattice
assembly = openmc.RectLattice(name='1.6% Fuel - 0BA')
assembly.pitch = (1.26, 1.26)
assembly.lower_left = [-1.26 * 17. / 2.0] * 2
assembly.universes = [[pin_cell_universe] * 17] * 17
"""
Explanation: Using the pin cell universe, we can construct a 17x17 rectangular lattice with a 1.26 cm pitch.
End of explanation
"""
# Create root Cell
root_cell = openmc.Cell(name='root cell')
root_cell.fill = assembly
# Add boundary planes
root_cell.region = +min_x & -max_x & +min_y & -max_y & +min_z & -max_z
# Create root Universe
root_universe = openmc.Universe(universe_id=0, name='root universe')
root_universe.add_cell(root_cell)
"""
Explanation: OpenMC requires that there is a "root" universe. Let us create a root cell that is filled by the pin cell universe and then assign it to the root universe.
End of explanation
"""
# Create Geometry and set root Universe
geometry = openmc.Geometry()
geometry.root_universe = root_universe
# Export to "geometry.xml"
geometry.export_to_xml()
"""
Explanation: We now must create a geometry that is assigned a root universe and export it to XML.
End of explanation
"""
# OpenMC simulation parameters
min_batches = 20
max_batches = 200
inactive = 5
particles = 2500
# Instantiate a Settings object
settings_file = openmc.Settings()
settings_file.batches = min_batches
settings_file.inactive = inactive
settings_file.particles = particles
settings_file.output = {'tallies': False}
settings_file.trigger_active = True
settings_file.trigger_max_batches = max_batches
# Create an initial uniform spatial source distribution over fissionable zones
bounds = [-10.71, -10.71, -10, 10.71, 10.71, 10.]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings_file.source = openmc.source.Source(space=uniform_dist)
# Export to "settings.xml"
settings_file.export_to_xml()
"""
Explanation: With the geometry and materials finished, we now just need to define simulation parameters. In this case, we will use 5 inactive batches and 15 minimum active batches each with 2500 particles. We also tell OpenMC to turn tally triggers on, which means it will keep running until some criterion on the uncertainty of tallies is reached.
End of explanation
"""
# Instantiate a Plot
plot = openmc.Plot(plot_id=1)
plot.filename = 'materials-xy'
plot.origin = [0, 0, 0]
plot.width = [21.5, 21.5]
plot.pixels = [250, 250]
plot.color = 'mat'
# Instantiate a Plots collection and export to "plots.xml"
plot_file = openmc.Plots([plot])
plot_file.export_to_xml()
"""
Explanation: Let us also create a plot file that we can use to verify that our pin cell geometry was created successfully.
End of explanation
"""
# Run openmc in plotting mode (no transport, just renders plots.xml)
openmc.plot_geometry(output=False)
# Convert OpenMC's funky ppm to png (requires ImageMagick's `convert`)
!convert materials-xy.ppm materials-xy.png
# Display the materials plot inline
Image(filename='materials-xy.png')
"""
Explanation: With the plots.xml file, we can now generate and view the plot. OpenMC outputs plots in .ppm format, which can be converted into a compressed format like .png with the convert utility.
End of explanation
"""
# Instantiate an empty Tallies object
tallies_file = openmc.Tallies()
# NOTE(review): assigning the private _tallies attribute bypasses the
# public API; a fresh Tallies() should already be empty -- verify.
tallies_file._tallies = []
"""
Explanation: As we can see from the plot, we have a nice array of pin cells with fuel, cladding, and water! Before we run our simulation, we need to tell the code what we want to tally. The following code shows how to create a variety of tallies.
End of explanation
"""
# Instantiate a tally Mesh
# Instantiate a tally Mesh: one 1.26 cm cell per fuel pin (17x17),
# aligned with the assembly lattice.
mesh = openmc.Mesh(mesh_id=1)
mesh.type = 'regular'
mesh.dimension = [17, 17]
mesh.lower_left = [-10.71, -10.71]
mesh.width = [1.26, 1.26]
# Instantiate tally Filter
mesh_filter = openmc.MeshFilter(mesh)
# Instantiate energy Filter: thermal (<0.625 eV) and fast groups
energy_filter = openmc.EnergyFilter([0, 0.625, 20.0e6])
# Instantiate the Tally
tally = openmc.Tally(name='mesh tally')
tally.filters = [mesh_filter, energy_filter]
tally.scores = ['fission', 'nu-fission']
# Add mesh and Tally to Tallies
tallies_file.append(tally)
"""
Explanation: Instantiate a fission rate mesh Tally
End of explanation
"""
# Instantiate tally Filter
cell_filter = openmc.CellFilter(fuel_cell.id)
# Instantiate the tally
tally = openmc.Tally(name='cell tally')
tally.filters = [cell_filter]
tally.scores = ['scatter-y2']
tally.nuclides = [u235, u238]
# Add mesh and tally to Tallies
tallies_file.append(tally)
"""
Explanation: Instantiate a cell Tally with nuclides
End of explanation
"""
# Instantiate tally Filter
distribcell_filter = openmc.DistribcellFilter(moderator_cell.id)
# Instantiate tally Trigger for kicks
trigger = openmc.Trigger(trigger_type='std_dev', threshold=5e-5)
trigger.scores = ['absorption']
# Instantiate the Tally
tally = openmc.Tally(name='distribcell tally')
tally.filters = [distribcell_filter]
tally.scores = ['absorption', 'scatter']
tally.triggers = [trigger]
# Add mesh and tally to Tallies
tallies_file.append(tally)
# Export to "tallies.xml"
tallies_file.export_to_xml()
"""
Explanation: Create a "distribcell" Tally. The distribcell filter allows us to tally multiple repeated instances of the same cell throughout the geometry.
End of explanation
"""
# Remove old HDF5 (summary, statepoint) files
!rm statepoint.*
# Run OpenMC!
openmc.run()
"""
Explanation: Now we have a complete set of inputs, so we can go ahead and run our simulation.
End of explanation
"""
# We do not know how many batches were needed to satisfy the
# tally trigger(s), so find the statepoint file(s)
statepoints = glob.glob('statepoint.*.h5')
# Load the last statepoint file
sp = openmc.StatePoint(statepoints[-1])
"""
Explanation: Tally Data Processing
End of explanation
"""
# Find the mesh tally with the StatePoint API
tally = sp.get_tally(name='mesh tally')
# Print a little info about the mesh tally to the screen
print(tally)
"""
Explanation: Analyze the mesh fission rate tally
End of explanation
"""
# Get the relative error for the thermal fission reaction
# rates in the four corner pins
data = tally.get_values(scores=['fission'],
filters=[openmc.MeshFilter, openmc.EnergyFilter], \
filter_bins=[((1,1),(1,17), (17,1), (17,17)), \
((0., 0.625),)], value='rel_err')
print(data)
# Get a pandas dataframe for the mesh tally data
df = tally.get_pandas_dataframe(nuclides=False)
# Set the Pandas float display settings
pd.options.display.float_format = '{:.2e}'.format
# Print the first twenty rows in the dataframe
df.head(20)
# Create a boxplot to view the distribution of
# fission and nu-fission rates in the pins
bp = df.boxplot(column='mean', by='score')
# Extract thermal nu-fission rates from pandas
fiss = df[df['score'] == 'nu-fission']
fiss = fiss[fiss['energy low [eV]'] == 0.0]
# Extract mean and reshape as 2D NumPy arrays. Use .values.reshape rather
# than Series.reshape: the latter was deprecated in pandas 0.19 and removed
# in later releases, so calling it raises AttributeError on modern pandas.
mean = fiss['mean'].values.reshape((17,17))
plt.imshow(mean, interpolation='nearest')
plt.title('fission rate')
plt.xlabel('x')
plt.ylabel('y')
plt.colorbar()
"""
Explanation: Use the new Tally data retrieval API with pure NumPy
End of explanation
"""
# Find the cell Tally with the StatePoint API
tally = sp.get_tally(name='cell tally')
# Print a little info about the cell tally to the screen
print(tally)
# Get a pandas dataframe for the cell tally data
df = tally.get_pandas_dataframe()
# Print the first twenty rows in the dataframe
df.head(100)
"""
Explanation: Analyze the cell+nuclides scatter-y2 rate tally
End of explanation
"""
# Get the standard deviations for two of the spherical harmonic
# scattering reaction rates
data = tally.get_values(scores=['scatter-Y2,2', 'scatter-Y0,0'],
nuclides=['U238', 'U235'], value='std_dev')
print(data)
"""
Explanation: Use the new Tally data retrieval API with pure NumPy
End of explanation
"""
# Find the distribcell Tally with the StatePoint API
tally = sp.get_tally(name='distribcell tally')
# Print a little info about the distribcell tally to the screen
print(tally)
"""
Explanation: Analyze the distribcell tally
End of explanation
"""
# Get the relative error for the scattering reaction rates in
# the first 10 distribcell instances
data = tally.get_values(scores=['scatter'], filters=[openmc.DistribcellFilter],
filter_bins=[(i,) for i in range(10)], value='rel_err')
print(data)
"""
Explanation: Use the new Tally data retrieval API with pure NumPy
End of explanation
"""
# Get a pandas dataframe for the distribcell tally data
df = tally.get_pandas_dataframe(nuclides=False)
# Print the last twenty rows in the dataframe
df.tail(20)
# Show summary statistics for absorption distribcell tally data
absorption = df[df['score'] == 'absorption']
absorption[['mean', 'std. dev.']].dropna().describe()
# Note that the maximum standard deviation does indeed
# meet the 5e-5 threshold set by the tally trigger
"""
Explanation: Print the distribcell tally dataframe
End of explanation
"""
# Extract tally data from pins in the pins divided along y=x diagonal
multi_index = ('level 2', 'lat',)
lower = df[df[multi_index + ('x',)] + df[multi_index + ('y',)] < 16]
upper = df[df[multi_index + ('x',)] + df[multi_index + ('y',)] > 16]
lower = lower[lower['score'] == 'absorption']
upper = upper[upper['score'] == 'absorption']
# Perform non-parametric Mann-Whitney U Test to see if the
# absorption rates (may) come from same sampling distribution
u, p = scipy.stats.mannwhitneyu(lower['mean'], upper['mean'])
print('Mann-Whitney Test p-value: {0}'.format(p))
"""
Explanation: Perform a statistical test comparing the tally sample distributions for two categories of fuel pins.
End of explanation
"""
# Extract tally data from pins in the pins divided along y=-x diagonal
multi_index = ('level 2', 'lat',)
lower = df[df[multi_index + ('x',)] > df[multi_index + ('y',)]]
upper = df[df[multi_index + ('x',)] < df[multi_index + ('y',)]]
lower = lower[lower['score'] == 'absorption']
upper = upper[upper['score'] == 'absorption']
# Perform non-parametric Mann-Whitney U Test to see if the
# absorption rates (may) come from same sampling distribution
u, p = scipy.stats.mannwhitneyu(lower['mean'], upper['mean'])
print('Mann-Whitney Test p-value: {0}'.format(p))
"""
Explanation: Note that the symmetry implied by the y=x diagonal ensures that the two sampling distributions are identical. Indeed, as illustrated by the test above, for any reasonable significance level (e.g., $\alpha$=0.05) one would not reject the null hypothesis that the two sampling distributions are identical.
Next, perform the same test but with two groupings of pins which are not symmetrically identical to one another.
End of explanation
"""
# Extract the scatter tally data from pandas
scatter = df[df['score'] == 'scatter']
scatter['rel. err.'] = scatter['std. dev.'] / scatter['mean']
# Show a scatter plot of the mean vs. the std. dev.
scatter.plot(kind='scatter', x='mean', y='rel. err.', title='Scattering Rates')
# Plot a histogram and kernel density estimate for the scattering rates
scatter['mean'].plot(kind='hist', bins=25)
scatter['mean'].plot(kind='kde')
plt.title('Scattering Rates')
plt.xlabel('Mean')
plt.legend(['KDE', 'Histogram'])
"""
Explanation: Note that the asymmetry implied by the y=-x diagonal ensures that the two sampling distributions are not identical. Indeed, as illustrated by the test above, for any reasonable significance level (e.g., $\alpha$=0.05) one would reject the null hypothesis that the two sampling distributions are identical.
End of explanation
"""
|
manoharan-lab/structural-color | bulk_polydispersity_tutorial.ipynb | gpl-3.0 | %matplotlib inline
import numpy as np
import time
import structcol as sc
import structcol.refractive_index as ri
from structcol import montecarlo as mc
from structcol import detector as det
from structcol import phase_func_sphere as pfs
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.misc import factorial
import os
"""
Explanation: Tutorial for polydispersity with bulk Monte Carlo simulations in the structural-color package
Copyright 2016, Vinothan N. Manoharan, Victoria Hwang, Annie Stephenson
This file is part of the structural-color python package.
This package is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this package. If not, see http://www.gnu.org/licenses/.
Introduction to polydispersity with bulk Monte Carlo simulations
One of the advantages of the bulk montecarlo model is that we can sample phase functions and scattering lengths for spheres of different sizes in a bulk film. This means we can predict the reflectance of bulk films made of mixtures of spheres with different sizes, allowing us to simulate polydispersity using the bulk Monte Carlo model.
Below is an example that calculates a reflectance spectrum from a bulk film made of polydisperse spheres, where the internal structure of each sphere is the same.
Loading and using the package and module
You'll need the following imports
End of explanation
"""
# Properties of the source
wavelengths = sc.Quantity(np.arange(400., 801.,10),'nm') # wavelengths at which to calculate reflectance
# Geometric properties of the sample
num_diams = 3 # number of diams from which to sample,
# higher number takes longer but gives a more precise calculation
sphere_boundary_diam_mean = sc.Quantity(10,'um') # mean diameter of the microspheres
pdi = 0.2 # poldispersity index
particle_radius = sc.Quantity(160,'nm') # radii of the two species of particles
volume_fraction_bulk = sc.Quantity(0.63,'') # volume fraction of the spheres in the bulk film
volume_fraction_particles = sc.Quantity(0.55, '') # volume fraction of the particles in the sphere boundary
bulk_thickness = sc.Quantity('50 um') # thickness of the bulk film
boundary = 'sphere' # geometry of sample
boundary_bulk = 'film' # geometry of the bulk sample
# Refractive indices
n_particle = ri.n('vacuum', wavelengths) # refractive index of particle
n_matrix = ri.n('polystyrene', wavelengths) + 2e-5*1j # refractive index of matrix
n_matrix_bulk = ri.n('vacuum', wavelengths) # refractive index of the bulk matrix
n_medium = ri.n('vacuum', wavelengths) # refractive index of medium outside the bulk sample.
# Monte Carlo parameters
ntrajectories = 500 # number of trajectories to run with a spherical boundary
nevents = 300 # number of scattering events for each trajectory in a spherical boundary
ntrajectories_bulk = 1000 # number of trajectories to run in the bulk film
nevents_bulk = 300 # number of events to run in the bulk film
# Plot settings
sns.set_style('white') # sets white plotting background
"""
Explanation: Start by running Monte Carlo code for a single sphere
This is essentially the same as running MC for a sphere as described in montecarlo_tutorial.ipynb, only we return a few extra parameters from calc_refl_trans() and use them to calculate the phase function, scattering coefficient, and absorption coefficient for the bulk Monte Carlo simulation.
Set parameters
We have to set a few extra parameters for the bulk simulation
End of explanation
"""
# calculate diameter list to sample from
sphere_boundary_diameters = pfs.calc_diam_list(num_diams, sphere_boundary_diam_mean, pdi, equal_spacing = False)
"""
Explanation: Sample sphere boundary sizes
Calculate a list of sphere boundary diameters based on the polydispersity of the spheres. This list will be used to calculate phase functions and scattering/absorption coefficients for single spheres, to use in the bulk model. In this example, we assume each sphere has the same particle packings inside.
End of explanation
"""
# Per-wavelength results for a single sphere boundary, and the bulk-model
# inputs (phase function + scattering/absorption coefficients) for every
# sampled sphere diameter.
reflectance_sphere = np.zeros(wavelengths.size)
p_bulk = np.zeros((sphere_boundary_diameters.size, wavelengths.size, 200))
mu_scat_bulk = sc.Quantity(np.zeros((sphere_boundary_diameters.size, wavelengths.size)),'1/um')
mu_abs_bulk = sc.Quantity(np.zeros((sphere_boundary_diameters.size, wavelengths.size)),'1/um')
for j in range(sphere_boundary_diameters.size):
    # print diameter to keep track of where we are in calculation
    print('diameter: ' + str(sphere_boundary_diameters[j]))
    for i in range(wavelengths.size):
        # calculate the effective index of the sample
        n_sample = ri.n_eff(n_particle[i], n_matrix[i], volume_fraction_particles)
        # Calculate the phase function and scattering and absorption coefficients from the single scattering model
        # (this absorption coefficient is of the scatterer, not of an absorber added to the system)
        p, mu_scat, mu_abs = mc.calc_scat(particle_radius, n_particle[i], n_sample,
                                          volume_fraction_particles, wavelengths[i])
        # Initialize the trajectories
        r0, k0, W0 = mc.initialize(nevents, ntrajectories, n_matrix_bulk[i], n_sample,
                                   boundary, sample_diameter = sphere_boundary_diameters[j])
        r0 = sc.Quantity(r0, 'um')
        k0 = sc.Quantity(k0, '')
        W0 = sc.Quantity(W0, '')
        # Create trajectories object
        trajectories = mc.Trajectory(r0, k0, W0)
        # Generate a matrix of all the randomly sampled angles first
        sintheta, costheta, sinphi, cosphi, _, _ = mc.sample_angles(nevents, ntrajectories, p)
        # Create step size distribution
        step = mc.sample_step(nevents, ntrajectories, mu_scat)
        # Run photons
        trajectories.absorb(mu_abs, step)
        trajectories.scatter(sintheta, costheta, sinphi, cosphi)
        trajectories.move(step)
        # Calculate reflection and transmission
        (refl_indices,
         trans_indices,
         _, _, _,
         refl_per_traj, trans_per_traj,
         _,_,_,_,
         reflectance_sphere[i],
         _,_, norm_refl, norm_trans) = det.calc_refl_trans(trajectories, sphere_boundary_diameters[j],
                                                           n_matrix_bulk[i], n_sample, boundary,
                                                           run_fresnel_traj = False,
                                                           return_extra = True)
        ### Calculate phase function and lscat ###
        # use output of calc_refl_trans to calculate phase function, mu_scat, and mu_abs for the bulk
        p_bulk[j,i,:], mu_scat_bulk[j,i], mu_abs_bulk[j,i] = pfs.calc_scat_bulk(refl_per_traj, trans_per_traj,
                                                                                trans_indices,
                                                                                norm_refl, norm_trans,
                                                                                volume_fraction_bulk,
                                                                                sphere_boundary_diameters[j],
                                                                                n_matrix_bulk[i],
                                                                                wavelengths[i],
                                                                                plot=False, phi_dependent=False)
"""
Explanation: Run Monte Carlo for each of the sphere boundary sizes
Run Monte Carlo simulations for a sphere boundary, for all the sizes of spheres calculated above. This will give two scattering parameters for sphere size and each wavelength.
End of explanation
"""
# sample
sphere_diams_sampled = pfs.sample_diams(pdi, sphere_boundary_diameters,
sphere_boundary_diam_mean,
ntrajectories_bulk,
nevents_bulk)
# plot
sns.distplot(np.ndarray.flatten(sphere_diams_sampled), kde = False)
plt.ylabel('number sampled')
plt.xlabel('diameter (' + str(sphere_boundary_diameters.units) + ')')
"""
Explanation: Sample distribution of sphere boundary diameters
Given the polydispersity index (pdi) of the sphere boundaries and their mean diameter, sample a sphere boundary diameter for each event and trajectory.
End of explanation
"""
reflectance_bulk_poly = np.zeros(wavelengths.size)
for i in range(wavelengths.size):
# print the wavelength keep track of where we are in calculation
print('wavelength: ' + str(wavelengths[i]))
# Initialize the trajectories
r0, k0, W0 = mc.initialize(nevents_bulk, ntrajectories_bulk, n_medium[i], n_matrix_bulk[i],
boundary_bulk)
r0 = sc.Quantity(r0, 'um')
W0 = sc.Quantity(W0, '')
k0 = sc.Quantity(k0, '')
# Sample angles and calculate step size based on sampled radii
sintheta, costheta, sinphi, cosphi, step, _, _ = pfs.sample_angles_step_poly(nevents_bulk, ntrajectories_bulk,
p_bulk[:,i,:],
sphere_diams_sampled,
mu_scat_bulk[:,i],
param_list = sphere_boundary_diameters)
# Create trajectories object
trajectories = mc.Trajectory(r0, k0, W0)
# Run photons
trajectories.absorb(mu_abs_bulk[0,i], step) # Note: polydisperse absorption does not currently work in the bulk
# so we arbitrarily use index 0, assuming that all scattering events
# have the same amount of absorption
trajectories.scatter(sintheta, costheta, sinphi, cosphi)
trajectories.move(step)
# calculate reflectance
reflectance_bulk_poly[i], transmittance = det.calc_refl_trans(trajectories, bulk_thickness,
n_medium[i], n_matrix_bulk[i], boundary_bulk)
"""
Explanation: Calculate reflectance of bulk polydisperse film
The only difference from a normal bulk reflectance calculation (see bulk_montecarlo_tutorial.ipynb) is that we use the function pfs.sample_angles_step_poly() instead of sample_angles() and sample_step()
Note that for mixtures of different sphere types, absorption only works in the bulk matrix, not in the spheres themselves. This is because sampling the different absorption lengths for different sphere types has not yet been implemented.
End of explanation
"""
plt.figure()
plt.plot(wavelengths, reflectance_bulk_poly, linewidth = 3)
plt.ylim([0,1])
plt.xlim([400,800])
plt.xlabel('Wavelength (nm)')
plt.ylabel('Reflectance')
plt.title('Bulk Reflectance')
"""
Explanation: Plot results
End of explanation
"""
|
bbfamily/abu | abupy_lecture/5-选股策略的开发(ABU量化使用文档).ipynb | gpl-3.0 | from __future__ import print_function
from __future__ import division
import warnings
warnings.filterwarnings('ignore')
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
import sys
# 使用insert 0即只使用github,避免交叉使用了pip安装的abupy,导致的版本不一致问题
sys.path.insert(0, os.path.abspath('../'))
import abupy
# 使用沙盒数据,目的是和书中一样的数据环境
abupy.env.enable_example_env_ipython()
"""
Explanation: ABU量化系统使用文档
<center>
<img src="./image/abu_logo.png" alt="" style="vertical-align:middle;padding:10px 20px;"><font size="6" color="black"><b>第5节 选股策略的开发</b></font>
</center>
作者: 阿布
阿布量化版权所有 未经允许 禁止转载
abu量化系统github地址 (您的star是我的动力!)
本节ipython notebook
在第一节即说过:
在对的时间,遇见对的人(股票),是一种幸福
在对的时间,遇见错的人(股票),是一种悲伤
在错的时间,遇见对的人(股票),是一声叹息
在错的时间,遇见错的人(股票),是一种无奈
之前的节讲的都是择时(什么时候投资), 本节将讲解选股。
首先导入abupy中本节使用的模块:
End of explanation
"""
from abupy import AbuPickStockBase, ps, ABuRegUtil
class AbuPickRegressAngMinMax(AbuPickStockBase):
    """Example stock-picking factor based on the fitted regression angle of the price trend."""
    def _init_self(self, **kwargs):
        """Configure the factor: set the angle boundary conditions through kwargs."""
        # For now stay consistent with the base class and avoid the kwargs.pop('a', default) style
        # In fit_pick: ang > threshold_ang_min; default -inf, i.e. every symbol passes by default
        self.threshold_ang_min = -np.inf
        if 'threshold_ang_min' in kwargs:
            # Set the minimum angle threshold
            self.threshold_ang_min = kwargs['threshold_ang_min']
        # In fit_pick: ang < threshold_ang_max; default +inf, i.e. every symbol passes by default
        self.threshold_ang_max = np.inf
        if 'threshold_ang_max' in kwargs:
            # Set the maximum angle threshold
            self.threshold_ang_max = kwargs['threshold_ang_max']
    @ps.reversed_result
    def fit_pick(self, kl_pd, target_symbol):
        """Pick stocks according to the user-defined angle boundary parameters."""
        # Compute the regression angle of the close-price trend
        ang = ABuRegUtil.calc_regress_deg(kl_pd.close, show=False)
        # Keep the symbol only if the angle falls strictly inside the configured bounds
        if self.threshold_ang_min < ang < self.threshold_ang_max:
            return True
        return False
    def fit_first_choice(self, pick_worker, choice_symbols, *args, **kwargs):
        raise NotImplementedError('AbuPickRegressAng fit_first_choice unsupported now!')
"""
Explanation: 1. 选股因子的编写
与择时小节类似,实现示例在中abu量化系统实现一个选股策略。
如下代码AbuPickRegressAngMinMax为选股因子,它的作用是将股票前期走势进行线性拟合计算一个角度,参数为选股条件,将选股条件作用于角度后进行股票的筛选。
End of explanation
"""
from abupy import AbuPickStockWorker
from abupy import AbuBenchmark, AbuCapital, AbuKLManager
# 选股条件threshold_ang_min=0.0, 即要求股票走势为向上上升趋势
stock_pickers = [{'class': AbuPickRegressAngMinMax,
'threshold_ang_min': 0.0, 'reversed': False}]
# 从这几个股票里进行选股,只是为了演示方便
# 一般的选股都会是数量比较多的情况比如全市场股票
choice_symbols = ['usNOAH', 'usSFUN', 'usBIDU', 'usAAPL', 'usGOOG',
'usTSLA', 'usWUBA', 'usVIPS']
benchmark = AbuBenchmark()
capital = AbuCapital(1000000, benchmark)
kl_pd_manger = AbuKLManager(benchmark, capital)
stock_pick = AbuPickStockWorker(capital, benchmark, kl_pd_manger,
choice_symbols=choice_symbols,
stock_pickers=stock_pickers)
stock_pick.fit()
# 打印最后的选股结果
stock_pick.choice_symbols
"""
Explanation: 上面编写的AbuPickRegressAngMinMax即为一个完整的选股策略:
选股策略必须继承自AbuPickStockBase
选股策略必须实现fit_pick,即完成通过选股阶段金融时间序列对股票决策是否选中
选股策略必须实现fit_first_choice, 但是可以raise NotImplementedError,fit_first_choice后面的章节示例
fit_pick上的装饰器ps.reversed_result稍后讲解
选股模块主要功能依托AbuPickStockWorker,其类似择时模块中的AbuPickTimeWorker,其通过init_stock_pickers()函数将所有选股因子实例化,然后在之后的fit()操作中,遍历所有选股因子,使用选股因子的fit_pick()函数,保留所有选股因子的fit_pick()都返回True的股票,只要有一个选股因子的fit_pick结果是False就将股票剔除。
详细代码请查阅 AbuPickStockWorker源代码。
本节只讲简单讲解选股使用示例,比如只想选取符合上升走势的股票:
End of explanation
"""
# 从kl_pd_manger缓存中获取选股走势数据,
# 注意get_pick_stock_kl_pd()为选股数据,get_pick_time_kl_pd()为择时
kl_pd_noah = kl_pd_manger.get_pick_stock_kl_pd('usNOAH')
# 绘制并计算角度
deg = ABuRegUtil.calc_regress_deg(kl_pd_noah.close)
print('noah 选股周期内角度={}'.format(round(deg, 3)))
"""
Explanation: 上面的实现方式和第一节中讲解择时回测的使用时一样通过分解流程方式一步一步实现使用AbuPickStockWorker进行选股,目的是为了更清晰的说明内部操作流程,编码过程会显的有些复杂臃肿。
实际上在编写完成一个策略后只需要abu.run_loop_back()函数即可以完成回测,在后面的小节中会进行讲解。
上面选股的结果将noah剔除,因为它在回测之前的选股周期内趋势为下降趋势,如下图所示:
End of explanation
"""
from abupy import ABuPickStockExecute
stock_pickers = [{'class': AbuPickRegressAngMinMax,
'threshold_ang_min': 0.0, 'threshold_ang_max': 10.0,
'reversed': False}]
ABuPickStockExecute.do_pick_stock_work(choice_symbols, benchmark,
capital, stock_pickers)
"""
Explanation: 注意上面的选股数据要使用择时回测数据之前的一段时间数据,在AbuPickStockBase中定义了xd,min_xd选股周期获取参数,
在AbuKLManager中通过get_pick_stock_kl_pd()函数配合xd,min_xd参数获取选股周期数据
更多详情请阅读AbuPickStockBase源代码与AbuKLManager源代码
上述选股代码实现在ABuPickStockExecute.do_pick_stock_work()中进行了封装,即讲AbuPickStockWorker及一些零散操作进行封装。
更多详情请阅读ABuPickStockExecute,使用示例如下所示:
eg:继续使用AbuPickRegressAngMinMax做为选股因子,如下定义threshold_ang_min=0.0, threshold_ang_max=10.0,即只选取上升趋势且上升角度小于10度的股票,下面示例使用ABuPickStockExecute.do_pick_stock_work()函数
End of explanation
"""
kl_pd_sfun = kl_pd_manger.get_pick_stock_kl_pd('usSFUN')
print('sfun 选股周期内角度={:.3f}'.format(ABuRegUtil.calc_regress_deg(kl_pd_sfun.close)))
kl_pd_baidu = kl_pd_manger.get_pick_stock_kl_pd('usBIDU')
print('bidu 选股周期内角度={:.3f}'.format(ABuRegUtil.calc_regress_deg(kl_pd_baidu.close)))
"""
Explanation: 可以看到结果sfun和baidu都符合,下面代码验证一下:
End of explanation
"""
# 和上面的代码唯一的区别就是reversed=True
stock_pickers = [{'class': AbuPickRegressAngMinMax,
'threshold_ang_min': 0.0, 'threshold_ang_max': 10.0,
'reversed': True}]
ABuPickStockExecute.do_pick_stock_work(choice_symbols, benchmark,
capital, stock_pickers)
"""
Explanation: 上面结果显示两支股票在选股周期中的价格趋势拟合角度都在0-10之间。
假设修改需求想要选取周期内趋势角度在0度-10度之外的所有股票,可以这样编写代码:
End of explanation
"""
class AbuPickStockPriceMinMax(AbuPickStockBase):
    """Example stock-picking factor based on price boundaries."""
    def _init_self(self, **kwargs):
        """Configure the factor: set the price boundary conditions through kwargs."""
        # For now stay consistent with the base class and avoid the kwargs.pop('a', default) style
        # In fit_pick: keep symbols whose minimum close > threshold_price_min; default -inf, i.e. all pass
        self.threshold_price_min = -np.inf
        if 'threshold_price_min' in kwargs:
            # Minimum price threshold
            self.threshold_price_min = kwargs['threshold_price_min']
        # In fit_pick: keep symbols whose maximum close < threshold_price_max; default +inf, i.e. all pass
        self.threshold_price_max = np.inf
        if 'threshold_price_max' in kwargs:
            # Maximum price threshold
            self.threshold_price_max = kwargs['threshold_price_max']
    @ps.reversed_result
    def fit_pick(self, kl_pd, target_symbol):
        """Pick stocks according to the user-defined price boundary parameters."""
        if kl_pd.close.max() < self.threshold_price_max and kl_pd.close.min() > self.threshold_price_min:
            # max close price < maximum threshold AND min close price > minimum threshold
            return True
        return False
    def fit_first_choice(self, pick_worker, choice_symbols, *args, **kwargs):
        raise NotImplementedError('AbuPickStockPriceMinMax fit_first_choice unsupported now!')
"""
Explanation: 可以看到结果与之前相反除了baidu和sfun之外所有股票都被选上,由于在AbuPickStockBase中定义函数reversed_result(),它的作用就是定义结果是否反转。
具体实现请阅读AbuPickStockBase中reversed_result装饰器的实现。
使用实际很简单,在每个具体的选股因子上的fit_pick()函数上根据需要选择是否安上装饰器,如下编写价格选股因子所示:
End of explanation
"""
from abupy import AbuPickStockPriceMinMax
# 选股list使用两个不同的选股因子组合,并行同时生效
stock_pickers = [{'class': AbuPickRegressAngMinMax,
'threshold_ang_min': 0.0, 'reversed': False},
{'class': AbuPickStockPriceMinMax,
'threshold_price_min': 50.0,
'reversed': False}]
%time ABuPickStockExecute.do_pick_stock_work(choice_symbols, benchmark, capital, stock_pickers)
"""
Explanation: 备注:本节所编写的选股示例代码以内置在abupy项目代码中可通过如下方式直接导入:
from abupy import AbuPickStockPriceMinMax
from abupy import AbuPickRegressAngMinMax
2. 多个选股因子并行执行
ABuPickRegressAngMinMax: threshold_ang_min=0.0, 即要求股票走势为向上,上升趋势
ABuPickStockPriceMinMax threshold_price_min=50.0, 即要求股票在选股周期内股价最小值要大于50.0
继续使用ABuPickStockExecute,使上面两个选股因子同时生效,结果符合的只有BIDU及TSLA,代码如下所示:
End of explanation
"""
%%time
from abupy import AbuPickStockMaster
from abupy import AbuPickRegressAngMinMax, AbuPickStockPriceMinMax
# 选股list使用两个不同的选股因子组合,并行同时生效
stock_pickers = [{'class': AbuPickRegressAngMinMax,
'threshold_ang_min': 0.0, 'reversed': False},
{'class': AbuPickStockPriceMinMax,
'threshold_price_min': 50.0,
'reversed': False}]
cs = AbuPickStockMaster.do_pick_stock_with_process(capital, benchmark, stock_pickers, choice_symbols)
"""
Explanation: 3. 使用并行来提升选股运行效率
与并行择时实现方式类似,选股使用AbuPickStockMaster并行执行多个进程来提升选股效率。
具体代码请查询AbuPickStockMaster,下面为使用示例, 使用do_pick_stock_with_process()函数执行默认n_process_pick_stock=8,即默认同时运行8个进程。
备注:下面都通过import的方式导入了模块,因为在windows系统上,启动并行后,在ipython notebook中定义的类会在子进程中无法找到
End of explanation
"""
|
jch1/models | slim/slim_walkthrough.ipynb | apache-2.0 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
import math
import numpy as np
import tensorflow as tf
import time
from datasets import dataset_utils
# Main slim library
from tensorflow.contrib import slim
"""
Explanation: TF-Slim Walkthrough
This notebook will walk you through the basics of using TF-Slim to define, train and evaluate neural networks on various tasks. It assumes a basic knowledge of neural networks.
Table of contents
<a href="#Install">Installation and setup</a><br>
<a href='#MLP'>Creating your first neural network with TF-Slim</a><br>
<a href='#ReadingTFSlimDatasets'>Reading Data with TF-Slim</a><br>
<a href='#CNN'>Training a convolutional neural network (CNN)</a><br>
<a href='#Pretained'>Using pre-trained models</a><br>
Installation and setup
<a id='Install'></a>
Since the stable release of TF 1.0, the latest version of slim has been available as tf.contrib.slim.
To test that your installation is working, execute the following command; it should run without raising any errors.
python -c "import tensorflow.contrib.slim as slim; eval = slim.evaluation.evaluate_once"
Although, to use TF-Slim for image classification (as we do in this notebook), you also have to install the TF-Slim image models library from here. Let's suppose you install this into a directory called TF_MODELS. Then you should change directory to TF_MODELS/slim before running this notebook, so that these files are in your python path.
To check you've got these two steps to work, just execute the cell below. If it complains about unknown modules, restart the notebook after moving to the TF-Slim models directory.
End of explanation
"""
def regression_model(inputs, is_training=True, scope="deep_regression"):
    """Build a small MLP regressor with two hidden layers.

    Args:
        inputs: A node that yields a `Tensor` of size [batch_size, dimensions].
        is_training: Whether or not we're currently training the model.
        scope: An optional variable_op scope for the model.

    Returns:
        predictions: 1-D `Tensor` of shape [batch_size] of responses.
        end_points: A dict of end points representing the hidden layers.
    """
    with tf.variable_scope(scope, 'deep_regression', [inputs]):
        end_points = {}
        # Every fully_connected layer shares the same ReLu activation and L2
        # weight regularization unless a layer overrides them explicitly.
        with slim.arg_scope([slim.fully_connected],
                            activation_fn=tf.nn.relu,
                            weights_regularizer=slim.l2_regularizer(0.01)):
            # First hidden layer: 32 units.
            hidden = slim.fully_connected(inputs, 32, scope='fc1')
            end_points['fc1'] = hidden
            # Dropout after FC1 to reduce over-fitting; becomes a no-op when
            # is_training is False.
            hidden = slim.dropout(hidden, 0.8, is_training=is_training)
            # Second hidden layer: 16 units.
            hidden = slim.fully_connected(hidden, 16, scope='fc2')
            end_points['fc2'] = hidden
            # Linear output layer: activation_fn=None overrides the ReLu default.
            predictions = slim.fully_connected(hidden, 1, activation_fn=None, scope='prediction')
            end_points['out'] = predictions
            return predictions, end_points
"""
Explanation: Creating your first neural network with TF-Slim
<a id='MLP'></a>
Below we give some code to create a simple multilayer perceptron (MLP) which can be used
for regression problems. The model has 2 hidden layers.
The output is a single node.
When this function is called, it will create various nodes, and silently add them to whichever global TF graph is currently in scope. When a node which corresponds to a layer with adjustable parameters (eg., a fully connected layer) is created, additional parameter variable nodes are silently created, and added to the graph. (We will discuss how to train the parameters later.)
We use variable scope to put all the nodes under a common name,
so that the graph has some hierarchical structure.
This is useful when we want to visualize the TF graph in tensorboard, or if we want to query related
variables.
The fully connected layers all use the same L2 weight decay and ReLu activations, as specified by arg_scope. (However, the final layer overrides these defaults, and uses an identity activation function.)
We also illustrate how to add a dropout layer after the first fully connected layer (FC1). Note that at test time,
we do not drop out nodes, but instead use the average activations; hence we need to know whether the model is being
constructed for training or testing, since the computational graph will be different in the two cases
(although the variables, storing the model parameters, will be shared, since they have the same name/scope).
End of explanation
"""
with tf.Graph().as_default():
# Dummy placeholders for arbitrary number of 1d inputs and outputs
inputs = tf.placeholder(tf.float32, shape=(None, 1))
outputs = tf.placeholder(tf.float32, shape=(None, 1))
# Build model
predictions, end_points = regression_model(inputs)
# Print name and shape of each tensor.
print("Layers")
for k, v in end_points.items():
print('name = {}, shape = {}'.format(v.name, v.get_shape()))
# Print name and shape of parameter nodes (values not yet initialized)
print("\n")
print("Parameters")
for v in slim.get_model_variables():
print('name = {}, shape = {}'.format(v.name, v.get_shape()))
"""
Explanation: Let's create the model and examine its structure.
We create a TF graph and call regression_model(), which adds nodes (tensors) to the graph. We then examine their shape, and print the names of all the model variables which have been implicitly created inside of each layer. We see that the names of the variables follow the scopes that we specified.
End of explanation
"""
def produce_batch(batch_size, noise=0.3):
    """Draw a noisy regression batch from y = sin(x) + 5 with x uniform on [0, 10).

    Args:
        batch_size: Number of (x, y) samples to generate.
        noise: Standard deviation of the Gaussian noise added to y.

    Returns:
        A two-element list [inputs, targets] of float32 arrays, each of
        shape (batch_size, 1).
    """
    inputs = np.random.random(size=[batch_size, 1]) * 10
    targets = np.sin(inputs) + 5 + np.random.normal(size=[batch_size, 1], scale=noise)
    return [inputs.astype(np.float32), targets.astype(np.float32)]
x_train, y_train = produce_batch(200)
x_test, y_test = produce_batch(200)
plt.scatter(x_train, y_train)
"""
Explanation: Let's create some 1d regression data .
We will train and test the model on some noisy observations of a nonlinear function.
End of explanation
"""
def convert_data_to_tensors(x, y):
    """Wrap numpy arrays as constant tensors with a [None, 1] static shape.

    Returns:
        (inputs, outputs) constant tensors built from x and y respectively.
    """
    input_tensor = tf.constant(x)
    input_tensor.set_shape([None, 1])
    target_tensor = tf.constant(y)
    target_tensor.set_shape([None, 1])
    return input_tensor, target_tensor
# The following snippet trains the regression model using a mean_squared_error loss.
ckpt_dir = '/tmp/regression_model/'
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.INFO)
inputs, targets = convert_data_to_tensors(x_train, y_train)
# Make the model.
predictions, nodes = regression_model(inputs, is_training=True)
# Add the loss function to the graph.
loss = tf.losses.mean_squared_error(labels=targets, predictions=predictions)
# The total loss is the user's loss plus any regularization losses.
total_loss = slim.losses.get_total_loss()
# Specify the optimizer and create the train op:
optimizer = tf.train.AdamOptimizer(learning_rate=0.005)
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run the training inside a session.
final_loss = slim.learning.train(
train_op,
logdir=ckpt_dir,
number_of_steps=5000,
save_summaries_secs=5,
log_every_n_steps=500)
print("Finished training. Last batch loss:", final_loss)
print("Checkpoint saved in %s" % ckpt_dir)
"""
Explanation: Let's fit the model to the data
The user has to specify the loss function and the optimizer, and slim does the rest.
In particular, the slim.learning.train function does the following:
For each iteration, evaluate the train_op, which updates the parameters using the optimizer applied to the current minibatch. Also, update the global_step.
Occasionally store the model checkpoint in the specified directory. This is useful in case your machine crashes - then you can simply restart from the specified checkpoint.
End of explanation
"""
with tf.Graph().as_default():
inputs, targets = convert_data_to_tensors(x_train, y_train)
predictions, end_points = regression_model(inputs, is_training=True)
# Add multiple loss nodes.
mean_squared_error_loss = tf.losses.mean_squared_error(labels=targets, predictions=predictions)
absolute_difference_loss = slim.losses.absolute_difference(predictions, targets)
# The following two ways to compute the total loss are equivalent
regularization_loss = tf.add_n(slim.losses.get_regularization_losses())
total_loss1 = mean_squared_error_loss + absolute_difference_loss + regularization_loss
# Regularization Loss is included in the total loss by default.
# This is good for training, but not for testing.
total_loss2 = slim.losses.get_total_loss(add_regularization_losses=True)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op) # Will initialize the parameters with random weights.
total_loss1, total_loss2 = sess.run([total_loss1, total_loss2])
print('Total Loss1: %f' % total_loss1)
print('Total Loss2: %f' % total_loss2)
print('Regularization Losses:')
for loss in slim.losses.get_regularization_losses():
print(loss)
print('Loss Functions:')
for loss in slim.losses.get_losses():
print(loss)
"""
Explanation: Training with multiple loss functions.
Sometimes we have multiple objectives we want to simultaneously optimize.
In slim, it is easy to add more losses, as we show below. (We do not optimize the total loss in this example,
but we show how to compute it.)
End of explanation
"""
with tf.Graph().as_default():
inputs, targets = convert_data_to_tensors(x_test, y_test)
# Create the model structure. (Parameters will be loaded below.)
predictions, end_points = regression_model(inputs, is_training=False)
# Make a session which restores the old parameters from a checkpoint.
sv = tf.train.Supervisor(logdir=ckpt_dir)
with sv.managed_session() as sess:
inputs, predictions, targets = sess.run([inputs, predictions, targets])
plt.scatter(inputs, targets, c='r');
plt.scatter(inputs, predictions, c='b');
plt.title('red=true, blue=predicted')
"""
Explanation: Let's load the saved model and use it for prediction.
End of explanation
"""
with tf.Graph().as_default():
inputs, targets = convert_data_to_tensors(x_test, y_test)
predictions, end_points = regression_model(inputs, is_training=False)
# Specify metrics to evaluate:
names_to_value_nodes, names_to_update_nodes = slim.metrics.aggregate_metric_map({
'Mean Squared Error': slim.metrics.streaming_mean_squared_error(predictions, targets),
'Mean Absolute Error': slim.metrics.streaming_mean_absolute_error(predictions, targets)
})
# Make a session which restores the old graph parameters, and then run eval.
sv = tf.train.Supervisor(logdir=ckpt_dir)
with sv.managed_session() as sess:
metric_values = slim.evaluation.evaluation(
sess,
num_evals=1, # Single pass over data
eval_op=names_to_update_nodes.values(),
final_op=names_to_value_nodes.values())
names_to_values = dict(zip(names_to_value_nodes.keys(), metric_values))
for key, value in names_to_values.items():
print('%s: %f' % (key, value))
"""
Explanation: Let's compute various evaluation metrics on the test set.
In TF-Slim terminology, losses are optimized, but metrics (which may not be differentiable, e.g., precision and recall) are just measured. As an illustration, the code below computes mean squared error and mean absolute error metrics on the test set.
Each metric declaration creates several local variables (which must be initialized via tf.initialize_local_variables()) and returns both a value_op and an update_op. When evaluated, the value_op returns the current value of the metric. The update_op loads a new batch of data, runs the model, obtains the predictions and accumulates the metric statistics appropriately before returning the current value of the metric. We store these value nodes and update nodes in 2 dictionaries.
After creating the metric nodes, we can pass them to slim.evaluation.evaluation, which repeatedly evaluates these nodes the specified number of times. (This allows us to compute the evaluation in a streaming fashion across minibatches, which is useful for large datasets.) Finally, we print the final value of each metric.
End of explanation
"""
import tensorflow as tf
from datasets import dataset_utils
url = "http://download.tensorflow.org/data/flowers.tar.gz"
flowers_data_dir = '/tmp/flowers'
if not tf.gfile.Exists(flowers_data_dir):
tf.gfile.MakeDirs(flowers_data_dir)
dataset_utils.download_and_uncompress_tarball(url, flowers_data_dir)
"""
Explanation: Reading Data with TF-Slim
<a id='ReadingTFSlimDatasets'></a>
Reading data with TF-Slim has two main components: A
Dataset and a
DatasetDataProvider. The former is a descriptor of a dataset, while the latter performs the actions necessary for actually reading the data. Lets look at each one in detail:
Dataset
A TF-Slim
Dataset
contains descriptive information about a dataset necessary for reading it, such as the list of data files and how to decode them. It also contains metadata including class labels, the size of the train/test splits and descriptions of the tensors that the dataset provides. For example, some datasets contain images with labels. Others augment this data with bounding box annotations, etc. The Dataset object allows us to write generic code using the same API, regardless of the data content and encoding type.
TF-Slim's Dataset works especially well when the data is stored as a (possibly sharded)
TFRecords file, where each record contains a tf.train.Example protocol buffer.
TF-Slim uses a consistent convention for naming the keys and values inside each Example record.
DatasetDataProvider
A
DatasetDataProvider is a class which actually reads the data from a dataset. It is highly configurable to read the data in various ways that may make a big impact on the efficiency of your training process. For example, it can be single or multi-threaded. If your data is sharded across many files, it can read each files serially, or from every file simultaneously.
Demo: The Flowers Dataset
For convenience, we've include scripts to convert several common image datasets into TFRecord format and have provided
the Dataset descriptor files necessary for reading them. We demonstrate how easy it is to use these dataset via the Flowers dataset below.
Download the Flowers Dataset
<a id='DownloadFlowers'></a>
We've made available a tarball of the Flowers dataset which has already been converted to TFRecord format.
End of explanation
"""
from datasets import flowers
import tensorflow as tf
from tensorflow.contrib import slim
with tf.Graph().as_default():
dataset = flowers.get_split('train', flowers_data_dir)
data_provider = slim.dataset_data_provider.DatasetDataProvider(
dataset, common_queue_capacity=32, common_queue_min=1)
image, label = data_provider.get(['image', 'label'])
with tf.Session() as sess:
with slim.queues.QueueRunners(sess):
for i in range(4):
np_image, np_label = sess.run([image, label])
height, width, _ = np_image.shape
class_name = name = dataset.labels_to_names[np_label]
plt.figure()
plt.imshow(np_image)
plt.title('%s, %d x %d' % (name, height, width))
plt.axis('off')
plt.show()
"""
Explanation: Display some of the data.
End of explanation
"""
def my_cnn(images, num_classes, is_training):  # is_training is not used...
    """Simple CNN: two conv/pool stages, then two fully connected layers.

    The final layer is linear (no activation); apply softmax externally,
    either in the loss (training) or in the prediction op (testing).
    """
    with slim.arg_scope([slim.max_pool2d], kernel_size=[3, 3], stride=2):
        x = slim.conv2d(images, 64, [5, 5])
        x = slim.max_pool2d(x)
        x = slim.conv2d(x, 64, [5, 5])
        x = slim.max_pool2d(x)
        x = slim.flatten(x)
        x = slim.fully_connected(x, 192)
        logits = slim.fully_connected(x, num_classes, activation_fn=None)
        return logits
"""
Explanation: Convolutional neural nets (CNNs).
<a id='CNN'></a>
In this section, we show how to train an image classifier using a simple CNN.
Define the model.
Below we define a simple CNN. Note that the output layer is linear function - we will apply softmax transformation externally to the model, either in the loss function (for training), or in the prediction function (during testing).
End of explanation
"""
import tensorflow as tf
with tf.Graph().as_default():
# The model can handle any input size because the first layer is convolutional.
# The size of the model is determined when image_node is first passed into the my_cnn function.
# Once the variables are initialized, the size of all the weight matrices is fixed.
# Because of the fully connected layers, this means that all subsequent images must have the same
# input size as the first image.
batch_size, height, width, channels = 3, 28, 28, 3
images = tf.random_uniform([batch_size, height, width, channels], maxval=1)
# Create the model.
num_classes = 10
logits = my_cnn(images, num_classes, is_training=True)
probabilities = tf.nn.softmax(logits)
# Initialize all the variables (including parameters) randomly.
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
# Run the init_op, evaluate the model outputs and print the results:
sess.run(init_op)
probabilities = sess.run(probabilities)
print('Probabilities Shape:')
print(probabilities.shape) # batch_size x num_classes
print('\nProbabilities:')
print(probabilities)
print('\nSumming across all classes (Should equal 1):')
print(np.sum(probabilities, 1)) # Each row sums to 1
"""
Explanation: Apply the model to some randomly generated images.
End of explanation
"""
from preprocessing import inception_preprocessing
import tensorflow as tf
from tensorflow.contrib import slim
def load_batch(dataset, batch_size=32, height=299, width=299, is_training=False):
    """Loads a single batch of data.

    Args:
        dataset: The dataset to load.
        batch_size: The number of images in the batch.
        height: The size of each image after preprocessing.
        width: The size of each image after preprocessing.
        is_training: Whether or not we're currently training or evaluating.

    Returns:
        images: A Tensor of size [batch_size, height, width, 3], image samples that have been preprocessed.
        images_raw: A Tensor of size [batch_size, height, width, 3], image samples that can be used for visualization.
        labels: A Tensor of size [batch_size], whose values range between 0 and dataset.num_classes.
    """
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset, common_queue_capacity=32,
        common_queue_min=8)
    raw_image, label = provider.get(['image', 'label'])

    # Preprocess image for usage by Inception.
    network_image = inception_preprocessing.preprocess_image(
        raw_image, height, width, is_training=is_training)

    # Resize the raw image (for display only) to the same spatial size.
    display_image = tf.expand_dims(raw_image, 0)
    display_image = tf.image.resize_images(display_image, [height, width])
    display_image = tf.squeeze(display_image)

    # Batch it up.
    return tf.train.batch(
        [network_image, display_image, label],
        batch_size=batch_size,
        num_threads=1,
        capacity=2 * batch_size)
from datasets import flowers

# This might take a few minutes.
train_dir = '/tmp/tfslim_model/'
print('Will save model to %s' % train_dir)

with tf.Graph().as_default():
    tf.logging.set_verbosity(tf.logging.INFO)

    dataset = flowers.get_split('train', flowers_data_dir)
    images, _, labels = load_batch(dataset)

    # Create the model:
    logits = my_cnn(images, num_classes=dataset.num_classes, is_training=True)

    # Specify the loss function:
    one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
    slim.losses.softmax_cross_entropy(logits, one_hot_labels)
    total_loss = slim.losses.get_total_loss()

    # Create some summaries to visualize the training process:
    tf.summary.scalar('losses/Total Loss', total_loss)

    # Specify the optimizer and create the train op:
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train_op = slim.learning.create_train_op(total_loss, optimizer)

    # Run the training:
    final_loss = slim.learning.train(
        train_op,
        logdir=train_dir,
        number_of_steps=1,  # For speed, we just do 1 epoch
        save_summaries_secs=1)

# final_loss is a float; %d would silently truncate it (the fine-tuning cell
# later in this notebook correctly uses %f).
print('Finished training. Final batch loss %f' % final_loss)
"""
Explanation: Train the model on the Flowers dataset.
Before starting, make sure you've run the code to <a href="#DownloadFlowers">Download the Flowers</a> dataset. Now, we'll get a sense of what it looks like to use TF-Slim's training functions found in
learning.py. First, we'll create a function, load_batch, that loads batches of dataset from a dataset. Next, we'll train a model for a single step (just to demonstrate the API), and evaluate the results.
End of explanation
"""
from datasets import flowers
# This might take a few minutes.
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.DEBUG)
dataset = flowers.get_split('train', flowers_data_dir)
images, _, labels = load_batch(dataset)
logits = my_cnn(images, num_classes=dataset.num_classes, is_training=False)
predictions = tf.argmax(logits, 1)
# Define the metrics:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'eval/Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
'eval/Recall@5': slim.metrics.streaming_recall_at_k(logits, labels, 5),
})
print('Running evaluation Loop...')
checkpoint_path = tf.train.latest_checkpoint(train_dir)
metric_values = slim.evaluation.evaluate_once(
master='',
checkpoint_path=checkpoint_path,
logdir=train_dir,
eval_op=names_to_updates.values(),
final_op=names_to_values.values())
names_to_values = dict(zip(names_to_values.keys(), metric_values))
for name in names_to_values:
print('%s: %f' % (name, names_to_values[name]))
"""
Explanation: Evaluate some metrics.
As we discussed above, we can compute various metrics besides the loss.
Below we show how to compute prediction accuracy of the trained model, as well as top-5 classification accuracy. (The difference between evaluation and evaluation_loop is that the latter writes the results to a log directory, so they can be viewed in tensorboard.)
End of explanation
"""
from datasets import dataset_utils
url = "http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz"
checkpoints_dir = '/tmp/checkpoints'
if not tf.gfile.Exists(checkpoints_dir):
tf.gfile.MakeDirs(checkpoints_dir)
dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)
"""
Explanation: Using pre-trained models
<a id='Pretrained'></a>
Neural nets work best when they have many parameters, making them very flexible function approximators.
However, this means they must be trained on big datasets. Since this process is slow, we provide various pre-trained models - see the list here.
You can either use these models as-is, or you can perform "surgery" on them, to modify them for some other task. For example, it is common to "chop off" the final pre-softmax layer, and replace it with a new set of weights corresponding to some new set of labels. You can then quickly fine tune the new model on a small new dataset. We illustrate this below, using inception-v1 as the base model. While models like Inception V3 are more powerful, Inception V1 is used for speed purposes.
Take into account that VGG and ResNet final layers have only 1000 outputs rather than 1001. The ImageNet dataset provided has an empty background class which can be used to fine-tune the model to other tasks. VGG and ResNet models provided here don't use that class. We provide two examples of using pretrained models: Inception V1 and VGG-19 models to highlight this difference.
Download the Inception V1 checkpoint
End of explanation
"""
import numpy as np
import os
import tensorflow as tf
try:
import urllib2 as urllib
except ImportError:
import urllib.request as urllib
from datasets import imagenet
from nets import inception
from preprocessing import inception_preprocessing
from tensorflow.contrib import slim
image_size = inception.inception_v1.default_image_size
with tf.Graph().as_default():
url = 'https://upload.wikimedia.org/wikipedia/commons/7/70/EnglishCockerSpaniel_simon.jpg'
image_string = urllib.urlopen(url).read()
image = tf.image.decode_jpeg(image_string, channels=3)
processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
processed_images = tf.expand_dims(processed_image, 0)
# Create the model, use the default arg scope to configure the batch norm parameters.
with slim.arg_scope(inception.inception_v1_arg_scope()):
logits, _ = inception.inception_v1(processed_images, num_classes=1001, is_training=False)
probabilities = tf.nn.softmax(logits)
init_fn = slim.assign_from_checkpoint_fn(
os.path.join(checkpoints_dir, 'inception_v1.ckpt'),
slim.get_model_variables('InceptionV1'))
with tf.Session() as sess:
init_fn(sess)
np_image, probabilities = sess.run([image, probabilities])
probabilities = probabilities[0, 0:]
sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])]
plt.figure()
plt.imshow(np_image.astype(np.uint8))
plt.axis('off')
plt.show()
names = imagenet.create_readable_names_for_imagenet_labels()
for i in range(5):
index = sorted_inds[i]
print('Probability %0.2f%% => [%s]' % (probabilities[index] * 100, names[index]))
"""
Explanation: Apply Pre-trained Inception V1 model to Images.
We have to convert each image to the size expected by the model checkpoint.
There is no easy way to determine this size from the checkpoint itself.
So we use a preprocessor to enforce this.
End of explanation
"""
from datasets import dataset_utils
import tensorflow as tf
url = "http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz"
checkpoints_dir = '/tmp/checkpoints'
if not tf.gfile.Exists(checkpoints_dir):
tf.gfile.MakeDirs(checkpoints_dir)
dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)
"""
Explanation: Download the VGG-16 checkpoint
End of explanation
"""
import numpy as np
import os
import tensorflow as tf
try:
import urllib2
except ImportError:
import urllib.request as urllib
from datasets import imagenet
from nets import vgg
from preprocessing import vgg_preprocessing
from tensorflow.contrib import slim
image_size = vgg.vgg_16.default_image_size
with tf.Graph().as_default():
url = 'https://upload.wikimedia.org/wikipedia/commons/d/d9/First_Student_IC_school_bus_202076.jpg'
image_string = urllib.urlopen(url).read()
image = tf.image.decode_jpeg(image_string, channels=3)
processed_image = vgg_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
processed_images = tf.expand_dims(processed_image, 0)
# Create the model, use the default arg scope to configure the batch norm parameters.
with slim.arg_scope(vgg.vgg_arg_scope()):
# 1000 classes instead of 1001.
logits, _ = vgg.vgg_16(processed_images, num_classes=1000, is_training=False)
probabilities = tf.nn.softmax(logits)
init_fn = slim.assign_from_checkpoint_fn(
os.path.join(checkpoints_dir, 'vgg_16.ckpt'),
slim.get_model_variables('vgg_16'))
with tf.Session() as sess:
init_fn(sess)
np_image, probabilities = sess.run([image, probabilities])
probabilities = probabilities[0, 0:]
sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])]
plt.figure()
plt.imshow(np_image.astype(np.uint8))
plt.axis('off')
plt.show()
names = imagenet.create_readable_names_for_imagenet_labels()
for i in range(5):
index = sorted_inds[i]
# Shift the index of a class name by one.
print('Probability %0.2f%% => [%s]' % (probabilities[index] * 100, names[index+1]))
"""
Explanation: Apply Pre-trained VGG-16 model to Images.
We have to convert each image to the size expected by the model checkpoint.
There is no easy way to determine this size from the checkpoint itself.
So we use a preprocessor to enforce this. Pay attention to the difference caused by 1000 classes instead of 1001.
End of explanation
"""
# Note that this may take several minutes.
import os
from datasets import flowers
from nets import inception
from preprocessing import inception_preprocessing
from tensorflow.contrib import slim
image_size = inception.inception_v1.default_image_size
def get_init_fn():
    """Returns a function run by the chief worker to warm-start the training."""
    # Restore every model variable from the Inception V1 checkpoint except the
    # classification heads, which are re-trained for the new label set.
    checkpoint_exclude_scopes = ["InceptionV1/Logits", "InceptionV1/AuxLogits"]
    exclusions = [scope.strip() for scope in checkpoint_exclude_scopes]

    variables_to_restore = [
        var for var in slim.get_model_variables()
        if not any(var.op.name.startswith(prefix) for prefix in exclusions)
    ]

    return slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'inception_v1.ckpt'),
        variables_to_restore)
train_dir = '/tmp/inception_finetuned/'
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.INFO)
dataset = flowers.get_split('train', flowers_data_dir)
images, _, labels = load_batch(dataset, height=image_size, width=image_size)
# Create the model, use the default arg scope to configure the batch norm parameters.
with slim.arg_scope(inception.inception_v1_arg_scope()):
logits, _ = inception.inception_v1(images, num_classes=dataset.num_classes, is_training=True)
# Specify the loss function:
one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
slim.losses.softmax_cross_entropy(logits, one_hot_labels)
total_loss = slim.losses.get_total_loss()
# Create some summaries to visualize the training process:
tf.summary.scalar('losses/Total Loss', total_loss)
# Specify the optimizer and create the train op:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run the training:
final_loss = slim.learning.train(
train_op,
logdir=train_dir,
init_fn=get_init_fn(),
number_of_steps=2)
print('Finished training. Last batch loss %f' % final_loss)
"""
Explanation: Fine-tune the model on a different set of labels.
We will fine tune the inception model on the Flowers dataset.
End of explanation
"""
import numpy as np
import tensorflow as tf
from datasets import flowers
from nets import inception
from tensorflow.contrib import slim
image_size = inception.inception_v1.default_image_size
batch_size = 3
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.INFO)
dataset = flowers.get_split('train', flowers_data_dir)
images, images_raw, labels = load_batch(dataset, height=image_size, width=image_size)
# Create the model, use the default arg scope to configure the batch norm parameters.
with slim.arg_scope(inception.inception_v1_arg_scope()):
logits, _ = inception.inception_v1(images, num_classes=dataset.num_classes, is_training=True)
probabilities = tf.nn.softmax(logits)
checkpoint_path = tf.train.latest_checkpoint(train_dir)
init_fn = slim.assign_from_checkpoint_fn(
checkpoint_path,
slim.get_variables_to_restore())
with tf.Session() as sess:
with slim.queues.QueueRunners(sess):
sess.run(tf.initialize_local_variables())
init_fn(sess)
np_probabilities, np_images_raw, np_labels = sess.run([probabilities, images_raw, labels])
for i in range(batch_size):
image = np_images_raw[i, :, :, :]
true_label = np_labels[i]
predicted_label = np.argmax(np_probabilities[i, :])
predicted_name = dataset.labels_to_names[predicted_label]
true_name = dataset.labels_to_names[true_label]
plt.figure()
plt.imshow(image.astype(np.uint8))
plt.title('Ground Truth: [%s], Prediction [%s]' % (true_name, predicted_name))
plt.axis('off')
plt.show()
"""
Explanation: Apply fine tuned model to some images.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.17/_downloads/e52b6a53120d8703a6509530cf6251dc/plot_roi_erpimage_by_rt.ipynb | bsd-3-clause | # Authors: Jona Sassenhagen <jona.sassenhagen@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne.event import define_target_events
from mne.channels import make_1020_channel_selections
print(__doc__)
"""
Explanation: ===========================================================
Plot single trial activity, grouped by ROI and sorted by RT
===========================================================
This will produce what is sometimes called an event related
potential / field (ERP/ERF) image.
The EEGLAB example file - containing an experiment with button press responses
to simple visual stimuli - is read in and response times are calculated.
Regions of Interest are determined by the channel types (in 10/20 channel
notation, even channels are right, odd are left, and 'z' are central). The
median and the Global Field Power within each channel group is calculated,
and the trials are plotted, sorting by response time.
End of explanation
"""
data_path = mne.datasets.testing.data_path()
fname = data_path + "/EEGLAB/test_raw.set"
montage = data_path + "/EEGLAB/test_chans.locs"
event_id = {"rt": 1, "square": 2} # must be specified for str events
eog = {"FPz", "EOG1", "EOG2"}
raw = mne.io.read_raw_eeglab(fname, eog=eog, montage=montage,
stim_channel=False)
events = mne.events_from_annotations(raw, event_id)[0]
"""
Explanation: Load EEGLAB example data (a small EEG dataset)
End of explanation
"""
# define target events:
# 1. find response times: distance between "square" and "rt" events
# 2. extract A. "square" events B. followed by a button press within 700 msec
tmax = .7
sfreq = raw.info["sfreq"]
reference_id, target_id = 2, 1
new_events, rts = define_target_events(events, reference_id, target_id, sfreq,
tmin=0., tmax=tmax, new_id=2)
epochs = mne.Epochs(raw, events=new_events, tmax=tmax + .1,
event_id={"square": 2})
"""
Explanation: Create Epochs
End of explanation
"""
# Parameters for plotting
order = rts.argsort() # sorting from fast to slow trials
selections = make_1020_channel_selections(epochs.info, midline="12z")
# The actual plots (GFP)
epochs.plot_image(group_by=selections, order=order, sigma=1.5,
overlay_times=rts / 1000., combine='gfp',
ts_args=dict(vlines=[0, rts.mean() / 1000.]))
"""
Explanation: Plot using GFP
End of explanation
"""
epochs.plot_image(group_by=selections, order=order, sigma=1.5,
overlay_times=rts / 1000., combine='median',
ts_args=dict(vlines=[0, rts.mean() / 1000.]))
"""
Explanation: Plot using median
End of explanation
"""
|
mismosmi/idea2birds | src/evaluate.ipynb | mit | import numpy as np
import scipy as sp
import birds
import argparse
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.animation import FuncAnimation
from matplotlib.collections import PathCollection
from IPython.display import HTML
from scipy.optimize import curve_fit
#%matplotlib ipympl
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
"""
Explanation: Bird Simulation Evaluation Script
Imports & Preparations
End of explanation
"""
figpath = '../img/'
figwidth = 4 #figure width in inches
figsize = (figwidth,figwidth*2.5/4)
"""
Explanation: Figure output Settings
End of explanation
"""
frames = 1000
birds.param_record = False
birds.trace = None
birds.flock = birds.Flock()
fig = plt.figure(figsize=(5, 5*birds.flock.args['height']/birds.flock.args['width']), facecolor="white")
ax = fig.add_axes([0.0, 0.0, 1.0, 1.0], aspect=1, frameon=False)
birds.collection = birds.MarkerCollection(birds.flock.args['n'])
ax.add_collection(birds.collection._collection)
ax.set_xlim(0, birds.flock.args['width'])
ax.set_ylim(0, birds.flock.args['height'])
ax.set_xticks([])
ax.set_yticks([])
animation = FuncAnimation(fig, birds.update, interval=10, frames=frames)
HTML(animation.to_html5_video())
"""
Explanation: Run with default Settings
End of explanation
"""
def avg_with_error(f, avg_time, va_t=None, eta_index=0, prerun_time=500):
    """Average the flock alignment v_a over avg_time steps and estimate its variance.

    Samples f.get_va() once before each call to f.run(), so the flock is
    advanced avg_time steps in total (one step past the last recorded sample).

    Args:
        f: Flock instance providing get_va() and run().
        avg_time: Number of steps to sample and average over.
        va_t: Optional 2-d record array. When given, the sampled time series
            is written to va_t[eta_index, prerun_time:prerun_time + avg_time].
            (Default was previously the sentinel False; None is equivalent.)
        eta_index: Row of va_t to write into (ignored when va_t is None).
        prerun_time: Column offset into va_t (ignored when va_t is None).

    Returns:
        (mean, variance) of the avg_time sampled v_a values.
    """
    samples = np.empty(avg_time)
    for t in range(avg_time):
        samples[t] = f.get_va()
        f.run()
    va_avg = samples.mean()
    if va_t is not None:
        # Bounded slice: only the sampled window is overwritten, so va_t may
        # be wider than prerun_time + avg_time without raising.
        va_t[eta_index, prerun_time:prerun_time + avg_time] = samples
    var = np.mean((samples - va_avg) ** 2)
    return va_avg, var
res = 30
prerun_time = 500
averaging_time = 1000
repeat = 3
rho=4
Eta = np.linspace(0.,7.,res)
N = [50,100,400]
va_t = np.zeros((6, prerun_time+averaging_time))
va = np.zeros((len(N),res))
vas = np.zeros(repeat)
errorbars = np.zeros_like(va)
variance = np.zeros(repeat)
for c,n in enumerate(N):
for i,eta in enumerate(Eta):
for j in range(repeat):
f = birds.Flock(n=n,eta=eta,rho=rho)
record_time = (n==100 and i%5==0 and j==0)
for t in range(prerun_time):
f.run()
if record_time:
va_t[int(i/5),t]=f.get_va()
if record_time:
va_avg, vari = avg_with_error(f, averaging_time, va_t, int(i/5), prerun_time)
else:
va_avg, vari = avg_with_error(f, averaging_time)
vas[j] = va_avg
variance[j] = vari
va[c][i] = vas.sum()/repeat
errorbars[c][i] = np.sqrt(variance.sum()/repeat)
print('Has been run.')
plt.figure(figsize=(10,8))
x = np.linspace(0,1500,1500)
for e,vt in enumerate(va_t):
plt.plot(x, vt, label='Eta = '+ '%1.2f' % Eta[e*5])
plt.legend()
plt.xlim([0, 40])
plt.xlabel('$t$')
plt.ylabel('$v_a$')
plt.show()
len(x)
"""
Explanation: Find moving phase
Run with varying Eta
End of explanation
"""
prec = 0.05
# Initial square/lin fit to determinine p0-parameters
def finvsquare(x, sqwidth):
    """Inverted parabola 1 - (sqwidth * x)**2, used to fit v_a near eta = 0."""
    return 1 - (sqwidth * x) ** 2


def flin(x, m, b):
    """Straight line with slope m and intercept b."""
    return b + m * x
# find array index of eta_c as first approx to split fit in linear- and phase-relation-parts
vamin = np.argmin(va[0])
# initial linear fit for better guess of eta_c
linparams = sp.optimize.curve_fit(flin,Eta[vamin:len(Eta)],va[0][vamin:len(va[0])],p0=[0.2,-0.5],sigma=errorbars[0][vamin:len(errorbars[0])])
valin = flin(Eta,*linparams[0])
# set fit parameters for square/lin fit
eta_c_ind = np.where(np.absolute(va[0] - valin) > prec)[0][-1]
x = Eta[0:eta_c_ind]
y = va[0][0:eta_c_ind]
err = errorbars[0][0:eta_c_ind]
# fit it with square + line
sqwidth = sp.optimize.curve_fit(finvsquare,x,y,p0=1/6,sigma=err)
linparams = sp.optimize.curve_fit(flin,Eta[eta_c_ind:len(Eta)],va[0][eta_c_ind:len(va[0])],p0=[0.2,-0.5],sigma=errorbars[0][eta_c_ind:len(errorbars[0])])
# calculate second approx for eta_c from fit
p,q = linparams[0][0]/sqwidth[0][0]**2,(linparams[0][1]-1)/sqwidth[0][0]**2
eta_c_0 = -p/2 + np.sqrt((p/2)**2 - q)
# recalculate fit parameters
eta_c_ind = np.where(Eta < eta_c_0)[0][-1]
x = Eta[0:eta_c_ind]
y = va[0][0:eta_c_ind]
err = errorbars[0][0:eta_c_ind]
# fit beta
xlog = np.log(eta_c_0-x)
ylog = np.log(y)
errlog = np.log(err)
# logarithmic fit
def fphase_temp_log(x, beta, offset):
    """Line in log-log space: log(v_a) = beta * log(eta_c - eta) + offset."""
    return offset + beta * x
tempparams = sp.optimize.curve_fit(fphase_temp_log,xlog,ylog,p0=[0.5,0],sigma=errlog)
# final fit for eta_c and beta using former fits as start values
def fphase(x, eta_c, beta, offset):
    """Power-law phase relation v_a ~ (eta_c - x)**beta with log-amplitude offset."""
    amplitude = np.e ** offset
    return amplitude * (eta_c - x) ** beta
def fphaselog(x, eta_c, beta, offset):
    """Logarithm of the phase relation: log v_a = beta*log(eta_c - x) + offset."""
    return offset + beta * np.log(eta_c - x)
phaseparams = sp.optimize.curve_fit(fphaselog,x,ylog,p0=[eta_c_0,*tempparams[0]],sigma=errlog)
"""
Explanation: Fit $\eta_c$
End of explanation
"""
# Plot v_a(eta) with error bars for every flock size, overlaid with the three fits.
plt.figure(figsize=figsize)
for c,n in enumerate(N):
    plt.errorbar(Eta, va[c], yerr=errorbars[c], fmt='.', label="N="+str(n))
x = np.linspace(Eta[0],Eta[-1])
# restrict the phase-relation curve to eta < fitted eta_c (it diverges beyond)
xphase = x[np.where(phaseparams[0][0] > x)]
plt.plot(x,finvsquare(x,sqwidth[0]), label='Square Fit', linewidth=0.5)
plt.plot(xphase,fphase(xphase,*phaseparams[0]), label='Phase relation Fit')
plt.plot(x,flin(x,*linparams[0]), label='linear Fit', linewidth=0.5)
plt.xlabel("$\\eta$")
plt.ylabel("$v_a$")
plt.xlim([0,5.5])
plt.ylim([0,1])
plt.legend()
plt.savefig(figpath+'va_over_eta.eps')
"""
Explanation: Plot $v_a$ over $\eta$
End of explanation
"""
# Log-log plot of v_a vs the reduced noise (eta_c - eta)/eta_c to read off beta as a slope.
plt.figure(figsize=figsize)
plt.ylim(ymin=0.2)
plt.xlim(xmin=0.01)
eta_c = 4.5 # This is a guess of the critical eta value. There must be a better way of determining it
for c,n in enumerate(N):
    plt.plot( (eta_c-Eta)/eta_c, va[c],'.',label="N="+str(n))
ca = plt.gca()
ca.set_xscale('log')
ca.set_yscale('log')
plt.xlabel("$\\frac{\\eta_c - \\eta}{\\eta_c}$")
plt.ylabel("$v_a$")
# Fit a straight line in log-log space (largest N only); its slope is the exponent beta.
x = (eta_c-Eta)/eta_c
# discard points with eta >= eta_c (log undefined there)
select = x > 0
x = np.log(x[select])
y = np.log(va[-1][select])
coef = np.polyfit(x,y,1)
plt.plot(np.logspace(-1,0,10), np.logspace(-1,0,10)**coef[0], label="$\\beta=$"+str(coef[0]))
plt.legend();
# if you come up with a better naming scheme PLEASE change this
plt.savefig(figpath+'va_over_etac_minus_eta_over_etac.eps')
print('eta_c='+str(eta_c))
"""
Explanation: Plot $v_a$ over $(\eta_c - \eta)/\eta_c)$
End of explanation
"""
# Sweep the density rho (log-spaced) and measure the average alignment v_a,
# repeating each run `repeat` times to estimate error bars.
res = 15
time = 1000           # NOTE(review): shadows the stdlib `time` module name within this notebook
averaging_time = 500
repeat = 5
eta = .3
Rho = np.logspace(-3,-0, res)
N = [100]
va = np.zeros((len(N), res))
vas = np.zeros(repeat)
errorbars = np.zeros_like(va)
variance = np.zeros(repeat)
for c,n in enumerate(N):
    for i,rho in enumerate(Rho):
        for j in range(repeat):
            # fresh flock per repetition; evolve it for `time` steps
            f = birds.Flock(n=n, eta=eta, rho=rho)
            for t in range(time):
                f.run()
            # time-averaged alignment and its variance over the last `averaging_time` steps
            va_avg, vari = avg_with_error(f, averaging_time)
            vas[j] = va_avg
            variance[j] = vari
        # mean over repetitions and pooled standard error
        va[c][i] = vas.sum()/repeat
        errorbars[c][i] = np.sqrt(variance.sum()/repeat)
plt.figure(figsize=figsize)
for c,n in enumerate(N):
    plt.errorbar(Rho, va[c], yerr=errorbars[c], fmt='.', label="N="+str(n))
plt.xlabel("$\\rho$")
plt.ylabel("$v_a$")
plt.legend()
plt.title("Alignment dependance on density");
plt.savefig(figpath+'va_over_rho.eps')
"""
Explanation: Run with varying density
End of explanation
"""
# Sweep the vision-cone angle at zero noise for several densities and
# measure the average alignment v_a with repetition-based error bars.
res = 20
time = 1000
averaging_time = 1000
repeat = 3
eta = 0
Angle = np.linspace(1,180,res,dtype=int)
Rho= [0.01,0.1,1] # np.logspace(-3, 0, 5)
n = 100
va = np.zeros((len(Rho), res))
vas = np.zeros(repeat)
errorbars = np.zeros_like(va)
variance = np.zeros(repeat)
for c,rho in enumerate(Rho):
    for i,angle in enumerate(Angle):
        for j in range(repeat):
            f = birds.Flock(n=n, eta=eta, rho=rho, angle=angle)
            for t in range(time):
                f.run()
            va_avg, vari = avg_with_error(f, averaging_time)
            vas[j] = va_avg
            variance[j] = vari
        # mean over repetitions and pooled standard error
        va[c][i] = vas.sum()/repeat
        errorbars[c][i] = np.sqrt(variance.sum()/repeat)
plt.figure()
for c,rho in enumerate(Rho):
    # x-axis converted from degrees to radians
    plt.errorbar(Angle/360*2*np.pi, va[c], yerr=errorbars[c], fmt='.', label="$\\rho$="+str(np.round(rho,decimals=4)))
plt.xlabel("$\\theta_{cone}$")
plt.ylabel("$v_a$")
plt.legend()
plt.title("Alignment dependance on angle");
plt.savefig(figpath+'va_over_angle.eps')
"""
Explanation: Run with varying angle
This has to be done for low rho and eta. This should be evident from the graphs above as a high value of eta reduces the alignment going to almost zero.
A high density causes more alignment, and thus if we attempt running with higher densities, they all align anyway.
End of explanation
"""
|
coursemdetw/reveal2 | content/notebook/.ipynb_checkpoints/Elements of Evolutionary Algorithms-checkpoint.ipynb | mit | import random
from deap import algorithms, base, creator, tools
# Single-objective maximization fitness (weights=(1.0,) means "maximize").
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
# An individual is a plain list of genes carrying a FitnessMax.
creator.create("Individual", list, fitness=creator.FitnessMax)
def evalOneMax(individual):
    """OneMax fitness: the number of ones in the bit string, as a 1-tuple.

    DEAP requires fitnesses to be tuples, even for a single objective.
    """
    ones = sum(individual)
    return ones,
"""
Explanation: <img src='http://www.puc-rio.br/sobrepuc/admin/vrd/brasao/download/ass_vertpb_reduz4.jpg' align='left'/>
Demostration Class 02
Elements of Evolutionary Algorithms
Luis Martí, LIRA/DEE/PUC-Rio
http://lmarti.com; lmarti@ele.puc-rio.br
Advanced Evolutionary Computation: Theory and Practice
The notebook is better viewed rendered as slides. You can convert it to slides and view them by:
- using nbconvert with a command like:
bash
$ ipython nbconvert --to slides --post serve <this-notebook-name.ipynb>
- installing Reveal.js - Jupyter/IPython Slideshow Extension
- using the online IPython notebook slide viewer (some slides of the notebook might not be properly rendered).
This and other related IPython notebooks can be found at the course github repository:
* https://github.com/lmarti/evolutionary-computation-course
In this demonstration class we will deal with the features and problems shared by most evolutionary algorithms.
Note: Most of the material used in this notebook comes from DEAP documentation.
Elements to take into account using evolutionary algorithms
Individual representation (binary, Gray, floating-point, etc.);
evaluation and fitness assignment;
mating selection, that establishes a partial order of individuals in the population using their fitness function value as reference and determines the degree at which individuals in the population will take part in the generation of new (offspring) individuals.
variation, that applies a range of evolution-inspired operators, like crossover, mutation, etc., to synthesize offspring individuals from the current (parent) population. This process is supposed to prime the fittest individuals so they play a bigger role in the generation of the offspring.
environmental selection, that merges the parent and offspring individuals to produce the population that will be used in the next iteration. This process often involves the deletion of some individuals using a given criterion in order to keep the amount of individuals bellow a certain threshold.
stopping criterion, that determines when the algorithm should be stopped, either because the optimum was reached or because the optimization process is not progressing.
Hence a 'general' evolutionary algorithm can be described as
```
def evolutionary_algorithm():
'Pseudocode of an evolutionary algorithm'
populations = [] # a list with all the populations
populations[0] = initialize_population(pop_size)
t = 0
while not stop_criterion(populations[t]):
fitnesses = evaluate(populations[t])
offspring = matting_and_variation(populations[t],
fitnesses)
populations[t+1] = environmental_selection(
populations[t],
offspring)
t = t+1
```
Python libraries for evolutionary computation
PaGMO/PyGMO
Inspyred
Distributed Evolutionary Algorithms in Python (DEAP)
There are potentially many more, feel free to give me some feedback on this.
<table>
<tr>
<td width='47%'>
<img src='https://raw.githubusercontent.com/DEAP/deap/master/doc/_static/deap_long.png' title="DEAP logo" width='92%' align='center'/>
</td>
<td>
<ul>
<li> Open source Python library with,
<li> genetic algorithm using any representation;
<li> evolutionary strategies (including CMA-ES);
<li> multi-objective optimization from the start;
<li> co-evolution (cooperative and competitive) of multiple populations;
<li> parallelization of the evaluations (and more) using SCOOP;
<li> statistics keeping, and;
<li> benchmarks module containing some common test functions.
<li> [https://github.com/DEAP/deap](https://github.com/DEAP/deap)
</ul>
</td>
</tr>
</table>
Lets start with an example and analyze it
The One Max problem
Maximize the number of ones in a binary string (list, vector, etc.).
More formally, from the set of binary strings of length $n$,
$$\mathcal{S}=\left{s_1,\ldots,s_n\right}, \text{ with } s_i=\left{0,1\right}.$$
Find $s^\ast\in\mathcal{S}$ such that
$$s^\ast = \operatorname*{arg\,max}{s\in\mathcal{S}} \sum{i=1}^{n}{s_i}.$$
Its clear that the optimum is an all-ones string.
Coding the problem
End of explanation
"""
toolbox = base.Toolbox()
# gene generator: a random bit
toolbox.register("attr_bool", random.randint, 0, 1)
# an individual is 100 random bits
toolbox.register("individual", tools.initRepeat, creator.Individual,
    toolbox.attr_bool, n=100)
# a population is a list of individuals
toolbox.register("population", tools.initRepeat, list,
    toolbox.individual)
# evolutionary operators: evaluation, two-point crossover, bit-flip mutation, tournament selection
toolbox.register("evaluate", evalOneMax)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)
"""
Explanation: Defining the elements
End of explanation
"""
pop = toolbox.population(n=300)
"""
Explanation: Running the experiment
End of explanation
"""
# Evolve for 10 generations, then 50 more; eaSimple mutates `pop` in place,
# so the second call continues from where the first one stopped.
result = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2,
    ngen=10, verbose=False)
print('Current best fitness:', evalOneMax(tools.selBest(pop, k=1)[0]))
result = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2,
    ngen=50, verbose=False)
print('Current best fitness:', evalOneMax(tools.selBest(pop, k=1)[0]))
"""
Explanation: Lets run only 10 generations
End of explanation
"""
import random
from deap import base
from deap import creator
from deap import tools
# genome length
IND_SIZE = 5
# Two-objective minimization fitness (both weights negative means "minimize both").
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox1 = base.Toolbox()
# gene generator: a uniform float in [0, 1)
toolbox1.register("attr_float", random.random)
toolbox1.register("individual", tools.initRepeat, creator.Individual,
                  toolbox1.attr_float, n=IND_SIZE)
"""
Explanation: Essential features
deap.creator: meta-factory allowing to create classes that will fulfill the needs of your evolutionary algorithms.
deap.base.Toolbox: A toolbox for evolution that contains the evolutionary operators. You may populate the toolbox with any other function by using the register() method
deap.base.Fitness([values]): The fitness is a measure of quality of a solution. If values are provided as a tuple, the fitness is initalized using those values, otherwise it is empty (or invalid). You should inherit from this class to define your custom fitnesses.
Defining an individual
First import the required modules and register the different functions required to create individuals that are a list of floats with a minimizing two objectives fitness.
End of explanation
"""
ind1 = toolbox1.individual()
"""
Explanation: The first individual can now be built
End of explanation
"""
# Show the individual and whether its fitness has been set yet.
# (print() is used instead of the Python-2 print statement so the cell
# runs on Python 3 as well; the rest of this file already uses print().)
print(ind1)
print(ind1.fitness.valid)
"""
Explanation: Printing the individual ind1 and checking if its fitness is valid will give something like this
End of explanation
"""
def evaluate(individual):
    """Two-objective fitness: (sum of the genes, 1 / genome length)."""
    total = sum(individual)
    size = len(individual)
    return total, 1. / size
# Assigning the values tuple marks the fitness as valid.
ind1.fitness.values = evaluate(ind1)
# print() instead of the Python-2 print statement, consistent with the rest of the file.
print(ind1.fitness.valid)
print(ind1.fitness)
"""
Explanation: The individual is printed as its base class representation (here a list) and the fitness is invalid because it contains no values.
Evaluation
The evaluation is the most "personal" part of an evolutionary algorithm
* it is the only part of the library that you must write yourself.
* A typical evaluation function takes one individual as argument and return its fitness as a tuple.
* A fitness is a list of floating point values and has a property valid to know if this individual shall be re-evaluated.
* The fitness is set by setting the values to the associated tuple.
For example, the following evaluates the previously created individual ind1 and assign its fitness to the corresponding values.
End of explanation
"""
# Clone first so ind1 is preserved: mutGaussian mutates in place and
# returns the mutated individual(s) as a tuple (hence the `ind2,` unpacking).
mutant = toolbox1.clone(ind1)
ind2, = tools.mutGaussian(mutant, mu=0.0, sigma=0.2, indpb=0.2)
# the old fitness no longer describes the mutated genome
del mutant.fitness.values
"""
Explanation: Dealing with single objective fitness is not different, the evaluation function must return a tuple because single-objective is treated as a special case of multi-objective.
Mutation
The next kind of operator that we will present is the mutation operator.
There is a variety of mutation operators in the deap.tools module.
Each mutation has its own characteristics and may be applied to different type of individual.
Be careful to read the documentation of the selected operator in order to avoid undesirable behaviour.
The general rule for mutation operators is that they only mutate, this means that an independent copy must be made prior to mutating the individual if the original individual has to be kept or is a reference to an other individual (see the selection operator).
In order to apply a mutation (here a gaussian mutation) on the individual ind1, simply apply the desired function.
End of explanation
"""
# ind2 and mutant are the same object: mutation works in place.
# (print() instead of the Python-2 print statement, consistent with the rest of the file.)
print(ind2 is mutant)
print(mutant is ind2)
"""
Explanation: The fitness’ values are deleted because they not related to the individual anymore. As stated above, the mutation does mutate and only mutate an individual it is not responsible of invalidating the fitness nor anything else. The following shows that ind2 and mutant are in fact the same individual.
End of explanation
"""
# Clone the parents first: cxBlend mates in place.
child1, child2 = [toolbox1.clone(ind) for ind in (ind1, ind2)]
tools.cxBlend(child1, child2, 0.5)
# the inherited fitnesses no longer describe the new genomes
del child1.fitness.values
del child2.fitness.values
"""
Explanation: Crossover
There is a variety of crossover operators in the deap.tools module.
Each crossover has its own characteristics and may be applied to different type of individuals.
Be careful to read the documentation of the selected operator in order to avoid undesirable behaviour.
The general rule for crossover operators is that they only mate individuals; this means that independent copies must be made prior to mating the individuals if the original individuals have to be kept or are references to other individuals (see the selection operator).
Lets apply a crossover operation to produce the two children that are cloned beforehand.
End of explanation
"""
# selBest returns references to the selected individuals, not copies.
selected = tools.selBest([child1, child2], 2)
# (print() instead of the Python-2 print statement, consistent with the rest of the file.)
print(child1 in selected)
"""
Explanation: Selection
Selection is made among a population by the selection operators that are available in the deap.operators module.
The selection operator usually takes as first argument an iterable container of individuals and the number of individuals to select. It returns a list containing the references to the selected individuals.
The selection is made as follow.
End of explanation
"""
from deap import base
from deap import tools
toolbox1 = base.Toolbox()
def evaluateInd(individual):
    # Placeholder evaluation: `result` is not defined — this cell is
    # illustrative pseudocode showing how an evaluator is registered.
    # Do some computation
    return result,
# Register the four conventional operators under their usual names.
toolbox1.register("mate", tools.cxTwoPoint)
toolbox1.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2)
toolbox1.register("select", tools.selTournament, tournsize=3)
toolbox1.register("evaluate", evaluateInd)
"""
Explanation: Using the Toolbox
The toolbox is intended to contain all the evolutionary tools, from the object initializers to the evaluation operator.
It allows easy configuration of each algorithms.
The toolbox has basically two methods, register() and unregister(), that are used to add or remove tools from the toolbox.
The usual names for the evolutionary tools are mate(), mutate(), evaluate() and select(), however, any name can be registered as long as it is unique. Here is how they are registered in the toolbox.
End of explanation
"""
def checkBounds(min, max):
    """Decorator factory that clips every gene of every produced child
    into the closed interval [min, max].

    Intended to decorate variation operators (mate/mutate) that return an
    iterable of offspring; each offspring is clipped in place and the
    offspring are returned unchanged otherwise.
    """
    def decorator(func):
        def wrapper(*args, **kargs):
            offspring = func(*args, **kargs)
            for child in offspring:
                # `range` works on both Python 2 and 3; `xrange` is Python-2-only
                for i in range(len(child)):
                    if child[i] > max:
                        child[i] = max
                    elif child[i] < min:
                        child[i] = min
            return offspring
        return wrapper
    return decorator
toolbox.register("mate_example", tools.cxBlend, alpha=0.2)
toolbox.register("mutate_example", tools.mutGaussian, mu=0, sigma=2)
# Clip all offspring produced by the two operators into [MIN, MAX].
MIN = 0; MAX = 10
toolbox.decorate("mate_example", checkBounds(MIN, MAX))
toolbox.decorate("mutate_example", checkBounds(MIN, MAX))
"""
Explanation: Tool Decoration
A powerful feature that helps to control very precise thing during an evolution without changing anything in the algorithm or operators.
A decorator is a wrapper that is called instead of a function.
It is asked to make some initialization and termination work before and after the actual function is called.
For example, in the case of a constrained domain, one can apply a decorator to the mutation and crossover in order to keep any individual from being out-of-bound.
The following defines a decorator that checks if any attribute in the list is out-of-bound and clips it if it is the case.
* The decorator is defined using three functions in order to receive the min and max arguments.
* Whenever the mutation or crossover is called, bounds will be check on the resulting individuals.
End of explanation
"""
from deap import algorithms
NGEN = 20 # number of generations
CXPB = 0.6   # per-pair crossover probability
MUTPB = 0.05 # per-individual mutation probability
for g in range(NGEN):
    # Select and clone the next generation individuals
    # NOTE(review): on Python 3 `map` returns an iterator — wrap in list()
    # there; this notebook is written for Python 2 where map returns a list.
    offspring = map(toolbox.clone, toolbox.select(pop, len(pop)))
    # Apply crossover and mutation on the offspring
    offspring = algorithms.varAnd(offspring, toolbox, CXPB, MUTPB)
    # Evaluate only the individuals whose fitness was invalidated by variation
    invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit
    # The population is entirely replaced by the offspring (in place via slice assignment)
    pop[:] = offspring
"""
Explanation: This will work on crossover and mutation because both return a tuple of individuals. The mutation is often considered to return a single individual but again like for the evaluation, the single individual case is a special case of the multiple individual case.
Variations
Variations allows to build simple algorithms using predefined small building blocks.
In order to use a variation, the toolbox must be set to contain the required operators.
For example, in the lastly presented complete algorithm, the crossover and mutation are regrouped in the varAnd() function, this function requires the toolbox to contain the mate() and mutate() functions. The variations can be used to simplify the writing of an algorithm as follow.
End of explanation
"""
from deap import algorithms
# One call replaces the hand-written generational loop above.
result = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=50)
"""
Explanation: Algorithms
There are several algorithms implemented in the algorithms module.
They are very simple and reflect the basic types of evolutionary algorithms present in the literature.
The algorithms use a Toolbox as defined in the last sections.
In order to setup a toolbox for an algorithm, you must register the desired operators under a specified names, refer to the documentation of the selected algorithm for more details.
Once the toolbox is ready, it is time to launch the algorithm.
The simple evolutionary algorithm takes 5 arguments, a population, a toolbox, a probability of mating each individual at each generation (cxpb), a probability of mutating each individual at each generation (mutpb) and a number of generations to accomplish (ngen).
End of explanation
"""
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
"""
Explanation: Computing Statistics
Often, one wants to compile statistics on what is going on in the optimization. The Statistics are able to compile such data on arbitrary attributes of any designated object. To do that, one need to register the desired statistic functions inside the stats object using the exact same syntax as the toolbox.
End of explanation
"""
import numpy
# Register the aggregation functions; each will be applied to the array of keyed values.
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
"""
Explanation: The statistics object is created using a key as first argument. This key must be supplied a function that will later be applied to the data on which the statistics are computed. The previous code sample uses the fitness.values attribute of each element.
End of explanation
"""
# ngen=0 still evaluates the initial population; verbose=True prints the stats table.
pop, logbook = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=0,
    stats=stats, verbose=True)
"""
Explanation: The statistical functions are now registered.
The register function expects an alias as first argument and a function operating on vectors as second argument.
Any subsequent argument is passed to the function when called. The creation of the statistics object is now complete.
Predefined Algorithms
When using a predefined algorithm such as eaSimple(), eaMuPlusLambda(), eaMuCommaLambda(), or eaGenerateUpdate(), the statistics object previously created can be given as argument to the algorithm.
End of explanation
"""
record = stats.compile(pop)
"""
Explanation: Statistics will automatically be computed on the population every generation.
The verbose argument prints the statistics on screen while the optimization takes place.
Once the algorithm returns, the final population and a Logbook are returned.
See the next section or the Logbook documentation for more information.
Writing Your Own Algorithm
When writing your own algorithm, including statistics is very simple. One need only to compile the statistics on the desired object.
For example, compiling the statistics on a given population is done by calling the compile() method.
End of explanation
"""
>>> print(record)
{'std': 4.96, 'max': 63.0, 'avg': 50.2, 'min': 39.0}
"""
Explanation: The argument to the compile function must be an iterable of elements on which the key will be called. Here, our population (pop) contains individuals.
The statistics object will call the key function on every individual to retrieve their fitness.values attribute.
The resulting array of values is finally given to each statistic function and the result is put into the record dictionary under the key associated with the function.
Printing the record reveals its nature.
End of explanation
"""
logbook = tools.Logbook()
# One chronological entry: explicit keys plus everything in `record` via ** unpacking.
logbook.record(gen=0, evals=30, **record)
"""
Explanation: Logging Data
Once the data is produced by the statistics, one can save it for further use in a Logbook.
The logbook is intended to be a chronological sequence of entries (as dictionaries).
It is directly compliant with the type of data returned by the statistics objects, but not limited to this data.
In fact, anything can be incorporated in an entry of the logbook.
End of explanation
"""
gen, avg = logbook.select("gen", "avg")
"""
Explanation: The record() method takes a variable number of argument, each of which is a data to be recorded. In the last example, we saved the generation, the number of evaluations and everything contained in the record produced by a statistics object using the star magic. All record will be kept in the logbook until its destruction.
After a number of records, one may want to retrieve the information contained in the logbook.
End of explanation
"""
logbook.header = "gen", "avg", "spam"
"""
Explanation: The select() method provides a way to retrieve all the information associated with a keyword in all records. This method takes a variable number of string arguments, which are the keywords used in the record or statistics object. Here, we retrieved the generation and the average fitness using a single call to select.
Printing to Screen
A logbook can be printed to screen or file.
Its __str__() method returns a header of each key inserted in the first record and the complete logbook for each of these keys.
The row are in chronological order of insertion while the columns are in an undefined order.
The easiest way to specify an order is to set the header attribute to a list of strings specifying the order of the columns.
End of explanation
"""
print(logbook)
"""
Explanation: The result is:
End of explanation
"""
# Extract the evolution data from the (multi-chapter) logbook.
gen = logbook.select("gen")
fit_mins = logbook.chapters["fitness"].select("min")
size_avgs = logbook.chapters["size"].select("avg")
import matplotlib.pyplot as plt
%matplotlib inline
# Plot minimum fitness and average size on the same x-axis with twin y-axes.
fig, ax1 = plt.subplots()
line1 = ax1.plot(gen, fit_mins, "b-", label="Minimum Fitness")
ax1.set_xlabel("Generation")
ax1.set_ylabel("Fitness", color="b")
for tl in ax1.get_yticklabels():
    tl.set_color("b")
ax2 = ax1.twinx()
line2 = ax2.plot(gen, size_avgs, "r-", label="Average Size")
ax2.set_ylabel("Size", color="r")
for tl in ax2.get_yticklabels():
    tl.set_color("r")
# merge both line handles into a single legend
lns = line1 + line2
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, loc="center right")
plt.show()
"""
Explanation: Plotting Features
One of the most common operation when an optimization is finished is to plot the data during the evolution.
The Logbook allows to do this very efficiently.
Using the select method, one can retrieve the desired data and plot it using matplotlib.
End of explanation
"""
from math import sin
from deap import base
from deap import tools
def evalFct(individual):
    """Single-objective fitness of a one-gene individual, as a 1-tuple."""
    gene = individual[0]
    fitness = (gene - 5) ** 2 * sin(gene) * (gene / 3)
    return fitness,
def feasible(individual):
    """Return True when the individual lies in the feasible region 3 < x < 5,
    False otherwise."""
    return 3 < individual[0] < 5
def distance(individual):
    """Squared distance of the individual from x = 5, the feasible-region anchor."""
    delta = individual[0] - 5.0
    return delta ** 2
toolbox = base.Toolbox()
toolbox.register("evaluate", evalFct)
# Infeasible individuals get fitness 7.0 minus a distance-based penalty
# instead of being evaluated. (DeltaPenality is DEAP's actual class name.)
toolbox.decorate("evaluate", tools.DeltaPenality(feasible, 7.0, distance))
"""
Explanation: <img src='http://deap.readthedocs.org/en/master/_images/twin_logbook.png' width='92%'/>
Constraint Handling
We have already seen some alternatives.
Penalty functions are the most basic way of handling constraints for individuals that cannot be evaluated or are forbidden, for problem-specific reasons, when falling in a given region.
The penalty function gives a fitness disadvantage to these individuals based on the amount of constraint violation in the solution.
<img src='http://deap.readthedocs.org/en/master/_images/constraints.png' width='92%'/>
In DEAP, a penality function can be added to any evaluation function using the DeltaPenality decorator provided in the tools module.
End of explanation
"""
|
Diyago/Machine-Learning-scripts | DEEP LEARNING/Pytorch from scratch/TODO/Autoencoders/denoising-autoencoder/Denoising_Autoencoder_Solution.ipynb | apache-2.0 | import torch
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# load the training and test datasets (downloads MNIST to ./data on first run)
train_data = datasets.MNIST(root='data', train=True,
    download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
    download=True, transform=transform)
# Create training and test dataloaders
num_workers = 0
# how many samples per batch to load
batch_size = 20
# prepare data loaders (no shuffling: deterministic batch order)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)
"""
Explanation: Denoising Autoencoder
Sticking with the MNIST dataset, let's add noise to our data and see if we can define and train an autoencoder to de-noise the images.
<img src='notebook_ims/autoencoder_denoise.png' width=70%/>
Let's get started by importing our libraries and getting the dataset.
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# get one image from the batch
img = np.squeeze(images[0])
fig = plt.figure(figsize = (5,5))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
"""
Explanation: Visualize the Data
End of explanation
"""
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class ConvDenoiser(nn.Module):
    """Convolutional denoising autoencoder for 1x28x28 images.

    Encoder: three conv + maxpool stages compress the input to an 8x3x3
    code (spatial dims 28 -> 14 -> 7 -> 3). Decoder: three transpose
    convolutions upsample back (3 -> 7 -> 14 -> 28), then a final conv
    maps depth 32 -> 1 and a sigmoid keeps outputs in [0, 1], matching
    the normalized input pixel range.
    """
    def __init__(self):
        super(ConvDenoiser, self).__init__()
        ## encoder layers ##
        # conv layer (depth from 1 --> 32), 3x3 kernels
        self.conv1 = nn.Conv2d(1, 32, 3, padding=1)
        # conv layer (depth from 32 --> 16), 3x3 kernels
        self.conv2 = nn.Conv2d(32, 16, 3, padding=1)
        # conv layer (depth from 16 --> 8), 3x3 kernels
        self.conv3 = nn.Conv2d(16, 8, 3, padding=1)
        # pooling layer to reduce x-y dims by two; kernel and stride of 2
        self.pool = nn.MaxPool2d(2, 2)

        ## decoder layers ##
        # kernel_size=3 (not 2) so the 3x3 code maps to a 7x7 output: (3-1)*2 + 3 = 7
        self.t_conv1 = nn.ConvTranspose2d(8, 8, 3, stride=2)
        # two more transpose layers with a kernel of 2 double the spatial dims: 7 -> 14 -> 28
        self.t_conv2 = nn.ConvTranspose2d(8, 16, 2, stride=2)
        self.t_conv3 = nn.ConvTranspose2d(16, 32, 2, stride=2)
        # one, final, normal conv layer to decrease the depth back to 1 channel
        self.conv_out = nn.Conv2d(32, 1, 3, padding=1)

    def forward(self, x):
        """Encode `x` to the compressed representation and decode it back.

        Expects input of shape (batch, 1, 28, 28); returns the same shape
        with values in [0, 1].
        """
        ## encode ##
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = F.relu(self.conv3(x))
        x = self.pool(x)  # compressed representation (8x3x3)
        ## decode ##
        x = F.relu(self.t_conv1(x))
        x = F.relu(self.t_conv2(x))
        x = F.relu(self.t_conv3(x))
        # torch.sigmoid replaces the deprecated F.sigmoid (same function)
        x = torch.sigmoid(self.conv_out(x))
        return x
# initialize the NN and print its layer summary
model = ConvDenoiser()
print(model)
"""
Explanation: Denoising
As I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practive. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1.
We'll use noisy images as input and the original, clean images as targets.
Below is an example of some of the noisy images I generated and the associated, denoised images.
<img src='notebook_ims/denoising.png' />
Since this is a harder problem for the network, we'll want to use deeper convolutional layers here; layers with more feature maps. You might also consider adding additional layers. I suggest starting with a depth of 32 for the convolutional layers in the encoder, and the same depths going backward through the decoder.
TODO: Build the network for the denoising autoencoder. Add deeper and/or additional layers compared to the model above.
End of explanation
"""
# specify loss function (pixel-wise regression between output and clean image)
criterion = nn.MSELoss()
# specify optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# number of epochs to train the model
n_epochs = 20
# for adding noise to images
noise_factor=0.5
for epoch in range(1, n_epochs+1):
    # monitor training loss
    train_loss = 0.0
    ###################
    # train the model #
    ###################
    for data in train_loader:
        # _ stands in for labels, here
        # no need to flatten images
        images, _ = data
        ## add random noise to the input images
        noisy_imgs = images + noise_factor * torch.randn(*images.shape)
        # Clip the images to be between 0 and 1
        # NOTE(review): np.clip on a torch tensor relies on numpy/torch interop — confirm torch.clamp isn't needed for your version
        noisy_imgs = np.clip(noisy_imgs, 0., 1.)
        # clear the gradients of all optimized variables
        optimizer.zero_grad()
        ## forward pass: compute predicted outputs by passing *noisy* images to the model
        outputs = model(noisy_imgs)
        # calculate the loss
        # the "target" is still the original, not-noisy images
        loss = criterion(outputs, images)
        # backward pass: compute gradient of the loss with respect to model parameters
        loss.backward()
        # perform a single optimization step (parameter update)
        optimizer.step()
        # update running training loss (scaled by batch size)
        train_loss += loss.item()*images.size(0)
    # print avg training statistics
    # NOTE(review): the accumulator is weighted by batch size but divided by the
    # number of batches, not the number of samples — confirm intended scaling
    train_loss = train_loss/len(train_loader)
    print('Epoch: {} \tTraining Loss: {:.6f}'.format(
        epoch,
        train_loss
        ))
"""
Explanation: Training
We are only concerned with the training images, which we can get from the train_loader.
In this case, we are actually adding some noise to these images and we'll feed these noisy_imgs to our model. The model will produce reconstructed images based on the noisy input. But, we want it to produce normal un-noisy images, and so, when we calculate the loss, we will still compare the reconstructed outputs to the original images!
Because we're comparing pixel values in input and output images, it will be best to use a loss that is meant for a regression task. Regression is all about comparing quantities rather than probabilistic values. So, in this case, I'll use MSELoss. And compare output images and input images as follows:
loss = criterion(outputs, images)
End of explanation
"""
# obtain one batch of test images
dataiter = iter(test_loader)
# next(dataiter) works on both Python 2 and 3; .next() is Python-2-only
images, labels = next(dataiter)

# add noise to the test images, same recipe as during training
noisy_imgs = images + noise_factor * torch.randn(*images.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)

# get sample outputs (denoised reconstructions)
output = model(noisy_imgs)
# prep images for display
noisy_imgs = noisy_imgs.numpy()
# output is resized into a batch of images
output = output.view(batch_size, 1, 28, 28)
# use detach when it's an output that requires_grad
output = output.detach().numpy()

# plot the first ten input images and then reconstructed images
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(25,4))

# input images on top row, reconstructions on bottom
# (loop variable renamed so it no longer shadows the noisy_imgs array)
for image_set, row in zip([noisy_imgs, output], axes):
    for img, ax in zip(image_set, row):
        ax.imshow(np.squeeze(img), cmap='gray')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
"""
Explanation: Checking out the results
Here I'm adding noise to the test images and passing them through the autoencoder. It does a surprisingly great job of removing the noise, even though it's sometimes difficult to tell what the original number is.
End of explanation
"""
|
wangzexian/summrerschool2015 | debug/Debug.ipynb | bsd-3-clause | import numpy as np
import theano
import theano.tensor as T
x = T.vector()
y = T.vector()
z = x + x
z = z * y
f = theano.function([x, y], z)
f(np.ones((2,)), np.ones((3,)))
"""
Explanation: Error messages
Very important
Have lots of information in them
Take the time to read them.
If you get multiple error messages, the first one is the most important, then the last one, then the others in the middle.
An error message has 4 parts:
The stack trace
The error itself and a small description (the most important) (only 1 line)
Sometimes: extra details
Sometimes: hint to help you get more information
Here is an example of code with a user error. Don't try to find the error by code inspection, check only the error:
End of explanation
"""
# TODO: finish to define the mode below
# Exercise placeholder: '...' (Ellipsis) is syntactically valid but not a
# usable compilation mode -- the reader is meant to replace it (e.g. with
# theano.Mode(optimizer=None)) so the error points at the offending line.
mode=...
import numpy as np
import theano
import theano.tensor as T
x = T.vector()
y = T.vector()
z = x + x
# Naming intermediate results makes Theano's error output reference them.
z.name = "z1"
z = z * y
z.name = "z2"
f = theano.function([x, y], z, mode=mode)
# Same deliberate shape mismatch (2 vs 3 elements) as the previous cell.
f(np.ones((2,)), np.ones((3,)))
"""
Explanation: Where in your code does this error come from?
The HINT tells you how to get a better error message.
In Python, you can pass the parameter mode=theano.Mode(optimizer=TODO) to theano.function, instead of changing the Theano flag.
This allows you to see in your code, which line caused the problem!
End of explanation
"""
# Demonstrate a conditional breakpoint that fires during execution of a
# compiled Theano function.
import theano
import theano.tensor as T
from theano.tests.breakpoint import PdbBreakpoint
input, target = T.fvectors(['x', 'y'])
mse = (input - target) ** 2
# Conditional breakpoint to be activated if the total
# MSE > 100. The breakpoint will monitor the inputs,
# targets as well as the individual error values
breakpointOp = PdbBreakpoint("MSE too high")
condition = T.gt(mse.sum(), 100)
mse, monitored_input, monitored_target = breakpointOp(
    condition, mse, input, target)
# Compile the theano function
fct = theano.function([input, target], mse)
# Use the function.  print is called in function form so this cell runs under
# both Python 2 and Python 3 (the original statement form is Python-2-only).
print(fct([10, 0], [10, 5]))  # Will NOT activate the breakpoint
print(fct([0, 0], [10, 5]))  # Will activate the breakpoint
"""
Explanation: Stack trace
The stack trace can be very useful. You don't need to understand the part in Theano (where the file is inside theano/), just check the part coming from your code files.
DebugMode
Checks and double-checks everything, extremely slow.
Compares Python, C and GPU implementations.
Compares values before and after each optimization.
By default, raises an error on NaN.
Sensitive: so frequently reported errors are OK.
Use the Theano flag mode=DebugMode or the parameter mode=theano.compile.DebugMode() to theano.function().
Python debugger (PDB)
PDB is similar to GDB
n: next
c: continue
l: list
p var: print
s: step,
bt: print the stack trace
...
To get into PDB:
import pdb; pdb.set_trace()
python -m pdb script.py
To get into PDB with Theano:
Some Theano flags:
mode=FAST_COMPILE disables most of optimizations, and C code generation.
linker=py disables C code generation.
warn_float64=pdb
...
PdbBreakpoint: breakpoint during execution.
Breakpoint during execution
End of explanation
"""
import theano
# theano.printing.Print is an identity op: it passes its input through
# unchanged, but prints the runtime value (prefixed with the given message)
# every time the compiled function executes.
x = theano.tensor.vector()
o = theano.printing.Print("a message")(x)
f = theano.function([x], o)
# Prints the vector's value; x is reused by the next cell, so keep the name.
d = f([3, 4])
"""
Explanation: Printing during execution
End of explanation
"""
# Reuses the symbolic vector x from the previous cell.  With attrs set, only
# the requested ndarray attributes (min / mean / max) are printed instead of
# the full value.
o = theano.printing.Print("Attributes of x:", attrs=('min', 'mean', 'max'))(x)
f = theano.function([x], o)
d = f([3, 1, 4, 9])
"""
Explanation: Printing attributes of a variable
End of explanation
"""
import numpy
import theano
import theano.compile.nanguardmode
from theano import tensor as T
x = T.matrix()
w = theano.shared(numpy.random.randn(5, 7).astype(theano.config.floatX))
y = T.dot(x, w)
# NanGuardMode raises as soon as a NaN, an Inf, or a very large value
# (> 1e10) flows through the compiled function.
mode=theano.compile.nanguardmode.NanGuardMode(nan_is_error=True,
inf_is_error=True,
big_is_error=True)
fun = theano.function(
[x], y, mode=mode)
# 100.0 ** 1000000 overflows to inf, so the call below triggers the guard.
infa = numpy.tile(
(numpy.asarray(100.) ** 1000000), (3, 5))
fun(infa)
"""
Explanation: Most Frequent NaN Causes
Hyperparameters (ex: learning rate)
Initialization of parameters
Numerical Stability
Algorithm Related
Run in NanGuardMode, DebugMode, or MonitorMode.
NanGuardMode
Can check for:
* Nan
* Inf
* Big values (greater than 1e10)
End of explanation
"""
# Can also be 'off', 'ignore', 'raise', 'pdb'
theano.config.compute_test_value = 'warn'
# input which will be of shape (5, 10)
x, y = T.matrices('xy')
# provide Theano with a default test-value
x.tag.test_value = numpy.random.rand(5, 10)
# y's test value has 4 rows, not 5, so the addition below is deliberately
# inconsistent -- the shape problem is reported at graph-build time.
y.tag.test_value = numpy.random.rand(4, 10)
x + y # warn about the shape error
"""
Explanation: Test Value
Give sample values to symbolic variables and have the graph execute
as it is being built. This allows to get some type of error like shape
errors when you build the graph instead of during the execution.
End of explanation
"""
|
Kaggle/learntools | notebooks/deep_learning/raw/tut4_transfer_learning.ipynb | apache-2.0 | from IPython.display import YouTubeVideo
YouTubeVideo('mPFq5KMxKVw', width=800, height=450)
"""
Explanation: Intro
At the end of this lesson, you will be able to use transfer learning to build highly accurate computer vision models for your custom purposes, even when you have relatively little data.
Lesson
End of explanation
"""
from tensorflow.python.keras.applications import ResNet50
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Flatten, GlobalAveragePooling2D
# Two output classes (urban vs rural photos in this dataset).
num_classes = 2
# Pre-trained ImageNet weights without the final classification layer (notop).
resnet_weights_path = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
my_new_model = Sequential()
# include_top=False drops ResNet's original 1000-way classifier head;
# pooling='avg' collapses the spatial feature maps to one vector per image.
my_new_model.add(ResNet50(include_top=False, pooling='avg', weights=resnet_weights_path))
# New softmax head trained from scratch for our classes.
my_new_model.add(Dense(num_classes, activation='softmax'))
# Say not to train first layer (ResNet) model. It is already trained
my_new_model.layers[0].trainable = False
"""
Explanation: Sample Code
Specify Model
End of explanation
"""
my_new_model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
"""
Explanation: Compile Model
End of explanation
"""
from tensorflow.python.keras.applications.resnet50 import preprocess_input
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
# ResNet50 expects 224x224 inputs.
image_size = 224
# preprocess_input applies the same normalization ResNet50 was trained with.
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator = data_generator.flow_from_directory(
'../input/urban-and-rural-photos/train',
target_size=(image_size, image_size),
batch_size=24,
class_mode='categorical')
validation_generator = data_generator.flow_from_directory(
'../input/urban-and-rural-photos/val',
target_size=(image_size, image_size),
class_mode='categorical')
# Small demo run: 3 training batches per epoch, 1 validation batch.
my_new_model.fit_generator(
train_generator,
steps_per_epoch=3,
validation_data=validation_generator,
validation_steps=1)
"""
Explanation: Fit Model
End of explanation
"""
|
konstantinstadler/pymrio | doc/source/notebooks/metadata.ipynb | gpl-3.0 | import pymrio
io = pymrio.load_test()
io.meta
io.meta('Loaded the pymrio test sytem')
"""
Explanation: Metadata and change recording
Each pymrio core system object contains a field 'meta' which stores meta data as well as changes to the MRIO system. This data is stored as json file in the root of a saved MRIO data and accessible through the attribute '.meta':
End of explanation
"""
# Run the full set of MRIO calculations, then aggregate all regions into a
# single global region; both operations are appended to the recorded history.
io.calc_all()
io.aggregate(region_agg = 'global')
# Display the metadata, which now lists the calculation/aggregation steps.
io.meta
"""
Explanation: We can now do several steps to modify the system, for example:
End of explanation
"""
# Attach a free-form note to the metadata history at any time.
io.meta.note('First round of calculations finished')
io.meta
"""
Explanation: Notes can added at any time:
End of explanation
"""
# Save the full system to disk and load it back; both file IO operations are
# recorded in the metadata history.
io.save_all('/tmp/foo')
io_new = pymrio.load_all('/tmp/foo')
io_new.meta
"""
Explanation: In addition, all file io operations are recorde in the meta data:
End of explanation
"""
# Update a top-level metadata field; the change itself is also logged.
io_new.meta.change_meta('Version', 'v2')
io_new.meta
"""
Explanation: The top level meta data can be changed as well. These changes will also be recorded in the history:
End of explanation
"""
io_new.meta.history
"""
Explanation: To get the full history list, use:
End of explanation
"""
io_new.meta.modification_history
"""
Explanation: This can be restricted to one of the history types by:
End of explanation
"""
io_new.meta.note_history
"""
Explanation: or
End of explanation
"""
|
tpin3694/tpin3694.github.io | machine-learning/reshape_an_array.ipynb | mit | # Load library
import numpy as np
"""
Explanation: Title: Reshape An Array
Slug: reshape_an_array
Summary: How to reshape a NumPy array.
Date: 2017-09-04 12:00
Category: Machine Learning
Tags: Vectors Matrices Arrays
Authors: Chris Albon
Preliminaries
End of explanation
"""
# Create a 4x3 matrix
matrix = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12]])
"""
Explanation: Create Array
End of explanation
"""
# Reshape matrix into 2x6 matrix
# reshape returns a new array object; the requested shape must contain the
# same number of elements (12) as the original 4x3 matrix.
matrix.reshape(2, 6)
"""
Explanation: Reshape Array
End of explanation
"""
|
james-prior/cohpy | 20160708-dojo-fibonacci-unroll-for-speed.ipynb | mit | from itertools import islice
"""
Explanation: This plays with optimizing a fibonacci generator function for speed.
Study loop unrolling.
End of explanation
"""
def fibonacci():
    """Yield the Fibonacci numbers 0, 1, 1, 2, 3, 5, ... without end."""
    current, following = 0, 1
    while True:
        yield current
        current, following = following, current + following
# Number of terms used for validating and timing every variant below.
n = 45
# Reference output: each unrolled generator further down is asserted against
# this tuple before being timed.
known_good_output = tuple(islice(fibonacci(), n))
# known_good_output
%timeit sum(islice(fibonacci(), n))
"""
Explanation: First we start with straightforward fibonacci generator function.
End of explanation
"""
def fibonacci():
    """Fibonacci generator with the loop body unrolled 3x.

    Each pass of the while loop yields three consecutive terms; the
    variables a, b, c rotate roles between yields, so there are no
    "shuffle" assignments and no wasted motion inside the loop.
    """
    a, b = 0, 1
    while True:
        yield a
        c = a + b
        yield b
        a = b + c
        yield c
        b = c + a
assert(known_good_output == tuple(islice(fibonacci(), n)))
%timeit sum(islice(fibonacci(), n))
"""
Explanation: Next, we unroll the loop. Note that there are no assignments that just move things around. There is no wasted motion inside the loop.
It reminds me of the
[musical round](https://en.wikipedia.org/wiki/Round_%28music%29)
Three Blind Mice.
End of explanation
"""
def fibonacci():
    """Same three-phase rotation, unrolled to two rounds (six yields)
    per pass through the while loop."""
    a, b = 0, 1
    while True:
        yield a
        c = a + b
        yield b
        a = b + c
        yield c
        b = c + a
        yield a
        c = a + b
        yield b
        a = b + c
        yield c
        b = c + a
assert(known_good_output == tuple(islice(fibonacci(), n)))
%timeit sum(islice(fibonacci(), n))
def fibonacci():
    """Variant that yields the two seed values before entering the loop;
    each loop pass then computes and yields three new terms."""
    a, b = 0, 1
    yield a
    yield b
    while True:
        c = a + b
        yield c
        a = b + c
        yield a
        b = c + a
        yield b
assert(known_good_output == tuple(islice(fibonacci(), n)))
%timeit sum(islice(fibonacci(), n))
def fibonacci():
    """Seeded variant unrolled to two rounds (six new terms) per pass."""
    a, b = 0, 1
    yield a
    yield b
    while True:
        c = a + b
        yield c
        a = b + c
        yield a
        b = c + a
        yield b
        c = a + b
        yield c
        a = b + c
        yield a
        b = c + a
        yield b
assert(known_good_output == tuple(islice(fibonacci(), n)))
%timeit sum(islice(fibonacci(), n))
def fibonacci():
    """Seeded variant unrolled to three rounds (nine new terms) per pass."""
    a, b = 0, 1
    yield a
    yield b
    while True:
        c = a + b
        yield c
        a = b + c
        yield a
        b = c + a
        yield b
        c = a + b
        yield c
        a = b + c
        yield a
        b = c + a
        yield b
        c = a + b
        yield c
        a = b + c
        yield a
        b = c + a
        yield b
assert(known_good_output == tuple(islice(fibonacci(), n)))
%timeit sum(islice(fibonacci(), n))
def fibonacci():
    """Seeded variant unrolled to four rounds (twelve new terms) per pass."""
    a, b = 0, 1
    yield a
    yield b
    while True:
        c = a + b
        yield c
        a = b + c
        yield a
        b = c + a
        yield b
        c = a + b
        yield c
        a = b + c
        yield a
        b = c + a
        yield b
        c = a + b
        yield c
        a = b + c
        yield a
        b = c + a
        yield b
        c = a + b
        yield c
        a = b + c
        yield a
        b = c + a
        yield b
assert(known_good_output == tuple(islice(fibonacci(), n)))
%timeit sum(islice(fibonacci(), n))
"""
Explanation: Next, we unroll the loop more and more to see if that makes the generator faster.
End of explanation
"""
|
krondor/nlp-dsx-pot | Watson Developer APIs for Facebook Data.ipynb | gpl-3.0 | !pip install --upgrade watson-developer-cloud
!pip install --upgrade beautifulsoup4
"""
Explanation: Analyze Facebook Data Using IBM Watson and IBM Data Platform
This is a three-part notebook written in Python_3.5 meant to show how anyone can enrich and analyze a combined dataset of unstructured and structured information with IBM Watson and IBM Data Platform. For this example we are using a standard Facebook Analytics export which features texts from posts, articles and thumbnails, along with standard performance metrics such as likes, shares, and impressions.
Part I will use the Natual Language Understanding, Visual Recognition and Tone Analyzer Services from IBM Watson to enrich the Facebook Posts, Thumbnails, and Articles by pulling out Emotion Tones, Social Tones, Language Tones, Entities, Keywords, and Document Sentiment. The end result of Part I will be additional features and metrics we can test, analyze, and visualize in Part III.
Part II will be used to set up the visualizations and tests we will run in Part III. The end result of Part II will be multiple Pandas DataFrames that will contain the values, and metrics needed to find insights from the Part III tests and experiments.
Part III will include services from IBM's Data Platform, including IBM's own data visualization library PixieDust. In Part III we will run analysis on the data from the Facebook Analytics export, such as the number of likes, comments, shares, to the overall reach for each post, and will compare it to the enriched data we pulled in Part I.
You should only need to change data in the Setup portion of this notebook. All places where you see <span style="color: red"> User Input </span> is where you should be adding inputs.
Table of Contents
Part I - Enrich<br>
Setup<br>
1.1 Install Watson Developer Cloud and BeautifulSoup Packages<br>
1.2 Install PixieDust<br>
1.3 Import Packages and Libraries<br>
1.4 Add Service Credentials From Bluemix for Watson Services<br>
Load Data<br>
2.1 Load Data From SoftLayer's Object Storage as a Pandas DataFrame<br>
Prepare Data<br>
3.1 Data Cleansing with Python<br>
3.2 Beautiful Soup to Extract Thumbnails and Extented Links<br>
Enrich Data<br>
4.1 NLU for Post Text<br>
4.2 NLU for Thumbnail Text<br>
4.3 NLU for Article Text<br>
4.4 Tone Analyzer for Post Text<br>
4.5 Tone Analyzer for Article Text<br>
4.6 Visual Recognition<br>
Write Data<br>
5.1 Convert DataFrame to new CSV<br>
5.2 Write Data to SoftLayer's Object Storage<br>
Part II - Data Preperation<br>
Prepare Data<br>
1.1 Create Multiple DataFrames for Visualizations<br>
1.2 Create A Consolidated Tone Dataframe<br>
1.3 Create A Consolidated Keyword Dataframe<br>
1.4 Create A Consolidated Entity Dataframe<br>
Part III - Analyze<br>
Setup<br>
1.1 Assign Variables<br>
Load Data<br>
2.1 Load Data From SoftLayer's Object Storage as a Spark SQL DataFrame<br>
Visualize Data<br>
3.1 Run PixieDust Visualization Library with Display() API
Learn more about the technology used:
Natual Language Understanding
Tone Analyzer
Beautiful Soup
PixieDust (Part III)
Sample Documents
Sample Facebook Posts - This is a sample export of IBM Watson's Facebook Page. Engagement metrics such as clicks, impressions, etc. are all changed and do not reflect any actual post performance data.
<a id="part1"></a>
Part I - Enrich
1. Setup
<a id="setup1"></a>
1.1 Install Latest Watson Developer Cloud and Beautiful Soup Packages
End of explanation
"""
!pip install --user --upgrade pixiedust
"""
Explanation: If WDC or BS4 was just installed or upgraded, <span style="color: red">restart the kernel</span> before continuing
<a id="pixie"></a>
1.2 Install PixieDust Library
This notebook provides an overview of how to use the PixieDust Library to analyze and visualize various data sets. If you are new to PixieDust or would like to learn more about the library, please go to this Introductory Notebook or visit the PixieDust Github. The Setup section for this notebook uses instructions from the Intro To PixieDust notebook
To ensure you are running the latest version of PixieDust uncomment and run the following cell. Do not run this cell if you installed PixieDust locally from source and want to continue to run PixieDust from source.
End of explanation
"""
import json
import sys
import watson_developer_cloud
from watson_developer_cloud import ToneAnalyzerV3, VisualRecognitionV3
import watson_developer_cloud.natural_language_understanding.features.v1 as features
import operator
from functools import reduce
from io import StringIO
import numpy as np
from bs4 import BeautifulSoup as bs
from operator import itemgetter
from os.path import join, dirname
import pandas as pd
import numpy as np
import requests
import pixiedust
"""
Explanation: <a id="setup2"></a>
1.3 Import Packages and Libraries
To check if you have package already installed, open new cell and write: help.('Package Name')
End of explanation
"""
nlu = watson_developer_cloud.NaturalLanguageUnderstandingV1(version='2017-02-27',
username='$USERNAME',
password='$PASSWORD')
tone_analyzer = ToneAnalyzerV3(version='2016-05-19',
username='$USERNAME',
password='$PASSWORD')
visual_recognition = VisualRecognitionV3('2016-05-20', api_key='$APIKEY')
"""
Explanation: <a id='setup3'></a>
1.4 Add Service Credentials From Bluemix for Watson Services
To create your own service and API keys for either NLU or Tone Analyzer go to the Watson Services on Bluemix.
After creating a service for NLU and Tone Analyzer, replace the credentials in the section below
<span style="color: red"> User Input</span>
End of explanation
"""
# The code was removed by DSX for sharing.
"""
Explanation: <a id='load'></a>
2. Load Data
2.1 Load data from Object Storage
IBM® Object Storage for Bluemix® provides provides you with access to a fully provisioned Swift Object Storage account to manage your data. Object Storage uses OpenStack Identity (Keystone) for authentication and can be accessed directly by using OpenStack Object Storage (Swift) API v3.
<span style="color: red"> User Input</span>
Insert data you want to enrich by clicking on the 1001 icon on the upper right hand of the screen. Click "Insert to code" under the file you want to enrich. The make sure you've clicked the cell below and then choose "Insert Pandas DataFrame."
End of explanation
"""
#Make sure this equals the dataframe variable above. Usually it is df_data_1, but it can change
df = df_data_2
"""
Explanation: <span style="color: red"> User Input</span>
End of explanation
"""
# The code was removed by DSX for sharing.
"""
Explanation: <span style="color: red"> User Input</span>
Put in the credentials for the file you want to enrich by clicking on the 1001 icon on the upper right hand of the screen. Click the cell below, then click "Insert to code" under the file you want to enrich. Choose "Insert Credentials." CHANGE THE NAME TO credentials_1
End of explanation
"""
#choose any name to save your file
localfilename = 'OutputFile.csv'
"""
Explanation: <span style="color: red"> User Input</span>
End of explanation
"""
# Normalize the raw Facebook export: rename the post-text column, split any
# URL out of each post into its own 'Link' column, then strip the URL from
# the post text itself.
df.rename(columns={'Post Message': 'Text'}, inplace=True)
# Drop the row with index label 0 -- presumably a secondary header row in the
# Facebook export; TODO confirm against the raw CSV.
df = df.drop([0])
#df.head()
# partition() splits each post at the first occurrence of the delimiter.
df_http= df["Text"].str.partition("http")
df_www = df["Text"].str.partition("www")
#combine delimiters with actual links
df_http["Link"] = df_http[1].map(str) + df_http[2]
df_www["Link1"] = df_www[1].map(str) + df_www[2]
#include only Link columns
df_http.drop(df_http.columns[0:3], axis=1, inplace = True)
df_www.drop(df_www.columns[0:3], axis=1, inplace = True)
#merge http and www dataframes
dfmerge = pd.concat([df_http, df_www], axis=1)
#the following steps will allow you to merge data columns from the left to the right
dfmerge = dfmerge.apply(lambda x: x.str.strip()).replace('', np.nan)
#use fillna to fill any blanks with the Link1 column
dfmerge["Link"].fillna(dfmerge["Link1"], inplace = True)
#delete Link1 (www column)
dfmerge.drop("Link1", axis=1, inplace = True)
#combine Link data frame
df = pd.concat([dfmerge,df], axis = 1)
# # make sure text column is a string
df["Text"] = df["Text"].astype("str")
# #strip links from Text column (everything from the URL onwards is dropped)
df['Text'] = df['Text'].apply(lambda x: x.split('http')[0])
df['Text'] = df['Text'].apply(lambda x: x.split('www')[0])
#df.head()
#Pull thumbnail descriptions using beautiful soup
#changes links from objects to strings
# NOTE(review): this loop has no effect -- to_string()'s result is discarded
# and df.Link is never reassigned; the scraping below works regardless.
for link in df.Link:
    df.Link.to_string()
#create empty list to store descriptions
description = []
#use BeautifulSoup to pull descriptions from links
# For every link, fetch the page and read its <meta name="description"> tag;
# rows without a link, without a description, or hitting an HTTP error append
# an empty string so the list stays aligned with df.
for url in df["Link"]:
    try:
        #if there's no description
        if pd.isnull(url):
            description.append("")
        else:
            page3= requests.get(url)
            soup3= bs(page3.text,"lxml")
            #Capture both capitalized 'Description' and lower case
            desc= soup3.find(attrs={'name':'Description'})
            if desc == None:
                desc= soup3.find(attrs={'name':'description'})
            description.append(desc['content'])
        #this exception will save you from 404 errors
    except Exception:
        description.append("")
        continue
#save to df and add column titled 'Thumbnails'
df["Thumbnails"] = description
#df['Thumbnails'].head()
#df.head()
# Scrape each linked article's og:image meta tag and store the preview image
# URL in a new 'Image' column; empty string when absent or on any error, so
# the list stays aligned with df.
piclinks = []
for url in df["Link"]:
    try:
        if pd.isnull(url):
            piclinks.append("")
        else:
            page3= requests.get(url)
            soup3= bs(page3.text,"lxml")
            pic = soup3.find('meta', property ="og:image")
            if pic:
                piclinks.append(pic["content"])
            else:
                piclinks.append("")
    except:
        piclinks.append("")
df["Image"] = piclinks
"""
Explanation: <a id='prepare'></a>
3. Prepare Data
<a id='prepare1'></a>
3.1 Data Cleansing with Python
Renaming columns, removing noticable noise in the data, pulling out URLs and appending to a new column to run through NLU
End of explanation
"""
#converts shortened links to their original form
# A HEAD request with allow_redirects=True follows the redirect chain and
# reports the final URL; any failure (NaN link, bad URL, network error)
# records '' instead.
# NOTE(review): a fresh requests.Session is created per link -- reusing one
# session would be cheaper; left unchanged here.
shortlink = df["Link"]
extendedlink = []
for link in shortlink:
    #create empty list to store
    try:
        extended_link = requests.Session().head(link, allow_redirects=True).url
        extendedlink.append(extended_link)
    except:
        # catch *all* exceptions
        e = sys.exc_info()[0]
        extendedlink.append('')
        pass
df["Extended Links"] = extendedlink
"""
Explanation: <span style="color: red">Optional<span>: Convert shortened links to full links (Note: NLU requires full links)
Use requests module to pull extended lists. This is only necessary if the Facebook page uses different links than the articles themselves. For this example we are using IBM Watson's Facebook export which uses an IBM link.
End of explanation
"""
# Enrich each post's text with Watson NLU and append the results to df as
# new 'Text*' columns (sentiment, strongest emotion, keywords, entities).
# NOTE(review): if a response lacks a 'sentiment' section entirely, nothing
# is appended to the two sentiment lists for that row, which would desync
# them from df -- TODO confirm NLU always returns requested features.
# Extract the free form text response from the data frame
# If you are using this script for a diff CSV, you will have to change this column name
free_form_responses = df['Text']
# define the list of enrichments to apply
# if you are modifying this script add or remove the enrichments as needed
f = [features.Entities(), features.Keywords(),features.Emotion(),features.Sentiment()]#'typed-rels'
# Create a list to store the enriched data; one entry is appended per row so
# the lists line up with df when assigned as columns below.
overallSentimentScore = []
overallSentimentType = []
highestEmotion = []
highestEmotionScore = []
kywords = []
entities = []
# Go thru every response and enrich the text using NLU
for idx, response in enumerate(free_form_responses):
    #print("Processing record number: ", idx, " and text: ", response)
    try:
        # dumps/loads round-trip yields a plain dict of the NLU response
        enriched_json = json.loads(json.dumps(nlu.analyze(text=response, features=f)))
        #print(enriched_json)
        # get the SENTIMENT score and type
        if 'sentiment' in enriched_json:
            if('score' in enriched_json['sentiment']["document"]):
                overallSentimentScore.append(enriched_json["sentiment"]["document"]["score"])
            else:
                overallSentimentScore.append('0')  # NOTE(review): string '0' mixes types with the float scores
            if('label' in enriched_json['sentiment']["document"]):
                overallSentimentType.append(enriched_json["sentiment"]["document"]["label"])
            else:
                overallSentimentType.append('0')
        # read the EMOTIONS into a dict and get the key (emotion) with maximum value
        if 'emotion' in enriched_json:
            me = max(enriched_json["emotion"]["document"]["emotion"].items(), key=operator.itemgetter(1))[0]
            highestEmotion.append(me)
            highestEmotionScore.append(enriched_json["emotion"]["document"]["emotion"][me])
        else:
            highestEmotion.append("")
            highestEmotionScore.append("")
        #iterate and get KEYWORDS with a confidence of over 50%
        if 'keywords' in enriched_json:
            #print((enriched_json['keywords']))
            tmpkw = []
            for kw in enriched_json['keywords']:
                if(float(kw["relevance"]) >= 0.5):
                    #print("kw is: ", kw, "and val is ", kw["text"])
                    tmpkw.append(kw["text"])#str(kw["text"]).strip('[]')
            #convert multiple keywords in a list to a string
            if(len(tmpkw) > 1):
                tmpkw = "".join(reduce(lambda a, b: a + ', ' + b, tmpkw))
            elif(len(tmpkw) == 0):
                tmpkw = ""
            else:
                tmpkw = "".join(reduce(lambda a, b='': a + b , tmpkw))
            kywords.append(tmpkw)
        else:
            kywords.append("")
        #iterate and get Entities with a confidence of over 30%
        if 'entities' in enriched_json:
            #print((enriched_json['entities']))
            tmpent = []
            for ent in enriched_json['entities']:
                if(float(ent["relevance"]) >= 0.3):
                    tmpent.append(ent["type"])
            #convert multiple concepts in a list to a string
            if(len(tmpent) > 1):
                tmpent = "".join(reduce(lambda a, b: a + ', ' + b, tmpent))
            elif(len(tmpent) == 0):
                tmpent = ""
            else:
                tmpent = "".join(reduce(lambda a, b='': a + b , tmpent))
            entities.append(tmpent)
        else:
            entities.append("")
    except:
        # catch *all* exceptions
        # NOTE(review): if the failure happened after some appends above,
        # this adds a second entry for the row in those lists.
        e = sys.exc_info()[0]
        overallSentimentScore.append(' ')
        overallSentimentType.append(' ')
        highestEmotion.append(' ')
        highestEmotionScore.append(' ')
        kywords.append(' ')
        entities.append(' ')
        pass
# Create columns from the list and append to the dataframe
if highestEmotion:
    df['TextHighestEmotion'] = highestEmotion
if highestEmotionScore:
    df['TextHighestEmotionScore'] = highestEmotionScore
if overallSentimentType:
    df['TextOverallSentimentType'] = overallSentimentType
if overallSentimentScore:
    df['TextOverallSentimentScore'] = overallSentimentScore
df['TextKeywords'] = kywords
df['TextEntities'] = entities
"""
Explanation: <a id='enrich'></a>
4. Enrichment Time!
<a id='nlupost'></a>
4.1 NLU for the Post Text
Below uses Natural Language Understanding to iterate through each post and extract the enrichment features we want to use in our future analysis.
Each feature we extract will be appended to the .csv in a new column we determine at the end of this script. If you want to run this same script for the other columns, define free_form_responses to the column name, if you are using URLs, change text=response parameter to url=response, and update the new column names as you see fit.
End of explanation
"""
#choose first of Keywords,Concepts, Entities
# The enrichment columns hold comma-separated strings; keep only the first
# entry of each.  Presumably NLU returns them ranked, making this the top
# keyword/entity -- TODO confirm ordering.
df["MaxTextKeywords"] = df["TextKeywords"].apply(lambda x: x.split(',')[0])
df["MaxTextEntity"] = df["TextEntities"].apply(lambda x: x.split(',')[0])
#df.head()
"""
Explanation: After we extract all of the Keywords and Entities from each Post, we have a column with multiple Keywords, and Entities separated by commas. For our Analysis in Part II we also wanted the top Keyword and Entity for each Post. Because of this, we added two new columns to capture the MaxTextKeyword and MaxTextEntity
End of explanation
"""
# Enrich the scraped thumbnail descriptions with Watson NLU, mirroring the
# post-text cell above, and append the results as new 'Thumbnail*' columns.
# Extract the thumbnail text from the data frame
# If you are using this script for a diff CSV, you will have to change this column name
free_form_responses= df['Thumbnails']
# define the list of enrichments to apply
# if you are modifying this script add or remove the enrichments as needed
f = [features.Entities(), features.Keywords(),features.Emotion(),features.Sentiment()]#'typed-rels'
# One entry is appended to each list per row, so they line up with df below.
overallSentimentScore = []
overallSentimentType = []
highestEmotion = []
highestEmotionScore = []
kywords = []
entities = []
# Go thru every response and enrich the text using NLU
for idx, response in enumerate(free_form_responses):
    try:
        # dumps/loads round-trip yields a plain dict of the NLU response
        enriched_json = json.loads(json.dumps(nlu.analyze(text=response, features=f)))
        # get the SENTIMENT score and type
        if 'sentiment' in enriched_json:
            if('score' in enriched_json['sentiment']["document"]):
                overallSentimentScore.append(enriched_json["sentiment"]["document"]["score"])
            else:
                overallSentimentScore.append("")
            if('label' in enriched_json['sentiment']["document"]):
                overallSentimentType.append(enriched_json["sentiment"]["document"]["label"])
            else:
                overallSentimentType.append("")
        # read the EMOTIONS into a dict and get the key (emotion) with maximum value
        if 'emotion' in enriched_json:
            me = max(enriched_json["emotion"]["document"]["emotion"].items(), key=operator.itemgetter(1))[0]
            highestEmotion.append(me)
            highestEmotionScore.append(enriched_json["emotion"]["document"]["emotion"][me])
        else:
            highestEmotion.append("")
            highestEmotionScore.append("")
        #iterate and get KEYWORDS with a confidence of over 50%
        if 'keywords' in enriched_json:
            tmpkw = []
            for kw in enriched_json['keywords']:
                if(float(kw["relevance"]) >= 0.5):
                    tmpkw.append(kw["text"])
            #convert multiple keywords in a list to a string
            if(len(tmpkw) > 1):
                tmpkw = "".join(reduce(lambda a, b: a + ', ' + b, tmpkw))
            elif(len(tmpkw) == 0):
                tmpkw = ""
            else:
                tmpkw = "".join(reduce(lambda a, b='': a + b , tmpkw))
            kywords.append(tmpkw)
        else:
            # BUG FIX: this else-branch was missing, so any response without
            # a 'keywords' section left kywords shorter than df and made the
            # df['ThumbnailKeywords'] assignment below fail with a
            # length-mismatch ValueError.  The sibling loops for post text
            # and article links both have this branch.
            kywords.append("")
        #iterate and get Entities with a confidence of over 30%
        if 'entities' in enriched_json:
            tmpent = []
            for ent in enriched_json['entities']:
                if(float(ent["relevance"]) >= 0.3):
                    tmpent.append(ent["type"])
            #convert multiple concepts in a list to a string
            if(len(tmpent) > 1):
                tmpent = "".join(reduce(lambda a, b: a + ', ' + b, tmpent))
            elif(len(tmpent) == 0):
                tmpent = ""
            else:
                tmpent = "".join(reduce(lambda a, b='': a + b , tmpent))
            entities.append(tmpent)
        else:
            entities.append("")
    except Exception:
        # Best-effort enrichment: keep the row aligned by appending blanks.
        # NOTE(review): if the failure happened after some appends above,
        # those lists gain a second entry for this row.
        e = sys.exc_info()[0]
        overallSentimentScore.append(' ')
        overallSentimentType.append(' ')
        highestEmotion.append(' ')
        highestEmotionScore.append(' ')
        kywords.append(' ')
        entities.append(' ')
        pass
# Create columns from the list and append to the dataframe
if highestEmotion:
    df['ThumbnailHighestEmotion'] = highestEmotion
if highestEmotionScore:
    df['ThumbnailHighestEmotionScore'] = highestEmotionScore
if overallSentimentType:
    df['ThumbnailOverallSentimentType'] = overallSentimentType
if overallSentimentScore:
    df['ThumbnailOverallSentimentScore'] = overallSentimentScore
df['ThumbnailKeywords'] = kywords
df['ThumbnailEntities'] = entities
#choose first of Keywords,Concepts,Entities
df["MaxThumbnailKeywords"] = df["ThumbnailKeywords"].apply(lambda x: x.split(',')[0])
df["MaxThumbnailEntity"] = df["ThumbnailEntities"].apply(lambda x: x.split(',')[0])
#df.head()
"""
Explanation: <a id='nlutn'></a>
4.2 NLU for Thumbnail Text
We will repeat the same process for Thumbnails and Article Text.
End of explanation
"""
# Enrich the full linked articles: NLU fetches and analyzes each URL
# (url= instead of text=) and the results become new 'Link*' columns.
# Run links through NLU and return Titles, and NLU Enrichment on full articles
# If you are using this script for a diff CSV, you will have to change this column name
free_form_responses = df['Extended Links']
# define the list of enrichments to apply
# if you are modifying this script add or remove the enrichments as needed
f = [features.Entities(), features.Keywords(),features.Emotion(),features.Sentiment()]#'typed-rels'
# Create a list to store the enriched data; one entry per row keeps the
# lists aligned with df for the column assignments below.
overallSentimentScore = []
overallSentimentType = []
highestEmotion = []
highestEmotionScore = []
kywords = []
entities = []
# Go thru every response and enrich the text using NLU
for idx, response in enumerate(free_form_responses):
    #print("Processing record number: ", idx, " and text: ", response)
    try:
        enriched_json = json.loads(json.dumps(nlu.analyze(url=response, features=f)))
        #print(enriched_json)
        # get the SENTIMENT score and type
        if 'sentiment' in enriched_json:
            if('score' in enriched_json['sentiment']["document"]):
                overallSentimentScore.append(enriched_json["sentiment"]["document"]["score"])
            else:
                overallSentimentScore.append('None')  # NOTE(review): string 'None' -- the sibling cells use '0' and '' here
            if('label' in enriched_json['sentiment']["document"]):
                overallSentimentType.append(enriched_json["sentiment"]["document"]["label"])
            else:
                overallSentimentType.append('')
        # read the EMOTIONS into a dict and get the key (emotion) with maximum value
        if 'emotion' in enriched_json:
            me = max(enriched_json["emotion"]["document"]["emotion"].items(), key=operator.itemgetter(1))[0]
            highestEmotion.append(me)
            highestEmotionScore.append(enriched_json["emotion"]["document"]["emotion"][me])
        else:
            highestEmotion.append('')
            highestEmotionScore.append('')
        #iterate and get KEYWORDS with a confidence of over 50%
        if 'keywords' in enriched_json:
            #print((enriched_json['keywords']))
            tmpkw = []
            for kw in enriched_json['keywords']:
                if(float(kw["relevance"]) >= 0.5):
                    #print("kw is: ", kw, "and val is ", kw["text"])
                    tmpkw.append(kw["text"])#str(kw["text"]).strip('[]')
            #convert multiple keywords in a list to a string
            if(len(tmpkw) > 1):
                tmpkw = "".join(reduce(lambda a, b: a + ', ' + b, tmpkw))
            elif(len(tmpkw) == 0):
                tmpkw = ""
            else:
                tmpkw = "".join(reduce(lambda a, b='': a + b , tmpkw))
            kywords.append(tmpkw)
        else:
            kywords.append("")
        #iterate and get Entities with a confidence of over 30%
        if 'entities' in enriched_json:
            #print((enriched_json['entities']))
            tmpent = []
            for ent in enriched_json['entities']:
                if(float(ent["relevance"]) >= 0.3):
                    tmpent.append(ent["type"])
            #convert multiple concepts in a list to a string
            if(len(tmpent) > 1):
                tmpent = "".join(reduce(lambda a, b: a + ', ' + b, tmpent))
            elif(len(tmpent) == 0):
                tmpent = ""
            else:
                tmpent = "".join(reduce(lambda a, b='': a + b , tmpent))
            entities.append(tmpent)
        else:
            entities.append("")
    except:
        # catch *all* exceptions
        # NOTE(review): if the failure happened after some appends above,
        # those lists gain a second entry for this row.
        e = sys.exc_info()[0]
        overallSentimentScore.append(' ')
        overallSentimentType.append(' ')
        highestEmotion.append(' ')
        highestEmotionScore.append(' ')
        kywords.append(' ')
        # concepts.append(' ')
        entities.append(' ')
        pass
# Create columns from the list and append to the dataframe
if highestEmotion:
    df['LinkHighestEmotion'] = highestEmotion
if highestEmotionScore:
    df['LinkHighestEmotionScore'] = highestEmotionScore
if overallSentimentType:
    df['LinkOverallSentimentType'] = overallSentimentType
if overallSentimentScore:
    df['LinkOverallSentimentScore'] = overallSentimentScore
df['LinkKeywords'] = kywords
# df['TextConcepts'] = concepts
df['LinkEntities'] = entities
# Keep only the first (comma-separated) keyword/entity per row.
df["MaxLinkKeywords"] = df["LinkKeywords"].apply(lambda x: x.split(',')[0])
df["MaxLinkEntity"] = df["LinkEntities"].apply(lambda x: x.split(',')[0])
#df.head()
"""
Explanation: <a id='nlulink'></a>
4.3 NLU for Article Text
End of explanation
"""
# Extract the free form text response from the data frame.
# If you are using this script for a diff CSV, you will have to change this column name.
free_form_responses = df['Text']
# Lists that accumulate the strongest tone (and its score) per tone category.
highestEmotionTone = []
emotionToneScore = []
languageToneScore = []
highestLanguageTone = []
socialToneScore = []
highestSocialTone = []
for idx, response in enumerate(free_form_responses):
    #print("Processing record number: ", idx, " and text: ", response)
    try:
        enriched_json = json.loads(json.dumps(tone_analyzer.tone(text=response)))
        #print(enriched_json)
        if 'tone_categories' in enriched_json['document_tone']:
            categories = enriched_json["document_tone"]["tone_categories"]
            # Categories are ordered: [0] emotion, [1] language, [2] social.
            # Find the max-scoring tone once per category instead of twice.
            top_emotion = max(categories[0]["tones"], key=itemgetter('score'))
            highestEmotionTone.append(top_emotion['tone_name'])
            emotionToneScore.append(top_emotion['score'])
            top_language = max(categories[1]["tones"], key=itemgetter('score'))
            highestLanguageTone.append(top_language['tone_name'])
            languageToneScore.append(top_language['score'])
            top_social = max(categories[2]["tones"], key=itemgetter('score'))
            highestSocialTone.append(top_social['tone_name'])
            socialToneScore.append(top_social['score'])
    except Exception:
        # Keep the lists aligned with the dataframe when a Tone Analyzer call fails.
        emotionToneScore.append(' ')
        highestEmotionTone.append(' ')
        languageToneScore.append(' ')
        highestLanguageTone.append(' ')
        socialToneScore.append(' ')
        highestSocialTone.append(' ')
# Create columns from the lists (only when at least one row was enriched).
if highestEmotionTone:
    df['highestEmotionTone'] = highestEmotionTone
if emotionToneScore:
    df['emotionToneScore'] = emotionToneScore
if languageToneScore:
    df['languageToneScore'] = languageToneScore
if highestLanguageTone:
    df['highestLanguageTone'] = highestLanguageTone
if highestSocialTone:
    df['highestSocialTone'] = highestSocialTone
if socialToneScore:
    df['socialToneScore'] = socialToneScore
#df.head()
"""
Explanation: <a id='tonepost'></a>
4.4 Tone Analyzer for Post Text
End of explanation
"""
# Extract the free form text response from the data frame.
# If you are using this script for a diff CSV, you will have to change this column name.
free_form_responses = df['Link']
# Only the MetaData feature is requested; the article body itself comes back
# because return_analyzed_text=True is passed to nlu.analyze below.
f = [features.MetaData()]  # 'typed-rels'
article_text = []
for idx, response in enumerate(free_form_responses):
    try:
        enriched_json = json.loads(json.dumps(nlu.analyze(url=response, features=f, return_analyzed_text=True)))
        #print(enriched_json)
        article_text.append(enriched_json["analyzed_text"])
    except Exception:
        # Unreachable or invalid URLs yield an empty article body.
        article_text.append("")
# Save to dataframe.
df["Article Text"] = article_text
#df.head()
"""
Explanation: <a id='enrich2'></a>
4.5 Tone Analyzer for Article Text
Unlike NLU, Tone Analyzer cannot iterate through a URL so here we use NLU to pull the Article Text from the URL and append it to the original dataframe.
To do this, we pull out the MetaData feature, and make sure the return_analyzed_text parameter is set to True.
End of explanation
"""
# Extract the article text (pulled from each link by NLU in the previous step).
# If you are using this script for a diff CSV, you will have to change this column name.
free_form_responses = df['Article Text']
# Lists that accumulate the strongest tone (and its score) per tone category.
highestEmotionTone = []
emotionToneScore = []
languageToneScore = []
highestLanguageTone = []
socialToneScore = []
highestSocialTone = []
for idx, response in enumerate(free_form_responses):
    #print("Processing record number: ", idx, " and text: ", response)
    try:
        enriched_json = json.loads(json.dumps(tone_analyzer.tone(text=response)))
        #print(enriched_json)
        if 'tone_categories' in enriched_json['document_tone']:
            categories = enriched_json["document_tone"]["tone_categories"]
            # Categories are ordered: [0] emotion, [1] language, [2] social.
            # Find the max-scoring tone once per category instead of twice.
            top_emotion = max(categories[0]["tones"], key=itemgetter('score'))
            highestEmotionTone.append(top_emotion['tone_name'])
            emotionToneScore.append(top_emotion['score'])
            top_language = max(categories[1]["tones"], key=itemgetter('score'))
            highestLanguageTone.append(top_language['tone_name'])
            languageToneScore.append(top_language['score'])
            top_social = max(categories[2]["tones"], key=itemgetter('score'))
            highestSocialTone.append(top_social['tone_name'])
            socialToneScore.append(top_social['score'])
    except Exception:
        # Keep the lists aligned with the dataframe when a Tone Analyzer call fails.
        emotionToneScore.append(' ')
        highestEmotionTone.append(' ')
        languageToneScore.append(' ')
        highestLanguageTone.append(' ')
        socialToneScore.append(' ')
        highestSocialTone.append(' ')
# Create columns from the lists (only when at least one row was enriched).
if highestEmotionTone:
    df['articlehighestEmotionTone'] = highestEmotionTone
if emotionToneScore:
    df['articleEmotionToneScore'] = emotionToneScore
if languageToneScore:
    df['articlelanguageToneScore'] = languageToneScore
if highestLanguageTone:
    df['articlehighestLanguageTone'] = highestLanguageTone
if highestSocialTone:
    df['articlehighestSocialTone'] = highestSocialTone
if socialToneScore:
    df['articlesocialToneScore'] = socialToneScore
#df.head()
df.head()
"""
Explanation: Similar to the script for NLU, we are now using Tone Analyzer to iterate through the newly created and appended Article Text column which contains all of the free form text from the articles contained in the Facebook posts.
We are using Tone Analyzer to gather the top Social, Writing and Emotion Tones from the Articles and appending them, along with their respective scores, to the .csv
End of explanation
"""
piclinks = df["Image"]
picclass = []
# Classify each image URL with Visual Recognition and keep the confident classes.
# BUG FIX: the original iterated `enumerate(piclinks)`, which passed an
# (index, url) tuple as images_url, so every call failed into the except branch.
for pic in piclinks:
    try:
        enriched_json = json.loads(json.dumps(visual_recognition.classify(images_url=pic), indent=2))
        #print(enriched_json)
        classes = enriched_json['images'][0]["classifiers"][0]["classes"]
        # Keep every class with a confidence of at least 0.70.
        # (The original wrote `float(iclass["score"]>=.70)`, converting the
        # boolean instead of the score; compare the numeric score directly.)
        tpicclass = [iclass["class"] for iclass in classes if float(iclass["score"]) >= 0.70]
        # Join multiple classes into one comma-separated string; join()
        # handles the 0-, 1- and N-element cases uniformly.
        picclass.append(", ".join(tpicclass))
    except Exception:
        # Keep the list aligned with the dataframe when classification fails.
        picclass.append(' ')
df["PicClass"] = picclass
"""
Explanation: <a id='visual'></a>
4.6 Visual Recognition
End of explanation
"""
def put_file(credentials, local_file_name):
    """Upload a local file to Bluemix Object Storage (Swift V3).

    Authenticates against the Keystone V3 identity endpoint, locates the
    public Dallas object-store endpoint in the service catalog, then PUTs
    the file content into the configured container.

    Arguments:
        credentials: dict with 'username', 'domain_id', 'password' and 'container'.
        local_file_name: path of the (UTF-8 text) file to upload; also used
            as the object name inside the container.
    """
    # Read the file with an explicit encoding; `with` guarantees the handle
    # is closed even if the read raises (the original leaked the handle).
    with open(local_file_name, 'r', encoding="utf-8") as f:
        my_data = f.read()
    data_to_send = my_data.encode("utf-8")
    # Step 1: request an auth token from the identity service.
    url1 = ''.join(['https://identity.open.softlayer.com', '/v3/auth/tokens'])
    data = {'auth': {'identity': {'methods': ['password'],
            'password': {'user': {'name': credentials['username'],'domain': {'id': credentials['domain_id']},
            'password': credentials['password']}}}}}
    headers1 = {'Content-Type': 'application/json'}
    resp1 = requests.post(url=url1, data=json.dumps(data), headers=headers1)
    resp1_body = resp1.json()
    #print(resp1_body)
    # Step 2: find the public object-store endpoint in the Dallas region and
    # build the target object URL from it.
    for e1 in resp1_body['token']['catalog']:
        if(e1['type']=='object-store'):
            for e2 in e1['endpoints']:
                if(e2['interface']=='public'and e2['region']=='dallas'):
                    url2 = ''.join([e2['url'],'/', credentials['container'], '/', local_file_name])
                    print(url2)
    # Step 3: PUT the file using the token issued in step 1.
    s_subject_token = resp1.headers['x-subject-token']
    #print(s_subject_token)
    #print(credentials['container'])
    headers2 = {'X-Auth-Token': s_subject_token, 'accept': 'application/json'}
    resp2 = requests.put(url=url2, headers=headers2, data = data_to_send )
    print(resp2)
#choose any name to save your file
# Persist the enriched dataframe locally (no index column) before uploading it
# to Object Storage below.
df.to_csv(localfilename,index=False)
"""
Explanation: <a id='write'></a>
Enrichment is now COMPLETE!
<a id='write1'></a>
Last step is to write and save the enriched dataframe to SoftLayer's Object Storage.
Since we already created the localfilename variable in the Setup stage and defined the necessary credentials, this snippet will work for all new files and does not need to be changed.
End of explanation
"""
put_file(credentials_1,localfilename)
"""
Explanation: <a id='write2'></a> Make sure to change the "credential" argument below matches the variable name of the credentials you imported in the Setup Phase.
End of explanation
"""
#Determine which data points are tied to metrics and put them in a list
# These Facebook Insights column names are appended to every per-feature
# dataframe below and coerced to numeric for use in the PixieDust charts.
metrics = ["Lifetime Post Total Reach", "Lifetime Post organic reach", "Lifetime Post Paid Reach", "Lifetime Post Total Impressions", "Lifetime Post Organic Impressions",
          "Lifetime Post Paid Impressions", "Lifetime Engaged Users", "Lifetime Post Consumers", "Lifetime Post Consumptions", "Lifetime Negative feedback", "Lifetime Negative Feedback from Users",
          "Lifetime Post Impressions by people who have liked your Page", "Lifetime Post reach by people who like your Page", "Lifetime Post Paid Impressions by people who have liked your Page",
          "Lifetime Paid reach of a post by people who like your Page", "Lifetime People who have liked your Page and engaged with your post", "Lifetime Talking About This (Post) by action type - comment",
          "Lifetime Talking About This (Post) by action type - like", "Lifetime Talking About This (Post) by action type - share", "Lifetime Post Stories by action type - comment", "Lifetime Post Stories by action type - like",
          "Lifetime Post Stories by action type - share", "Lifetime Post consumers by type - link clicks", "Lifetime Post consumers by type - other clicks", "Lifetime Post consumers by type - photo view", "Lifetime Post Consumptions by type - link clicks",
          "Lifetime Post Consumptions by type - other clicks", "Lifetime Post Consumptions by type - photo view", "Lifetime Negative feedback - hide_all_clicks", "Lifetime Negative feedback - hide_clicks",
          "Lifetime Negative Feedback from Users by Type - hide_all_clicks", "Lifetime Negative Feedback from Users by Type - hide_clicks"]
"""
Explanation: <a id="part2"></a>
Part II - Data Preparation
<a id='prepare'></a>
1. Prepare Data
<a id='visualizations'></a>
1.1 Prepare Multiple DataFrames for Visualizations
Before we can create the separate tables for each Watson feature we need to organize and reformat the data. First, we need to determine which data points are tied to metrics. Second, we need to make sure each metric is numeric. (This is necessary for PixieDust in Part III)
End of explanation
"""
#Create a list with only Post Tone Values
post_tones = ["Text","highestEmotionTone", "emotionToneScore", "languageToneScore", "highestLanguageTone", "highestSocialTone", "socialToneScore"]
#Append dataframe with these metrics
post_tones.extend(metrics)
# Create a new dataframe with tones and metrics. .copy() makes this an
# independent frame, so the column assignments below do not raise pandas'
# SettingWithCopyWarning or silently write into a view of df.
df_post_tones = df[post_tones].copy()
#Determine which tone values are supposed to be numeric and ensure they are numeric.
post_numeric_values = ["emotionToneScore", "languageToneScore", "socialToneScore"]
for i in post_numeric_values:
    df_post_tones[i] = pd.to_numeric(df_post_tones[i], errors='coerce')
#Make all metrics numeric
for i in metrics:
    df_post_tones[i] = pd.to_numeric(df_post_tones[i], errors='coerce')
#Drop NA Values in Tone Enrichment Columns
df_post_tones.dropna(subset=["socialToneScore"] , inplace = True)
#Add in a column to distinguish what portion the enrichment was happening
df_post_tones["Type"] = "Post"
#df_post_tones.info()
"""
Explanation: <a id='tone'></a>
1.2 Create A Consolidated Tone Dataframe
Post Tone Dataframe
End of explanation
"""
#Create a list with only Article Tone Values
article_tones = ["Text", "articlehighestEmotionTone", "articleEmotionToneScore", "articlelanguageToneScore", "articlehighestLanguageTone", "articlehighestSocialTone", "articlesocialToneScore"]
#Append dataframe with these metrics
article_tones.extend(metrics)
# Create a new dataframe with tones and metrics. .copy() makes this an
# independent frame, so the column assignments below do not raise pandas'
# SettingWithCopyWarning or silently write into a view of df.
df_article_tones = df[article_tones].copy()
#Determine which values are supposed to be numeric and ensure they are numeric.
art_numeric_values = ["articleEmotionToneScore", "articlelanguageToneScore", "articlesocialToneScore"]
for i in art_numeric_values:
    df_article_tones[i] = pd.to_numeric(df_article_tones[i], errors='coerce')
#Make all metrics numeric
for i in metrics:
    df_article_tones[i] = pd.to_numeric(df_article_tones[i], errors='coerce')
#Drop NA Values in Tone Enrichment Columns
df_article_tones.dropna(subset=["articlesocialToneScore"] , inplace = True)
#Add in a column to distinguish what portion the enrichment was happening
df_article_tones["Type"] = "Article"
#df_article_tones.head()
"""
Explanation: Article Tone Dataframe
End of explanation
"""
#first make the Column Headers the same
# Harmonize post/article column names so the two frames align on concat; the
# "Type" column added earlier still distinguishes Post rows from Article rows.
df_post_tones.rename(columns={"highestEmotionTone":"Emotion Tone", "emotionToneScore":"Emotion Tone Score", "languageToneScore": "Language Tone Score", "highestLanguageTone": "Language Tone", "highestSocialTone": "Social Tone", "socialToneScore":"Social Tone Score"
                             }, inplace=True)
df_article_tones.rename(columns={"articlehighestEmotionTone":"Emotion Tone", "articleEmotionToneScore":"Emotion Tone Score", "articlelanguageToneScore": "Language Tone Score", "articlehighestLanguageTone": "Language Tone", "articlehighestSocialTone": "Social Tone", "articlesocialToneScore":"Social Tone Score"
                                }, inplace=True)
#Combine into one data frame
df_tones = pd.concat([df_post_tones, df_article_tones])
#df_tones.head()
"""
Explanation: Combine Post and Article Dataframes to Make One Tone Dataframe
End of explanation
"""
#Create a list with only Article Keywords
article_keywords = ["Text", "MaxLinkKeywords"]
#Append dataframe with these metrics
article_keywords.extend(metrics)
# .copy() -> independent frame; the edits below would otherwise trigger
# pandas' SettingWithCopyWarning on a view of df.
df_article_keywords = df[article_keywords].copy()
#Make all metrics numeric
for i in metrics:
    df_article_keywords[i] = pd.to_numeric(df_article_keywords[i], errors='coerce')
#Drop NA Values in Keywords Column
df_article_keywords['MaxLinkKeywords'].replace(' ', np.nan, inplace=True)
df_article_keywords.dropna(subset=['MaxLinkKeywords'], inplace=True)
#Add in a column to distinguish what portion the enrichment was happening
df_article_keywords["Type"] = "Article"
#df_article_keywords.head()
"""
Explanation: <a id='keyword'></a>
1.3 Create A Consolidated Keyword Dataframe
#### Article Keyword Dataframe
End of explanation
"""
#Create a list with only Thumbnail Keywords
thumbnail_keywords = ["Text", "MaxThumbnailKeywords"]
#Append dataframe with these metrics
thumbnail_keywords.extend(metrics)
# .copy() -> independent frame; the edits below would otherwise trigger
# pandas' SettingWithCopyWarning on a view of df.
df_thumbnail_keywords = df[thumbnail_keywords].copy()
#Make all metrics numeric
for i in metrics:
    df_thumbnail_keywords[i] = pd.to_numeric(df_thumbnail_keywords[i], errors='coerce')
#Drop NA Values in Keywords Column
df_thumbnail_keywords['MaxThumbnailKeywords'].replace(' ', np.nan, inplace=True)
df_thumbnail_keywords.dropna(subset=['MaxThumbnailKeywords'], inplace=True)
#Add in a column to distinguish what portion the enrichment was happening
df_thumbnail_keywords["Type"] = "Thumbnails"
#df_thumbnail_keywords.head()
"""
Explanation: Thumbnail Keyword Dataframe
End of explanation
"""
#Create a list with only Post Keywords
post_keywords = ["Text", "MaxTextKeywords"]
#Append dataframe with these metrics
post_keywords.extend(metrics)
# .copy() -> independent frame; the edits below would otherwise trigger
# pandas' SettingWithCopyWarning on a view of df.
df_post_keywords = df[post_keywords].copy()
#Make all metrics numeric
for i in metrics:
    df_post_keywords[i] = pd.to_numeric(df_post_keywords[i], errors='coerce')
#Drop NA Values in Keywords Column
df_post_keywords['MaxTextKeywords'].replace(' ', np.nan, inplace=True)
df_post_keywords.dropna(subset=['MaxTextKeywords'], inplace=True)
#Add in a column to distinguish what portion the enrichment was happening
df_post_keywords["Type"] = "Posts"
# df_post_keywords.info()
"""
Explanation: Post Keyword Dataframe
End of explanation
"""
#first make the Column Headers the same
df_post_keywords.rename(columns={"MaxTextKeywords": "Keywords"}, inplace=True)
df_thumbnail_keywords.rename(columns={"MaxThumbnailKeywords":"Keywords"}, inplace=True)
df_article_keywords.rename(columns={"MaxLinkKeywords":"Keywords"}, inplace=True)
#Combine into one data frame
df_keywords = pd.concat([df_post_keywords, df_thumbnail_keywords, df_article_keywords])
# Keep only high-engagement posts (more than 700 lifetime consumptions) to
# reduce noise in the keyword charts below.
df_keywords = df_keywords[df_keywords["Lifetime Post Consumptions"]>700]
#df_keywords
"""
Explanation: Combine Post, Thumbnail, and Article Dataframes to Make One Keywords Dataframe
End of explanation
"""
#Create a list with only Article Entities
article_entities = ["Text", "MaxLinkEntity"]
#Append dataframe with these metrics
article_entities.extend(metrics)
# .copy() -> independent frame; the edits below would otherwise trigger
# pandas' SettingWithCopyWarning on a view of df.
df_article_entities = df[article_entities].copy()
#Make all metrics numeric
for i in metrics:
    df_article_entities[i] = pd.to_numeric(df_article_entities[i], errors='coerce')
# Replace whitespace-only entities with NaN, then drop those rows.
# (Reads the subset frame's own column, consistent with the sibling blocks;
# the original read df["MaxLinkEntity"], which holds the same values.)
df_article_entities['MaxLinkEntity'] = df_article_entities['MaxLinkEntity'].replace(r'\s+', np.nan, regex=True)
df_article_entities.dropna(subset=['MaxLinkEntity'], inplace=True)
#Add in a column to distinguish what portion the enrichment was happening
df_article_entities["Type"] = "Article"
#df_article_entities
"""
Explanation: <a id='entity'></a>
1.4 Create A Consolidated Entity Dataframe
Article Entity Dataframe
End of explanation
"""
#Create a list with only Thumbnail Entities
thumbnail_entities = ["Text", "MaxThumbnailEntity"]
#Append dataframe with these metrics
thumbnail_entities.extend(metrics)
# .copy() -> independent frame; the edits below would otherwise trigger
# pandas' SettingWithCopyWarning on a view of df.
df_thumbnail_entities = df[thumbnail_entities].copy()
#Make all metrics numeric
for i in metrics:
    df_thumbnail_entities[i] = pd.to_numeric(df_thumbnail_entities[i], errors='coerce')
# Replace whitespace-only entities with NaN, then drop those rows.
df_thumbnail_entities['MaxThumbnailEntity'] = df_thumbnail_entities['MaxThumbnailEntity'].replace(r'\s+', np.nan, regex=True)
df_thumbnail_entities.dropna(subset=['MaxThumbnailEntity'], inplace=True)
#Add in a column to distinguish what portion the enrichment was happening
df_thumbnail_entities["Type"] = "Thumbnails"
#df_thumbnail_entities
"""
Explanation: Thumbnail Entity Dataframe
End of explanation
"""
#Create a list with only Post Entities
post_entities = ["Text", "MaxTextEntity"]
#Append dataframe with these metrics
post_entities.extend(metrics)
# .copy() -> independent frame; the edits below would otherwise trigger
# pandas' SettingWithCopyWarning on a view of df.
df_post_entities = df[post_entities].copy()
#Make all metrics numeric
for i in metrics:
    df_post_entities[i] = pd.to_numeric(df_post_entities[i], errors='coerce')
# Replace whitespace-only entities with NaN, then drop those rows.
df_post_entities['MaxTextEntity'] = df_post_entities['MaxTextEntity'].replace(r'\s+', np.nan, regex=True)
df_post_entities.dropna(subset=['MaxTextEntity'], inplace=True)
#Add in a column to distinguish what portion the enrichment was happening
df_post_entities["Type"] = "Posts"
#df_post_entities
"""
Explanation: Post Entity Dataframe
End of explanation
"""
#first make the Column Headers the same
df_post_entities.rename(columns={"MaxTextEntity": "Entities"}, inplace=True)
df_thumbnail_entities.rename(columns={"MaxThumbnailEntity":"Entities"}, inplace=True)
df_article_entities.rename(columns={"MaxLinkEntity":"Entities"}, inplace=True)
#Combine into one data frame
df_entities = pd.concat([df_post_entities, df_thumbnail_entities, df_article_entities])
# Drop any rows whose entity ended up as an empty string after enrichment.
df_entities["Entities"] = df_entities["Entities"].replace('', np.nan)
df_entities.dropna(subset=["Entities"], inplace=True)
#df_entities
"""
Explanation: Combine Post, Thumbnail, and Article Dataframes to Make One Entity Dataframe
End of explanation
"""
# Short aliases for the consolidated feature dataframes used by PixieDust below.
entities = df_entities
tones = df_tones
keywords = df_keywords
"""
Explanation: <a id="part3"></a>
Part III
<a id='2setup'></a>
1. Setup
<a id='2setup2'></a>
1.1 Assign Variables
Assign new dataframes to variables.
End of explanation
"""
display(tones)
"""
Explanation: <a id=''></a>
2. Visualize Data
<a id=''></a>
2.1 Run PixieDust Visualization Library with Display() API
PixieDust lets you visualize your data in just a few clicks using the display() API. You can find more info at https://ibm-cds-labs.github.io/pixiedust/displayapi.html. The following cell creates a DataFrame and uses the display() API to create a bar chart:
The first thing we can do is see how Lifetime Post Consumption is related to emotion tone. Clicking the "Options" icon allows you to change the metrics.
End of explanation
"""
display(tones)
"""
Explanation: We can use also use a pie chart to identify how post consumption was broken up by tone.
End of explanation
"""
display(entities)
"""
Explanation: We can find out how mean post clicks differed by entity.
End of explanation
"""
display(keywords)
"""
Explanation: Finally we can see how post consumption was associated with certain keywords.
End of explanation
"""
|
shameeriqbal/pandas-tutorial | notebooks/3.Control_structures.ipynb | mit | def print_n_stars(n):
    """
    Print n stars on a single line, separated by spaces.

    Arguments:
        n: number of stars to print
    """
    for i in range(n):
        print "*", # Python 2 syntax: the trailing ',' suppresses the newline after each star
    print '' # inserts a new line after the loop
    return
"""
Explanation: This exercise will showcase two things:
1. Control structures
2. How to structure your program correctly
Control structures
Similar to other languages Python support various control structures such as:
1. If...else
2. For
3. While
4. Break, continue
5. But, no swtich
Making our Program DRY
DRY stands for Don't Repeat Yourself
One should not repeat the same functionality or copy the same code again. So, we use functions just like the one we did in the previous exercise.
However, the way we structured the program does not work well in the context of import.
Define a new function
End of explanation
"""
# Demo of while/if/elif/else/break: print rows of 1..m stars, then stop.
if __name__ == '__main__':
    m = 5
    i = 0
    while True:
        if i < m:
            i = i +1
            print_n_stars(i)
        elif i==m:
            # One extra pass after the stars: bump i past m so the next
            # iteration falls through to the else branch and breaks.
            i = i +1
            print('Elif reached')
        else:
            print('Stopping the loop')
            break
"""
Explanation: Changing how function is called
Instead of immedialtely calling the function, we wrap the function call with an if condition:
~~~~
if __name__ == '__main__':
~~~~
which checks whether the script is being run directly (rather than imported).
End of explanation
"""
|
planet-os/notebooks | aws/era5-s3-via-boto.ipynb | mit | # Initialize notebook environment.
%matplotlib inline
import boto3
import botocore
import datetime
import matplotlib.pyplot as plt
import os.path
import xarray as xr
"""
Explanation: Accessing ERA5 Data on S3
This notebook explores how to access ERA5 data stored on a public S3 bucket as part of the AWS Public Dataset program.. We'll examine how the data is organized in S3, download sample files in NetCDF format, and perform some simple analysis on the data.
ERA5 provides hourly estimates of a large number of atmospheric, land and oceanic climate variables. The data cover the Earth on a 30km grid and resolve the atmosphere using 137 levels from the surface up to a height of 80km.
A first segment of the ERA5 dataset is now available for public use (2008 to within 3 months of real time). Subsequent releases of ERA5 will cover the earlier decades. The entire ERA5 dataset from 1950 to present is expected to be available for use by early 2019.
The ERA5 data available on S3 contains an initial subset of 15 near surface variables. If there are additional variables you would like to see on S3, please contact datahub@intertrust.com with your request. We'll be evaluating the feedback we receive and potentially adding more variables in the future.
End of explanation
"""
era5_bucket = 'era5-pds'
# AWS access / secret keys required
# s3 = boto3.resource('s3')
# bucket = s3.Bucket(era5_bucket)
# No AWS keys required
# Unsigned (anonymous) client: the ERA5 bucket is public, so no credentials are needed.
client = boto3.client('s3', config=botocore.client.Config(signature_version=botocore.UNSIGNED))
"""
Explanation: Setting Up S3 Access Using Boto
We'll use boto to access the S3 bucket. Below, we'll set the bucket ID and create a resource to access it.
Note that although the bucket is public, boto requires the presence of an AWS access key and secret key to use a s3 resource. To request data anonymously, we'll use a low-level client instead.
End of explanation
"""
# List the top-level key prefixes in the bucket (one per available year of data).
paginator = client.get_paginator('list_objects')
result = paginator.paginate(Bucket=era5_bucket, Delimiter='/')
for prefix in result.search('CommonPrefixes'):
    print(prefix.get('Prefix'))
"""
Explanation: ERA5 Data Structure on S3
The ERA5 data is chunked into distinct NetCDF files per variable, each containing a month of hourly data. These files are organized in the S3 bucket by year, month, and variable name.
The data is structured as follows:
/{year}/{month}/main.nc
/data/{var1}.nc
/{var2}.nc
/{....}.nc
/{varN}.nc
where year is expressed as four digits (e.g. YYYY) and month as two digits (e.g. MM). Individual data variables (var1 through varN) use names corresponding to CF standard names convention plus any applicable additional info, such as vertical coordinate.
For example, the full file path for air temperature for January 2008 is:
/2008/01/data/air_temperature_at_2_metres.nc
Note that due to the nature of the ERA5 forecast timing, which is run twice daily at 06:00 and 18:00 UTC, the monthly data file begins with data from 07:00 on the first of the month and continues through 06:00 of the following month. We'll see this in the coordinate values of a data file we download later in the notebook.
Granule variable structure and metadata attributes are stored in main.nc. This file contains coordinate and auxiliary variable data. This file is also annotated using NetCDF CF metadata conventions.
We can use the paginate method to list the top level key prefixes in the bucket, which corresponds to the available years of ERA5 data.
End of explanation
"""
keys = []
date = datetime.date(2018,1,1) # update to desired date
prefix = date.strftime('%Y/%m/')
# List every object stored under the chosen year/month prefix.
response = client.list_objects_v2(Bucket=era5_bucket, Prefix=prefix)
response_meta = response.get('ResponseMetadata')
if response_meta.get('HTTPStatusCode') == 200:
    contents = response.get('Contents')
    # Use `is None` for the None check (PEP 8); a 200 response without a
    # 'Contents' key means the prefix matched no objects.
    if contents is None:
        print("No objects are available for %s" % date.strftime('%B, %Y'))
    else:
        for obj in contents:
            keys.append(obj.get('Key'))
        print("There are %s objects available for %s\n--" % (len(keys), date.strftime('%B, %Y')))
        for k in keys:
            print(k)
else:
    print("There was an error with your request.")
"""
Explanation: Let's take a look at the objects available for a specific month using boto's list_objects_v2 method.
End of explanation
"""
# Download the month's main.nc metadata file and inspect it with xarray.
metadata_file = 'main.nc'
metadata_key = prefix + metadata_file
client.download_file(era5_bucket, metadata_key, metadata_file)
ds_meta = xr.open_dataset('main.nc', decode_times=False)
ds_meta.info()
"""
Explanation: Downloading Files
Let's download main.nc file for that month and use xarray to inspect the metadata relating to the data files.
End of explanation
"""
# select date and variable of interest
date = datetime.date(2017,8,1)
var = 'air_temperature_at_2_metres'
# file path patterns for remote S3 objects and corresponding local file
s3_data_ptrn = '{year}/{month}/data/{var}.nc'
data_file_ptrn = '{year}{month}_{var}.nc'
year = date.strftime('%Y')
month = date.strftime('%m')
s3_data_key = s3_data_ptrn.format(year=year, month=month, var=var)
data_file = data_file_ptrn.format(year=year, month=month, var=var)
if not os.path.isfile(data_file): # check if file already exists
    # Variable files are roughly 1 GB, so only download when no local copy exists.
    print("Downloading %s from S3..." % s3_data_key)
    client.download_file(era5_bucket, s3_data_key, data_file)
ds = xr.open_dataset(data_file)
ds.info
"""
Explanation: Now let's acquire data for a single variable over the course of a month. Let's download air temperature for August of 2017 and open the NetCDF file using xarray.
Note that the cell below may take some time to execute, depending on your connection speed. Most of the variable files are roughly 1 GB in size.
End of explanation
"""
ds.coords.values()
"""
Explanation: The ds.info output above shows us that there are three dimensions to the data: lat, lon, and time0; and one data variable: air_temperature_at_2_metres. Let's inspect the coordinate values to see what they look like...
End of explanation
"""
# location coordinates
locs = [
    {'name': 'santa_monica', 'lon': -118.496245, 'lat': 34.010341},
    {'name': 'tallinn', 'lon': 24.753574, 'lat': 59.436962},
    {'name': 'honolulu', 'lon': -157.835938, 'lat': 21.290014},
    {'name': 'cape_town', 'lon': 18.423300, 'lat': -33.918861},
    {'name': 'dubai', 'lon': 55.316666, 'lat': 25.266666},
]
# convert westward (negative) longitudes to degrees east to match the data's lon axis
for l in locs:
    if l['lon'] < 0:
        l['lon'] = 360 + l['lon']
locs
ds_locs = xr.Dataset()
# iterate through the locations and create a dataset
# containing the temperature values for each location
for l in locs:
    name = l['name']
    lon = l['lon']
    lat = l['lat']
    var_name = name
    # nearest-neighbour selection of the grid cell closest to this location
    ds2 = ds.sel(lon=lon, lat=lat, method='nearest')
    lon_attr = '%s_lon' % name
    lat_attr = '%s_lat' % name
    # record the actual grid coordinates used as dataset attributes
    ds2.attrs[lon_attr] = ds2.lon.values.tolist()
    ds2.attrs[lat_attr] = ds2.lat.values.tolist()
    # rename the temperature variable after the location and drop the scalar coords
    ds2 = ds2.rename({var : var_name}).drop(('lat', 'lon'))
    ds_locs = xr.merge([ds_locs, ds2])
ds_locs.data_vars
"""
Explanation: In the coordinate values, we can see that longitude is expressed as degrees east, ranging from 0 to 359.718 degrees. Latitude is expressed as degrees north, ranging from -89.784874 to 89.784874. And finally the time0 coordinate, ranging from 2017-08-01T07:00:00Z to 2017-09-01T06:00:00Z.
As mentioned above, due to the forecast run timing the first forecast run of the month results in data beginning at 07:00, while the last produces data through September 1 at 06:00.
Temperature at Specific Locations
Let's create a list of various locations and plot their temperature values during the month. Note that the longitude values of the coordinates below are not given in degrees east, but rather as a mix of eastward and westward values. The data's longitude coordinate is degrees east, so we'll convert these location coordinates accordingly to match the data.
End of explanation
"""
def kelvin_to_celcius(t):
    """Convert a temperature *t* from Kelvin to degrees Celsius."""
    freezing_point_k = 273.15
    return t - freezing_point_k
def kelvin_to_fahrenheit(t):
    """Convert a temperature *t* from Kelvin to degrees Fahrenheit."""
    # 459.67 degF is absolute zero on the Fahrenheit scale.
    absolute_zero_f = 459.67
    return t * 9 / 5 - absolute_zero_f
# Apply the Kelvin->Fahrenheit conversion to every data variable.
ds_locs_f = ds_locs.apply(kelvin_to_fahrenheit)
# Flatten to a pandas dataframe (one column per location) and summarize.
df_f = ds_locs_f.to_dataframe()
df_f.describe()
"""
Explanation: Convert Units and Create a Dataframe
Temperature data in the ERA5 dataset uses Kelvin. Let's convert it to something more meaningful. I've chosen to use Fahrenheit, because as a U.S. citizen (and stubborn metric holdout) Celsius still feels foreign to me ;-)
While we're at it, let's also convert the dataset to a pandas dataframe and use the describe method to display some statistics about the data.
End of explanation
"""
# Bump the default font size for readability.
plt.rcParams.update({'font.size': 16})
# Line plot: hourly temperature per location over the month.
ax = df_f.plot(figsize=(18, 10), title="ERA5 Air Temperature at 2 Meters", grid=1)
ax.set(xlabel='Date', ylabel='Air Temperature (deg F)')
plt.show()
# Box plot: per-location temperature distribution (quartiles + whiskers).
ax = df_f.plot.box(figsize=(18, 10))
ax.set(xlabel='Location', ylabel='Air Temperature (deg F)')
plt.show()
"""
Explanation: Show Me Some Charts!
Finally, let's plot the temperature data for each of the locations over the period. The first plot displays the hourly temperature for each location over the month.
The second plot is a box plot. A box plot is a method for graphically depicting groups of numerical data through their quartiles. The box extends from the Q1 to Q3 quartile values of the data, with a line at the median (Q2). The whiskers extend from the edges of box to show the range of the data. The position of the whiskers is set by default to 1.5 * IQR (IQR = Q3 - Q1) from the edges of the box. Outlier points are those past the end of the whiskers.
End of explanation
"""
|
minxuancao/shogun | doc/ipython-notebooks/pca/pca_notebook.ipynb | gpl-3.0 | %pylab inline
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
# import all shogun classes
from modshogun import *
"""
Explanation: Principal Component Analysis in Shogun
By Abhijeet Kislay (GitHub ID: <a href='https://github.com/kislayabhi'>kislayabhi</a>)
This notebook is about finding Principal Components (<a href="http://en.wikipedia.org/wiki/Principal_component_analysis">PCA</a>) of data (<a href="http://en.wikipedia.org/wiki/Unsupervised_learning">unsupervised</a>) in Shogun. Its <a href="http://en.wikipedia.org/wiki/Dimensionality_reduction">dimensional reduction</a> capabilities are further utilised to show its application in <a href="http://en.wikipedia.org/wiki/Data_compression">data compression</a>, image processing and <a href="http://en.wikipedia.org/wiki/Facial_recognition_system">face recognition</a>.
End of explanation
"""
#number of data points.
n=100
#generate a random 2d line (y1 = m*x1 + c) with random slope and intercept
m = random.randint(1,10)
c = random.randint(1,10)
x1 = random.random_integers(-20,20,n)
y1=m*x1+c
#generate the noise (random magnitudes in roughly [-35, 35]).
noise=random.random_sample([n]) * random.random_integers(-35,35,n)
#make the noise orthogonal to the line y=mx+c and add it:
#the unit normal to the line has components (m, 1)/sqrt(1+m^2) up to sign.
x=x1 + noise*m/sqrt(1+square(m))
y=y1 + noise/sqrt(1+square(m))
#observation matrix: one 2-d point per column.
twoD_obsmatrix=array([x,y])
#to visualise the data we must plot it.
rcParams['figure.figsize'] = 7, 7
figure,axis=subplots(1,1)
xlim(-50,50)
ylim(-50,50)
axis.plot(twoD_obsmatrix[0,:],twoD_obsmatrix[1,:],'o',color='green',markersize=6)
#the noise-free line from which we generated the data is plotted in red
axis.plot(x1[:],y1[:],linewidth=0.3,color='red')
title('One-Dimensional sub-space with noise')
xlabel("x axis")
_=ylabel("y axis")
"""
Explanation: Some Formal Background (Skip if you just want code examples)
PCA is a useful statistical technique that has found application in fields such as face recognition and image compression, and is a common technique for finding patterns in data of high dimension.
In machine learning problems data is often high dimensional - images, bag-of-word descriptions etc. In such cases we cannot expect the training data to densely populate the space, meaning that there will be large parts in which little is known about the data. Hence it is expected that only a small number of directions are relevant for describing the data to a reasonable accuracy.
The data vectors may be very high dimensional, they will therefore typically lie closer to a much lower dimensional 'manifold'.
Here we concentrate on linear dimensional reduction techniques. In this approach a high dimensional datapoint $\mathbf{x}$ is 'projected down' to a lower dimensional vector $\mathbf{y}$ by:
$$\mathbf{y}=\mathbf{F}\mathbf{x}+\text{const}.$$
where the matrix $\mathbf{F}\in\mathbb{R}^{\text{M}\times \text{D}}$, with $\text{M}<\text{D}$. Here $\text{M}=\dim(\mathbf{y})$ and $\text{D}=\dim(\mathbf{x})$.
From the above scenario, we assume that
The number of principal components to use is $\text{M}$.
The dimension of each data point is $\text{D}$.
The number of data points is $\text{N}$.
We express the approximation for datapoint $\mathbf{x}^n$ as:$$\mathbf{x}^n \approx \mathbf{c} + \sum\limits_{i=1}^{\text{M}}y_i^n \mathbf{b}^i \equiv \tilde{\mathbf{x}}^n.$$
* Here the vector $\mathbf{c}$ is a constant and defines a point in the lower dimensional space.
* The $\mathbf{b}^i$ define vectors in the lower dimensional space (also known as 'principal component coefficients' or 'loadings').
* The $y_i^n$ are the low dimensional co-ordinates of the data.
Our motive is to find the reconstruction $\tilde{\mathbf{x}}^n$ given the lower dimensional representation $\mathbf{y}^n$(which has components $y_i^n,i = 1,...,\text{M})$. For a data space of dimension $\dim(\mathbf{x})=\text{D}$, we hope to accurately describe the data using only a small number $(\text{M}\ll \text{D})$ of coordinates of $\mathbf{y}$.
To determine the best lower dimensional representation it is convenient to use the square distance error between $\mathbf{x}$ and its reconstruction $\tilde{\mathbf{x}}$:$$\text{E}(\mathbf{B},\mathbf{Y},\mathbf{c})=\sum\limits_{n=1}^{\text{N}}\sum\limits_{i=1}^{\text{D}}[x_i^n - \tilde{x}_i^n]^2.$$
* Here the basis vectors are defined as $\mathbf{B} = [\mathbf{b}^1,...,\mathbf{b}^\text{M}]$ (defining $[\text{B}]_{i,j} = b_i^j$).
* Corresponding low dimensional coordinates are defined as $\mathbf{Y} = [\mathbf{y}^1,...,\mathbf{y}^\text{N}].$
* Also, $x_i^n$ and $\tilde{x}_i^n$ represents the coordinates of the data points for the original and the reconstructed data respectively.
* The bias $\mathbf{c}$ is given by the mean of the data $\sum_n\mathbf{x}^n/\text{N}$.
Therefore, for simplification purposes we centre our data, so as to set $\mathbf{c}$ to zero. Now we concentrate on finding the optimal basis $\mathbf{B}$( which has the components $\mathbf{b}^i, i=1,...,\text{M} $).
Deriving the optimal linear reconstruction
To find the best basis vectors $\mathbf{B}$ and corresponding low dimensional coordinates $\mathbf{Y}$, we may minimize the sum of squared differences between each vector $\mathbf{x}$ and its reconstruction $\tilde{\mathbf{x}}$:
$\text{E}(\mathbf{B},\mathbf{Y}) = \sum\limits_{n=1}^{\text{N}}\sum\limits_{i=1}^{\text{D}}\left[x_i^n - \sum\limits_{j=1}^{\text{M}}y_j^nb_i^j\right]^2 = \text{trace} \left( (\mathbf{X}-\mathbf{B}\mathbf{Y})^T(\mathbf{X}-\mathbf{B}\mathbf{Y}) \right)$
where $\mathbf{X} = [\mathbf{x}^1,...,\mathbf{x}^\text{N}].$
Considering the above equation under the orthonormality constraint $\mathbf{B}^T\mathbf{B} = \mathbf{I}$ (i.e the basis vectors are mutually orthogonal and of unit length), we differentiate it w.r.t $y_k^n$. The squared error $\text{E}(\mathbf{B},\mathbf{Y})$ therefore has zero derivative when:
$y_k^n = \sum_i b_i^kx_i^n$
By substituting this solution in the above equation, the objective becomes
$\text{E}(\mathbf{B}) = (\text{N}-1)\left[\text{trace}(\mathbf{S}) - \text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right)\right],$
where $\mathbf{S}$ is the sample covariance matrix of the data.
To minimise equation under the constraint $\mathbf{B}^T\mathbf{B} = \mathbf{I}$, we use a set of Lagrange Multipliers $\mathbf{L}$, so that the objective is to minimize:
$-\text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right)+\text{trace}\left(\mathbf{L}\left(\mathbf{B}^T\mathbf{B} - \mathbf{I}\right)\right).$
Since the constraint is symmetric, we can assume that $\mathbf{L}$ is also symmetric. Differentiating with respect to $\mathbf{B}$ and equating to zero we obtain that at the optimum
$\mathbf{S}\mathbf{B} = \mathbf{B}\mathbf{L}$.
This is a form of eigen-equation so that a solution is given by taking $\mathbf{L}$ to be diagonal and $\mathbf{B}$ as the matrix whose columns are the corresponding eigenvectors of $\mathbf{S}$. In this case,
$\text{trace}\left(\mathbf{S}\mathbf{B}\mathbf{B}^T\right) =\text{trace}(\mathbf{L}),$
which is the sum of the eigenvalues corresponding to the eigenvectors forming $\mathbf{B}$. Since we wish to minimise $\text{E}(\mathbf{B})$, we take the eigenvectors with the largest corresponding eigenvalues.
Whilst the solution to this eigen-problem is unique, this only serves to define the solution subspace since one may rotate and scale $\mathbf{B}$ and $\mathbf{Y}$ such that the value of the squared loss is exactly the same. The justification for choosing the non-rotated eigen solution is given by the additional requirement that the principal components corresponds to directions of maximal variance.
Maximum variance criterion
We aim to find that single direction $\mathbf{b}$ such that, when the data is projected onto this direction, the variance of this projection is maximal amongst all possible such projections.
The projection of a datapoint onto a direction $\mathbf{b}$ is $\mathbf{b}^T\mathbf{x}^n$ for a unit length vector $\mathbf{b}$. Hence the sum of squared projections is: $$\sum\limits_{n}\left(\mathbf{b}^T\mathbf{x}^n\right)^2 = \mathbf{b}^T\left[\sum\limits_{n}\mathbf{x}^n(\mathbf{x}^n)^T\right]\mathbf{b} = (\text{N}-1)\mathbf{b}^T\mathbf{S}\mathbf{b} = \lambda(\text{N} - 1)$$
which ignoring constants, is simply the negative of the equation for a single retained eigenvector $\mathbf{b}$(with $\mathbf{S}\mathbf{b} = \lambda\mathbf{b}$). Hence the optimal single $\text{b}$ which maximises the projection variance is given by the eigenvector corresponding to the largest eigenvalues of $\mathbf{S}.$ The second largest eigenvector corresponds to the next orthogonal optimal direction and so on. This explains why, despite the squared loss equation being invariant with respect to arbitrary rotation of the basis vectors, the ones given by the eigen-decomposition have the additional property that they correspond to directions of maximal variance. These maximal variance directions found by PCA are called the $\text{principal} $ $\text{directions}.$
There are two eigenvalue methods through which shogun can perform PCA namely
* Eigenvalue Decomposition Method.
* Singular Value Decomposition.
EVD vs SVD
The EVD viewpoint requires that one compute the eigenvalues and eigenvectors of the covariance matrix, which is the product of $\mathbf{X}\mathbf{X}^\text{T}$, where $\mathbf{X}$ is the data matrix. Since the covariance matrix is symmetric, the matrix is diagonalizable, and the eigenvectors can be normalized such that they are orthonormal:
$\mathbf{S}=\frac{1}{\text{N}-1}\mathbf{X}\mathbf{X}^\text{T},$
where the $\text{D}\times\text{N}$ matrix $\mathbf{X}$ contains all the data vectors: $\mathbf{X}=[\mathbf{x}^1,...,\mathbf{x}^\text{N}].$
Writing the $\text{D}\times\text{N}$ matrix of eigenvectors as $\mathbf{E}$ and the eigenvalues as an $\text{N}\times\text{N}$ diagonal matrix $\mathbf{\Lambda}$, the eigen-decomposition of the covariance $\mathbf{S}$ is
$\mathbf{X}\mathbf{X}^\text{T}\mathbf{E}=\mathbf{E}\mathbf{\Lambda}\Longrightarrow\mathbf{X}^\text{T}\mathbf{X}\mathbf{X}^\text{T}\mathbf{E}=\mathbf{X}^\text{T}\mathbf{E}\mathbf{\Lambda}\Longrightarrow\mathbf{X}^\text{T}\mathbf{X}\tilde{\mathbf{E}}=\tilde{\mathbf{E}}\mathbf{\Lambda},$
where we defined $\tilde{\mathbf{E}}=\mathbf{X}^\text{T}\mathbf{E}$. The final expression above represents the eigenvector equation for $\mathbf{X}^\text{T}\mathbf{X}.$ This is a matrix of dimensions $\text{N}\times\text{N}$ so that calculating the eigen-decomposition takes $\mathcal{O}(\text{N}^3)$ operations, compared with $\mathcal{O}(\text{D}^3)$ operations in the original high-dimensional space. We then can therefore calculate the eigenvectors $\tilde{\mathbf{E}}$ and eigenvalues $\mathbf{\Lambda}$ of this matrix more easily. Once found, we use the fact that the eigenvalues of $\mathbf{S}$ are given by the diagonal entries of $\mathbf{\Lambda}$ and the eigenvectors by
$\mathbf{E}=\mathbf{X}\tilde{\mathbf{E}}\mathbf{\Lambda}^{-1}$
On the other hand, applying SVD to the data matrix $\mathbf{X}$ follows like:
$\mathbf{X}=\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}$
where $\mathbf{U}^\text{T}\mathbf{U}=\mathbf{I}_\text{D}$ and $\mathbf{V}^\text{T}\mathbf{V}=\mathbf{I}_\text{N}$ and $\mathbf{\Sigma}$ is a diagonal matrix of the (positive) singular values. We assume that the decomposition has ordered the singular values so that the upper left diagonal element of $\mathbf{\Sigma}$ contains the largest singular value.
Attempting to construct the covariance matrix $(\mathbf{X}\mathbf{X}^\text{T})$from this decomposition gives:
$\mathbf{X}\mathbf{X}^\text{T} = \left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)\left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)^\text{T}$
$\mathbf{X}\mathbf{X}^\text{T} = \left(\mathbf{U}\mathbf{\Sigma}\mathbf{V}^\text{T}\right)\left(\mathbf{V}\mathbf{\Sigma}\mathbf{U}^\text{T}\right)$
and since $\mathbf{V}$ is an orthogonal matrix $\left(\mathbf{V}^\text{T}\mathbf{V}=\mathbf{I}\right),$
$\mathbf{X}\mathbf{X}^\text{T}=\left(\mathbf{U}\mathbf{\Sigma}^\mathbf{2}\mathbf{U}^\text{T}\right)$
Since it is in the form of an eigen-decomposition, the PCA solution given by performing the SVD decomposition of $\mathbf{X}$, for which the eigenvectors are then given by $\mathbf{U}$, and corresponding eigenvalues by the square of the singular values.
CPCA Class Reference (Shogun)
CPCA class of Shogun inherits from the CPreprocessor class. Preprocessors are transformation functions that doesn't change the domain of the input features. Specifically, CPCA performs principal component analysis on the input vectors and keeps only the specified number of eigenvectors. On preprocessing, the stored covariance matrix is used to project vectors into eigenspace.
Performance of PCA depends on the algorithm used according to the situation in hand.
Our PCA preprocessor class provides 3 method options to compute the transformation matrix:
$\text{PCA(EVD)}$ sets $\text{PCAmethod == EVD}$ : Eigen Value Decomposition of Covariance Matrix $(\mathbf{XX^T}).$
The covariance matrix $\mathbf{XX^T}$ is first formed internally and then
its eigenvectors and eigenvalues are computed using QR decomposition of the matrix.
The time complexity of this method is $\mathcal{O}(D^3)$ and should be used when $\text{N > D.}$
$\text{PCA(SVD)}$ sets $\text{PCAmethod == SVD}$ : Singular Value Decomposition of feature matrix $\mathbf{X}$.
The transpose of feature matrix, $\mathbf{X^T}$, is decomposed using SVD. $\mathbf{X^T = UDV^T}.$
The matrix V in this decomposition contains the required eigenvectors and
the diagonal entries of the diagonal matrix D correspond to the non-negative
eigenvalues.The time complexity of this method is $\mathcal{O}(DN^2)$ and should be used when $\text{N < D.}$
$\text{PCA(AUTO)}$ sets $\text{PCAmethod == AUTO}$ : This mode automagically chooses one of the above modes for the user based on whether $\text{N>D}$ (chooses $\text{EVD}$) or $\text{N<D}$ (chooses $\text{SVD}$)
PCA on 2D data
Step 1: Get some data
We will generate the toy data by adding orthogonal noise to a set of points lying on an arbitrary 2d line. We expect PCA to recover this line, which is a one-dimensional linear sub-space.
End of explanation
"""
#convert the observation matrix into a dense feature matrix.
train_features = RealFeatures(twoD_obsmatrix)
#PCA(EVD) is chosen since N=100 and D=2 (N>D).
#However we can also use PCA(AUTO) as it will automagically choose the appropriate method.
preprocessor = PCA(EVD)
#since we are projecting down the 2d data, the target dim is 1. But here the exhaustive method is detailed by
#setting the target dimension to 2 to visualize both the eigen vectors.
#However, in future examples we will get rid of this step by implementing it directly.
preprocessor.set_target_dim(2)
#init() computes (and stores) the transformation matrix and also
#centralises the data by subtracting its mean from it.
preprocessor.init(train_features)
#get the mean for the respective dimensions.
mean_datapoints=preprocessor.get_mean()
mean_x=mean_datapoints[0]
mean_y=mean_datapoints[1]
"""
Explanation: Step 2: Subtract the mean.
For PCA to work properly, we must subtract the mean from each of the data dimensions. The mean subtracted is the average across each dimension. So, all the $x$ values have $\bar{x}$ subtracted, and all the $y$ values have $\bar{y}$ subtracted from them, where:$$\bar{\mathbf{x}} = \frac{\sum\limits_{i=1}^{n}x_i}{n}$$ $\bar{\mathbf{x}}$ denotes the mean of the $x_i^{'s}$
Shogun's way of doing things :
Preprocessor PCA performs principial component analysis on input feature vectors/matrices. It provides an interface to set the target dimension by $\text{set_target_dim method}.$ When the $\text{init()}$ method in $\text{PCA}$ is called with proper
feature matrix $\text{X}$ (with say $\text{N}$ number of vectors and $\text{D}$ feature dimension), a transformation matrix is computed and stored internally. It inherently also centralizes the data by subtracting the mean from it.
End of explanation
"""
#Get the eigenvectors (we will get two of these since we set the target to 2).
#Columns of E are the eigenvectors of the covariance matrix.
E = preprocessor.get_transformation_matrix()
#Get all the eigenvalues returned by PCA.
eig_value=preprocessor.get_eigenvalues()
e1 = E[:,0]
e2 = E[:,1]
eig_value1 = eig_value[0]
eig_value2 = eig_value[1]
"""
Explanation: Step 3: Calculate the covariance matrix
To understand the relationship between 2 dimension we define $\text{covariance}$. It is a measure to find out how much the dimensions vary from the mean $with$ $respect$ $to$ $each$ $other.$$$cov(X,Y)=\frac{\sum\limits_{i=1}^{n}(X_i-\bar{X})(Y_i-\bar{Y})}{n-1}$$
A useful way to get all the possible covariance values between all the different dimensions is to calculate them all and put them in a matrix.
Example: For a 3d dataset with usual dimensions of $x,y$ and $z$, the covariance matrix has 3 rows and 3 columns, and the values are this:
$$\mathbf{S} = \begin{pmatrix}cov(x,x)&cov(x,y)&cov(x,z)\\cov(y,x)&cov(y,y)&cov(y,z)\\cov(z,x)&cov(z,y)&cov(z,z)\end{pmatrix}$$
Step 4: Calculate the eigenvectors and eigenvalues of the covariance matrix
Find the eigenvectors $e^1,....e^M$ of the covariance matrix $\mathbf{S}$.
Shogun's way of doing things :
Step 3 and Step 4 are directly implemented by the PCA preprocessor of Shogun toolbar. The transformation matrix is essentially a $\text{D}$$\times$$\text{M}$ matrix, the columns of which correspond to the eigenvectors of the covariance matrix $(\text{X}\text{X}^\text{T})$ having top $\text{M}$ eigenvalues.
End of explanation
"""
#find out the M eigenvectors corresponding to the top M eigenvalues and store them in E
#Here M=1
#slope of e1 & e2 (rise over run of each eigenvector direction)
m1=e1[1]/e1[0]
m2=e2[1]/e2[0]
#generate the two lines through the origin with those slopes
x1=range(-50,50)
x2=x1
y1=multiply(m1,x1)
y2=multiply(m2,x2)
#plot the data along with those two eigenvectors
figure, axis = subplots(1,1)
xlim(-50, 50)
ylim(-50, 50)
axis.plot(x[:], y[:],'o',color='green', markersize=5, label="green")
axis.plot(x1[:], y1[:], linewidth=0.7, color='black')
axis.plot(x2[:], y2[:], linewidth=0.7, color='blue')
#proxy artists (small rectangles) used only to build the legend
p1 = Rectangle((0, 0), 1, 1, fc="black")
p2 = Rectangle((0, 0), 1, 1, fc="blue")
legend([p1,p2],["1st eigenvector","2nd eigenvector"],loc='center left', bbox_to_anchor=(1, 0.5))
title('Eigenvectors selection')
xlabel("x axis")
_=ylabel("y axis")
"""
Explanation: Step 5: Choosing components and forming a feature vector.
Let's visualize the eigenvectors and decide upon which to choose as the $principal$ $component$ of the data set.
End of explanation
"""
#The eigenvector corresponding to the higher eigenvalue (i.e. eig_value2) is chosen (i.e. e2).
#E is the feature vector (the principal direction used for projection).
E=e2
"""
Explanation: In the above figure, the blue line is a good fit of the data. It shows the most significant relationship between the data dimensions.
It turns out that the eigenvector with the $highest$ eigenvalue is the $principal$ $component$ of the data set.
Form the matrix $\mathbf{E}=[\mathbf{e}^1,...,\mathbf{e}^M].$
Here $\text{M}$ represents the target dimension of our final projection
End of explanation
"""
#transform all 2-dimensional feature matrices to target-dimensional approximations.
yn=preprocessor.apply_to_feature_matrix(train_features)
#Since here we are manually trying to find the eigenvector corresponding to the top eigenvalue,
#the 2nd row of yn is chosen as it corresponds to the required eigenvector e2.
yn1=yn[1,:]
"""
Explanation: Step 6: Projecting the data to its Principal Components.
This is the final step in PCA. Once we have choosen the components(eigenvectors) that we wish to keep in our data and formed a feature vector, we simply take the vector and multiply it on the left of the original dataset.
The lower dimensional representation of each data point $\mathbf{x}^n$ is given by
$\mathbf{y}^n=\mathbf{E}^T(\mathbf{x}^n-\mathbf{m})$
Here the $\mathbf{E}^T$ is the matrix with the eigenvectors in rows, with the most significant eigenvector at the top. The mean adjusted data, with data items in each column, with each row holding a seperate dimension is multiplied to it.
Shogun's way of doing things :
Step 6 can be performed by shogun's PCA preprocessor as follows:
The transformation matrix that we got after $\text{init()}$ is used to transform all $\text{D-dim}$ feature matrices (with $\text{D}$ feature dimensions) supplied, via $\text{apply_to_feature_matrix methods}$.This transformation outputs the $\text{M-Dim}$ approximation of all these input vectors and matrices (where $\text{M}$ $\leq$ $\text{min(D,N)}$).
End of explanation
"""
#reconstruct each point: x~ = mean + E * y  (per coordinate)
x_new=(yn1 * E[0]) + tile(mean_x,[n,1]).T[0]
y_new=(yn1 * E[1]) + tile(mean_y,[n,1]).T[0]
"""
Explanation: Step 5 and Step 6 can be applied directly with Shogun's PCA preprocessor (from next example). It has been done manually here to show the exhaustive nature of Principal Component Analysis.
Step 7: Form the approximate reconstruction of the original data $\mathbf{x}^n$
The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\tilde{\mathbf{x}}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$
End of explanation
"""
figure, axis = subplots(1,1)
xlim(-50, 50)
ylim(-50, 50)
#original 2d data (green) and its 1d PCA reconstruction (blue)
axis.plot(x[:], y[:],'o',color='green', markersize=5, label="green")
axis.plot(x_new, y_new, 'o', color='blue', markersize=5, label="red")
title('PCA Projection of 2D data into 1D subspace')
xlabel("x axis")
ylabel("y axis")
#add some legend for information (proxy rectangles, not plotted data)
p1 = Rectangle((0, 0), 1, 1, fc="r")
p2 = Rectangle((0, 0), 1, 1, fc="g")
p3 = Rectangle((0, 0), 1, 1, fc="b")
legend([p1,p2,p3],["normal projection","2d data","1d projection"],loc='center left', bbox_to_anchor=(1, 0.5))
#plot the projections in red: each segment joins a point to its reconstruction
for i in range(n):
    axis.plot([x[i],x_new[i]],[y[i],y_new[i]] , color='red')
"""
Explanation: The new data is plotted below
End of explanation
"""
rcParams['figure.figsize'] = 8,8
#number of points
n=100
#generate the data: random plane a*x + b*y + c*z + d = 0
a=random.randint(1,20)
b=random.randint(1,20)
c=random.randint(1,20)
d=random.randint(1,20)
x1=random.random_integers(-20,20,n)
y1=random.random_integers(-20,20,n)
#solve the plane equation for z
z1=-(a*x1+b*y1+d)/c
#generate the noise
noise=random.random_sample([n])*random.random_integers(-30,30,n)
#the normal unit vector is [a,b,c]/magnitude
magnitude=sqrt(square(a)+square(b)+square(c))
normal_vec=array([a,b,c]/magnitude)
#add the noise orthogonally (along the plane's unit normal)
x=x1+noise*normal_vec[0]
y=y1+noise*normal_vec[1]
z=z1+noise*normal_vec[2]
#observation matrix: one 3-d point per column
threeD_obsmatrix=array([x,y,z])
#to visualize the data, we must plot it.
from mpl_toolkits.mplot3d import Axes3D
fig = pyplot.figure()
ax=fig.add_subplot(111, projection='3d')
#plot the noisy data generated by distorting a plane
ax.scatter(x, y, z,marker='o', color='g')
ax.set_xlabel('x label')
ax.set_ylabel('y label')
ax.set_zlabel('z label')
legend([p2],["3d data"],loc='center left', bbox_to_anchor=(1, 0.5))
title('Two dimensional subspace with noise')
#mesh of the underlying (noise-free) plane, used for reference
xx, yy = meshgrid(range(-30,30), range(-30,30))
zz=-(a * xx + b * yy + d) / c
"""
Explanation: PCA on a 3d data.
Step1: Get some data
We generate points from a plane and then add random noise orthogonal to it. The general equation of a plane is: $$\text{a}\mathbf{x}+\text{b}\mathbf{y}+\text{c}\mathbf{z}+\text{d}=0$$
End of explanation
"""
#convert the observation matrix into a dense feature matrix.
train_features = RealFeatures(threeD_obsmatrix)
#PCA(EVD) is chosen since N=100 and D=3 (N>D).
#However we can also use PCA(AUTO) as it will automagically choose the appropriate method.
preprocessor = PCA(EVD)
#If we set the target dimension to 2, Shogun would automagically preserve the required 2 eigenvectors (out of 3) according to their
#eigenvalues.
preprocessor.set_target_dim(2)
preprocessor.init(train_features)
#get the mean for the respective dimensions.
mean_datapoints=preprocessor.get_mean()
mean_x=mean_datapoints[0]
mean_y=mean_datapoints[1]
mean_z=mean_datapoints[2]
"""
Explanation: Step 2: Subtract the mean.
End of explanation
"""
#get the required eigenvectors corresponding to the top 2 eigenvalues (columns of E).
E = preprocessor.get_transformation_matrix()
"""
Explanation: Step 3 & Step 4: Calculate the eigenvectors of the covariance matrix
End of explanation
"""
#Project the (centred) data onto the retained eigenvectors: y = E^T (x - m).
#This can be performed by shogun's PCA preprocessor as follows:
yn=preprocessor.apply_to_feature_matrix(train_features)
"""
Explanation: Steps 5: Choosing components and forming a feature vector.
Since we performed PCA for a target $\dim = 2$ for the $3 \dim$ data, we are directly given
the two required eigenvectors in $\mathbf{E}$
E is automagically filled by setting target dimension = M. This is different from the 2d data example where we implemented this step manually.
Step 6: Projecting the data to its Principal Components.
End of explanation
"""
#reconstruct in 3d: x~ = m + E*y (mean added back per coordinate)
new_data=dot(E,yn)
x_new=new_data[0,:]+tile(mean_x,[n,1]).T[0]
y_new=new_data[1,:]+tile(mean_y,[n,1]).T[0]
z_new=new_data[2,:]+tile(mean_z,[n,1]).T[0]
#all the above points lie on the same plane. To make it more clear we will plot the projection also.
fig=pyplot.figure()
ax=fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z,marker='o', color='g')
ax.set_xlabel('x label')
ax.set_ylabel('y label')
ax.set_zlabel('z label')
legend([p1,p2,p3],["normal projection","3d data","2d projection"],loc='center left', bbox_to_anchor=(1, 0.5))
title('PCA Projection of 3D data into 2D subspace')
#blue: reconstructed points; red: segments from each point to its projection
for i in range(100):
    ax.scatter(x_new[i], y_new[i], z_new[i],marker='o', color='b')
    ax.plot([x[i],x_new[i]],[y[i],y_new[i]],[z[i],z_new[i]],color='r')
"""
Explanation: Step 7: Form the approximate reconstruction of the original data $\mathbf{x}^n$
The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\tilde{\mathbf{x}}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$
End of explanation
"""
# Larger default figure size for the image grids below.
rcParams['figure.figsize'] = 10, 10
import os
def get_imlist(path):
    """Return a list of paths for all PGM images in directory *path*.

    Note: despite the dataset being images, only files with the '.pgm'
    extension are matched (the AT&T face dataset ships as PGM files).
    """
    return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.pgm')]
#set path of the training images
path_train=os.path.join(SHOGUN_DATA_DIR, 'att_dataset/training/')
#set no. of rows that the images will be resized to.
k1=100
#set no. of columns that the images will be resized to.
k2=100
filenames = get_imlist(path_train)
filenames = array(filenames)
#n is the total number of images that have to be analysed.
n=len(filenames)
"""
Explanation: PCA Performance
Uptill now, we were using the EigenValue Decomposition method to compute the transformation matrix$\text{(N>D)}$ but for the next example $\text{(N<D)}$ we will be using Singular Value Decomposition.
Practical Example : Eigenfaces
The problem with the image representation we are given is its high dimensionality. Two-dimensional $\text{p} \times \text{q}$ grayscale images span a $\text{m=pq}$ dimensional vector space, so an image with $\text{100}\times\text{100}$ pixels lies in a $\text{10,000}$ dimensional image space already.
The question is, are all dimensions really useful for us?
$\text{Eigenfaces}$ are based on the dimensional reduction approach of $\text{Principal Component Analysis(PCA)}$. The basic idea is to treat each image as a vector in a high dimensional space. Then, $\text{PCA}$ is applied to the set of images to produce a new reduced subspace that captures most of the variability between the input images. The $\text{Principal Component Vectors}$(eigenvectors of the sample covariance matrix) are called the $\text{Eigenfaces}$. Every input image can be represented as a linear combination of these eigenfaces by projecting the image onto the new eigenfaces space. Thus, we can perform the identification process by matching in this reduced space. An input image is transformed into the $\text{eigenspace,}$ and the nearest face is identified using a $\text{Nearest Neighbour approach.}$
Step 1: Get some data.
Here data means those Images which will be used for training purposes.
End of explanation
"""
# Helper used throughout to display an image without axis ticks.
def showfig(image):
    """Render *image* as a grayscale plot with both axes hidden."""
    rendered = imshow(image, cmap='gray')
    for ax_obj in (rendered.axes.get_xaxis(), rendered.axes.get_yaxis()):
        ax_obj.set_visible(False)
# NOTE(review): `import Image` is the legacy PIL import (Python 2 era);
# on modern installs this would be `from PIL import Image` — confirm env.
import Image
from scipy import misc
# to get a hang of the data, let's see some of the dataset images
# in a 7x7 grid.
fig = pyplot.figure()
title('The Training Dataset')
for i in range(49):
    fig.add_subplot(7,7,i+1)
    #load as grayscale ('L') and resize to k1 x k2
    train_img=array(Image.open(filenames[i]).convert('L'))
    train_img=misc.imresize(train_img, [k1,k2])
    showfig(train_img)
"""
Explanation: Lets have a look on the data:
End of explanation
"""
#To form the observation matrix obs_matrix: one flattened training image per column.
def _load_image_vector(fname):
    #read one image as grayscale and resize it to k1 rows and k2 columns
    img = array(Image.open(fname).convert('L'))
    img = misc.imresize(img, [k1, k2])
    #since RealFeatures accepts only data of float64 datatype, we do a type conversion
    img = array(img, dtype='double')
    #flatten it to make it a row vector
    return img.flatten()

#Stack all rows with a single vstack call. The previous approach called
#vstack inside the loop, copying the growing matrix on every iteration
#(quadratic in the number of images); building the row list first is O(n).
train_img = vstack([_load_image_vector(f) for f in filenames])
#form the observation matrix (transpose: images become columns)
obs_matrix = train_img.T
"""
Explanation: Represent every image $I_i$ as a vector $\Gamma_i$
End of explanation
"""
train_features = RealFeatures(obs_matrix)
#AUTO picks SVD here since the feature dimension (k1*k2) exceeds the number of images (N<D).
preprocessor=PCA(AUTO)
#keep the top 100 principal components (eigenfaces)
preprocessor.set_target_dim(100)
preprocessor.init(train_features)
#per-pixel mean face, needed later for reconstruction
mean=preprocessor.get_mean()
"""
Explanation: Step 2: Subtract the mean
It is very important that the face images $I_1,I_2,...,I_M$ are $centered$ and of the $same$ size
We observe here that the no. of $\dim$ for each image is far greater than no. of training images. This calls for the use of $\text{SVD}$.
Setting the $\text{PCA}$ in the $\text{AUTO}$ mode does this automagically according to the situation.
End of explanation
"""
#get the required eigenvectors corresponding to the top 100 eigenvalues
E = preprocessor.get_transformation_matrix()
#let's see how these eigenfaces/eigenvectors look:
fig1 = pyplot.figure()
title('Top 20 Eigenfaces')
for i in range(20):
    a = fig1.add_subplot(5,4,i+1)
    #each column of E is a flattened k1*k2 eigenface; reshape for display
    eigen_faces=E[:,i].reshape([k1,k2])
    showfig(eigen_faces)
"""
Explanation: Step 3 & Step 4: Calculate the eigenvectors and eigenvalues of the covariance matrix.
End of explanation
"""
# Project the training images into the PCA subspace: y^n = E^T (x^n - m)
# (see the formula above); the preprocessor performs the required dot product.
yn=preprocessor.apply_to_feature_matrix(train_features)
"""
Explanation: These 20 eigenfaces are not sufficient for a good image reconstruction. Having more eigenvectors gives us the most flexibility in the number of faces we can reconstruct. Though we are adding vectors with low variance, they are in directions of change nonetheless, and an external image that is not in our database could in fact need these eigenvectors to get even relatively close to it. But at the same time we must also keep in mind that adding excessive eigenvectors results in addition of little or no variance, slowing down the process.
Clearly a tradeoff is required.
We here set for M=100.
Step 5: Choosing components and forming a feature vector.
Since we set target $\dim = 100$ for this $n \dim$ data, we are directly given the $100$ required eigenvectors in $\mathbf{E}$
E is automagically filled. This is different from the 2d data example where we implemented this step manually.
Step 6: Projecting the data to its Principal Components.
The lower dimensional representation of each data point $\mathbf{x}^n$ is given by $$\mathbf{y}^n=\mathbf{E}^T(\mathbf{x}^n-\mathbf{m})$$
End of explanation
"""
# Reconstruct each image from its PCA coefficients: x^n ~ m + E y^n.
# tile(...) repeats the mean vector so it adds column-wise across all images.
re=tile(mean,[n,1]).T[0] + dot(E,yn)
# Plot the reconstructed images: 49 of them (columns 1..49) on a 7x7 grid;
# matplotlib subplot indices start at 1, so column 0 is skipped.
fig2 = pyplot.figure()
title('Reconstructed Images from 100 eigenfaces')
for i in range(1,50):
    re1 = re[:,i].reshape([k1,k2])
    fig2.add_subplot(7,7,i)
    showfig(re1)
"""
Explanation: Step 7: Form the approximate reconstruction of the original image $I_n$
The approximate reconstruction of the original datapoint $\mathbf{x}^n$ is given by : $\mathbf{x}^n\approx\text{m}+\mathbf{E}\mathbf{y}^n$
End of explanation
"""
# Set the path of the *testing* images (the original comment said "training",
# but the directory used is att_dataset/testing).
path_test = os.path.join(SHOGUN_DATA_DIR, 'att_dataset/testing/')
test_files = get_imlist(path_test)
test_img = array(Image.open(test_files[0]).convert('L'))
rcParams.update({'figure.figsize': (3, 3)})
# Plot the test image for which we must identify a good match among the
# training images we already have.
fig = pyplot.figure()
title('The Test Image')
showfig(test_img)
# Process the test image exactly the way the training images were processed:
# resize to k1 x k2, convert to float64, then flatten to a row vector.
test_img = misc.imresize(test_img, [k1, k2])
test_img = array(test_img, dtype='double')
test_img = test_img.flatten()
# Centre the test image by subtracting the training-set mean.
test_f = test_img - mean
"""
Explanation: Recognition part.
In our face recognition process using the Eigenfaces approach, in order to recognize an unseen image, we proceed with the same preprocessing steps as applied to the training images.
Test images are represented in terms of eigenface coefficients by projecting them into face space$\text{(eigenspace)}$ calculated during training. Test sample is recognized by measuring the similarity distance between the test sample and all samples in the training. The similarity measure is a metric of distance calculated between two vectors. Traditional Eigenface approach utilizes $\text{Euclidean distance}$.
End of explanation
"""
# The training images were already projected into the PCA subspace as yn.
train_proj = yn
# Project the centred test image into the same subspace: E^T (x - m).
test_proj = dot(E.T, test_f)
"""
Explanation: Here we have to project our training image as well as the test image on the PCA subspace.
The Eigenfaces method then performs face recognition by:
1. Projecting all training samples into the PCA subspace.
2. Projecting the query image into the PCA subspace.
3. Finding the nearest neighbour between the projected training images and the projected query image.
End of explanation
"""
# Use Euclidean distance as the similarity measure (Shogun's EuclideanDistance).
workfeat = RealFeatures(mat(train_proj))
testfeat = RealFeatures(mat(test_proj).T)
RaRb=EuclideanDistance(testfeat, workfeat)
# d[i] holds the distance between the test image and training image i.
d=empty([n,1])
for i in range(n):
    d[i]= RaRb.distance(0,i)
# The training image with the minimum distance is the identified match.
min_distance_index = d.argmin()
iden=array(Image.open(filenames[min_distance_index]))
title('Identified Image')
showfig(iden)
"""
Explanation: Shogun's way of doing things:
Shogun uses CEuclideanDistance class to compute the familiar Euclidean distance for real valued features. It computes the square root of the sum of squared disparity between the corresponding feature dimensions of two data points.
$\mathbf{d(x,x')=}$$\sqrt{\mathbf{\sum\limits_{i=0}^{n}}|\mathbf{x_i}-\mathbf{x'_i}|^2}$
End of explanation
"""
|
imcgreer/simqso | examples/bossqsos_example.ipynb | bsd-3-clause | M1450 = linspace(-30,-22,20)
zz = arange(0.7,3.5,0.5)
ple = bossqsos.BOSS_DR9_PLE()
lede = bossqsos.BOSS_DR9_LEDE()
for z in zz:
if z<2.2:
qlf = ple if z<2.2 else lede
plot(M1450,qlf(M1450,z),label='z=%.1f'%z)
legend(loc='lower left')
xlim(-21.8,-30.2)
xlabel("$M_{1450}$")
ylabel("log Phi")
"""
Explanation: Input luminosity function
End of explanation
"""
_ = bossqsos.qsoSimulation(bossqsos.simParams,saveSpectra=True)
"""
Explanation: Run the simulation, save the spectra
End of explanation
"""
wave,qsos = load_sim_output('boss_dr9qlf_sim','.')
"""
Explanation: Simulation outputs
End of explanation
"""
qsos[::40]
"""
Explanation: the table of simulated quasars, including redshift, luminosity, synthetic flux/mags in nine bands, and "observed" photometry with errors included.
also includes details of the model inputs for each quasar: slopes is the set of broken power law slopes defining the continuum, emLines is the set of Gaussian parameters for each emission line (wave, EW, sigma) measured in the rest frame.
End of explanation
"""
_ = hist(qsos['obsMag'][:,1],linspace(17,22,20),log=True)
"""
Explanation: the distribution in g-band magnitude:
End of explanation
"""
# u-g vs. g-r color-color diagram built from the noisy "observed"
# magnitudes, with points colored by redshift.
scatter(qsos['obsMag'][:,0]-qsos['obsMag'][:,1],qsos['obsMag'][:,1]-qsos['obsMag'][:,2],
        c=qsos['z'],cmap=cm.autumn_r,alpha=0.7)
colorbar()
xlabel('u-g')
ylabel('g-r')
xlim(-0.75,3)
ylim(-0.5,1.5)
"""
Explanation: color-color diagram from observed magnitudes, including errors:
End of explanation
"""
# Print an indexed listing of the emission-line names stored in the
# LINENAME keyword of the FITS table header.
qsodatahdr = fits.getheader('boss_dr9qlf_sim.fits',1)
line_names = qsodatahdr['LINENAME'].split(',')
for idx, line_name in enumerate(line_names):
    print('{}:{}, '.format(idx, line_name), end=" ")
print()
"""
Explanation: the list of emission lines in the model:
End of explanation
"""
# Broad CIV equivalent width vs. absolute magnitude (the Baldwin Effect).
# emLines fields are (wave, EW, sigma), so index 1 is the EW; line index 13
# corresponds to CIV in the LINENAME list printed above.
# NOTE(review): "\A" in "$\AA$" is not a valid escape sequence; a raw string
# r"...$\AA$" would be more robust (it renders identically here).
scatter(qsos['absMag'],qsos['emLines'][:,13,1],c=qsos['z'],cmap=cm.autumn_r)
colorbar()
xlabel("$M_{1450}$")
ylabel("CIV equivalent width $\AA$")
"""
Explanation: broad CIV equivalent width, displaying the Baldwin Effect:
End of explanation
"""
# Full spectrum of the first simulated quasar, wavelength in microns,
# log flux scale.
figure(figsize=(14,4))
plot(wave/1e4,qsos['spec'][0])
yscale('log')
xlabel('wave [micron]')
"""
Explanation: Example spectra
for this example the wavelength cutoff is 30 micron, but the model doesn't include warm dust and thus is invalid beyond a few micron.
End of explanation
"""
# Zoom in on the Lyman-alpha -- CIV region of quasar #20.
figure(figsize=(14,4))
plot(wave,qsos['spec'][20])
xlim(3500,7500)
title('$z=%.3f$'%qsos['z'][20])
"""
Explanation: zoom in on the lyman alpha - CIV region:
End of explanation
"""
# XXX WARNING -- an ugly hack is needed here. Internally, a table of Voigt profiles is generated
# at startup in order to speed the forest spectra generation. This table is defined in terms of
# the wave dispersion the first time a simulation is run. Here we are changing the wavelength
# model, and thus before executing the next cells you must restart the kernel and execute only
# the first cell.
np.random.seed(12345)
# High-resolution (R=30000) wavelength grid restricted to 3500-4800 A.
wave = buildWaveGrid(dict(waveRange=(3500,4800),SpecDispersion=30000))
forest = hiforest.IGMTransmissionGrid(wave,WP11_model,1)
# Draw one forest transmission spectrum for a z=2.9 sightline.
T = forest.next_spec(0,2.9)
figure(figsize=(14,4))
plot(wave,T)
# Second panel: the same spectrum, zoomed to 4300-4800 A.
figure(figsize=(14,4))
plot(wave,T)
xlim(4300,4800)
"""
Explanation: IGM absorption model (simqso.hiforest)
an example of the forest transmission spectra at R=30,000 (the native resolution for the monte carlo forest spectra):
End of explanation
"""
|
kvr777/deep-learning | gan_mnist/Intro_to_GANs_Exercises.ipynb | mit | %matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
"""
Explanation: Generative Adversarial Network
In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!
GANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:
Pix2Pix
CycleGAN
A whole list
The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator, it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistiguishable from real data to the discriminator.
The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to construct its fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.
The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates a real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.
End of explanation
"""
def model_inputs(real_dim, z_dim):
    """Create the graph inputs: real-image and latent-vector placeholders."""
    real_shape = (None, real_dim)
    z_shape = (None, z_dim)
    inputs_real = tf.placeholder(tf.float32, shape=real_shape, name='inputs_real')
    inputs_z = tf.placeholder(tf.float32, shape=z_shape, name='inputs_z')
    return inputs_real, inputs_z
"""
Explanation: Model Inputs
First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.
Exercise: Finish the model_inputs function below. Create the placeholders for inputs_real and inputs_z using the input sizes real_dim and z_dim respectively.
End of explanation
"""
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
    ''' Build the generator network.
        Arguments
        ---------
        z : Input tensor for the generator (latent vector)
        out_dim : Shape of the generator output
        n_units : Number of units in hidden layer
        reuse : Reuse the variables with tf.variable_scope
        alpha : leak parameter for leaky ReLU
        Returns
        -------
        out : tanh-activated generator output in [-1, 1]
              (note: the raw logits are NOT returned, unlike discriminator)
    '''
    with tf.variable_scope('generator', reuse=reuse):  # prefixes variable names with 'generator/'
        # Hidden layer (affine only; nonlinearity applied on the next line)
        h1 = tf.layers.dense(z, n_units, activation=None)
        # Leaky ReLU: small negative slope keeps gradients flowing for x < 0
        h1 = tf.maximum(alpha*h1, h1)
        # Logits and tanh output; tanh matches images rescaled to [-1, 1]
        logits = tf.layers.dense(h1, out_dim, activation=None)
        out = tf.tanh(logits)
        return out
"""
Explanation: Generator network
Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.
Variable Scope
Here we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.
We could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.
To use tf.variable_scope, you use a with statement:
python
with tf.variable_scope('scope_name', reuse=False):
# code here
Here's more from the TensorFlow documentation to get another look at using tf.variable_scope.
Leaky ReLU
TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one. For this you can take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:
$$
f(x) = max(\alpha * x, x)
$$
Tanh Output
The generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.
Exercise: Implement the generator network in the function below. You'll need to return the tanh output. Make sure to wrap your code in a variable scope, with 'generator' as the scope name, and pass the reuse keyword argument from the function to tf.variable_scope.
End of explanation
"""
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
    ''' Build the discriminator network.
        Arguments
        ---------
        x : Input tensor for the discriminator
        n_units: Number of units in hidden layer
        reuse : Reuse the variables with tf.variable_scope
        alpha : leak parameter for leaky ReLU
        Returns
        -------
        out : sigmoid probability that the input is a real image
        logits : raw (pre-sigmoid) scores, used by the loss functions
    '''
    with tf.variable_scope('discriminator', reuse=reuse):
        # Single hidden layer followed by a leaky-ReLU nonlinearity.
        hidden = tf.layers.dense(x, n_units, activation=None)
        hidden = tf.maximum(hidden * alpha, hidden)
        # One output unit: real-vs-fake score.
        logits = tf.layers.dense(hidden, 1, activation=None)
        out = tf.sigmoid(logits)
        return out, logits
"""
Explanation: Discriminator
The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.
Exercise: Implement the discriminator network in the function below. Same as above, you'll need to return both the logits and the sigmoid output. Make sure to wrap your code in a variable scope, with 'discriminator' as the scope name, and pass the reuse keyword argument from the function arguments to tf.variable_scope.
End of explanation
"""
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU (slope applied to negative inputs)
alpha = 0.01
# Label smoothing: real labels become 1 - smooth = 0.9 to help D generalize
smooth = 0.1
"""
Explanation: Hyperparameters
End of explanation
"""
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Generator network; g_model is the generator output (fake images)
g_model = generator(input_z, input_size, n_units=g_hidden_size, reuse=False, alpha=alpha)
# Discriminator network, applied twice: once to real images and once to the
# generator's fakes, sharing weights via reuse=True
d_model_real, d_logits_real = discriminator(input_real, d_hidden_size,reuse=False,alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, d_hidden_size, reuse=True, alpha=alpha)
"""
Explanation: Build network
Now we're building the network from the functions defined above.
First is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.
Then, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.
Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).
Exercise: Build the network from the functions you defined earlier.
End of explanation
"""
# Calculate losses.
# Discriminator: real images should score 1 (smoothed to 0.9), fakes 0.
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
                                                                     labels=tf.ones_like(d_logits_real) * (1 - smooth)))
# Fix: the zero-label tensor should be shaped like the *fake* logits
# (tf.zeros_like(d_logits_real) only worked because both share a shape).
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                                                     labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake
# Generator: wants the discriminator to call its fakes real (labels of 1).
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                                                labels=tf.ones_like(d_logits_fake)))
"""
Explanation: Discriminator and Generator Losses
Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will be sigmoid cross-entropies, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like
python
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
For the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)
The discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.
Finally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants to discriminator to output ones for fake images.
Exercise: Calculate the losses for the discriminator and the generator. There are two discriminator losses, one for real images and one for fake images. For the real image loss, use the real logits and (smoothed) labels of ones. For the fake image loss, use the fake logits with labels of all zeros. The total discriminator loss is the sum of those two losses. Finally, the generator loss again uses the fake logits from the discriminator, but this time the labels are all ones because the generator wants to fool the discriminator.
End of explanation
"""
# Optimizers
learning_rate = 0.002
# Get the trainable variables and split into generator / discriminator
# parts using the variable-scope name prefixes set up earlier.
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
# Fix: actually pass learning_rate -- it was defined but never used, so
# AdamOptimizer silently fell back to its 0.001 default.
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
"""
Explanation: Optimizers
We want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.
For the generator optimizer, we only want to generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables to start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance).
We can do something similar with the discriminator. All the variables in the discriminator start with discriminator.
Then, in the optimizer we pass the variable lists to var_list in the minimize method. This tells the optimizer to only update the listed variables. Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.
Exercise: Below, implement the optimizers for the generator and discriminator. First you'll need to get a list of trainable variables, then split that list into two lists, one for the generator variables and another for the discriminator variables. Finally, using AdamOptimizer, create an optimizer for each network that update the network variables separately.
End of explanation
"""
batch_size = 100
epochs = 100
samples = []   # generator samples captured once per epoch
losses = []    # (d_loss, g_loss) pair recorded per epoch
# Only the generator variables are checkpointed; D isn't needed for sampling.
saver = tf.train.Saver(var_list = g_vars)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for e in range(epochs):
        for ii in range(mnist.train.num_examples//batch_size):
            batch = mnist.train.next_batch(batch_size)
            # Get images, reshape and rescale from [0, 1] to [-1, 1]
            # to match the generator's tanh output range
            batch_images = batch[0].reshape((batch_size, 784))
            batch_images = batch_images*2 - 1
            # Sample random noise for G
            batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
            # Run optimizers: one D step, then one G step per batch
            _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
            _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
        # At the end of each epoch, get the losses and print them out
        # (evaluated on the final batch only, so these are noisy estimates)
        train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
        train_loss_g = g_loss.eval({input_z: batch_z})
        print("Epoch {}/{}...".format(e+1, epochs),
              "Discriminator Loss: {:.4f}...".format(train_loss_d),
              "Generator Loss: {:.4f}".format(train_loss_g))
        # Save losses to view after training
        losses.append((train_loss_d, train_loss_g))
        # Sample from generator as we're training for viewing afterwards.
        # NOTE(review): calling generator(...) here adds new ops to the graph
        # on every epoch; building this sampling op once before the loop
        # would avoid graph growth.
        sample_z = np.random.uniform(-1, 1, size=(16, z_size))
        gen_samples = sess.run(
                       generator(input_z, input_size, reuse=True),
                       feed_dict={input_z: sample_z})
        samples.append(gen_samples)
        saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
    pkl.dump(samples, f)
"""
Explanation: Training
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# losses is a list of (d_loss, g_loss) pairs, one entry per epoch
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
"""
Explanation: Training loss
Here we'll check out the training losses for the generator and discriminator.
End of explanation
"""
def view_samples(epoch, samples):
    """Plot the 16 generator samples saved at `epoch` in a 4x4 grid."""
    fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4,
                             sharey=True, sharex=True)
    flat_axes = axes.flatten()
    for ax, img in zip(flat_axes, samples[epoch]):
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        ax.imshow(img.reshape((28,28)), cmap='Greys_r')
    return fig, axes
# Load the generator samples that were pickled at the end of training
with open('train_samples.pkl', 'rb') as f:
    samples = pkl.load(f)
"""
Explanation: Generator samples from training
Here we can view samples of images from the generator. First we'll look at images taken while training.
End of explanation
"""
_ = view_samples(-1, samples)
"""
Explanation: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
End of explanation
"""
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
# Step through the saved epochs (one grid row every ~len/rows epochs) and,
# within each epoch, a subset of the 16 saved samples (columns).
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
    for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
        ax.imshow(img.reshape((28,28)), cmap='Greys_r')
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
"""
Explanation: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
End of explanation
"""
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
    # Restore the generator weights checkpointed during training
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    # Generate brand-new images from a fresh latent batch
    sample_z = np.random.uniform(-1, 1, size=(16, z_size))
    gen_samples = sess.run(
                   generator(input_z, input_size, reuse=True),
                   feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
"""
Explanation: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
Sampling from the generator
We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!
End of explanation
"""
|
Lattecom/HYStudy | scripts/[HYStudy 20th] Survival Analysis.ipynb | mit | import pandas as pd
import lifelines
import matplotlib.pylab as plt
%matplotlib inline
data = lifelines.datasets.load_dd()
"""
Explanation: Survival Analysis (1)
source : lifelines documents (https://lifelines.readthedocs.io/)
Survival analysis is useful for studying time-to-event data, such as machine failures or user churn.
The most commonly used estimator is the Kaplan-Meier estimator.
package : lifelines
End of explanation
"""
# Peek at the first and last rows of the dataset.
data.head()
data.tail()
from lifelines import KaplanMeierFitter
kmf = KaplanMeierFitter()
# Kaplan-Meier estimator; fit signature for reference:
# KaplanMeierFitter.fit(event_times, event_observed=None,
#                       timeline=None, label='KM-estimate',
#                       alpha=None)
"""Parameters:
event_times: an array, or pd.Series, of length n of times that
the death event occured at
event_observed: an array, or pd.Series, of length n -- True if
the death was observed, False if the event was lost
(right-censored). Defaults all True if event_observed==None
timeline: set the index of the survival curve to this postively increasing array.
label: a string to name the column of the estimate.
alpha: the alpha value in the confidence intervals.
Overrides the initializing alpha for this call to fit only.
Returns:
self, with new properties like 'survival_function_'
"""
# Durations and event-observed indicators (False = right-censored).
T = data["duration"]
C = data["observed"]
kmf.fit(T, event_observed=C)
kmf.survival_function_.plot()
plt.title('Survival function of political regimes');
kmf.plot()
# Median survival time of the fitted curve.
kmf.median_
## Once a leader is elected, there is a 50% chance he or she will be gone within 3 years.
ax = plt.subplot(111)
# Split into democratic vs. non-democratic regimes and overlay both
# Kaplan-Meier curves on the same axes.
dem = (data["democracy"] == "Democracy")
kmf.fit(T[dem], event_observed=C[dem], label="Democratic Regimes")
kmf.plot(ax=ax, ci_force_lines=True)
kmf.fit(T[~dem], event_observed=C[~dem], label="Non-democratic Regimes")
kmf.plot(ax=ax, ci_force_lines=True)
## ci_force_lines : force the confidence intervals to be line plots
plt.ylim(0,1);
plt.title("Lifespans of different global regimes");
"""
Explanation: political leaders
start : birth
end : retirement
End of explanation
"""
|
julienchastang/unidata-python-workshop | notebooks/CartoPy/CartoPy.ipynb | mit | # Set things up
%matplotlib inline
# Importing CartoPy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
"""
Explanation: <a name="top"></a>
<div style="width:1000 px">
<div style="float:right; width:98 px; height:98px;">
<img src="https://raw.githubusercontent.com/Unidata/MetPy/master/metpy/plots/_static/unidata_150x150.png" alt="Unidata Logo" style="height: 98px;">
</div>
<h1>Plotting on a Map with CartoPy</h1>
<h3>Unidata Python Workshop</h3>
<div style="clear:both"></div>
</div>
<hr style="height:2px;">
<div style="float:right; width:250 px"><img src="http://scitools.org.uk/images/cartopy.png" alt="CartoPy" style="height: 200px;"></div>
Overview:
Teaching: 20 minutes
Exercises: 20 minutes
Questions
How do we plot on a map in Python?
How do I specify a map projection?
How do I tell CartoPy how to reference my data?
How do I add map features to a CartoPy plot?
Objectives
<a href="#basicfigure">Create a basic figure using CartoPy</a>
<a href="#mapfeatures">Add maps to the figure</a>
<a href="#plottingdata">Plot georeferenced data on the figure</a>
<a name="basicfigure"></a>
1. Basic CartoPy Plotting
High level API for dealing with maps
CartoPy allows you to plot data on a 2D map.
Support many different map projections
Support for shapefiles from the GIS world
End of explanation
"""
# Works with matplotlib's built-in transform support: passing a CartoPy
# projection as `projection` yields a map-aware axes.
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.Robinson())
# Sets the extent to cover the whole globe
ax.set_global()
# Adds the built-in background map image
ax.stock_img()
"""
Explanation: The simplest plot we can make sets a projection with no parameters. The one below uses the Robinson projection:
End of explanation
"""
# Set up a globe with a specific radius (meters)
globe = ccrs.Globe(semimajor_axis=6371000.)
# Set up a Lambert Conformal projection on that globe
proj = ccrs.LambertConformal(standard_parallels=[25.0], globe=globe)
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=proj)
# Sets the extent using a lon/lat box: [west, east, south, north]
ax.set_extent([-130, -60, 20, 55])
ax.stock_img()
"""
Explanation: We also have fine-tuned control over the globe used in the projection as well as lots of standard parameters, which depend on individual projections:
End of explanation
"""
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
ax.stock_img()
# Overlay coastlines on top of the background image
ax.add_feature(cfeature.COASTLINE)
ax.set_extent([-130, -60, 20, 55])
"""
Explanation: <a href="#top">Top</a>
<hr style="height:2px;">
<a name="mapfeatures"></a>
2. Adding maps to CartoPy
CartoPy provides a couple helper methods for adding maps to the plot:
End of explanation
"""
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
# Add a variety of built-in map features (drawn at their default scale)
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.OCEAN)
ax.add_feature(cfeature.COASTLINE)
# add_feature also accepts matplotlib styling kwargs
ax.add_feature(cfeature.BORDERS, linestyle=':')
ax.add_feature(cfeature.STATES, linestyle=':')
ax.add_feature(cfeature.LAKES, alpha=0.5)
ax.add_feature(cfeature.RIVERS, edgecolor='tab:green')
ax.set_extent([-130, -60, 20, 55])
"""
Explanation: Cartopy also has a lot of built-in support for a variety of map features:
End of explanation
"""
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
# Base features at the default scale
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.OCEAN)
ax.add_feature(cfeature.COASTLINE)
# Borders/states/lakes/rivers drawn at the finer 50m scale via with_scale;
# matplotlib styling kwargs still apply
ax.add_feature(cfeature.BORDERS.with_scale('50m'), linestyle=':')
ax.add_feature(cfeature.STATES.with_scale('50m'), linestyle=':')
ax.add_feature(cfeature.LAKES.with_scale('50m'), alpha=0.5)
ax.add_feature(cfeature.RIVERS.with_scale('50m'), edgecolor='tab:green')
ax.set_extent([-130, -60, 20, 55])
"""
Explanation: The map features are available at several different scales depending on how large the area you are covering is. The scales can be accessed using the with_scale method. Natural Earth features are available at 110m, 50m and 10m.
End of explanation
"""
from metpy.plots import USCOUNTIES
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(1, 3, 1, projection=proj)
ax2 = fig.add_subplot(1, 3, 2, projection=proj)
ax3 = fig.add_subplot(1, 3, 3, projection=proj)
# One panel per county-boundary resolution, coarsest (20m) to finest (500k)
for scale, axis in zip(['20m', '5m', '500k'], [ax1, ax2, ax3]):
    axis.set_extent([270.25, 270.9, 38.15, 38.75], ccrs.Geodetic())
    axis.add_feature(USCOUNTIES.with_scale(scale), edgecolor='black')
"""
Explanation: You can also grab other features from the Natural Earth project: http://www.naturalearthdata.com/
US Counties
MetPy has US Counties built in at the 20m, 5m, and 500k resolutions.
End of explanation
"""
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.BORDERS, linewidth=2)
ax.add_feature(cfeature.STATES, linestyle='--', edgecolor='black')
# Deliberately "wrong" for teaching purposes: without a transform, (-105, 40)
# is interpreted in the projection's native coordinates, not lon/lat --
# see the discussion and corrected version below.
ax.plot(-105, 40, marker='o', color='tab:red')
ax.set_extent([-130, -60, 20, 55])
"""
Explanation: <a href="#top">Top</a>
<hr style="height:2px;">
<a name="plottingdata"></a>
3. Plotting Data
CartoPy supports all of the matplotlib plotting options you would expect on a map. It handles transforming your data between different coordinate systems transparently, provided you provide the correct information. (More on this later...). To start, let's put a marker at -105, 40:
End of explanation
"""
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.BORDERS, linewidth=2)
ax.add_feature(cfeature.STATES, linestyle='--', edgecolor='black')
# Declare the data's coordinate system so CartoPy reprojects it correctly;
# PlateCarree() means plain longitude/latitude.
data_projection = ccrs.PlateCarree()
ax.plot(-105, 40, marker='o', color='tab:red', transform=data_projection)
ax.set_extent([-130, -60, 20, 55])
"""
Explanation: So that did not succeed at putting a marker at -105 longitude, 40 latitude (Boulder, CO). Instead, what actually happened is that it put the marker at (-105, 40) in the map projection coordinate system; in this case that's a Lambert Conformal projection, and x,y are assumed in meters relative to the origin of that coordinate system. To get CartoPy to treat it as longitude/latitude, we need to tell it that's what we're doing. We do this through the use of the transform argument to all of the plotting functions.
End of explanation
"""
# Create some synthetic gridded wind data
import numpy as np
from metpy.calc import wind_speed
from metpy.units import units
# Note that all of these winds have u = 0 -> south wind;
# v ranges 10-50 knots across the 5x5 grid
v = np.full((5, 5), 10, dtype=np.float64) + 10 * np.arange(5) * units.knots
u = np.zeros_like(v) * units.knots
# NOTE(review): speed is computed but never used below
speed = wind_speed(u, v)
# Create arrays of longitude and latitude
x = np.linspace(-120, -60, 5)
y = np.linspace(30, 55, 5)
# Plot as normal
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.BORDERS)
# Plot wind barbs--CartoPy handles reprojecting the vectors properly for the
# coordinate system (lon/lat declared via transform=PlateCarree())
ax.barbs(x, y, u.m, v.m, transform=ccrs.PlateCarree(), color='tab:blue')
ax.set_extent([-130, -60, 20, 55])
"""
Explanation: This approach by CartoPy separates the data coordinate system from the coordinate system of the plot. It allows you to take data in any coordinate system (lon/lat, Lambert Conformal) and display it in any map you want. It also allows you to combine data from various coordinate systems seamlessly. This extends to all plot types, not just plot:
End of explanation
"""
# YOUR CODE GOES HERE
"""
Explanation: Exercise
Create a map, on a Mercator Projection, which at least has coastlines and country and state borders. Bonus points for putting on colored land and oceans, or other map features.
Plot our location correctly on the map.
Set the bounds of the map to zoom in mostly over our state/region.
End of explanation
"""
# %load solutions/map.py
"""
Explanation: Solution
End of explanation
"""
|
VenkateshBejjenki/Machine_Learning_Specialization | Author_Classification/bag.ipynb | gpl-3.0 | # Importing pandas library
# Importing pandas library
import pandas as pd

# Load the data set: each row pairs a student roll number with the raw
# assignment text. The file is comma-separated, so read_csv is the idiomatic
# reader (read_table defaults to tab separation and only worked here because
# sep=',' was passed explicitly).
df = pd.read_csv('data.csv',
                 sep=',',
                 header=None,
                 names=['rollNo', 'textData'])

# Output printing out first 5 rows
df.head()
# from sklearn.feature_extraction import text
"""
Explanation: Project Title : Author Labeling by text classification
Introduction :
Text classification is one of the major applications of Machine Learning. Most text classification projects are built by applying one of the standard Machine Learning algorithms. In this project we will use the Naive Bayes algorithm to label the text.
Input Data Preprocessing :
The student assignments from an English class are used as input for this project, and the task is to label each text with its author (student). The data we received contained repetitive content for some students; such files were dropped from the input data, and student records with too few files were also dropped.
Thus evolved data is processed to generate the ".csv" file which is used as input dataset for this project. It contains two columns, one with student roll number and other with corresponding text.
Working Theme
End of explanation
"""
# Shape is used to get the details of the data set.
# Returns (rows, columns); per the notebook text this is (1028, 2).
df.shape
"""
Explanation: The above table shows the first 5 tuples of the dataset which contains two columns namely the roll no and text of the assignment.
End of explanation
"""
# split into training and testing sets
# sklearn.cross_validation was deprecated and later removed; train_test_split
# now lives in sklearn.model_selection with the same signature and behaviour.
from sklearn.model_selection import train_test_split

# X: assignment text, y: author roll number. random_state=1 makes the split
# reproducible; the default test fraction (25%) is used.
X_train, X_test, y_train, y_test = train_test_split(df['textData'],
                                                    df['rollNo'],
                                                    random_state=1)

# Printing out the number of rows we have in each our training and testing data.
print('Number of rows in the total set: {}'.format(df.shape[0]))
print('Number of rows in the training set: {}'.format(X_train.shape[0]))
print('Number of rows in the test set: {}'.format(X_test.shape[0]))
"""
Explanation: The dataset contains 1028 entries (tuples) and 2 columns as described above.
Splitting Training and testing sets
Spliting the dataset into a training and testing set by using the train_test_split method in sklearn.
Spliting the data by using the following variables:<br />
-> X_train is our training data for the 'textData' column. <br />
-> y_train is our training data for the 'rollNo' column<br />
-> X_test is our testing data for the 'textData' column.<br />
-> y_test is our testing data for the 'rollNo' column.<br />
End of explanation
"""
from sklearn.feature_extraction.text import CountVectorizer

# Instantiate the CountVectorizer method.
# stop_words="english" drops common English words; the token_pattern keeps
# only tokens of two or more word characters (this is sklearn's default
# pattern, restated explicitly).
count_vector = CountVectorizer(stop_words="english", token_pattern=u'(?u)\\b\\w\\w+\\b')

# Fit the training data and then return the matrix
# (learns the vocabulary from X_train and yields a sparse document-term
# count matrix).
training_data = count_vector.fit_transform(X_train)

# Transform testing data and return the matrix. Note we are not fitting the testing data into the CountVectorizer()
# (the test set is mapped onto the vocabulary already learned from training).
testing_data = count_vector.transform(X_test)
"""
Explanation: Applying Bag of Words processing to our dataset
We have split the data, next we will generate Bag of words and convert our data into the desired matrix format.
We will be using CountVectorizer() which is in sklearn library.<br />
-> First we have to fit our training data (X_train) into CountVectorizer() and return the matrix.<br />
-> Later we have to transform our testing data (X_test) to return the matrix.<br />
Here X_train is our training data for the 'textData' column in our dataset and we will be using this to train our model.<br/>
X_test is our testing data for the 'textData' column and this is the data we will be using(after transformation to a matrix) to make predictions on. We will then compare those predictions with y_test later.
End of explanation
"""
from sklearn.naive_bayes import MultinomialNB

# Multinomial NB models integer word counts per class, matching the
# document-term matrices produced by CountVectorizer above.
naive_bayes = MultinomialNB()
# Learn per-class word-count statistics from the training matrix and labels.
naive_bayes.fit(training_data, y_train)
"""
Explanation: Learning a vocabulary dictionary for the training data and then transforming the data into a document-term matrix and next for the testing data here we are only transforming the data into a document-term matrix. <br />
We have passed arguments to customize the count_vector which involved removing stop words of english language and puntuations.
Naive Bayes implementation using scikit-learn :
We will use sklearns sklearn.naive_bayes method to make predictions on our dataset.
Specifically, we will use the multinomial Naive Bayes implementation which is suitable for classification with discrete features (such as in our case, word counts for text classification). It takes in integer word counts as its input.
Loading the training data into the variable 'training_data' and the testing data into the variable 'testing_data'.
We will import the MultinomialNB classifier and fit the training data into the classifier using fit() and we will train the classifier using 'training_data' and 'y_train' which we have from our split.
End of explanation
"""
# Predict an author roll number for every document in the held-out test matrix.
predictions = naive_bayes.predict(testing_data)
"""
Explanation: Our algorithm has been trained on the training data set; we can now make predictions on the test data
stored in 'testing_data' using predict().
End of explanation
"""
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

# Compare predicted labels against the true test labels. This is a
# multi-class problem, so precision/recall/F1 use average="weighted":
# per-class scores weighted by each class's support.
print('Accuracy score: ', format(accuracy_score(y_test, predictions)))
print('Precision score: ', format(precision_score(y_test, predictions,average="weighted")))
print('Recall score: ', format(recall_score(y_test, predictions,average="weighted")))
print('F1 score: ', format(f1_score(y_test, predictions,average="weighted")))
"""
Explanation: Evaluating our model :
Computing the accuracy, precision, recall and F1 scores of our model using your test data 'y_test' and the predictions
we made earlier stored in the 'predictions' variable.
End of explanation
"""
|
w4zir/ml17s | lectures/.ipynb_checkpoints/lec09-logistic-regression-example-checkpoint.ipynb | mit | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors
df = pd.read_csv('datasets/exam_dataset1.csv', encoding='utf-8')
n_neighbors = 5
X = np.array(df[['exam1','exam2']])
y = np.array(df[['admission']]).ravel()
h = .02 # step size in the mesh
# # Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
print(clf.score(X,y))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("2-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
"""
Explanation: CSAL4243: Introduction to Machine Learning
Muhammad Mudassir Khan (mudasssir.khan@ucp.edu.pk)
Lecture 9: Logistic Regression and kNN Examples
Overview
University Admission Dataset
K - Nearest Neighbor (kNN) Classifier
Logistic-Regression
Iris Flower Dataset
K - Nearest Neighbor (kNN) Classifier
Logistic-Regression
Resources
Credits
<br>
<br>
University Admission Dataset
Find whether a student get admitted into a university based on his score in two exams taken by the university. You have historical data of previous applicants who got admitted and rejected based on their score on these two exams.
K - Nearest Neighbor (kNN) Classifier
End of explanation
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap  # imported but not used in this cell
from sklearn import linear_model

# Exam scores (exam1, exam2) with a binary admission label per applicant.
df = pd.read_csv('datasets/exam_dataset1.csv', encoding='utf-8')

X = np.array(df[['exam1','exam2']])
y = np.array(df[['admission']]).ravel()

h = .02  # step size in the mesh

# C is the inverse regularisation strength, so C=1e5 means very weak
# L2 regularisation (close to plain logistic regression).
logreg = linear_model.LogisticRegression(C=1e5)

# we create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, y)
# Training-set accuracy.
print(logreg.score(X,y))

# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Exam 1')
plt.ylabel('Exam 2')

plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())

plt.show()
"""
Explanation: Logistic Regression
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets

# k = 1: each query point takes the label of its single nearest neighbour.
n_neighbors = 1

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target

h = .02  # step size in the mesh

# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])

# Fit and visualise the classifier with uniform and distance-weighted votes.
for weights in ['uniform', 'distance']:
    # we create an instance of Neighbours Classifier and fit the data.
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
    clf.fit(X, y)
    # Training-set accuracy.
    print(clf.score(X,y))

    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title("3-Class classification (k = %i, weights = '%s')"
              % (n_neighbors, weights))

plt.show()
"""
Explanation: <br>
Iris Flower Dataset
Using sepal length and width, predict the type of flower.
K - Nearest Neighbor (kNN) Classifier
End of explanation
"""
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target

h = .02  # step size in the mesh

# C is the inverse regularisation strength; 1e5 means very weak regularisation.
logreg = linear_model.LogisticRegression(C=1e5)

# we create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# BUG FIX: the score was computed against lowercase `y`, which is not defined
# in this cell (it leaked from an earlier cell); the labels here are `Y`.
print(logreg.score(X, Y))

# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')

plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())

plt.show()

# Fitted parameters (one row / entry per class in the one-vs-rest scheme).
logreg.coef_

logreg.intercept_
"""
Explanation: <br>
Logistic Regression
End of explanation
"""
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors
from matplotlib.colors import ListedColormap
from sklearn import linear_model

# Single-feature regression example (x -> y) illustrating manually-built
# polynomial features with an L2-regularised (Ridge) fit.
df_reg = pd.read_csv('datasets/example2.csv', encoding='utf-8')

X = np.array(df_reg[['x']])
y = np.array(df_reg[['y']]).ravel()
# X = np.array(df_reg[['x1','x2']])
# y = np.array(df_reg[['label']]).ravel()

plt.scatter(X,y)
plt.show()

X.shape

# Expand the single feature into x, x^2, x^3, x^4 so a linear model can fit
# a degree-4 polynomial curve.
df_reg["x_2"] = df_reg["x"]**2
df_reg["x_3"] = df_reg["x"]**3
df_reg["x_4"] = df_reg["x"]**4

X = np.array(df_reg[['x','x_2','x_3','x_4']])

reg = linear_model.Ridge()
# we create an instance of the Ridge regressor and fit the data.
# (the earlier "Neighbours Classifier" comment was a copy-paste leftover)
reg.fit(X, y)
# Training R^2 of the fitted model.
print(reg.score(X,y))

# Evaluate the fitted polynomial on a dense grid to draw a smooth curve;
# the grid must carry the same four polynomial columns as the training X.
x_line = np.linspace(0,8,100)
x_line = np.array([x_line,x_line**2,x_line**3,x_line**4]).T
y_line = reg.predict(x_line)

reg.intercept_

plt.scatter(X[:,0],y)
plt.plot(x_line[:,0],y_line)
plt.show()
"""
Explanation: Regularization Example
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/inm/cmip6/models/sandbox-1/atmoschem.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inm', 'sandbox-1', 'atmoschem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era: CMIP6
Institute: INM
Source ID: SANDBOX-1
Topic: Atmoschem
Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry.
Properties: 84 (39 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:05
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
# Left as 0 (unpublished) until the required property fields below are filled.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmospheric chemistry model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmospheric chemistry model code.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Chemistry Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Form of prognostic variables in the atmospheric chemistry component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of advected tracers in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry calculations (not advection) generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.8. Coupling With Chemical Reactivity
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry transport scheme turbulence is couple with chemical reactivity?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the evolution of a given variable
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemical species advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Split Operator Chemistry Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemistry (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.5. Split Operator Alternate Order
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.6. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the atmospheric chemistry model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.7. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Timestep Framework --> Split Operator Order
**
4.1. Turbulence
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.2. Convection
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Precipitation
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.4. Emissions
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.5. Deposition
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.6. Gas Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.9. Photo Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.10. Aerosols
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. &Document the relative weight given to climate performance metrics versus process oriented metrics, &and on the possible conflicts with parameterization level tuning. In particular describe any struggle &with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the atmopsheric chemistry grid
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
*Does the atmospheric chemistry grid match the atmosphere grid?*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 7.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview of transport implementation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Use Atmospheric Transport
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is transport handled by the atmosphere, rather than within atmospheric chemistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Transport Details
Is Required: FALSE Type: STRING Cardinality: 0.1
If transport is handled within the atmospheric chemistry scheme, describe it.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric chemistry emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Emissions Concentrations --> Surface Emissions
**
10.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via any other method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Gas Phase Chemistry
Atmospheric chemistry gas phase chemistry
13.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview gas phase atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Species included in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.3. Number Of Bimolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of bi-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.4. Number Of Termolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of ter-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.7. Number Of Advected Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of advected species in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.8. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.9. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.10. Wet Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.11. Wet Oxidation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry stratospheric heterogeneous chemistry
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview stratospheric heterogenous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
"""
Explanation: 14.2. Gas Phase Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Gas phase species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
"""
Explanation: 14.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.5. Sedimentation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sedimentation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview tropospheric heterogenous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Gas Phase Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of gas phase species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
"""
Explanation: 15.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the tropospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric photo chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 16.2. Number Of Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the photo-chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Photolysis scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.2. Environmental Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.)
End of explanation
"""
|
Alexoner/skynet | notebooks/Dropout.ipynb | mit | # As usual, a bit of setup
import time
import numpy as np
import matplotlib.pyplot as plt
from skynet.neural_network.classifiers.fc_net import *
from skynet.utils.data_utils import get_CIFAR10_data
from skynet.utils.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from skynet.solvers.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """Return the maximum elementwise relative error between arrays x and y.

    The denominator is clamped to at least 1e-8 so entries where both
    arrays are (near) zero do not trigger a division by zero.
    """
    diff = np.abs(x - y)
    scale = np.maximum(1e-8, np.abs(x) + np.abs(y))
    return np.max(diff / scale)
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.items():
print('%s: ' % k, v.shape)
"""
Explanation: Dropout
Dropout [1] is a technique for regularizing neural networks by randomly setting some features to zero during the forward pass. In this exercise you will implement a dropout layer and modify your fully-connected network to optionally use dropout.
[1] Geoffrey E. Hinton et al, "Improving neural networks by preventing co-adaptation of feature detectors", arXiv 2012
End of explanation
"""
# Sanity check for dropout_forward. With an input mean of ~10, the test-time
# output should be untouched, and the fraction of zeroed activations at train
# time should reflect the dropout parameter p (whether it is the keep or the
# drop probability depends on the layer implementation -- confirm there).
x = np.random.randn(500, 500) + 10

for p in [0.3, 0.6, 0.75]:
    out, _ = dropout_forward(x, {'mode': 'train', 'p': p})
    out_test, _ = dropout_forward(x, {'mode': 'test', 'p': p})

    print('Running tests with p = ', p)
    print('Mean of input: ', x.mean())
    print('Mean of train-time output: ', out.mean())
    print('Mean of test-time output: ', out_test.mean())
    print('Fraction of train-time output set to zero: ', (out == 0).mean())
    print('Fraction of test-time output set to zero: ', (out_test == 0).mean())
    print()
"""
Explanation: Dropout forward pass
In the file neural_network/layers.py, implement the forward pass for dropout. Since dropout behaves differently during training and testing, make sure to implement the operation for both modes.
Once you have done so, run the cell below to test your implementation.
End of explanation
"""
# Numerical gradient check for dropout_backward: compare the analytic dx
# against the numeric gradient of dropout_forward.
x = np.random.randn(10, 10) + 10
dout = np.random.randn(*x.shape)

# A fixed seed makes the same dropout mask be drawn on every forward call,
# which is required for the numeric gradient to be consistent.
dropout_param = {'mode': 'train', 'p': 0.8, 'seed': 123}
out, cache = dropout_forward(x, dropout_param)
dx = dropout_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda xx: dropout_forward(xx, dropout_param)[0], x, dout)

# Relative error should be very small (typically < 1e-8) if correct.
print('dx relative error: ', rel_error(dx, dx_num))
"""
Explanation: Dropout backward pass
In the file neural_network/layers.py, implement the backward pass for dropout. After doing so, run the following cell to numerically gradient-check your implementation.
End of explanation
"""
# Gradient-check a small FullyConnectedNet for several dropout settings
# (0 disables dropout entirely). A fixed seed keeps the dropout masks
# reproducible so the numeric and analytic gradients agree.
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))

for dropout in [0, 0.25, 0.5]:
    print('Running check with dropout = ', dropout)
    model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
                              weight_scale=5e-2, dtype=np.float64,
                              dropout=dropout, seed=123)

    loss, grads = model.loss(X, y)
    print('Initial loss: ', loss)

    # Check each parameter's analytic gradient against a numeric estimate.
    for name in sorted(grads):
        f = lambda _: model.loss(X, y)[0]
        grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
        print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))
    print()
"""
Explanation: Fully-connected nets with Dropout
In the file neural_network/classifiers/fc_net.py, modify your implementation to use dropout. Specifically, if the constructor of the net receives a nonzero value for the dropout parameter, then the net should add dropout immediately after every ReLU nonlinearity. After doing so, run the following to numerically gradient-check your implementation.
End of explanation
"""
# Train two identical nets, one with dropout and one without
num_train = 500
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
solvers = {}
dropout_choices = [0, 0.75]
for dropout in dropout_choices:
model = FullyConnectedNet([500], dropout=dropout)
print(dropout)
solver = Solver(model, small_data,
num_epochs=25, batch_size=100,
update_rule='adam',
optim_config={
'learning_rate': 5e-4,
},
verbose=True, print_every=100)
solver.train()
solvers[dropout] = solver
# Plot train and validation accuracies of the two models
train_accs = []
val_accs = []
for dropout in dropout_choices:
solver = solvers[dropout]
train_accs.append(solver.train_acc_history[-1])
val_accs.append(solver.val_acc_history[-1])
plt.subplot(3, 1, 1)
for dropout in dropout_choices:
plt.plot(solvers[dropout].train_acc_history, 'o', label='%.2f dropout' % dropout)
plt.title('Train accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(ncol=2, loc='lower right')
plt.subplot(3, 1, 2)
for dropout in dropout_choices:
plt.plot(solvers[dropout].val_acc_history, 'o', label='%.2f dropout' % dropout)
plt.title('Val accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(ncol=2, loc='lower right')
plt.gcf().set_size_inches(15, 15)
plt.show()
"""
Explanation: Regularization experiment
As an experiment, we will train a pair of two-layer networks on 500 training examples: one will use no dropout, and one will use a dropout probability of 0.75. We will then visualize the training and validation accuracies of the two networks over time.
End of explanation
"""
|
rishuatgithub/MLPy | nlp/UPDATED_NLP_COURSE/04-Semantics-and-Sentiment-Analysis/04-Sentiment-Analysis-Assessment-Solutions.ipynb | apache-2.0 | # Import spaCy and load the language library. Remember to use a larger model!
import spacy
nlp = spacy.load('en_core_web_md')
# Choose the words you wish to compare, and obtain their vectors
word1 = nlp.vocab['wolf'].vector
word2 = nlp.vocab['dog'].vector
word3 = nlp.vocab['cat'].vector
# Import spatial and define a cosine_similarity function
from scipy import spatial
cosine_similarity = lambda x, y: 1 - spatial.distance.cosine(x, y)
# Write an expression for vector arithmetic
# For example: new_vector = word1 - word2 + word3
new_vector = word1 - word2 + word3
# List the top ten closest vectors in the vocabulary to the result of the expression above
# Rank the whole vocabulary by cosine similarity to the arithmetic result
# and print the ten closest lowercase alphabetic words.
computed_similarities = []
for word in nlp.vocab:
    # Only consider lowercase, purely alphabetic entries that carry a vector.
    if word.has_vector and word.is_lower and word.is_alpha:
        similarity = cosine_similarity(new_vector, word.vector)
        computed_similarities.append((word, similarity))

computed_similarities = sorted(computed_similarities, key=lambda item: -item[1])

print([w[0].text for w in computed_similarities[:10]])
"""
Explanation: <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
Sentiment Analysis Assessment - Solution
Task #1: Perform vector arithmetic on your own words
Write code that evaluates vector arithmetic on your own set of related words. The goal is to come as close to an expected word as possible. Please feel free to share success stories in the Q&A Forum for this section!
End of explanation
"""
def vector_math(a, b, c, n=10):
    """Perform word-vector arithmetic a - b + c and return the closest words.

    Parameters
    ----------
    a, b, c : str
        Vocabulary words; the expression evaluated is
        vector(a) - vector(b) + vector(c).
    n : int, optional
        Number of closest words to return. Defaults to 10, preserving the
        original behavior.

    Returns
    -------
    list of str
        The n vocabulary words (lowercase, purely alphabetic, with vectors)
        whose vectors are most cosine-similar to the arithmetic result.
    """
    new_vector = nlp.vocab[a].vector - nlp.vocab[b].vector + nlp.vocab[c].vector

    computed_similarities = []
    for word in nlp.vocab:
        # Restrict to lowercase, purely alphabetic entries that have a vector.
        if word.has_vector and word.is_lower and word.is_alpha:
            similarity = cosine_similarity(new_vector, word.vector)
            computed_similarities.append((word, similarity))

    # Sort by similarity, most similar first.
    computed_similarities = sorted(computed_similarities, key=lambda item: -item[1])

    return [w[0].text for w in computed_similarities[:n]]
# Test the function on known words:
vector_math('king','man','woman')
"""
Explanation: CHALLENGE: Write a function that takes in 3 strings, performs a-b+c arithmetic, and returns a top-ten result
End of explanation
"""
# Import SentimentIntensityAnalyzer and create an sid object
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
# Write a review as one continuous string (multiple sentences are ok)
review = 'This movie portrayed real people, and was based on actual events.'
# Obtain the sid scores for your review
sid.polarity_scores(review)
"""
Explanation: Task #2: Perform VADER Sentiment Analysis on your own review
Write code that returns a set of SentimentIntensityAnalyzer polarity scores based on your own written review.
End of explanation
"""
def review_rating(string):
    """Classify *string* as 'Positive', 'Negative' or 'Neutral' using VADER.

    Uses the compound polarity score from the module-level ``sid`` analyzer:
    a compound of exactly zero is neutral, positive values are positive,
    and everything else is negative.
    """
    compound = sid.polarity_scores(string)['compound']
    if compound > 0:
        return 'Positive'
    if compound < 0:
        return 'Negative'
    return 'Neutral'
# Test the function on your review above:
review_rating(review)
"""
Explanation: CHALLENGE: Write a function that takes in a review and returns a score of "Positive", "Negative" or "Neutral"
End of explanation
"""
|
tiagoft/inteligencia_computacional | regressao.ipynb | mit | # Inicializacao
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
def nova_mlp(entradas, saidas, camadas):
    """Create a new MLP, represented as a list of weight matrices.

    Parameters:
        entradas: number of input units.
        saidas: number of output units.
        camadas: list with the size of each hidden layer, in order.

    Returns:
        A list of weight matrices, one per layer; pesos[i] has shape
        (units in layer i+1, units in layer i) and is initialized with
        uniform random values in [0, 1).
    """
    lista_de_camadas = [entradas] + camadas + [saidas]
    pesos = []
    # range() instead of the Python-2-only xrange() keeps this working on
    # both Python 2 and Python 3.
    for i in range(len(lista_de_camadas) - 1):
        pesos.append(np.random.random((lista_de_camadas[i + 1], lista_de_camadas[i])))
    return pesos
def ff_mlp(entradas, pesos):
    """Forward pass of the MLP: tanh hidden layers and a linear output layer.

    Parameters:
        entradas: input column vector(s), shape (n_inputs, n_samples).
        pesos: list of weight matrices as produced by nova_mlp().

    Returns:
        The network output, shape (n_outputs, n_samples).
    """
    s = entradas
    # range() instead of the Python-2-only xrange() keeps this working on
    # both Python 2 and Python 3.
    for i in range(len(pesos) - 1):
        s = np.tanh(np.dot(pesos[i], s))
    # The output layer is linear (no activation), suitable for regression.
    s = np.dot(pesos[-1], s)
    return s
def backpropagation_step(entradas, saidas, pesos, passo=0.01):
    """Perform one gradient-descent step of error backpropagation.

    entradas -- input column vectors, shape (n_inputs, n_samples)
    saidas   -- target outputs, shape (n_outputs, n_samples)
    pesos    -- list of weight matrices (updated in place and returned)
    passo    -- step length; each layer moves `passo` along its
                unit-normalised negative gradient

    Returns the updated `pesos` list.
    """
    derivadas = []
    # Forward pass, keeping every intermediate activation for the backward pass.
    resultados_intermediarios = [entradas]
    s = entradas
    # range() instead of the Python-2-only xrange(), so the code also runs on Python 3.
    for i in range(len(pesos)-1):
        s = np.tanh(np.dot(pesos[i],s))
        resultados_intermediarios.append(s)
    s = np.dot(pesos[-1],s)
    resultados_intermediarios.append(s)
    # Derivada do erro em relacao a saida estimada
    dedye = (resultados_intermediarios[-1] - saidas)
    # Derivada em relacao a camada de saida linear
    dedb = np.dot(dedye, resultados_intermediarios[-2].T)
    # Para cada camada nao-linear, calcula a nova derivada na forma:
    deda = dedye
    for i in range(len(pesos)-2, -1, -1):
        linear = np.dot(pesos[i], resultados_intermediarios[i])
        flz = (1-np.tanh(linear)**2)  # derivative of tanh
        deda = np.dot(pesos[i+1].T, deda) # deriv_front
        derivada = np.dot(deda * flz, resultados_intermediarios[i].T)
        derivadas.insert(0, derivada)
    derivadas.append(dedb)
    # Executa um passo na direcao contraria da derivada
    # (each derivative is normalised, so every layer moves exactly `passo`).
    for i in range(len(derivadas)):
        n = np.linalg.norm(derivadas[i])
        pesos[i] -= passo * derivadas[i]/n
    return pesos
def erro(y, y_e):
    """Sum of squared differences between target y and estimate y_e."""
    residuo = y - y_e
    return np.sum(residuo * residuo)
"""
Explanation: Regressão, Sobre-Ajuste e Adequação de Modelos
As redes neurais multi-camadas possuem capacidade de aproximação universal. Porém, como vimos, nem sempre é trivial encontrar a configuração de pesos coneccionistas que melhor modela - e extrapola - um determinado conjunto de dados. Essa dificuldade pode ser decorrente da falta de informação nas descrições vetoriais dos dados, mas também pode se dever ao excesso de parâmetros da rede. Esses dois conceitos devem estar claros para o estudante que deseja seguir adiante, então recomendo que algum tempo seja reservado para fazer dois desenhos, cada um explicando uma dessas dificuldades.
Além disso, antes de prosseguir com esta interação, o estudante deve estar familiarizado preparado para discutir:
As diferenças entre sistemas de aprendizado paramétricos e sistemas não-paramétricos,
Como funciona o aprendizado por gradiente descendente e retropropagação de erro em redes MLP.
Vamos tomar por base o código de retropropagação de erro construído na interação anterior:
End of explanation
"""
# Ground-truth parabola plus Gaussian noise (sigma = 0.5) used as the regression target.
x = np.linspace(-3, 3, num=100)
y_ = x**2
y = y_ + np.random.normal(0, 0.5, 100) # Creating a parabola with noise
plt.figure();
plt.plot(x, y);
plt.ylabel('y');
plt.xlabel('x');
"""
Explanation: Regressão
A regressão é a tarefa de encontrar um modelo para a função que leva dados de entrada a dados de saída. O modelo, idealmente, deve ser útil para a aplicação em questão - comumente, realizar predições sobre o novos dados que poderiam ser gerados pelo mesmo processo.
Sobre-ajuste e modelos inadequados
Sobre-ajuste é um fenômeno que ocorre quando um sistema é muito ajustado para um determinado conjunto de entrada e, assim, perde capacidade de generalização. Esse fenômeno é observável e, também, é previsto em teoria. Trata-se do no free lunch theorem, que mostra que quando um sistema se torna mais especializado numa tarefa, ele se tornará menos especializado em todas as outras tarefas. Assim, o procedimento de treino por gradiente descendente só é eficaz na medida que os dados de treinamento são representantes eficazes para os dados de teste.
Esse fenômeno fica bastante visível em problemas de regressão. A seguir, usarei uma rede neural para aproximar uma parábola:
End of explanation
"""
# Caso 1: usando as 30 primeiras amostras (30% do total) para o aprendizado
x.shape = (1, x.size)
y.shape = (1, y.size)
x_treino = x[:,0:30]
x_teste = x[:,30:100]
y_treino = y[:,0:30]
y_teste = y[:,30:100]
mlp0 = nova_mlp(entradas=1, saidas=1, camadas=[13])
# Processo de treinamento
n_passos = 1000
eqm_treino = np.zeros((n_passos+1))
eqm_treino[0] = erro(y_treino, ff_mlp(x_treino, mlp0))
eqm_teste = np.zeros((n_passos+1))
eqm_teste[0] = erro(y_teste, ff_mlp(x_teste, mlp0))
for i in xrange(n_passos):
mlp0 = backpropagation_step(x_treino, y_treino, mlp0)
eqm_treino[i+1] = erro(y_treino, ff_mlp(x_treino, mlp0))
eqm_teste[i+1] = erro(y_teste, ff_mlp(x_teste, mlp0))
plt.figure();
plt.plot(range(n_passos+1), eqm_treino);
plt.plot(range(n_passos+1), eqm_teste);
plt.ylabel('EQM');
plt.xlabel('Passos');
plt.title('EQM nos conjuntos de treino e teste');
plt.figure();
plt.plot(x.T, y.T);
plt.plot(x_treino.T, ff_mlp(x_treino, mlp0).T);
plt.plot(x_teste.T, ff_mlp(x_teste, mlp0).T);
plt.ylabel('Y');
plt.xlabel('X');
plt.title('Aproximacao nos conjuntos de teste e treino');
"""
Explanation: Regressão da parábola usando uma rede MLP
End of explanation
"""
# Caso 2: usando 30 amostras aleatoriamente escolhidas (30% do total) para o aprendizado
import random
x.shape = (x.size)
y.shape = (y.size)
train_array = np.zeros((x.size)).astype(bool)
test_array = np.ones((x.size)).astype(bool)
while np.sum(train_array) < 30:
n = int(random.random() * x.size)
test_array[n] = False
train_array[n] = True
x_treino = x[train_array]
x_teste = x[test_array]
y_treino = y[train_array]
y_teste = y[test_array]
x_treino.shape = (1,x_treino.size)
y_treino.shape = (1,y_treino.size)
x_teste.shape = (1,x_teste.size)
y_teste.shape = (1,y_teste.size)
#x_treino = np.vstack((x_treino, np.ones(x_treino.size)))
#x_teste = np.vstack((x_teste, np.ones(x_teste.size)))
mlp0 = nova_mlp(entradas=1, saidas=1, camadas=[10, 10])
# Processo de treinamento
n_passos = 1000
eqm_treino = np.zeros((n_passos+1))
eqm_treino[0] = erro(y_treino, ff_mlp(x_treino, mlp0))
eqm_teste = np.zeros((n_passos+1))
eqm_teste[0] = erro(y_teste, ff_mlp(x_teste, mlp0))
for i in xrange(n_passos):
mlp0 = backpropagation_step(x_treino, y_treino, mlp0)
eqm_treino[i+1] = erro(y_treino, ff_mlp(x_treino, mlp0))
eqm_teste[i+1] = erro(y_teste, ff_mlp(x_teste, mlp0))
plt.figure();
plt.plot(range(n_passos+1), eqm_treino);
plt.plot(range(n_passos+1), eqm_teste);
plt.ylabel('EQM');
plt.xlabel('Passos');
plt.title('EQM nos conjuntos de treino e teste');
plt.figure();
plt.plot(x.T, y.T);
plt.plot(x_treino.T, ff_mlp(x_treino, mlp0).T, color='blue');
plt.plot(x_teste.T, ff_mlp(x_teste, mlp0).T, color='red');
plt.ylabel('Y');
plt.xlabel('X');
plt.title('Aproximacao nos conjuntos de teste e treino');
"""
Explanation: Este experimento evidencia uma importante característica das redes neurais: elas são incapazes de aprender situações que não lhes foram apresentadas durante a etapa de treinamento (ou seja, são adequadas para interpolação, mas não necessariamente para extrapolação). Assim, pode ser uma boa idéia usar, como dados de treinamento, entradas que são distribuídas por todo o domínio da função a ser modelada. Em problemas de classificação, isso significa usar exemplos variados de cada classe. Assim, se utilizarmos o seguinte procedimento:
End of explanation
"""
# Caso 3: usando 30 amostras aleatoriamente escolhidas (30% do total) para o aprendizado
# e bias nas entradas
import random
x.shape = (x.size)
y.shape = (y.size)
train_array = np.zeros((x.size)).astype(bool)
test_array = np.ones((x.size)).astype(bool)
while np.sum(train_array) < 30:
n = int(random.random() * x.size)
test_array[n] = False
train_array[n] = True
x_treino = x[train_array]
x_teste = x[test_array]
y_treino = y[train_array]
y_teste = y[test_array]
x_treino.shape = (1,x_treino.size)
y_treino.shape = (1,y_treino.size)
x_teste.shape = (1,x_teste.size)
y_teste.shape = (1,y_teste.size)
x_treino2 = np.vstack((x_treino, np.ones(x_treino.size)))
x_teste2 = np.vstack((x_teste, np.ones(x_teste.size)))
mlp0 = nova_mlp(entradas=2, saidas=1, camadas=[30])
# Processo de treinamento
n_passos = 1000
eqm_treino = np.zeros((n_passos+1))
eqm_treino[0] = erro(y_treino, ff_mlp(x_treino2, mlp0))
eqm_teste = np.zeros((n_passos+1))
eqm_teste[0] = erro(y_teste, ff_mlp(x_teste2, mlp0))
for i in xrange(n_passos):
mlp0 = backpropagation_step(x_treino2, y_treino, mlp0)
eqm_treino[i+1] = erro(y_treino, ff_mlp(x_treino2, mlp0))
eqm_teste[i+1] = erro(y_teste, ff_mlp(x_teste2, mlp0))
plt.figure();
plt.plot(range(n_passos+1), eqm_treino);
plt.plot(range(n_passos+1), eqm_teste);
plt.ylabel('EQM');
plt.xlabel('Passos');
plt.title('EQM nos conjuntos de treino e teste');
plt.figure();
plt.plot(x.T, y.T);
plt.plot(x_treino.T, ff_mlp(x_treino2, mlp0).T);
plt.plot(x_teste.T, ff_mlp(x_teste2, mlp0).T);
plt.ylabel('Y');
plt.xlabel('X');
plt.title('Aproximacao nos conjuntos de teste e treino');
x_treino2 = np.vstack((x_treino, x_treino**2))
x_teste2 = np.vstack((x_teste, x_teste**2))
# Ax = y -> A = y * pinv(x)
A = np.dot(y_treino, np.linalg.pinv(x_treino2))
y_est = np.dot(A, x_teste2)
print "EQM: ", erro(y_teste, y_est)
print A
plt.figure();
plt.plot(x.T, y.T);
plt.plot(x_treino.T, np.dot(A, x_treino2).T);
plt.plot(x_teste.T, np.dot(A, x_teste2).T);
plt.ylabel('Y');
plt.xlabel('X');
plt.title('Aproximacao nos conjuntos de teste e treino');
"""
Explanation: Verificamos, neste caso, que a falta de flexibilidade da rede resultou em uma rede final inadequada. A função obtida pela rede MLP não se parece com uma parábola.
Pré-processamento: bias nas entradas
End of explanation
"""
mlp0 = nova_mlp(entradas=2, saidas=1, camadas=[10, 10, 10])
# Processo de treinamento
n_passos = 1000
eqm_treino = np.zeros((n_passos+1))
eqm_treino[0] = erro(y_treino, ff_mlp(x_treino2, mlp0))
eqm_teste = np.zeros((n_passos+1))
eqm_teste[0] = erro(y_teste, ff_mlp(x_teste2, mlp0))
for i in xrange(n_passos):
mlp0 = backpropagation_step(x_treino2, y_treino, mlp0)
eqm_treino[i+1] = erro(y_treino, ff_mlp(x_treino2, mlp0))
eqm_teste[i+1] = erro(y_teste, ff_mlp(x_teste2, mlp0))
print "EQM final:", eqm_teste[-1]
plt.figure();
plt.plot(range(n_passos+1), eqm_treino);
plt.plot(range(n_passos+1), eqm_teste);
plt.ylabel('EQM');
plt.xlabel('Passos');
plt.title('EQM nos conjuntos de treino e teste');
plt.figure();
plt.plot(x.T, y.T);
plt.plot(x_treino.T, ff_mlp(x_treino2, mlp0).T);
plt.plot(x_teste.T, ff_mlp(x_teste2, mlp0).T);
plt.ylabel('Y');
plt.xlabel('X');
plt.title('Aproximacao nos conjuntos de teste e treino');
"""
Explanation: Podemos verificar, tanto graficamente quanto no cálculo do EQM, que o erro de aproximação é bem menor que no caso da rede MLP. Por outro lado, poderíamos dizer que houve uma certa "trapaça", já que o modelo linear já aproveitou a informação de que $x^2$ é uma variável válida na entrada. Se fizermos o mesmo proceso na rede MLP, teremos:
End of explanation
"""
# Usando as 30 primeiras amostras (30% do total) para o aprendizado
x.shape = (1, x.size)
y.shape = (1, y.size)
x_treino = x[:,0:30]
x_teste = x[:,30:100]
y_treino = y[:,0:30]
y_teste = y[:,30:100]
x_treino2 = np.vstack((x_treino, x_treino**2))
x_teste2 = np.vstack((x_teste, x_teste**2))
# Ax = y -> A = y * pinv(x)
A = np.dot(y_treino, np.linalg.pinv(x_treino2))
y_est = np.dot(A, x_teste2)
print "EQM: ", erro(y_teste, y_est)
print A
plt.figure();
plt.plot(x.T, y.T);
plt.plot(x_treino.T, np.dot(A, x_treino2).T);
plt.plot(x_teste.T, np.dot(A, x_teste2).T);
plt.ylabel('Y');
plt.xlabel('X');
plt.title('Aproximacao nos conjuntos de teste e treino');
"""
Explanation: Vemos que nossa rede, agora, tem uma estrutura adequada para executar a regressão sobre os dados de treino. Porém, parece aproximar a parábola com alguma distorção, já que, além da própria parábola, o modelo também tenta aproximar o ruído.
À partir desses exemplos, verificamos algumas diretrizes para o uso de redes neurais. É importante discutir cada uma delas:
Redes neurais têm capacidade de aproximação universal, mas isso não significa que é simples encontrar a configuração que fornece essa aproximação,
Modelos mais simples podem fornecer respostas melhores e mais rápidas que as redes neurais,
Mesmo assim, redes neurais serão capazes de fornecer algum resultado, o que pode ser melhor que não ter resultado nenhum,
Etapas de pré-processamento de dados podem ser importantes no processo de predição, gerando novas maneiras de relacionar entradas e saídas que possivelmente serão mais relevantes e/ou passíveis de generalização.
Modelos mais simples e a extrapolação
Até então, combinamos duas estratégias diferentes: a escolha cuidadosa de pontos de dados e o pré-processamento das entradas (calculando $x^2$). A seguir, verificaremos o comportamento dos dois modelos - linear e MLP - frente a um cenário em que fizemos o pré-processamento dos dados, mas não fomos capazes de buscar amostras uniformemente distribuídas por todo o domínio.
End of explanation
"""
mlp0 = nova_mlp(entradas=2, saidas=1, camadas=[2])
# Processo de treinamento
n_passos = 1000
eqm_treino = np.zeros((n_passos+1))
eqm_treino[0] = erro(y_treino, ff_mlp(x_treino2, mlp0))
eqm_teste = np.zeros((n_passos+1))
eqm_teste[0] = erro(y_teste, ff_mlp(x_teste2, mlp0))
for i in xrange(n_passos):
mlp0 = backpropagation_step(x_treino2, y_treino, mlp0)
eqm_treino[i+1] = erro(y_treino, ff_mlp(x_treino2, mlp0))
eqm_teste[i+1] = erro(y_teste, ff_mlp(x_teste2, mlp0))
print "EQM final:", eqm_teste[-1]
plt.figure();
plt.plot(range(n_passos+1), eqm_treino);
plt.plot(range(n_passos+1), eqm_teste);
plt.ylabel('EQM');
plt.xlabel('Passos');
plt.title('EQM nos conjuntos de treino e teste');
plt.figure();
plt.plot(x.T, y.T);
plt.plot(x_treino.T, ff_mlp(x_treino2, mlp0).T);
plt.plot(x_teste.T, ff_mlp(x_teste2, mlp0).T, color='red');
plt.ylabel('Y');
plt.xlabel('X');
plt.title('Aproximacao nos conjuntos de teste e treino');
"""
Explanation: Como podemos ver, o modelo linear apresentou resultados semelhantes àqueles encontrados no caso da amostragem uniforme.
Usando dados pré-processados na rede MLP buscando extrapolação
End of explanation
"""
|
kimkipyo/dss_git_kkp | Python 복습/08일차.금_정규표현식, class, 크롤링, 숙제/8일차_1T,3T_정규 표현식_이메일, 핸드폰 번호, Class_.ipynb | mit | with open("crawled.txt", "r", encoding='utf8') as f: #crawled.txt는 보기와 같이 임의로 텍스트 파일을 만들었습니다.
data = f.read()
print(data)
import re
with open("crawled.txt", "r", encoding='utf8') as f:
    data = f.read()
phonenumber_regex = "010" # 1. the regular expression (regex)
# phonenumber_regex = "\d{3}[-]?\d{4}[-]?\d{4}"
# \d => matches one digit => [0-9]
# \d{3} => three digits in a row
# [-]? => an optional "-" (may or may not appear)
phonenumber_pattern = re.compile(phonenumber_regex) # compile to an SRE_Pattern object so Python can run the regex
phonenumber_list = phonenumber_pattern.findall(data) # 2. apply the compiled pattern's matching function
print(phonenumber_list)
with open("crawled.txt", "r", encoding='utf8') as f:
data = f.read()
phonenumber_regex = "[0-9영공빵일이둘삼사오육칠팔구]{3}[-]?[0-9영공빵일이둘삼사오육칠팔구]{3,4}[-]?[0-9영공빵일이둘삼사오육칠팔구]{4}"
# * => 0~n
# ? => 0-1
# + => 1-n
# 이런 데이터를 먼저 전처리를 하고 ( 공 => 0 )( 지금은 이게 더 바람직 )
phonenumber_pattern = re.compile(phonenumber_regex)
phonenumber_list = phonenumber_pattern.findall(data)
# email_regex = "[a-zA-Z0-9_]+[@][a-zA-Z0-9_]+[.][a-z]+[.]?[a-z]+$"
email_regex = "[a-zA-Z0-9_]+[@][a-zA-Z0-9.]+"
# email_regex = ""
# 정규표현식
# "다섯" 이라는 텍스트가 포함이 되는...
# "12다섯456"
# [a-zA-Z0-9_]*다섯[a-zA-Z0-9_]*
# {} -> 자릿수가 정해저있는 상황 ( 핸드폰 번호 ... )
email_pattern = re.compile(email_regex)
email_list = email_pattern.findall(data)
# .com
# .co.kr
print(phonenumber_list)
print(email_list)
def preprocess(phonenumber):
    """Normalise a matched phone-number string to plain digits.

    Korean digit words are mapped to their numerals and dashes are
    dropped, e.g. '공일공-일이삼사' -> '0101234'.
    """
    # Single-pass character translation (str.translate) instead of one
    # str.replace() scan per key.  '빵' is included because the matching
    # regex above accepts it as a spoken zero, but the old table missed it.
    tabela = str.maketrans({
        "영": "0",
        "공": "0",
        "빵": "0",
        "일": "1",
        "둘": "2",
        "이": "2",
        "삼": "3",
        "사": "4",
        "오": "5",
        "육": "6",
        "칠": "7",
        "팔": "8",
        "구": "9",
        "-": None,  # delete dashes
    })
    return phonenumber.translate(tabela)
[preprocess(phonenumber) for phonenumber in phonenumber_list]
"""
Explanation: 1T_정규 표현식 ( Regular Expression ) - 이메일, 핸드폰 번호
이메일, 핸드폰 번호 정규표현식
End of explanation
"""
class Fibonacci():
    """Memoized Fibonacci numbers, 1-indexed so that F(1)=0 and F(2)=1."""

    # Shared memo of already-computed terms, seeded with the two base cases.
    cache = {1: 0, 2: 1}

    # Class-level state + @staticmethod instead of per-instance state, so the
    # memo survives across calls without constructing any object.
    @staticmethod
    def calc(n):
        """Return the n-th Fibonacci number, filling the memo up to n."""
        memo = Fibonacci.cache
        if n in memo:
            return memo[n]
        if n <= 0:
            return 0
        # The memo keys are always contiguous 1..m, so extend it bottom-up
        # from the largest known term instead of recursing top-down.
        for k in range(max(memo) + 1, n + 1):
            memo[k] = memo[k - 1] + memo[k - 2]
        return memo[n]
Fibonacci.calc(20)
Fibonacci.cache
"""
Explanation: 3T_객체 지향 프로그래밍 - 클래스 메쏘드와 인스턴스 메쏘드
Class - 클래스는 매우 중요합니다.
Class => object ( 객체가 생성되는 클래스 ) 에 대해서만 배웠다.
예제: 피보나치 수열
End of explanation
"""
class Factorial():
    """Memoized factorial with a pretty-printer for the accumulated cache."""

    # Shared memo of n -> n!.  0! = 1 is seeded so run(0) terminates;
    # the original recursion never bottomed out below n == 1.
    cache = {
        0: 1,
        1: 1,
    }

    @staticmethod
    def run(n):
        """Return n! for n >= 0, caching every intermediate result.

        Raises ValueError for negative n (previously an infinite recursion).
        """
        if n < 0:
            raise ValueError("factorial is undefined for negative n")
        if n in Factorial.cache:
            return Factorial.cache[n]
        Factorial.cache[n] = n * Factorial.run(n-1)
        return Factorial.cache[n]

    @staticmethod
    def prettify():
        """Print every cached entry as 'n! == result', one per line."""
        print("\n".join([
            "{n}! == {result}".format(n=key, result=value)
            for key, value
            in Factorial.cache.items()
        ]))
Factorial.run(4)
Factorial.prettify()
"""
Explanation: 예제: Factorial
End of explanation
"""
import functools
class Calendar():
    """Day-count helpers over the Gregorian calendar, anchored at 1900.

    The point of the running day total: total_days_until(year) % 7 gives
    the weekday of Jan 1st of that year (the anchor, 1900-01-01, was a
    Monday — note the original Korean comments said "year 1", but the
    code counts from 1900; TODO confirm the intended anchor).
    """

    # Days per month in a non-leap year (month number -> day count).
    __days = {
        1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
        7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31,
    }

    @staticmethod
    def is_leap(year):
        """Return True when *year* is a Gregorian leap year."""
        return year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)

    @staticmethod
    def days_in(year):
        """Return a fresh dict month -> number of days for *year*."""
        month_days = dict(Calendar.__days)  # copy so the template stays intact
        if Calendar.is_leap(year):
            month_days[2] += 1
        return month_days

    @staticmethod
    def total_days_in(year):
        """Return the total number of days in *year* (365 or 366)."""
        return sum(Calendar.days_in(year).values())

    @staticmethod
    def total_days_until(year):
        """Return the day count from 1900-01-01 up to Jan 1st of *year*.

        E.g. from 1900 to 1901 -> 365; from 1900 to 1902 -> 365 * 2.
        """
        return sum(Calendar.total_days_in(y) for y in range(1900, year))
Calendar.total_days_until(1905)
Calendar.days_in(2016)
"""
Explanation: 예제: 달력
End of explanation
"""
|
dolittle007/dolittle007.github.io | notebooks/GLM-logistic.ipynb | gpl-3.0 | %matplotlib inline
import pandas as pd
import numpy as np
import pymc3 as pm
import matplotlib.pyplot as plt
import seaborn
import warnings
warnings.filterwarnings('ignore')
from collections import OrderedDict
from time import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import fmin_powell
from scipy import integrate
import theano as thno
import theano.tensor as T
def run_models(df, upper_order=5):
    '''
    Convenience function:
    Fit a range of pymc3 models of increasing polynomial complexity.
    Suggest limit to max order 5 since calculation time is exponential.

    df          -- DataFrame handed to the patsy formula built by
                   create_poly_modelspec (needs income, educ, hours, age).
    upper_order -- highest polynomial order of the age term to fit.

    Returns (models, traces): OrderedDicts keyed 'k1'..'k<upper_order>'
    holding the pymc3 Model objects and their sampled traces.
    '''
    models, traces = OrderedDict(), OrderedDict()
    for k in range(1,upper_order+1):
        nm = 'k{}'.format(k)
        # Formula string with polynomial age terms up to order k.
        fml = create_poly_modelspec(k)
        with pm.Model() as models[nm]:
            print('\nRunning: {}'.format(nm))
            # Normal family => ordinary linear-regression likelihood.
            pm.glm.GLM.from_formula(fml, df, family=pm.glm.families.Normal())
            traces[nm] = pm.sample(2000, init=None)
    return models, traces
def plot_traces(traces, retain=1000):
    '''
    Convenience function:
    Plot traces with overlaid means and values

    traces -- a pymc3 trace object
    retain -- number of trailing samples to keep (burn-in is discarded)
    '''
    # Trace plot of the retained samples, with each parameter's posterior
    # mean drawn as a reference line.
    ax = pm.traceplot(traces[-retain:], figsize=(12,len(traces.varnames)*1.5),
        lines={k: v['mean'] for k, v in pm.df_summary(traces[-retain:]).iterrows()})

    # Annotate each density subplot with the numeric posterior mean.
    for i, mn in enumerate(pm.df_summary(traces[-retain:])['mean']):
        ax[i,0].annotate('{:.2f}'.format(mn), xy=(mn,0), xycoords='data'
            ,xytext=(5,10), textcoords='offset points', rotation=90
            ,va='bottom', fontsize='large', color='#AA0022')
def create_poly_modelspec(k=1):
    '''
    Convenience function:
    Build a patsy model-formula string that is polynomial of order k in age.
    k=1 yields the plain linear formula; higher k appends np.power(age, j) terms.
    '''
    base = 'income ~ educ + hours + age '
    poly_terms = ['+ np.power(age,{})'.format(order) for order in range(2, k + 1)]
    return (base + ' '.join(poly_terms)).strip()
"""
Explanation: GLM: Logistic Regression
This is a reproduction with a few slight alterations of Bayesian Log Reg by J. Benjamin Cook
Author: Peadar Coyle and J. Benjamin Cook
How likely am I to make more than $50,000 US Dollars?
Exploration of model selection techniques too - I use DIC and WAIC to select the best model.
The convenience functions are all taken from Jon Sedar's work.
This example also has some explorations of the features so serves as a good example of Exploratory Data Analysis and how that can guide the model creation/ model selection process.
End of explanation
"""
data = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", header=None, names=['age', 'workclass', 'fnlwgt',
'education-categorical', 'educ',
'marital-status', 'occupation',
'relationship', 'race', 'sex',
'captial-gain', 'capital-loss',
'hours', 'native-country',
'income'])
data.head(10)
"""
Explanation: The Adult Data Set is commonly used to benchmark machine learning algorithms. The goal is to use demographic features, or variables, to predict whether an individual makes more than \$50,000 per year. The data set is almost 20 years old, and therefore, not perfect for determining the probability that I will make more than \$50K, but it is a nice, simple dataset that can be used to showcase a few benefits of using Bayesian logistic regression over its frequentist counterpart.
My motivation for reproducing this piece of work was to learn how to use odds ratios in Bayesian regression.
End of explanation
"""
data = data[~pd.isnull(data['income'])]
data[data['native-country']==" United-States"]
income = 1 * (data['income'] == " >50K")
age2 = np.square(data['age'])
data = data[['age', 'educ', 'hours']]
data['age2'] = age2
data['income'] = income
income.value_counts()
"""
Explanation: Scrubbing and cleaning
We need to remove any null entries in Income.
And we also want to restrict this study to the United States.
End of explanation
"""
g = seaborn.pairplot(data)
# Compute the correlation matrix
corr = data.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = seaborn.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
seaborn.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
"""
Explanation: Exploring the data
Let us get a feel for the parameters.
* We see that age is a tailed distribution. Certainly not Gaussian!
* We don't see much of a correlation between many of the features, with the exception of Age and Age2.
* Hours worked has some interesting behaviour. How would one describe this distribution?
End of explanation
"""
with pm.Model() as logistic_model:
pm.glm.GLM.from_formula('income ~ age + age2 + educ + hours', data, family=pm.glm.families.Binomial())
trace_logistic_model = pm.sample(4000)
plot_traces(trace_logistic_model, retain=1000)
"""
Explanation: We see here not many strong correlations. The highest is 0.30 according to this plot. We see a weak-correlation between hours and income
(which is logical), we see a slighty stronger correlation between education and income (which is the kind of question we are answering).
The model
We will use a simple model, which assumes that the probability of making more than $50K
is a function of age, years of education and hours worked per week. We will use PyMC3
do inference.
In Bayesian statistics, we treat everything as a random variable and we want to know the posterior probability distribution of the parameters
(in this case the regression coefficients)
The posterior is equal to the likelihood $$p(\theta | D) = \frac{p(D|\theta)p(\theta)}{p(D)}$$
Because the denominator is a notoriously difficult integral, $p(D) = \int p(D | \theta) p(\theta) d \theta $ we would prefer to skip computing it. Fortunately, if we draw examples from the parameter space, with probability proportional to the height of the posterior at any given point, we end up with an empirical distribution that converges to the posterior as the number of samples approaches infinity.
What this means in practice is that we only need to worry about the numerator.
Getting back to logistic regression, we need to specify a prior and a likelihood in order to draw samples from the posterior. We could use sociological knowledge about the effects of age and education on income, but instead, let's use the default prior specification for GLM coefficients that PyMC3 gives us, which is $p(θ)=N(0,10^{12}I)$. This is a very vague prior that will let the data speak for themselves.
The likelihood is the product of n Bernoulli trials, $\prod^{n}{i=1} p{i}^{y} (1 - p_{i})^{1-y_{i}}$,
where $p_i = \frac{1}{1 + e^{-z_i}}$,
$z_{i} = \beta_{0} + \beta_{1}(age){i} + \beta_2(age)^{2}{i} + \beta_{3}(educ){i} + \beta{4}(hours){i}$ and $y{i} = 1$ if income is greater than 50K and $y_{i} = 0$ otherwise.
With the math out of the way we can get back to the data. Here I use PyMC3 to draw samples from the posterior. The sampling algorithm used is NUTS, which is a form of Hamiltonian Monte Carlo, in which parameteres are tuned automatically. Notice, that we get to borrow the syntax of specifying GLM's from R, very convenient! I use a convenience function from above to plot the trace infromation from the first 1000 parameters.
End of explanation
"""
plt.figure(figsize=(9,7))
trace = trace_logistic_model[1000:]
seaborn.jointplot(trace['age'], trace['educ'], kind="hex", color="#4CB391")
plt.xlabel("beta_age")
plt.ylabel("beta_educ")
plt.show()
"""
Explanation: Some results
One of the major benefits that makes Bayesian data analysis worth the extra computational effort in many circumstances is that we can be explicit about our uncertainty. Maximum likelihood returns a number, but how certain can we be that we found the right number? Instead, Bayesian inference returns a distribution over parameter values.
I'll use seaborn to look at the distribution of some of these factors.
End of explanation
"""
# Linear model with hours == 50 and educ == 12
lm = lambda x, samples: 1 / (1 + np.exp(-(samples['Intercept'] +
samples['age']*x +
samples['age2']*np.square(x) +
samples['educ']*12 +
samples['hours']*50)))
# Linear model with hours == 50 and educ == 16
lm2 = lambda x, samples: 1 / (1 + np.exp(-(samples['Intercept'] +
samples['age']*x +
samples['age2']*np.square(x) +
samples['educ']*16 +
samples['hours']*50)))
# Linear model with hours == 50 and educ == 19
lm3 = lambda x, samples: 1 / (1 + np.exp(-(samples['Intercept'] +
samples['age']*x +
samples['age2']*np.square(x) +
samples['educ']*19 +
samples['hours']*50)))
"""
Explanation: So how do age and education affect the probability of making more than $$50K?$ To answer this question, we can show how the probability of making more than $50K changes with age for a few different education levels. Here, we assume that the number of hours worked per week is fixed at 50. PyMC3 gives us a convenient way to plot the posterior predictive distribution. We need to give the function a linear model and a set of points to evaluate. We will pass in three different linear models: one with educ == 12 (finished high school), one with educ == 16 (finished undergrad) and one with educ == 19 (three years of grad school).
End of explanation
"""
# Plot the posterior predictive distributions of P(income > $50K) vs. age
pm.plot_posterior_predictive_glm(trace, eval=np.linspace(25, 75, 1000), lm=lm, samples=100, color="blue", alpha=.15)
pm.plot_posterior_predictive_glm(trace, eval=np.linspace(25, 75, 1000), lm=lm2, samples=100, color="green", alpha=.15)
pm.plot_posterior_predictive_glm(trace, eval=np.linspace(25, 75, 1000), lm=lm3, samples=100, color="red", alpha=.15)
import matplotlib.lines as mlines
blue_line = mlines.Line2D(['lm'], [], color='b', label='High School Education')
green_line = mlines.Line2D(['lm2'], [], color='g', label='Bachelors')
red_line = mlines.Line2D(['lm3'], [], color='r', label='Grad School')
plt.legend(handles=[blue_line, green_line, red_line], loc='lower right')
plt.ylabel("P(Income > $50K)")
plt.xlabel("Age")
plt.show()
b = trace['educ']
plt.hist(np.exp(b), bins=20, normed=True)
plt.xlabel("Odds Ratio")
plt.show()
"""
Explanation: Each curve shows how the probability of earning more than $ 50K$ changes with age. The red curve represents 19 years of education, the green curve represents 16 years of education and the blue curve represents 12 years of education. For all three education levels, the probability of making more than $50K increases with age until approximately age 60, when the probability begins to drop off. Notice that each curve is a little blurry. This is because we are actually plotting 100 different curves for each level of education. Each curve is a draw from our posterior distribution. Because the curves are somewhat translucent, we can interpret dark, narrow portions of a curve as places where we have low uncertainty and light, spread out portions of the curve as places where we have somewhat higher uncertainty about our coefficient values.
End of explanation
"""
lb, ub = np.percentile(b, 2.5), np.percentile(b, 97.5)
print("P(%.3f < O.R. < %.3f) = 0.95"%(np.exp(3*lb),np.exp(3*ub)))
"""
Explanation: Finally, we can find a credible interval (remember kids - credible intervals are Bayesian and confidence intervals are frequentist) for this quantity. This may be the best part about Bayesian statistics: we get to interpret credibility intervals the way we've always wanted to interpret them. We are 95% confident that the odds ratio lies within our interval!
End of explanation
"""
models_lin, traces_lin = run_models(data, 4)
dfdic = pd.DataFrame(index=['k1','k2','k3','k4'], columns=['lin'])
dfdic.index.name = 'model'
for nm in dfdic.index:
dfdic.loc[nm, 'lin'] = pm.stats.dic(traces_lin[nm], models_lin[nm])
dfdic = pd.melt(dfdic.reset_index(), id_vars=['model'], var_name='poly', value_name='dic')
g = seaborn.factorplot(x='model', y='dic', col='poly', hue='poly', data=dfdic, kind='bar', size=6)
"""
Explanation: Model selection
The Deviance Information Criterion (DIC) is a fairly unsophisticated method for comparing the deviance of likelhood across the the sample traces of a model run. However, this simplicity apparently yields quite good results in a variety of cases. We'll run the model with a few changes to see what effect higher order terms have on this model.
One question that was immediately asked was what effect does age have on the model, and why should it be age^2 versus age? We'll use the DIC to answer this question.
End of explanation
"""
dfdic = pd.DataFrame(index=['k1','k2','k3','k4'], columns=['lin'])
dfdic.index.name = 'model'
for nm in dfdic.index:
dfdic.loc[nm, 'lin'] = pm.stats.waic(traces_lin[nm],models_lin[nm])[0]
dfdic = pd.melt(dfdic.reset_index(), id_vars=['model'], var_name='poly', value_name='waic')
g = seaborn.factorplot(x='model', y='waic', col='poly', hue='poly', data=dfdic, kind='bar', size=6)
"""
Explanation: There isn't a lot of difference between these models in terms of DIC. So our choice is fine in the model above, and there isn't much to be gained for going up to age^3 for example.
Next we look at WAIC. Which is another model selection technique.
End of explanation
"""
|
dedx/STAR2015 | STAR2015Workshop.ipynb | mit | #Comments begin with #
#Allow graphics to render inside the notebook
%pylab inline
#import packages we might want to use
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
"""
Explanation: Coding in the Classroom
Jennifer Klay jklay@calpoly.edu
California Polytechnic State University, San Luis Obispo
Description
Coding and computer programming are essential skills for 21st century learners but how can we provide opportunities to build such skills in the classroom? Open source tools and programming languages with simple and natural syntax provide one avenue. In this workshop I will introduce participants to the Python programming language using the IPython/Jupyter notebook. I will present ways to help students learn to practice algorithmic thinking, to work together to problem solve, and to apply the computer to solve problems that interest them. In addition, a variety of useful resources for developing lessons and integrating coding into existing curricula will be discussed.
Audience: Aspiring or early-career K-12 STEM teachers hoping to include research/computing in the classroom.
Assumed programming experience: None
The complete set of materials for this workshop are available online at Github.
1. Introduction
This notebook provides a roadmap to help teachers bring coding and programming concepts to the classroom through project-based learning. With some basic programming skills you can tackle a wide array of interesting and instructive problems.
What are some skills that we wish students to gain?
Practice algorithmic thinking
Break complex problems into smaller, more manageable parts
Work together to problem solve
Figure out how to get unstuck/find help when things don't work as expected
Apply the computer to problems they want to solve
Find/identify interesting problems the computer can help them solve
There are several methods for helping students build their programming skills. In particular, encouraging them to program in pairs or groups will help them brainstorm solutions while decomposing a problem into the sequence of steps taken to solve a problem - an algorithm. Once they have the steps of a solution outlined, they can attempt to put their process into the syntax of a computing language.
There are some best practices for writing effective programs that even seasoned coders don't always follow. Nevertheless most programmers would agree that the following set of practices are helpful and effective. One thing you can do to encourage students is to ask them to periodically evaluate their own work throughout the development process and demonstrate how they are applying these practices. Eventually they won't need to be prompted, as the practices will become second-nature.
Some best practices for writing effective programs
Program together (in pairs or groups)
Deconstruct a problem into simple steps and create a scaffold of the full program from the start - some parts will be empty or placeholders until their logic can be filled in.
Develop "pseudo-code" (an informal description of the algorithm that uses the structural conventions of a programming language, but is intended for human reading rather than machine reading) for more complex components to work out the logic
Fill in the "guts" of the program components
Test each component individually to verify it gives expected results with a known input and output
Document the code as it is developed, describing the purpose of all functions, their inputs and outputs, and the purpose of the full program and how it all fits together
Demonstrate the program works and (to the best of your ability to judge) gives meaningful results
These basic goals and guidelines are applicable to any programming language or platform. For beginning coders, it is critical to make the entry point as easily accessible as possible within a framework that also provides a comprehensive set of tools that will enable them to expand beyond the simplest concepts. The tools suggested here provide such a framework.
2. Tools
IPython/Jupyter
The IPython Notebook (the "I" in IPython stands for interactive) is an interactive computational environment, in which you can combine code execution, rich text, mathematics, plots and rich media. It is a great platform for learning to program and for solving and presenting complex problems.
The notebook runs inside a web browser and can be easily installed with the Python programming language and a comprehensive library of scientific computing tools using the "Anaconda" package provided FREE by Continuum Analytics.
In 2015, the IPython Notebook evolved into Project Jupyter, which now manages the language-agnostic parts of the notebook and provides one uniform platform for working in several different computing languages, including Python.
If you are viewing this notebook via a weblink, you won't be able to interact with it, but if you install Anaconda, you can code along with us by opening the notebook from the IPython Notebook server.
Python
The Python programming language is an excellent tool for general-purpose programming, with a highly readable syntax, rich and powerful data types (strings, lists, sets, dictionaries, arbitrary length integers, etc) and a very comprehensive standard library. The language is easy to learn and because it is an interpreted language, commands are executed in real time without the need to compile a full program.
There is a whole ecosystem of additional libraries provided with Anaconda that can be used for mathematical and scientific computing, which will help you efficiently represent multidimensional datasets, solve linear algebra systems, perform general matrix manipulations (an essential building block of virtually all technical computing), and visually represent and interact with data of many different types.
SciPy
SciPy (pronounced "Sigh-Pie") is the premiere package for scientific computing in Python. It has many, many useful tools, which are central to scientific computing tasks you'll come across.
The NumPy library (pronounced "Numb-Pie") forms the base layer for the entire SciPy ecosystem.
Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. When you need to view or present data, it is invaluable.
These libraries, or their individual components, can be imported and used in the IPython notebook.
To write and execute code within the notebook, we use a code cell (the default). We will look at the details of the syntax later. For now, here is our first code cell, in which we import some of these libraries for later use:
End of explanation
"""
# Display a local image file inside the notebook via IPython's rich display system
from IPython.display import Image
Image(filename='img/Cats.jpg')
"""
Explanation: To execute a cell:
Shift-Enter: run cell and move to new cell below.
Control-Enter: run cell and stay in the cell.
3. Notebook basics
There are two primary different kinds of cell types - "code" and "markdown". The cell in which this text was typed is a markdown cell. A code cell is one in which computer instructions or code can be typed. To execute a cell, hit Shift-Enter.
Markdown cells use the Markdown formatting system to allow you to include formatted text, such as italic and bold, or to create bulleted or numbered lists:
a list element
another list element
a numbered list element
another numbered list element
The view of these cells is determined by whether they have been executed or not. Double-clicking any executed markdown cell will bring it into "edit" mode. Try it with one of these cells.
Mathematical expressions can be rendered using LaTeX formatting (pronounced "Lah-Tech") to give full-featured symbolic representation:
$$x = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a}$$
The LaTeX "code" to create that equation is
$$x = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a}$$
LaTeX equation typesetting within the IPython Notebook is very helpful for displaying equations and directly connecting them with the computer code that implements them numerically.
Images can be included in markdown cells using HTML tags like
<img src="img/Cats.jpg" width=200>
<img src="img/Cats.jpg" width=200>
or they can be included in code cells using the display features of IPython.
End of explanation
"""
# Images can also be loaded directly from a web URL
Image(url='http://python.org/images/python-logo.gif')
"""
Explanation: It is also possible to display images linked from sites around the web:
End of explanation
"""
# Embed a YouTube video in the notebook by its video id
from IPython.display import YouTubeVideo
YouTubeVideo('4disyKG7XtU')
"""
Explanation: How about embedding Youtube videos in the notebook using the video tag?
End of explanation
"""
# The classic first program (Python 2 print statement)
print "Hello World"
"""
Explanation: These features enable the notebook to provide a rich development environment for projects that incorporate computer code but are not (necessarily) all about the code. In fact, the notebook lets you expand the notion of a computer "program" to include context and commentary side-by-side with the code to help readers/users better understand the purpose and results of the computer code.
Now that we have seen some of the notebook's capability in action, let's write some code.
4. Writing Code
A short workshop is not enough time to fully introduce all of programming to beginners, but we can learn some of the basics by trying them out. Here is a brief tour of simple code concepts implemented as code statements in Python.
4.1 Basic coding syntax and logic
Print a message:
End of explanation
"""
# Assign the integer value 5 to the variable z
z = 5
"""
Explanation: Create a variable and assign it a value:
End of explanation
"""
# Display the value currently stored in z (defined in the previous cell)
print z
"""
Explanation: Print the value stored in the variable:
End of explanation
"""
# Update z in place: the right-hand side is evaluated first, then reassigned
z = z + 27
print z
"""
Explanation: Change the value of the variable with an arithmetic expression:
End of explanation
"""
# Prompt the user for input; Python 2's raw_input returns the typed text as a string
name = raw_input("Hi, what's your name? ");
print "Hi, my name is",name
"""
Explanation: You can request input from a user that can be stored in memory and used for other purposes:
End of explanation
"""
def intro(name):
    """Print a greeting using the supplied name."""
    print "Hi, my name is",name
"""
Explanation: In Python, you can define a function and pass it arguments, then execute the function:
End of explanation
"""
# Call the function, passing the argument in parentheses
intro("Jennifer")
"""
Explanation: To call the function, you type the name, with the argument(s) in parentheses as a code statement:
End of explanation
"""
def intro():
    """Prompt the user to finish the greeting (takes no arguments)."""
    name = raw_input("Hi, my name is ");
intro()
"""
Explanation: here's a function that takes no arguments:
End of explanation
"""
# Branch on the value of z: exactly one of the two messages is printed
if z > 10:
    print "Yay!"
else:
    print "Boo. :-("
print z
"""
Explanation: You can control the execution of code statements with if ... else:
End of explanation
"""
print "I can count to 10!"
# range(10) yields 0..9 -- deliberately surprising, as the lesson below explains
for i in range(10):
    print i
"""
Explanation: For repetitive tasks, you can use a for loop:
End of explanation
"""
# A list of strings; the for loop visits each element in order
shopping_list = ["eggs","milk","bacon","bread","strawberries","yogurt","jam"]
for item in shopping_list:
    print item
"""
Explanation: Oops. Not quite what you were expecting?
Notice that in Python, indices start from 0, not 1, and count up to n-1.
I can create a list of items and iterate over them with a for loop:
End of explanation
"""
# Index into the list (indices start at 0, so [2] is the third item)
print shopping_list[2]
print "What's for breakfast?"
# '+' concatenates the two strings with no separator -- improved in the next cell
print shopping_list[2] + shopping_list[0]
"""
Explanation: I can access individual items from their location in the list using an index to that location:
End of explanation
"""
print "What's for breakfast?"
# Insert " and " between the items for readable output
print shopping_list[0] + " and " + shopping_list[2]
"""
Explanation: Let's try that again:
End of explanation
"""
# Slice [1:4] selects the elements at indices 1, 2 and 3 (the end index is excluded)
print shopping_list[1:4]
"""
Explanation: I can also slice through the list
End of explanation
"""
# Omitting a slice bound means "from the start" / "to the end"
#Slice from 0th to the 1th element:
print shopping_list[:2]
#Slice from 0th (first) to the n-1th (last) element:
print shopping_list[:]
#Slice from 2th (third) to n-1th (last) element:
print shopping_list[2:]
"""
Explanation: This shows the "1th", "2th", and "3th" elements, or start to end-1. Note the difference between this and traditional ordinal numbering:
"0th" (Zero-th) = "First"
"1th" (One-th) = "Second"
"2th" (Two-th) = "Third"
"3th" (Three-th) = "Fourth"
and so on...
When slicing a list (or array), you can leave one or the other index blank to tell it to start at the beginning or go to the end. The result will be similar to before:
End of explanation
"""
# Negative indices count backward from the end of the list
#Print the last element:
print shopping_list[-1]
#Print the second-to-last element:
print shopping_list[-2]
"""
Explanation: You can also access elements backward from the end using negative numbers:
End of explanation
"""
# The third slice field is the step size
#Print every other element:
print shopping_list[::2]
#Print every third element (skip 2):
print shopping_list[::3]
"""
Explanation: or designate a step size to jump over certain elements:
End of explanation
"""
# A negative step walks the list backward
#Iterate from the last to the first element in steps of 1:
print shopping_list[-1::-1]
"""
Explanation: How about printing them backward?
End of explanation
"""
#Create an array of 10 values between 0 and 10-1
arr = np.arange(10)
print arr
#Create an array of 10 evenly-spaced values between 0 and 10
#(unlike arange, linspace includes both endpoints, so the spacing here is 10/9)
arr2 = np.linspace(0,10,10)
print arr2
"""
Explanation: Python lists can contain elements of any data type - strings of characters (e.g. words, sentences, etc.), whole numbers (called "integers" or ints), decimal numbers (called "floating point numbers" or floats), other lists, etc.
There are other kinds of containers in Python - dictionaries, deques, queues - each with different features and uses. We won't look into them further here, but if you are interested, consult the documentation or an introductory text such as ThinkPython by Allen Downey.
4.2 NumPy Arrays
NumPy arrays are better containers for purely numerical data.
End of explanation
"""
# NumPy arrays support the same len(), indexing and slicing as Python lists
#How many elements are in the array?
print len(arr)
#Print the 5-th element (sixth in the list):
print arr[5]
"""
Explanation: The elements of the arrays can be accessed with the same indexing and slicing as we used before:
End of explanation
"""
# Evaluate y = sin(x**2) at 300 evenly-spaced points on [0, 2*pi]
x = np.linspace(0, 2*np.pi, 300)
y = np.sin(x**2)
"""
Explanation: NumPy arrays can be used for simple or complex mathematical calculations.
End of explanation
"""
# Plot y vs x; the trailing semicolon suppresses the text repr of the title object
plt.plot(x, y)
plt.title("A little chirp");
"""
Explanation: Here, x and y are just arrays of numbers that represent the value of the function $y(x)=\sin(x^2)$ at each of the 300 values of $x$ between 0 and 2$\pi$.
4.3 Matplotlib
You can use matplotlib to create visual representations of such data:
End of explanation
"""
#Execute this cell to see the histogram example plot code, then execute again to make the plot.
#(%load is an IPython magic that fetches the file's source into this cell)
%load http://matplotlib.org/mpl_examples/statistics/histogram_demo_multihist.py
"""
Explanation: Graphs in matplotlib have many attributes that you can customize (see the documentation). The developers also provide a gallery of plots to showcase the wide variety of visual representations that are available.
No one can keep all of the functions and fine layout control commands in their brain. Often when I need to make a plot, I go to the gallery page and browse the images until I find one that is similar to what I want to create and then I copy the code and modify it to suit my needs.
When you use the Matplotlib gallery to develop (or "template") a figure, you can very easily load the source code into your notebook and then modify it as needed to fit your specific needs using the Python "magic" command, %load.
Try it now. After the code is loaded, just execute the cell to see the output.
End of explanation
"""
from IPython.html.widgets import interact

def sin_plot(A=5.0,f1=5.0,f2=10.):
    """Draw a pure sine wave and the superposition of two sine waves.

    A  -- common amplitude of both components
    f1 -- frequency of the first (pure) component
    f2 -- frequency of the second component in the superposition
    """
    xs = np.linspace(0, 2*np.pi, 1000)
    # single sine curve
    pure = A*np.sin(f1*xs)
    # sum of two equal-amplitude sine curves with different frequencies
    combined = A*(np.sin(f1*xs) + np.sin(f2*xs))
    plt.plot(xs, pure, xs, combined)
    plt.xlim(0., 2.*np.pi)
    plt.ylim(-10., 10.)
    plt.grid()
    plt.show()

# Sliders for amplitude and both frequencies drive the plot interactively
v3 = interact(sin_plot, A=(0., 10.), f1=(1.0, 10.0), f2=(1.0, 10.0))
"""
Explanation: 4.4 IPython Widgets
Exploring data is much more fun when you can directly interact with it. IPython widgets provide a great way to do just that. Here is an example with two sine curves - one is a pure sine wave, the other is the superposition of two waves with different frequency but the same amplitude. You can interactively explore how the functions change when the parameters are changed.
End of explanation
"""
def scatter_plot(r=0.5, n=27):
    """Scatter n points sampled uniformly at random inside a circle of radius r.

    The angle t is uniform on [0, 2*pi); taking the square root of a uniform
    variate for the radius makes the points uniform in *area* rather than
    clustering near the center.
    """
    t = np.random.uniform(0.0, 2.0*np.pi, n)
    rad = r*np.sqrt(np.random.uniform(0.0, 1.0, n))
    # polar -> Cartesian.  (The original pre-allocated x and y with np.empty
    # and then immediately overwrote them; those dead assignments, and the
    # unused `fig` variable, are removed here.)
    x = rad*np.cos(t)
    y = rad*np.sin(t)
    plt.figure(figsize=(4,4), dpi=80)
    plt.scatter(x, y)
    plt.xlim(-1., 1.)
    plt.ylim(-1., 1.)
    plt.show()

# Sliders for the circle radius and the sample count
v2 = interact(scatter_plot, r=(0.0, 1.0), n=(1, 1000))
"""
Explanation: Here's another interactive plot that allows you to randomly sample (x,y) pairs within a circle of radius $r$. The interact object lets you increase or decrease the number of samples in the circle.
End of explanation
"""
# Deliberately broken code to demonstrate a SyntaxError:
# 'pirnt' is misspelled AND the string's closing quote is missing.
pirnt "Hello
print "World " + z-5
"""
Explanation: A last fun example of using widgets can be found in the accompanying notebook on the Quantum double-slit experiment.
4.5 When things go awry
Making mistakes when programming is an unavoidable part of the experience. It can be very frustrating. You might spend an hour trying to track down the source of a "bug" in a program, convinced that the computer is at fault. I know I have done that countless times. But computers are only as "smart" as their programmers. Like lemmings, they will blindly follow our instructions. It is pretty much always our fault. Get used to that.
Occasionally, the computer will spit out an error message or give back a gibberish answer, alerting us to our mistake. Sometimes we're not so lucky.
Bug-hunting (or "debugging") is part of the job. And part of debugging is being able to understand error messages. Let's generate some errors to get a feel for dealing with them.
End of explanation
"""
# Still broken on purpose: the quote is closed now, but 'print' is misspelled,
# so this raises another SyntaxError.
pirnt "Hello"
print "World " + z-5
"""
Explanation: This is an example of a syntax error. We forgot to close the quotes around our message. Python error messages are (usually) informative and tell you what the problem is and where it occurred. We can fix this problem by closing our quotes.
End of explanation
"""
# Spelling fixed, but the second line raises a TypeError on purpose:
# a string cannot be concatenated with the integer result of z-5.
print "Hello"
print "World " + z-5
"""
Explanation: Oops. We misspelled print, giving us another syntax error. In this case the message is not quite as clear about what the problem is, but it isn't too hard to spot the mistake.
Notice that Python doesn't execute the second statement in the cell when it encounters an error in the first line. Execution stops at the first sign of an error. Keeping the number of lines of code in cells or functions short makes tracking errors much easier.
Notice also that the Notebook provides syntax highlighting - Python keywords, like print, and numbers appear in <font color=green>green</font>, quoted strings appear in <font color=red>red</font>, operators like + and - appear in <font color=purple>purple</font> and variables are black. This can be helpful for spotting errors quickly.
Let's correct our spelling.
End of explanation
"""
# Working version: str() casts the integer result to a string before '+'
print "Hello"
print "World " + str(z-5)
"""
Explanation: Now what? Here is a type error. We tried to perform the "+" operation on a string and an integer, which is not allowed. To correct this we could force the integer object to be treated as a string by casting it as one:
End of explanation
"""
# Deliberate NameError demo: m and b have not been defined yet
ff = m*z + b
"""
Explanation: Let's move on to another error.
End of explanation
"""
m = 0.5
b = -3.
# Still fails on purpose: ff was never assigned because the previous cell errored
print ff
"""
Explanation: This is an example of a runtime error. The syntax is correct, but in this case some of the variables have not yet been defined. Let's try that again.
End of explanation
"""
# Recompute ff now that m and b exist
ff = m*z + b
# NOTE(review): this prints z rather than the newly computed ff; the surrounding
# text suggests ff was intended -- confirm against the original notebook.
print z
"""
Explanation: Because of the previous error, ff was never defined. We have to either re-execute the cell above or re-define ff now that the other variables have been defined.
End of explanation
"""
def cube_root(x):
    '''This function takes a number, x,
    computes the cube root,
    and returns the result
    '''
    # Intentionally buggy teaching example: under Python 2, 1/3 is integer
    # division and evaluates to 0, so this returns x**0 == 1 for any x.
    # The exercise below asks the reader to fix it (e.g. use 1./3.).
    return np.power(x,1/3)

print cube_root(8)
"""
Explanation: Runtime errors are annoying but at least they announce themselves. What happens if we make a logical error?
End of explanation
"""
# Python 2 '/' on integers truncates: 5/4 == 1
5/4
"""
Explanation: Hmmm... What went wrong here? Everything looks okay. This is an example of a logical error. The syntax is correct, the code executed, but the output is not what we expect it to be. (The cube-root of 8 is 2: 2*2*2=8.)
How come this function doesn't work? Python is a dynamically-typed language, which means it tries to guess what the data type of the object you are defining is and then applies the rules of those objects for subsequent calculations. Integers are whole numbers of type int. Decimal numbers are represented in the computer as floating point numbers or floats. When Python computes the fraction with the "/" operator on integers, it assumes you want an integer result. The "/" operator on integers rounds down to the nearest whole integer:
End of explanation
"""
# Integer operands give a truncated integer result...
a = 3
b = 4
print a/b
# ...while a trailing decimal point makes them floats, giving 0.75
c = 3.
d = 4.
print c/d
"""
Explanation: How do we avoid logical errors like this one? For mathematical calculations, you can explicitly define the data type of the numbers you use. If you want a floating-point result, use floating-point numbers by adding a decimal point to the end of the number:
End of explanation
"""
# A lone '?' opens IPython's general help pager
?
"""
Explanation: How do we avoid logical errors in general?
Never make a logical error.
Just kidding. The only way to minimize logical errors is to always test your code with a known result whenever possible. In the case of the cube_root function, we knew something was wrong with the logic because the cube-root of 8 should be 2. When we got a different value than expected, we knew we had a problem to track down and correct. The purpose of testing as a programming "best practice" is to minimize logical errors like this one.
Fix the cube_root function and test that it gives sensible results.
4.6 Getting help
When IPython needs to display additional information it will automatically invoke a pager at the bottom of the screen. You can get help with IPython in general by typing a question mark in a code cell and executing it:
End of explanation
"""
# Appending '?' to an object shows its docstring/signature in the pager
np.arange?
"""
Explanation: Or get help on a particular function with, e.g.
End of explanation
"""
#Your code here
#(placeholder cell for the Project Euler "Multiples of 3 and 5" exercise described above)
"""
Explanation: There is extensive documentation for Python, IPython, NumPy, SciPy, Matplotlib, etc. on the web. Chances are, your question has been asked and answered somewhere already. Google is your friend. A few places to look when you get stuck:
The IPython website
Stackoverflow
Reddit
5. Applications
Now that we we have seen some of the basics, let's try using what we learned to solve a simple problem.
5.1 Project Euler
Project Euler is an online repository of simple and hard math puzzles that can be solved with the computer. These are great for practicing problem-solving, algorithmic thinking, and coding. Let's try a simple one together.
Multiples of 3 and 5
Problem 1
If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
https://projecteuler.net/problem=1
Work with a partner to brainstorm a solution to this problem by breaking it into logical steps. Type your set of steps into a markdown cell as a bulleted list. Then try to implement the code for these steps in a code cell. Verify your solution works for the case where N = 10 before trying N = 1000.
Steps:
*
*
End of explanation
"""
|
activitynet/ActivityNet | Notebooks/ActivityNet-Release1.3.Proposals.ipynb | mit | import sys
# Make the repo's Evaluation toolkit importable from this notebook's location
sys.path.append('../Evaluation')
from eval_proposal import ANETproposal
import matplotlib.pyplot as plt
import numpy as np
import json
%matplotlib inline
"""
Explanation: ActivityNet Challenge Proposal Task
This notebook is intended as demo on how to format and evaluate the performance of a submission file for the proposal task. Additionally, a helper function is given to visualize the performance on the evaluation metric.
End of explanation
"""
def run_evaluation(ground_truth_filename, proposal_filename,
                   max_avg_nr_proposals=100,
                   tiou_thresholds=np.linspace(0.5, 0.95, 10),
                   subset='validation'):
    """Run the ActivityNet proposal evaluation and return its recall curves.

    Returns a tuple (average_nr_proposals, average_recall, recall):
      average_nr_proposals -- average number of proposals per video (x-axis)
      average_recall       -- recall averaged over all tIoU thresholds
      recall               -- per-threshold recall matrix
    """
    evaluator = ANETproposal(ground_truth_filename, proposal_filename,
                             tiou_thresholds=tiou_thresholds,
                             max_avg_nr_proposals=max_avg_nr_proposals,
                             subset=subset, verbose=True, check_status=True)
    evaluator.evaluate()
    return (evaluator.proposals_per_video,
            evaluator.avg_recall,
            evaluator.recall)
def plot_metric(average_nr_proposals, average_recall, recall, tiou_thresholds=np.linspace(0.5, 0.95, 10)):
    """Plot recall-vs-#proposals curves for selected tIoU thresholds plus the average.

    average_nr_proposals -- x-axis values (average proposals per video)
    average_recall       -- recall averaged over all thresholds (solid curve)
    recall               -- matrix with one row per tIoU threshold
    tiou_thresholds      -- thresholds corresponding to the rows of `recall`
    """
    fn_size = 14
    plt.figure(num=None, figsize=(6, 5))
    ax = plt.subplot(1,1,1)
    colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
    # Area under each per-threshold recall curve, used in the legend labels.
    area_under_curve = np.zeros_like(tiou_thresholds)
    for i in range(recall.shape[0]):
        area_under_curve[i] = np.trapz(recall[i], average_nr_proposals)
    # Plot every other threshold to keep the legend readable.
    for idx, tiou in enumerate(tiou_thresholds[::2]):
        ax.plot(average_nr_proposals, recall[2*idx,:], color=colors[idx+1],
                label="tiou=[" + str(tiou) + "], area=" + str(int(area_under_curve[2*idx]*100)/100.),
                linewidth=4, linestyle='--', marker=None)
    # Plots Average Recall vs Average number of proposals.
    ax.plot(average_nr_proposals, average_recall, color=colors[0],
            label="tiou = 0.5:0.05:0.95," + " area=" + str(int(np.trapz(average_recall, average_nr_proposals)*100)/100.),
            linewidth=4, linestyle='-', marker=None)
    # Move the average-recall entry to the front of the legend.
    handles, labels = ax.get_legend_handles_labels()
    ax.legend([handles[-1]] + handles[:-1], [labels[-1]] + labels[:-1], loc='best')
    plt.ylabel('Average Recall', fontsize=fn_size)
    plt.xlabel('Average Number of Proposals per Video', fontsize=fn_size)
    plt.grid(b=True, which="both")
    plt.ylim([0, 1.0])
    # Bug fix: the original called plt.axes() here, which on current matplotlib
    # creates a *new* axes instead of returning the existing one; style the tick
    # labels of the axes we actually drew on.
    plt.setp(ax.get_xticklabels(), fontsize=fn_size)
    plt.setp(ax.get_yticklabels(), fontsize=fn_size)
    plt.show()
"""
Explanation: Help functions to evaluate a proposal submission file and plot the metric results
End of explanation
"""
%%time
# seed the random number generator to get consistent results across multiple runs
np.random.seed(42)
with open("../Evaluation/data/activity_net.v1-3.min.json", 'r') as fobj:
gd_data = json.load(fobj)
subset='validation'
avg_nr_proposals = 100
proposal_data = {'results': {}, 'version': gd_data['version'], 'external_data': {}}
for vid_id, info in gd_data['database'].iteritems():
if subset != info['subset']:
continue
this_vid_proposals = []
for _ in range(avg_nr_proposals):
# generate random proposal center, length, and score
center = info['duration']*np.random.rand(1)[0]
length = info['duration']*np.random.rand(1)[0]
proposal = {
'score': np.random.rand(1)[0],
'segment': [center - length/2., center + length/2.],
}
this_vid_proposals += [proposal]
proposal_data['results'][vid_id] = this_vid_proposals
with open("../Evaluation/data/uniform_random_proposals.json", 'w') as fobj:
json.dump(proposal_data, fobj)
"""
Explanation: Generate uniform random proposal for the validation subset
End of explanation
"""
%%time
uniform_average_nr_proposals_valid, uniform_average_recall_valid, uniform_recall_valid = run_evaluation(
"../Evaluation/data/activity_net.v1-3.min.json",
"../Evaluation/data/uniform_random_proposals.json",
max_avg_nr_proposals=100,
tiou_thresholds=np.linspace(0.5, 0.95, 10),
subset='validation')
plot_metric(uniform_average_nr_proposals_valid, uniform_average_recall_valid, uniform_recall_valid)
"""
Explanation: Evaluate the uniform random proposals and plot the metric results
End of explanation
"""
|
gapatino/Doing-frequentist-statistics-with-Scipy | PyData DC 2016 - Doing frequentist statistics with Scipy.ipynb | gpl-3.0 | import numpy as np
from scipy import stats
import pandas as pd
from tkinter import filedialog
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Use file browser to find name and path of the CSV file that contains the dataset
# NOTE(review): askopenfilename() blocks until a file is chosen and returns an
# empty string if the dialog is cancelled; later cells assume a valid path.
data_file = filedialog.askopenfilename()
print(data_file)
"""
Explanation: Doing frequentist statistics with Scipy
PyData DC 2016
Gustavo A. Patino
Department of Biomedical Sciences
Department of Neurology
Oakland University William Beaumont School of Medicine
Rochester, MI
patino@oakland.edu
https://github.com/gapatino/Doing-frequentist-statistics-with-Scipy
Iris Dataset:
Fisher RA. The use of multiple measurements in taxonomic problems. Annals of Eugenics 1936; 7 (2): 179–188
https://github.com/gapatino/Doing-frequentist-statistics-with-Scipy
End of explanation
"""
# Load the iris CSV selected above; listed strings are parsed as missing (NaN).
dataset = pd.read_csv(data_file, sep=',', na_values=["."," ","na"]) # Can use lists for possible missing values
# Bare expressions below rely on Jupyter's display of the last cell value.
dataset.shape
dataset.columns
dataset.head(n=10)
"""
Explanation: Loading the dataset
End of explanation
"""
# Summary statistics for the whole dataset, then column means per iris species.
dataset.describe(percentiles=[.25, .5, .75], include='all')
grouped_iris = dataset.groupby('Type')
grouped_iris.mean()
"""
Explanation: Exploratory analysis
End of explanation
"""
# Per-species summary table, one petal-width histogram per species, and a
# pairwise scatter matrix with linear-regression fits.
grouped_iris.describe(percentiles=[.25, .5, .75], include='all')
grouped_iris['Petal_Width'].hist()
sns.pairplot(dataset, kind='reg')
"""
Explanation: Exercise
Create a table that describes the grouped_iris data frame, including the 25th, 50th, and 75th percentiles
End of explanation
"""
# Two ways of isolating one species: the lazy GroupBy column vs. a boolean mask.
grouped_iris['Type'] # .groupby function returns a GroupBy object that is lazily executed
dataset[dataset['Type']=='setosa'].head(n=10) # Better to use the original dataset
"""
Explanation: Data Extraction
For the statistical functions we will have to specify the dependent data variable and the independent data variable as two separate arrays
End of explanation
"""
# Extract each measurement as its own pandas Series per species
# (pl/pw = petal length/width, sl/sw = sepal length/width); the statistical
# tests below take these 1-D arrays as inputs.
dataset[dataset['Type']=='setosa']['Petal_Length'].head(n=10)
pl_setosa = dataset[dataset['Type']=='setosa']['Petal_Length']
pl_virginica = dataset[dataset['Type']=='virginica']['Petal_Length']
pl_versicolor = dataset[dataset['Type']=='versicolor']['Petal_Length']
pw_setosa = dataset[dataset['Type']=='setosa']['Petal_Width']
pw_virginica = dataset[dataset['Type']=='virginica']['Petal_Width']
pw_versicolor = dataset[dataset['Type']=='versicolor']['Petal_Width']
sl_setosa = dataset[dataset['Type']=='setosa']['Sepal_Length']
sl_virginica = dataset[dataset['Type']=='virginica']['Sepal_Length']
sl_versicolor = dataset[dataset['Type']=='versicolor']['Sepal_Length']
sw_setosa = dataset[dataset['Type']=='setosa']['Sepal_Width']
sw_virginica = dataset[dataset['Type']=='virginica']['Sepal_Width']
sw_versicolor = dataset[dataset['Type']=='versicolor']['Sepal_Width']
type(sw_setosa)
# Overlaid sepal-width histograms, one per species; alpha keeps overlaps visible.
plt.hist(sw_versicolor, label='Versicolor', alpha=0.5)
plt.hist(sw_virginica, label='Virginica', alpha=0.5)
plt.hist(sw_setosa, label='Setosa', alpha=0.5)
plt.legend(loc='best')
"""
Explanation: Exercise
How would you make separate variables containing the petal and sepal characteristics of each iris type?
End of explanation
"""
# Four tests of the null hypothesis that a sample is normally distributed.
# Kolmogorov-Smirnov test: Fairly conservative
# NOTE(review): kstest with 'norm' compares against the *standard* normal
# N(0, 1); raw measurements are usually standardized first -- confirm intent.
ks_pl_setosa = stats.kstest(pl_setosa, 'norm', mode='asymp') # mode opts: 'approx'. Dist can be any in scipy.stats
ks_pl_setosa[1]
# Shapiro test
shapiro_pw_setosa = stats.shapiro(pw_setosa)
shapiro_pw_setosa
# Normal test: Combines skew and kurtosis measurement. Allows management of NaN
nt_sl_setosa = stats.normaltest(sl_setosa, nan_policy='omit') #nan_policy opts: 'propagate', 'raise'
nt_sl_setosa
# Anderson test: Modified KS, returns critical values for a list of significance levels
anderson_sw_setosa = stats.anderson(sw_setosa, dist='norm')
anderson_sw_setosa
"""
Explanation: Normality Testing
End of explanation
"""
# Print the four normality-test results side by side for comparison.
print('KS: ', ks_pl_setosa)
print('Shapiro: ', shapiro_pw_setosa)
print('Normal: ', nt_sl_setosa)
print('Anderson: ', anderson_sw_setosa)
"""
Explanation: Exercise
What do the outputs mean?
How would you extract only the p-value of a given test?
End of explanation
"""
# Two ways to pull the p-value out of a (statistic, pvalue) result tuple.
print('KS p-value using index: ', ks_pl_setosa[1])
# or
_ , p_ks_pl_setosa = stats.kstest(pl_setosa, 'norm', mode='asymp')
print('KS p-value using multiple variable assignment: ', p_ks_pl_setosa)
"""
Explanation: All of the outputs are the test value and associated p-value, except for Anderson test in which the test value is provided along with a table of critical values for given significances
End of explanation
"""
# Three tests of the null hypothesis that two samples have equal variances.
# Bartlett test: Requires normal populations
bartlett_length_versicolor = stats.bartlett(pl_versicolor, sl_versicolor)
print(bartlett_length_versicolor)
# Levene test: more robust than Bartlett if samples are non-normal. Can define what central tendency measure is used
levene_length_virginica = stats.levene(pl_virginica, sl_virginica, center='trimmed') # For heavy-tailed distributions
print(levene_length_virginica)
# Fligner-Killeen's test: Non-parametric
# Renamed from fk_length_setosa: the inputs are the *virginica* samples.
fk_length_virginica = stats.fligner(pl_virginica, sl_virginica, center='mean') # For normal distributions
# Use 'median' for skewed distributions
print(fk_length_virginica)
"""
Explanation: Homogeneity of Variance
End of explanation
"""
# Parametric comparisons of two samples of a continuous measure.
# t-test of 2 independent samples
ttest_sw_set_ver = stats.ttest_ind(sw_setosa, sw_versicolor, equal_var=True, nan_policy='omit') # equal_var default: T
print(ttest_sw_set_ver)
# t-test of paired samples
ttest_width_setosa = stats.ttest_rel(pw_setosa, sw_setosa, nan_policy='omit')
print(ttest_width_setosa)
# t-test from descriptive statistics: mean, SD, n from each sample
# (equal_var=False gives Welch's t-test for unequal variances)
ttest_pw_vir_ver = stats.ttest_ind_from_stats(20.06, 2.902, 50, 13.26, 1.977, 50, equal_var=False)
print(ttest_pw_vir_ver)
"""
Explanation: Comparing 2 samples of a continuous measure: Parametric tests
t-tests
End of explanation
"""
# Effect size for the setosa-vs-versicolor sepal-width comparison:
# Cohen's d = (mean1 - mean2) / pooled SD.
# Calculate pooled STD
std_sw_set_ver = np.sqrt( ( (sw_setosa.size-1)*(sw_setosa.std()**2) + (sw_versicolor.size-1)*(sw_versicolor.std()**2) )
                         / (sw_setosa.size + sw_versicolor.size - 2) )
# Calculate Cohen's d
cohend_sw_set_ver = (sw_setosa.mean() - sw_versicolor.mean()) / std_sw_set_ver
print('Cohen\'s d: ', cohend_sw_set_ver) # d=0.2 small effect size, 0.5 medium, 0.8 large
"""
Explanation: Effect sizes
Cohen's d
d = $\frac{\overline{x_1} - \overline{x_2}}{SDp}$
d=0.2 small effect size, 0.5 medium, 0.8 large
$SD_p$ (Pooled standard deviation) = $\sqrt[2]{\frac{(N_1-1)(SD_1^2)+(N_2-1)(SD_2^2)}{N_1+N_2-2}}$
Exercise
What is the value of Cohen's d?
End of explanation
"""
# Converting between a test statistic and a p-value via the normal distribution.
# NOTE(review): for a t statistic, stats.t.cdf/ppf with the right df would be
# exact; the normal is only a large-sample approximation -- confirm intent.
1-stats.norm.cdf(ttest_pw_vir_ver[0]) # one-side p-value if I know the test value
stats.norm.ppf(ttest_pw_vir_ver[1]) # What is the test value given the p-value
"""
Explanation: Pearson's correlation coefficient can also be used as a measure of effect size (see below)
End of explanation
"""
# Non-parametric comparisons of two samples of a continuous measure.
# Wilcoxon rank-sum test: Can use if n < 20
wrk_sw_set_ver = stats.ranksums(sw_setosa, sw_versicolor)
print(wrk_sw_set_ver)
# Mann-Whitney U test: More robust than Wilcoxon rank-sum, use if n > 20
mwu_sw_set_ver = stats.mannwhitneyu(sw_setosa, sw_versicolor, use_continuity=True, alternative='greater')
# alternative options: 'less', 'two-sided'. 'None' is deprecated
print(mwu_sw_set_ver)
# Wilcoxon test: For paired samples
wilcoxon_width_setosa = stats.wilcoxon(pw_setosa, sw_setosa, zero_method='wilcox', correction=False)
# zero_method is how zero-differences are handled. Options: 'pratt', 'zsplit'
# correction is if statistic is corrected towards the mean during calculation. Default: F
print(wilcoxon_width_setosa)
"""
Explanation: Comparing 2 samples of a continuous measure: Non-Parametric tests
Wilcoxon rank-sum
Mann-Whitney U
Wilcoxon
End of explanation
"""
# Compare sepal width across all three species at once.
# 1-way ANOVA: Parametric
anova_sw = stats.f_oneway(sw_setosa, sw_versicolor, sw_virginica)
print(anova_sw)
"""
Explanation: Comparing multiple groups
ANOVA
Kruskal-Wallis H
End of explanation
"""
# Non-parametric alternative to the one-way ANOVA above.
# Kruskal-Wallis H test: Non-parametric
kw_sw = stats.kruskal(sw_setosa, sw_versicolor, sw_virginica, nan_policy='omit')
print(kw_sw)
"""
Explanation: What about post-hoc tests, DF, and other results?
Not available in the Scipy.stats implementation
Use of linear regression with the statsmodels module allows access to some of that data
End of explanation
"""
# Build a 2x2 contingency table of "above/below mean" for petal vs. sepal
# width, then run a chi-square test of independence on it.
mean_pw = dataset['Petal_Width'].mean()
mean_sw = dataset['Sepal_Width'].mean()
width_table = pd.crosstab(dataset.Petal_Width > mean_pw, dataset.Sepal_Width > mean_sw)
width_table
# Chi square: Requires a matrix composed of individual arrays or a pd.crosstab result as input
chi2_width = stats.chi2_contingency(width_table, correction=False) # Correction: Yates'
# Another optional argument: lambda_='pearson'/'log-likelihood'/'freeman-tukey'/
# 'mod-log-likelihood'/'neyman'/'cressie-read'
# lambda_ default is None which computes Pearson's chi2
print(chi2_width)
print('\n')
print(' Chi-square value: ', chi2_width[0], '\n',
      'p-value: ', chi2_width[1], '\n',
      'Degrees of freedom: ', chi2_width[2], '\n',
      'Expected frequencies: ', chi2_width[3], '\n')
# Fisher's exact test: Use if any expected frequency is < 5
# alternative options: 'less', 'greater'
observed_counts = [[18, 42],
                   [65, 25]]
fisher_width = stats.fisher_exact(observed_counts, alternative='two-sided')
print(fisher_width)
print('\n')
odds_ratio, fisher_p = fisher_width
print(' Odds ratio: ', odds_ratio, '\n',
      'p-value: ', fisher_p)
"""
Explanation: Contingency Tables
Chi square
Fisher's exact test
pd.crosstab(vector1, vector2) creates a contingency table from two binary vectors
Exercise
Create a contingency table from counts of big and small petal width and sepal width using the mean as cutoff
End of explanation
"""
# Four correlation coefficients: Pearson (parametric), Spearman (rank),
# point-biserial (binary vs. continuous), and Kendall's tau (ordinal/robust).
# Pearson correlation coefficient: Parametric
pearson_petal = stats.pearsonr(dataset['Petal_Width'], dataset['Petal_Length'])
print(pearson_petal,'\n')
print('Pearson\'s correlation coefficient: ', pearson_petal[0])
print('p-value: ', pearson_petal[1]) # p-value is not so useful or reliable
# Spearman rank-order correlation coefficient: Non-parametric
spearman_sepal = stats.spearmanr(dataset['Sepal_Width'], dataset['Sepal_Length'], nan_policy='omit')
print(spearman_sepal)
# Point-biserial correlation coefficient: Measures correlation between a binary and a continuous variable
setosa_type = dataset['Type']=='setosa' #Binary variable
pbs_setosa_sw = stats.pointbiserialr(setosa_type, dataset['Sepal_Width'])
print(pbs_setosa_sw)
# Kendall's Tau: Non-parametric. Arguments for use: Ordinal data, more robust than Spearman, non-linear relations
# NOTE(review): initial_lexsort is deprecated/removed in newer SciPy -- verify
# against the installed version.
ktau_versicolor = stats.kendalltau(pw_versicolor, pl_versicolor, initial_lexsort=None, nan_policy='omit')
# initial_lexsort=False uses quicksort
print(ktau_versicolor)
"""
Explanation: Correlation
Pearson's correlation coefficient r
Spearman rank-order correlation coefficient rho
Point-biserial correlation coefficient
Kendall's Tau
End of explanation
"""
# Scatterplot of variables to include in regression
sns.lmplot(y='Petal_Width', x='Sepal_Width', data=dataset) # Add hue='Type' to observe subgroups
# Scipy linear regression using least-squares. Only works for univariate
scipy_linreg_width = stats.linregress(dataset['Sepal_Width'], dataset['Petal_Width']) # order of x,y != from lmplot
print(scipy_linreg_width)
"""
Explanation: Linear Regression
End of explanation
"""
# Fit the same regression with statsmodels' R-style formula API, which exposes
# a much richer results object than stats.linregress.
import statsmodels.formula.api as smf
reg_width = smf.ols(formula='Petal_Width ~ Sepal_Width', data=dataset)
reg_width_model = reg_width.fit()
reg_width_model.summary()
print(reg_width_model.summary()) # This way is better to obtain warnings
print( dir(reg_width_model) )  # list everything available on the fitted model
"""
Explanation: stats.linregress provides limited information, and the library lacks a logistic regression function.
Use the statsmodels library for regression
End of explanation
"""
# Refit with the array-based API (requires an explicit intercept column),
# plot standardized residuals vs. standardized predictions, and compute
# per-observation influence diagnostics (DFBetas).
import statsmodels.api as sm
pred_var_matrix = dataset['Sepal_Width']
pred_var_matrix = sm.add_constant(pred_var_matrix)  # prepend intercept column
sm_reg_width = sm.OLS( dataset['Petal_Width'], pred_var_matrix) # Note the difference from smf.ols
sm_reg_width_model = sm_reg_width.fit()
# Plotting residuals:
# Obtain predicted values for dependent variable
# NOTE(review): predicted_values is computed for demonstration only and is
# never used below; sm_predicted_values is the one actually plotted.
predicted_values = reg_width_model.predict(pred_var_matrix) # dataset['Sepal_Width'] is not valid input
sm_predicted_values = sm_reg_width_model.predict(pred_var_matrix)
residuals = dataset['Petal_Width'] - sm_predicted_values
normalized_residuals = (residuals - np.mean(residuals)) / np.std(residuals)
normalized_predicted = (sm_predicted_values - np.mean(sm_predicted_values)) / np.std(sm_predicted_values)
plt.plot(normalized_residuals, normalized_predicted, 'o')
plt.xlabel('Standardized Residuals')
plt.ylabel('Standardized Predicted Value')
influence = sm_reg_width_model.get_influence()
influence_dbetas = influence.summary_frame().filter(regex='dfb')  # keep only DFBeta columns
print(influence_dbetas.head(5))
"""
Explanation: To plot residuals, an important quality control step, we need to use the predict() method. This function takes as input a matrix of predictive variables plus a new column for the intercept. To create this compound matrix we need to use the add_constant() function from the other statsmodels api: statsmodels.api
End of explanation
"""
# Conventional cutoff for standardized DFBetas (Belsley, Kuh & Welsch) is
# 2 / sqrt(N), not 2 ** sqrt(N) -- the latter (~4800 for N=150) would never
# flag any observation as influential.
influence_max = 2 / np.sqrt(sm_reg_width_model.nobs)
print('Maximum value of DFBeta: ', influence_max)
# True if any observation unduly influences the Sepal_Width coefficient
any(influence_dbetas['dfb_Sepal_Width'] > influence_max)
"""
Explanation: DFBeta measures the influence a given sample exerts over the model. The maximum value allowed can be calculated as:
$2/\sqrt{N}$
Exercise
Find if any sample exerts an excessive influence over our model
End of explanation
"""
# Logistic regression of the binary setosa indicator on sepal width,
# reusing the intercept-augmented predictor matrix built earlier.
logregr_setosa_sw = sm.Logit(setosa_type, pred_var_matrix)
logregr_setosa_sw_model = logregr_setosa_sw.fit()
print(logregr_setosa_sw_model.summary())
# Calculate odds ratio
# (exponentiating the log-odds coefficients gives odds ratios)
print(np.exp(logregr_setosa_sw_model.params))
"""
Explanation: Logistic Regression
End of explanation
"""
# Plot predicted values vs. data
# (raw 0/1 outcomes plus the model's predicted probabilities, both vs. sepal width)
logregr_predicted_values = logregr_setosa_sw_model.predict(pred_var_matrix)
plt.plot(dataset['Sepal_Width'], setosa_type, 'o')
plt.plot(dataset['Sepal_Width'], logregr_predicted_values,'ok')
plt.xlabel('Sepal Width')
plt.ylabel('Setosa type')
plt.ylim(-0.05, 1.05)  # pad so the 0/1 points are not clipped
"""
Explanation: Exercise
Plot the data against the values predicted by the model
End of explanation
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.