code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Tutorial cell: explore pandas Index / MultiIndex objects and their conversions.
import pandas as pd
import numpy as np
# Let's create a Index object
# `name` labels the index axis when it is attached to a Series/DataFrame.
pd_index = pd.Index(['index1','index2','index3'],name='index')
print(pd_index)
# Important attributes that can help to understand Index object
pd_index.values
pd_index.name
pd_index.dtype
# +
# transfer to list, series, frame
print(pd_index.tolist())
print(pd_index.to_series()) # duplicate the index to series index, index to series data
print(pd_index.to_frame()) # duplicate the index to frame index, index to column 1
# series can be further converted to dict
print(pd_index.to_series().to_dict())
# -
# How about MultiIndex object, let's generate one.
arrays = [['col1','col2','col3'],['lev1','lev2','lev3']]
pd_MultiIndex = pd.MultiIndex.from_arrays(arrays,names=['col','lev'])
print(pd_MultiIndex)
# Again, play with the attributes a bit to get familiar with the composition of this special object.
pd_MultiIndex.names
pd_MultiIndex.levels
pd_MultiIndex.levels[0]
# ### There are several methods with MultiIndex
# indexing
# Use the same MultiIndex on both axes so row and column selection behave alike.
df = pd.DataFrame(data=np.random.random([3,3]),index=pd_MultiIndex,columns=pd_MultiIndex)
df
# indexing column first level
df.loc[:,'col1']
# index column second level, also applicable to both levels together
# slice(None) selects every label on the first level (same role as ':' in .loc).
df.loc[:,(slice(None),'lev1')]
# get_level_values
pd_MultiIndex.get_level_values('col')
# transfer to list, frame
print(pd_MultiIndex.tolist())
print(pd_MultiIndex.to_frame()) # multiple level to columns, duplicate the index to df index
| pandas/examples/1_Learning_Index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h2>Tugas Harian 2</h2>
#
# Dengan menggunakan data di bawah ini, plot kan data tersebut kemudian custom sehingga memiliki result seperti yang di harapkan :
#
# note : warna terdiri dari warna Hijau, kuning, Biru, dan Merah
# +
import numpy as np

# Sample curves over [-10, 10]. Note x is in radians, so `x - 90` shifts the
# phase by 90 *radians* (not degrees) -- kept as-is to match the exercise data.
x = np.linspace(-10, 10, 100)
cos = np.cos(x)
cos3 = np.cos(x)*3
sin = np.sin(x-90)
sin2 = np.sin(x)*2
# -
# Expected Result :
#
# 
# +
from matplotlib import pyplot as plt

fig = plt.figure(figsize=[10,5])
ax = fig.add_subplot()
ax.plot(x,cos, label='Cosx', color='red')
ax.plot(x,cos3, label='3 Cosx', color='green', linestyle='--')
# Fixed label: was 'Sin(x-90' -- unbalanced parenthesis in the legend text.
ax.plot(x,sin, label='Sin(x-90)', color='blue', linestyle='-.')
# NOTE(review): the brief asks for Hijau/kuning/Biru/Merah (green/yellow/blue/red)
# but no series here is yellow -- confirm against the expected-result image.
ax.plot(x,sin2, label='2Sinx', color='lightgreen', marker='x', linestyle=' ')
ax.set_ylabel('Y-Axis Label')
ax.set_xlabel('X-Axis Label')
ax.set_title('Grafik Cos Sin')
plt.legend(loc='center')
plt.show()
# -
| 3. Data Visualization : Matplotlib/Jawaban Harian 2 Week 3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import the Required Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# dataset
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2 -- this notebook requires an older scikit-learn to run.
from sklearn.datasets import load_boston
# scaling and dataset split
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# OLS, Ridge
from sklearn.linear_model import LinearRegression, Ridge
# model evaluation
from sklearn.metrics import r2_score, mean_squared_error
# # Load the Dataset
house_price = load_boston()
df = pd.DataFrame(house_price.data, columns=house_price.feature_names)
df
# # Standardize and train/test split
# Standardize features in place so the Ridge penalty treats them comparably.
house_price.data = preprocessing.scale(house_price.data)
X_train, X_test, y_train, y_test = train_test_split(house_price.data, house_price.target, test_size=0.3, random_state=10)
# # Iterate the lambda values ranged from 0 to 199
# +
# initialize
# alpha=0 makes Ridge equivalent to ordinary least squares (the baseline fit).
ridge_reg = Ridge(alpha=0)
ridge_reg.fit(X_train, y_train)
ridge_df = pd.DataFrame({'variable': house_price.feature_names, 'estimate': ridge_reg.coef_})
ridge_train_pred = []
ridge_test_pred = []
# iterate lambdas
for alpha in np.arange(0, 200, 1):
    # training
    ridge_reg = Ridge(alpha=alpha)
    ridge_reg.fit(X_train, y_train)
    var_name = 'estimate' + str(alpha)
    ridge_df[var_name] = ridge_reg.coef_
    # prediction
    ridge_train_pred.append(ridge_reg.predict(X_train))
    ridge_test_pred.append(ridge_reg.predict(X_test))
# -
# organize dataframe
# Transpose so each row is one alpha value and each column one feature's beta.
ridge_df = ridge_df.set_index('variable').T.reset_index()
# plot betas by lambda
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(ridge_df.RM, 'r', ridge_df.ZN, 'g', ridge_df.RAD, 'b', ridge_df.CRIM, 'c', ridge_df.TAX, 'y')
ax.axhline(y=0, color='black', linestyle='--')
ax.set_xlabel("Lambda")
ax.set_ylabel("Beta Estimate")
ax.set_title("Ridge Regression Trace", fontsize=16)
ax.legend(labels=['Room','Residential Zone','Highway Access','Crime Rate','Tax'])
ax.grid(True)
# Dead code kept as a string literal. NOTE(review): if re-enabled,
# `mean_squared_error(y_test, ridge_test_pred)` passes the whole list of
# prediction arrays; the OLS baseline is presumably ridge_test_pred[0]
# (the alpha=0 fit) -- confirm before resurrecting this cell.
''' MSE of Ridge and OLS
ridge_mse_test = [mean_squared_error(y_test, p) for p in ridge_test_pred]
ols_mse = mean_squared_error(y_test, ridge_test_pred)
# plot mse
plt.plot(ridge_mse_test[:25], 'ro')
plt.axhline(y=ols_mse, color='g', linestyle='--')
plt.title("Ridge Test Set MSE", fontsize=16)
plt.xlabel("Model Simplicity$\longrightarrow$")
plt.ylabel("MSE")'''
| Ridge Regression on Boston Dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:genpen]
# language: python
# name: conda-env-genpen-py
# ---
# + Collapsed="false"
import itertools
import numpy as np
import os
import seaborn as sns
from tqdm import tqdm
from dataclasses import asdict, dataclass, field
import vsketch
import shapely.geometry as sg
from shapely.geometry import box, MultiLineString, Point, MultiPoint, Polygon, MultiPolygon, LineString
import shapely.affinity as sa
import shapely.ops as so
import matplotlib.pyplot as plt
import pandas as pd
import networkx as nx
import vpype_cli
from typing import List, Generic
from genpen import genpen as gp, utils as utils
from scipy import stats as ss
import geopandas
from shapely.errors import TopologicalError
import bezier
import functools
# %load_ext autoreload
# %autoreload 2
# -
class BezierCurve(object):
    """Thin wrapper around ``bezier.Curve`` that yields a shapely LineString.

    Control points may arrive as (n, 2) or (2, n); they are normalised to the
    (dim, num_nodes) layout that the ``bezier`` package expects.
    """

    def __init__(
        self,
        nodes=None,
        degree=None,
        n_eval_points=100,
    ):
        # Put the length-2 (x/y) axis first, whichever axis it arrived on.
        axis_order = np.argsort(np.array(nodes.shape) - 2)
        self._nodes = nodes.transpose(axis_order)
        self._degree = degree
        self.n_eval_points = n_eval_points

    @property
    def degree(self):
        # Lazily derived: a Bezier curve with k control points has degree k - 1.
        if self._degree is None:
            self._degree = self.nodes.shape[1] - 1
        return self._degree

    @property
    def nodes(self):
        return self._nodes

    @property
    def _fortran_nodes(self):
        # bezier.Curve requires Fortran-ordered node arrays.
        return np.asfortranarray(self.nodes)

    @property
    def _curve(self):
        return bezier.Curve(self._fortran_nodes, self.degree)

    @property
    def eval_points(self):
        # Evenly spaced curve parameters in [0, 1], endpoints included.
        return np.linspace(0, 1, self.n_eval_points)

    @property
    def evaluated_curve(self):
        # (n_eval_points, 2) array of sampled xy coordinates.
        x, y = self._curve.evaluate_multi(self.eval_points)
        return np.stack([x, y]).T

    @property
    def linestring(self):
        return LineString(self.evaluated_curve)
# +
class PerlinGrid(object):
    """Maps xy positions to angles via vsketch Perlin noise over a polygon.

    Note: xstep/ystep are accepted for API compatibility but are not used.
    """

    def __init__(self, poly, xstep=0.1, ystep=0.1, lod=4, falloff=None, noiseSeed=71, noise_scale=0.001, output_range=(0, np.pi*2)):
        self.p = poly
        # A private sketch instance supplies the Perlin noise field.
        self.vsk = vsketch.Vsketch()
        self.lod = lod
        self.falloff = falloff
        self.noiseSeed = noiseSeed
        self.noise_scale = noise_scale
        self.vsk.noiseSeed(self.noiseSeed)
        self.vsk.noiseDetail(lod=self.lod, falloff=self.falloff)
        self.output_range = output_range

    def noise(self, x, y):
        """Return the noise value at (x, y), rescaled into output_range."""
        scaled_x = x * self.noise_scale
        scaled_y = y * self.noise_scale
        raw = self.vsk.noise(x=scaled_x, y=scaled_y)
        # vsketch noise is in [0, 1]; stretch it to the requested range
        # (by default [0, 2*pi], i.e. a heading angle).
        return np.interp(raw, [0, 1], self.output_range)
# Cell
class Particle(object):
    """A point that walks through a PerlinGrid flow field, recording its path."""

    def __init__(self, pos, grid, stepsize=1):
        self.pos = Point(pos)
        self.grid = grid
        self.stepsize = stepsize
        self.n_step = 0
        # History of visited positions; grows by one per in-bounds step.
        self.pts = [self.pos]
        self.in_bounds = True

    @property
    def x(self):
        return self.pos.x

    @property
    def y(self):
        return self.pos.y

    @property
    def xy(self):
        return np.array([self.x, self.y])

    @property
    def line(self):
        """The trajectory so far, as a LineString."""
        return LineString(self.pts)

    def get_angle(self):
        # Sample the flow field's heading at the current position.
        self.a = self.grid.noise(x=self.x, y=self.y)

    def check_if_in_bounds(self):
        self.in_bounds = self.grid.p.contains(self.pos)

    def calc_step(self):
        # Convert the sampled angle into an xy displacement.
        self.get_angle()
        self.dx = np.cos(self.a) * self.stepsize
        self.dy = np.sin(self.a) * self.stepsize

    def step(self):
        """Advance one step if still inside the grid polygon; otherwise stay put."""
        self.check_if_in_bounds()
        if not self.in_bounds:
            return
        self.calc_step()
        self.pos = sa.translate(self.pos, xoff=self.dx, yoff=self.dy)
        self.pts.append(self.pos)
# -
# + Collapsed="false"
paper_size = '11x14 inches'
border:float=20
paper = utils.Paper(paper_size)
drawbox = paper.get_drawbox(border)
buffer_style = 2
# + Collapsed="false"
poly = drawbox
pg = PerlinGrid(poly, xstep=1, ystep=1, lod=10, falloff=None, noise_scale=0.053, noiseSeed=5)
# +
start_point = pg.p.centroid
start_buffer = 10
n_nodes = 400
n_steps = 260
circ = start_point.buffer(start_buffer).boundary
pts = [circ.interpolate(d, normalized=True) for d in np.linspace(0., 1., n_nodes)]
particles = [Particle(pos=pos, grid=pg, stepsize=1) for pos in pts if pg.p.contains(pos)]
linestrings = []
_ls = LineString([p.pos for p in particles])
nodes = np.array(_ls)
ls = BezierCurve(nodes).linestring
linestrings.append(ls)
for ii in tqdm(range(n_steps)):
for p in particles:
p.pos = sa.translate(p.pos, xoff=0.3, yoff=0.1)
p.step()
_ls = LineString([p.pos for p in particles])
nodes = np.array(_ls)
ls = BezierCurve(nodes = nodes).linestring
linestrings.append(ls)
layer = MultiLineString(linestrings)
layer = gp.make_like(layer, drawbox)
# -
lb = layer.buffer(0.25, join_style=2, cap_style=2).boundary
# + Collapsed="false"
sk = vsketch.Vsketch()
sk.size(paper.page_format_mm)
sk.scale('1mm')
sk.penWidth('0.25mm')
sk.stroke(1)
sk.geometry(lb)
sk.vpype('linesimplify splitall linemerge -t 0.4 linesort')
sk.display(color_mode='layer')
# +
savepath = '/Users/naka/code/side/plotter_images/oned_outputs/0342_perlin_repeater_buffered_less.svg'
sk.save(savepath)
# -
# ## try 2
# + Collapsed="false"
paper_size = '11x14 inches'
border:float=35
paper = utils.Paper(paper_size)
drawbox = paper.get_drawbox(border)
buffer_style = 2
# + Collapsed="false"
poly = drawbox
pg = PerlinGrid(poly, xstep=1, ystep=1, lod=10, falloff=None, noise_scale=0.053, noiseSeed=5)
# +
# Same particle-advection construction as above, with fewer steps and a
# stronger per-step drift.
start_point = pg.p.centroid
start_buffer = 10
n_nodes = 400
n_steps = 70
circ = start_point.buffer(start_buffer).boundary
pts = [circ.interpolate(d, normalized=True) for d in np.linspace(0., 1., n_nodes)]
particles = [Particle(pos=pos, grid=pg, stepsize=1) for pos in pts if pg.p.contains(pos)]
linestrings = []
_ls = LineString([p.pos for p in particles])
nodes = np.array(_ls)
ls = BezierCurve(nodes).linestring
linestrings.append(ls)
for ii in tqdm(range(n_steps)):
    for p in particles:
        p.pos = sa.translate(p.pos, xoff=1.3, yoff=0.4)
        p.step()
    _ls = LineString([p.pos for p in particles])
    nodes = np.array(_ls)
    ls = BezierCurve(nodes = nodes).linestring
    linestrings.append(ls)
layer = MultiLineString(linestrings)
layer = gp.make_like(layer, drawbox)
# -
# Randomized fill parameters; note angles_gen is immediately overridden with a
# constant-80 generator, leaving the uniform draw unused.
buffer_gen = ss.uniform(loc=1, scale=6).rvs
d_buffer_gen = functools.partial(np.random.uniform, low=-0.8, high=-0.2)
angles_gen = ss.uniform(loc=0, scale=360).rvs
angles_gen = gp.make_callable(80)
d_translate_factor_gen = ss.uniform(loc=0.2, scale=0.6).rvs
fills = []
all_polys = Polygon()
# First fill pass (L1): occlusion-aware hatching -- each stroke's fill is
# clipped against the union of everything buffered so far.
for l in layer:
    p = l.buffer(0.5, cap_style=2, join_style=3)
    p = p.buffer(buffer_gen(), cap_style=2, join_style=2)
    stp = gp.ScaleTransPrms(d_buffer=d_buffer_gen(),angles=angles_gen(),d_translate_factor=d_translate_factor_gen(), n_iters=300)
    stp.d_buffers += np.random.uniform(-0.15, 0.15, size=stp.d_buffers.shape)
    P = gp.Poly(p)
    P.fill_scale_trans(**stp.prms)
    visible_area = p.difference(all_polys)
    visible_fill = P.fill.intersection(visible_area.buffer(1e-6))
    fills.append(visible_fill)
    all_polys = so.unary_union([all_polys, p])
L1 = gp.merge_LineStrings([f for f in fills if f.length > 0.1])
# Second fill pass (L2): same structure, deeper (more negative) d_buffer range.
buffer_gen = ss.uniform(loc=1, scale=6).rvs
d_buffer_gen = functools.partial(np.random.uniform, low=-2, high=-1.2)
angles_gen = ss.uniform(loc=0, scale=360).rvs
angles_gen = gp.make_callable(80)
d_translate_factor_gen = ss.uniform(loc=0.2, scale=0.6).rvs
fills = []
all_polys = Polygon()
for l in layer:
    p = l.buffer(0.5, cap_style=2, join_style=3)
    p = p.buffer(buffer_gen(), cap_style=2, join_style=2)
    stp = gp.ScaleTransPrms(d_buffer=d_buffer_gen(),angles=angles_gen(),d_translate_factor=d_translate_factor_gen(), n_iters=300)
    stp.d_buffers += np.random.uniform(-0.15, 0.15, size=stp.d_buffers.shape)
    P = gp.Poly(p)
    P.fill_scale_trans(**stp.prms)
    visible_area = p.difference(all_polys)
    visible_fill = P.fill.intersection(visible_area.buffer(1e-6))
    fills.append(visible_fill)
    all_polys = so.unary_union([all_polys, p])
L2 = gp.merge_LineStrings([f for f in fills if f.length > 0.1])
# + Collapsed="false"
sk = vsketch.Vsketch()
sk.size(paper.page_format_mm)
sk.scale('1mm')
sk.penWidth('0.25mm')
sk.stroke(1)
sk.geometry(L1)
sk.stroke(2)
sk.geometry(L1)
sk.vpype('linesimplify linemerge linesort')
sk.display(color_mode='layer')
# +
savepath = '/Users/naka/code/side/plotter_images/oned_outputs/0345_perlin_repeater_buffered_fills_2color.svg'
sk.save(savepath)
# -
# ## try 2
# + Collapsed="false"
paper_size = '11x14 inches'
border:float=35
paper = utils.Paper(paper_size)
drawbox = paper.get_drawbox(border)
buffer_style = 2
# + Collapsed="false"
poly = drawbox
pg = PerlinGrid(poly, xstep=1, ystep=1, lod=10, falloff=None, noise_scale=0.053, noiseSeed=5)
# +
# Same advection construction as the previous section.
start_point = pg.p.centroid
start_buffer = 10
n_nodes = 400
n_steps = 70
circ = start_point.buffer(start_buffer).boundary
pts = [circ.interpolate(d, normalized=True) for d in np.linspace(0., 1., n_nodes)]
particles = [Particle(pos=pos, grid=pg, stepsize=1) for pos in pts if pg.p.contains(pos)]
linestrings = []
_ls = LineString([p.pos for p in particles])
nodes = np.array(_ls)
ls = BezierCurve(nodes).linestring
linestrings.append(ls)
for ii in tqdm(range(n_steps)):
    for p in particles:
        p.pos = sa.translate(p.pos, xoff=1.3, yoff=0.4)
        p.step()
    _ls = LineString([p.pos for p in particles])
    nodes = np.array(_ls)
    ls = BezierCurve(nodes = nodes).linestring
    linestrings.append(ls)
layer = MultiLineString(linestrings)
layer = gp.make_like(layer, drawbox)
# -
# "Spike out" the strokes: a tiny outward-then-inward buffer welds
# near-touching lines before taking the boundary; then drop tiny fragments.
layer = layer.buffer(0.01, join_style=2, cap_style=2).buffer(-0.01).boundary
layer = [l for l in layer if l.length>0.1]
buffer_gen = ss.uniform(loc=1, scale=6).rvs
d_buffer_gen = functools.partial(np.random.uniform, low=-0.8, high=-0.2)
angles_gen = ss.uniform(loc=0, scale=360).rvs
angles_gen = gp.make_callable(80)
d_translate_factor_gen = ss.uniform(loc=0.2, scale=0.6).rvs
fills = []
all_polys = Polygon()
# Occlusion-aware fill pass (same pattern as the section above).
for l in tqdm(layer):
    p = l.buffer(0.5, cap_style=2, join_style=3)
    p = p.buffer(buffer_gen(), cap_style=2, join_style=2)
    stp = gp.ScaleTransPrms(d_buffer=d_buffer_gen(),angles=angles_gen(),d_translate_factor=d_translate_factor_gen(), n_iters=300)
    stp.d_buffers += np.random.uniform(-0.15, 0.15, size=stp.d_buffers.shape)
    P = gp.Poly(p)
    P.fill_scale_trans(**stp.prms)
    visible_area = p.difference(all_polys)
    visible_fill = P.fill.intersection(visible_area.buffer(1e-6))
    fills.append(visible_fill)
    all_polys = so.unary_union([all_polys, p])
L1 = gp.merge_LineStrings([f for f in fills if f.length > 0.1])
# + Collapsed="false"
sk = vsketch.Vsketch()
sk.size(paper.page_format_mm)
sk.scale('1mm')
sk.penWidth('0.25mm')
sk.stroke(1)
sk.geometry(L1)
# sk.stroke(2)
# sk.geometry(L1)
sk.vpype('linesimplify linemerge linesort')
sk.display(color_mode='layer')
# +
savepath = '/Users/naka/code/side/plotter_images/oned_outputs/0346_perlin_repeater_buffered_fills_spikedout.svg'
sk.save(savepath)
# -
# ## try 2
# + Collapsed="false"
paper_size = '11x14 inches'
border:float=35
paper = utils.Paper(paper_size)
drawbox = paper.get_drawbox(border)
buffer_style = 2
# + Collapsed="false"
poly = drawbox
pg = PerlinGrid(poly, xstep=1, ystep=1, lod=10, falloff=None, noise_scale=0.053, noiseSeed=5)
# +
start_point = pg.p.centroid
start_buffer = 6
n_nodes = 200
circ = start_point.buffer(start_buffer).boundary
# Open seed arc: interpolation runs 0.1..0.99 rather than the full circle.
pts = [circ.interpolate(d, normalized=True) for d in np.linspace(0.1, 0.99, n_nodes)]
particles = [Particle(pos=pos, grid=pg, stepsize=1) for pos in pts if pg.p.contains(pos)]
_ls = LineString([p.pos for p in particles])
nodes = np.array(_ls)
ls = BezierCurve(nodes).linestring
# NOTE(review): this appends to the `linestrings` list left over from an
# earlier cell and is then discarded when the next cell rebinds
# linestrings = [] -- presumably a leftover; confirm before reuse.
linestrings.append(ls)
# +
n_steps = 80
linestrings = []
for ii in tqdm(range(n_steps)):
    for p in particles:
        # Pure downward drift (yoff only) plus the flow-field step.
        p.pos = sa.translate(p.pos, xoff=0., yoff=1)
        p.step()
    _ls = LineString([p.pos for p in particles])
    nodes = np.array(_ls)
    ls = BezierCurve(nodes = nodes).linestring
    linestrings.append(ls)
layer = MultiLineString(linestrings)
layer = gp.make_like(layer, drawbox)
# -
# Weld near-touching strokes with a small round buffer before filling.
d=1e-1
mlayer = layer.buffer(d, join_style=1, cap_style=1).buffer(-d).boundary
mlayer = [l for l in mlayer if l.length>0.01]
buffer_gen = ss.uniform(loc=1, scale=6).rvs
d_buffer_gen = functools.partial(np.random.uniform, low=-0.8, high=-0.2)
angles_gen = ss.uniform(loc=0, scale=360).rvs
angles_gen = gp.make_callable(80)
d_translate_factor_gen = ss.uniform(loc=0.2, scale=0.6).rvs
# Length-dependent generators override the random ones above: longer lines
# get a wider buffer and a deeper d_buffer (keyed on log10 of line length).
buffer_gen = functools.partial(np.interp, xp=[-2, 1], fp=[1,3])
d_buffer_gen = functools.partial(np.interp, xp=[-1, 1], fp=[-0.2, -0.7])
fills = []
all_polys = Polygon()
for l in tqdm(mlayer):
    # p = l.buffer(0.5, cap_style=2, join_style=3)
    p = l.buffer(buffer_gen(np.log10(l.length)), cap_style=2, join_style=2)
    stp = gp.ScaleTransPrms(
        d_buffer=d_buffer_gen(np.log10(l.length)),
        angles=angles_gen(),
        d_translate_factor=d_translate_factor_gen(),
        n_iters=300)
    stp.d_buffers += np.random.uniform(-0.15, 0.15, size=stp.d_buffers.shape)
    P = gp.Poly(p)
    P.fill_scale_trans(**stp.prms)
    visible_area = p.difference(all_polys)
    visible_fill = P.fill.intersection(visible_area.buffer(1e-6))
    fills.append(visible_fill)
    all_polys = so.unary_union([all_polys, p])
L2 = gp.merge_LineStrings([f for f in fills if f.length > 0.1])
# +
# Second, shorter advection pass continuing from the particles' current positions.
n_steps = 55
linestrings = []
for ii in tqdm(range(n_steps)):
    for p in particles:
        p.pos = sa.translate(p.pos, xoff=0., yoff=0.6)
        p.step()
    _ls = LineString([p.pos for p in particles])
    nodes = np.array(_ls)
    ls = BezierCurve(nodes = nodes).linestring
    linestrings.append(ls)
layer = MultiLineString(linestrings)
layer = gp.make_like(layer, drawbox)
# -
buffer_gen = ss.uniform(loc=1, scale=6).rvs
d_buffer_gen = functools.partial(np.random.uniform, low=-0.8, high=-0.2)
angles_gen = ss.uniform(loc=0, scale=360).rvs
angles_gen = gp.make_callable(80)
d_translate_factor_gen = ss.uniform(loc=0.2, scale=0.6).rvs
fills = []
# all_polys = Polygon()
# NOTE(review): all_polys is NOT reset here (line above is commented out), so
# this pass is also occluded by the L2 geometry built earlier -- appears
# deliberate for layering; confirm.
for l in tqdm(layer):
    p = l.buffer(0.5, cap_style=2, join_style=3)
    p = p.buffer(buffer_gen(), cap_style=2, join_style=2)
    stp = gp.ScaleTransPrms(d_buffer=d_buffer_gen(),angles=angles_gen(),d_translate_factor=d_translate_factor_gen(), n_iters=300)
    stp.d_buffers += np.random.uniform(-0.15, 0.15, size=stp.d_buffers.shape)
    P = gp.Poly(p)
    P.fill_scale_trans(**stp.prms)
    visible_area = p.difference(all_polys)
    visible_fill = P.fill.intersection(visible_area.buffer(1e-6))
    fills.append(visible_fill)
    all_polys = so.unary_union([all_polys, p])
L1 = gp.merge_LineStrings([f for f in fills if f.length > 0.1])
# + Collapsed="false"
sk = vsketch.Vsketch()
sk.size(paper.page_format_mm)
sk.scale('1mm')
sk.penWidth('0.25mm')
sk.stroke(1)
sk.geometry(L1)
sk.stroke(2)
sk.geometry(L2)
sk.vpype('linesimplify linemerge linesort')
sk.display(color_mode='layer')
# +
savepath = '/Users/naka/code/side/plotter_images/oned_outputs/0348_perlin_repeater_buffered_fills_spikedout_2col.svg'
sk.save(savepath)
# -
| scratch/035_bez.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Django Shell-Plus
# language: python
# name: django_extensions
# ---
# Django shell-plus seeding script: creates an initial Survey, a superuser,
# and a linked Question tree. Model classes (Survey, User, Catageory,
# Question, BooleanAnswer, SurveyItem) are auto-imported by shell_plus.
o, _ = Survey.objects.get_or_create(name='Initial', version='0.1')
o
u, _ = User.objects.get_or_create(username='mikewolfd')
# '<PASSWORD>' is a redacted placeholder from the dump -- substitute a real secret.
u.set_password('<PASSWORD>')
u.is_superuser = True
u.is_active = True
u.is_staff = True
u.save()
u
# NOTE: 'Catageory' is the (misspelled) model name in this project.
c, _ = Catageory.objects.get_or_create(name='Medical')
q , _ =Question.objects.get_or_create(text='Do you have underlying medical complications?', catageory=c, choices=BooleanAnswer.objects.get_or_create(choices={False: 'No', True: 'Yez'})[0])
Question.objects.all()
BooleanAnswer.objects.all()
q = Question.objects.last()
# Attach the new question under the first question (tree relationship,
# cf. get_leafnodes below).
q.parent = Question.objects.first()
q.save()
q.parent.get_leafnodes()
q.children.all()
# NOTE(review): .add() with no arguments does nothing -- presumably meant
# q.survey.add(o); confirm intent.
q.survey.add()
SurveyItem.objects.get_or_create(survey=o, question=q, order=1)
Survey.objects.first().surveyitem_set.all()
SurveyItem.objects.first().__dict__
| notebook/.ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Scientist
# ### Imports, load data from folders, instantiate Data Scientist
# Drives a private-set-intersection (PSI) protocol with two remote Data
# Owners over duet connections, then builds a vertically federated dataset
# from the tensor pointers they share.
import sys
sys.path.append('../../src/')
import psi
from roles import DataScientist
from psi.ds_psi import PsiProtocolDS
from psi.ds_psi import DSPsiStar
from datasets import SampleSetPointer
#change with "data_file" or your own folder when training on a complete set of data
ds = DataScientist(label_dir="data_file_subset/labellist", index_dir="data_file_subset/indexlist")
# ### Connect to Data Owner 1
# Go to Data Owner 1's notebook, run the connect command, then run the command below
duet_1 = ds.connect_to_duet(name="do1")
# ### Connect to Data Owner 2
# Go to Data Owner 2's notebook, run the connect command, then run the command below
duet_2 = ds.connect_to_duet(name="do2")
# ### Initiate the PSI protocol, share the initial info
# Go to Data Owners' notebooks, run the set protocol command to share the data, then check the respective duet's store.
#
# The information shared would be False Positive Rate and a boolean corresponding to whether the full data intersection would be revealed or not
duet_1.store.pandas
duet_2.store.pandas
# ### Set the protocol, by sending the len of our items
# Data Scientist sends the len of its items to all data owners
ds.set_protocol(DSPsiStar) #send the len of our items
# ### Setup the protocol
#
# Go to Data Owners' notebook, run the setup command to share the setup of the PSI
#let's see the setup message from data owner1
duet_1.store.pandas
#let's see the setup message from data owner2
duet_2.store.pandas
# Let's now respond to all data owners our response to complete the PSI setup.
# We basically send a request to the Data Owners to get the intersection.
#now let's respond to all with setup, and send the request to learn
ds.protocol.global_setup()
# ### Get the intersection
#
# Go to Data Owners' notebooks. They should now send us the response containing the ids in the intersection
#the data owners should have sent us the response now
duet_1.store.pandas
duet_2.store.pandas
# Fetch the response from all the data owners, and actually get the ids in the intersection
#now let's fetch the responses and get the intersections (local)
ds.protocol.global_response()
# ### Send global intersection IDs to data owners
#let's send to every data owners the intersection elements (the ones they should share)
ds.protocol.global_intersection()
# ### Data for training
# Go to the data owners' notebooks. They have figured out which IDs are needed for training. They fetch the elements corresponding to those Ids and they share the pointers to those tensors (and only those)
#the data owner sent us the elements for the intersection
duet_1.store.pandas
duet_2.store.pandas
# +
#fetch the pointers (remote tensor handles, not local data)
ids_1 = duet_1.store["ids"]
values_1 = duet_1.store["values"]
labels_1 = duet_1.store["labels"]
ids_2 = duet_2.store["ids"]
values_2 = duet_2.store["values"]
labels_2 = duet_2.store["labels"]
# -
ids_1, values_1
# ### Example of data loading of remote data (i.e. tensor pointers)
# +
#create dataset
ds_1 = SampleSetPointer(labels_1, values_1, ids_1, name="do1")
ds_2 = SampleSetPointer(labels_2, values_2, ids_2, name="do2")
# -
# NOTE(review): VerticalFederatedDataset is used below but never imported in
# this notebook -- confirm where it is expected to come from before running.
vfd = VerticalFederatedDataset([ds_1, ds_2], n_samples=60)
vfd
# +
from torch.utils.data import DataLoader
dload_vertical = DataLoader(vfd, batch_size=4, collate_fn=vfd.collate_fn)
# Peek at the first 3 batches only.
i = 0
for batch_idx, input in enumerate(dload_vertical):
    print(input)
    i += 1
    if i == 3:
        break
# -
| examples/dual-headed-nn/examples/PSI_Linkage/DataScientist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Fetch building geometry (geojson) from qua-kit and turn each non-"special"
# feature's top surface into a shapely footprint polygon.
import json
import requests
from shapely.geometry import Polygon
url = "https://qua-kit.ethz.ch/exercise/33/1686/geometry" # This is the geojson data to read
# + language="bash"
# touch toyeg.geojson
# -
# import geometry from url
# (optional) save it to geojson locally
f = requests.get(url).text
with open('toyeg.geojson', 'w') as f_geo:
    f_geo.write(f)
fp = []
b = json.loads(f) # load: convert json --> python object
for i, feature in enumerate(b["geometry"]["features"]):
    p = feature["properties"] # p store all the properties
    if 'special' not in p.keys():
        cg = feature['geometry']['coordinates'] # cg is the geometry coordinates of all surfaces
        surface_t = cg[0][0] # can use other method to detect if it is really the top
        # Drop the z coordinate, keep (x, y) pairs only.
        surface_xy = [(it[0],it[1]) for it in surface_t]
        footprint = Polygon(surface_xy)
        # print(footprint.area)
        # set new list that contains the info
        fp.append({"properties":feature['properties'], "polygon": footprint})
# test for Python Geo Interface
from shapely.geometry import shape
# NOTE(review): cascaded_union is deprecated in shapely >= 1.8; unary_union is
# the modern replacement.
from shapely.ops import cascaded_union
b = json.loads(f)
shapeunion = []
for i, feature in enumerate(b["geometry"]["features"]):
    if 'special' not in feature["properties"].keys():
        # NOTE(review): shape() expects a geo-interface mapping (with a 'type'
        # key), not a raw coordinate list -- confirm this works on this data.
        d = shape(feature['geometry']["coordinates"][0])
        shapeunion.append(d)
        print(d)
cascaded_union(shapeunion)
for pp in fp:
    poly = pp['polygon']
    print(poly.area)
print(fp)
print(fp[0]['polygon'].intersects(fp[10]['polygon']))
# +
from shapely.geometry import Polygon
# Hand-pasted sample footprints (coordinates copied from the geojson);
# shape2/shape3 carry a constant z value (45 / 90) as a third element.
oneshape1 = [
    [
        -154.53488159179688,
        113.87541961669922
    ],
    [
        -170.66763305664062,
        113.57225036621094
    ],
    [
        -170.6676483154297,
        83.33378601074219
    ],
    [
        -137.55303955078125,
        83.62376403808594
    ],
    [
        -138.40211486816406,
        114.1654052734375,
    ],
]
shape2=[
    [
        227.1302490234375,
        -65.12775421142578,
        45
    ],
    [
        210.9974822998047,
        -65.43091583251953,
        45
    ],
    [
        210.9974822998047,
        -95.66939544677734,
        45
    ],
    [
        244.1121063232422,
        -95.37940216064453,
        45
    ],
    [
        243.2630157470703,
        -64.8377456665039,
        45
    ],
    [
        227.1302490234375,
        -65.12775421142578,
        45
    ]]
shape3 = [
    [
        215.075439453125,
        134.30941772460938,
        90
    ],
    [
        214.22634887695312,
        103.47777557373047,
        90
    ],
    [
        247.34097290039062,
        103.76776885986328,
        90
    ],
    [
        247.34097290039062,
        73.52930450439453,
        90
    ],
    [
        281.3047180175781,
        74.41246795654297,
        90
    ],
    [
        279.6065368652344,
        135.49575805664062,
        90
    ],
    [
        215.075439453125,
        134.30941772460938,
        90
    ]
]
# Strip the z component from shape3.
shape3real = [(it[0],it[1]) for it in shape3]
print(shape3real)
# -
# NOTE(review): `surface_xy` here is whatever the earlier parsing loop left
# behind (its last iteration) -- not the shapes defined in this cell.
polygon = Polygon(surface_xy)
polygon
| .ipynb_checkpoints/geometry_model-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Tennis stats project: load the dataset and eyeball candidate features via
# correlations and scatter plots before modeling.
#Importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# load and investigate the data here:
df = pd.read_csv('tennis_stats.csv')
df.head()
df.columns
df.info()
df.describe()
# exploratory analysis
# Correlation matrix plus a handful of feature-vs-outcome scatter plots.
print(df.corr())
plt.scatter(df['FirstServeReturnPointsWon'],df['Winnings'])
plt.title('FirstServeReturnPointsWon vs Winnings')
plt.xlabel('FirstServeReturnPointsWon')
plt.ylabel('Winnings')
plt.show()
plt.clf()
plt.scatter(df['BreakPointsOpportunities'],df['Winnings'])
plt.title('BreakPointsOpportunities vs Winnings')
plt.xlabel('BreakPointsOpportunities')
plt.ylabel('Winnings')
plt.show()
plt.clf()
plt.scatter(df['BreakPointsSaved'],df['Winnings'])
plt.title('BreakPointsSaved vs Winnings')
plt.xlabel('BreakPointsSaved')
plt.ylabel('Winnings')
plt.show()
plt.clf()
plt.scatter(df['TotalPointsWon'],df['Ranking'])
plt.title('TotalPointsWon vs Ranking')
plt.xlabel('TotalPointsWon')
plt.ylabel('Ranking')
plt.show()
plt.clf()
plt.scatter(df['TotalServicePointsWon'],df['Wins'])
plt.title('TotalServicePointsWon vs Wins')
plt.xlabel('TotalServicePointsWon')
plt.ylabel('Wins')
plt.show()
plt.clf()
#
# Use one feature from the dataset to build a single feature linear regression model on the data. Your model, at this point, should use only one feature and predict one of the outcome columns. Before training the model, split your data into training and test datasets so that you can evaluate your model on the test set. How does your model perform? Plot your model’s predictions on the test set against the actual outcome variable to visualize the performance.
# +
## single feature linear regression (FirstServeReturnPointsWon)
# select features and value to predict
features = df[['FirstServeReturnPointsWon']]
winnings = df[['Winnings']]
# -
# train, test, split the data
# NOTE(review): no random_state is set in any split below, so reported scores
# differ on every run.
features_train, features_test, winnings_train, winnings_test = train_test_split(features, winnings, train_size = 0.8)
# create and train model on training data
model = LinearRegression()
model.fit(features_train,winnings_train)
# score model on test data (R^2)
print('Predicting Winnings with FirstServeReturnPointsWon Test Score:', model.score(features_test,winnings_test))
# make predictions with model
winnings_prediction = model.predict(features_test)
# plot predictions against actual winnings
plt.scatter(winnings_test,winnings_prediction, alpha=0.4)
plt.title('Predicted Winnings vs. Actual Winnings - 1 Feature')
plt.xlabel('Actual Winnings')
plt.ylabel('Predicted Winnings')
plt.show()
plt.clf()
# +
## single feature linear regression (BreakPointsOpportunities)
# select features and value to predict
features = df[['BreakPointsOpportunities']]
winnings = df[['Winnings']]
# +
# train, test, split the data
features_train, features_test, winnings_train, winnings_test = train_test_split(features, winnings, train_size = 0.8)
# create and train model on training data
model = LinearRegression()
model.fit(features_train,winnings_train)
# score model on test data
print('Predicting Winnings with BreakPointsOpportunities Test Score:', model.score(features_test,winnings_test))
# +
# make predictions with model
winnings_prediction = model.predict(features_test)
# plot predictions against actual winnings
plt.scatter(winnings_test,winnings_prediction, alpha=0.4)
plt.title('Predicted Winnings vs. Actual Winnings - 1 Feature')
plt.xlabel('Actual Winnings')
plt.ylabel('Predicted Winnings')
plt.show()
plt.clf()
# +
# two feature linear regression
# select features and value to predict
features = df[['BreakPointsOpportunities','FirstServeReturnPointsWon']]
winnings = df[['Winnings']]
# +
# train, test, split the data
features_train, features_test, winnings_train, winnings_test = train_test_split(features, winnings, train_size = 0.8)
# create and train model on training data
model = LinearRegression()
model.fit(features_train,winnings_train)
# score model on test data
print('Predicting Winnings with 2 Features Test Score:', model.score(features_test,winnings_test))
# make predictions with model
winnings_prediction = model.predict(features_test)
# -
# plot predictions against actual winnings
plt.scatter(winnings_test,winnings_prediction, alpha=0.4)
plt.title('Predicted Winnings vs. Actual Winnings - 2 Features')
plt.xlabel('Actual Winnings')
plt.ylabel('Predicted Winnings')
plt.show()
plt.clf()
# +
## multiple features linear regression
# select features and value to predict
features = df[['FirstServe','FirstServePointsWon','FirstServeReturnPointsWon','SecondServePointsWon','SecondServeReturnPointsWon','Aces','BreakPointsConverted','BreakPointsFaced','BreakPointsOpportunities','BreakPointsSaved','DoubleFaults','ReturnGamesPlayed','ReturnGamesWon','ReturnPointsWon','ServiceGamesPlayed','ServiceGamesWon','TotalPointsWon','TotalServicePointsWon']]
winnings = df[['Winnings']]
# +
# train, test, split the data
features_train, features_test, winnings_train, winnings_test = train_test_split(features, winnings, train_size = 0.8)
# create and train model on training data
model = LinearRegression()
model.fit(features_train,winnings_train)
# score model on test data
print('Predicting Winnings with Multiple Features Test Score:', model.score(features_test,winnings_test))
# +
# make predictions with model
winnings_prediction = model.predict(features_test)
# plot predictions against actual winnings
plt.scatter(winnings_test,winnings_prediction, alpha=0.4)
plt.title('Predicted Winnings vs. Actual Winnings - Multiple Features')
plt.xlabel('Actual Winnings')
plt.ylabel('Predicted Winnings')
plt.show()
plt.clf()
# -
| Tennis_Ace.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Lasso path using LARS
#
#
# Computes Lasso Path along the regularization parameter using the LARS
# algorithm on the diabetes dataset. Each color represents a different
# feature of the coefficient vector, and this is displayed as a function
# of the regularization parameter.
#
#
#
# +
# scikit-learn example: compute and plot the Lasso coefficient path with LARS
# on the diabetes dataset.
print(__doc__)
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
# lars_path returns (alphas, active, coefs); only the coefficient path is kept.
_, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
# x axis: fraction of the final L1 norm reached at each step of the path.
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| 01 Machine Learning/scikit_examples_jupyter/linear_model/plot_lasso_lars.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/visiont3lab/data-visualization/blob/master/courses/02_Pandas/Pandas_Esercizio_Soluzione.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="sMM63f1cW1wJ" colab_type="text"
# ### Descrizione
#
# Datasets:
# * dati_province = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-province/dpc-covid19-ita-province.csv"
# * dati_regioni = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni.csv"
# * dati_italia = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-andamento-nazionale/dpc-covid19-ita-andamento-nazionale.csv"
#
# Obbiettivo: Utilizzando i dati provinciali analizzare l'andamento dei contagiati nella propria provincia nel tempo.
#
# 1. Plottare l'andamento nel tempo dei contagiati della propria provincia.
# 2. Comparare l'andamento nel tempo dei contagiati delle prime 3 provincie della nostra regione (Emilia-Romagna) con quelli di una provincia appartenente sempre alla nostra regione.
# 3. Creare un grafico a torta con le percentuali di contagiati per ogni provincia apparente alla nostra regione (Emilia-Romagna)
# + [markdown] id="Ks4Jr2gmW9sH" colab_type="text"
# ## Importare Librerie
# + id="u1sWtG4WWjc9" colab_type="code" colab={}
import matplotlib.pyplot as plt
import pandas as pd
import os
directory = "assets/images/"
if not os.path.exists(directory):
os.makedirs(directory)
# + [markdown] id="M-5kckZfXB4v" colab_type="text"
# ## Parte 1
# + id="xYfGRuqcWs46" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 592} outputId="738e31a1-3dba-4003-fd2d-d4041ec76ed0"
def plot_andamento_provincia_style(df, provincia):
    """Plot the cumulative COVID case count over time for one province.

    Parameters
    ----------
    df : pandas.DataFrame
        Provincial data with columns ``denominazione_provincia``, ``data``
        (datetime) and ``totale_casi``.
    provincia : str
        Province name used to filter the rows.

    Returns
    -------
    matplotlib.figure.Figure
        The figure, also saved to ``assets/images/fig1.png``.
    """
    df_new = df[df["denominazione_provincia"] == provincia]
    # Keep every other date as an x tick to avoid label overcrowding
    # (idiomatic slice replaces the original range(0, len, 2) append loop).
    new_lista = list(df_new["data"])[::2]
    with plt.style.context("dark_background"):
        # Style tweaks are scoped to this context so global rcParams stay clean.
        plt.rc("lines", linewidth=2, markersize=4, markerfacecolor="red")
        plt.rc("grid", color="red", alpha=0.3)
        plt.rc("axes", titlesize=20, titlecolor="cyan")
        plt.rc("font", family="fantasy", fantasy="Comic Neue", size=20)
        plt.rcParams["date.autoformatter.day"] = "%m-%d"
        ax = df_new.plot(kind="line", x="data", y="totale_casi", title="Andamento Provincia: " + provincia, grid=True, figsize=(20,9), style="o--c", xticks=new_lista)
        ax.set_xlabel("Data")
        ax.set_ylabel("Totale casi")
        ax.tick_params(axis="y", labelcolor="orange")
        ax.tick_params(axis="x", labelcolor="orange", labelsize=15)
        ax.legend(loc="best", title="Legenda")
        ax.figure.savefig("assets/images/fig1.png", transparent=False)
    return ax.figure
df = pd.read_csv("https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-province/dpc-covid19-ita-province.csv")
provincia = "Ravenna"
df["data"] = pd.to_datetime(df["data"])
# Esercizion 1)
fig1 = plot_andamento_provincia_style(df, provincia)
# + [markdown] id="x4cbyLxuXEi2" colab_type="text"
# ## Parte 2
# + id="ScPQiQFIWws5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="8411f1b6-6759-48df-de1c-acefc2fcd000"
def get_nomi_regioni(df):
    """Return the region names present in *df*, in first-appearance order."""
    regioni = df["denominazione_regione"].unique()
    return regioni.tolist()
def get_nomi_province(df, regione):
    """Return the province names of *regione* at the most recent date in *df*.

    The placeholder row 'In fase di definizione/aggiornamento' is dropped
    when present.  Bug fix: the original called ``res.remove(...)``
    unconditionally, which raises ValueError whenever the placeholder is
    absent at the latest date.
    """
    a = df[df["denominazione_regione"] == regione]
    # Latest date observed for this region (rows are assumed date-ordered,
    # as in the Protezione Civile CSV — TODO confirm for other sources).
    ultima_data = a.tail(1)["data"].values[0]
    res = list(a[a["data"] == ultima_data]["denominazione_provincia"])
    placeholder = 'In fase di definizione/aggiornamento'
    if placeholder in res:
        res.remove(placeholder)
    return res
def get_andamento_province(df, regione):
    """Plot the total-cases time series of every province of *regione*.

    Saves the figure to ``assets/images/fig2.png`` and returns it.
    """
    province = get_nomi_province(df, regione)
    # One column per province, indexed by the unique dates of the dataset.
    serie = {"data": df["data"].unique()}
    for provincia in province:
        serie[provincia] = list(df[df["denominazione_provincia"] == provincia]["totale_casi"])
    df_new = pd.DataFrame(serie)
    with plt.style.context("seaborn"):
        ax = df_new.plot(x="data", title="Andamento Province della regione " + regione, figsize=(20, 9))
        ax.figure.savefig("assets/images/fig2.png", transparent=False)
    return ax.figure
# Esercizio 2)
regione="Emilia-Romagna"
fig2 = get_andamento_province(df,regione)
# + [markdown] id="fTheCeZKXG9S" colab_type="text"
# ## Parte 3
# + id="RpgBdH5ZWzrJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 520} outputId="c8c9f7b9-71dc-4640-e8a7-20902f56ea41"
def pie_plot_province(df, regione):
    """Pie chart of total cases per province of *regione* at the latest date.

    Saves the figure to ``assets/images/fig3.png`` and returns it.
    """
    ultima_data = df["data"].iloc[-1]
    in_regione = df["denominazione_regione"] == regione
    alla_data = df["data"] == ultima_data
    con_nome = df["denominazione_provincia"] != "In fase di definizione/aggiornamento"
    selezione = df[in_regione & alla_data & con_nome]
    with plt.style.context("Solarize_Light2"):
        pie = selezione.set_index("denominazione_provincia")
        ax = pie.plot(kind="pie", y="totale_casi", autopct='%1.0f%%', figsize=(20, 9))
        ax.figure.savefig("assets/images/fig3.png", transparent=False)
    return ax.figure
# Check style
# print(plt.style.available)
# Esercizio 3
fig3 = pie_plot_province(df, regione)
# + [markdown] id="qsYuDaFAXRSB" colab_type="text"
# ## Website development
#
#
#
# + id="5v6AJ1NzXcTP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 404} outputId="fd81321d-48e9-4843-8501-2288fef320bc"
# !pip install dash flask_ngrok
# + id="dQvxXcMNXZf8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a597857d-5afb-4885-a3b0-64a721a419fe"
# %%writefile assets/typography.css
body {
padding: 20px 300px 0px 300px;
margin : 0px 0px 0px 0px;
/*padding : 0px 0px 0px 0px;*/
background-color: white;
}
# + id="-gfGMcWlXV0X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 404} outputId="327b0aa6-957a-423a-fd1b-a57ec01f5361"
import dash
import dash_html_components as html
from flask_ngrok import run_with_ngrok
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
run_with_ngrok(server)
app.layout = html.Div([
html.Div(
[
html.Img(src='/assets/images/fig1.png'),
]),
html.Div(
[
html.Img(src='/assets/images/fig2.png'),
]),
html.Div(
[
html.Img(src='/assets/images/fig3.png'),
])
],style={'backgroundColor': "rgb(255,255,255)", "margin": "0", "padding": "0"})
if __name__ == '__main__':
server.run()
#app.run_server() #debug=True, host="0.0.0.0", port=8900)
| content/pandas/Pandas_Esercizio_Soluzione.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introdução
# Enquanto que a análise de imagem em tons de cinza exibe a imagem em uma única matriz cujos valores dos pixels representam intensidade luminosa uma imagem exibida em um canal colorido é basicamente um conjunto $N$ imagens cujos os pixels de cada imagem são valores de intensidade no canal e a representação da imagem é a composição das três imagens.
#
# Existem diversos canais de cores, tais como os canais (i) RGB, traduzido por intensidades nas cores vermelho, azul e verde, e o canal (ii) HSI, traduzido por matiz (representação da cor), saturação da cor e intensidade da cor. Há características específicas entre os tipos de canais, alguns tendem a representar as cores como o olho humano vê, enquanto outros representam imagens coloridas de uma forma otimizada para o processamento computacional.
# # Discussões sobre o método
# O processo de conversão de utilizando uma imagem de entrada RGB para um canal HSV, é um método eficaz para extração de cor em imagens, visto que agora a cor será representada pelo canal H e sua satuação pelo canal S, e não pela composição de três canais R, G e B.
#
# O método consiste em: (i) realizar mudança de canal, (ii) detectar a cor da pele pré-definida, (iii) binarizar por meio de um limiar, utilizando a cor definida, resultando em uma máscara com a pele da imagem, e ao final (iv) operar a imagem com a máscara, o resultado é a segmentação da pele na imagem.
# + jupyter={"outputs_hidden": true}
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pywt
# -
# - Abrir imagem:
# + jupyter={"outputs_hidden": false}
#img = np.array(cv2.imread('imagemTeste.png'))
img = np.array(cv2.imread('navar.jpg'))
img = cv2.resize(img, (350, 300), 0, 0)
img_rbg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# + jupyter={"outputs_hidden": false}
plt.figure(1)
plt.imshow(img_rbg)
#plt.axis("off")
plt.title("Imagem original")
plt.show()
# -
# - Converter para HSV:
# + jupyter={"outputs_hidden": false}
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# + jupyter={"outputs_hidden": false}
plt.figure(figsize=(11,8))
plt.subplot(131)
plt.imshow(img_hsv[:,:,0],'gray')
plt.axis("off")
plt.title("Hue")
plt.subplot(132)
plt.imshow(img_hsv[:,:,1],'gray')
plt.axis("off")
plt.title("Saturation")
plt.subplot(133)
plt.imshow(img_hsv[:,:,2],'gray')
plt.axis("off")
plt.title("Intensity")
plt.show()
# + jupyter={"outputs_hidden": false}
# Sample one BGR pixel (row 150, col 125) and convert it to HSV to inspect
# the skin tone the threshold below should capture.
skin = np.uint8([[[img[150,125,0], img[150,125,1], img[150,125,2]]]])
print(skin)
skin_hsv = cv2.cvtColor(skin ,cv2.COLOR_BGR2HSV)
print(skin_hsv)
# + jupyter={"outputs_hidden": false}
# HSV bounds for skin; inRange yields a binary mask where every channel of a
# pixel falls inside [lower, upper].
# NOTE(review): the bounds look hand-tuned for this photo — confirm before reuse.
lower = np.uint8([5, 61, 173])
upper = np.uint8([180, 255, 255])
skinMask = cv2.inRange(img_hsv, lower, upper)
print(skinMask.shape)
# + jupyter={"outputs_hidden": false}
# Apply the mask to the original BGR image, then convert to RGB for matplotlib.
res = cv2.bitwise_and(img,img, mask=skinMask)
res_rbg = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
# + jupyter={"outputs_hidden": false}
plt.figure(figsize=(12,8))
plt.subplot(131)
plt.imshow(img_rbg)
plt.title("Imagem original")
plt.axis("off")
plt.subplot(132)
plt.imshow(skinMask,'gray')
plt.axis("off")
plt.title("Mask")
plt.subplot(133)
plt.imshow(res_rbg)
plt.title("Pele")
plt.axis("off")
plt.show()
# -
# Foi possível extrair parte da pele, utilizando um processamento de imagem colorida a partir de uma conversão para o canal HSV.
# # Conclusões
# O processamento de imagens coloridas provê ferramentas úteis para extração de informações em imagens coloridas e segmentação de objetos coloridos. São operações sólidas, que utilizam conceitos elementares do processamento de imagem.
| notebooks/Complementar/Outros_01/T16/T16 - Code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Iryna-Lytvynchuk/Data_Science/blob/main/Hw6.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="Bz2sZH0hEBfO" outputId="287b9531-0acc-4d99-8b56-020078a302f8"
# !pip install pyspark
# + id="x_4Or9ZQ5Nmu"
#1
import pandas as pd
import pyspark
import seaborn as sns
from pyspark.sql import SparkSession
from pyspark.sql.functions import mean, stddev, round as _round
from matplotlib import pyplot as plt
import seaborn as sns
# Create (or reuse) the single Spark session for this notebook.
spark_session = SparkSession.builder.getOrCreate()
# NOTE(review): spark.read.csv expects a filesystem/HDFS path; a Google Drive
# "view" URL will not be fetched as CSV — download the file locally and verify.
df = spark_session.read.csv("https://drive.google.com/file/d/1eTEbn59Q6ZwVbSIeqtZwYgOeJkta1ipi/view?usp=sharing")
df.show()
# Headerless read yields positional columns _c0, _c1, ...
# presumably _c2 is the user rating and _c4 the price — verify against the CSV.
df2 = df.filter("_c2 > 4.7")
df2.select(mean("_c4").alias("average price")).show()
# + id="kadeZr_oF5An"
df=spark_session.read.option("header","true").csv("bestsellers with categories.csv")
df.show()
df3 = df.select("Reviews")
df3.toPandas()[["Reviews"]].astype('float64').hist(column = "Reviews")
plt.show()
# + id="BPg3hX6jGKtV"
df=spark_session.read.option("header","true").csv("bestsellers with categories.csv")
df.show()
df_matrix = df.select("User Rating", "Reviews", "Price")
df_matrix.show()
# + id="ko31DZq_TT6O"
df=spark_session.read.option("header","true").csv("bestsellers with categories.csv")
df.show()
df_matrix = df.select(df["User Rating"].astype('float'), df.Reviews.astype('float'), df.Price.astype('float'))
df_matrix.show()
df_matrix.toPandas().corr()
df4 = df_matrix.toPandas()
sns.pairplot(df4)
plt.show()
# + id="V8ZGXDipBY0s"
#2
df = spark_session.read.csv("bestsellers with categories.csv")
df.show()
#Найдите автора с самым высоким рейтингом.
df.createOrReplaceTempView("My_df")
spark_session.sql("Select _c1, max(_c2) From My_df Group By _c1 Order by max(_c2) desc ").show()
#Найдите трех авторов с самым низким рейтингом.
df.createOrReplaceTempView("My_df")
spark_session.sql("Select _c1, min(_c2) From My_df Group By _c1 Order by min(_c2) asc Limit 3").show()
#Определите какая книга жанра "Fiction" имеет самый маленький рейтинг.
df.createOrReplaceTempView("My_df")
spark_session.sql("Select _c0, _c2, _c6 From My_df Where _c6 = 'Fiction' Order by _c2 asc").show()
#Определите какая книга жанра "Non Fiction" имеет самый высокий рейтинг.
df.createOrReplaceTempView("My_df")
spark_session.sql("Select _c0, _c2, _c6 From My_df Where _c6 = 'Non Fiction' Order by _c2 desc").show()
# + id="_n_xvBMPBqcP"
df=spark_session.read.option("header","true").csv("bestsellers with categories.csv")
df.show()
#Определите какой автор получил наибольшее количество рецензий.
df.createOrReplaceTempView("df3")
spark_session.sql("Select Author, Reviews From df3 Order by Reviews desc").show()
#Определите какой автор написал наибольшее количество книг за весь доступный в данных период.
df.createOrReplaceTempView("My_df")
spark_session.sql("Select Author, count(Author) From My_df Group By Author").show()
# + id="g0YCZYQ9sLZx"
#3
import matplotlib.pyplot as plt
import pandas as pd
import pyspark
from pyspark.sql import SparkSession
import seaborn as sns
spark_session = SparkSession.builder.getOrCreate()
df=spark_session.read.option("header","true").csv("bestsellers with categories.csv")
df.show()
df_avg = df.groupBy("Year").agg({"Reviews": "avg"})
df_avg.show()
df_avg.toPandas().plot(x="Year", y="avg(Reviews)", kind = "bar")
plt.show()
| Hw6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
df = pd.read_csv(filepath_or_buffer='https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv', sep='\t').iloc[:100,:]
df.head()
# ## Cuantos pedidos por cada orden?
mask = df['order_id'] == 1
df[mask]
df[mask].quantity
df[mask].quantity.sum()
mask = df['order_id'] == 2
df[mask]
df[mask].quantity
df[mask].quantity.sum()
# +
mask = df['order_id'] == 3
df[mask]
df[mask].quantity
df[mask].quantity.sum()
# -
# +
mask = df['order_id'] == pepa
df[mask]
df[mask].quantity
df[mask].quantity.sum()
# -
for pepa in [1,2,3]:
mask = df['order_id'] == pepa
df[mask]
df[mask].quantity
df[mask].quantity.sum()
for pepa in [1,2,3, 4, 5, 6,7,8,9]: # pepa = 1, # pepa = 2
mask = df['order_id'] == pepa # mask = df['order_id'] == 1, mask = df['order_id'] == 2
print(df[mask].quantity.sum())
n_productos_pedidos = []
for pepa in [1,2,3,4,5,6,7,8,9]: # pepa = 1, # pepa = 2
mask = df['order_id'] == pepa # mask = df['order_id'] == 1, mask = df['order_id'] == 2
n_productos_pedidos.append(df[mask].quantity.sum())
n_productos_pedidos
n_productos_pedidos = []
for pepa in [1,2,3,4,5,6,7,8,9]: # pepa = 1, # pepa = 2
mask = df['order_id'] == pepa # mask = df['order_id'] == 1, mask = df['order_id'] == 2
n_productos_pedidos.append(df[mask])
n_productos_pedidos[0]
n_productos_pedidos[1]
n_productos_pedidos[2]
n_productos_pedidos[3]
n_productos_pedidos[4]
n_productos_pedidos
dic['order 1'] = 5
dic
dic['order 2'] = 3
dic
dic_pedidos = {}
dic_pedidos
dic_pedidos['order 1'] = 4
dic_pedidos
dic_pedidos[1] = 4
dic_pedidos
dic_pedidos['clave'] = 89
dic_pedidos = {}
for pepa in [1,2,3,4,5,6,7,8,9]: # pepa = 1, # pepa = 2
mask = df['order_id'] == pepa # mask = df['order_id'] == 1, mask = df['order_id'] == 2
dic_pedidos[pepa] = df[mask].quantity.sum()
dic_pedidos
df
dic_pedidos = {}
for pepa in [1,2,3,4,5,6,7,8,9]: # pepa = 1, # pepa = 2
mask = df['order_id'] == pepa # mask = df['order_id'] == 1, mask = df['order_id'] == 2
dic_pedidos[pepa] = df[mask].quantity.sum()
dic_pedidos
df
df['order_id']
pepas = df['order_id'].unique()
for pepa in pepas:
dic_pedidos = {}
for pepa in array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44]): # pepa = 1, # pepa = 2
mask = df['order_id'] == pepa # mask = df['order_id'] == 1, mask = df['order_id'] == 2
dic_pedidos[pepa] = df[mask].quantity.sum()
dic_pedidos
dic_pedidos = {}
for pepa in df['order_id'].unique(): # pepa = 1, # pepa = 2
mask = df['order_id'] == pepa # mask = df['order_id'] == 1, mask = df['order_id'] == 2
dic_pedidos[pepa] = df[mask].quantity.sum()
dic_pedidos
dfg = df.groupby('order_id')
dfg.get_group(1).quantity.sum()
dfg.get_group(2)
df.quantity
for i in ...
import seaborn as sns
df = sns.load_dataset('mpg')
import matplotlib.pyplot as plt
plt.scatter(x='weight', y='mpg', data=df)
mask_usa = df.origin == 'usa'
mask_japan = df.origin == 'japan'
plt.scatter(x='weight', y='mpg', data=df[mask_usa])
plt.scatter(x='weight', y='mpg', data=df[mask_japan])
'x'
x
# +
mask = df.origin == x
plt.scatter(x='weight', y='mpg', data=df[mask])
# -
for x in ['usa','japan']:
mask = df.origin == x
plt.scatter(x='weight', y='mpg', data=df[mask])
df
paises = df.origin.unique()
paises
for x in paises:
mask = df.origin == x
plt.scatter(x='weight', y='mpg', data=df[mask])
for x in df.cylinders.unique():
mask = df.cylinders == x
plt.scatter(x='weight', y='mpg', data=df[mask])
dic = {}
for x in df.cylinders.unique():
mask = df.cylinders == x
plt.scatter(x='weight', y='mpg', data=df[mask])
dic[x] = len(df[mask].cylinders.unique())
dic
dfsel = df[df['order_id'] == 1]
n_pedidos = dfsel.shape[0]
dfsel
dfsel = df[df['order_id'] == 2]
n_pedidos = dfsel.shape[0]
dfsel
dfsel = df[df['order_id'] == 3]
n_pedidos = dfsel.shape[0]
dfsel
dic_pedidos = {}
dfsel = df[df['order_id'] == 1]
n_pedidos = dfsel.shape[0]
dfsel
dic_pedidos[1] = n_pedidos
dic_pedidos
dfsel = df[df['order_id'] == 2]
n_pedidos = dfsel.shape[0]
dfsel
dic_pedidos[2] = n_pedidos
dic_pedidos
dfsel = df[df['order_id'] == 3]
n_pedidos = dfsel.shape[0]
dfsel
dic_pedidos[3] = n_pedidos
dic_pedidos
dic_pedidos = {}
# +
dfsel = df[df['order_id'] == 3]
n_pedidos = dfsel.shape[0]
dfsel
dic_pedidos[3] = n_pedidos
dic_pedidos
# -
dic_pedidos = {}
# +
dfsel = df[df['order_id'] == pepa]
n_pedidos = dfsel.shape[0]
dfsel
dic_pedidos[pepa] = n_pedidos
dic_pedidos
# -
dic_pedidos = {}
for pepa in [1,2,3]:
dfsel = df[df['order_id'] == pepa]
n_pedidos = dfsel.shape[0]
dfsel
dic_pedidos[pepa] = n_pedidos
dic_pedidos
dic_pedidos
df
df.order_id
pedidos = df.order_id.unique()
dic_pedidos = {}
for pepa in pedidos:
dfsel = df[df['order_id'] == pepa]
n_pedidos = dfsel.shape[0]
dfsel
dic_pedidos[pepa] = n_pedidos
dic_pedidos
dic_pedidos
pd.Series(dic_pedidos)
df.order_id.value_counts()
df.order_id.value_counts(sort=False)
df.order_id.value_counts(sort=False)
# +
dic_pedidos = {}
for pepa in pedidos:
dfsel = df[df['order_id'] == pepa]
n_pedidos = dfsel.shape[0]
dic_pedidos[pepa] = n_pedidos
pd.Series(dic_pedidos)
# -
df.item_price.sum()
for i in df.item_price:
print(i)
i = '$2.39 '
clean_i = i.replace('$', '').replace(' ', '')
float_i = float(clean_i)
float_i
# +
i = '$2.39 '
clean_i = i.replace('$', '').replace(' ', '')
float_i = float(clean_i)
float_i
# -
i = '$2.39 '
clean_i = i.replace('$', '').replace(' ', '')
float_i = float(clean_i)
float_i
for i in df.item_price:
clean_i = i.replace('$', '').replace(' ', '')
float_i = float(clean_i)
float_i
float_i
# Convert the '$x.xx ' strings in item_price into floats, one per row.
lista_precios = []
for i in df.item_price:
    # Strip the dollar sign and trailing space before parsing the number.
    clean_i = i.replace('$', '').replace(' ', '')
    float_i = float(clean_i)
    lista_precios.append(float_i)
lista_precios
# Store the numeric prices as a new column so they can be summed.
df['precio'] = lista_precios
df
# item_price is still a string column, so .sum() concatenates the strings;
# the new numeric column sums correctly.
df.item_price.sum()
df.precio.sum()
df
for item in df.choice_description:
print(item)
item = '[Tomatillo-Red Chili Salsa (Hot), [Black Beans, Rice, Cheese, Sour Cream]]'
item = item.replace('[', '').replace(']', '')
item
lista_item = item.split(', ')
lista_item
# +
item = '[Tomatillo-Red Chili Salsa (Hot), [Black Beans, Rice, Cheese, Sour Cream]]'
item = item.replace('[', '').replace(']', '')
item
lista_item = item.split(', ')
lista_item
# -
item = '[Tomatillo-Red Chili Salsa (Hot), [Black Beans, Rice, Cheese, Sour Cream]]'
item = item.replace('[', '').replace(']', '')
item
lista_item = item.split(', ')
lista_item
for item in df.choice_description:
item = '[Tomatillo-Red Chili Salsa (Hot), [Black Beans, Rice, Cheese, Sour Cream]]'
item = item.replace('[', '').replace(']', '')
item
lista_item = item.split(', ')
lista_item
for item in df.choice_description:
item = item.replace('[', '').replace(']', '')
item
lista_item = item.split(', ')
lista_item
for item in df.choice_description:
print(item)
item = item.replace('[', '').replace(']', '')
item
lista_item = item.split(', ')
lista_item
import numpy as np
np.nan
np.nan.replace()
if type(item) == float:
print('bingo')
else:
print('nanai')
for item in df.choice_description:
if type(item) == float:
item
else:
item = item.replace('[', '').replace(']', '')
item
lista_item = item.split(', ')
lista_item
for item in df.choice_description:
if type(item) == float:
item
else:
item = item.replace('[', '').replace(']', '')
item
lista_item = item.split(', ')
print(lista_item)
for item in df.choice_description:
if type(item) == float:
lista_item = []
else:
item = item.replace('[', '').replace(']', '')
item
lista_item = item.split(', ')
print(lista_item)
# Build one token list per order line: a missing choice_description becomes
# an empty list; otherwise strip the brackets and split on ", ".
lista_todos = []
for item in df.choice_description:
    # Missing values in this column are float NaN. pd.isna() is the robust
    # check; the original `type(item) == float` anti-pattern worked only by
    # accident of NaN being a float.
    if pd.isna(item):
        lista_todos.append([])
    else:
        item = item.replace('[', '').replace(']', '')
        lista_todos.append(item.split(', '))
lista_todos
# +
from sklearn.feature_extraction.text import TfidfVectorizer
bag_of_words = CountVectorizer(tokenizer=lambda doc: doc, lowercase=False).fit_transform(lista_todos)
# -
bag_of_words.get
vec = TfidfVectorizer()
lista_todos = [','.join(i) for i in lista_todos]
vec.fit(lista_todos)
data = vec.transform(lista_todos).toarray()
vec.get_feature_names_out()
pd.DataFrame(data)
pd.DataFrame(bag_of_words.toarray())
| I Python Basics & Pandas/06_Applied For Loops vs DataFrame Functionalities/06session_pandas-vs-for_loop.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ValerieLangat/DS-Unit-1-Sprint-4-Statistical-Tests-and-Experiments/blob/master/Valerie_Intermediate_Linear_Algebra_Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="GSNiYn8lr6nN" colab_type="text"
# # Statistics
# + [markdown] id="3d4izUhQvh2_" colab_type="text"
# ## 1.1 Sales for the past week were the following amounts: [3505, 2400, 3027, 2798, 3700, 3250, 2689]. Without using library functions, what is the mean, variance, and standard deviation of sales from last week? (for extra bonus points, write your own function that can calculate these values for any sized list)
# + id="w1iZfYvBtEA1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="e982b934-a67b-49eb-fc6e-24d30d03cc3b"
import pandas as pd
import numpy as np

# Last week's daily sales figures.
sales = [3505, 2400, 3027, 2798, 3700, 3250, 2689]

def mean1(numbs):
    """Arithmetic mean of a non-empty list of numbers."""
    return sum(numbs) / len(numbs)

def variance(numbs):
    """Population variance (divides by n, not n-1)."""
    mean = mean1(numbs)
    return sum([(mean - x) ** 2 for x in numbs]) / len(numbs)

def std(numbs):
    """Population standard deviation (square root of the variance)."""
    return variance(numbs) ** .5

# Bug fix: the original called the undefined name `mean` (NameError);
# the helper defined above is `mean1`.
SalesMean = mean1(sales)
SalesVariance = variance(sales)
SalesSTDEV = std(sales)
print("Mean of Sales: ", SalesMean)
print("Variance of Sales: ", SalesVariance)
print("Standard Deviation of Sales: ", SalesSTDEV)
# + [markdown] id="oh63KaOctEp_" colab_type="text"
# ## 1.2 Find the covariance between last week's sales numbers and the number of customers that entered the store last week: [127, 80, 105, 92, 120, 115, 93] (you may use library functions for calculating the covariance since we didn't specifically talk about its formula)
# + id="G7ZB0krot564" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="9a6351c8-940f-4ddc-e189-05a13c9a4959"
import math
sales = [3505, 2400, 3027, 2798, 3700, 3250, 2689]
customers = [127, 80, 105, 92, 120, 115, 93]
df = pd.DataFrame({'LWsales': sales, 'LWcustomers':customers})
print(np.cov(df['LWsales'], df['LWcustomers']))
covariance = df.cov()['LWsales']['LWcustomers']
print(covariance)
# + [markdown] id="J9SbUY9mt66I" colab_type="text"
# ## 1.3 Find the standard deviation of customers who entered the store last week. Then, use the standard deviations of both sales and customers to standardize the covariance to find the correlation coefficient that summarizes the relationship between sales and customers. (You may use library functions to check your work.)
# + id="vFJms2YRrKhY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="bdff6837-dce1-493c-fc6b-1892b4c900b3"
last_weekstd = std(customers)
print(last_weekstd)
co_co = covariance / (df['LWsales'].std() * df['LWcustomers'].std())
print(co_co)
# + [markdown] id="IbZVf7nmujPJ" colab_type="text"
# ## 1.4 Use pandas to import a cleaned version of the titanic dataset from the following link: [Titanic Dataset](https://raw.githubusercontent.com/Geoyi/Cleaning-Titanic-Data/master/titanic_clean.csv)
#
# ## Calculate the variance-covariance matrix and correlation matrix for the titanic dataset's numeric columns. (you can encode some of the categorical variables and include them as a stretch goal if you finish early)
# + id="0TWgUIiaCFzq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 210} outputId="049cb134-fa5d-4564-a50d-8ae400e2cd33"
df = pd.read_csv('https://raw.githubusercontent.com/Geoyi/Cleaning-Titanic-Data/master/titanic_clean.csv')
df.head(3)
# + id="8FGEAk4fw3M3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 328} outputId="31fedcc4-5081-4420-b979-111a4e6eec38"
df.cov()
# + id="P8HnWso2xAz-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 328} outputId="65dc98bd-4d28-4fa7-da3b-07a212cf5f5c"
df.corr()
# + [markdown] id="7K0Xfh8MvYkl" colab_type="text"
# # Orthogonality
# + [markdown] id="Pe3eOZ2fvdZ-" colab_type="text"
# ## 2.1 Plot two vectors that are orthogonal to each other. What is a synonym for orthogonal?
# + id="YLSBk7hJvvCx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 269} outputId="360d3f9d-f458-426a-d5c1-53315d755744"
import matplotlib.pyplot as plt
# A synonym for orthogonal is perpendicular
v1 = np.array([10,0])
v2 = np.array([0,10])
plt.arrow(0,0, *v1, head_width=1, head_length=1, color='purple')
plt.arrow(0,0, *v2, head_width=1, head_length=1, color='blue')
plt.xlim(-1, 15)
plt.ylim(-1, 15)
plt.show()
# + [markdown] id="7AS4V1Nhvvxz" colab_type="text"
# ## 2.2 Are the following vectors orthogonal? Why or why not?
#
# \begin{align}
# a = \begin{bmatrix} -5 \\ 3 \\ 7 \end{bmatrix}
# \qquad
# b = \begin{bmatrix} 6 \\ -8 \\ 2 \end{bmatrix}
# \end{align}
# + id="F_-y54YSz47k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="157c2e8f-c3d9-40a8-c5e9-19547f76358c"
#We can calculate the dot product to find out:
a = np.array([-5,3,7]).T
b = np.array([6,-8,2]).T
np.dot(a, b)
# Not orthogonal bc the dot product is -40, not zero+
# + [markdown] id="MiNjyqiEz5SG" colab_type="text"
# ## 2.3 Compute the following values: What do these quantities have in common?
#
# ## What is $||c||^2$?
#
# ## What is $c \cdot c$?
#
# ## What is $c^{T}c$?
#
# \begin{align}
# c = \begin{bmatrix} 2 & -15 & 6 & 20 \end{bmatrix}
# \end{align}
# + id="IlV_uaYK1EQB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="63abb169-8ea6-4acb-d563-0fa9dc66123a"
c = np.array([2, -15, 6, 20])
one = (np.linalg.norm(c) ** 2)
two = (np.dot(c, c))
three = (np.matmul(c.T, c))
print (one, two, three)
#All the same number
# + [markdown] id="MK_TpWqk1Evk" colab_type="text"
# # Unit Vectors
# + [markdown] id="Kpit6WWO1b8l" colab_type="text"
# ## 3.1 Using Latex, write the following vectors as a linear combination of scalars and unit vectors:
#
# \begin{align}
# d = \begin{bmatrix} 7 \\ 12 \end{bmatrix}
# \qquad
# e = \begin{bmatrix} 2 \\ 11 \\ -8 \end{bmatrix}
# \end{align}
# + [markdown] id="oBCj1sDW2ouC" colab_type="text"
# Your text here
# + [markdown] id="dAdUQuep1_yJ" colab_type="text"
# ## 3.2 Turn vector $f$ into a unit vector:
#
# \begin{align}
# f = \begin{bmatrix} 4 & 12 & 11 & 9 & 2 \end{bmatrix}
# \end{align}
# + id="I3W8ZiHR1_Fa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a635d170-a8a7-497c-8a2b-983f96268d50"
f = [4, 12, 11, 9, 2]
unitvec = f/np.linalg.norm(f)
print(unitvec)
# + [markdown] id="o39UyP-I5lpP" colab_type="text"
# # Linear Independence / Dependence
# + [markdown] id="ajfBqYe45sT5" colab_type="text"
# ## 4.1 Plot two vectors that are linearly dependent and two vectors that are linearly independent (bonus points if done in $\mathbb{R}^3$).
# + id="dU6qoWNT3FhO" colab_type="code" colab={}
# + [markdown] id="TrJ0MT_n3SvO" colab_type="text"
# # Span
# + [markdown] id="86iXLzwM2z8l" colab_type="text"
# ## 5.1 What is the span of the following vectors?
#
# \begin{align}
# g = \begin{bmatrix} 1 & 2 \end{bmatrix}
# \qquad
# h = \begin{bmatrix} 4 & 8 \end{bmatrix}
# \end{align}
# + id="G2LK2RWL39Q4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="513a7156-8494-451d-f3da-d42adf0863a6"
g = np.array([1, 2])
h = np.array([4, 8])
span = np.linalg.matrix_rank(g, h)
print(span)
# + [markdown] id="l1deylUj4IHH" colab_type="text"
# ## 5.2 What is the span of $\{l, m, n\}$?
#
# \begin{align}
# l = \begin{bmatrix} 1 & 2 & 3 \end{bmatrix}
# \qquad
# m = \begin{bmatrix} -1 & 0 & 7 \end{bmatrix}
# \qquad
# n = \begin{bmatrix} 4 & 8 & 2\end{bmatrix}
# \end{align}
# + id="p1i_ueD25ZcP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="6d552687-cbb5-4560-fb41-cbf6d11bff46"
l = np.array([1,2,3])
m = np.array([-1,0,7])
n = np.array([4, 8, 2])
# BUG FIX: np.linalg.matrix_rank(l, m, n) passed `m` as `tol` and `n` as
# `hermitian`, which is not a rank computation at all (the author's note
# "that didnt work" below referred to this).  Stack the three vectors into
# a single 3x3 matrix and take its rank: that is the dimension of their span.
span2 = np.linalg.matrix_rank(np.vstack([l, m, n]))
print(span2)
# + [markdown] id="IBqe7X1732kX" colab_type="text"
# # Basis
# + [markdown] id="YeUZVHRM6PpT" colab_type="text"
# ## 6.1 Graph two vectors that form a basis for $\mathbb{R}^2$
#
#
# + id="utvF3Pkt8NP6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 269} outputId="2b8767c0-d925-4652-85a7-1340dbe08c4d"
# Could use my orthogonal vectors from 2.1 but I'll make a new one:
# the two scaled axis vectors are independent and together span R^2.
v1 = np.array([20, 0])
v2 = np.array([0, 20])
for vec, colour in ((v1, 'purple'), (v2, 'pink')):
    plt.arrow(0, 0, *vec, head_width=1, head_length=1, color=colour)
plt.xlim(-5, 25)
plt.ylim(-5, 25)
plt.show()
# + [markdown] id="20yPFBDUxxnS" colab_type="text"
# ## 6.2 What does it mean to form a basis?
# + [markdown] id="3w4tEayT8M0o" colab_type="text"
# Vectors in a space that are linearly independent and span the whole vector space
# + [markdown] id="EHmUxbcY6vD3" colab_type="text"
# # Rank
# + [markdown] id="IpJwt9kw6v8U" colab_type="text"
# ## 7.1 What is the Rank of P?
#
# \begin{align}
# P = \begin{bmatrix}
# 1 & 2 & 3 \\
# -1 & 0 & 7 \\
# 4 & 8 & 2
# \end{bmatrix}
# \end{align}
# + id="8tq4tktU8T1X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c27e3f98-0c60-4b2d-b9b5-2fde8739d241"
# Exactly the same as 5.2, but combined into one big matrix: the rank of
# the matrix equals the dimension of the span of its rows.
P = np.array([[1, 2, 3], [-1, 0, 7], [4, 8, 2]])
RoP = np.linalg.matrix_rank(P)
RoP
# + [markdown] id="jGqFMBYY7mHD" colab_type="text"
# ## 7.2 What does the rank of a matrix tell us?
# + [markdown] id="Vjg1IiCD8nnP" colab_type="text"
# It can tell us how many independent columns and rows are in a given matrix and the different ways the matrix may be legally transformed. And other things too.
# + [markdown] id="0Db2sc_V8QD6" colab_type="text"
# # Linear Projections
#
# ## 8.1 Line $L$ is formed by all of the vectors that can be created by scaling vector $v$
# \begin{align}
# v = \begin{bmatrix} 1 & 3 \end{bmatrix}
# \end{align}
#
# \begin{align}
# w = \begin{bmatrix} -1 & 2 \end{bmatrix}
# \end{align}
#
# ## find $proj_{L}(w)$
#
# ## graph your projected vector to check your work (make sure your axis are square/even)
# + id="hp5z2WTBCNKx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="325547ba-1a34-48a9-95f2-9cd59a43ece8"
v = np.array([1, 3])
w = np.array([-1, 2])
# proj_L(w) = ((w . v) / (v . v)) * v  -- the component of w along the line through v.
scale = v.dot(w) / v.dot(v)
proj = scale * v
proj
# + id="7nLay_C1_Yq8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 269} outputId="5cc3706d-58d6-4f91-81c6-34417faef389"
#Checking
# Visual check: the projection (blue) should lie on the line through v
# (purple), and the difference w - proj should be perpendicular to v.
plt.arrow(0, 0, *v, head_width=.3, head_length=.3, color='purple')
plt.arrow(0, 0, *w, head_width=.3, head_length=.3, color='pink')
plt.arrow(0, 0, *proj, head_width=.3, head_length=.3, color='blue')
plt.xlim(-2, 5)
plt.ylim(-1, 5)
plt.show()
# + [markdown] id="TKkrPwRM-Oar" colab_type="text"
# # Stretch Goal
#
# ## For vectors that begin at the origin, the coordinates of where the vector ends can be interpreted as regular data points. (See 3Blue1Brown videos about Spans, Basis, etc.)
#
# ## Write a function that can calculate the linear projection of each point (x,y) (vector) onto the line y=x. run the function and plot the original points in blue and the new projected points on the line y=x in red.
#
# ## For extra points plot the orthogonal vectors as a dashed line from the original blue points to the projected red points.
# + id="cp52kZra-ykj" colab_type="code" outputId="12502200-eafe-4e20-bfb7-2d539c6027cb" colab={"base_uri": "https://localhost:8080/", "height": 347}
import pandas as pd
import matplotlib.pyplot as plt

# Creating a dataframe for you to work with -Feel free to not use the dataframe if you don't want to.
df = pd.DataFrame({
    "x": [1, 4, 7, 3, 9, 4, 5],
    "y": [4, 2, 5, 0, 8, 2, 8],
})
df.head()
plt.scatter(df.x, df.y)
plt.show()
# + id="EZTA6Tj6BGDb" colab_type="code" colab={}
| Valerie_Intermediate_Linear_Algebra_Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# -----------
# User Instructions:
#
# Modify the the search function so that it becomes
# an A* search algorithm as defined in the previous
# lectures.
#
# Your function should return the expanded grid
# which shows, for each element, the count when
# it was expanded or -1 if the element was never expanded.
#
# If there is no path from init to goal,
# the function should return the string 'fail'
# ----------
grid = [[0, 1, 0, 0, 0, 0],   # 0 = navigable cell, 1 = obstacle
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 0]]
# Heuristic: estimated cost-to-goal for every cell (one entry per grid cell).
heuristic = [[9, 8, 7, 6, 5, 4],
             [8, 7, 6, 5, 4, 3],
             [7, 6, 5, 4, 3, 2],
             [6, 5, 4, 3, 2, 1],
             [5, 4, 3, 2, 1, 0]]
init = [0, 0]                          # start cell [row, col]
goal = [len(grid)-1, len(grid[0])-1]   # goal is the bottom-right cell
cost = 1                               # cost of a single move
delta = [[-1, 0 ], # go up
         [ 0, -1], # go left
         [ 1, 0 ], # go down
         [ 0, 1 ]] # go right
delta_name = ['^', '<', 'v', '>']      # display symbol for each move in `delta`
# -
def search(grid, init, goal, cost, heuristic):
    """A* search over a 2-D grid.

    Parameters:
        grid      -- 2-D list; 0 = navigable cell, 1 = obstacle
        init      -- [row, col] start cell
        goal      -- [row, col] goal cell
        cost      -- cost of a single move
        heuristic -- 2-D list of estimated cost-to-goal, one per cell

    Returns the "expand" grid: for each cell, the 0-based order in which
    it was expanded, or -1 if it was never expanded.  Returns the string
    'fail' if there is no path from init to goal.
    """
    # The legal moves (up, left, down, right).  Previously read from the
    # module-level `delta`; the move set is fixed, so define it locally.
    moves = [[-1, 0], [0, -1], [1, 0], [0, 1]]
    rows, cols = len(grid), len(grid[0])

    closed = [[0] * cols for _ in range(rows)]   # 1 = already queued/expanded
    closed[init[0]][init[1]] = 1
    expand = [[-1] * cols for _ in range(rows)]  # expansion order per cell

    x, y = init
    g = 0
    h = heuristic[x][y]
    # Frontier entries are [f, g, h, x, y]; sorting puts the smallest f first
    # (ties broken by g, then h, then position, as in the original).
    frontier = [[g + h, g, h, x, y]]
    count = 0

    while frontier:
        frontier.sort()
        node = frontier.pop(0)          # node with the lowest f = g + h
        g, x, y = node[1], node[3], node[4]
        expand[x][y] = count
        count += 1

        if x == goal[0] and y == goal[1]:
            return expand

        # Expand the winning node's neighbours onto the frontier.
        for dx, dy in moves:
            x2, y2 = x + dx, y + dy
            if 0 <= x2 < rows and 0 <= y2 < cols:
                if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                    g2 = g + cost
                    h2 = heuristic[x2][y2]
                    frontier.append([g2 + h2, g2, h2, x2, y2])
                    closed[x2][y2] = 1

    # BUG FIX: the spec above says to return the string 'fail'; the
    # original returned "Fail" (capitalized).
    return 'fail'
search(grid, init, goal, cost, heuristic)

# A second map with a different obstacle layout to exercise the search again.
grid1 = [[0, 1, 0, 0, 0, 0],
         [0, 1, 0, 0, 0, 0],
         [0, 1, 0, 0, 0, 0],
         [0, 0, 0, 1, 0, 0],
         [0, 1, 0, 0, 1, 0]]
init1 = [0, 0]
goal1 = [len(grid1)-1, len(grid1[0])-1]
# BUG FIX: the original passed `goal` (the first map's goal) instead of
# `goal1`.  They happen to hold the same value for these two maps, but the
# intent is clearly to search the second map with its own goal.
search(grid1, init1, goal1, cost, heuristic)
| test_code/L8/L8_15_Implement_Astar.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Start out by establishing a connection to a MongoDB instance.
import pymongo
conn = pymongo.MongoClient()  # connect to a MongoDB server on the default localhost:27017
db = conn.test                # the 'test' database (created lazily on first write)
coll = db.objects             # the 'objects' collection used throughout this tutorial
coll.drop()                   # start from an empty collection so the demo is repeatable
# Insert a couple of documents that represent themselves naturally in JSON.
import datetime
coll.insert_one({'a': datetime.datetime.now(), 'b': 1.0})
coll.insert_one({'items': [1, 2, 3, 'd']})
# But what about more complex objects (classes and instances of those classes)?
# +
class MyObject:
    """A small two-attribute example class, used to show how arbitrary
    Python objects can be round-tripped through MongoDB via jaraco.modb."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        # Mirrors the constructor call, e.g. MyObject(1, 2).
        return '{}({}, {})'.format(type(self).__name__, self.x, self.y)


ob1 = MyObject(1, 2)
ob2 = MyObject(0, 5)
# -
# NOTE(review): this call is expected to fail -- pymongo only accepts
# mapping-like documents, not arbitrary class instances.  Demonstrating
# that failure is the point; the encoded insert below is the fix.
coll.insert_one(ob1)
import jaraco.modb
jaraco.modb.encode(ob1)
# Because a MyObject instance doesn't have a natural representation in JSON, it's serialized as a dictionary with a special key 'py/object', which signals to the decoder that this is a JSONPickled Python Object. As long as the system doing the decoding implements `__main__.MyObject` with a compatible interface, the object will decode nicely.
coll.insert_one(jaraco.modb.encode(ob1))
coll.insert_one(jaraco.modb.encode(ob2))
# Now the two objects should be persisted to the database. Query them to see how they appear.
list(coll.find())
next(map(jaraco.modb.decode, coll.find({'x': 0})))
# But what about more complex objects? Consider ob3 whose x attribute is another MyObject.
ob3 = MyObject(ob2, 2)
ob3
coll.insert_one(jaraco.modb.encode(ob3))
# Because MongoDB's document query engine allows reaching deep into the documents, one can even query based on child object's attributes.
# Find all objects whose x attribute has a y attribute with a value of 5
query = {'x.y': 5}
next(map(jaraco.modb.decode, coll.find(query)))
# Where are the limitations? What about integer keys?
# NOTE(review): presumably raises an error here (BSON document keys must be
# strings) -- confirm; the encoded variant below succeeds.
coll.insert_one({1: 3})
res = coll.insert_one(jaraco.modb.encode({1: 3}))
coll.find_one({'_id': res.inserted_id})
# You might note that the integer 1 is now represented as a string '1'. This limitation is an unfortunate side-effect of relying on JSON as a serialization layer.
| tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.1 64-bit
# metadata:
# interpreter:
# hash: 63fd5069d213b44bf678585dea6b12cceca9941eaf7f819626cde1f2670de90d
# name: python3
# ---
# # Python loops
# ## While loop
#
# While loop executes a set of statements as long as a condition is true.
# + tags=[]
# print i as long as i is less than 5
i = 0
while i < 5:
    print(i)
    i += 1  # without this increment the condition never becomes false
# -
# Remember to increment i, or the loop will continue forever.
# The `break` statement: we can stop the loop even if the while condition is true:
# + tags=[]
# Exit the loop when i is 3
i = 0
while i < 5:
    i += 1
    if i == 3:
        break       # leaves the loop immediately; the print below is skipped
    print(i)
print('Outside the loop')
# -
# The `break` statement broke the loop. We finished it after 2 iterations.
# The `continue` statement: we can stop the current iteration, and continue with the next
# + tags=[]
# continue to the next iteration if i is 3
i = 0
while i < 5:
    i += 1
    if i == 3:
        continue    # skip the print for this iteration only
    print(i)
print('Outside the loop')
# -
# Notice that we never printed 3, because we used the `continue` statement there.
# The `else` statement: we can run a block of code once when the condition no longer is true:
# + tags=[]
# Print a message once the condition is false:
i = 0
while i < 5:
    print(i)
    i += 1
else:
    # the else branch runs once the condition fails (but not after a break)
    print("i is no longer less than 5")
# -
# -
# ## For loop
#
# A `for` loop is used for iterating over a sequence (that is either a list, a tuple, a dictionary, a set, or a string).
# With the for loop we can execute a set of statements, once for each item in a list, tuple, set etc.
# + tags=[]
# Print each name in the students list
students = ['John', 'Mary', 'Anna']
for s in students:
    print(s)
# -
# Looping Through a String
#
# Strings are iterable objects, they contain a sequence of characters.
# + tags=[]
for s in 'string':
    print(s)  # one character per iteration
# -
# The `break` statement stops the loop before it has looped through all the items.
# + tags=[]
# Exit the loop when s is 'Mary'
students = ['John', 'Rose', 'Mary', 'Anna']
for s in students:
    if s == 'Mary':
        break
    print(s)
# -
# The `continue` statement stops the current iteration of the loop, and continues with the next.
# + tags=[]
students = ['John', 'Rose', 'Mary', 'Anna']
for s in students:
    if s == 'Mary':
        continue
    print(s)
# -
# Notice that in this case the for loop printed all the names but Mary.
# The `range()` function returns a sequence of numbers, starting from 0 by default, and increments by 1 (by default).
# + tags=[]
for x in range(5):
    print(x)
# + tags=[]
# print the values from 2 to 5-1 (the stop value is exclusive)
for x in range(2,5):
    print(x)
# + tags=[]
# increment the sequence by 2 (default is 1)
for x in range(2,10,2):
    print(x)
# + tags=[]
# increment the sequence by 10
for x in range(3,45,10):
    print(x)
# -
# -
# `else` in For Loop: The else keyword in a for loop specifies a block of code to be executed when the loop is finished.
# + tags=[]
# Print all numbers from 0 to 5, and print a message when the loop has ended:
for x in range(6):
    print(x)
else:
    # runs once the iterable is exhausted (but not if the loop breaks)
    print("Finally finished!")
# -
# ### Nested loops
# + tags=[]
# print each adjective for every student (every pairing of the two lists)
adj = ['smart', 'polite']
students = ['John', 'Mary', 'Anna']
for a in adj:
    for s in students:
        print(a,s)
# + tags=[]
# Two dice: all 36 outcome pairs
for i in range(6):
    for j in range(6):
        print('(',i+1,',',j+1,')')
# -
# ### The pass statements
#
# `for` loops cannot be empty, but if you for some reason have a for loop with no content, put in the `pass` statement to avoid getting an error.
# The pass statement:
for x in [0, 1, 'h']:
    pass  # placeholder body; does nothing
# Iterating through a Dictionary
d1 = {1:'a', 2:'b', 3:'c'}
type(d1)
for k in d1:
    print(k)  # iterating a dict yields its keys
# As you can see, when a for loop iterates through a dictionary, the loop variable is assigned to the dictionary’s keys.
#
# To access the dictionary values within the loop, you can make a dictionary reference using the key as usual:
for k in d1:
    print(d1[k])
# We can use `d1.keys()` to iterate over every key in the dictionary:
for k in d1.keys():
    print(k)
# We can use d1.values() to iterate over every value in the dictionary:
for k in d1.values():
    print(k)
| 01-Basic Python/08-Loops.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial 2: Part B
# ## Blackjack
#
# ### Authors: <NAME>, <NAME>, <NAME>
# + [markdown] id="F7W922C5x1LA"
# Blackjack is a comparing card game between a player and a dealer. It is played with one or more decks of 52 cards and is one of the most popular casino games.
#
# The goal here is to make a simulator for the blackjack game where the computer will act as a dealer and you(or the user) will be the player.
# + [markdown] id="F7W922C5x1LA"
# #### Gameplay
#
# Blackjack is based on a scoring system where each card has a value, and the goal is to beat the score of the dealer's hand without going over 21.
#
# The value of each card is as follows:
# - Number cards (2-10): Their face value (2 has a value of 2, and so on)
# - Face Cards(J, Q, K): 10
# - Ace: 1 (hard) or 11 (soft) depending on whichever makes a better hand
# - Suit is irrelevant.
#
# As an example, consider the hand: `2 K A`: If the Ace is given a value of 11, then the total score is 23, which is greater than 21, so it is assigned a value of 1 which gives a score of 13.
# + [markdown] id="F7W922C5x1LA"
# ## Simplified Gameplay
# Here, we describe the simplified gameplay that we will guide you in building in this tutorial.
#
#
# Each player is dealt two cards. One of the dealer's cards is hidden from the player. So a possible scenario is
#
# Dealer: 9 ?(7)
# Player: 5 4
#
#
#
# The player has the following options:<br>
# ***Hit***: ask for another card.<br>
# ***Stand***: hold your total and end your turn.
#
# If at any point, the player exceeds 21, then the player loses. After the player ends their turn, the dealer reveals the hidden card. The dealer then hits until the hand totals 17 or higher.
#
# The dealer will hit unless his/her cards total 17 or higher after which he must stand.
#
# The outcome of the hands is as follows:
# - If the player goes over 21 you bust ( i.e. you lose), and the dealer wins regardless of the dealer's hand.
# - If you are dealt 21 from the start (Ace & 10), you got a 'blackjack'.
# - If both the player and the dealer receive a 'blackjack', no one wins
# - If Dealer exceeds 21 and the player does not, the player wins
# - If the player attains a final sum higher than the dealer and does not bust, the player wins.
# -
# For the complete set of rules, have a look this YouTube video.
# + colab={"base_uri": "https://localhost:8080/", "height": 321} id="3tmQBzQvyVqy" outputId="134ef302-e0b2-46da-d726-4c7029cbf7aa"
from IPython.display import YouTubeVideo
# Embed the blackjack rules video inline in the notebook output.
YouTubeVideo('VB-6MvXvsKo')
# -
# ---
# Let us implement this game by first making a minimal version of blackjack and then adding features (similar to `Fraction` in Part A of this tutorial)
# First, we define the cards. For now, we will not consider suits (since they are irrelevant to the game anyways). Create Python objects (*you decide which kind*) to store the ranks (2-10, J, Q, K, A) and their values.
# +
# When you feel like you need any library, add the imports at the top
# Create global variables for cards, scores of each card. Ignore the suits for now
# -
# ---
# Now, create a shuffled deck of cards. Remember that each deck has cards of 4 suits (52 cards in total)
# +
# Shuffled deck of cards that we will use
# -
# ---
# Next up, write a function that will accept a list of cards (a `hand`) and return the value of the hand. Remember that aces can have two values!
def calc_score(hand):
    """Return the score of a hand
    Aces can take value 11 or 1.
    Numbered cards take the value of their numbers
    Face cards are all valued 10

    `hand` is a list of rank strings: '2'..'10', 'J', 'Q', 'K', 'A'.
    An empty hand scores 0.
    """
    score = 0
    soft_aces = 0  # aces currently counted as 11
    for card in hand:
        if card == 'A':
            score += 11
            soft_aces += 1
        elif card in ('J', 'Q', 'K'):
            score += 10
        else:
            score += int(card)
    # Downgrade aces from 11 to 1, one at a time, while the hand would bust.
    while score > 21 and soft_aces:
        score -= 10
        soft_aces -= 1
    return score
# ---
# Now we are ready to get started on the actual gameplay.
#
# Deal cards to the player and the dealer. Print out the first card dealt to the dealer and the two cards dealt to the player.
#
# For now, simply compare the values of the hands dealt to both parties and decide the winner (which is a boring game, but a game nonetheless)
# +
# Deal 2 cards each to the dealer and the player
# Whoever has the hand with a larger score wins
# -
# ---
# By now you have a completely working game. It's not exactly Blackjack, but it's close! (somewhat).
#
# Let's spice things up a bit now: Ask the player if they want to hit or stand (ignore all the other possible options for now), and print the hand that the player has. If they bust, then GAME OVER! If they don't they get to choose again. Repeat this until either the player busts or they decide they have a good enough hand to continue.
#
# To summarize, we now have a game where the dealer is dealt two cards, one hidden from the player. The player is dealt two cards and asked if they want to hit or stand, and are allowed to do so until they either stand or bust. Then the person with the higher score among the dealer and the player (if they haven't busted) wins!
# + id="z2lKNNm4zTiv"
# You may find an infinite `while` loop (with a `break` somewhere) to be useful here
# -
# ---
# Now, we implement the dealer's behaviour. After the player is done, the dealer's card is revealed to the player. The dealer must hit while the value is less than 17, and stop as soon as the value is 17 or greater.
#
# * If the player had gone bust, the dealer wins, even if the dealer goes bust (This is why you shouldn't gamble, kids).
# * If the dealer goes bust, and the player doesn't, the player wins.
# * If the dealer and player both don't go bust, the one with the higher score wins!
# You may find `while` loops to be useful here as well.
# ---
# ## We are done with mini-Blackjack!
#
# If you have reached this far, give yourself a pat on the back.<br>
# If you haven't, and you want to give yourself a pat on the back, reach out to us via the Doubts channel on MS Teams.<br>
#
#
# If you want to ~torture yourself~ improve this implementation of the game, here are features you can add:
# 1. Implement split (see the YouTube video for the explanation), where if a player is dealt two similar cards (both 9s, for example), they can place another bet and split the cards into two independent hands.
# 2. Implement bets. Start off the player with an amount of their choosing, and they win or lose money as they play.
# 3. Implement Double Downs.
# 4. Instead of 1 deck of cards, most casinos use multiple decks of cards. Implement this in your program. In addition, whenever the deck goes below a certain number of cards after a round is done, fill up the stack with a set of new decks.
# 5. Add the suits to the cards. Python3 is unicode-compliant, so you can print and use any unicode symbol. Add a bit of color as well, printing hearts and diamonds as red, and spades and clubs as black.
# 6. Make the game more interactive. Ask the player for their names. Enable multiple players to play (after dealing the first two cards, the process is just done one after another for each player).
# 7. Think about the way you have designed your code:<br>
# Did you use classes in this tutorial? Did you have to?<br>
# How can you make your code more readable and robust to bugs
# __fin__
#
# ---
| tutorial2/Tutorial2_PartB_unsolved.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from PIL import Image
import numpy as np
import os
import cv2
import json

# +
# Machine-specific input/output locations.
imgPath = 'C:/Users/q/Desktop/Code/Python/demo/resize/melonjson/test/'
jsonPath = 'C:/Users/q/Desktop/Code/Python/demo/resize/melonjson/test/outputs/'
out_path = 'C:/Users/q/Desktop/Code/Python/demo/resize/melonjson/test32/'

############################ read_json ################################
# For each annotation file 1.json .. 13.json, read the label of the first
# annotated object and append "<i>.jpg <label>" to the label list file.
for i in range(1, 14):
    n = str(i)
    jsonPathn = jsonPath + n + '.json'
    # BUG FIX: the original opened both files without closing them;
    # `with` guarantees the handles are released even if parsing fails.
    with open(jsonPathn, 'rb') as file:
        data = json.load(file)
    outputs = data['outputs']
    name = outputs.get("object")
    lable = name[0].get("name")
    with open('C:/Users/q/Desktop/Code/Python/demo/resize/melonjson/test.txt',
              'a', encoding='utf-8') as fh:
        fh.write(n + '.jpg' + " " + lable + "\n")
    print(n + '.jpg' + " " + lable + "\n")

############################# resize_img #################################
# (Disabled) Resize each numbered image to 32x32 and save it to out_path:
# for i in range(1, 14):
#     n = str(i)
#     imgPathn = imgPath + n + '.jpg'
#     img = Image.open(imgPathn)      # read the image
#     img.resize((32, 32)).save(out_path + n + '.jpg')
#     print('loading : ' + imgPathn)  # progress message
###############################################################
print("OVER")
| Tensorflow/resize2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2 with Spark 2.1
# language: python
# name: python2-spark21
# ---
# <center><h1> Predict heart failure with Watson Machine Learning</h1></center>
# 
# <p>This notebook contains steps and code to create a predictive model to predict heart failure and then deploy that model to Watson Machine Learning so it can be used in an application.</p>
# ## Learning Goals
# The learning goals of this notebook are:
# * Load a CSV file into the Object Storage Service linked to your Data Science Experience
# * Create an Apache® Spark machine learning model
# * Train and evaluate a model
# * Persist a model in a Watson Machine Learning repository
#
# ## 1. Setup
#
# Before you use the sample code in this notebook, you must perform the following setup tasks:
# * Create a Watson Machine Learning Service instance (a free plan is offered) and associate it with your project
# * Upload heart failure data to the Object Store service that is part of your data Science Experience trial
#
# +
# IMPORTANT Follow the lab instructions to insert authentication and access info here to get access to the data used in this notebook
# -
# ## 2. Load and explore data
# <p>In this section you will load the data as an Apache® Spark DataFrame and perform a basic exploration.</p>
#
# <p>Load the data to the Spark DataFrame from your associated Object Storage instance.</p>
# +
# This function is used to setup the access of Spark to your Object Storage.
def set_os_config_with_credentials(credentials):
    """This function sets the configuration so it is possible to
    access data from Bluemix Object Storage using Spark

    `credentials` is the dict injected by the notebook's "Insert
    Credentials" cell (auth_url, project_id, user_id, password, region, ...).
    NOTE: relies on the global SparkContext `sc` provided by the kernel;
    the Hadoop configuration is shared, so these settings apply to every
    subsequent swift:// read.
    """
    hconf = sc._jsc.hadoopConfiguration()
    hconf.set('fs.swift.service.keystone.auth.url', credentials['auth_url'] + '/v3/auth/tokens')
    hconf.set('fs.swift.service.keystone.auth.endpoint.prefix', 'endpoints')
    hconf.set('fs.swift.service.keystone.tenant', credentials['project_id'])
    hconf.set('fs.swift.service.keystone.username', credentials['user_id'])
    hconf.set('fs.swift.service.keystone.password', credentials['password'] )
    hconf.setInt('fs.swift.service.keystone.http.port', 8080)
    hconf.set('fs.swift.service.keystone.region', credentials['region'])
    hconf.setBoolean('fs.swift.service.keystone.public', False)

# Right side of assignment must match the variable created via the Insert Credentials operation
mycredentials = credentials_1
set_os_config_with_credentials(mycredentials)
spark = SparkSession.builder.getOrCreate()
# Read data file and create a Data Frame
# (header/inferSchema let Spark derive column names and types from the CSV)
df_data = spark.read\
  .format('org.apache.spark.sql.execution.datasources.csv.CSVFileFormat')\
  .option('header', 'true')\
  .option('inferSchema', 'true')\
  .load('swift://' + mycredentials['container'] + '.keystone' + '/' + mycredentials['filename'])
# -
# Explore the loaded data by using the following Apache® Spark DataFrame methods:
# * print schema
# * print top ten records
# * count all records
df_data.printSchema()
# As you can see, the data contains ten fields. The HEARTFAILURE field is the one we would like to predict (label).
df_data.show()
df_data.describe().show()
df_data.count()
# As you can see, the data set contains 10800 records.
# ## 3 Interactive Visualizations w/PixieDust
# To confirm you have the latest version of PixieDust on your system, run this cell
# !pip install --user --upgrade pixiedust
# If indicated by the installer, restart the kernel and rerun the notebook until here and continue with the workshop.
import pixiedust
# ### Simple visualization using bar charts
# With PixieDust display(), you can visually explore the loaded data using built-in charts, such as, bar charts, line charts, scatter plots, or maps.
# To explore a data set: choose the desired chart type from the drop down, configure chart options, configure display options.
# + pixiedust={"displayParams": {"aggregation": "AVG", "chartsize": "78", "handlerId": "scatterPlot", "keyFields": "AGE", "kind": "kde", "mpld3": "false", "rendererId": "seaborn", "rowCount": "500", "title": "Explore", "valueFields": "BMI"}}
display(df_data)
# -
# ## 4. Create an Apache® Spark machine learning model
# In this section you will learn how to prepare data, create and train an Apache® Spark machine learning model.
#
# ### 4.1: Prepare data
# In this subsection you will split your data into: train and test data sets.
# +
# Randomly split 80/20 into train/test; the second argument (24) is the
# random seed, so the split is reproducible.
split_data = df_data.randomSplit([0.8, 0.20], 24)
train_data = split_data[0]
test_data = split_data[1]
print "Number of training records: " + str(train_data.count())
print "Number of testing records : " + str(test_data.count())
# -
# As you can see our data has been successfully split into two data sets:
# * The train data set, which is the largest group, is used for training.
# * The test data set will be used for model evaluation and is used to test the assumptions of the model.
#
# ### 4.2: Create pipeline and train a model
# In this section you will create an Apache® Spark machine learning pipeline and then train the model.
# In the first step you need to import the Apache® Spark machine learning packages that will be needed in the subsequent steps.
#
# A sequence of data processing is called a _data pipeline_. Each step in the pipeline processes the data and passes the result to the next step in the pipeline, this allows you to transform and fit your model with the raw input data.
from pyspark.ml.feature import StringIndexer, IndexToString, VectorAssembler
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml import Pipeline, Model
# In the following step, convert all the string fields to numeric ones by using the StringIndexer transformer.
# Encode each categorical string column as a numeric index.
# The label indexer is fitted now so its labels are available for decoding later.
stringIndexer_label = StringIndexer(inputCol="HEARTFAILURE", outputCol="label").fit(df_data)
stringIndexer_sex = StringIndexer(inputCol="SEX", outputCol="SEX_IX")
stringIndexer_famhist = StringIndexer(inputCol="FAMILYHISTORY", outputCol="FAMILYHISTORY_IX")
stringIndexer_smoker = StringIndexer(inputCol="SMOKERLAST5YRS", outputCol="SMOKERLAST5YRS_IX")
#
# In the following step, create a feature vector by combining all features together.
vectorAssembler_features = VectorAssembler(inputCols=["AVGHEARTBEATSPERMIN","PALPITATIONSPERDAY","CHOLESTEROL","BMI","AGE","SEX_IX","FAMILYHISTORY_IX","SMOKERLAST5YRS_IX","EXERCISEMINPERWEEK"], outputCol="features")
# Next, define estimators you want to use for classification. Random Forest is used in the following example.
rf = RandomForestClassifier(labelCol="label", featuresCol="features")
# Finally, indexed labels back to original labels.
labelConverter = IndexToString(inputCol="prediction", outputCol="predictedLabel", labels=stringIndexer_label.labels)
# Preview what the transform stages produce (indexed columns + feature vector).
transform_df_pipeline = Pipeline(stages=[stringIndexer_label, stringIndexer_sex, stringIndexer_famhist, stringIndexer_smoker, vectorAssembler_features])
transformed_df = transform_df_pipeline.fit(df_data).transform(df_data)
transformed_df.show()
# Let's build the pipeline now. A pipeline consists of transformers and an estimator.
pipeline_rf = Pipeline(stages=[stringIndexer_label, stringIndexer_sex, stringIndexer_famhist, stringIndexer_smoker, vectorAssembler_features, rf, labelConverter])
# Now, you can train your Random Forest model by using the previously defined **pipeline** and **training data**.
model_rf = pipeline_rf.fit(train_data)
# You can check your **model accuracy** now. To evaluate the model, use **test data**.
predictions = model_rf.transform(test_data)
evaluatorRF = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction", metricName="accuracy")
accuracy = evaluatorRF.evaluate(predictions)
print("Accuracy = %g" % accuracy)
print("Test Error = %g" % (1.0 - accuracy))
# You can tune your model now to achieve better accuracy. For simplicity of this example tuning section is omitted.
# ## 5. Persist model
# In this section you will learn how to store your pipeline and model in Watson Machine Learning repository by using Python client libraries.
# First, you must import client libraries.
from repository.mlrepositoryclient import MLRepositoryClient
from repository.mlrepositoryartifact import MLRepositoryArtifact
# Authenticate to Watson Machine Learning service on Bluemix.
#
# ## **STOP here !!!!:**
# Put authentication information (username and password) from your instance of Watson Machine Learning service here.
service_path = 'https://ibm-watson-ml.mybluemix.net'
username = 'xxxxxxxxxxxxxxx'
password = '<PASSWORD>'
# **Tip:** service_path, username and password can be found on Service Credentials tab of the Watson Machine Learning service instance created in Bluemix.
ml_repository_client = MLRepositoryClient(service_path)
ml_repository_client.authorize(username, password)
# Create model artifact (abstraction layer).
model_artifact = MLRepositoryArtifact(model_rf, training_data=train_data, name="Heart Failure Prediction Model")
# **Tip:** The MLRepositoryArtifact method expects a trained model object, training data, and a model name. (It is this model name that is displayed by the Watson Machine Learning service).
# ## 5.1: Save pipeline and model¶
# In this subsection you will learn how to save pipeline and model artifacts to your Watson Machine Learning instance.
saved_model = ml_repository_client.models.save(model_artifact)
# Get saved model metadata from Watson Machine Learning.
# **Tip:** Use *meta.availableProps* to get the list of available props.
saved_model.meta.available_props()
# FIX: converted the legacy Python 2 `print` statements below to Python 3
# print() calls, for consistency with the rest of the notebook (the accuracy
# cell above already uses the function form).
print("modelType: " + saved_model.meta.prop("modelType"))
print("trainingDataSchema: " + str(saved_model.meta.prop("trainingDataSchema")))
print("creationTime: " + str(saved_model.meta.prop("creationTime")))
print("modelVersionHref: " + saved_model.meta.prop("modelVersionHref"))
print("label: " + saved_model.meta.prop("label"))
#
# ## 5.2 Load model to verify that it was saved correctly
# You can load your model to make sure that it was saved correctly.
loadedModelArtifact = ml_repository_client.models.get(saved_model.uid)
# Print the model name to make sure that model artifact has been loaded correctly.
print(str(loadedModelArtifact.name))
# Congratulations. You've successfully created a predictive model and saved it in the Watson Machine Learning service. You can now switch to the Watson Machine Learning console to deploy the model and then test it in application.
| demo1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.6 64-bit ('.venv')
# name: python38664bitvenvd0df888a817b4373b7808de25afabc5b
# ---
# +
from pathlib import Path
from IPython.display import display
import numpy as np
import pandas as pd
import spotipy
import lib_spotify_app.api_adapter as api_adapter
pd.set_option('max_columns', None)
# -
credential_fp = Path(r'private/spotify_credential.json')
# I download all the saved/liked tracks from my Spotify account and their audio features:
# https://developer.spotify.com/documentation/web-api/reference/tracks/get-several-audio-features/
# +
sp = api_adapter.setup_spotipy(
    credential_fp,
    scope=['user-library-read','user-top-read'],
    cache_path=Path(r'private')
)
df = api_adapter.query_liked_songs(sp)
df = api_adapter.enrich_audiofeature(df, sp, col="track.id")
# -
df = df.drop('index', axis=1)
df['added_at'] = pd.to_datetime(df['added_at'])
# I notice that I can know when I saved/liked the song, I would like to know when I was the most active:
# Cleaning of the columns for analysis, names are too complex and I will concatenate the "artists" into a list column
# + tags=[]
# Strip the "track." / "track.id." prefixes from the column names.
# FIX: raw strings avoid invalid-escape-sequence warnings, and regex=True is
# required because pandas >= 2.0 changed str.replace to literal matching by default.
df.columns = df.columns.str.replace(r'^(track\.)(id\.)?', '', regex=True)
df = df.loc[:,~df.columns.duplicated()]
df.columns.values
# -
# Concatenate the artists values into 1 column for:
# * names
# * id
df['artists.name'] = df.filter(regex=r'^artists\.\d+\.name')\
    .apply(lambda x: x.dropna().to_list(), axis=1)
df['artists.id'] = df.filter(regex=r'^artists\.\d+\.id')\
    .apply(lambda x: x.dropna().to_list(), axis=1)
df['duration_min'] = df['duration_ms'] / 60000
df.sort_values('popularity', ascending=False)[['name', 'artists.name', 'preview_url', 'external_urls.spotify', 'popularity']].head(10)
df.to_csv(Path(r'private/data.csv'), sep='\t')
# We can use the LastFM app to enrich the songs features with the number of listening from the user
last_api = api_adapter.setup_lastfm(Path('private', 'lastfm_credential.json'))
top_tracks_lastfm = pd.DataFrame(
    last_api.user.get_top_tracks(period='overall', limit=500)
)
display(top_tracks_lastfm)
| 1_data_preparation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %store -r the_page
# Restore the article selected in a previous session via %store; if nothing
# was stored (first run), fall back to the pickled default example article.
if 'the_page' not in locals():
    import pickle
    print("Loading default data...")
    the_page = pickle.load(open("data/the_page.p",'rb'))
# -
#
#
# Welcome!
#
# You have just opened a collection of notebooks that lets you inspect the evolution of the revision history of a Wikipedia article, up to now (From the English language edition). It also allows you to highlight **article- or word-specific conflicts as well as the productivity of any given editor.**
#
# Specifically, for the notebooks after this initial one, it interfaces with the API of a specialized service called [WikiWho](https://www.wikiwho.net), which provides fine-grained change information about the tokens (words) in an article.
#
# It is written in a way that you can **explore it like a Web app, without interacting with the code behind it**, or - if you choose to - click on "edit app" in the Jupyter navigation bar and play around with the code yourself.
#
# The default introduction example is the article "The Camp of the Saints" (a novel), which we recommend to start with. You can enter/search an article of your choice and explore it as well.
#
# Let's first get live data of some general statistics from Wikipedias own API and a service called Xtools:
from IPython.display import display, Markdown as md
# Section header for part A of the notebook.
display(md("---"))
display(md(f"# A. Basic Info from Wikipedia"))
display(md(f"***Search for an article on the English Wikipedia***"))
# +
from ipywidgets import widgets, Output
from IPython.display import display, clear_output
from external.wikipedia import WikipediaDV, WikipediaAPI
wikipedia_dv = WikipediaDV(WikipediaAPI(domain='en.wikipedia.org'))
# the method that listens to the click event
def on_button_clicked(b):
    """Search Wikipedia for the title in `searchTerm` and display the result.

    Stores the selected page in the module-level `the_page` (and via %store)
    so later cells and notebooks can reuse it.
    """
    global the_page
    # use the out widget so the output is overwritten when two or more
    # searches are performed
    with out:
        try:
            # query wikipedia
            search_result = wikipedia_dv.search_page(searchTerm.value)
            the_page = wikipedia_dv.get_page(search_result)
            # %store the_page
            clear_output()
            display(the_page.to_frame('value'))
            display(md('You selected:'))
            display(the_page['title'])
        except Exception:
            # FIX: was a bare `except:`, which also swallows KeyboardInterrupt
            # and SystemExit; Exception is the narrowest safe catch-all here
            # because the search can fail in several API/parsing ways.
            clear_output()
            display(md(f'The page title *"{searchTerm.value}"* was not found'))
# by default display the last search
try:
    searchTerm = widgets.Text(the_page['title'], description='Page title:')
except Exception:
    # FIX: was a bare `except:`; `the_page` may be undefined or lack a title
    # on first run - fall back to the default example article.
    searchTerm = widgets.Text("The Camp of the Saints", description='Page title:')
# create and display the button
button = widgets.Button(description="Search")
example = md("e.g. *The Camp of the Saints*")
display(searchTerm,example,button)
# the output widget is used to remove the output after the search field
out = Output()
display(out)
# set the event
button.on_click(on_button_clicked)
# trigger the event with the default value
on_button_clicked(button)
# +
from ipywidgets import widgets
from IPython.display import display, Javascript
# Callback for the "Load data" button: re-runs every cell below this one so
# the rest of the notebook picks up the newly selected article.
def run_below(ev):
    display(Javascript('IPython.notebook.execute_cells_below()'))
display(md(f'If this is correct, load the data and set this as the article to explore.'))
button = widgets.Button(description="Load data", button_style='info', min_width=500)
button.on_click(run_below)
display(button)
# -
# Section header for part B of the notebook.
from IPython.display import display, Markdown as md
display(md("---"))
display(md(f"# B. General Statistics "))
display(md(f"Provided through the Xtools API (1)"))
display(md(f"***Page: {the_page['title']}***"))
# +
from IPython.display import display, Markdown as md
from external.xtools import XtoolsAPI, XtoolsDV
xtools_api = XtoolsAPI(project = 'en.wikipedia.org')
xtools_dv = XtoolsDV(xtools_api)
# Fetch article-level statistics for the selected page from the Xtools API.
page_info = xtools_dv.get_page_info(the_page['title'])
# Flatten the nested assessment dict down to its display value.
page_info['assessment'] = page_info['assessment']['value']
# Rename the raw API keys to human-readable row labels and drop fields that
# are not interesting for display.
page_info = page_info.to_frame('value').rename(index={
    'project': 'Project name',
    'page': 'Page name',
    'watchers': 'Watchers (2)', 'pageviews': f"Page Views (per {page_info['pageviews_offset']} days)",
    'revisions': 'Revisions',
    'editors': 'Editors',
    'author': 'Creator of the page',
    'created_at': 'Creation Date',
    'created_rev_id': 'Creation revision id',
    'modified_at': 'Last modified',
    'last_edit_id': 'Last revision id',
    'assessment': 'Content Assessment (3)',
}).drop(index = ['pageviews_offset', 'author_editcount', 'secs_since_last_edit','elapsed_time'])
display(page_info)
display(md("<sup>**(1)** *A community-built service for article statistics at xtools.wmflabs.org* **(2)** *Users that added this page to their watchlist.* **(3)** *See [Wikipedia Content Assessment](https://en.wikipedia.org/wiki/Wikipedia:Content_assessment)*</sup>"))
# -
from IPython.display import display, Markdown as md
# Section header for part C of the notebook.
display(md("---"))
display(md(f"# C. Page Views"))
display(md(f"Provided through the Wikimedia API"))
display(md(f"***Page: {the_page['title']}***"))
# +
# Query request
from external.wikimedia import WikiMediaDV, WikiMediaAPI
wikimedia_api = WikiMediaAPI(project='en.wikipedia')
wikimedia_dv = WikiMediaDV(wikimedia_api)
views = wikimedia_dv.get_pageviews(the_page['title'], 'daily')
# Visualization
from visualization.views_listener import ViewsListener
from ipywidgets import interact
from ipywidgets.widgets import Dropdown
listener = ViewsListener(views)
interact(listener.listen,
         begin=Dropdown(options=views.timestamp),
         end=Dropdown(options=views.timestamp.sort_values(ascending=False)),
         granularity=Dropdown(options=['Yearly', 'Monthly', 'Weekly', 'Daily'], value='Monthly'))
# The df_plotted keeps a reference to the plotted data above
# Summary statistics of the currently plotted series.
# BUG FIX: 'Average views' previously aggregated with `min` (a copy-paste of
# the line above), so it reported the minimum instead of the mean.
listener.df_plotted['views'].agg({
    'Total views': sum,
    'Max views period': max,
    'Min views period': min,
    'Average views': 'mean',}).to_frame('Value')
# -
#
# After we have now seen some general statistics of the article and the views it attracted, we will go on to take a look at what specific kinds of changes by which editors it was subject to over time.
#
# Click below to go to the next notebook. You can later come back to this notebook and simply enter another article name to start the process over with that new article.
# Render a link to the next notebook in the collection.
from utils.notebooks import get_next_notebook
from IPython.display import HTML
display(HTML(f'<a href="{get_next_notebook()}" target="_blank">Go to next workbook</a>'))
| 1. General Metadata of a Wikipedia Article.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="../Pierian-Data-Logo.PNG">
# <br>
# <strong><center>Copyright 2019. Created by <NAME>.</center></strong>
# # Neural Network Exercises
# For these exercises we'll perform a binary classification on the Census Income dataset available from the <a href = 'http://archive.ics.uci.edu/ml/datasets/Adult'>UC Irvine Machine Learning Repository</a><br>
# The goal is to determine if an individual earns more than $50K based on a set of continuous and categorical variables.
#
# <div class="alert alert-danger" style="margin: 10px"><strong>IMPORTANT NOTE!</strong> Make sure you don't run the cells directly above the example output shown, <br>otherwise you will end up writing over the example output!</div>
# ## Census Income Dataset
# For this exercises we're using the Census Income dataset available from the <a href='http://archive.ics.uci.edu/ml/datasets/Adult'>UC Irvine Machine Learning Repository</a>.
#
# The full dataset has 48,842 entries. For this exercise we have reduced the number of records, fields and field entries, and have removed entries with null values. The file <strong>income.csv</strong> has 30,000 entries
#
# Each entry contains the following information about an individual:
# * <strong>age</strong>: the age of an individual as an integer from 18 to 90 (continuous)
# * <strong>sex</strong>: Male or Female (categorical)
# * <strong>education</strong>: represents the highest level of education achieved by an individual (categorical)
# * <strong>education_num</strong>: represents education as an integer from 3 to 16 (categorical)
# <div><table style="display: inline-block">
# <tr><td>3</td><td>5th-6th</td><td>8</td><td>12th</td><td>13</td><td>Bachelors</td></tr>
# <tr><td>4</td><td>7th-8th</td><td>9</td><td>HS-grad</td><td>14</td><td>Masters</td></tr>
# <tr><td>5</td><td>9th</td><td>10</td><td>Some-college</td><td>15</td><td>Prof-school</td></tr>
# <tr><td>6</td><td>10th</td><td>11</td><td>Assoc-voc</td><td>16</td><td>Doctorate</td></tr>
# <tr><td>7</td><td>11th</td><td>12</td><td>Assoc-acdm</td></tr>
# </table></div>
# * <strong>marital-status</strong>: marital status of an individual (categorical)
# <div><table style="display: inline-block">
# <tr><td>Married</td><td>Divorced</td><td>Married-spouse-absent</td></tr>
# <tr><td>Separated</td><td>Widowed</td><td>Never-married</td></tr>
# </table></div>
# * <strong>workclass</strong>: a general term to represent the employment status of an individual (categorical)
# <div><table style="display: inline-block">
# <tr><td>Local-gov</td><td>Private</td></tr>
# <tr><td>State-gov</td><td>Self-emp</td></tr>
# <tr><td>Federal-gov</td></tr>
# </table></div>
# * <strong>occupation</strong>: the general type of occupation of an individual (categorical)
# <div><table style="display: inline-block">
# <tr><td>Adm-clerical</td><td>Handlers-cleaners</td><td>Protective-serv</td></tr>
# <tr><td>Craft-repair</td><td>Machine-op-inspct</td><td>Sales</td></tr>
# <tr><td>Exec-managerial</td><td>Other-service</td><td>Tech-support</td></tr>
# <tr><td>Farming-fishing</td><td>Prof-specialty</td><td>Transport-moving</td></tr>
# </table></div>
# * <strong>hours-per-week</strong>: the hours an individual has reported to work per week as an integer from 20 to 90 (continuous)
# * <strong>income</strong>: whether or not an individual makes more than \\$50,000 annually (label)
# * <strong>label</strong>: income represented as an integer (0: <=\\$50K, 1: >\\$50K) (optional label)
# ## Perform standard imports
# Run the cell below to load the libraries needed for this exercise and the Census Income dataset.
# +
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
# %matplotlib inline
# Load the reduced Census Income dataset (30,000 rows, nulls removed).
df = pd.read_csv('../Data/income.csv')
# -
# Quick sanity checks: row count, first rows, and label balance.
print(len(df))
df.head()
df['label'].value_counts()
# ### 1. Separate continuous, categorical and label column names
# You should find that there are 5 categorical columns, 2 continuous columns and 1 label.<br>
# In the case of <em>education</em> and <em>education-num</em> it doesn't matter which column you use. For the label column, be sure to use <em>label</em> and not <em>income</em>.<br>
# Assign the variable names "cat_cols", "cont_cols" and "y_col" to the lists of names.
df.columns
# +
# CODE HERE
# Five categorical features (education is used over education-num; either works),
# two continuous features, and the integer label column.
cat_cols = ['sex', 'education', 'marital-status', 'workclass', 'occupation']
cont_cols = ['age', 'hours-per-week']
y_col = ['label']
# RUN THIS CODE TO COMPARE RESULTS:
print(f'cat_cols has {len(cat_cols)} columns')
print(f'cont_cols has {len(cont_cols)} columns')
print(f'y_col has {len(y_col)} column')
# +
# DON'T WRITE HERE
# -
# ### 2. Convert categorical columns to category dtypes
# +
# CODE HERE
# Category dtype gives each column integer codes (.cat.codes), used below.
for cat in cat_cols:
    df[cat] = df[cat].astype('category')
df.dtypes
# +
# DON'T WRITE HERE
# -
# ### Optional: Shuffle the dataset
# The <strong>income.csv</strong> dataset is already shuffled. However, if you would like to try different configurations after completing the exercises, this is where you would want to shuffle the entire set.
# THIS CELL IS OPTIONAL
df = shuffle(df, random_state=101)
df.reset_index(drop=True, inplace=True)
df.head()
# ### 3. Set the embedding sizes
# Create a variable "cat_szs" to hold the number of categories in each variable.<br>
# Then create a variable "emb_szs" to hold the list of (category size, embedding size) tuples.
# +
# CODE HERE
# Rule of thumb: embedding size = half the category count (rounded up), capped at 50.
cat_szs = [len(df[col].cat.categories) for col in cat_cols]
emb_szs = [(size, min(50, (size + 1) // 2)) for size in cat_szs]
emb_szs
# +
# DON'T WRITE HERE
# -
# ### 4. Create an array of categorical values
# Create a NumPy array called "cats" that contains a stack of each categorical column <tt>.cat.codes.values</tt><br>
# Note: your output may contain different values. Ours came after performing the shuffle step shown above.
# +
# CODE HERE
# One row per sample, one column per categorical feature (integer codes).
cats = np.stack([df[col].cat.codes.values for col in cat_cols], axis=1)
# RUN THIS CODE TO COMPARE RESULTS
cats[:5]
# +
# DON'T WRITE HERE
# -
# ### 5. Convert "cats" to a tensor
# Convert the "cats" NumPy array to a tensor of dtype <tt>int64</tt>
# CODE HERE
# int64 is the dtype nn.Embedding requires for index lookups.
cats = torch.tensor(cats, dtype=torch.int64)
# +
# DON'T WRITE HERE
# -
# ### 6. Create an array of continuous values
# Create a NumPy array called "conts" that contains a stack of each continuous column.<br>
# Note: your output may contain different values. Ours came after performing the shuffle step shown above.
# +
# CODE HERE
# One row per sample, one column per continuous feature.
conts = np.stack([df[col].values for col in cont_cols], axis=1)
# RUN THIS CODE TO COMPARE RESULTS
conts[:5]
# +
# DON'T WRITE HERE
# -
# ### 7. Convert "conts" to a tensor
# Convert the "conts" NumPy array to a tensor of dtype <tt>float32</tt>
# +
# CODE HERE
conts = torch.tensor(conts, dtype=torch.float32)
# RUN THIS CODE TO COMPARE RESULTS
conts.dtype
# +
# DON'T WRITE HERE
# -
# ### 8. Create a label tensor
# Create a tensor called "y" from the values in the label column. Be sure to flatten the tensor so that it can be passed into the CE Loss function.
# CODE HERE
# CrossEntropyLoss expects a 1-D tensor of class indices, hence flatten().
y = torch.tensor(df[y_col].values).flatten()
# +
# DON'T WRITE HERE
# -
# ### 9. Create train and test sets from <tt>cats</tt>, <tt>conts</tt>, and <tt>y</tt>
# We use the entire batch of 30,000 records, but a smaller batch size will save time during training.<br>
# We used a test size of 5,000 records, but you can choose another fixed value or a percentage of the batch size.<br>
# Make sure that your test records remain separate from your training records, without overlap.<br>
# To make coding slices easier, we recommend assigning batch and test sizes to simple variables like "b" and "t".
# +
# CODE HERE
b = 30000 # suggested batch size
t = 5000 # suggested test size
# Non-overlapping slices: first b-t rows train, last t rows test.
cat_train = cats[:b - t]
cat_test = cats[b - t:b]
con_train = conts[:b - t]
con_test = conts[b - t:b]
y_train = y[:b - t]
y_test = y[b - t:b]
# +
# DON'T WRITE HERE
# -
# ### Define the model class
# Run the cell below to define the TabularModel model class we've used before.
class TabularModel(nn.Module):
    """Feed-forward network for mixed tabular data.

    Categorical inputs pass through per-column embeddings (with dropout);
    continuous inputs pass through batch normalization. Both are concatenated
    and fed to a stack of Linear->ReLU->BatchNorm->Dropout blocks ending in a
    final Linear layer producing `out_sz` raw logits.
    """
    def __init__(self, emb_szs, n_cont, out_sz, layers, p=0.5):
        """
        emb_szs: list of (num_categories, embedding_dim) tuples
        n_cont:  number of continuous features
        out_sz:  number of output units (classes)
        layers:  hidden-layer sizes, e.g. [200, 100]
        p:       dropout probability (shared by embeddings and hidden layers)
        """
        # Call the parent __init__
        super().__init__()
        # Set up the embedding, dropout, and batch normalization layer attributes
        self.embeds = nn.ModuleList([nn.Embedding(ni, nf) for ni,nf in emb_szs])
        self.emb_drop = nn.Dropout(p)
        self.bn_cont = nn.BatchNorm1d(n_cont)
        # Assign a variable to hold a list of layers
        layerlist = []
        # Total input width = sum of embedding dims + number of continuous features
        n_emb = sum((nf for ni,nf in emb_szs))
        n_in = n_emb + n_cont
        # Iterate through the passed-in "layers" parameter (ie, [200,100]) to build a list of layers
        for i in layers:
            layerlist.append(nn.Linear(n_in,i))
            layerlist.append(nn.ReLU(inplace=True))
            layerlist.append(nn.BatchNorm1d(i))
            layerlist.append(nn.Dropout(p))
            n_in = i
        layerlist.append(nn.Linear(layers[-1],out_sz))
        # Convert the list of layers into an attribute
        self.layers = nn.Sequential(*layerlist)
    def forward(self, x_cat, x_cont):
        """Run a batch through the network.

        x_cat:  int64 tensor (batch, n_categorical) of category codes
        x_cont: float tensor (batch, n_cont) of continuous values
        Returns raw logits of shape (batch, out_sz).
        """
        # Extract embedding values from the incoming categorical data
        embeddings = []
        for i,e in enumerate(self.embeds):
            embeddings.append(e(x_cat[:,i]))
        x = torch.cat(embeddings, 1)
        # Perform an initial dropout on the embeddings
        x = self.emb_drop(x)
        # Normalize the incoming continuous data
        x_cont = self.bn_cont(x_cont)
        x = torch.cat([x, x_cont], 1)
        # Run the combined features through the hidden/output layers
        x = self.layers(x)
        return x
# ### 10. Set the random seed
# To obtain results that can be recreated, set a torch manual_seed (we used 33).
# CODE HERE
# Fix the RNG so weight initialization (and thus results) are reproducible.
torch.manual_seed(33)
# +
# DON'T WRITE HERE
# -
# ### 11. Create a TabularModel instance
# Create an instance called "model" with one hidden layer containing 50 neurons and a dropout layer p-value of 0.4
# +
# CODE HERE
# out_sz=2: one logit per income class (<=50K, >50K).
model = TabularModel(emb_szs, conts.shape[1], 2, [50], 0.4)
# RUN THIS CODE TO COMPARE RESULTS
model
# +
# DON'T WRITE HERE
# -
# ### 12. Define the loss and optimization functions
# Create a loss function called "criterion" using CrossEntropyLoss<br>
# Create an optimization function called "optimizer" using Adam, with a learning rate of 0.001
# +
# CODE HERE
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# +
# DON'T WRITE HERE
# -
# ### Train the model
# Run the cell below to train the model through 300 epochs. Remember, results may vary!<br>
# After completing the exercises, feel free to come back to this section and experiment with different parameters.
# +
import time
start_time = time.time()
epochs = 300
losses = []
# Full-batch training: one forward/backward pass over the whole training set per epoch.
for i in range(epochs):
    i+=1
    y_pred = model(cat_train, con_train)
    loss = criterion(y_pred, y_train)
    # FIX: store the Python float, not the graph-attached tensor. Appending
    # `loss` itself keeps every epoch's autograd graph alive (memory growth)
    # and later breaks plt.plot, which cannot convert grad-requiring tensors.
    losses.append(loss.item())
    # a neat trick to save screen space:
    if i%25 == 1:
        print(f'epoch: {i:3}  loss: {loss.item():10.8f}')
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
print(f'epoch: {i:3}  loss: {loss.item():10.8f}') # print the last line
print(f'\nDuration: {time.time() - start_time:.0f} seconds') # print the time elapsed
# -
# ### 13. Plot the Cross Entropy Loss against epochs
# Results may vary. The shape of the plot is what matters.
# +
# CODE HERE
# Plot training loss per epoch; expect a steep initial drop then a plateau.
plt.plot(range(epochs), losses)
plt.xlabel('epoch')
plt.ylabel('Cross Entropy Loss')
# +
# DON'T WRITE HERE
# -
# ### 14. Evaluate the test set
# With torch set to <tt>no_grad</tt>, pass <tt>cat_test</tt> and <tt>con_test</tt> through the trained model. Create a validation set called "y_val". Compare the output to <tt>y_test</tt> using the loss function defined above. Results may vary.
# +
# CODE HERE
# Inference only: disable gradient tracking for the test-set forward pass.
with torch.no_grad():
    y_val = model(cat_test, con_test)
    loss = criterion(y_val, y_test)
# RUN THIS CODE TO COMPARE RESULTS
print(f'CE Loss: {loss:.8f}')
# +
# TO EVALUATE THE TEST SET
# -
# ### 15. Calculate the overall percent accuracy
# Using a for loop, compare the argmax values of the <tt>y_val</tt> validation set to the <tt>y_test</tt> set.
# +
# CODE HERE
# Vectorized accuracy: compare each row's predicted class (argmax over the
# output logits) against the true label, then count the matches.
matches = y_val.argmax(dim=1) == y_test
count = int(matches.sum())
total = len(y_val)
print(f'{count} out of {total} = {count / total * 100:.2f}% correct')
# +
# DON'T WRITE HERE
# -
# ### BONUS: Feed new data through the trained model
# See if you can write a function that allows a user to input their own values, and generates a prediction.<br>
# <strong>HINT</strong>:<br>There's no need to build a DataFrame. You can use inputs to populate column variables, convert them to embeddings with a context dictionary, and pass the embedded values directly into the tensor constructors:<br>
# <pre>mar = input("What is the person's marital status? ")
# mar_d = dict(Divorced=0, Married=1, Married-spouse-absent=2, Never-married=3, Separated=4, Widowed=5)
# mar = mar_d[mar]
# cats = torch.tensor([..., ..., mar, ..., ...], dtype=torch.int64).reshape(1,-1)</pre>
# Make sure that names are put in alphabetical order before assigning numbers.
#
# Also, be sure to run <tt>model.eval()</tt> before passing new data through. Good luck!
# WRITE YOUR CODE HERE:
# RUN YOUR CODE HERE:
# +
# DON'T WRITE HERE
# -
# ## Great job!
| coursework/02-ANN-Artificial-Neural-Networks/05-Neural-Network-Exercises.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.1
# language: julia
# name: julia-1.3
# ---
# ## Mechanical system with spring, mass and damping
# Consider a mechanical oscillator including a spring
# $$ F_{c}(t) = c ~ x(t) $$
# with $ c > 0 $, a damping
# $$
# F_{D}(t) = D ~ v(t) = D ~ \dot{x}(t)
# $$
# with D > 0, and a mass
# $$ F_{m}(t) = m ~ a(t) = m ~ \ddot{x}(t) $$
#
# with m > 0. This system is excited by an external force $ F_{ex}(t) $ and all forces are summarized to
#
# $$ F_{m}(t) + F_{D}(t) + F_{c}(t) = m ~ \ddot{x}(t) + D ~ \dot{x}(t) + c ~ x(t) = F_{ex}(t) \text{.} $$
#
# The complete mechanical system is portrayed in the figure below.
# + jupyter={"source_hidden": true} outputExpanded=false
# Inline SVG diagram of the spring-mass-damper system (left) and the force
# balance (right); rendered below via the "image/svg+xml" MIME type.
foo = """
<svg height="210" width="700">
  <defs>
    <!-- arrowhead marker definition -->
    <marker id="arrow" viewBox="0 0 10 10" refX="5" refY="5"
        markerWidth="6" markerHeight="6"
        orient="auto-start-reverse">
      <path d="M 0 0 L 10 5 L 0 10 z" />
    </marker>
  </defs>
  <line x1="0" y1="0" x2="0" y2="150" style="stroke:rgb(0,0,0);stroke-width:4" />
  <!-- Damping -->
  <line x1="0" y1="30" x2="40" y2="30" style="stroke:rgb(0,0,0);stroke-width:2" />
  <line x1="40" y1="10" x2="40" y2="50" style="stroke:rgb(0,0,0);stroke-width:2" />
  <line x1="40" y1="10" x2="160" y2="10" style="stroke:rgb(0,0,0);stroke-width:2" />
  <line x1="40" y1="50" x2="160" y2="50" style="stroke:rgb(0,0,0);stroke-width:2" />
  <line x1="100" y1="15" x2="100" y2="45" style="stroke:rgb(0,0,0);stroke-width:2" />
  <line x1="100" y1="30" x2="200" y2="30" style="stroke:rgb(0,0,0);stroke-width:2" />
  <text x="120" y="70" fill="black">Damping D</text>
  <!-- Spring -->
  <line x1="0" y1="120" x2="40" y2="120" style="stroke:rgb(0,0,0);stroke-width:2" />
  <polyline points="40,120 50,100 60,140 70,100 80,140 90,100 100,140 110,100 120,140 130,100 140,140 150,100 160,140 170,100 180,120"
  style="fill:none;stroke:black;stroke-width:2" />
  <line x1="180" y1="120" x2="200" y2="120" style="stroke:rgb(0,0,0);stroke-width:2" />
  <text x="120" y="160" fill="black">Spring c</text>
  <!-- Mass -->
  <rect x="200" y="10" width="50" height="130" style="fill:rgb(255,255,255);stroke-width:3;stroke:rgb(0,0,0)" />
  <text x="210" y="160" fill="black">Mass m</text>
  <!-- External Force -->
  <line x1="250" y1="75" x2="300" y2="75" style="stroke:rgb(0,0,0);stroke-width:2" marker-end="url(#arrow)" />
  <text x="260" y="100" fill="black">Ext. Force F_ex</text>
  <!-- Overview Forces -->
  <line x1="500" y1="10" x2="500" y2="160" style="stroke:rgb(0,0,0);stroke-width:3" />
  <line x1="430" y1="25" x2="500" y2="25" style="stroke:rgb(0,0,0);stroke-width:2" marker-start="url(#arrow)" />
  <line x1="430" y1="85" x2="500" y2="85" style="stroke:rgb(0,0,0);stroke-width:2" marker-start="url(#arrow)" />
  <line x1="430" y1="145" x2="500" y2="145" style="stroke:rgb(0,0,0);stroke-width:2" marker-start="url(#arrow)" />
  <line x1="500" y1="75" x2="570" y2="75" style="stroke:rgb(0,0,0);stroke-width:2" marker-end="url(#arrow)" />
  <text x="450" y="50" fill="black">F_D</text>
  <text x="450" y="110" fill="black">F_m</text>
  <text x="450" y="170" fill="black">F_c</text>
  <text x="520" y="100" fill="black">F_ex</text>
</svg>
"""
display("image/svg+xml", foo)
# -
# The second order differential equation
# $$ \ddot{x}(t) + \frac{D}{m} \dot{x}(t) + \frac{c}{m} x(t) ~=~ \frac{1}{m} F_{ex}(t) $$
# is transfered with
#
# $$ \frac{D}{m} = 2 ~ d ~ \omega_{0} \quad \text{,} \quad \frac{c}{m} = \omega_{0}^{2} \quad \text{,} \quad \frac{1}{m} = K ~ \omega_{0}^{2} \quad \text{and} \quad F_{ex}(t) = u(t) $$
#
# to the general oscillation equation
#
# $$
# \begin{align}
# \ddot{y}(t) + 2 d ~ \omega_{0} ~ \dot{y}(t) + \omega_{0}^{2} ~ y(t) ~=~ K ~ \omega_{0}^{2} ~ u(t) \text{.} \label{eq:gen_oscil} \tag{1}
# \end{align}
# $$
#
# Equation $ \eqref{eq:gen_oscil} $ is noted with $ x_{1}(t) = y(t) $ and $ x_{2}(t) = \dot{y}(t) $ as the first order differential equation
#
# $$
# \begin{align}
# \begin{pmatrix}
# \dot{x}_{1}(t) \\
# \dot{x}_{2}(t)
# \end{pmatrix}
# =
# \begin{pmatrix}
# 0 & 1 \\
# - \omega_{0}^{2} & - 2 d ~ \omega_{0}
# \end{pmatrix}
# \begin{pmatrix}
# x_{1}(t) \\
# x_{2}(t)
# \end{pmatrix}
# +
# \begin{pmatrix}
# 0 \\
# K ~ \omega_{0}^{2}
# \end{pmatrix}
# u(t)
# \end{align}
# $$
#
# ### Stability
#
# The uncontrolled system (for $u(t) = 0$) is stable if matrix
#
# $$
# A = \begin{pmatrix}
# 0 & 1 \\
# - \omega_{0}^{2} & - 2 d ~ \omega_{0}
# \end{pmatrix}
# $$
#
# has only eigenvalues in the left complex space. The eigenvalues are calculated with
#
# $$ det(\lambda I - A) = \lambda ~ (\lambda + 2 d \omega_{0}) + \omega_{0}^2 = \lambda^2 + 2 d ~ \omega_{0} \lambda + \omega_{0}^{2} = 0 $$
#
# and thus one holds
# $$ \lambda = -d ~ \omega_{0} \pm j \omega_{0} ~ \sqrt{1 - d^{2}} \text{.} $$
#
# The uncontrolled system is always stable if $d > 0$ and $\omega_{0} > 0$, which is both guaranteed for usual mechanical systems. Furthermore, a damping $d < 0$ implies complex eigenvalues and leads to an oscillating behaviour of the solution trajectory $y(t)$.
# ### Numerical solution
#
# The controlled mechanical oscillator $ \dot{x}(t) = A ~ x(t) + b ~ u(t) $ is simulated with a numerical integration method instead of calculating the solution via eigenvalues and eigenvectors. For further information about numerical integration see [Wikipedia](https://en.wikipedia.org/wiki/Numerical_methods_for_ordinary_differential_equations) and the [DifferentialEquations documentation](http://docs.juliadiffeq.org/latest/index.html).
#
# The initial values are defined with
# Initial state and parameters of the oscillator simulation.
x₀ = [2.0, 1.0]; # Initial values (position, velocity)
tspan = (0.0, 50.0); # Time range
d = 0.5; # Damping
ω₀ = 0.7; # Eigenfrequency
param = [d, ω₀]; # Parameter vector passed to the ODE right-hand side
# The right-hand side $ A ~ x(t) + b ~ u(t) $ with an arbitrary input - here $u(t) = sign( sin(t))$ is chosen as a periodic excitation - and gain $K$ is defined as a function. The input is generated by the external force in the sense of a mechanical oscillator.
# In-place right-hand side of the oscillator ODE: writes ẋ into `dx`.
# State x = (position, velocity); parameters p = (damping d, eigenfrequency ω₀).
function mechanical_oscillator(dx, x, p, t)
    excitation = sign(sin(t)) # input u(t): periodic square-wave excitation
    gain = 0.3                # amplification K
    damping, ω = p[1], p[2]
    dx[1] = x[2]
    dx[2] = -ω^2 * x[1] - 2 * damping * ω * x[2] + gain * ω^2 * excitation
end
# Differential equations are solved in two steps in Julia. Firstly, the mathematical problem is formulated and secondly the mathematical problem is solved with a "numerical integration" method. Tolerances are set to specify the precision of the solution.
using DifferentialEquations
# Formulate the ODE problem, then integrate it with the Tsit5 solver;
# the tight tolerances control the accuracy of the numerical solution.
mech_osc_problem = ODEProblem(mechanical_oscillator,x₀,tspan, param); # Build of the ODE problem
mech_osc_solution = solve(mech_osc_problem,Tsit5(),reltol=1e-8,abstol=1e-8); # Solution of the ODE problem
# The calculated solution is figured in a plot.
using Plots
plot(mech_osc_solution, title="Mechanical Oscillator", label=["x1" "x2"])
| 2020-1-Summer/text/jupyter/CE_2020_04_modeling_mechanical_oscillator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/fridaruh/Curso_Intro_AI_Crehana/blob/master/Crehana_NN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="UMrlHeCBNkJ_"
# # Importando librerías
# + id="TAsGPk9aNImT"
import numpy as np
import matplotlib.pyplot as plt
# + [markdown] id="cir4w5Y8OE6u"
# # Carga de Datos
# + [markdown] id="UEtDo0VDyLMt"
# Importamos las librerías que utilziaremos para cargar los datos para la red neuronal:
# + id="fIPzj3uuyKXr"
from keras.datasets import mnist
from keras import layers, models
# + colab={"base_uri": "https://localhost:8080/"} id="Qdw6fXueNn9x" outputId="1fef2eba-7636-422d-a831-86608b93c7bc"
# Load MNIST: 60k training and 10k test grayscale digit images with labels.
(train_data, train_labels), (test_data, test_labels) = mnist.load_data()
# + colab={"base_uri": "https://localhost:8080/"} id="t6HIYi9oOUNX" outputId="31162588-71d5-49de-9003-61f0d3c1fb76"
train_data.shape  # (60000, 28, 28): 60k images of 28x28 pixels
# + colab={"base_uri": "https://localhost:8080/"} id="cYi6ZplLOXc1" outputId="58808640-2647-4ffd-c425-f882a084e3de"
train_data[0]  # raw pixel values (0-255) of the first training image
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="E--kFy-0OdTD" outputId="e1dfe11c-139a-4db7-a5f2-881e054be308"
plt.imshow(train_data[0])
plt.show()  # BUG FIX: the original referenced plt.show without calling it, so nothing was rendered outside inline mode
# + colab={"base_uri": "https://localhost:8080/"} id="So39rwCWOqnW" outputId="167bab0a-464a-4359-f309-ee5c92b24fe4"
train_labels[0]  # ground-truth digit of the first image
# + [markdown] id="iRcQmSEEPGnt"
# # Modelo
# + id="O4f8FPuaOquR"
# Fully connected network: flattened 28x28 image -> 512 ReLU units -> 10-way softmax.
model = models.Sequential()
model.add(layers.Dense(512, activation='relu', input_shape=(28*28,))) # Hidden layer of 512 neurons; the input is a flattened 28*28-pixel image
model.add(layers.Dense(10, activation = 'softmax')) # Output layer: one probability per digit class (0-9)
# + id="byDAbuTPOq2F"
model.compile(optimizer='rmsprop',
              loss = 'categorical_crossentropy', # Loss function for one-hot multi-class targets
              metrics = ['accuracy'] # Metric reported during training/evaluation
              )
# + [markdown] id="x7qvgtuHQhJv"
# # Transformación de datos
# + id="QIZuINz3QfKP"
# Flatten each 28x28 image into a 784-vector and scale pixels to [0, 1].
x_train = train_data.reshape((60000, 28*28))
x_train = x_train.astype('float32')/255
# + id="bYPJXkSly_w3"
x_test = test_data.reshape(10000, 28*28)
x_test = x_test.astype('float32')/255
# + id="6sTCJAsOzH8d"
from tensorflow.keras.utils import to_categorical
# + id="EHs-W4jYRGFd"
# One-hot encode integer labels (e.g. 5 -> [0,0,0,0,0,1,0,0,0,0]) as required by categorical_crossentropy.
y_train = to_categorical(train_labels)
y_test = to_categorical(test_labels)
# + colab={"base_uri": "https://localhost:8080/"} id="6GeH8gHPRZDd" outputId="51bfdc14-296d-4b9e-f552-b5b7efd1ba0b"
train_labels[0]  # the first label as an integer
# + colab={"base_uri": "https://localhost:8080/"} id="IrcJzmOzRpaO" outputId="cd617685-c21a-4b19-9c04-921f783b1555"
y_train[0]  # the same label as a one-hot vector
# + [markdown] id="aCx6FHK5RzhT"
# # Entrenamiento
# + [markdown] id="bERBdh9JzToQ"
# Iteraciones de las épocas
# + colab={"base_uri": "https://localhost:8080/"} id="qZLxOUukRtTF" outputId="591508c0-c1fe-49d2-c256-414901e95a8e"
# Train for 5 epochs, updating weights after every 128-sample mini-batch.
model.fit(x_train, y_train, epochs=5, batch_size=128)
# + [markdown] id="rynTRzB6SMgs"
# # Evaluación
# + colab={"base_uri": "https://localhost:8080/"} id="JCnpuP4fSGmQ" outputId="aa794284-d350-4595-9b28-a16fe1ac86d9"
# Report [loss, accuracy] on the held-out test set.
model.evaluate(x_test, y_test)
# + id="LPYfHm4QSR9w"
| Crehana_NN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 检测画面中的运动物体
#
# 此文档用来分析画面中是否有运动或出现变化的物体,基于openCV。
#
#
# # 导入摄像头相关的函数库
#
# 运行以下代码块后,稍等片刻,等待摄像头初始化,初始化成功后会在该代码块的下方出现一个300x300大小的摄像头实时视频画面。
#
# 你可以在这个画面上点击右键,点击`Create New View for Output`,这样就可以将摄像头画面放置在窗口的一遍,即使浏览到该文档的后面部分,你依然可以随时观看摄像头的画面,该方法也同样适用于其它组件。
#
# 多次运行该代码块有一定概率会初始化失败,在`jetbot.Camera`中已经包含了该问题的解决方法,你只需要重新运行该Kernel即可,但是注意不要使用该选项卡上方的圆形箭头来重新运行,那种方法有一定概率会依然初始化失败。
#
# 建议重新运行该Kernel的方法:
# 在左侧`File Browser`中,前方有绿色圆点的`*.ipynb`文件上点击右键(前面有绿色的圆点说明Kernel正在运行中),选择`Shut Down Kernel`,你会发现绿色的圆点消失了,然后关闭掉该选项卡再双击刚才被关掉的`*.ipynb`文件来重新运行kernel。
#
# 再次运行以下代码块,摄像头就应该可以正常初始化了。
# +
import traitlets
import ipywidgets
from IPython.display import display
from jetbot import Camera, bgr8_to_jpeg
# Initialize the JetBot camera at 300x300 and mirror every captured frame
# into an ipywidgets Image so the live feed renders inside the notebook.
camera = Camera.instance(width=300, height=300)
image_widget = ipywidgets.Image() # widget size does not have to match the camera resolution
# One-way link: each new camera frame (BGR8 ndarray) is converted to JPEG bytes for the widget.
camera_link = traitlets.dlink((camera, 'value'), (image_widget, 'value'), transform=bgr8_to_jpeg)
display(image_widget)
# -
# # 运动侦测的函数
#
# 该运动侦测功能基于openCV来实现,在Jetpack中已经预安装了openCV以及imutils,所以你可以直接运行以下代码块导入所需要的函数库,如果你使用的不是jetpack或者报错没有相应的库,则需要手动在终端中安装openCV或imutils,分别使用`sudo pip3 install opencv-python`和`sudo pip3 install imutils`来安装相应的库,如果运行没有报错提示缺少这两个库,则可以忽略掉这些内容直接进行下一步的代码块运行。
# +
import cv2
import imutils
import datetime
# avg holds a running-average reference frame (the "background"); new frames
# are compared against it to find regions of the image that changed.
avg = None
# Timestamp of the most recent frame in which motion was detected.
lastMovtionCaptured = datetime.datetime.now()
# Each captured frame is passed into this function for processing.
def motionDetect(imgInput):
    """Detect moving/changed regions in a frame via background subtraction.

    Maintains a running-average background in the module-level ``avg``;
    regions of the incoming frame that differ from it are outlined with a
    rectangle and a status text is drawn.

    Parameters
    ----------
    imgInput : numpy.ndarray
        BGR frame from the camera; annotated in place and also returned.
    """
    global avg, lastMovtionCaptured
    # Current timestamp, used to time-stamp detections.
    timestamp = datetime.datetime.now()

    # Grayscale + Gaussian blur: cheaper to analyze and robust to pixel noise.
    gray = cv2.cvtColor(imgInput, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # The first frame ever seen becomes the initial background.
    if avg is None:
        avg = gray.copy().astype("float")
        return imgInput

    # Slowly blend the new frame into the background so gradual lighting
    # changes do not register as motion.
    cv2.accumulateWeighted(gray, avg, 0.5)
    # Absolute per-pixel difference between this frame and the background.
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

    # Binarize the difference, dilate to close small gaps, then extract the
    # contours of the changed regions.
    thresh = cv2.threshold(frameDelta, 5, 255,
        cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    # There may be several changed regions, so outline each one that is
    # large enough to matter.
    for c in cnts:
        # Minimum contour area (in pixels) that counts as motion.  Smaller
        # values make detection more sensitive but pick up more noise.
        # (The original comment said 800, but 30 is the value in effect.)
        if cv2.contourArea(c) < 30:
            continue

        (mov_x, mov_y, mov_w, mov_h) = cv2.boundingRect(c)
        cv2.rectangle(imgInput, (mov_x, mov_y), (mov_x+mov_w, mov_y+mov_h), (128, 255, 0), 1)
        # Remember when motion was last seen so the label lingers briefly.
        lastMovtionCaptured = timestamp

    # Keep showing "Motion Detected" for 0.5 s after the last detection so
    # the label does not flicker.  BUG FIX: the original compared the int
    # ``timedelta.seconds`` against 0.5, which effectively made this a
    # 1-second window; ``total_seconds()`` honors the intended 0.5 s.
    if (timestamp - lastMovtionCaptured).total_seconds() >= 0.5:
        cv2.putText(imgInput,"Motion Detecting",(10,80), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(128,255,0),1,cv2.LINE_AA)
    else:
        cv2.putText(imgInput,"Motion Detected",(10,80), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,128,255),1,cv2.LINE_AA)

    # Return the annotated frame.
    return imgInput
# -
# # 处理视频帧并显示
#
# 运行以下代码块后,你就可以看到画面的颜色发生了变化,说明视频画面已经通过`motionDetect()`函数处理成功。
# +
def execute(change):
    """Traitlets observer: run motion detection on each new camera frame.

    ``change['new']`` carries the latest BGR frame; the annotated result
    is JPEG-encoded and pushed into the display widget.
    """
    global image_widget
    image = change['new']
    image_widget.value = bgr8_to_jpeg(motionDetect(image))
# Process the current frame once, then attach the observer so every
# subsequent frame is processed automatically.
execute({'new': camera.value})
camera.unobserve_all()  # detach any existing observers (e.g. the plain preview link) first
camera.observe(execute, names='value')
# -
# 此时你已经运行了全部代码块,当画面中有物体运动或发生变化时,文字内容会发生改变,会有绿色的矩形框圈出发生变化的区域。
# # 关闭图像处理和摄像头
# 运行以下代码块来关闭该图像处理功能。
camera.unobserve(execute, names='value')  # stop feeding frames into the motion detector
# Release the camera resource so other notebooks/processes can use it.
camera.stop()
| JETANK_3_motionDetect/motionDetect_cn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.9 64-bit (''foggy'': conda)'
# name: python379jvsc74a57bd0533331669000a43511796226c0ff0dc9c285bf5a1783fa2d13045c1d50bcdf1d
# ---
# +
import math
from tqdm import tqdm
import torch
import torch.nn as nn
from torchvision import datasets
from torchvision import transforms
from torch.utils import data
# +
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with 1-pixel padding."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return conv
class BasicBlock(nn.Module):
    """Two 3x3 convolutions with an identity (or projected) skip connection."""

    expansion = 1  # basic blocks keep the channel count at `planes`

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the input on the skip path when the shape changes.
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))

        # Residual addition followed by the final activation.
        y += shortcut
        return self.relu(y)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (output has planes * 4 channels)."""

    expansion = 4  # channel multiplier applied by the final 1x1 convolution

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Reduce channels, apply the spatial 3x3, then expand by 4x.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the input on the skip path when the shape changes.
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        # Residual addition followed by the final activation.
        y += shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """CIFAR-style ResNet: a 3x3 stem, three residual stages at 16/32/64
    channels on 32x32 inputs, global average pooling, and a linear classifier.

    Parameters
    ----------
    depth : int
        Total network depth; must be 6n+2 for BasicBlock or 9n+2 for Bottleneck.
    num_classes : int
        Output size of the final linear layer.
    block_name : str
        'BasicBlock' or 'Bottleneck' (case-insensitive).
    """

    def __init__(self, depth, num_classes=1000, block_name='BasicBlock'):
        super(ResNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == 'basicblock':
            assert (depth - 2) % 6 == 0, 'When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202'
            n = (depth - 2) // 6  # residual blocks per stage
            block = BasicBlock
        elif block_name.lower() == 'bottleneck':
            assert (depth - 2) % 9 == 0, 'When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            # Fixed typo in the original message ('shoule be Basicblock').
            raise ValueError('block_name should be BasicBlock or Bottleneck')

        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)  # 32x32 -> 16x16
        self.layer3 = self._make_layer(block, 64, n, stride=2)  # 16x16 -> 8x8
        self.avgpool = nn.AvgPool2d(8)  # collapses the final 8x8 feature map
        self.fc = nn.Linear(64 * block.expansion, num_classes)

        # He-style initialization for convolutions; identity init for BN.
        # (Renamed the loop-local from `n`, which shadowed blocks-per-stage.)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual units; only the first may downsample/widen."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection so the skip connection matches the new shape.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)    # 32x32

        x = self.layer1(x)  # 32x32
        x = self.layer2(x)  # 16x16
        x = self.layer3(x)  # 8x8

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, features)
        x = self.fc(x)

        return x
def resnet32(**kwargs):
    """Factory for a depth-32 CIFAR ResNet; extra kwargs are forwarded to ResNet."""
    return ResNet(depth=32, **kwargs)
# +
import torch
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
# @neelabh17 implementation
class CCELossFast(torch.nn.Module):
    """Classwise calibration error computed over softmax-confidence bins.

    Predictions are binned by per-class softmax confidence; for every
    (class, bin) cell the module tracks prediction counts, correct counts
    and summed confidence — per batch and accumulated across batches.
    ``forward`` returns a batch-level calibration loss; the ``get_*``
    helpers report statistics over everything accumulated so far.

    NOTE(review): all buffers are allocated with ``.cuda()``, so a CUDA
    device is required.
    """
    def __init__(self, n_classes, n_bins = 10, mode = "eval", loss_type = "sce"):
        '''
        n_classes: number of classes in the classifier output
        n_bins: number of equal-width confidence bins on [0, 1]
        mode: stored but not used by the computations below
        loss_type: "sce" (count-weighted |acc - conf|), "kernel"
            (exponential-kernel weighting) or "diff" (squared difference)
        '''
        super(CCELossFast,self).__init__()
        self.n_classes = n_classes
        self.n_bins = n_bins
        self.mode = mode
        self.loss_type = loss_type
        self.createBins()
        # Accumulated (across all forward calls) per-(class, bin) stats:
        # prediction counts, correct counts, and summed confidence.
        self.no_pred_tot = torch.zeros(self.n_classes, self.n_bins).cuda()
        self.no_acc_tot = torch.zeros(self.n_classes, self.n_bins).cuda()
        self.conf_sum_tot = torch.zeros(self.n_classes, self.n_bins).cuda()
    def reset(self):
        # Clear the accumulated statistics (e.g. before a fresh evaluation pass).
        self.no_pred_tot = torch.zeros(self.n_classes, self.n_bins).cuda()
        self.no_acc_tot = torch.zeros(self.n_classes, self.n_bins).cuda()
        self.conf_sum_tot = torch.zeros(self.n_classes, self.n_bins).cuda()
    def forward(self , output, target):
        '''
        output: [batch, n_classes] logits for a batch
        target: [batch] integer ground-truth labels

        Builds three [n_classes, n_bins] tables for the current batch
        (prediction count, correct count, confidence sum), folds them
        (detached) into the running totals, and returns the calibration
        loss selected by ``self.loss_type`` computed from the *current
        batch* tables, so it stays differentiable w.r.t. ``output``.
        '''
        current_no_pred_tot = torch.zeros(self.n_classes, self.n_bins).cuda()
        current_no_acc_tot = torch.zeros(self.n_classes, self.n_bins).cuda()
        current_conf_sum_tot = torch.zeros(self.n_classes, self.n_bins).cuda()
        output = torch.softmax(output, dim=1)
        # [batch, classes]
        for i, (bin_lower, bin_upper) in enumerate(zip(self.bin_lowers, self.bin_uppers)):
            # Boolean mask of probabilities falling into bin (lower, upper].
            mask = (output> bin_lower) * (output <= bin_upper)
            for class_id in range(self.n_classes):
                class_mask = mask[:,class_id]
                classwise_gt = (target == class_id).long()
                current_no_pred_tot[class_id][i] = torch.sum(class_mask)
                current_no_acc_tot[class_id][i] = torch.sum(class_mask * classwise_gt)
                current_conf_sum_tot[class_id][i] = torch.sum((output[:,class_id])[class_mask])
        # .data detaches the running totals from the autograd graph.
        self.no_pred_tot += current_no_pred_tot.data
        self.no_acc_tot += current_no_acc_tot.data
        self.conf_sum_tot += current_conf_sum_tot.data
        # Per-(class, bin) accuracy and mean confidence for this batch;
        # the +1e-13 guards against division by zero in empty bins.
        avg_acc = (current_no_acc_tot)/(current_no_pred_tot + 1e-13)
        avg_conf = current_conf_sum_tot / (current_no_pred_tot + 1e-13)
        # overall_cceLoss = torch.sum(torch.abs(avg_acc - avg_conf) * (self.no_pred_tot/torch.sum(self.no_pred_tot)))
        # overall_cceLoss = torch.sum(((avg_acc - avg_conf)**2))
        assert (self.loss_type=="sce" or self.loss_type=="kernel" or self.loss_type=="diff")
        # Correct implementation
        if(self.loss_type=="sce"):
            overall_cceLoss = torch.sum(torch.abs(avg_acc - avg_conf) * current_no_pred_tot/torch.sum(current_no_pred_tot))
        # Kernel based implementation
        elif(self.loss_type=="kernel"):
            overall_cceLoss = torch.sum((1-torch.exp((-1*((avg_acc - avg_conf)**2))/0.5)) * (current_no_pred_tot/torch.sum(current_no_pred_tot)))
        # difference based approach
        elif(self.loss_type=="diff"):
            overall_cceLoss = torch.sum(((avg_acc - avg_conf)**2))
        # overall_cceLoss = torch.sum(12500*(1-torch.exp((-1*((avg_acc - avg_conf)**2))/6400)) * (self.no_pred_tot/torch.sum(self.no_pred_tot)))
        # print(self.conf_sum_tot.requires_grad)
        return overall_cceLoss
    def createBins(self):
        #uniform bin spacing
        bin_boundaries = np.linspace(0, 1, self.n_bins + 1)
        self.bin_lowers = bin_boundaries[:-1]
        self.bin_uppers = bin_boundaries[1:]
        self.avg_bin = torch.Tensor((self.bin_lowers + self.bin_uppers)/2).cuda()
    def get_perc_table(self, classes):
        # Accumulated per-(class, bin) accuracy, printed as a percentage table.
        self.perc = (self.no_acc_tot)/(self.no_pred_tot + 1e-13)
        self.perc *= 100
        from tabulate import tabulate
        x= list(self.perc.cpu().numpy())
        for i in range(len(x)):
            x[i]=list(x[i])
            x[i]=[classes[i]]+list(x[i])
        print(tabulate(x, headers = ["Classes"]+[ "{:0.2f} - {:0.2f}".format(self.bin_lowers[i] * 100, self.bin_uppers[i] * 100) for i in range( len(self.bin_lowers))]))
        return self.perc
    def get_diff_score(self):
        # Unweighted mean absolute accuracy-confidence gap over all cells.
        avg_acc = (self.no_acc_tot)/(self.no_pred_tot + 1e-13)
        avg_conf = self.conf_sum_tot / (self.no_pred_tot + 1e-13)
        return torch.sum (torch.abs(avg_acc-avg_conf))/(self.n_bins*self.n_classes)
    def get_overall_CCELoss(self):
        # Count-weighted absolute calibration gap over the *accumulated* stats.
        avg_acc = (self.no_acc_tot)/(self.no_pred_tot + 1e-13)
        avg_conf = self.conf_sum_tot / (self.no_pred_tot + 1e-13)
        # overall_cceLoss = torch.sum(torch.abs(avg_acc - avg_conf) * (self.no_pred_tot/torch.sum(self.no_pred_tot)))
        # overall_cceLoss = torch.sum(((avg_acc - avg_conf)**2))
        # Correct implementation
        # overall_cceLoss = torch.sum(((avg_acc - avg_conf)**2) * (self.no_pred_tot/torch.sum(self.no_pred_tot)))
        # Non Squared implementation
        overall_cceLoss = torch.sum((torch.abs(avg_acc - avg_conf)) * (self.no_pred_tot/torch.sum(self.no_pred_tot)))
        # Kernel based implementation
        # overall_cceLoss = torch.sum((1-torch.exp((-1*((avg_acc - avg_conf)**2))/0.5)) * (self.no_pred_tot/torch.sum(self.no_pred_tot)))
        # overall_cceLoss = torch.sum(12500*(1-torch.exp((-1*((avg_acc - avg_conf)**2))/6400)) * (self.no_pred_tot/torch.sum(self.no_pred_tot)))
        # print("Overall CCE Loss = ", overall_cceLoss)
        return overall_cceLoss
    def get_classVise_CCELoss(self, classes):
        # Per-class count-weighted calibration gap, printed as a table.
        avg_acc = (self.no_acc_tot)/(self.no_pred_tot + 1e-13)
        # print(avg_acc.shape)
        avg_conf = self.conf_sum_tot / (self.no_pred_tot + 1e-13)
        # print(avg_conf.shape)
        x = torch.sum(torch.abs(avg_acc-avg_conf) * self.no_pred_tot, dim = 1) / torch.sum(self.no_pred_tot, dim = 1)
        x = x.reshape(-1,1)
        # print(x.shape)
        x=list(x)
        from tabulate import tabulate
        for i in range(len(x)):
            x[i]=list(x[i])
            x[i]=[classes[i]]+list(x[i])
        print(tabulate(x, headers = ["Classes", "ECELoss"]))
    def get_diff_mean_std (self):
        # Mean and std of |confidence - accuracy| (in percent) across all cells.
        self.perc = (self.no_acc_tot)/(self.no_pred_tot + 1e-13)
        avg_conf = self.conf_sum_tot / (self.no_pred_tot + 1e-13)
        self.perc *= 100
        avg_conf *= 100
        dif = torch.abs(avg_conf- self.perc)
        return dif.mean(), dif.std()
# +
# Rebuild the ResNet-32 architecture and restore trained weights from disk.
num_classes = 10
model = resnet32(num_classes= num_classes)
resume = "checkpoint.pth"
saved_model_dict = torch.load(resume)
model.load_state_dict(saved_model_dict['state_dict'])
model.cuda()  # CCELossFast allocates its buffers on the GPU, so the model must live there too
sce_criterion = CCELossFast(n_classes = num_classes)
# +
# Preparing the CIFAR-10 test set.
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),  # per-channel mean/std normalization
])
dataloader = datasets.CIFAR10
testset = dataloader(root='./data', train=False, download=False, transform=transform_test)  # expects the data to already exist on disk (download=False)
testloader = data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=8)
# +
model.eval()  # inference mode: freeze BatchNorm statistics
bar = tqdm(testloader, total=len(testloader))
sce_criterion.reset()  # clear any previously accumulated calibration stats
with torch.no_grad():
    for inputs, targets in bar:
        targets = targets.cuda()
        inputs = inputs.cuda()
        # compute output
        outputs = model(inputs)
        # NOTE(review): calling .forward() directly skips nn.Module hooks;
        # sce_criterion(outputs, targets) would be the conventional call.
        sce_criterion.forward(outputs, targets)
sce = sce_criterion.get_overall_CCELoss().item()
print()
print(sce)  # count-weighted calibration error accumulated over the test set
# -
| ramya.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Problem 42
# ## Coded triangle numbers
#
# The $n^{th}$ term of the sequence of triangle numbers is given by, $t_n = \frac{n (n + 1)}{2}$; so the first ten triangle numbers are:
#
# $$1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...$$
#
# By converting each letter in a word to a number corresponding to its alphabetical position and adding these values we form a word value. For example, the word value for SKY is $19 + 11 + 25 = 55 = t_{10}$. If the word value is a triangle number then we shall call the word a triangle word.
#
# Using [words.txt](https://projecteuler.net/project/resources/p042_words.txt) (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, how many are triangle words?
#
# OEIS Sequence: [A000217](https://oeis.org/A000217)
#
# ## Solution
# + pycharm={"name": "#%%\n"}
from euler.calculus import triangular_numbers
# + pycharm={"name": "#%%\n"}
def compute(path: str) -> int:
    """Count the triangle words in the comma-separated, quoted word list at *path*.

    A word's value is the sum of its letters' alphabetical positions
    (A=1, ..., Z=26); a word is a "triangle word" when that value equals a
    triangular number t_n = n(n+1)/2.
    """
    # First 20 triangular numbers, matching the bound the original used.
    triangles = set(triangular_numbers(20))
    # Map each uppercase letter to its 1-based alphabet position.
    chars = {chr(ord('A') + i): i + 1 for i in range(26)}

    # Fix: open the file with a context manager so the handle is closed
    # deterministically (the original left it to the garbage collector).
    with open(path) as file:
        words = file.read().replace('"', '').split(',')

    return sum(1 for word in words
               if sum(chars[letter] for letter in word) in triangles)
# + pycharm={"name": "#%%\n"}
compute('p042_words.txt')  # answer for the Project Euler input file
# + pycharm={"name": "#%%\n"}
# %timeit -n 100 -r 1 -p 6 compute('p042_words.txt')
| problems/0042/solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
import os
# NOTE(review): installing packages at runtime with sudo is fragile and
# requires root; prefer baking dependencies into the cluster image.
os.system("sudo pip install findspark sql_magic")
import findspark
findspark.init('/usr/lib/spark')  # point findspark at the cluster's Spark installation
import pyspark
from pyspark.sql import SparkSession
from datetime import datetime
from pyspark.sql.functions import col, udf
from pyspark.sql.types import DateType
# Reuse an active SparkSession if one exists, otherwise create one.
spark = SparkSession\
            .builder\
            .getOrCreate()
# %load_ext sql_magic
# %config SQL.conn_name = 'spark'
# Read the NYPD complaints CSV straight from S3, inferring column types.
df = spark.read.csv("s3n://2017edmfasatb/nypd_complaints/data/NYPD_Complaint_Data_Historic.csv", header = True, inferSchema = True)
# +
from pyspark.sql.types import TimestampType

# Parse the raw string columns into proper temporal types.
date_func = udf(lambda x: datetime.strptime(x, '%m/%d/%Y'), DateType())
# BUG FIX: the time parser was declared as DateType, which truncates the
# parsed value to a date and silently discards the time of day;
# TimestampType preserves the parsed HH:MM:SS.
time_func = udf(lambda x: datetime.strptime(x, '%H:%M:%S'), TimestampType())

# Backfill nulls with sentinel strings so strptime cannot fail on None.
df = df.fillna('01/01/1900', subset = ['CMPLNT_FR_DT', 'CMPLNT_TO_DT', 'RPT_DT'])
df = df.fillna('00:00:00', subset = ['CMPLNT_FR_TM', 'CMPLNT_TO_TM'])

df = df.withColumn('CMPLNT_FR_DT_FORMATTED', date_func(col('CMPLNT_FR_DT')))
df = df.withColumn('CMPLNT_TO_DT_FORMATTED', date_func(col('CMPLNT_TO_DT')))
df = df.withColumn('RPT_DT_FORMATTED', date_func(col('RPT_DT')))
df = df.withColumn('CMPLNT_FR_TM_FORMATTED', time_func(col('CMPLNT_FR_TM')))
df = df.withColumn('CMPLNT_TO_TM_FORMATTED', time_func(col('CMPLNT_TO_TM')))
df.printSchema()
# +
# NOTE(review): these udfs declare no return type, so Spark uses the udf
# default (StringType) — the extracted year/month come back as strings.
extract_year = udf(lambda x: x.year)
extract_month = udf(lambda x: x.month)
df = df.withColumn('CMPLNT_FR_DT_YEAR', extract_year(col('CMPLNT_FR_DT_FORMATTED')))
df = df.withColumn('CMPLNT_FR_DT_MONTH', extract_month(col('CMPLNT_FR_DT_FORMATTED')))
# -
# Register the DataFrame so it can be queried with SQL as "complaint".
df.createOrReplaceTempView("complaint")
# result = %read_sql SELECT * FROM complaint LIMIT 10;
# ``result`` is produced by the %read_sql cell magic above when run in Jupyter.
result
| notebook/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Modeling with `scikit-learn`
# <br>
# <center>
# <img src="https://raw.githubusercontent.com/uc-r/Advanced-R/f1001a5b40b5e3803e4cd01a40c7129fee3afb39/docs/images/process-icon.svg" alt="fortune-teller.gif" width="1200" height="1200">
# </center>
# + [markdown] slideshow={"slide_type": "slide"}
# # Introduction to Machine Learning
# + [markdown] slideshow={"slide_type": "slide"}
# ## Introduction
#
# Machine learning (ML) continues to grow in importance for many organizations across nearly all domains. Some example applications of machine learning in practice include:
#
# * Predicting the likelihood of a patient returning to the hospital (_readmission_) within 30 days of discharge.
# * Segmenting customers based on common attributes or purchasing behavior for targeted marketing.
# * Predicting coupon redemption rates for a given marketing campaign.
# * Predicting customer churn so an organization can perform preventative intervention.
# * And many more!
#
# To address each scenario, we can use a given set of <u>_features_</u> to train an <u>_algorithm_</u> and extract insights.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Types of modeling
#
# These algorithms, or _learners_, can be classified according to the amount and type of supervision needed during training.
#
# Two primary categories of algorithms:
#
# - ___supervised learners___ which construct predictive models
# - ___unsupervised learners___ which build descriptive models.
#
# Which type you will need to use depends on the learning task you hope to accomplish.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Supervised learning
#
# A ___predictive model___ is used for tasks that involve the prediction of a given output (or target) using other variables (or features) in the data set.
# + [markdown] slideshow={"slide_type": "fragment"}
# The learning algorithm in a predictive model attempts to discover and model the relationships among the <font color="red">target</font> variable (the variable being predicted) and the other <font color="blue">features</font> (aka predictor variables).
# + [markdown] slideshow={"slide_type": "fragment"}
# Examples of predictive modeling include:
#
# * using <font color="blue">customer attributes</font> to predict the probability of the <font color="red">customer churning</font> in the next 6 weeks;
# * using <font color="blue">home attributes</font> to predict the <font color="red">sales price</font>;
# * using <font color="blue">employee attributes</font> to predict the likelihood of <font color="red">attrition</font>;
# * using <font color="blue">patient attributes</font> and symptoms to predict the risk of <font color="red">readmission</font>;
# * using <font color="blue">production attributes</font> to predict <font color="red">time to market</font>.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Types of supervised learning
#
#
# Most supervised learning problems can be bucketed into one of two categories, <u>_regression_</u> or <u>_classification_</u>
#
# * __Regression__: objective is to predict a numeric outcome
# - What is the expected sales price?
# - What is the expected wait time?
# - What is the expected time to market?
# * __Classification__: objective is to predict a categorical outcome
# - Did a customer redeem a coupon (coded as yes/no or 1/0)?
# - Did a customer churn (coded as yes/no or 1/0)?
# - Did a customer click on our online ad (coded as yes/no or 1/0)?
# + [markdown] slideshow={"slide_type": "slide"}
# ## Unsupervised learning
#
# A set of statistical tools to better understand _n_ observations that contain a set of features without being guided by a response variable.
#
# In essence, unsupervised learning is concerned with identifying groups in a data set
#
# * __clustering__: reduce the observation space of a data set
# * __dimension reduction__: reduce the feature space of a data set
#
# <center>
# <img src="https://uc-r.github.io/Advanced-R/images/clustering_vs_pca.jpeg" alt="modeling-process" width="2000">
# </center>
# + [markdown] slideshow={"slide_type": "slide"}
# # Today's focus
#
# - __Supervised learning__ for a __regression problem__
#
# - using <font color="blue">home attributes</font> to predict real estate <font color="red">sales price</font>
#
# - __Objective__: understand the basic supervised learning modeling process and how to implement with scikit-learn
#
# Our ___Advanced Python workshop___ will go into much more detail than we have time for here.
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Supervised learning modeling process
# + [markdown] slideshow={"slide_type": "slide"}
# ## Modeling Process
#
# * The machine learning process is very iterative and heuristic-based
#
# * Common for many ML approaches to be applied, evaluated, and modified before a final, optimal model can be determined
#
# * A proper process needs to be implemented to have confidence in our results
#
# <center>
# <img src="https://uc-r.github.io/Advanced-R/images/modeling_process.png" alt="modeling-process" width="900" height="900">
# </center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Modeling Process
#
# This module provides an introduction to the modeling process and the concepts that are useful for any type of machine learning model:
#
# * data splitting
#
# * model application
#
# * resampling
#
# * bias-variance trade-off -- hyperparameter tuning
#
# * model evaluation
# + [markdown] slideshow={"slide_type": "slide"}
# ## Prerequisites - packages
# + slideshow={"slide_type": "-"}
# Helper packages
import math
import numpy as np
import pandas as pd
from plotnine import ggplot, aes, geom_density, geom_line, geom_point, ggtitle
# Modeling process
from sklearn.model_selection import train_test_split, KFold, RepeatedKFold, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
# + [markdown] slideshow={"slide_type": "slide"}
# ## Prerequisites - Ames housing data
#
# - __problem type__: supervised regression
# - __response variable__: `Sale_Price` (i.e., \\$195,000, \\$215,000)
# - __features__: 80
# - __observations__: 2,930
# - __objective__: use property attributes to predict the sale price of a home
# -
# Ames housing data: one row per home sale; Sale_Price is the target.
ames = pd.read_csv("../data/ames.csv")
ames.head()
# + [markdown] slideshow={"slide_type": "slide"}
# # <font color="red">Your Turn</font>
#
# Take 5 minutes to explore the housing data
#
# - What does the distribution of the response variable (`Sale_Price`) look like?
#
# - How could the different features be helpful in predicting the sales price?
# + [markdown] slideshow={"slide_type": "slide"}
# # Data Splitting
# + [markdown] slideshow={"slide_type": "slide"}
# ## Generalizability
#
# __Generalizability__: we want an algorithm that not only fits well to our past data, but more importantly, one that <font color="blue">predicts a future outcome accurately</font>.
# + [markdown] slideshow={"slide_type": "fragment"}
# * __Training Set__: these data are used to develop feature sets, train our algorithms, tune hyper-parameters, compare across models, and all of the other activities required to reach a final model decision.
#
# * __Test Set__: having chosen a final model, these data are used to estimate an unbiased assessment of the model’s performance (generalization error).
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src="https://uc-r.github.io/Advanced-R/images/nope.png" alt="modeling-process" width="300" style="float:right" >
#
# <font color="red">DO NOT TOUCH THE TEST SET UNTIL THE VERY END!!!</font>
# + [markdown] slideshow={"slide_type": "slide"}
# ## What's the right split?
#
# * typical recommendations for splitting your data into training-testing splits include 60% (training) - 40% (testing), 70%-30%, or 80%-20%
#
# * as data sets get smaller ($n < 500$):
# - spending too much in training ($> 80$%) won’t allow us to get a good assessment of predictive performance. We may find a model that fits the training data very well, but is not generalizable (overfitting),
# - sometimes too much spent in testing ($> 40$%) won’t allow us to get a good assessment of model parameters
#
# * as n gets larger ($n > 100$K):
# - marginal gains with larger sample sizes
# - may use a smaller training sample to increase computation speed
#
# * as p gets larger ($p \geq n$)
# - larger samples sizes are often required to identify consistent signals in the features
# + [markdown] slideshow={"slide_type": "slide"}
# ## Mechanics of data splitting
# -
# Create a 70/30 train/test split; random_state makes the split reproducible.
train, test = train_test_split(ames, train_size=0.7, random_state=123)
# + slideshow={"slide_type": "fragment"}
# dimensions of training data (~70% of the rows)
train.shape
# + slideshow={"slide_type": "fragment"}
# dimensions of testing data (~30% of the rows)
test.shape
# + [markdown] slideshow={"slide_type": "slide"}
# ## Visualizing response distribution
#
# Always good practice to ensure the distribution of our target variable is similar across the training and test sets
# + slideshow={"slide_type": "fragment"}
(ggplot(train, aes(x='Sale_Price'))
+ geom_density(color='blue')
+ geom_density(data = test, color = "red")
+ ggtitle("Distribution of Sale_Price"))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Separating features & target
#
# * In Python, we are required to separate our features from our label into discrete data sets.
#
# * For our first model we will simply use two features from our training data - total square feet of the home (`Gr_Liv_Area`) and year built (`Year_Built`) to predict the sale price.
# -
# Separate the two predictor columns from the label, training split only.
X_train = train[["Gr_Liv_Area", "Year_Built"]]
y_train = train["Sale_Price"]
# + [markdown] slideshow={"slide_type": "slide"}
# # Creating Models
# + [markdown] slideshow={"slide_type": "slide"}
# ## Creating Models with scikit-learn
# + [markdown] slideshow={"slide_type": "-"}
# Scikit-learn has many modules for supervised learning
#
# * Linear models (i.e. ordinary least squares)
# * Nearest neighbors (i.e. _K_-nearest neighbor)
# * Tree-based models (i.e. decision trees, random forests)
# * and many more: https://scikit-learn.org/stable/supervised_learning.html
# + [markdown] slideshow={"slide_type": "fragment"}
# To apply these models, they all follow a similar pattern:
#
# 1. Identify the appropriate module
# 2. Instantiate the model object
# 3. Fit the model
# 4. Make predictions
# + [markdown] slideshow={"slide_type": "slide"}
# ## Ordinary least squares
# + slideshow={"slide_type": "-"}
# 1. Prerequisite: import the estimator class
from sklearn.linear_model import LinearRegression
# + slideshow={"slide_type": "fragment"}
# 2. Instantiate the model object (default hyperparameters)
reg = LinearRegression()
# + slideshow={"slide_type": "fragment"}
# 3. Fit the model: learn coefficients from the training features/labels
reg.fit(X_train, y_train)
# + slideshow={"slide_type": "fragment"}
# 4. Make predictions (here on the training data itself)
reg.predict(X_train)
# + [markdown] slideshow={"slide_type": "slide"}
# ## K-nearest neighhbor
# + slideshow={"slide_type": "-"}
# 1. Prerequisite: import the estimator class
from sklearn.neighbors import KNeighborsRegressor
# + slideshow={"slide_type": "fragment"}
# 2. Instantiate the model object (default k=5 neighbors)
knn = KNeighborsRegressor()
# + slideshow={"slide_type": "fragment"}
# 3. Fit the model: memorize the training data for neighbor lookups
knn.fit(X_train, y_train)
# + slideshow={"slide_type": "fragment"}
# 4. Make predictions (here on the training data itself)
knn.predict(X_train)
# + [markdown] slideshow={"slide_type": "slide"}
# # <font color="red">Your Turn</font>
#
# Create and predict a model using the random forest algorithm.
# + slideshow={"slide_type": "-"}
# 1. Prerequisite
from sklearn.ensemble import RandomForestRegressor
# + slideshow={"slide_type": "fragment"}
# 2. Instantiate the model object
rf = RandomForestRegressor()
# + slideshow={"slide_type": "fragment"}
# 3. Fit the model
rf.fit(X_train, y_train)
# + slideshow={"slide_type": "fragment"}
# 4. Make predictions
rf.predict(X_train)
# + [markdown] slideshow={"slide_type": "slide"}
# # Evaluating Models
# + [markdown] slideshow={"slide_type": "slide"}
# ## Evaluating model performance
#
# - It is important to understand how our model is performing.
#
# - With ML models, measuring performance means understanding the predictive accuracy -- the difference between a predicted value and the actual value.
#
# - We measure predictive accuracy with ___loss functions___.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Many loss functions for regression problems
# + [markdown] slideshow={"slide_type": "fragment"}
# * __Mean Square Error__ (MSE) = $\frac{1}{n} \sum^n_{i=1} (y_i - \hat{y}_i)^2$
# + [markdown] slideshow={"slide_type": "fragment"}
# * __Root Mean Square Error__ (RMSE) = $\sqrt{MSE}$
# + [markdown] slideshow={"slide_type": "fragment"}
# * Other common loss functions
# - Mean Absolute Error (MAE)
# - Mean Absolute Percent Error (MAPE)
# - Root Mean Squared Logarithmic Error (RMSLE)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Computing MSE
# + slideshow={"slide_type": "-"}
# compute MSE for linear model
# NOTE(review): assumes `reg` was fit above and that `mean_squared_error`
# (sklearn.metrics) and `math` were imported earlier in the notebook — confirm.
pred = reg.predict(X_train)
mse = mean_squared_error(y_train, pred)
mse
# -
# RMSE puts the error back on the same scale as the target (Sale_Price dollars).
rmse = math.sqrt(mse)
rmse
# + [markdown] slideshow={"slide_type": "fragment"}
# On average, our model's predictions are over \\$48,000 off from the actual sales price!!
# + [markdown] slideshow={"slide_type": "slide"}
# # <font color="red">Your Turn</font>
#
# With MSE & RMSE our objective is to ___minimize___ this value.
#
# * Compare the MSE & RMSE for the K-nearest neighbor and random forest model to our linear model.
#
# * Which model performs best?
#
# * Are we certain this is the best way to measure our models' performance?
# + [markdown] slideshow={"slide_type": "slide"}
# # Resampling Methods
# + [markdown] slideshow={"slide_type": "slide"}
# ## Resampling methods
#
# Provides an approach for us to repeatedly fit a model of interest to parts of the training data and test the performance on other parts.
#
# * Allows us to estimate the generalization error while training, tuning, and comparing models without using the test data set
#
# * The two most commonly used resampling methods include:
# - _k_-fold cross validation
# - bootstrapping.
#
# <img src="https://uc-r.github.io/Advanced-R/images/resampling.png" alt="resampling" width="800" style="float:right" >
# + [markdown] slideshow={"slide_type": "slide"}
# ## _K_-fold cross validation
#
# * randomly divides the training data into k groups of approximately equal size
#
# * assign one block as the <font color="orange">test block</font> and the rest as <font color="blue">training block</font>
#
# * train model on each folds' <font color="blue">training block</font> and evaluate on <font color="orange">test block</font>
#
# * average performance across all folds
# <br>
# <center>
# <img src="https://uc-r.github.io/Advanced-R/images/cv.png" alt="kfold" width="800"></center>
#
# <br>
#
# <center><b><i>k</i> is usually taken to be 5 or 10</b></center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## _K_-fold CV implementation
#
# * Use `KFold` to create k-fold objects and then
# * `cross_val_score` to train our model across all *k* folds and provide our loss score for each fold
# +
# define loss function
# scikit-learn scoring strings are maximized, so RMSE is negated (see note below)
loss = 'neg_root_mean_squared_error'
# create 10 fold CV object
# NOTE(review): assumes KFold and cross_val_score (sklearn.model_selection)
# were imported earlier in the notebook — confirm.
kfold = KFold(n_splits=10, random_state=123, shuffle=True)
# fit KNN model with 10-fold CV
results = cross_val_score(knn, X_train, y_train, cv=kfold, scoring=loss)
results
# + [markdown] slideshow={"slide_type": "fragment"}
# __Note__: The unified scoring API in scikit-learn always maximizes the score, so scores which need to be minimized are negated in order for the unified scoring API to work correctly. Consequently, you can just interpret the RMSE values below as the $RMSE \times -1$.
# + [markdown] slideshow={"slide_type": "slide"}
# ## _K_-fold results
# + slideshow={"slide_type": "fragment"}
# summary stats for all 10 folds
pd.DataFrame(results * -1).describe()
# + [markdown] slideshow={"slide_type": "slide"}
# # <font color="red">Your Turn</font>
#
# * Compute _K_-fold results for the linear model and/or the random forest model.
#
# * How do the results compare?
# + [markdown] slideshow={"slide_type": "slide"}
# # Hyperparameter Tuning
# + [markdown] slideshow={"slide_type": "slide"}
# ## Bias-variance trade-off
#
# * Prediction errors can be decomposed into two main subcomponents we have control over:
#
# - error due to “bias”
# - error due to “variance”
#
# * There is a tradeoff between a model’s ability to minimize bias and variance.
#
# * Understanding how different sources of error lead to bias and variance helps us improve the data fitting process resulting in more accurate models.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Bias
#
# _Bias_ is the difference between the expected (or average) prediction of our model and the correct value which we are trying to predict.
#
# Some models are naturally ___high bias___:
#
# * Models that are not very flexible (i.e. generalized linear models)
# * High bias models are rarely affected by the noise introduced by resampling
#
# <center>
# <img src="https://uc-r.github.io/Advanced-R/03-supervised-modeling-process_files/figure-html/bias-model-1.png" alt="bias" width="800">
# </center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Variance
#
# Error due to ___variance___ is defined as the variability of a model prediction for a given data point.
#
# Some models are naturally high variance:
#
# * Models that are very adaptable and offer extreme flexibility in the patterns that they can fit to (e.g., _k_-nearest neighbor, decision trees, gradient boosting machines).
# * These models offer their own problems as they run the risk of overfitting to the training data.
# * Although you may achieve very good performance on your training data, the model will not automatically generalize well to unseen data.
#
# <center>
# <img src="https://uc-r.github.io/Advanced-R/03-supervised-modeling-process_files/figure-html/variance-model-1.png" alt="variance" width="800">
# </center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Hyperparameter tuning
#
# So what does this mean to you?
# + [markdown] slideshow={"slide_type": "fragment"}
# * We tend to like very flexible models since they can capture many patterns in our data,
# * but we need to control variance so our model generalizes to new data well.
# * ___Hyperparameters___ can help to control bias-variance trade-off
# + [markdown] slideshow={"slide_type": "slide"}
# ## Hyperparameter tuning
#
# Hyperparameters are the "knobs to twiddle" to control of complexity of machine learning algorithms and, therefore, the bias-variance trade-off
#
# <center>
# <img src="https://uc-r.github.io/Advanced-R/03-supervised-modeling-process_files/figure-html/example-knn-1.png" alt="variance" width="800">
# </center>
#
# _k_-nearest neighbor model with differing values for _k_. Small _k_ value has too much variance. Big _k_ value has too much bias. <font color="red">How do we find the optimal value?</font>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Grid search
#
# * A grid search is an automated approach to searching across many combinations of hyperparameter values
#
# * We perform a grid search with `GridSearchCV()` and supply it a model object and hyperparameter values we want to assess.
#
# * Also notice that we supply it with the `kfold` object we created previously and the `loss` function we want to optimize for.
# +
# Basic model object
knn = KNeighborsRegressor()
# Hyperparameter values to assess: k = 2..25 (24 candidates)
hyper_grid = {'n_neighbors': range(2, 26)}
# Create grid search object
# NOTE(review): assumes GridSearchCV (sklearn.model_selection) was imported
# earlier, and reuses the `kfold` and `loss` objects defined above.
grid_search = GridSearchCV(knn, hyper_grid, cv=kfold, scoring=loss)
# Tune a knn model using grid search (24 candidates x 10 folds = 240 fits)
results = grid_search.fit(X_train, y_train)
# + slideshow={"slide_type": "slide"}
# Best model's cross validated RMSE
abs(results.best_score_)
# -
# Best model's k value
results.best_estimator_.get_params().get('n_neighbors')
# + slideshow={"slide_type": "-"}
# Plot all RMSE results
all_rmse = pd.DataFrame({'k': range(2, 26),
'RMSE': np.abs(results.cv_results_['mean_test_score'])})
(ggplot(all_rmse, aes(x='k', y='RMSE'))
+ geom_line()
+ geom_point()
+ ggtitle("Cross validated grid search results"))
# + [markdown] slideshow={"slide_type": "slide"}
# # Putting the Processes Together
# + [markdown] slideshow={"slide_type": "slide"}
# ## Putting the Processes Together
#
# You've been exposed to a lot in a very short amount of time. Let's bring these pieces together but rather than just look at the 2 features that we included thus far (`Gr_Liv_Area` & `Year_Built`), we'll include ___all numeric features___.
#
# __Steps:__
#
# 1. Split into training vs testing data
#
# 2. Separate features from labels and only use numeric features
#
# 3. Create KNN model object
#
# 4. Define loss function
#
# 5. Specify _K_-fold resampling procedure
#
# 6. Create our hyperparameter grid
#
# 7. Execute grid search
#
# 8. Evaluate performance
# + slideshow={"slide_type": "slide"}
# 1. Split into training vs testing data
# NOTE(review): assumes `ames` and train_test_split were loaded/imported earlier.
train, test = train_test_split(ames, train_size=0.7, random_state=123)
# 2. Separate features from labels and only use numeric features
# (drop the target itself so it does not leak into the feature matrix)
X_train = train.select_dtypes(include='number').drop("Sale_Price", axis=1)
y_train = train["Sale_Price"]
# 3. Create KNN model object
knn = KNeighborsRegressor()
# 4. Define loss function (negated RMSE: sklearn maximizes scores)
loss = 'neg_root_mean_squared_error'
# 5. Specify K-fold resampling procedure
kfold = KFold(n_splits=10, random_state=123, shuffle=True)
# 6. Create grid of hyperparameter values (k = 2..25)
hyper_grid = {'n_neighbors': range(2, 26)}
# 7. Tune a knn model using grid search
grid_search = GridSearchCV(knn, hyper_grid, cv=kfold, scoring=loss)
results = grid_search.fit(X_train, y_train)
# + slideshow={"slide_type": "slide"}
# 8. Evaluate performance: Best model's cross validated RMSE
abs(results.best_score_)
# + slideshow={"slide_type": "fragment"}
# 8. Evaluate performance: Best model's k value
results.best_estimator_.get_params().get('n_neighbors')
# + slideshow={"slide_type": "fragment"}
# 8. Evaluate performance: Plot all RMSE results
all_rmse = pd.DataFrame({'k': range(2, 26),
'RMSE': np.abs(results.cv_results_['mean_test_score'])})
(ggplot(all_rmse, aes(x='k', y='RMSE'))
+ geom_line()
+ geom_point()
+ ggtitle("Cross validated grid search results"))
# + [markdown] slideshow={"slide_type": "slide"}
# ## Can we do better?
#
# * Is this the best we can do?
#
# * Do you think other models could perform better?
#
# * Are we doing the best with the features we've been given?
# + [markdown] slideshow={"slide_type": "fragment"}
# <br><br>
# <center><b>Come to our Advanced Python workshop to find out more!</b></center>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Learning More
# + [markdown] slideshow={"slide_type": "fragment"}
# * **Don't feel intimated** -- you're not going to learn this in an hour
# + [markdown] slideshow={"slide_type": "fragment"}
# * There are a lot of things you can do to improve your skills
# + [markdown] slideshow={"slide_type": "slide"}
# * Books
# * *Introduction to Statistical Learning* or *Elements of Statistical Learning*, Hastie, Tibshirani, and Friedman
# * *Python Data Science Handbook*, <NAME>
# * *Hands-on Machine Learning with scikit-learn and TensorFlow*, <NAME>
# + [markdown] slideshow={"slide_type": "fragment"}
# * Online Courses
# * Machine Learning with Python - Coursera
# + [markdown] slideshow={"slide_type": "fragment"}
# * Practice
# * Use your own data
# * Kaggle
# + [markdown] slideshow={"slide_type": "slide"}
# ## Questions
# + [markdown] slideshow={"slide_type": "-"}
# Are there any questions before moving on?
| notebooks/09-Modeling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from astropy import units as u
import pandas as pd
'''
NAME:
convert_ecl_rel_pos_to_geo_rel_ast
PURPOSE:
To determine the deltaLat and deltaLong of a moon from its primary KBO.
CALLING SEQUENCE:
deltaLong, deltaLat = convert_ecl_rel_pos_to_geo_rel_ast(ecl_rel_pos, obj_rel_pos, rel_moon)
INPUTS
ecl_rel_pos - The J2000 ecliptic Cartesian position of the observer
obj_rel_pos - The J2000 ecliptic Cartesian position of the KBO
rel_moon - The Cartesian position of the Moon relative to the KBO
OUTPUTS:
deltaLong - The difference in Longitude of the moon vs. its primary KBO
deltaLat - The difference in Latitude of the moon vs. its primary KBO
'''
def convert_ecl_rel_pos_to_geo_rel_ast(ecl_rel_pos, obj_rel_pos, rel_moon):
    """Return the observer-centric (deltaLong, deltaLat) of a moon vs. its KBO.

    Parameters
    ----------
    ecl_rel_pos : sequence of 3 floats
        Heliocentric J2000 ecliptic Cartesian position of the observer.
    obj_rel_pos : sequence of 3 floats
        Heliocentric J2000 ecliptic Cartesian position of the KBO.
    rel_moon : sequence of 3 floats
        Cartesian position of the moon relative to the KBO (same frame).

    Returns
    -------
    deltaLong, deltaLat : float
        Angular offsets in radians; deltaLong is scaled by cos(latitude of
        the KBO) so both offsets are on-sky angles.
    """
    # Observer heliocentric position
    x1, y1, z1 = ecl_rel_pos[0], ecl_rel_pos[1], ecl_rel_pos[2]
    # KBO position shifted to observer-centric coordinates
    x2 = obj_rel_pos[0] - x1
    y2 = obj_rel_pos[1] - y1
    z2 = obj_rel_pos[2] - z1
    R2 = np.sqrt(x2**2 + y2**2 + z2**2)
    # Moon observer-centric position = KBO position + moon offset
    moonX = rel_moon[0] + x2
    moonY = rel_moon[1] + y2
    moonZ = rel_moon[2] + z2
    R3 = np.sqrt(moonX**2 + moonY**2 + moonZ**2)
    # Spherical angles: latitude = arcsin(z/R); longitude = arctan2(y, x).
    # (The previous version multiplied the dimensionless ratios by u.degree
    # inside arcsin/arccos — a unit error — had the latitude/longitude names
    # swapped, and used arccos, which loses the sign of y; arctan2 keeps it.
    # The observer's own angles were computed but never used, so they are gone.)
    latitude2 = np.arcsin(z2 / R2)
    longitude2 = np.arctan2(y2, x2)
    latitude3 = np.arcsin(moonZ / R3)
    longitude3 = np.arctan2(moonY, moonX)
    # Differences; scale the longitude offset onto the sky plane
    deltaLat = latitude3 - latitude2
    deltaLong = (longitude3 - longitude2) * np.cos(latitude2)
    return deltaLong, deltaLat
# +
from astroquery.jplhorizons import Horizons
from astropy.time import Time
# Load precomputed Cartesian positions.
# NOTE(review): assumes the CSV has columns X1..Z2 (observer and KBO
# positions?) and 'Primary-Centric Time' in Julian dates — confirm.
df = pd.read_csv('NewMakeMake_LatLon.csv')
x1 = df['X1']
x2 = df['X2']
y1 = df['Y1']
y2 = df['Y2']
z1 = df['Z1']
z2 = df['Z2']
dates = df['Primary-Centric Time']
# Wrap each JD value in an astropy Time object for the Horizons query
dateList = []
for i in dates:
    jd = Time(i,format='jd')
    dateList.append(jd)
# Query JPL Horizons for object 258 at the requested epochs
L2 = Horizons(id='258',location=None,epochs=dateList)
print(L2.vectors()['range'])
# NOTE(review): each .vectors() call below may re-query Horizons — consider
# storing the table once and indexing it.
EclLatL2 = L2.vectors()['EclLat']
EclLon2 = L2.vectors()['EclLon']
print(EclLatL2,EclLon2)
# NOTE(review): [5, 5, 5] looks like a placeholder moon offset — confirm.
convert_ecl_rel_pos_to_geo_rel_ast([x1,y1,z1],[x2,y2,z2],[5,5,5])
# -
| ConvertToLatLon/.ipynb_checkpoints/Convert to Ast-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# Image Deconvolution (PGM Plug-and-Play Priors w/ BM3D)
# ======================================================
#
# This example demonstrates the use of class
# [pgm.AcceleratedPGM](../_autosummary/scico.optimize.rst#scico.optimize.AcceleratedPGM)
# to solve an image deconvolution problem using the Plug-and-Play Priors
# framework <cite data-cite="venkatakrishnan-2013-plugandplay2"/>
# <cite data-cite="kamilov-2017-plugandplay"/>, using BM3D <cite data-cite="dabov-2008-image"/>
# as a denoiser.
# +
import numpy as np
import jax
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, plot
from scico.optimize.pgm import AcceleratedPGM
from scico.util import device_info
plot.config_notebook_plotting()
# -
# Create a ground truth image.
np.random.seed(1234)  # fixed seed so the phantom is reproducible
N = 512 # image size
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
# Set up forward operator and test signal consisting of blurred signal
# with additive Gaussian noise.
# +
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
# Uniform (box) blur kernel normalized to sum to 1
psf = snp.ones((n, n)) / n**2
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
# scico.random.randn returns (sample, new_prng_key); key is unused here
noise, key = scico.random.randn(Ax.shape)
y = Ax + σ * noise
# -
# Set up and run a PGM solver.
# +
# Data-fidelity term ||Ax - y||^2 and BM3D denoiser as the (plug-and-play) prior
f = loss.SquaredL2Loss(y=y, A=A)
L0 = 15 # APGM inverse step size parameter
λ = L0 * 2.0 / 255 # BM3D regularization strength
g = λ * functional.BM3D()
maxiter = 50 # number of APGM iterations
# x0 = A^T y is a standard cheap initialization for deconvolution
solver = AcceleratedPGM(
    f=f, g=g, L0=L0, x0=A.T @ y, maxiter=maxiter, itstat_options={"display": True, "period": 10}
)
print(f"Solving on {device_info()}\n")
x = solver.solve()
x = snp.clip(x, 0, 1)  # clip to valid image range before display/metrics
hist = solver.itstat_object.history(transpose=True)
# -
# Show the recovered image.
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
y = snp.clip(y, 0, 1)
yc = y[nc:-nc, nc:-nc]
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, yc), fig=fig, ax=ax[1])
plot.imview(x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, x), fig=fig, ax=ax[2])
fig.show()
# Plot convergence statistics.
plot.plot(hist.Residual, ptyp="semilogy", title="PGM Residual", xlbl="Iteration", ylbl="Residual")
| notebooks/deconv_ppp_bm3d_pgm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python tutorial
#
# - [Basics](#Basics): Math, Variables, Functions, Control flow, Modules
# - [Data representation](#Data-representation): String, Tuple, List, Set, Dictionary, Objects and Classes
# - [Standard library modules](#Standard-library-modules): script arguments, file operations, timing, processes, forks, multiprocessing
# - Beginners in programming have a hard time knowing where to begin and are overwhelmed.
# - These first steps are not very methodical. Programming language classes take time, start with basic concepts and gradually improve and expand on them.
# - Universities offer more relaxed programming classes for students, but this is a different type of learning. Students have time for many things, for the rest of us working people there are two or perhaps three steps to learning a new language or a new library, and we learn by doing. The [tutorial](https://docs.python.org/3/tutorial/) is a simple exposure that takes you through some key aspects, next comes the programming guide (or any decent book) where concepts are explained in greater detail and at last there is the [reference library](https://docs.python.org/3/library/index.html), where every detail is supposed to be documented in concise format. Good programmers learn to read the tutorial, read some key aspects from the guide that make their library stand out and only check the reference guide when needed.
# - No matter how good I would become at teaching basic Python in one or two hours, it fails to compare with reading the default tutorial, which would take much longer time than we have at our disposal. So please understand that there is a trade-off between time and quality. As we learn by doing I would like to invite you to check whenever possible the documentation for Python and for the libraries we are using.
# - If you want to a certain standard module to be discussed in more depth, mention it in Hackmd/Slack!
# # Basics
# ### Variables and comments
#
# Variable vs type. 'Native' datatypes. Console output.
# This is a line comment.
"""
A multi-line
comment.
"""
a = None #Just declared an empty object
print(a)
a = 1
print(a)
a = 'abc'
print(a)
b = 3
c = [1, 2, 3]
a = [a, 2, b, 1., 1.2e-5, True] #This is a list.
print(a)
## Python is a dynamic language
a = 1
print(type(a))
print(a)
a = "spam"
print(type(a))
print(a)
a = 1
a
b = 'abc'
print(b)
#b
# Now let us switch the values of two variables.
print(a, b, c)
t = c
c = b
b = t
print(a, b, c)
# ### Math operations
#
# #### Arithmetic
a = 2
b = 1
b = a*(5 + b) + 1/0.5
print(b)
d = 1/a
print(d)
# #### Logical operations:
a = True
b = 3
print(b == 5)
print(a == False)
print(b < 6 and not a)
print(b < 6 or not a)
print(b < 6 and (not a or not b == 3))
print(False and True)
True == 1
# ### Functions
#
# Functions are a great way to separate code into readable chunks. The exact size and number of functions needed to solve a problem will affect readability.
#
# New concepts: indentation, namespaces, global and local scope, default parameters, passing arguments by value or by reference is meaningless in Python, what are mutable and imutable types?
# +
## Indentation and function declaration, parameters of a function
def operation(a, b):
    # The function body is delimited purely by indentation.
    c = 2*(5 + b) + 1/0.5  # uses parameter b; 1/0.5 is float division -> 2.0
    a = 1                  # rebinds the local name only; the caller's variable is untouched
    return a, c            # returns a tuple of two values
a = None
mu = 2
operation(mu, 1)           # return value is discarded
a, op = operation(a, 1)    # tuple unpacking: a = 1, op = 14.0
print(a, op)
# -
# Function scope, program workflow
def f(a):
    # Demonstrates that rebinding a parameter affects only the local scope.
    print("inside the scope of f():")
    a = 4  # local rebinding; invisible to the caller
    print("a =", a)
    return a
a = 1
print("f is called")
f(a)  # the global a stays 1: the 'a' inside f is a separate local name
print("outside the scope of f, a=", a)
print("also outside the scope of f, f returns", f(a))
# +
## Defining default parameters for a function
def f2(a, b=1):
    """Return a + b; b defaults to 1 when the caller omits it."""
    total = a + b
    return total
print(f2(5))
print(f2(5, b=2))
# +
## Globals. Never use them!
g = 0
def f1():
    # Comment out the 'global' line below to spot the difference
    global g # needed to rebind the module-level g from inside f1
    g = 1
def f2():
    # reads the global g at call time (late binding); no declaration needed for reads
    print("f2:",g)
print(g)   # 0
f1()       # rebinds the global
print(g)   # 1
f2()
# -
# Task:
# - Define three functions, f, g and h. Call g and h from inside f. Run f on some value v.
# - You can also have functions that are defined inside the namespace of another function. Try it!
# #### Data types
#
# Everything is an object in Python, and every object has an ID (or identity), a type, and a value. This means that whenever you assign an expression to a variable, you're not actually copying the value into a memory location denoted by that variable, but you're merely giving a name to the memory location where the value actually exists.
#
# - Once created, the ID of an object never changes. It is a unique identifier for it, and it is used behind the scenes by Python to retrieve the object when we want to use it.
# - The type also never changes. The type tells what operations are supported by the object and the possible values that can be assigned to it.
# - The value can either change or not. If it can, the object is said to be mutable, while when it cannot, the object is said to be immutable.
#
from IPython.display import Image
Image(url= "../img/mutability.png", width=400, height=400)
# +
i = 43
print(id(i))
print(type(i))
print(i)
i = 42
print(id(i))
print(type(i))
print(i)
# +
i = 43
print(id(i))
print(type(i))
print(i)
i = i + 1
print(id(i))
print(type(i))
print(i)
# +
# assignments reference the same object as i
i = 43
print(id(i))
print(type(i))
print(i)
j = i
print(id(j))
print(type(j))
print(j)
# -
# Task: will j also change?
i = 5
# Strings of characters are also immutable, x did not changed its value
x = 'foo'
y = x
print(x, y) # foo
y += 'bar'
print(x, y) # foo
# lists are mutable
x = [1, 2, 3]
print(x)
print(id(x))
print(type(x))
x.pop()
#x = [1, 2, 3]
print(x)
print(id(x))
# Question:
# - Why weren't all data types made mutable only, or immutable only?
#
# Below, if ints would have been mutable, you would expect both variables to be updated. But you normally want variables pointing to ints to be independent.
a = 5
b = a
a += 5
print(a, b)
## A list however is mutable datatype in Python
x = [1, 2, 3]
y = x
print(x, y) # [1, 2, 3]
y += [3, 2, 1]
print(x, y) # [1, 2, 3, 3, 2, 1]
# +
## String mutable? No
def func(val):
    """Return *val* with 'bar' appended.

    Strings are immutable, so this builds a new string and the caller's
    original object is left unchanged.
    """
    result = val + 'bar'
    return result
x = 'foo'
print(x) # foo
print(func(x))
print(x) # foo
# +
## List mutable? Yes.
def func(val):
    """Extend *val* with [3, 2, 1] in place and return it.

    Lists are mutable: the caller's list object is modified too.
    """
    val.extend([3, 2, 1])
    return val
x = [1, 2, 3]
print(x) # [1, 2, 3]
print(func(x))
print(x) # [1, 2, 3, 3, 2, 1]
# -
# **Control flow**
#
# There are two major types of programming languages, procedural and functional. Python is mostly procedural, with very simple functional elements. Procedural languages typically have very strong control flow specifications. Programmers spend time specifying how a program should run. In functional languages the time is spent defining the program while how to run it is left to the computer. Scala is the most used functional language in Bioinformatics.
# for loops
for b in [1, 2, 3]:
print(b)
# +
# while, break and continue
b = 0
while b < 10:
b += 1
a = 2
if b%a == 0:
#break
continue
print(b)
# Now do the same, but using the for loop
# -
## if else: use different logical operators and see if it makes sense
a = 1
if a == 3:
print('3')
elif a == 4:
print('4')
else:
print('something else..')
## error handling - use sparingly!
## python culture: better to apologise than to verify!
def divide(x, y):
    """Divide x by y, demonstrating try/except/else/finally.

    Prints the result on success, a message on division by zero, and the
    finally block unconditionally.
    """
    try:
        result = x / y
    except ZeroDivisionError:
        # Only this specific exception is caught; anything else propagates.
        print("division by zero!")
        #raise ZeroDivisionError
        #pass
    else:
        # Runs only when the try block raised no exception.
        print("result is", result)
    finally:
        # Always runs - the typical place for cleanup code.
        print("executing finally code block..")
divide(1,0)
# # Python modules
#
# ```
# import xls
# "How can you simply import Excel !?!"
# ```
#
# - How Python is structured:
#
# Packages are the way code libraries are distributed. Libraries contain one or several modules. Each module can contain object classes, functions and submodules.
#
# - Object introspection.
#
# It happens often that some Python code that you require is not well documented. To understand how to use the code one can interogate any object during runtime. Aditionally the code is always located somewhere on your computer.
#
import math
print(dir())
print(dir(math))
print(help(math.log))
a = 3
print(type(a))
import numpy
print(numpy.__version__)
import os
print(os.getcwd())
# **Task:**
#
# - Compute the distance between 2D points.
# - `d(p1, p2)=sqrt((x1-x2)**2+(y1-y2)**2), where pi(xi,yi)`
# - Define a module containing a function that computes the euclidian distance. Use the Spyder code editor and save the module on your filesystem.
# - Import that module into a new code cell bellow.
# - Make the module location available to Jupyter.
# +
"""
%run full(relative)path/distance.py
or
os.setcwd(path)
"""
import distance
print(distance.euclidian(1, 2, 4.5 , 6))
from distance import euclidian
print(euclidian(1, 2, 4.5 , 6))
import distance as d
print(d.euclidian(1, 2, 4.5 , 6))
# +
import math
def euclidian(x1, x2, y1, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return math.sqrt(dx * dx + dy * dy)
# -
import sys
print(sys.path)
sys.path.append('/my/custom/path')
print(sys.path)
# ## Data representation
#
#
# ### Strings
# +
#String declarations
statement = "Gene IDs are great. My favorite gene ID is"
name = "At5G001024"
statement = statement + " " + name
print(statement)
statement2 = 'Genes names \n \'are great. My favorite gene name is ' + 'Afldtjahd'
statement3 = """
Gene IDs are great.
My favorite genes are {} and {}.""".format(name, 'ksdyfngusy')
print(statement2)
print(statement3)
print('.\n'.join(statement.split(". ")))
print (statement.split(". "))
# -
#String methods
name = "At5G001024"
print(name.lower())
print(name.index('G00'))
print(name.rstrip('402'))
print(name.strip('Add34'))
#Splits, joins
statement = "Gene IDs are great. My favorite gene ID is At5G001024"
words = statement.split()
print("Splitting a string:", words)
print("Joining into a string:", "\t ".join(words))
import random
random.shuffle(words)
print("Fun:", " ".join(words))
#Strings are lists of characters!
print(statement)
print(statement[0:5] + " blabla " + statement[-10:-5])
# ### Tuples
#
# A few pros for tuples:
# - Tuples are faster than lists
# - Tuples can be keys to dictionaires (they are immutable types)
# +
#a tupple is an immutable list
a = (1, "spam", 5)
#a.append("eggs")
print(a[1])
b = (1, "one")
c = (a, b, 3)
print(c)
#unpacking a collection into positional arguments
# NOTE: this deliberately shadows the built-in sum() for the rest of the notebook.
def sum(a, b):
    return a + b
values = (5, 2)
s = sum(*values)  # * unpacks the tuple into positional arguments: a=5, b=2
print(s)
# -
# ## Lists
#
a = [1,"one",(2,"two")]
print(a[0])
print(a)
a.append(3)
print(a)
b = a + a[:2]
print(b)
## slicing and indexing
print(b[2:5])
del a[-1]
print(a)
print(a.index("one"))
print(len(a))
## not just list size but list elements too are scoping free! (list is mutable)
def f(a, b):
    # Mutating a list received as an argument is visible to the caller...
    a[1] = "changed"
    # ...but rebinding the parameter name is purely local and invisible outside.
    b = [1,2]
    return
a = [(2, 'two'), 3, 1]
b = [2, "two"]
f(a, b)
print(a, b)  # a shows the mutation; b is unchanged
# +
## matrix
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
print(matrix)
print(matrix[0][1])
print(list(range(2,10,3)))
for x in range(len(matrix)):
for y in range(len(matrix[x])):
print(x,y, matrix[x][y])
# -
## ranges
r = range(0, 5)
for i in r: print("step", i)
## list comprehensions
def f(i):
return 2*i
a = [2*i for i in range(10)]
a = [f(i) for i in range(10)]
print(a)
b = [str(e) for e in a[4:] if e%3==0]
print(b)
## sorting a list of tupples
a = [(str(i), str(j)) for i in a for j in range(3)]
print(a)
a.sort(key=lambda tup: tup[1])
a.sort(key=lambda tup: len(tup[1]), reverse = True)
print(a)
# +
#zipping and enumerating
y = zip('abc', 'def')
print(list(y)) # y is a generator
print(list(y)) # second cast to list, content is empty!
print(list(zip(['one', 'two', 'three'], [1, 2, 3])))
x = [1, 2, 3]
y = [4, 5, 6]
zipped = zip(x, y)
#print(type(zipped))
print(zipped)
x2, y2 = zip(*zipped)
print (x == list(x2) and y == list(y2))
print (x2, y2)
alist = ['a1', 'a2', 'a3']
for i, e in enumerate(alist): print (i, e) #this is called a one liner
for i in range(len(alist)):
print(i, alist[i])
print(list(range(len(alist))))
# -
# mapping
a = [1, 2, 3, 4, 5]
b = [2, 2, 9, 0, 9]
print(list(map(lambda x: max(x), zip(a, b))))
print(list(zip(a, b)))
# +
# deep and shallow copies on mutable objects or collections of mutable objects
lst1 = ['a','b',['ab','ba']]
lst2 = lst1 #this is a shallow copy of the entire list
lst2[0]='e'
print(lst1)
lst1 = ['a','b',['ab','ba']]
lst2 = lst1[:] #this is a shallow copy of each element
lst2[0] = 'e'
lst2[2][1] = 'd'
print(lst1)
from copy import deepcopy
lst1 = ['a','b',['ab','ba']]
lst2 = deepcopy(lst1) #this is a deep copy
lst2[2][1] = "d"
lst2[0] = "c";
print(lst2)
print(lst1)
# -
# ### Sets
#
# Sets have no order and cannot include identical elements. Use them when the position of elements is not relevant. Finding elements is faster than in a list. Also set operations are more straightforward. A frozen set has a hash value.
#
# #### Task:
# - Find on the Internet the official reference documentation for the Python sets
# set vs. frozenset
s = set()
#s = frozenset()
s.add(1)
s = s | set([2,"three"])
s |= set([2,"three"])
s.add(2)
s.remove(1)
print(s)
print("three" in s)
s1 = set(range(10))
s2 = set(range(5,15))
s3 = s1 & s2
print(s1, s2, s3)
s3 = s1 - s2
print(s1, s2, s3)
print(s3 <= s1)
s3 = s1 ^ s2
print(s1, s2, s3)
# ### Dictionary
#
# - considered one of the most elegant data structure in Python
# - A set of key: value pairs.
# - Keys must be hashable elements, values can be any Python datatype.
# - The keys of the dictionary are hashable, i.e. they are generated by a hashing function which produces a unique result for each unique value supplied to it. This makes dictionary value retrieval by key much faster than searching a list!
# TODO: !timeit, sha() function
d = {'geneid9': 100, 'geneid8': 90, 'geneid7': 80, 'geneid6': 70, 'geneid5': 60, 'geneid4': 50}
d
d = {}
d['geneid10'] = 110
d
#Creation: dict(list)
genes = ['geneid1', 'geneid2', 'geneid3']
values = [20, 30, 40]
d = dict(zip(genes, values))
print(d)
# +
#Creation: dictionary comprehensions
d2 = { 'geneid'+str(i):10*(i+1) for i in range(4, 10) }
print(d2)
#Keys and values
print(d2.keys())
print(d2.values())
for k in d2.keys(): print(k, d2[k])
# -
# #### Task:
#
# Find the dictionary key corresponding to a certain value. Why is Python not offering a native method for this?
# +
d = {'geneid9': 100, 'geneid8': 90, 'geneid7': 90, 'geneid6': 70, 'geneid5': 60, 'geneid4': 50}
def getkey(value):
    """Return the set of keys in ``d`` whose value equals *value*.

    Python offers no native reverse lookup because dict values need not be
    unique (several keys can map to the same value) and values are not
    hashed, so a full scan of the items is required.
    """
    ks = {k for k, v in d.items() if v == value}
    return ks
print(getkey(90))
# -
# ### Objects and Classes
#
# Everything is an object in Python and every variable is a reference to an object. References map the address in memory where an object lies. However, this is kept hidden in Python. C was famous for not automatically cleaning up the address space after allocating memory for its data structures. This caused memory leaks that make some programs consume more and more RAM. Modern languages clean up dynamically after a variable's scope has ended, something called "garbage collection". However, this affects their speed of computation.
#
# New concepts:
# - Instantiation, Fields, Methods, Decomposition into classes, Inheritance
# +
class Dog(object):
    """A pet that knows its own name and reacts when called."""

    def __init__(self, name):
        # The name drives both barking and fetching behaviour below.
        self.name = name

    def bark_if_called(self, call):
        """Bark when *call* (minus its trailing '!') matches our name."""
        called_name = call[:-1]
        if called_name == self.name:
            print("Woof Woof!")
        else:
            print("*sniffs..")

    def get_ball(self):
        """Fetch: announce that the ball was brought back."""
        print(f"{self.name} brings back ball")
d = Dog("Buffy")
print(d.name, "was created from Ether!")  # name is an attribute
d.bark_if_called("Bambi!")  # bark_if_called is a method
#dog.bark_if_called("Buffy!")
# +
class PitBull(Dog):
    """A Dog subclass: fetching gains attitude, boot chewing is added."""

    def get_ball(self):
        # Reuse the parent's fetch behaviour, then append the breed twist.
        super().get_ball()
        print("*hates you")

    def chew_boots(self):
        print("*drools")
d2 = PitBull("Georgie")
d2.bark_if_called("Loopie!")
d2.bark_if_called("Georgie!")
d2.chew_boots()
#d.chew_boots()
d2.get_ball()
print(d2.name)
# -
# ### Decorators
# +
from time import sleep
import functools
def sleep_decorator(function):
    """Rate-limit *function*: every call is preceded by a 2 second sleep."""
    @functools.wraps(function)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        sleep(2)
        return function(*args, **kwargs)
    return wrapper
@sleep_decorator
def print_number(num):
    """Return *num* unchanged; the decorator makes each call take ~2 s."""
    return num
print(print_number(222))
for num in range(1, 6):
    print(print_number(num))
# -
# ## Standard library modules
# https://docs.python.org/3/library/
#
# - sys - system-specific parameters and functions
# - os - operating system interface
# - shutil - shell utilities
# - math - mathematical functions and constants
# - random - pseudorandom number generator
# - timeit - time it
# - format - number and text formating
# - zlib - file archiving
# - ... etc ...
#
# Recommendation: Take time to explore the [Python module of the week](https://pymotw.com/3/). It is a very good way to learn why Python comes "with batteries included".
# ### The sys module. Command line arguments.
# +
import sys
print(sys.argv)
sys.exit()
##getopt, sys.exit()
##getopt.getopt(args, options[, long_options])
# import getopt
# try:
# opts, args = getopt.getopt(sys.argv[1:],"hi:o:",["ifile=","ofile="])
# except getopt.GetoptError:
# print 'test.py -i <inputfile> -o <outputfile>'
# sys.exit(2)
# for opt, arg in opts:
# if opt == '-h':
# print 'test.py -i <inputfile> -o <outputfile>'
# sys.exit()
# elif opt in ("-i", "--ifile"):
# inputfile = arg
# elif opt in ("-o", "--ofile"):
# outputfile = arg
# print inputfile, outputfile
# -
# #### Task:
# - Create a second script that contains command line arguments and imports the distance module above. If an -n 8 is provided in the arguments, it must generate 8 random points and compute a matrix of all pair distances.
# ### os module: File operations
#
# The working directory, file IO, copy, rename and delete
#
# +
import os
print(os.getcwd())
#os.chdir(newpath)
os.system('mkdir testdir')
f = open('testfile.txt','wt')
f.write('One line of text\n')
f.write('Another line of text\n')
f.close()
import shutil
#shutil.copy('testfile.txt', 'testdir/')
shutil.copyfile('testfile.txt', 'testdir/testfile1.txt')
shutil.copyfile('testfile.txt', 'testdir/testfile2.txt')
with open('testdir/testfile1.txt','rt') as f:
for l in f: print(l)
for fn in os.listdir("testdir/"):
print(fn)
#fpath = os.path.join(dirpath,filename)
os.rename('testdir/'+fn, 'testdir/file'+fn[-5]+'.txt')
import glob
print (glob.glob('testdir/*'))
os.remove('testdir/file2.txt')
#os.rmdir('testdir')
#shutil.rmtree(path)
# -
# #### Task:
# - Add a function to save the random vectors and the generated matrix into a file.
# ### Timing
# +
from datetime import datetime
# Wall-clock timing of an empty loop with datetime arithmetic.
startTime = datetime.now()
n = 10**8
for i in range(n):
    continue
# print is a function in Python 3; the original bare `print expr`
# statement was Python 2 syntax and raised a SyntaxError here.
print(datetime.now() - startTime)
# -
# ### Processes
#
# Launching a process, Paralellization: shared resources, clusters, clouds
# +
import os
import subprocess  # was used below without ever being imported (NameError)
from subprocess import Popen, PIPE, call
#print os.system('/path/yourshellscript.sh args')
subprocess.run(["ls", "-l", "/dev/null"], stdout=subprocess.PIPE)
subprocess.run("exit 1", shell=True, check=True)
call(["ls", "-l"])
# +
args = ['/path/yourshellscript.sh', '-arg1', 'value1', '-arg2', 'value2']
bufsize = -1  # -1 selects the OS default buffer size (was referenced but never defined)
# Pass the argument list with shell=False: combining a list with shell=True
# hands only args[0] to the shell and the rest become shell arguments.
p = Popen(args, shell=False, bufsize=bufsize,
          stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p.wait()
(child_stdin, child_stdout, child_stderr) = (p.stdin, p.stdout, p.stderr)
# +
# def child():
# print 'A new child ', os.getpid( )
# os._exit(0)
# def parent():
# while True:
# newpid = os.fork()
# if newpid == 0:
# child()
# else:
# pids = (os.getpid(), newpid)
# print "parent: %d, child: %d" % pids
# if raw_input( ) == 'q': break
# parent()
# -
# How to do the equivalent of shell piping in Python? This is the basic step of an automated pipeline.
#
# `cat test.txt | grep something`
#
# **Task**:
# - Test this!
# - Uncomment `p1.stdout.close()`. Why is it not working?
# - What are signals? Read about SIGPIPE.
# +
from subprocess import Popen, PIPE  # Popen/PIPE were never imported in this file
# Equivalent of the shell pipeline: cat test.txt | grep something
p1 = Popen(["cat", "test.txt"], stdout=PIPE)
p2 = Popen(["grep", "something"], stdin=p1.stdout, stdout=PIPE)
# Close our copy of p1's stdout so p1 receives SIGPIPE if p2 exits early.
p1.stdout.close()
output = p2.communicate()[0]
# -
# Questions:
# - What are the Python's native datatypes? Have a look at the Python online documentation for each datatype.
# - How many data types does Python have?
# - Python is a "dynamic" language. What does it mean?
# - Python is an "interpreted" language. What does it mean?
# - Which data structures are mutable and which are immutable? When does this matter?
# - What is a "hash" and how does it influence set and dictionary operations?
# - What are the most important Python libraries for you? Read through Anaconda's collection of libraries and check out some of them.
# Task. Explain why this happens:
# +
def run(l=[]):
    # NOTE(review): the default list is evaluated once, at function
    # definition time, so the same list object is shared across calls --
    # that sharing is exactly the behaviour the task above asks you to
    # explain, so it is intentionally left unfixed.
    l.append(len(l))
    return l
print(run())
print(run())
print(run())
# -
# Task.
#
# dic = {'a':[1,2,3], 'b':[4,5,6,7]}
#
# Using list comprehension, return:
#
# [1, 2, 3, 4, 5, 6, 7]
# ['a', 'a', 'a', 'b', 'b', 'b', 'b']
| day1/.ipynb_checkpoints/tutorial-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Google Play Store Apps
#
# ##### source:- https://www.kaggle.com/lava18/google-play-store-apps#license.txt
# ## Loading our data
import pandas as pd
import numpy as np
# Load the raw Google Play Store dump (from the Kaggle dataset above).
store_df = pd.read_csv('googleplaystore.csv')
store_df.head()
# ## Data Cleaning
store_df.isna().sum()  # count missing values per column
store_df.shape
store_df.info()  # inspect the dtype of each feature
store_df['Rating'].describe()
# Impute missing ratings with the median (robust to the skewed distribution).
store_df['Rating'].fillna(store_df['Rating'].median(), inplace=True)
store_df.isna().sum()
# let's drop not so important feature
# NOTE: axis must be passed by keyword -- the positional form
# `.drop(labels, 1)` was deprecated and removed in pandas 2.0.
df = store_df.drop(['Android Ver', 'Current Ver', 'Last Updated', 'Content Rating'], axis=1)
df.head(20)
df.isna().sum()
df.Type.mode()
df.Type.value_counts()
df[df['Type'] == '0']  # inspect the malformed row (its columns are shifted)
df = df.drop(10472)  # we dropped our outlier using drop
df.Type.value_counts()
df.isna().sum()
# now fillna
df['Type'].fillna('Free', inplace=True)
df.isna().sum()
df.head()
df["Reviews"] = pd.to_numeric(df["Reviews"])  # review counts arrive as strings
df.info()
df1 = df.sort_values('Reviews', ascending=False)
df1.head(20)
# Keyword axis=1: the positional axis argument was removed in pandas 2.0.
df1 = df1.drop(['Size', 'Price'], axis=1)
df1.head()
df1 = df1.drop(['Genres', 'Category'], axis=1)
df1.head()
print(4891723*4.6)
print(56642847*4.0)
print(78158306*4.1)
# Popularity score: rating weighted by review volume.
df1['score'] = df1['Rating'] * df1['Reviews']
df1.head()
# +
from sklearn.preprocessing import MinMaxScaler
# Rescale the score into [0, 1] so rankings are comparable.
scaling = MinMaxScaler()
df1['score'] = scaling.fit_transform(df1[['score']])
df1['score'].head()
# -
df1.head(20)
# # Top 10 Popular Apps According To Type
import matplotlib.pyplot as plt
import seaborn as sns
# Function to define top 10 popular recommendation
def popular_apps(Type, data=None):
    """Return the top-10 distinct app names for a pricing type.

    Type: 'Free' or 'Paid'.
    data: optional DataFrame to rank; defaults to the module-level df1.
          It must carry 'Type', 'score' and 'App' columns.

    Returns a pandas Series of up to 10 app names, best score first.
    """
    b = df1 if data is None else data
    b = b[b['Type'] == Type]
    b = b.sort_values('score', ascending=False)
    # Drop duplicate app names so the top-10 lists distinct apps.
    h = b['App'].drop_duplicates()
    return h.head(10)
# ### 1. Top 10 free Popular Apps
popular_apps('Free')
# ### 2. Top 10 Paid Popular Apps
popular_apps('Paid')
# # Top 10 Apps According To Genres
df.head()
df['score'] = df['Rating'] * df['Reviews']
df.head()
# +
from sklearn.preprocessing import MinMaxScaler
scaling=MinMaxScaler()
df['score']=scaling.fit_transform(df[['score']])
df['score'].head()
# -
df['Genres'].value_counts().head(20)
# Function to define top 10 popular recommendation
def popular_apps(Genres, Type, data=None):
    """Return the top-10 distinct app names for a genre/pricing combination.

    Genres: genre label to filter on (e.g. 'Sports').
    Type:   'Free' or 'Paid'.
    data:   optional DataFrame to rank; defaults to the module-level df.
            It must carry 'Genres', 'Type', 'score' and 'App' columns.

    Returns a pandas Series of up to 10 app names, best score first.
    """
    frame = df if data is None else data
    b = frame[frame['Genres'] == Genres]
    a = b[b['Type'] == Type]
    a = a.sort_values('score', ascending=False)
    # Distinct names only, so one app cannot occupy several slots.
    h = a['App'].drop_duplicates()
    return h.head(10)
# ### 1. Top 10 Sports Genre Popular Apps
popular_apps('Sports','Free')
# ### 2. Top 10 Education Genre Popular Apps
# Free apps
popular_apps('Education','Free')
# Paid apps
popular_apps('Education','Paid')
# ### 3. Top 10 Social Genre Popular Apps
# Free social apps
popular_apps('Social','Free')
# Top paid social apps
popular_apps('Social','Paid')
# ### 4. Top 10 Strategy Genre Popular Apps
# Top Free strategy games
popular_apps('Strategy','Free')
# Top Paid strategy games
popular_apps('Strategy','Paid')
# ### 5. Top 10 Entertainment Genre Popular Apps
# Top Free
popular_apps('Entertainment','Free')
# Top Paid
popular_apps('Entertainment','Paid')
| Google Play Store Apps Recommendation Engine.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.3
# language: julia
# name: julia-0.6
# ---
# # JAXTAM - Just Another X-Ray Timing Module
#
# ## Walkthrough
#
# First, import JAXTAM by calling `using JAXTAM`
using JAXTAM
# Now, the local user configuration needs to be set up.
# ## Configuration
JAXTAM.config()
# Add in the missions. Mission configurations are defined by a `MissionDefinition` type:
#
#
# ```
# mutable struct MissionDefinition
# name::String
# url::String
# path::String
# path_obs::Function
# path_cl::Function
# path_uf::Function
# path_rmf::String
# good_energy_max::Number
# good_energy_min::Number
# instruments::Array
# end
# ```
#
# These types contain the mission `name`, an `url` to the HEASARC master table for that mission, a `path` to the local folder the data will be stored in, three path functions (`path_obs`, `path_cl`, and `path_uf`) which return the path to the relevant folders/files, given a row from the master table. Functions have to be used for this as some more complex path structures exist, like for NICER which uses the observation date for the folders. Lastly, the mission energy ranges are entered as `good_energy_min` and `good_energy_max`.
#
# JAXTAM includes default missions, if a mission name is included in these defaults then only the local `path` is required. This speeds up adding new missions:
JAXTAM.config(:nicer, "/media/robert/HVS1/heasrac/nicer/")
JAXTAM.config(:nustar, "/media/robert/HVS1/heasrac/nustar/")
# A default mission can also be set in the configuration:
JAXTAM.config(:default, :nicer)
# Removing missions is simple and can be done with `config_rm(mission_name)`.
# ## Master Tables
#
# As the url to the master table is included in the mission definitions, downloading the master tables is simple:
JAXTAM.master(:nicer)[1:2, :]
# Once the table has been downloaded, repeated calls to `master` will use the current version of the table. To update the table, simply call `master_update`
# ### Append Table
#
# For sanity, the master table is not modified (apart from dates being converted from MJD to the standard yyyy-mm-ddThh:mm:ss format), so an `append` table is used to store new variables.
JAXTAM.append()[1:2, :]
# The append table contains file and folder paths for each observation, which are found using the path functions contained in the mission definition, as well as a publicity flag and a downloaded flag. This table can easily be expanded on as required.
#
# To automatically load a joined version of the master and append tables, call `master_a()`.
# ### Master Query
#
# For utility, a generic query function exists, allowing users to search for specific column values within the master and append tables.
master_df = JAXTAM.master(:nicer);
JAXTAM.master_query(master_df, :obsid, "1010010128")
JAXTAM.master_query(master_df, :subject_category, "MAGNETAR")[1:2, :]
# Public only query functions are also available, in this case we see NICER has 5850 public observations.
size(JAXTAM.master_query_public(:nicer))
size(JAXTAM.master_query_public(master_df, :subject_category, "MAGNETAR"))
# With 1353 of them being magnetars.
# ## Downloading Observations
#
# Downloading observations is made to be simple and intuitive, simply give an observation ID and the observation will be downloaded, respecting the FTP folder structure, into the mission path specified in the configuration file:
JAXTAM.download(:nicer, "1050080143")
# This is done via `lftp` using 10 parallel connections for speed.
#
# The download command can be paired with query to queue up multiple downloads, for example downloading all public observations would be:
#
# `JAXTAM.download(:nicer, JAXTAM.master_query_public(:nicer)[:obsid])`
# ## Analysis
#
# ### Reading Observation to Feather
#
# Reading an observation in must be done before any analysis is started. This function converts the FITS files into feather ones for faster i/o operations, and stores a few relevant fits comments:
instrument_data = JAXTAM.read_cl(:nicer, "1050080143")
# The observations are stored in a dictionary, with instruments as the dictionary key. This is done to simplify the cases where a mission has multiple instruments, such as NuSTAR:
JAXTAM.read_cl(:nustar, "10002008001")
# The data is stored in an `InstrumentData` type, which currently contains:
#
# ```
# struct InstrumentData
# mission::Symbol
# instrument::Symbol
# obsid::String
# events::DataFrame
# gtis::DataFrame
# start::Number
# stop::Number
# header::DataFrame
# end
# ```
#
# Where `header` is the full FITS header for the primary (0th) HDU.
#
# As an example, `InstrumentData` looks like:
instrument_data[:XTI]
# ### Energy Calibration
#
# The PI channels can be converted into energies quite easily (done using the mission rmf path provided in the user config):
JAXTAM.calibrate(:nicer, "1050080143")
# This simply adds an extra column to the `InstrumentData.events` DataFrame which contains the energy of that event.
# ### Light Curves
#
# Binning the events into a light curve can be done by:
JAXTAM.lcurve(:nicer, "1050080143", 1)
# With `1` as a 1 sec bin in this case. As before, this computes the light curve, saves the files to disk (or, if the files already exist, reads them), then returns a `BinnedData` type:
#
# ```
# struct BinnedData
# mission::Symbol
# instrument::Symbol
# obsid::String
# bin_time::Real
# counts::SparseVector
# times::StepRangeLen
# gtis::Array{Float64,2}
# end
# ```
#
# Then, `BinnedData` can be filtered through the GTIs, which will return one (or multiple) `GTIData` types, for each of the valid/accepted GTIs:
#
# ```
# struct GTIData
# mission::Symbol
# instrument::Symbol
# obsid::String
# bin_time::Real
# gti_index::Int
# gti_start_time::Real
# counts::Array
# times::Array
# end
# ```
#
# Again, these are all saved to the disk as well:
gtis = JAXTAM.gtis(:nicer, "1050080143", 1)
# For generality, a dictionary is returned with each instrument, within that is another dictionary with the index of each GTI (although this might change to a new `GTIContainer` type). So, for the above we have:
gtis[:XTI]
# Which, as a quick demonstration, looks like:
using Gaston
plot(gtis[:XTI][1].times, gtis[:XTI][1].counts; xlabel="time (s)", ylabel="counts");
# Alternatively, we can look at a NuSTAR observation just as easily:
JAXTAM.download(:nustar, "90302319006");
JAXTAM.read_cl(:nustar, "90302319006");
JAXTAM.calibrate(:nustar, "90302319006");
JAXTAM.lcurve(:nustar, "90302319006", 1);
gtis = JAXTAM.gtis(:nustar, "90302319006", 1)
# Where, unlike with the NICER data, the purpose of the dictionary approach to storage should be clearer. Above we see both NuSTAR instruments in the GTIs dictionary, looking at one instrument we see:
gtis[:FPMA]
# Some simple analysis can be performed elegantly with this approach:
[sum(gti.counts) for gti in values(gtis[:FPMA])]
# Or even:
[sum(gti.counts)/gti.times[end] for gti in values(gtis[:FPMA])]
# So, we see a roughly constant count rate. Now:
plot(gtis[:FPMA][24].times, gtis[:FPMA][24].counts; xlabel="time (s)", ylabel="counts");
# For some basic timing analysis:
using DSP
gti_fft = abs.(rfft(gtis[:FPMA][24].counts));
gti_fft_t = DSP.Util.rfftfreq(length(gtis[:FPMA][24].counts), 1/gtis[:FPMA][24].bin_time);
plot(Array(gti_fft_t)[2:end], gti_fft[2:end]; xlabel="freq (Hz)", ylabel="power");
# FFT functions are still being implemented.
| JAXTAM Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="tEl9dunlB2me" colab={"base_uri": "https://localhost:8080/"} outputId="16f53cd1-12e9-44e4-87b3-b3d5965f25c3"
# !pip install nerda -q
# !pip install seqeval -q
# !pip install nerda -q
# !pip install flair -q
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 123} id="dr9YQltqB4tW" outputId="07b4cb8a-d4fd-4e64-f090-cacf15679139"
from NERDA.datasets import get_conll_data, download_conll_data
from google.colab import files
import pandas as pd
import ast
import unicodedata
import numpy as np
import seqeval.metrics
import spacy
import torch
from tqdm import tqdm, trange
from transformers import LukeTokenizer, LukeForEntitySpanClassification
from flair.data import Sentence
from flair.models import SequenceTagger
import timeit
from sklearn.model_selection import train_test_split
uploaded = files.upload()
download_conll_data()
training = get_conll_data('train')
validation = get_conll_data('valid')
testing = get_conll_data('test')
# + colab={"base_uri": "https://localhost:8080/"} id="mEBI1FrLeOel" outputId="0e1b9d1a-8f15-4049-9391-749a1718c67b"
# Download the testb set of the CoNLL-2003 dataset
# !wget https://raw.githubusercontent.com/synalp/NER/master/corpus/CoNLL-2003/eng.testb
# + id="T0X1i3QEB6e2"
# BIO tag set of CoNLL-2003: B- opens an entity span, I- continues it,
# across the four entity classes person / organisation / location /
# miscellaneous. Tokens outside any entity carry the 'O' tag (declared
# separately as tag_outside when the NERDA model is built).
tag_scheme = [
'B-PER',
'I-PER',
'B-ORG',
'I-ORG',
'B-LOC',
'I-LOC',
'B-MISC',
'I-MISC'
]
# + id="-ULQK1x6M6cY"
def generate_labels(input_text):
    """Generate gold BIO tags for a synthetic sentence built from a name.

    Sentences are either a bare name ("First [Middle ...] Last") or
    "<name> went to the store". Name tokens are tagged B-PER/I-PER and the
    four template words are tagged O.

    Fix/generalisation: the original returned exactly two PER tags for any
    multi-token name, so names of three or more tokens produced fewer
    labels than tokens; now every name token gets a tag.
    """
    text = str(input_text)
    template = " went to the store"
    if text.endswith(template):
        name_tokens = text[: -len(template)].split(" ")
        return ["B-PER"] + ["I-PER"] * (len(name_tokens) - 1) + ["O"] * 4
    tokens = text.split(" ")
    return ["B-PER"] + ["I-PER"] * (len(tokens) - 1)
# + id="t8OHlFfmCJqy"
transformer = 'studio-ousia/luke-large-finetuned-conll-2003'
# hyperparameters for network
dropout = 0.1
# hyperparameters for training
# training_hyperparameters = {
# 'epochs' : 5,
# 'warmup_steps' : 2500,
# 'train_batch_size': 2048,
# 'learning_rate': 1e-5
# }
training_hyperparameters = {
'epochs' : 2,
'warmup_steps' : 500,
'train_batch_size': 13,
'learning_rate': 1e-5
}
# + id="tDXtUVVP16k0"
def get_sentence_from_name(input_name):
    """Tokenize a name into its space-separated words (coerced to str)."""
    return str(input_name).split(" ")
# + id="VO9vzT_PBlbH"
# Build BIO-labelled, tokenized training examples from the curated names.
retrain_subset = pd.read_csv("retrain_processed.csv", index_col=0)
retrain_subset["tags_list"] = retrain_subset["Name"].apply(lambda x: generate_labels(x))
retrain_subset["sentences"] = retrain_subset["Name"].apply(lambda x: get_sentence_from_name(x))
# Stratify on race so both splits keep the demographic distribution.
rt_train, rt_valid = train_test_split(retrain_subset, test_size=0.15, stratify=retrain_subset['Race'])
# + id="f7LxzDvd12b_"
# NOTE(review): this cell rebuilds retrain_subset identically to the cell
# above (without re-splitting) -- it appears redundant; confirm before removing.
retrain_subset = pd.read_csv("retrain_processed.csv", index_col=0)
retrain_subset["tags_list"] = retrain_subset["Name"].apply(lambda x: generate_labels(x))
retrain_subset["sentences"] = retrain_subset["Name"].apply(lambda x: get_sentence_from_name(x))
# + id="vSIaxPg_C6aU"
# Convert the splits into the {"sentences": ..., "tags": ...} dict shape
# that NERDA's dataset_training / dataset_validation arguments expect.
retrain_dict = {"sentences": list(rt_train["sentences"]), "tags": list(rt_train["tags_list"])}
valid_dict = {"sentences": list(rt_valid["sentences"]), "tags": list(rt_valid["tags_list"])}
# + id="J9K5HhiOETfo"
# Augment CoNLL-2003 with the curated name examples for fine-tuning.
total_sentences = list(retrain_dict["sentences"]) + list(training["sentences"])
total_tags = list(retrain_dict["tags"]) + list(training["tags"])
valid_sentences = list(valid_dict["sentences"]) + list(validation["sentences"])
valid_tags = list(valid_dict["tags"]) + list(validation["tags"])
total_retrain_dict = {"sentences": total_sentences, "tags": total_tags}
total_valid_dict = {"sentences": valid_sentences, "tags": valid_tags}
# + [markdown] id="sgvbouDmgORN"
# # LUKE Model Finetuning
# + id="6ZCxsaBaCLuq"
# from NERDA.models import NERDA
# model = NERDA(
# dataset_training = total_retrain_dict,
# dataset_validation = total_valid_dict,
# tag_scheme = tag_scheme,
# tag_outside = 'O',
# transformer = transformer,
# dropout = dropout,
# hyperparameters = training_hyperparameters
# )
# + id="Ur2aoywpCObT"
# model.train()
# + [markdown] id="LLw3DrDngSL1"
# # Custom LUKE Model Testing on CoNLL 2003 Test Set
#
# + id="qwtaBvF-CRN9"
# test = get_conll_data('test')
# model.evaluate_performance(test)
# + [markdown] id="tb8dTEuDga6v"
# # Custom LUKE Model Testing on Curated Name Dataset
#
# + id="TnOnC0HAe7O6"
processed_test_df = pd.read_csv("processed_df.csv", index_col=0)
processed_test_df["tags_list"] = processed_test_df["Name"].apply(lambda x: generate_labels(x))
processed_test_df["sentences"] = processed_test_df["Name"].apply(lambda x: get_sentence_from_name(x))
processed_test_dict = {"sentences": list(processed_test_df["sentences"]), "tags": list(processed_test_df["tags_list"])}
# + id="CAP891xVBXmb"
# model.evaluate_performance(processed_test_dict)
# + id="CuTaXM9tCzrp"
# curated_test_labels = processed_test_dict["tags"]
# curated_pred_labels = [generate_entities(processed_test_dict["sentences"][i]) for i in range(len(processed_test_dict["sentences"]))]
# # curated_pred_labels = [i[0] for i in curated_pred_labels]
# + id="rm7l_9VcEHvV"
# print(seqeval.metrics.classification_report(curated_test_labels, curated_pred_labels, digits=4))
# + id="Wp6lE3dJom3z" colab={"base_uri": "https://localhost:8080/", "height": 162, "referenced_widgets": ["c1666dffd11141aabd8ee273a00279e9", "5cc685e1ec3d4d36b33d3d7c5c303d4d", "ce0bf6efa78346919b440262cb772237", "e7dbc698083940d68f080a3c8bf7c424", "7d0cfb4db0b14c668d4f65f3c3d9e324", "a5a8fb6347de48f68a418db45ca84d62", "b5e4cebc2dfd4ffa8292d876046a0d26", "3c50e4c032ad4cec8ea4d72b5ebad0b5", "<KEY>", "40e0deec223a47f6b02a3d5921f3083d", "e20fe77d020e4280a27113fdc46b0972", "<KEY>", "<KEY>", "b0b7a02ef8db443490a0fe7d6342b8cc", "eb84ec2e934f42cfbdfe98daaa8ba096", "<KEY>", "968783f4c30743e8a13ac9d5d84c607c", "<KEY>", "68eb803e4f784e1f9889bad469c60241", "<KEY>", "<KEY>", "636c3d31e68c4294abbde61452470ace", "73629bf8b1314ef7bab6e1a9f44b3231", "a959da7de04a40a289c1cffa9ee479df", "f3a0528ab594427ea65128b06210cfa4", "<KEY>", "54338810c44a4d38bf9ee862162ef05f", "<KEY>", "0c34c9ef793b4e3c9fd0258b316b08b4", "e8883a27a9f14a218993219b3d18e7dc", "<KEY>", "893ea42bcfb14c4eb26f18feed224ed2", "6b85ad68ce1c420eaccda928417181ad", "<KEY>", "<KEY>", "<KEY>", "2caf6603932c4e579661013850eeea84", "<KEY>", "4b934187956e4e728f673d3edba1ef4a", "97292dff02134814b613ce64b3a6cc64", "<KEY>", "d7739095d89040888aa0fc38e01c7392", "3e6eea4c96eb4217bd57dcbe1db243b0", "d21f2eb8d0de4b6098eb7a3cc7120424"]} outputId="c52bd0ec-ded1-4f8d-9552-77e2927a05e1"
# load tagger
tagger = SequenceTagger.load("flair/ner-english-large")
# + id="FVWls5T-ItjK"
def generate_entities(input_string):
    # Predict NER tags for one tokenized sentence with the fine-tuned NERDA
    # model and unwrap the single-sentence result.
    # NOTE(review): `model` is created in a cell that is currently commented
    # out, so calling this as-is raises NameError -- confirm intended order.
    return model.predict([input_string])[0]
# + id="jB-n04gYlXXJ"
processed_white_df = processed_test_df.loc[processed_test_df["Race"]=="White"].reset_index(drop=True)
processed_black_df = processed_test_df.loc[processed_test_df["Race"]=="Black"].reset_index(drop=True)
processed_api_df = processed_test_df.loc[processed_test_df["Race"]=="API"].reset_index(drop=True)
processed_hispanic_df = processed_test_df.loc[processed_test_df["Race"]=="Hispanic"].reset_index(drop=True)
processed_test_dict_w = {"sentences": list(processed_white_df["sentences"]), "tags": list(processed_white_df["tags_list"])}
processed_test_dict_b = {"sentences": list(processed_black_df["sentences"]), "tags": list(processed_black_df["tags_list"])}
processed_test_dict_a = {"sentences": list(processed_api_df["sentences"]), "tags": list(processed_api_df["tags_list"])}
processed_test_dict_h = {"sentences": list(processed_hispanic_df["sentences"]), "tags": list(processed_hispanic_df["tags_list"])}
# + [markdown] id="pPAHzwUPtDDV"
# # Flair Baseline Results
# + id="eHklIiDv_K4r"
def generate_entities_flair_baseline(input_string):
    """Tag a token list with the Flair baseline and return per-token BIO tags.

    input_string: list of tokens; they are joined with single spaces before
    tagging, and one tag ("O", "B-XXX" or "I-XXX") is returned per token.

    The token/tag alignment is reconstructed textually: each entity's
    surface text is replaced in the sentence by its bracketed tag list and
    the result is re-split on spaces. NOTE(review): this assumes entity
    texts re-occur verbatim and in order; repeated or overlapping
    substrings could mis-align -- verify on such inputs.
    """
    output_length = len(input_string)
    input_string = " ".join(input_string)
    sentence = Sentence(input_string)
    # predict NER tags
    tagger.predict(sentence)
    sentence_length = len(sentence)
    values = ["O"] * output_length
    total_string = ""
    tagged_string = sentence.to_tagged_string()
    tagged_dict = sentence.to_dict(tag_type='ner')
    named_entities = tagged_dict["entities"]
    total_entities = []
    total_text = []
    for i in named_entities:
        text = i["text"]
        space_count = text.count(" ")
        entities = []
        # Normalise the Flair label string to one of the four bare classes.
        current_entity = str(i["labels"][0])[:5]
        current_text = str(i["text"])
        if "ORG" in current_entity:
            current_entity = "ORG"
        if "MISC" in current_entity:
            current_entity = "MISC"
        if "PER" in current_entity:
            current_entity = "PER"
        if "LOC" in current_entity:
            current_entity = "LOC"
        total_text.append(current_text)
        # BIO expansion: first token B-, one I- per additional token.
        entities.append("B-"+current_entity)
        if space_count >=1:
            for j in range(space_count):
                entities.append("I-"+current_entity)
        total_entities.append(entities)
    copy_string = input_string
    # Splice each entity's tag list (rendered without spaces) into the
    # sentence in place of its surface text, first occurrence only.
    for i, te in enumerate(total_text):
        copy_string = copy_string.replace(te, str(total_entities[i]).replace(" ", ""), 1)
    entity_list = []
    # Detach a period that ended up glued to a spliced-in tag list.
    copy_string = copy_string.replace("'].", "']")
    for i in copy_string.split(" "):
        prefix = (i[0:4])
        if prefix == "['B-":
            if i[-1] != "]":
                # Trim trailing punctuation stuck to the bracketed list.
                i = i[:i.index("]") + 1]
            entry = [n.strip() for n in ast.literal_eval(i)]
            entity_list.extend(entry)
        else:
            entity_list.append("O")
    return entity_list
# + id="pzCQfVoPA2Vg"
def get_named_entities_flair_baseline(input_row, index):
    """Tag every sentence of one CoNLL document with the Flair baseline.

    input_row: dict with "words" (flat token list) and
    "sentence_boundaries" (cumulative token offsets).
    index: document index (kept for signature compatibility; unused).
    Returns the concatenated per-token tag list for the whole document.
    """
    tokens = input_row["words"]
    boundaries = input_row["sentence_boundaries"]
    labels = []
    prev = 0
    for boundary in boundaries:
        if boundary == 0:
            continue
        sentence = tokens[prev:boundary]
        if len(sentence) >= 120:
            # Very long sentences are tagged in two halves to stay within
            # the tagger's practical input length.
            midpoint = len(sentence) // 2
            labels.extend(generate_entities_flair_baseline(sentence[:midpoint]))
            labels.extend(generate_entities_flair_baseline(sentence[midpoint:]))
        else:
            labels.extend(generate_entities_flair_baseline(sentence))
        prev = boundary
    return labels
# + id="_Gkb9EEBL44y"
# len(test_documents[149]["labels"])
# # ["labels"][-14]
# + id="y8C7NBXGAG7f"
# for i in range(len(test_labels)):
# if len(test_labels[i]) != len(pred_labels[i]):
# print("hit")
# print(len(test_labels[i]))
# print(len(pred_labels[i]))
# print(i)
# print("-----")
# + id="lnHIY94S_OcN" outputId="2662355a-f784-4bc2-db44-a48063a2ac20" colab={"base_uri": "https://localhost:8080/"}
# Evaluate the Flair baseline on the CoNLL-2003 testb documents.
start = timeit.default_timer()
# NOTE(review): test_documents is produced by load_documents(...), which is
# defined in a *later* cell -- this notebook relies on out-of-order
# execution; confirm the intended cell order.
test_labels_flair = [test_documents[i]["labels"] for i in range(len(test_documents))]
pred_labels_flair = [get_named_entities_flair_baseline(test_documents[i], i) for i in range(len(test_documents))]
print(seqeval.metrics.classification_report(test_labels_flair, pred_labels_flair, digits=4))
stop = timeit.default_timer()
print('Flair Runtime: {} seconds'.format(stop - start))
# + id="FjJhxhceqeeq" colab={"base_uri": "https://localhost:8080/", "height": 375} outputId="d7a4b949-5c1e-4981-912d-682bfc5e883c"
start = timeit.default_timer()
curated_test_labels_w = processed_test_dict_w["tags"]
curated_pred_labels_w = [generate_entities_flair_baseline(processed_test_dict_w["sentences"][i]) for i in range(len(processed_test_dict_w["sentences"]))]
print(seqeval.metrics.classification_report(curated_test_labels_w, curated_pred_labels_w, digits=4))
stop = timeit.default_timer()
print('Flair Runtime: {} seconds'.format(stop - start))
# + id="Lku7suRtmIuG"
start = timeit.default_timer()
curated_test_labels_b = processed_test_dict_b["tags"]
curated_pred_labels_b = [generate_entities_flair_baseline(processed_test_dict_b["sentences"][i]) for i in range(len(processed_test_dict_b["sentences"]))]
print(seqeval.metrics.classification_report(curated_test_labels_b, curated_pred_labels_b, digits=4))
stop = timeit.default_timer()
print('Flair Runtime: {} seconds'.format(stop - start))
# + id="sV5eSQsZmJJ9"
start = timeit.default_timer()
curated_test_labels_a = processed_test_dict_a["tags"]
curated_pred_labels_a = [generate_entities_flair_baseline(processed_test_dict_a["sentences"][i]) for i in range(len(processed_test_dict_a["sentences"]))]
print(seqeval.metrics.classification_report(curated_test_labels_a, curated_pred_labels_a, digits=4))
stop = timeit.default_timer()
print('Flair Runtime: {} seconds'.format(stop - start))
# + id="AJltK02lmJlu"
start = timeit.default_timer()
curated_test_labels_h = processed_test_dict_h["tags"]
curated_pred_labels_h = [generate_entities_flair_baseline(processed_test_dict_h["sentences"][i]) for i in range(len(processed_test_dict_h["sentences"]))]
print(seqeval.metrics.classification_report(curated_test_labels_h, curated_pred_labels_h, digits=4))
stop = timeit.default_timer()
print('Flair Runtime: {} seconds'.format(stop - start))
# + id="QWL_Q-PW_J5f"
# + [markdown] id="YcAEz7y6gomd"
# # Custom LUKE Model Testing on CoNLL 2003 Test Set
#
# + id="pDW10Qpfnx4r" colab={"base_uri": "https://localhost:8080/", "height": 241, "referenced_widgets": ["61448fa4f4144f35bf24277f0b120ed3", "69864b7bf5f94f22b25148593162b095", "905ee55ac94c40458bd59b5daf5c24b8", "3d5feb5079ee4c7490f4082bb96b648f", "5fd6e2865049400ba4e07ef2eab23512", "<KEY>", "f17e8982731944f787b5efc0900ce97d", "2aecbe5c02e6444cbce8b743bd914ea5", "15345dc8a3894d929f75b70a306b992c", "4d28e8cfc7e74107be93256b6020504b", "a5c40395a3b44641ac93960911cd18b9", "<KEY>", "<KEY>", "<KEY>", "7bd6e24f3e5a431a855111bb0c3e3ad6", "<KEY>", "<KEY>", "20c6e0d9df4c4e0d94d8e0abc57dce14", "8fe6e77be0d2492da371225f32cef6b1", "<KEY>", "f88a4e8e591d4ef4a845037d37684e62", "<KEY>", "<KEY>", "667f145caf3947acbbe8cc98f425a956", "<KEY>", "002d4fd8d7a24838a727cdab1c6ed9fe", "583d1b501d6949418a20af7a0ddd0ce1", "<KEY>", "<KEY>", "<KEY>", "3ffef9885f8f41d8856a2a63a89a9ca3", "cc03aa4ed34246059656a3e8c2ca8596", "<KEY>", "b06cdda183f04ac589797c8870b78ec4", "<KEY>", "1d48b06b5064483cbd93dec7e70fad9f", "36a3d857d80b40cfaae2aacbede3dd96", "6d43865360f34045952e589888d6dbd9", "<KEY>", "26626a8d91ed466898826a203edfca95", "<KEY>", "<KEY>", "<KEY>", "5d0c47ac158e4b5799cd23d02d1f9e69", "fa948bc4ba13485783c93ee2a46192bb", "<KEY>", "1b726451017d44418e3d17125a42a505", "37cfa81aab1444efa472e5e43833ad9c", "48ced37bdc0b4a9091275cc5ba643b58", "19b02d747a7045d185e5ee5fc942acef", "3dea51fa83f44235ae2686c9ea487a42", "<KEY>", "24e2aff59a2c4c47bc38a9dc67ebf73f", "406802b2eca047c394296c814f5b603e", "561c0db108a54488a4079656d85b04af", "<KEY>", "9ea4f8c4eb9b4341bae728f844330135", "<KEY>", "a0e42dd3c181481f80a729df744fdf7e", "<KEY>", "5f1305644e0a41d58d5ed40edd4ccbd8", "05f8c9ceff7e407c96c1254bd9138c84", "f95a3d86af5d4736af985740ae8835fe", "aef216d7343b4e09b8b9e1db36a26c7b", "<KEY>", "08d3d5e9665e4d619664ada9b9547c39", "1e42bd22a1064b1e9041d884a9a05169", "<KEY>", "eec53eef8a804762ab318f38843e415c", "<KEY>", "f491763b3b9a4519902e86600cec920e", "<KEY>", "26b6edec982740bc8825d6de3156b5bc", "<KEY>", 
"199307f2ddeb4b00a2f04b0723220481", "eab39d45044e40a8b06c8dba76337b6a", "c829d573340140ce9b64b70acabcc8dd"]} outputId="8dfc43d8-98e9-411b-af15-c7922611a31f"
# Load the tokenizer
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")
def load_documents(dataset_file):
    """Parse a CoNLL-2003 file into a list of per-document dicts.

    Each returned dict has:
        words: list of tokens for the whole document,
        labels: the last whitespace-separated field of each token line
            (the NER tag),
        sentence_boundaries: word indices at which sentences start
            (blank lines in the file mark sentence ends; the trailing
            boundary for the last sentence is only recorded if the file
            ends with a blank line).

    Documents are separated by lines starting with "-DOCSTART".
    """
    documents = []
    words = []
    labels = []
    sentence_boundaries = []
    with open(dataset_file) as handle:
        for raw in handle:
            stripped = raw.rstrip()
            if stripped.startswith("-DOCSTART"):
                # Flush the previous document, if any, and start a new one.
                if words:
                    documents.append({
                        "words": words,
                        "labels": labels,
                        "sentence_boundaries": sentence_boundaries,
                    })
                    words = []
                    labels = []
                    sentence_boundaries = []
                continue
            if stripped:
                fields = stripped.split(" ")
                words.append(fields[0])
                labels.append(fields[-1])
            elif not sentence_boundaries or sentence_boundaries[-1] != len(words):
                # Blank line: close the current sentence (skip duplicates).
                sentence_boundaries.append(len(words))
    # Flush the final document if the file did not end with -DOCSTART.
    if words:
        documents.append({
            "words": words,
            "labels": labels,
            "sentence_boundaries": sentence_boundaries,
        })
    return documents
def load_examples(documents):
    """Build LUKE span-classification examples from parsed CoNLL documents.

    For each sentence of each document, selects a context window of at most
    510 sub-word tokens around the sentence (presumably 512 minus the two
    special tokens — confirm against the tokenizer config), reconstructs the
    detokenized text, and enumerates every candidate entity span of at most
    30 sub-words inside the sentence.

    Parameters:
        documents: output of load_documents() — dicts with "words",
            "labels" and "sentence_boundaries".

    Returns:
        list of dicts with "text", "words" (the sentence words),
        "entity_spans" (character-offset spans into "text") and
        "original_word_spans" (word-index spans, end-exclusive).

    Relies on the module-level ``tokenizer`` plus ``tqdm`` and
    ``is_punctuation``.
    """
    examples = []
    max_token_length = 510
    max_mention_length = 30
    for document in tqdm(documents):
        words = document["words"]
        # Sub-word count of every word, used for the length budget below.
        subword_lengths = [len(tokenizer.tokenize(w)) for w in words]
        total_subword_length = sum(subword_lengths)
        sentence_boundaries = document["sentence_boundaries"]
        for i in range(len(sentence_boundaries) - 1):
            sentence_start, sentence_end = sentence_boundaries[i:i+2]
            if total_subword_length <= max_token_length:
                # if the total sequence length of the document is shorter than the
                # maximum token length, we simply use all words to build the sequence
                context_start = 0
                context_end = len(words)
            else:
                # if the total sequence length is longer than the maximum length, we add
                # the surrounding words of the target sentence to the sequence until it
                # reaches the maximum length
                context_start = sentence_start
                context_end = sentence_end
                cur_length = sum(subword_lengths[context_start:context_end])
                while True:
                    if context_start > 0:
                        if cur_length + subword_lengths[context_start - 1] <= max_token_length:
                            cur_length += subword_lengths[context_start - 1]
                            context_start -= 1
                        else:
                            break
                    if context_end < len(words):
                        if cur_length + subword_lengths[context_end] <= max_token_length:
                            cur_length += subword_lengths[context_end]
                            context_end += 1
                        else:
                            break
            # Re-join words into text; apostrophes and single-character
            # punctuation are glued to the previous word (no preceding space).
            text = ""
            for word in words[context_start:sentence_start]:
                if word[0] == "'" or (len(word) == 1 and is_punctuation(word)):
                    text = text.rstrip()
                text += word
                text += " "
            sentence_words = words[sentence_start:sentence_end]
            sentence_subword_lengths = subword_lengths[sentence_start:sentence_end]
            # Character offsets of each sentence word inside `text`.
            word_start_char_positions = []
            word_end_char_positions = []
            for word in sentence_words:
                if word[0] == "'" or (len(word) == 1 and is_punctuation(word)):
                    text = text.rstrip()
                word_start_char_positions.append(len(text))
                text += word
                word_end_char_positions.append(len(text))
                text += " "
            for word in words[sentence_end:context_end]:
                if word[0] == "'" or (len(word) == 1 and is_punctuation(word)):
                    text = text.rstrip()
                text += word
                text += " "
            text = text.rstrip()
            # Enumerate all candidate mention spans within the sentence.
            entity_spans = []
            original_word_spans = []
            for word_start in range(len(sentence_words)):
                for word_end in range(word_start, len(sentence_words)):
                    # NOTE(review): the slice excludes word_end, so the budget is
                    # measured without the last word — confirm this matches the
                    # upstream LUKE example code.
                    if sum(sentence_subword_lengths[word_start:word_end]) <= max_mention_length:
                        entity_spans.append(
                            (word_start_char_positions[word_start], word_end_char_positions[word_end])
                        )
                        original_word_spans.append(
                            (word_start, word_end + 1)
                        )
            examples.append(dict(
                text=text,
                words=sentence_words,
                entity_spans=entity_spans,
                original_word_spans=original_word_spans,
            ))
    return examples
def is_punctuation(char):
    """Return True if *char* is an ASCII or Unicode punctuation character.

    ASCII punctuation is matched by code-point range (!-/, :-@, [-`, {-~);
    everything else falls back to the Unicode "P*" general categories.
    """
    code = ord(char)
    if (33 <= code <= 47) or (58 <= code <= 64) or (91 <= code <= 96) or (123 <= code <= 126):
        return True
    return unicodedata.category(char).startswith("P")
# + colab={"base_uri": "https://localhost:8080/"} id="aUgqat8Pdpy3" outputId="b3bffdfb-7ad8-4e7f-c0bf-15f78b50f0f3"
# Parse the CoNLL-2003 test split and build span-classification examples.
test_documents = load_documents("eng.testb")
test_examples = load_examples(test_documents)
# + id="v31aMBEmtq5J"
def get_named_entities_custom_luke(input_row):
    """Predict NER labels for one parsed document, sentence by sentence.

    Sentences of 120+ words are split in half before prediction (the halves'
    label sequences are concatenated). Uses the module-level ``model``.

    Parameters:
        input_row: dict with "words" and "sentence_boundaries" as produced
            by load_documents().

    Returns:
        flat list of predicted labels covering all sentences in order.
    """
    words = input_row["words"]
    boundaries = input_row["sentence_boundaries"]
    predicted = []
    start = 0
    for end in boundaries:
        if end == 0:
            continue  # leading boundary marks the document start, not a sentence
        sentence = words[start:end]
        if len(sentence) >= 120:
            half = len(sentence) // 2
            labels = model.predict([sentence[:half]])[0] + model.predict([sentence[half:]])[0]
        else:
            labels = model.predict([sentence])[0]
        predicted.extend(labels)
        start = end
    return predicted
# + id="11WqaoyduUlM"
# + id="MnjGsBV-0I4_"
# model.save_network(model_path='model.bin')
# files.download('model.bin')
# + id="AIu1mD-dx_yU"
# + id="B47CgqAn8FU9"
# get_named_entities_flair_baseline([["hello", "I'm", "David"]])
# + id="mhPIsF6OsXuH"
# Gold labels and predictions for the first 50 test documents.
test_labels = [test_documents[i]["labels"] for i in range(len(test_documents[:50]))]
pred_labels = [get_named_entities_flair_baseline(test_documents[i]) for i in range(len(test_documents[:50]))]
# pred_labels = [get_named_entities_custom_luke(test_documents[i]) for i in range(len(test_documents))]
# + id="oOQgyP-Mza_Q"
# Ad-hoc inspection cells: peek at individual documents and label sequences.
len(test_documents[54]["labels"])
# + id="jwGCTFsUwHlm"
test_documents[13]["words"]
# + id="MaGHKxWX992F"
test_labels[13]
# + id="Vivhrnoh9bDg"
pred_labels[13]
# + id="ARe_NcOf9fs3"
# Indices of predictions with a specific (suspicious) length.
[i for i in range(len(pred_labels)) if len(pred_labels[i]) == 31]
# + id="yF7MiTGutnDK"
# Report any length mismatches between gold and predicted label sequences
# (seqeval requires aligned sequences).
for i, val in enumerate(test_labels):
    if len(val) != len(pred_labels[i]):
        print(pred_labels[i])
        print(val)
        print(len(test_documents[i]["words"]))
        print(len(val))
        print(len(pred_labels[i]))
        print("-----")
# + id="IM5Odty3yqAT"
# Entity-level precision/recall/F1 report.
print(seqeval.metrics.classification_report(test_labels, pred_labels, digits=4))
# + [markdown] id="j1L6Neq2g3PJ"
# # Baseline LUKE Model Testing on Curated Name Dataset
#
# + id="BAoTbpZUg4oi"
# + [markdown] id="3ZZHlJWTh8E_"
# # Baseline LUKE Model Testing on CoNLL 2003 Dataset
#
# + id="_YgiKHN8h9ls"
| notebooks/other_notebooks/model_finetuning_exp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Full
# +
import cv2
def getLaser(img, minArea, maxArea):
    """Find blob contours in *img* with area in (minArea, maxArea) and draw
    their convex hulls onto the global ``imgContour`` overlay.

    Parameters
    ----------
    img : single-channel binary image (the callers pass the masked edge map).
    minArea, maxArea : exclusive contour-area bounds.

    Fix: the original called ``cv2.findContours(masked, ...)`` — reading the
    module-level ``masked`` image and silently ignoring its ``img``
    parameter. Callers already pass ``masked`` as the argument, so using the
    parameter preserves behavior while removing the hidden global
    dependency. Dead commented-out shape-classification code was removed.
    """
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if minArea < area < maxArea:
            convexHull = cv2.convexHull(cnt)
            # Draw on the shared overlay; imgContour must exist at call time.
            cv2.drawContours(imgContour, [convexHull], -1, (255, 0, 0), 2)
def BrightnessContrast(brightness=0):
    """Trackbar callback: re-render the global ``img`` with the current
    Brightness/Contrast trackbar values and show it in the 'Effect' window.

    The *brightness* value OpenCV passes in is ignored; both settings are
    re-read from the 'GEEK' window trackbars.
    """
    current_brightness = cv2.getTrackbarPos('Brightness',
                                            'GEEK')
    current_contrast = cv2.getTrackbarPos('Contrast',
                                          'GEEK')
    adjusted = controller(img, current_brightness,
                          current_contrast)
    # Display the adjusted frame.
    cv2.imshow('Effect', adjusted)
def controller(img, brightness=255, contrast=127):
    """Return a copy of *img* with brightness and contrast applied.

    Trackbar positions arrive in [0, 510] and [0, 254]; they are first
    mapped onto the signed ranges [-255, 255] and [-127, 127]. Each
    adjustment blends the image with itself via cv2.addWeighted
    (out = img * alpha + gamma).

    Fix: the original named a local variable ``max``, shadowing the builtin;
    it is renamed ``highlight``. Dead commented-out code removed.
    """
    brightness = int((brightness - 0) * (255 - (-255)) / (510 - 0) + (-255))
    contrast = int((contrast - 0) * (127 - (-127)) / (254 - 0) + (-127))
    if brightness != 0:
        if brightness > 0:
            shadow = brightness
            highlight = 255
        else:
            shadow = 0
            highlight = 255 + brightness
        al_pha = (highlight - shadow) / 255
        ga_mma = shadow
        # Weighted sum of the image with itself shifts the value range.
        cal = cv2.addWeighted(img, al_pha,
                              img, 0, ga_mma)
    else:
        cal = img
    if contrast != 0:
        Alpha = float(131 * (contrast + 127)) / (127 * (131 - contrast))
        Gamma = 127 * (1 - Alpha)
        # Second pass stretches/compresses around the mid-grey value.
        cal = cv2.addWeighted(cal, Alpha,
                              cal, 0, Gamma)
    return cal
# The function waitKey waits for
# a key event infinitely or for delay
# milliseconds, when it is positive.
def vconcat_resize(img_list, interpolation
                   = cv2.INTER_CUBIC):
    """Stack images vertically after resizing them all to the narrowest
    width in *img_list* (aspect ratio preserved)."""
    # Narrowest width among the inputs becomes the common width.
    target_w = min(image.shape[1]
                   for image in img_list)
    resized = []
    for image in img_list:
        new_h = int(image.shape[0] * target_w / image.shape[1])
        resized.append(cv2.resize(image, (target_w, new_h),
                                  interpolation=interpolation))
    # Concatenate top-to-bottom.
    return cv2.vconcat(resized)
def hconcat_resize(img_list,
                   interpolation
                   = cv2.INTER_CUBIC):
    """Concatenate images left-to-right after resizing them all to the
    smallest height in *img_list* (aspect ratio preserved)."""
    # Smallest height among the inputs becomes the common height.
    target_h = min(image.shape[0]
                   for image in img_list)
    resized = []
    for image in img_list:
        new_w = int(image.shape[1] * target_h / image.shape[0])
        resized.append(cv2.resize(image, (new_w, target_h),
                                  interpolation=interpolation))
    # Concatenate side by side.
    return cv2.hconcat(resized)
def concat_vh(list_2d):
    """Tile a 2-D list of equally-sized images: each inner list becomes one
    row, and the rows are stacked vertically."""
    rows = [cv2.hconcat(row) for row in list_2d]
    return cv2.vconcat(rows)
def concat_tile_resize(list_2d,
                       interpolation = cv2.INTER_CUBIC):
    """Tile a 2-D list of arbitrarily-sized images into one mosaic.

    Each inner list is h-concatenated (images resized to a common height),
    then the resulting rows are v-concatenated (resized to a common width).

    Fix: the original hard-coded ``cv2.INTER_CUBIC`` when building the rows,
    silently ignoring its *interpolation* argument; the argument is now
    forwarded to both helpers.
    """
    rows = [hconcat_resize(row, interpolation=interpolation)
            for row in list_2d]
    return vconcat_resize(rows, interpolation=interpolation)
# +
# Python3 program to find
# the midpoint of a line
# Function to find the
# midpoint of a line
def midpoint(x1, x2, y1, y2):
    """Print and return the integer midpoint of segment (x1, y1)-(x2, y2).

    Fix: the original only printed the midpoint and implicitly returned
    None, yet callers assign the result (``Tochka = midpoint(...)``).
    The point is now returned as an ``(mx, my)`` tuple; the printed output
    is unchanged.
    """
    mx = (x1 + x2) // 2
    my = (y1 + y2) // 2
    print(mx, " , ", my)
    return (mx, my)
# Driver Code
# x1, y1, x2, y2 = -1, 2, 3, -6
# midpoint(x1, x2, y1, y2)
# This code is contributed by <NAME>.
def getRectangles(img, minArea, maxArea):
    """Detect board-shaped contours in a binary edge image and annotate the
    global ``imgContour`` overlay.

    Each contour with area in (minArea, maxArea) is fitted with a
    minimum-area rotated rectangle; the ratio of its adjacent sides
    classifies the board as "400x400", "350x200", or "Two 350x200" (two
    stacked boards detected as one blob). The label, centroid and — for
    double boards — the split line are drawn onto ``imgContour``.

    Fixes over the original:
      * guards against ZeroDivisionError for degenerate boxes (A2A3 == 0)
        and zero-moment contours (m00 == 0);
      * merges the duplicated "Two 350x200" drawing blocks;
      * removes dead commented-out code and unused locals
        (boundingRect results, ``a``, the None-valued ``Tochka``).

    Parameters
    ----------
    img : single-channel binary image (e.g. Canny output).
    minArea, maxArea : exclusive contour-area bounds.
    """
    contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if not (minArea < area < maxArea):
            continue
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, True)
        print(len(approx))
        objCor = len(approx)
        # Minimum-area (rotated) bounding rectangle and its corner points.
        rect = cv2.minAreaRect(cnt)
        box = np.int0(cv2.boxPoints(rect))
        # Lengths of two adjacent sides of the box.
        A1A2 = int(np.sqrt((np.square(box[0][0] - box[1][0]) + np.square(box[0][1] - box[1][1]))))
        A2A3 = int(np.sqrt((np.square(box[1][0] - box[2][0]) + np.square(box[1][1] - box[2][1]))))
        if A2A3 == 0:
            continue  # degenerate box: avoid division by zero below
        aspRatio = A1A2 / A2A3
        print('aspRatio = ', aspRatio)
        if objCor == 4:
            if 0.92 < aspRatio < 1.08:
                objectType = "400x400"
            elif 0.50 < aspRatio < 0.8 or 1.50 < aspRatio < 1.8:
                objectType = "350x200"
            else:
                objectType = "Two 350x200"
            cv2.drawContours(imgContour, [box], 0, (0, 0, 255), 2)
        else:
            objectType = "None"
        # End points of the line splitting the box across its longer side
        # (used to separate two stacked 350x200 boards).
        if A1A2 > A2A3:
            x1 = int((box[0][0] + box[1][0]) / 2)
            y1 = int((box[0][1] + box[1][1]) / 2)
            x2 = int((box[3][0] + box[2][0]) / 2)
            y2 = int((box[3][1] + box[2][1]) / 2)
        else:
            x1 = int((box[1][0] + box[2][0]) / 2)
            y1 = int((box[1][1] + box[2][1]) / 2)
            x2 = int((box[0][0] + box[3][0]) / 2)
            y2 = int((box[0][1] + box[3][1]) / 2)
        # Contour centroid from image moments.
        M = cv2.moments(cnt)
        if M["m00"] == 0:
            continue  # zero-area moment: centroid undefined
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        if objectType in ("400x400", "350x200", "Two 350x200"):
            # Label, centre marker and centre coordinates.
            cv2.putText(imgContour, objectType, (cX - 20, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(imgContour, (cX, cY), 2, (0, 255, 0), -1)
            cv2.putText(imgContour, str(cX), (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
            cv2.putText(imgContour, str(cY), (cX, cY + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
        if objectType == "Two 350x200":
            # Line separating the two boards.
            cv2.line(imgContour, (x1, y1), (x2, y2), (0, 0, 255), 1)
        # Diagnostic output: midpoint of side A1-A2.
        midpoint(box[0][0], box[1][0], box[0][1], box[1][1])
# +
import cv2
import numpy as np
from IPython.display import clear_output
import keyboard
import sys
sys.path.insert(1, '../pyKinectAzure/')
from pyKinectAzure import pyKinectAzure, _k4a
# Path to the module
# TODO: Modify with the path containing the k4a.dll from the Azure Kinect SDK
modulePath = 'C:\\Program Files\\Azure Kinect SDK v1.4.1\\sdk\\windows-desktop\\amd64\\release\\bin\\k4a.dll'
# under x86_64 linux please use r'/usr/lib/x86_64-linux-gnu/libk4a.so'
# In Jetson please use r'/usr/lib/aarch64-linux-gnu/libk4a.so'
# img = cv2.imread("13.png")
# img = cv2.resize(img,(400,400))
def empty(a):
    """No-op callback required by cv2.createTrackbar (*a* is the new
    trackbar position, ignored)."""
    return None
kernel = np.ones((3, 3), 'uint8')  # 3x3 structuring element for erode/dilate
# imgErode = cv2.erode(imgBlur, kernel, cv2.BORDER_REFLECT, iterations=1)
webcam = cv2.VideoCapture(1)  # camera at device index 1 (external webcam)
if __name__ == '__main__':
    # Placeholder frames so windows have content before the first capture.
    img = cv2.imread("13.png")
    depth_color_image = cv2.imread("13.png")
    # Initialize the library with the path containing the module
    pyK4A = pyKinectAzure(modulePath)
    # Open device
    pyK4A.device_open()
    # Modify camera configuration
    device_config = pyK4A.config
    device_config.color_format = _k4a.K4A_IMAGE_FORMAT_COLOR_BGRA32
    device_config.color_resolution = _k4a.K4A_COLOR_RESOLUTION_1080P
    device_config.depth_mode = _k4a.K4A_DEPTH_MODE_NFOV_UNBINNED
    print(device_config)
    # Start cameras using modified configuration
    pyK4A.device_start_cameras(device_config)
    k = 0
    # Tuning UI: "TrackBars" holds all processing parameters, "GEEK" the
    # brightness/contrast sliders.
    cv2.namedWindow("TrackBars")
    cv2.namedWindow('GEEK')
    cv2.resizeWindow("TrackBars",1260,680)
    cv2.createTrackbar("Blur","TrackBars",1,50,empty)
    cv2.createTrackbar("BlurTimes","TrackBars",1,10,empty)
    cv2.createTrackbar("BlurDepth","TrackBars",1,50,empty)
    cv2.createTrackbar("BlurDepthTimes","TrackBars",1,10,empty)
    cv2.createTrackbar("Erode","TrackBars",0,10,empty)
    cv2.createTrackbar("Dialate","TrackBars",0,10,empty)
    cv2.createTrackbar("Canny X","TrackBars",0,250,empty)
    cv2.createTrackbar("Canny Y","TrackBars",0,250,empty)
    cv2.createTrackbar("Canny X Depth","TrackBars",0,250,empty)
    cv2.createTrackbar("Canny Y Depth","TrackBars",0,250,empty)
    cv2.createTrackbar("A","TrackBars",0,1500,empty)
    cv2.createTrackbar("B","TrackBars",0,1500,empty)
    cv2.createTrackbar("C","TrackBars",0,1500,empty)
    cv2.createTrackbar("D","TrackBars",0,1500,empty)
    cv2.createTrackbar("Minimum Area","TrackBars",0,35000,empty)
    cv2.createTrackbar("Maximum Area","TrackBars",0,35000,empty)
    cv2.createTrackbar("Hue Min","TrackBars",0,179,empty)
    cv2.createTrackbar("Hue Max","TrackBars",179,179,empty)
    cv2.createTrackbar("Sat Min","TrackBars",0,255,empty)
    cv2.createTrackbar("Sat Max","TrackBars",255,255,empty)
    cv2.createTrackbar("Val Min","TrackBars",0,255,empty)
    cv2.createTrackbar("Val Max","TrackBars",179,255,empty)
    # The function imshow displays an
    # image in the specified window.
    # BrightnessContrast(0)
    # createTrackbar(trackbarName,
    # windowName, value, count, onChange)
    # Brightness range -255 to 255
    cv2.createTrackbar('Brightness',
                       'GEEK', 255, 2 * 255,
                       empty)
    # Contrast range -127 to 127
    cv2.createTrackbar('Contrast', 'GEEK',
                       127, 2 * 127,
                       empty)
    while True:
        # success, img = webcam.read()
        # Get capture
        pyK4A.device_get_capture()
        # Get the depth image from the capture
        depth_image_handle = pyK4A.capture_get_depth_image()
        # Get the color image from the capture
        color_image_handle = pyK4A.capture_get_color_image()
        # Check the image has been read correctly
        if depth_image_handle and color_image_handle:
            # Read and convert the image data to numpy array:
            depth_image = pyK4A.image_convert_to_numpy(depth_image_handle)
            depth_color_image = cv2.convertScaleAbs (depth_image, alpha=0.05) #alpha is fitted by visual comparison with Azure k4aviewer results
            maximum_hole_size = 30
            depth_color_image = cv2.applyColorMap(depth_color_image, cv2.COLORMAP_JET)
            color_image = pyK4A.image_convert_to_numpy(color_image_handle)[:,:,:3]
            img = cv2.resize(color_image,(400,320))
            # The function imread loads an image
            # from the specified file and returns it.
            # original = cv2.imread("pic.jpeg")
            # Making another copy of an image.
            # img = original.copy()
            # The function namedWindow creates a
            # window that can be used as a placeholder
            # for images.
            # img = depth_color_image
            # img_depth = cv2.resize(img,(400,320))
            # Apply brightness/contrast from the 'GEEK' trackbars.
            brightness = cv2.getTrackbarPos('Brightness', 'GEEK')
            contrast = cv2.getTrackbarPos('Contrast', 'GEEK')
            effect = controller(img, brightness, contrast)
            # Convert to HSV and threshold with the trackbar colour ranges.
            imgHSV = cv2.cvtColor(effect,cv2.COLOR_BGR2HSV)
            h_min = cv2.getTrackbarPos("Hue Min", "TrackBars")
            h_max = cv2.getTrackbarPos("Hue Max", "TrackBars")
            s_min = cv2.getTrackbarPos("Sat Min", "TrackBars")
            s_max = cv2.getTrackbarPos("Sat Max", "TrackBars")
            v_min = cv2.getTrackbarPos("Val Min", "TrackBars")
            v_max = cv2.getTrackbarPos("Val Max", "TrackBars")
            print(h_min, h_max, s_min, s_max, v_min, v_max)
            lower = np.array([h_min,s_min,v_min])
            upper = np.array([h_max,s_max,v_max])
            mask = cv2.inRange(imgHSV,lower,upper)
            imgResult = cv2.bitwise_and(effect,effect,mask=mask)
            #===========================================
            # Gaussian kernel sizes must be odd; round an even trackbar
            # value down by one.
            blur = cv2.getTrackbarPos("Blur", "TrackBars")
            if blur%2 == 1:
                blur = cv2.getTrackbarPos("Blur", "TrackBars")
            else:
                blur = cv2.getTrackbarPos("Blur", "TrackBars")-1
            blurtimes = cv2.getTrackbarPos("BlurTimes", "TrackBars")
            blurdepth = cv2.getTrackbarPos("BlurDepth", "TrackBars")
            if blurdepth%2 == 1:
                blurdepth = cv2.getTrackbarPos("BlurDepth", "TrackBars")
            else:
                blurdepth = cv2.getTrackbarPos("BlurDepth", "TrackBars")-1
            blurdepthtimes = cv2.getTrackbarPos("BlurDepthTimes", "TrackBars")
            erode = cv2.getTrackbarPos("Erode", "TrackBars")
            dialateN = cv2.getTrackbarPos("Dialate", "TrackBars")
            # NOTE(review): reads "Dialate" again — probably meant a separate
            # depth-dilate trackbar; confirm intent.
            dialateDepthN = cv2.getTrackbarPos("Dialate", "TrackBars")
            cannyx = cv2.getTrackbarPos("Canny X", "TrackBars")
            cannyy = cv2.getTrackbarPos("Canny Y", "TrackBars")
            cannyx_depth = cv2.getTrackbarPos("Canny X Depth", "TrackBars")
            cannyy_depth = cv2.getTrackbarPos("Canny Y Depth", "TrackBars")
            # (A, B)-(C, D) is the region-of-interest rectangle.
            A = cv2.getTrackbarPos("A", "TrackBars")
            B = cv2.getTrackbarPos("B", "TrackBars")
            C = cv2.getTrackbarPos("C", "TrackBars")
            D = cv2.getTrackbarPos("D", "TrackBars")
            minArea = cv2.getTrackbarPos("Minimum Area", "TrackBars")
            maxArea = cv2.getTrackbarPos("Maximum Area", "TrackBars")
            print(blur)
            # Convert the img to grayscale
            imgGray = cv2.cvtColor(imgResult,cv2.COLOR_BGR2GRAY)
            depth_image_gray = cv2.cvtColor(depth_color_image,cv2.COLOR_BGR2GRAY)
            #Blur
            imgBlur = cv2.GaussianBlur(imgGray,(blur,blur),blurtimes)
            depth_image_blur = cv2.GaussianBlur(depth_image_gray,(blurdepth,blurdepth),blurdepthtimes)
            #Erode
            imgErode = cv2.erode(imgBlur, kernel, cv2.BORDER_REFLECT, iterations=erode)
            # Apply edge detection method on the image
            imgCanny = cv2.Canny(imgErode,cannyx,cannyy)
            depth_image_canny = cv2.Canny(depth_image_blur,cannyx_depth,cannyy_depth)
            # Thicken the detected edges.
            imgDialation = cv2.dilate(imgCanny,kernel, iterations = dialateN)
            # Crop to the rectangular region of interest.
            mask = np.zeros(imgCanny.shape[:2], dtype="uint8")
            cv2.rectangle(mask, (A, B), (C, D), 255, -1)
            # cv2.rectangle(mask, (120, 120), (290, 310), 255, -1)
            masked = cv2.bitwise_and(imgDialation, imgDialation, mask=mask)
            imgContour = effect.copy()
            # Run laser-spot and rectangle detection (both draw on imgContour).
            getLaser(masked, minArea, maxArea)
            getRectangles(depth_image_canny, minArea, maxArea)
            imgBlank = np.zeros_like(img)
            center = None
            # cv2.imshow('ConvexHull', imgContour)
            # cv2.waitKey(0)
            # function calling
            # im_tile_resize = concat_tile_resize([[img,effect,imgResult],
            # [imgCanny, imgDialation],
            # [masked, imgContour, img]])
            # Convert single-channel intermediates to 3 channels for tiling.
            imgBlurRGB = cv2.cvtColor(imgBlur,cv2.COLOR_GRAY2RGB)
            imgCannyRGB = cv2.cvtColor(imgCanny,cv2.COLOR_GRAY2RGB)
            imgDialationRGB = cv2.cvtColor(imgDialation,cv2.COLOR_GRAY2RGB)
            imgGrayRGB = cv2.cvtColor(imgGray,cv2.COLOR_GRAY2RGB)
            maskedRGB = cv2.cvtColor(masked,cv2.COLOR_GRAY2RGB)
            depth_image_cannyRGB = cv2.cvtColor(depth_image_canny,cv2.COLOR_GRAY2RGB)
            # function calling
            im_tile_resize = concat_tile_resize([[img,effect,imgBlurRGB],
                                                 [depth_color_image, imgResult, depth_image_cannyRGB],
                                                 [imgCannyRGB, imgDialationRGB, imgContour]])
            # show the image
            cv2.imshow('concat_tile_resize.jpg', im_tile_resize)
            print('Blur =', blur, 'Erode =',erode, 'Canny X =',cannyx, 'Canny Y =',cannyy)
            # NOTE(review): the depth/color image handles are never released
            # (image_release / capture_release) — confirm against the
            # pyKinectAzure API whether this leaks per-frame resources.
            cv2.waitKey(1)
            clear_output(wait=True)
    # img = original.copy()
    #img = cv2.imread("Sample.png") #import an image
    #img = cv2.resize(img,(400,400))
    #img = cv2.imread("Sample.png") #import an image
    #img = cv2.resize(img,(400,400))
# -
# Simple webcam preview loop (never exits; interrupt the kernel to stop).
while True:
    success, img = webcam.read()
    cv2.imshow('concat_tile_resize.jpg', img)
    cv2.waitKey(1)
#
# +
# Python3 program to find
# the midpoint of a line
# Function to find the
# midpoint of a line
def midpoint(x1, x2, y1, y2):
    """Print and return the integer midpoint of segment (x1, y1)-(x2, y2).

    Fix: the original only printed the midpoint and implicitly returned
    None, yet callers assign the result (``Tochka = midpoint(...)``).
    The point is now returned as an ``(mx, my)`` tuple; the printed output
    is unchanged.
    """
    mx = (x1 + x2) // 2
    my = (y1 + y2) // 2
    print(mx, " , ", my)
    return (mx, my)
# Driver Code
# x1, y1, x2, y2 = -1, 2, 3, -6
# midpoint(x1, x2, y1, y2)
# This code is contributed by <NAME>.
def getRectangles(img, minArea, maxArea):
    """Detect board-shaped contours in a binary edge image and annotate the
    global ``imgContour`` overlay.

    Each contour with area in (minArea, maxArea) is fitted with a
    minimum-area rotated rectangle; the ratio of its adjacent sides
    classifies the board as "400x400", "350x200", or "Two 350x200" (two
    stacked boards detected as one blob). The label, centroid and — for
    double boards — the split line are drawn onto ``imgContour``.

    Fixes over the original:
      * guards against ZeroDivisionError for degenerate boxes (A2A3 == 0)
        and zero-moment contours (m00 == 0);
      * merges the duplicated "Two 350x200" drawing blocks;
      * removes dead commented-out code and unused locals
        (boundingRect results, ``a``, the None-valued ``Tochka``).

    Parameters
    ----------
    img : single-channel binary image (e.g. Canny output).
    minArea, maxArea : exclusive contour-area bounds.
    """
    contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if not (minArea < area < maxArea):
            continue
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, True)
        print(len(approx))
        objCor = len(approx)
        # Minimum-area (rotated) bounding rectangle and its corner points.
        rect = cv2.minAreaRect(cnt)
        box = np.int0(cv2.boxPoints(rect))
        # Lengths of two adjacent sides of the box.
        A1A2 = int(np.sqrt((np.square(box[0][0] - box[1][0]) + np.square(box[0][1] - box[1][1]))))
        A2A3 = int(np.sqrt((np.square(box[1][0] - box[2][0]) + np.square(box[1][1] - box[2][1]))))
        if A2A3 == 0:
            continue  # degenerate box: avoid division by zero below
        aspRatio = A1A2 / A2A3
        print('aspRatio = ', aspRatio)
        if objCor == 4:
            if 0.92 < aspRatio < 1.08:
                objectType = "400x400"
            elif 0.50 < aspRatio < 0.8 or 1.50 < aspRatio < 1.8:
                objectType = "350x200"
            else:
                objectType = "Two 350x200"
            cv2.drawContours(imgContour, [box], 0, (0, 0, 255), 2)
        else:
            objectType = "None"
        # End points of the line splitting the box across its longer side
        # (used to separate two stacked 350x200 boards).
        if A1A2 > A2A3:
            x1 = int((box[0][0] + box[1][0]) / 2)
            y1 = int((box[0][1] + box[1][1]) / 2)
            x2 = int((box[3][0] + box[2][0]) / 2)
            y2 = int((box[3][1] + box[2][1]) / 2)
        else:
            x1 = int((box[1][0] + box[2][0]) / 2)
            y1 = int((box[1][1] + box[2][1]) / 2)
            x2 = int((box[0][0] + box[3][0]) / 2)
            y2 = int((box[0][1] + box[3][1]) / 2)
        # Contour centroid from image moments.
        M = cv2.moments(cnt)
        if M["m00"] == 0:
            continue  # zero-area moment: centroid undefined
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        if objectType in ("400x400", "350x200", "Two 350x200"):
            # Label, centre marker and centre coordinates.
            cv2.putText(imgContour, objectType, (cX - 20, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(imgContour, (cX, cY), 2, (0, 255, 0), -1)
            cv2.putText(imgContour, str(cX), (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
            cv2.putText(imgContour, str(cY), (cX, cY + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
        if objectType == "Two 350x200":
            # Line separating the two boards.
            cv2.line(imgContour, (x1, y1), (x2, y2), (0, 0, 255), 1)
        # Diagnostic output: midpoint of side A1-A2.
        midpoint(box[0][0], box[1][0], box[0][1], box[1][1])
# +
import cv2
import numpy as np
from IPython.display import clear_output
import keyboard
import sys
sys.path.insert(1, '../pyKinectAzure/')
from pyKinectAzure import pyKinectAzure, _k4a
# Path to the module
# TODO: Modify with the path containing the k4a.dll from the Azure Kinect SDK
modulePath = 'C:\\Program Files\\Azure Kinect SDK v1.4.1\\sdk\\windows-desktop\\amd64\\release\\bin\\k4a.dll'
# under x86_64 linux please use r'/usr/lib/x86_64-linux-gnu/libk4a.so'
# In Jetson please use r'/usr/lib/aarch64-linux-gnu/libk4a.so'
# img = cv2.imread("13.png")
# img = cv2.resize(img,(400,400))
def empty(a):
    """No-op callback required by cv2.createTrackbar (*a* is the new
    trackbar position, ignored)."""
    return None
kernel = np.ones((3, 3), 'uint8')  # 3x3 structuring element for erode/dilate
# imgErode = cv2.erode(imgBlur, kernel, cv2.BORDER_REFLECT, iterations=1)
webcam = cv2.VideoCapture(1)  # camera at device index 1 (external webcam)
# Interactive tuning tool: streams Azure Kinect depth frames, colorizes them,
# and runs a blur -> erode -> Canny -> dilate -> rectangular-mask pipeline whose
# parameters are driven live by trackbars, displaying every intermediate image.
if __name__ == '__main__':
    # img = original.copy()
    #img = cv2.imread("Sample.png") #import an image
    #img = cv2.resize(img,(400,400))
    #img = cv2.imread("Sample.png") #import an image
    #img = cv2.resize(img,(400,400))
    # img = cv2.imread("Boards4.png")
    # img = cv2.resize(img,(400,400))
    kernel = np.ones((3, 3), 'uint8')
    # imgErode = cv2.erode(imgBlur, kernel, cv2.BORDER_REFLECT, iterations=1)
    # Static fallback images; both are overwritten once live frames arrive.
    img = cv2.imread("13.png")
    depth_color_image = cv2.imread("13.png")
    # Initialize the library with the path containing the module
    pyK4A = pyKinectAzure(modulePath)
    # Open device
    pyK4A.device_open()
    # Modify camera configuration
    device_config = pyK4A.config
    device_config.color_format = _k4a.K4A_IMAGE_FORMAT_COLOR_BGRA32
    device_config.color_resolution = _k4a.K4A_COLOR_RESOLUTION_1080P
    device_config.depth_mode = _k4a.K4A_DEPTH_MODE_NFOV_UNBINNED
    print(device_config)
    # Start cameras using modified configuration
    pyK4A.device_start_cameras(device_config)
    k = 0
    # imgBlank = np.zeros_like(img)
    # img = cv2.resize(img,(320,400))
    # One window hosting all tuning sliders. A..D are the two corners
    # (A,B)-(C,D) of the rectangular region-of-interest mask used below.
    cv2.namedWindow("TrackBars")
    cv2.resizeWindow("TrackBars",1260,480)
    cv2.createTrackbar("Blur","TrackBars",1,30,empty)
    cv2.createTrackbar("Erode","TrackBars",0,10,empty)
    cv2.createTrackbar("Canny X","TrackBars",0,200,empty)
    cv2.createTrackbar("Canny Y","TrackBars",0,200,empty)
    cv2.createTrackbar("A","TrackBars",0,1500,empty)
    cv2.createTrackbar("B","TrackBars",0,1500,empty)
    cv2.createTrackbar("C","TrackBars",0,1500,empty)
    cv2.createTrackbar("D","TrackBars",0,1500,empty)
    cv2.createTrackbar("Minimum Area","TrackBars",0,50000,empty)
    cv2.createTrackbar("Maximum Area","TrackBars",0,50000,empty)
    while True:
        # Get capture
        pyK4A.device_get_capture()
        # Get the depth image from the capture
        depth_image_handle = pyK4A.capture_get_depth_image()
        # Get the color image from the capture
        color_image_handle = pyK4A.capture_get_color_image()
        # Check the image has been read correctly
        # NOTE(review): the capture/image handles are never released inside the
        # loop — check the pyKinectAzure API (image_release / capture_release)
        # to avoid leaking native buffers.
        if depth_image_handle and color_image_handle:
            # Read and convert the image data to numpy array:
            depth_image = pyK4A.image_convert_to_numpy(depth_image_handle)
            depth_color_image = cv2.convertScaleAbs (depth_image, alpha=0.05) #alpha is fitted by visual comparison with Azure k4aviewer results
            maximum_hole_size = 30
            depth_color_image = cv2.applyColorMap(depth_color_image, cv2.COLORMAP_JET)
            color_image = pyK4A.image_convert_to_numpy(color_image_handle)[:,:,:3]
            # img = cv2.resize(depth_color_image,(400,320))
            img = depth_color_image
            # GaussianBlur needs an odd kernel size, so even slider values are
            # rounded down to the nearest odd number.
            # NOTE(review): when the slider is at 0 this yields -1, which is an
            # invalid ksize for cv2.GaussianBlur — clamp to a minimum of 1.
            blur = cv2.getTrackbarPos("Blur", "TrackBars")
            if blur%2 == 1:
                blur = cv2.getTrackbarPos("Blur", "TrackBars")
            else:
                blur = cv2.getTrackbarPos("Blur", "TrackBars")-1
            erode = cv2.getTrackbarPos("Erode", "TrackBars")
            cannyx = cv2.getTrackbarPos("Canny X", "TrackBars")
            cannyy = cv2.getTrackbarPos("Canny Y", "TrackBars")
            A = cv2.getTrackbarPos("A", "TrackBars")
            B = cv2.getTrackbarPos("B", "TrackBars")
            C = cv2.getTrackbarPos("C", "TrackBars")
            D = cv2.getTrackbarPos("D", "TrackBars")
            minArea = cv2.getTrackbarPos("Minimum Area", "TrackBars")
            maxArea = cv2.getTrackbarPos("Maximum Area", "TrackBars")
            # success, img = webcam.read()
            # Convert the img to grayscale
            imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            imgBlur = cv2.GaussianBlur(imgGray,(blur,blur),1)
            imgErode = cv2.erode(imgBlur, kernel, cv2.BORDER_REFLECT, iterations=erode)
            # Apply edge detection method on the image
            imgCanny = cv2.Canny(imgErode,cannyx,cannyy)
            imgDialation = cv2.dilate(imgCanny,kernel, iterations = 1)
            # Keep only edges inside the rectangle (A,B)-(C,D).
            mask = np.zeros(imgCanny.shape[:2], dtype="uint8")
            cv2.rectangle(mask, (A, B), (C, D), 255, -1)
            # cv2.rectangle(mask, (120, 120), (290, 310), 255, -1)
            masked = cv2.bitwise_and(imgDialation, imgDialation, mask=mask)
            imgContour = img.copy()
            # getRectangles is defined elsewhere in this project — presumably it
            # finds/annotates rectangular contours within [minArea, maxArea];
            # TODO confirm its side effects on imgContour.
            getRectangles(masked, minArea, maxArea)
            imgBlank = np.zeros_like(img)
            center = None
            # Show every intermediate stage of the pipeline for tuning.
            cv2.waitKey(1)
            cv2.imshow("imgGray",imgGray)
            cv2.waitKey(1)
            cv2.imshow("imgErode",imgErode)
            cv2.waitKey(1)
            cv2.imshow("imgCanny",imgCanny)
            cv2.waitKey(1)
            cv2.imshow("imgDialation",imgDialation)
            cv2.waitKey(1)
            cv2.imshow("imgMasked",masked)
            cv2.waitKey(1)
            cv2.imshow("imgContour",imgContour)
            cv2.waitKey(1)
            print('Blur =', blur, 'Erode =',erode, 'Canny X =',cannyx, 'Canny Y =',cannyy)
            cv2.waitKey(1)
            clear_output(wait=True)
    #img = cv2.imread("Sample.png") #import an image
    #img = cv2.resize(img,(400,400))
    #img = cv2.imread("Sample.png") #import an image
    #img = cv2.resize(img,(400,400))
# -
| pyKinectAzure/.ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tarea 1
#
# 31 julio 2019
# AI
# <NAME> 20160090
# **¿Cuál es el rango de valores que cada uno de estos tipos puede representar?**
#
# El rango de valores de cada tipo de datos varía desde números negativos a números positivos en el caso de los números con signo. Los sin signo van desde 0 hasta n.
#
# - float 32: 3.4E-38 a 3.4E+38
# - int 32: -2147483648 a 2147483647
# - uint 32: 0 a 4294967295
# - int 16: -32768 a 32767
# - uint 16: 0 a 65535
#
# Fuente: http://decsai.ugr.es/~jfv/ed1/c/cdrom/cap2/cap24.htm
# **¿Cuál es la diferencia entre un Int y un uint?**
#
# Los tipos de datos que tienen u al principio, contienen el mismo tamaño de bits pero no pueden almacenar números negativos. Solo almacenan números positivos dos veces más grandes que los números *int*.
#
# Fuente: https://codeday.me/es/qa/20181228/49148.html
#
# **¿Cuál es el tipo default que utiliza base python, numpy, pytorch?**
#
# - python: int, números con firma.
# - numpy: int, números con firma.
# - pytorch: int, números con firma.
#
| Tarea1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# # Counting letters
#
# * https://adventofcode.com/2020/day/2
#
# I like to use a dataclass for parsing tasks like these. A single regex to read out each line, and methods on the class to implement the password rule checks.
# +
import re
from dataclasses import dataclass
_line = re.compile(r'^(?P<min_>\d+)-(?P<max_>\d+) (?P<letter>[a-z]):\s*(?P<password>[a-z]+)$')


@dataclass
class PWRule:
    """A password paired with its policy: a letter and the bounds min_/max_."""

    min_: int
    max_: int
    letter: str
    password: str

    @classmethod
    def from_line(cls, line: str) -> 'PWRule':
        """Parse one puzzle-input line such as ``1-3 a: abcde``."""
        parsed = _line.search(line)
        low, high = int(parsed['min_']), int(parsed['max_'])
        return cls(low, high, parsed['letter'], parsed['password'])

    def is_valid(self) -> bool:
        """Sled-rental policy: the letter occurs between min_ and max_ times."""
        occurrences = self.password.count(self.letter)
        return not (occurrences < self.min_ or occurrences > self.max_)

    def is_valid_toboggan_policy(self) -> bool:
        """Toboggan policy: exactly one of the 1-based positions min_ and max_ holds the letter."""
        at_first = self.password[self.min_ - 1] == self.letter
        at_second = self.password[self.max_ - 1] == self.letter
        return at_first != at_second
def read_passwords(lines):
    """Parse an iterable of puzzle-input lines into a list of PWRule objects."""
    rules = []
    for line in lines:
        rules.append(PWRule.from_line(line))
    return rules
# Sanity-check both policies against the worked example from the puzzle text.
test = read_passwords('''\
1-3 a: abcde
1-3 b: cdefg
2-9 c: ccccccccc
'''.splitlines())
assert sum(pwr.is_valid() for pwr in test) == 2
assert sum(pwr.is_valid_toboggan_policy() for pwr in test) == 1
# -

# aocd fetches the personal puzzle input (requires a configured AoC session token).
import aocd

rules = read_passwords(aocd.get_data(day=2, year=2020).splitlines())
print('Part 1:', sum(pwr.is_valid() for pwr in rules))
print('Part 2:', sum(pwr.is_valid_toboggan_policy() for pwr in rules))
| 2020/Day 02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import GetOldTweets3 as got
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from transformers import pipeline
# + pycharm={"name": "#%%\n"}
# Loads the default Hugging Face zero-shot model (downloads weights on first use).
classifier = pipeline("zero-shot-classification")
# + pycharm={"name": "#%%\n"}
# Function that pulls tweets based on a general search query
# Parameters: (text query you want to search), (max number of most recent tweets to pull from)
def text_query_to_df(text_query, count):
    """Scrape up to *count* of the most recent English tweets matching *text_query*.

    Returns a DataFrame with 'Datetime' and 'Text' columns, one row per tweet.
    """
    # Build the GetOldTweets3 search criteria for this query.
    criteria = (got.manager.TweetCriteria()
                .setQuerySearch(text_query)
                .setMaxTweets(count)
                .setLang('en'))
    scraped = got.manager.TweetManager.getTweets(criteria)
    # Keep only the fields we need from each tweet object.
    rows = [[tweet.date, tweet.text] for tweet in scraped]
    return pd.DataFrame(rows, columns=['Datetime', 'Text'])
# Search query and the maximum number of most-recent tweets to pull.
txt = 'climate fight'
max_recs = 500
# Fetch the matching tweets into a DataFrame (note: despite older comments,
# no CSV file is written here).
tweets_df = text_query_to_df(txt, max_recs)
# + pycharm={"name": "#%%\n"}
tweets_df.head(10)
# + pycharm={"name": "#%%\n"}
# Candidate topics for zero-shot classification. "advertisment" keeps the
# original (misspelled) label string so the classifier output stays comparable.
candidate_labels = ["renewable", "politics", "emission", "temperature", "emergency", "advertisment"]
# One counter per label, in the same order as candidate_labels.
candidate_results = [0] * len(candidate_labels)
# Only count a tweet when the top label's score clears this threshold.
CONFIDENCE_THRESHOLD = 0.5
for sent in tqdm(tweets_df['Text'].values):
    # The pipeline returns labels sorted by score, highest first.
    res = classifier(sent, candidate_labels)
    top_label, top_score = res['labels'][0], res['scores'][0]
    if top_score > CONFIDENCE_THRESHOLD:
        # index() is safe: the classifier only returns labels drawn from
        # candidate_labels. This replaces six copy-pasted if-branches.
        candidate_results[candidate_labels.index(top_label)] += 1
        print(sent)
        print(res['labels'])
        print(res['scores'])
        print('\n')
print(candidate_results)
# + pycharm={"name": "#%%\n"}
# Assemble the per-topic counts into a small DataFrame for plotting.
data = {'labels': candidate_labels,
        'values': candidate_results}
df_chart = pd.DataFrame(data, columns=['labels','values'])
df_chart.head()
# + pycharm={"name": "#%%\n"}
# Bar chart of how many tweets were confidently assigned to each topic.
sns.barplot(data = df_chart
            ,x = 'labels'
            ,y = 'values'
            )
# + pycharm={"name": "#%%\n"}
| 01/zero-shot-pipeline-sentiment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# hello , iraq bagdata group
# السلام عليكم
# على وعدنا اليوم نصور اول فيديو عربي يشرح اساسيات الداتا ساينز
# شنو الي راح نسوي اليوم
# 1- نتعرف على kaggle
# 2- نزل داتا ست من كاجيل
# 3- نشوف شون نحلل البيانات
# 4- نشغل تعلم عميق نورال نيتورك على البيانات ونشوف شيطلع بيدنا
#
# خلي نبلش
# kaggle اشرتها قوقول
# تمثل منصة للشركات تعرض بياناتها يحللوها علماء البيانات الي ان شاء الله تكون انت احدهم مستقبلا
# وتحاول تحل مشاكل علمية عملية
#
# اول واهم داتا سيت تستخدمها كمبتديء هيه مشكلة التايتنك
# راح تتعلم منها كل الاساسيات وبنفس الوقت المشكلة واقعه بدومين عام ماكو واحد بينا مايعرف قصة التايتنك
#
# خل نشوف
# سبق ومنزل الداتا خل نشوف شنسوي
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# +
# Load the Kaggle Titanic training set (train.csv must be in the working directory).
DataSet = pd.read_csv('train.csv')
DataSet.head()
# -
# نعزل الواي نخلي بيه اليبلات ونعزل الاكس بي الفيشر الي نضن انو هيه مهمه
# زين شون حنعرف هيه مهمه
# هنا يدخل شي اسمه هندسة الخصائص
# بس حسوي شي بسيط حتى نفهم شويا عن البيانات
# لكن بشكل عام تحتاج تقرا الداتا زين وتفهمها حتى تختار افضل الخصائص كلما الخصائص مضبوطة كلما نتائج التخمين
# للمودل مالتك تكون ادق
# Target: column 1 of the Titanic CSV is 'Survived' (0 = died, 1 = survived).
Y = DataSet.iloc[:,1].values
Y[0]
# Features: columns 2, 4, 5 — Pclass, Sex and Age in the standard Titanic
# train.csv (TODO confirm against the actual file's column order).
X = DataSet.iloc[:,[2 , 4 , 5]].values
X[0]
# +
# Encode the 'Sex' string column into integer labels (female/male -> 0/1).
from sklearn.preprocessing import LabelEncoder
labels_x = LabelEncoder()
X[:,1] = labels_x.fit_transform(X[:,1])
X
# +
# Fill missing values (mainly in Age) with each column's mean.
# NOTE(review): sklearn.preprocessing.Imputer was removed in modern scikit-learn;
# newer code should use sklearn.impute.SimpleImputer.
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN' , strategy="mean" , axis=0)
imputer = imputer.fit(X[:,:])
X[:,:] = imputer.transform(X[:,:])
X
# -
# Scatter of the first two features, colored by outcome:
# green = survived, red = did not survive. (xrange: this notebook runs Python 2.)
for i in xrange(Y.shape[0]):
    c = 'green'
    if Y[i] == 0:
        c = 'red'
    plt.scatter(X[i,0] , X[i,1] , color = c)
plt.xlabel('the passengar class')
plt.ylabel('the gender')
# NOTE(review): no labeled artists were created, so this legend will be empty.
plt.legend()
plt.show()
# شتلاحضون
# الركاب بالمقصورات الاولى نسبة نجاتهم اكثر من المقصورات الدرجات الاقل منهم
#
#
#
# خل نجرب ناخذ بعد خصائص
# من الرسم هذا نلاحظ شغلة عدد النساء
# او جندر امراءة اي كان طفلة شابة متزوجه المهم جنسها
# هنا نسبة نجاتهن اكثر من الرجال
#
# هسه فهمنا انو البيانات الي اختارينها مؤثره
# طبعا اكو بعد بيانات بس انت لازم تدرس بياناتكم المهم وتعرف تاثيرهن خل نكمل برمجة
X[0]
# +
# Project-local helper module shipped alongside this notebook.
from mamonAnn import mamonAnn , catgoricl , normalize , cost
# Per the author's notes, catgoricl one-hot encodes column 0 (Pclass) into
# dummy variables — TODO confirm against mamonAnn's implementation.
X = catgoricl(X , 0)
X[0]
# -
# حولت الداتا الي هيه من نوع كاتجوريكال
# الى دمي فايربل
# يعني لو عندي كتجوركل بهلشكل
# a b c
# 0 1 0
# يعني هاي من نوع بي
# طبعا هسه لازم اسحب عنصر لان معادلة الاحتساب تضيف قيمة فلاطب بالدمي تراب اسحب عنصر اشفته
#
# Drop the first dummy column to avoid the dummy-variable trap (the encoding
# adds an intercept-like column, so one category must be removed).
X = X[:,1:]
X[0]
X.shape
# Scale the features so all columns share a comparable range; per the author's
# notes the scaler is mean-over-standard-deviation standardization.
X = normalize(X)
X[0]
X[3]
from sklearn.model_selection import train_test_split
x_train , x_test , y_train , y_test = train_test_split(X,Y , test_size = 0.20 , random_state=0)
# Python 2 print statements (this notebook uses the python2 kernel).
print x_train[3]
print x_test[3]
# Build the custom network: 4 inputs, hidden layers of 6/8/3 units, 1 output —
# TODO confirm the Layers() argument semantics against mamonAnn.
deep = mamonAnn()
LL = []
deep.Layers(4 , [6 , 8 , 3 ] , 1)
# lr مقادر التعلم
#
# ,batch , كل شكد ونسوي تعلم
# اني هنا حستخدم طريقة الري انفورسمينت
# يعني كل ايتم نتعلم عليه
# اكو طريقه اسمها الباتش واكو المني باتش يعني يقسم الى كل وجبة مثلا كل مية ايتم نتعلم
# reg , معامل الريجولايرايزيشن
#
# factive , mid , last , هنا طرق الاكتفيشن فنكشن
#
# LL هاي مصفوفة اخلي بيها قيم الارور اني هنا استخدم معادلة اسمه الكروس انتروبي
# الي هيه سالب معدل لوغارتمات الهدف والبردكشن نشوفها اوضح
# +
# Train and plot the loss history accumulated into LL (per the author's notes:
# 600 epochs, cross-entropy loss, sigmoid/tanh/sigmoid activations —
# TODO confirm the positional arguments against mamonAnn.fit's signature).
deep.fit(x_train , y_train , 600 , 100 , 0.003 ,1 , 0.0005 ,'sigmoid' , 'tanh' ,'sigmoid' , LL )
plt.plot(LL)
plt.show()
# -
# Training accuracy: fraction of rounded predictions matching the labels.
rate = np.mean(deep.out.round() == y_train)
rate
# Forward pass on the held-out test split ("farowrds" is the library's spelling).
deep.farowrds(x_test , 'sigmoid' , 'tanh' , 'sigmoid')
deep.out
# Test accuracy.
rate = np.mean(deep.out.round() == y_test)
rate
# هنا نخلص
# نسبة التخمين 61 بالمية طبعا اكو تحسينات تكدر تسويها
# اولا واهم شي اختيار الخصائص الصحيحة
#
# ثانيا تعديل معمارية النيورال يعني هسه مثال انطيك
# +
# شفت شون شمرت زايد الدالة بس غيرنا الاكتفيشن فنكشن صارت مو خوش نتائج
# نكدر نغير قيم التعلم او معمارية النيورال
# واحسن شي نسوي
# grid search
# يعني نخلي اكثر من قيم ومعمارية وندخلهن لوب والحاسبة تجرب لحد متلكى الافضل
# تحياتي الكم
| youtbe-kaggle-solving-mamonclass-deepAnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="1czVdIlqnImH"
# # Pix2Pix
# + [markdown] colab_type="text" id="1KD3ZgLs80vY"
# ### Goals
# In this notebook, you will write a generative model based on the paper [*Image-to-Image Translation with Conditional Adversarial Networks*](https://arxiv.org/abs/1611.07004) by Isola et al. 2017, also known as Pix2Pix.
#
# You will be training a model that can convert aerial satellite imagery ("input") into map routes ("output"), as was done in the original paper. Since the architecture for the generator is a U-Net, which you've already implemented (with minor changes), the emphasis of the assignment will be on the loss function. So that you can see outputs more quickly, you'll be able to see your model train starting from a pre-trained checkpoint - but feel free to train it from scratch on your own too.
#
#
# 
#
#
# <!-- You will take the segmentations that you generated in the previous assignment and produce photorealistic images. -->
#
# ### Learning Objectives
# 1. Implement the loss of a Pix2Pix model that differentiates it from a supervised U-Net.
# 2. Observe the change in generator priorities as the Pix2Pix generator trains, changing its emphasis from reconstruction to realism.
#
# <!-- When you're done with this assignment, you'll be able to understand much of [*Image-to-Image Translation with Conditional Adversarial Networks*](https://arxiv.org/abs/1611.07004), which introduced Pix2Pix.
#
# You'll be using the same U-Net as in the previous assignment, but you'll write another discriminator and change the loss to make it a GAN. -->
# + [markdown] colab_type="text" id="wU8DDM6l9rZb"
# ## Getting Started
# You will start by importing libraries, defining a visualization function, and getting the pre-trained Pix2Pix checkpoint. You will also be provided with the U-Net code for the Pix2Pix generator.
# + colab={} colab_type="code" id="JfkorNJrnmNO"
import torch
from torch import nn
from tqdm.auto import tqdm
from torchvision import transforms
from torchvision.datasets import VOCSegmentation
from torchvision.utils import make_grid
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
torch.manual_seed(0)
def show_tensor_images(image_tensor, num_images=25, size=(1, 28, 28)):
    '''
    Visualize a batch of images: reshape up to num_images tensors of the given
    per-image size into a 5-wide grid and display it with matplotlib.
    '''
    # Detach from the graph and move to CPU before reshaping for display.
    images = image_tensor.detach().cpu().view(-1, *size)
    grid = make_grid(images[:num_images], nrow=5)
    # make_grid returns CHW; matplotlib expects HWC (squeeze drops a lone channel).
    plt.imshow(grid.permute(1, 2, 0).squeeze())
    plt.show()
# + [markdown] colab_type="text" id="NjFyvNTG1CqY"
# #### U-Net Code
#
# The U-Net code will be much like the code you wrote for the last assignment, but with optional dropout and batchnorm. The structure is changed slightly for Pix2Pix, so that the final image is closer in size to the input image. Feel free to investigate the code if you're interested!
# + colab={} colab_type="code" id="xvY4ZNyUviY9"
def crop(image, new_shape):
    '''
    Center-crop an image tensor to the height/width given by new_shape.
    Parameters:
        image: image tensor of shape (batch size, channels, height, width)
        new_shape: a torch.Size whose last two entries give the target size
    Assumes both the input and target spatial sizes are even numbers.
    '''
    h_center = image.shape[2] // 2
    w_center = image.shape[3] // 2
    # Offsets of the crop window's top-left corner from the spatial center.
    top = h_center - new_shape[2] // 2
    left = w_center - new_shape[3] // 2
    return image[:, :, top:top + new_shape[2], left:left + new_shape[3]]
class ContractingBlock(nn.Module):
    '''
    ContractingBlock Class
    Performs two convolutions followed by a max pool operation.
    Values:
        input_channels: the number of channels to expect from a given input
        use_dropout: whether to apply Dropout after each convolution
        use_bn: whether to apply batch normalization after each convolution
    '''
    def __init__(self, input_channels, use_dropout=False, use_bn=True):
        super(ContractingBlock, self).__init__()
        # Two 3x3 same-padding convolutions that double the channel count;
        # the trailing max pool then halves the spatial size.
        self.conv1 = nn.Conv2d(input_channels, input_channels * 2, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(input_channels * 2, input_channels * 2, kernel_size=3, padding=1)
        self.activation = nn.LeakyReLU(0.2)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        if use_bn:
            # NOTE(review): a single BatchNorm2d instance is reused after both
            # convolutions, so both normalizations share affine parameters and
            # running statistics — confirm this is intentional (the pretrained
            # checkpoint depends on these attribute names either way).
            self.batchnorm = nn.BatchNorm2d(input_channels * 2)
        self.use_bn = use_bn
        if use_dropout:
            self.dropout = nn.Dropout()
        self.use_dropout = use_dropout
    def forward(self, x):
        '''
        Function for completing a forward pass of ContractingBlock:
        Given an image tensor, completes a contracting block and returns the transformed tensor.
        Parameters:
            x: image tensor of shape (batch size, channels, height, width)
        '''
        # conv -> [bn] -> [dropout] -> activation, applied twice, then pool.
        x = self.conv1(x)
        if self.use_bn:
            x = self.batchnorm(x)
        if self.use_dropout:
            x = self.dropout(x)
        x = self.activation(x)
        x = self.conv2(x)
        if self.use_bn:
            x = self.batchnorm(x)
        if self.use_dropout:
            x = self.dropout(x)
        x = self.activation(x)
        x = self.maxpool(x)
        return x
class ExpandingBlock(nn.Module):
    '''
    ExpandingBlock Class:
    Performs an upsampling, a convolution, a concatenation of its two inputs,
    followed by two more convolutions with optional dropout
    Values:
        input_channels: the number of channels to expect from a given input
        use_dropout: whether to apply Dropout after conv2/conv3
        use_bn: whether to apply batch normalization after conv2/conv3
    '''
    def __init__(self, input_channels, use_dropout=False, use_bn=True):
        super(ExpandingBlock, self).__init__()
        # Bilinear upsampling doubles H/W; conv1 (kernel 2, no padding) then
        # shrinks each spatial dim by 1 and halves the channel count.
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv1 = nn.Conv2d(input_channels, input_channels // 2, kernel_size=2)
        # conv2 consumes the skip-concatenated tensor (back to input_channels).
        self.conv2 = nn.Conv2d(input_channels, input_channels // 2, kernel_size=3, padding=1)
        # conv3: kernel 2 with padding 1 grows each spatial dim by 1.
        self.conv3 = nn.Conv2d(input_channels // 2, input_channels // 2, kernel_size=2, padding=1)
        if use_bn:
            # NOTE(review): as in ContractingBlock, one BatchNorm2d instance is
            # shared by the conv2 and conv3 outputs — confirm intentional.
            self.batchnorm = nn.BatchNorm2d(input_channels // 2)
        self.use_bn = use_bn
        self.activation = nn.ReLU()
        if use_dropout:
            self.dropout = nn.Dropout()
        self.use_dropout = use_dropout
    def forward(self, x, skip_con_x):
        '''
        Function for completing a forward pass of ExpandingBlock:
        Given an image tensor, completes an expanding block and returns the transformed tensor.
        Parameters:
            x: image tensor of shape (batch size, channels, height, width)
            skip_con_x: the image tensor from the contracting path (from the opposing block of x)
                    for the skip connection
        '''
        x = self.upsample(x)
        x = self.conv1(x)
        # Center-crop the skip tensor so it matches x before concatenating
        # along the channel axis.
        skip_con_x = crop(skip_con_x, x.shape)
        x = torch.cat([x, skip_con_x], axis=1)
        x = self.conv2(x)
        if self.use_bn:
            x = self.batchnorm(x)
        if self.use_dropout:
            x = self.dropout(x)
        x = self.activation(x)
        x = self.conv3(x)
        if self.use_bn:
            x = self.batchnorm(x)
        if self.use_dropout:
            x = self.dropout(x)
        x = self.activation(x)
        return x
class FeatureMapBlock(nn.Module):
    '''
    FeatureMapBlock Class
    The final layer of a U-Net -
    maps each pixel to a pixel with the correct number of output dimensions
    using a 1x1 convolution.
    Values:
        input_channels: the number of channels to expect from a given input
        output_channels: the number of channels to expect for a given output
    '''
    def __init__(self, input_channels, output_channels):
        super(FeatureMapBlock, self).__init__()
        # 1x1 convolution: per-pixel channel remapping, spatial size unchanged.
        self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=1)
    def forward(self, x):
        '''
        Function for completing a forward pass of FeatureMapBlock:
        Given an image tensor, returns it mapped to the desired number of channels.
        Parameters:
            x: image tensor of shape (batch size, channels, height, width)
        '''
        x = self.conv(x)
        return x
class UNet(nn.Module):
    '''
    UNet Class
    A series of 6 contracting blocks followed by 6 expanding blocks to
    transform an input image into the corresponding paired image, with an upfeature
    layer at the start and a downfeature layer at the end.
    Values:
        input_channels: the number of channels to expect from a given input
        output_channels: the number of channels to expect for a given output
        hidden_channels: channel count after the upfeature layer; doubled at
            each contracting block
    '''
    def __init__(self, input_channels, output_channels, hidden_channels=32):
        super(UNet, self).__init__()
        self.upfeature = FeatureMapBlock(input_channels, hidden_channels)
        # Dropout in the first three contracting blocks, following the
        # Pix2Pix generator recipe.
        self.contract1 = ContractingBlock(hidden_channels, use_dropout=True)
        self.contract2 = ContractingBlock(hidden_channels * 2, use_dropout=True)
        self.contract3 = ContractingBlock(hidden_channels * 4, use_dropout=True)
        self.contract4 = ContractingBlock(hidden_channels * 8)
        self.contract5 = ContractingBlock(hidden_channels * 16)
        self.contract6 = ContractingBlock(hidden_channels * 32)
        self.expand0 = ExpandingBlock(hidden_channels * 64)
        self.expand1 = ExpandingBlock(hidden_channels * 32)
        self.expand2 = ExpandingBlock(hidden_channels * 16)
        self.expand3 = ExpandingBlock(hidden_channels * 8)
        self.expand4 = ExpandingBlock(hidden_channels * 4)
        self.expand5 = ExpandingBlock(hidden_channels * 2)
        self.downfeature = FeatureMapBlock(hidden_channels, output_channels)
        # Sigmoid output keeps generated pixel values in [0, 1].
        self.sigmoid = torch.nn.Sigmoid()
    def forward(self, x):
        '''
        Function for completing a forward pass of UNet:
        Given an image tensor, passes it through U-Net and returns the output.
        Parameters:
            x: image tensor of shape (batch size, channels, height, width)
        '''
        x0 = self.upfeature(x)
        x1 = self.contract1(x0)
        x2 = self.contract2(x1)
        x3 = self.contract3(x2)
        x4 = self.contract4(x3)
        x5 = self.contract5(x4)
        x6 = self.contract6(x5)
        # Each expanding block consumes the matching contracting activation
        # as a skip connection.
        x7 = self.expand0(x6, x5)
        x8 = self.expand1(x7, x4)
        x9 = self.expand2(x8, x3)
        x10 = self.expand3(x9, x2)
        x11 = self.expand4(x10, x1)
        x12 = self.expand5(x11, x0)
        xn = self.downfeature(x12)
        return self.sigmoid(xn)
# + [markdown] colab_type="text" id="T6ndvjc_1KXx"
# ## PatchGAN Discriminator
#
# Next, you will define a discriminator based on the contracting path of the U-Net to allow you to evaluate the realism of the generated images. Remember that the discriminator outputs a one-channel matrix of classifications instead of a single value. Your discriminator's final layer will simply map from the final number of hidden channels to a single prediction for every pixel of the layer before it.
# + colab={} colab_type="code" id="0nVuJPjV1f92"
# UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED CLASS: Discriminator
class Discriminator(nn.Module):
    '''
    Discriminator Class
    Structured like the contracting path of the U-Net, the discriminator will
    output a matrix of values classifying corresponding portions of the image as real or fake.
    Parameters:
        input_channels: the number of image input channels
        hidden_channels: the initial number of discriminator convolutional filters
    '''
    def __init__(self, input_channels, hidden_channels=8):
        super(Discriminator, self).__init__()
        self.upfeature = FeatureMapBlock(input_channels, hidden_channels)
        self.contract1 = ContractingBlock(hidden_channels, use_bn=False)
        self.contract2 = ContractingBlock(hidden_channels * 2)
        self.contract3 = ContractingBlock(hidden_channels * 4)
        self.contract4 = ContractingBlock(hidden_channels * 8)
        #### START CODE HERE ####
        # PatchGAN head: a 1x1 convolution mapping the final feature maps to a
        # single-channel grid of per-patch real/fake logits (fills the stub).
        self.final = nn.Conv2d(hidden_channels * 16, 1, kernel_size=1)
        #### END CODE HERE ####
    def forward(self, x, y):
        '''
        Forward pass: concatenate the candidate image x with the condition y
        along the channel axis and return the per-patch prediction matrix
        (one logit per spatial patch, e.g. 16x16 for 256px inputs).
        '''
        x = torch.cat([x, y], axis=1)
        x0 = self.upfeature(x)
        x1 = self.contract1(x0)
        x2 = self.contract2(x1)
        x3 = self.contract3(x2)
        x4 = self.contract4(x3)
        xn = self.final(x4)
        return xn
# + colab={} colab_type="code" id="AFZBTJ_4Ubld"
# UNIT TEST
# With hidden_channels=1 and 256px inputs, four spatial halvings yield a
# (1, 1, 16, 16) grid of per-patch predictions.
test_discriminator = Discriminator(10, 1)
assert tuple(test_discriminator(
    torch.randn(1, 5, 256, 256),
    torch.randn(1, 5, 256, 256)
).shape) == (1, 1, 16, 16)
print("Success!")
# + [markdown] colab_type="text" id="qRk_8azSq3tF"
# ## Training Preparation
# <!-- You'll be using the same U-Net as in the previous assignment, but you'll write another discriminator and change the loss to make it a GAN. -->
#
# Now you can begin putting everything together for training. You start by defining some new parameters as well as the ones you are familiar with:
# * **real_dim**: the number of channels of the real image and the number expected in the output image
# * **adv_criterion**: an adversarial loss function to keep track of how well the GAN is fooling the discriminator and how well the discriminator is catching the GAN
# * **recon_criterion**: a loss function that rewards similar images to the ground truth, which "reconstruct" the image
# * **lambda_recon**: a parameter for how heavily the reconstruction loss should be weighed
# * **n_epochs**: the number of times you iterate through the entire dataset when training
# * **input_dim**: the number of channels of the input image
# * **display_step**: how often to display/visualize the images
# * **batch_size**: the number of images per forward/backward pass
# * **lr**: the learning rate
# * **target_shape**: the size of the output image (in pixels)
# * **device**: the device type
# + colab={} colab_type="code" id="UXptQZcwrBrq"
import torch.nn.functional as F

# New parameters
adv_criterion = nn.BCEWithLogitsLoss()  # patch-level real/fake loss (expects raw logits)
recon_criterion = nn.L1Loss()  # pixel-wise reconstruction ("L1") loss
lambda_recon = 200  # weight of the reconstruction term in the generator loss
n_epochs = 20
input_dim = 3  # channels of the condition (satellite) image
real_dim = 3  # channels of the target (map) image
display_step = 200
batch_size = 4
lr = 0.0002
target_shape = 256  # output image side length in pixels
device = 'cuda'
# + [markdown] colab_type="text" id="WPOUC6-nVDCv"
# You will then pre-process the images of the dataset to make sure they're all the same size and that the size change due to U-Net layers is accounted for.
# + colab={} colab_type="code" id="PNAK2XqMJ419"
transform = transforms.Compose([
    transforms.ToTensor(),
])

import torchvision
# Each image in "maps" is a side-by-side pair (satellite | map); the halves
# are split apart later inside the training loop.
dataset = torchvision.datasets.ImageFolder("maps", transform=transform)
# + [markdown] colab_type="text" id="t7vKN1POUjud"
# Next, you can initialize your generator (U-Net) and discriminator, as well as their optimizers. Finally, you will also load your pre-trained model.
# + colab={} colab_type="code" id="vBY3Y9UrUgVX"
gen = UNet(input_dim, real_dim).to(device)
gen_opt = torch.optim.Adam(gen.parameters(), lr=lr)
# The discriminator sees the condition concatenated with a (real or fake)
# image, hence input_dim + real_dim channels.
disc = Discriminator(input_dim + real_dim).to(device)
disc_opt = torch.optim.Adam(disc.parameters(), lr=lr)
def weights_init(m):
    # DCGAN-style initialization: conv weights drawn from N(0, 0.02);
    # batchnorm weights likewise, with biases zeroed.
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        torch.nn.init.normal_(m.weight, 0.0, 0.02)
    if isinstance(m, nn.BatchNorm2d):
        torch.nn.init.normal_(m.weight, 0.0, 0.02)
        torch.nn.init.constant_(m.bias, 0)
# Feel free to change pretrained to False if you're training the model from scratch
pretrained = True
if pretrained:
    # Resume both networks and their optimizer states from the checkpoint
    # (state_dict keys must match the attribute names used in the classes above).
    loaded_state = torch.load("pix2pix_15000.pth")
    gen.load_state_dict(loaded_state["gen"])
    gen_opt.load_state_dict(loaded_state["gen_opt"])
    disc.load_state_dict(loaded_state["disc"])
    disc_opt.load_state_dict(loaded_state["disc_opt"])
else:
    # Fresh start: apply DCGAN-style weight initialization.
    gen = gen.apply(weights_init)
    disc = disc.apply(weights_init)
# + [markdown] colab_type="text" id="YcpFbNDYzJrh"
# While there are some changes to the U-Net architecture for Pix2Pix, the most important distinguishing feature of Pix2Pix is its adversarial loss. You will be implementing that here!
# + colab={} colab_type="code" id="YZE-Eyj0LOpm"
# UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED CLASS: get_gen_loss
def get_gen_loss(gen, disc, real, condition, adv_criterion, recon_criterion, lambda_recon):
    '''
    Return the loss of the generator given inputs.
    Parameters:
        gen: the generator; takes the condition and returns potential images
        disc: the discriminator; takes images and the condition and
          returns real/fake prediction matrices
        real: the real images (e.g. maps) to be used to evaluate the reconstruction
        condition: the source images (e.g. satellite imagery) which are used to produce the real images
        adv_criterion: the adversarial loss function; takes the discriminator
                  predictions and the true labels and returns a adversarial
                  loss (which you aim to minimize)
        recon_criterion: the reconstruction loss function; takes the generator
                    outputs and the real images and returns a reconstructuion
                    loss (which you aim to minimize)
        lambda_recon: the degree to which the reconstruction loss should be weighted in the sum
    '''
    # Steps: 1) Generate the fake images, based on the conditions.
    #        2) Evaluate the fake images and the condition with the discriminator.
    #        3) Calculate the adversarial and reconstruction losses.
    #        4) Add the two losses, weighting the reconstruction loss appropriately.
    #### START CODE HERE ####
    # 1) Generate fake images from the source/condition images.
    fake = gen(condition)
    # 2) Score the fakes (conditioned on the source) with the discriminator.
    disc_fake_hat = disc(fake, condition)
    # 3) Adversarial loss: the generator wants the discriminator to label its
    #    fakes as real, i.e. all-ones targets.
    gen_adv_loss = adv_criterion(disc_fake_hat, torch.ones_like(disc_fake_hat))
    #    Reconstruction loss: stay close to the ground-truth paired image.
    gen_rec_loss = recon_criterion(fake, real)
    # 4) Weighted sum; lambda_recon trades realism against faithfulness.
    gen_loss = gen_adv_loss + lambda_recon * gen_rec_loss
    #### END CODE HERE ####
    return gen_loss
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="KLndbJ213hV5" outputId="a713cd00-2b7d-41da-a3cb-90f4dc0ca49f"
# UNIT TEST
def test_gen_reasonable(num_images=10):
    # Case 1: adversarial term only — mul(ones, ones) sums to num_images.
    gen = torch.zeros_like
    disc = lambda x, y: torch.ones(len(x), 1)
    real = None
    condition = torch.ones(num_images, 3, 10, 10)
    adv_criterion = torch.mul
    recon_criterion = lambda x, y: torch.tensor(0)
    lambda_recon = 0
    assert get_gen_loss(gen, disc, real, condition, adv_criterion, recon_criterion, lambda_recon).sum() == num_images
    # Case 2: zero discriminator output makes the adversarial term vanish.
    disc = lambda x, y: torch.zeros(len(x), 1)
    assert torch.abs(get_gen_loss(gen, disc, real, condition, adv_criterion, recon_criterion, lambda_recon)).sum() == 0
    # Case 3: reconstruction term only — |(real+1) - real|.max() = 1, times lambda 2.
    adv_criterion = lambda x, y: torch.tensor(0)
    recon_criterion = lambda x, y: torch.abs(x - y).max()
    real = torch.randn(num_images, 3, 10, 10)
    lambda_recon = 2
    gen = lambda x: real + 1
    assert torch.abs(get_gen_loss(gen, disc, real, condition, adv_criterion, recon_criterion, lambda_recon) - 2) < 1e-4
    # Case 4: both terms contribute (adversarial = 1, reconstruction = 2).
    adv_criterion = lambda x, y: (x + y).max() + x.max()
    assert torch.abs(get_gen_loss(gen, disc, real, condition, adv_criterion, recon_criterion, lambda_recon) - 3) < 1e-4
test_gen_reasonable()
print("Success!")
# + [markdown] colab_type="text" id="SMDZWZTz3ivA"
# ## Pix2Pix Training
#
# Finally, you can train the model and see some of your maps!
# + colab={"base_uri": "https://localhost:8080/", "height": 373, "referenced_widgets": ["aa7565ec3f294fd6b9c592bd5fc0dfcb", "fe98210470c3421c9a39734dc1203817", "a47c53e27edd4ef2b5bc79b3d64c44d3", "54a72fb618d146babc5830644ff65992", "6bf1b15c1b8e42758c8e9768115d6f8e", "<KEY>", "8cdec6ea735847709cc610fef8dc5755", "5042b39eadc14d5ab8b310e23d9c7d96"]} colab_type="code" id="fy6UBV60HtnY" outputId="c174bb25-acbf-4507-c6e2-6bf7ef08661c"
from skimage import color
import numpy as np
def train(save_model=False):
    """Train the pix2pix generator/discriminator pair on the paired-image dataset.

    Relies on notebook globals: `gen`, `disc`, their optimizers, `dataset`,
    the criteria, and display/shape hyperparameters. Each dataset image holds
    the condition and the target side by side, split down the middle.

    Args:
        save_model: if True, checkpoint both models and optimizers every
            `display_step` steps.
    """
    mean_generator_loss = 0
    mean_discriminator_loss = 0
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    cur_step = 0
    for epoch in range(n_epochs):
        # Dataloader returns the batches
        for image, _ in tqdm(dataloader):
            # Left half of each image is the condition, right half the target.
            image_width = image.shape[3]
            condition = image[:, :, :, :image_width // 2]
            condition = nn.functional.interpolate(condition, size=target_shape)
            real = image[:, :, :, image_width // 2:]
            real = nn.functional.interpolate(real, size=target_shape)
            cur_batch_size = len(condition)
            condition = condition.to(device)
            real = real.to(device)
            ### Update discriminator ###
            disc_opt.zero_grad() # Zero out the gradient before backpropagation
            # Generate under no_grad (and detach below) so the discriminator
            # update never backpropagates into the generator.
            with torch.no_grad():
                fake = gen(condition)
            disc_fake_hat = disc(fake.detach(), condition) # Detach generator
            disc_fake_loss = adv_criterion(disc_fake_hat, torch.zeros_like(disc_fake_hat))
            disc_real_hat = disc(real, condition)
            disc_real_loss = adv_criterion(disc_real_hat, torch.ones_like(disc_real_hat))
            disc_loss = (disc_fake_loss + disc_real_loss) / 2
            disc_loss.backward(retain_graph=True) # Update gradients
            disc_opt.step() # Update optimizer
            ### Update generator ###
            gen_opt.zero_grad()
            gen_loss = get_gen_loss(gen, disc, real, condition, adv_criterion, recon_criterion, lambda_recon)
            gen_loss.backward() # Update gradients
            gen_opt.step() # Update optimizer
            # Keep track of the average discriminator loss
            mean_discriminator_loss += disc_loss.item() / display_step
            # Keep track of the average generator loss
            mean_generator_loss += gen_loss.item() / display_step
            ### Visualization code ###
            if cur_step % display_step == 0:
                if cur_step > 0:
                    print(f"Epoch {epoch}: Step {cur_step}: Generator (U-Net) loss: {mean_generator_loss}, Discriminator loss: {mean_discriminator_loss}")
                else:
                    print("Pretrained initial state")
                show_tensor_images(condition, size=(input_dim, target_shape, target_shape))
                show_tensor_images(real, size=(real_dim, target_shape, target_shape))
                show_tensor_images(fake, size=(real_dim, target_shape, target_shape))
                # Reset the running means for the next reporting window.
                mean_generator_loss = 0
                mean_discriminator_loss = 0
                # You can change save_model to True if you'd like to save the model
                if save_model:
                    torch.save({'gen': gen.state_dict(),
                                'gen_opt': gen_opt.state_dict(),
                                'disc': disc.state_dict(),
                                'disc_opt': disc_opt.state_dict()
                               }, f"pix2pix_{cur_step}.pth")
            cur_step += 1
train()
# -
| C3 - Apply Generative Adversarial Network (GAN)/Week 2/C3W2B_Assignment-original.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:root] *
# language: python
# name: conda-root-py
# ---
# # Data Generation with Flatland
#
# This notebook begins to outline how training datasets will be generated in Flatland. This notebook will evolve into a documentation of how to use the tool through a more formal and simplified API.
#
# ### Here's the idea
#
# Some modern protein structure prediction approaches are a little bit complicated to implement. That's why it seems useful to have a simulator that can generate data at least of the same structure that researchers seek to use in such systems. Over time, these simulators can be improved progressively to add some minimal level of realism that should be helpful for initial model debugging. This might include for example modifying simulation parameters to enable a model to train effectively then returning to the more complex form once the simpler problem has been solved. Thus we hope to create a much more smooth path to solving the larger problem than is often followed by those seeking to solve it directly.
#
# Further, even when training larger systems on real data it will be important that system components remain integrated and both the system and its individual components continue to function correctly. Simple toy test cases are often used for this purpose in software test engineering. But in the case of ML software engineering, sometimes it helps if these are slightly realistic. Even further, we are interested in understanding the potential of various additional sources of data to enhance the performance of structure prediction systems.
#
# Below we will evolve a population of polymers using a trivially simple fitness metric and in the course of that retain a "genetic history" of the evolved population. Then, we will compute structures for these polymers using Jax MD. For each "solved" structure we will compute a pairwise "residue" distance matrix and a vector of "bond" angles. Lastly, we will simulate a compound-protein interaction experiment again using Jax MD.
#
# ### Imports
# +
# For now, at bottom of notebook
# -
# ### Configuration
# +
alphabet_size = 3
population_size = 500
genome_length = 10
mutation_rate = 0.15
num_generations = 50
dimension = 2
box_size = 6.8
num_water_particles = 100
# -
# ### Input
#
# The input to this will be a vector specifying which species of particle occur in what order in our polymer, such as the following. I'm so curious how these might fold up and potentially interact!
# +
def fitness_mean_value_target(v, target_value=1.0):
    """Score how close the mean of `v` is to `target_value` (1.0 = exact match)."""
    deviation = jnp.abs(jnp.mean(v) - target_value) / target_value
    return 1 - deviation
@jit
def batched_fitness_mean_value_target(population):
    """JIT-compiled fitness over a whole (pop_size, genome_length) batch via vmap."""
    return vmap(fitness_mean_value_target)(population)
res = evolve_with_mutation(fitness_fn=batched_fitness_mean_value_target,
num_generations=num_generations,
pop_size=population_size,
genome_length=genome_length,
mutation_rate=mutation_rate,
alphabet_size=alphabet_size,
keep_full_population_history=True,
key=random.PRNGKey(1))
fitness, population_history, last_population = res
# -
last_population[0:10]
# +
polymer = last_population[0]
polymer_len = len(polymer)
polymer
# -
#
# ### Simulate folding
#
# Let's simulate a polymer folding!
#
# Usual Jax MD setup
# +
displacement, shift = space.periodic(box_size)
# -
# Define a species matrix describing how pairs should attract or repel
# +
r0_species_matrix = jnp.array(
[[1.0, 1.0, 0.5 ],
[1.0, 1.0 , 1.0],
[0.5, 1.0 , 1.0 ]]
)
# -
# Provide the evolved polymer sequence as a species parameter to the "hydrogen bond" energy function
# +
energy_fn = harmonic_morse_pair(displacement, D0=0., alpha=10.0, k=1.0,
species=polymer, # <======== Here!! ====================
r0=r0_species_matrix)
# -
# Compute the initial positions of all of the particles in the polymer
# +
R = jnp.array([[box_size/2, box_size/4 + 0.5*i*box_size/polymer_len] for i in range(polymer_len)])
R
# -
# Specify that polymer elements should be bonded in the order of the provided sequence and compute initial bond lengths
# +
bonds = jnp.array([[i, i+1] for i in range(polymer_len - 1)], dtype=jnp.int64)
lengths = [0]*len(bonds)
for i in range(polymer_len - 1):
lengths[i] = R[i+1][1] - R[i][1]
# -
# This is what that looks like
# + active=""
#
# plot_system(R=R, box_size=box_size, species=polymer, bonds=bonds)
#
# -
# Combine the "hydrogen" and "covalent" energy functions
# +
bond_energy_fn = bistable_spring_bond(displacement, bonds, r0=lengths)
def combined_energy_fn(R):
return energy_fn(R) + 2*bond_energy_fn(R)
# -
# Find a minimum energy using gradient methods with repeated re-starts / perturbations
# +
# Search for a low-energy conformation: several rounds of thermal perturbation
# followed by FIRE minimization, keeping the best result across rounds.
key, subkey1, subkey2 = random.split(key, 3)
perturb_steps = 1000
perturb_kt = 500
e_best = None
R_best = R
minimize_steps = 100000
num_rounds = 5
solutions = [None for _ in range(num_rounds)]
energies = [None for _ in range(num_rounds)]
for i in range(num_rounds):
    key, subkey = random.split(key)
    print("Running minimization %s" % i)
    print("Perturbing initial conformation...")
    # Initialize a messy starting structure by simulating perturb_steps steps
    # of Brownian dynamics at a high temperature (perturb_kt).
    R_current, max_force_component = run_brownian(combined_energy_fn,
                                                  #R_best,
                                                  R, # <============ Use the initial conformation instead of re-starting
                                                  shift,
                                                  key=subkey,
                                                  num_steps=perturb_steps,
                                                  kT=perturb_kt)
    print("Minimizing energy...")
    R_current, max_force_component = run_minimization(combined_energy_fn, R_current, shift, num_steps=minimize_steps)
    print("Computing solved energy...")
    e = combined_energy_fn(R_current)
    print("Energy for this iteration was %s" % e)
    # NOTE(review): `not e_best` is also True when e_best == 0.0 (falsy), which
    # would wrongly re-accept after a perfect zero-energy round; `e_best is None`
    # would be safer — confirm intent.
    if not e_best or e < e_best:
        e_best = e
        R_best = R_current
        print("Improved lowest energy.")
    solutions[i] = R_current
    energies[i] = e
    # NOTE(review): this prints the current round's energy `e`, not `e_best`.
    print("Lowest energy found was %s" % e)
plot_system(R=R_best, box_size=box_size, species=polymer, bonds=bonds)
# -
# ### Pairwise distances
#
# Given a "folded" structure, obtain a matrix of pairwise distances and visualize.
# +
import numpy as np
def compute_distance_matrix(positions):
    """Return an (n, n) matrix of pairwise Euclidean distances between particles.

    Fix: derive n from `positions` instead of relying on the notebook-global
    `polymer_len`, so the function works for structures of any size.

    Args:
        positions: (n, d) array of particle coordinates.

    Returns:
        np.ndarray of shape (n, n) with distances[i][j] = ||positions[i] - positions[j]||.
    """
    n = len(positions)
    distances = np.zeros(shape=(n, n))
    for i in range(n):
        for j in range(n):
            distances[i][j] = jnp.linalg.norm(positions[i] - positions[j])
    return distances
d = compute_distance_matrix(R_best)
plt.imshow(d)
# -
# ### Bond angles
#
# Given a "folded" structure, obtain a vector of bond angles.
# +
def compute_angle(p0: jnp.ndarray, p1: jnp.ndarray, p2: jnp.ndarray) -> jnp.float32:
    """Angle in degrees at vertex `p1` formed by segments p1-p0 and p1-p2.

    Computed via the law of cosines on the triangle (p0, p1, p2).
    """
    v01 = p1 - p0
    len01 = np.linalg.norm(v01)
    v21 = p1 - p2
    len21 = np.linalg.norm(v21)
    v02 = p0 - p2
    len02 = np.linalg.norm(v02)
    cos_theta = (len01**2 + len21**2 - len02**2)/(2.0*len01*len21)
    theta = jnp.arccos(cos_theta)*180.0/jnp.pi
    # arccos yields [0, 180] degrees; keep the smaller of the two arcs.
    return min(theta, 360 - theta)
p0 = jnp.array([0, 1], dtype=jnp.float32)
p1 = jnp.array([0, 0], dtype=jnp.float32)
p2 = jnp.array([1, 0], dtype=jnp.float32)
assert (90.0 - compute_angle(p0, p1, p2)) < 0.00001
# +
def compute_angles_for_structure(positions: jnp.ndarray) -> jnp.ndarray:
    """Bond angle (degrees) at each interior particle of a polymer chain."""
    num_angles = len(positions) - 2
    angles = np.zeros(shape=(num_angles,))
    for idx in range(num_angles):
        angles[idx] = compute_angle(positions[idx], positions[idx + 1], positions[idx + 2])
    return angles
assert (compute_angles_for_structure(jnp.array([[1,0],[0,0],[0,1]])) - jnp.array([90.0])) < 0.000001
angles = compute_angles_for_structure(R)
assert np.array_equal(angles, jnp.array([180.0 for _ in range(8)]))
print(angles)
# -
compute_angles_for_structure(R_best)
# ### Adding water
#
# Let's see if adding water and patterns of hydrophobicity can help form more regular structures.
#
# +
def add_water(key, existing_coordinates, existing_species, num_water_particles, box_size, dimension=2):
    """Add water coordinates and species to an existing system.

    Water gets a fresh species index (max existing + 1) and uniformly random
    coordinates inside the box.

    Args:
        key: JAX PRNG key used to sample water positions.
        existing_coordinates: (n, dimension) array of current positions.
        existing_species: (n,) array of current species indices.
        num_water_particles: number of water particles to append.
        box_size: side length of the (square/cubic) box.
        dimension: spatial dimension of the system.

    Returns:
        Tuple (coordinates, species) with the water appended at the end.
    """
    # Determine the species index for the additional water
    max_species = max(existing_species)
    water_species = max_species + 1
    # Add an additional species index for each additional water particle
    species = jnp.concatenate((existing_species, jnp.array([water_species]*num_water_particles)))
    # Fix: sample from the freshly split subkey instead of reusing the incoming
    # key (the split result was previously unused, so the caller's key was
    # effectively consumed twice).
    key, split = random.split(key)
    water_coords = random.uniform(
        split, shape=(num_water_particles, dimension),
        minval=0.0, maxval=box_size)
    coordinates = jnp.concatenate((existing_coordinates, water_coords))
    return jnp.array(coordinates), jnp.array(species)
R_solution, species_solution = add_water(
key=key, existing_coordinates=R,
existing_species=polymer,
num_water_particles=num_water_particles,
box_size=box_size,
dimension=dimension)
plot_system(R=R_solution, box_size=box_size, species=species_solution, bonds=bonds)
# -
# ### Compound-target interactions
#
# It's possible that co- or pre-training on compound-protein or protein-protein interaction datasets will further enhance our ability to predict the structure of proteins. Let's simulate the interaction of two different polymers with a range of compounds the visualize these two "structural spectra".
#
# +
# Obtain a polymer sequence from the last population
polymer_a = last_population[0]
#
polymer_a_soln_coords, polymer_a_soln_species = add_water(
key=key, existing_coordinates=R,
existing_species=polymer,
num_water_particles=num_water_particles,
box_size=box_size,
dimension=dimension)
polymer_a_structure = fold_polymer(polymer_a)
# -
polymer_b = last_population[1]
polymer_b
# ## Planned Additions
#
# * Adding water and water (anti-)affinities
# * Ensuring backbone particles stay within reasonable ranges
# * Compound-target interactions
#
# ## Imports
# +
#@title Imports & Utils
# !pip install -q git+https://www.github.com/google/jax-md
import numpy as onp
import jax.numpy as jnp
from jax import random
from jax import jit, grad, vmap, value_and_grad
from jax import lax
from jax import ops
from jax.config import config
config.update("jax_enable_x64", True)
from jax_md import space, smap, energy, minimize, quantity, simulate, partition
from functools import partial
import time
f32 = jnp.float32
f64 = jnp.float64
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 16})
#import seaborn as sns
#sns.set_style(style='white')
def format_plot(x, y):
    """Label the current axes (`x`, `y` are axis-label strings) and enable the grid."""
    plt.grid(True)
    plt.xlabel(x, fontsize=20)
    plt.ylabel(y, fontsize=20)
def finalize_plot(shape=(1, 0.7)):
    """Rescale the current figure by `shape` (relative to its height) and tighten layout."""
    plt.gcf().set_size_inches(
        shape[0] * 1.5 * plt.gcf().get_size_inches()[1],
        shape[1] * 1.5 * plt.gcf().get_size_inches()[1])
    plt.tight_layout()
def calculate_bond_data(displacement_or_metric, R, dr_cutoff, species=None):
    """Find all particle pairs closer than `dr_cutoff` and their separations.

    Args:
        displacement_or_metric: jax-md displacement or metric function.
        R: (n, d) particle coordinates.
        dr_cutoff: distance below which a pair is considered bonded.
        species: per-species bond data is not implemented.

    Returns:
        Tuple (bonds, lengths): an (m, 2) array of index pairs (each unordered
        pair listed once) and the corresponding pair distances.
    """
    if( not(species is None)):
        assert(False)
    # Fix: use the function argument rather than the notebook-global
    # `displacement`, so the caller's space is actually respected.
    metric = space.map_product(space.canonicalize_displacement_or_metric(displacement_or_metric))
    dr = metric(R,R)
    # Fix: `n` was previously the undefined global `N`.
    n = R.shape[0]
    # Upper triangle only (count each pair once) minus the diagonal self-pairs.
    # Fix: apply the `dr_cutoff` argument instead of a hard-coded threshold of 1.
    dr_include = jnp.triu(jnp.where(dr<dr_cutoff, 1, 0)) - jnp.eye(n,dtype=jnp.int32)
    index_list=jnp.dstack(jnp.meshgrid(jnp.arange(n), jnp.arange(n), indexing='ij'))
    i_s = jnp.where(dr_include==1, index_list[:,:,0], -1).flatten()
    j_s = jnp.where(dr_include==1, index_list[:,:,1], -1).flatten()
    ij_s = jnp.transpose(jnp.array([i_s,j_s]))
    bonds = ij_s[(ij_s!=jnp.array([-1,-1]))[:,1]]
    lengths = dr.flatten()[(ij_s!=jnp.array([-1,-1]))[:,1]]
    return bonds, lengths
def plot_system(R, box_size, species=None, ms=10, bonds=[]):
    """Scatter-plot particle positions, optionally colored by species, with
    bonds drawn as grey segments.

    Args:
        R: (n, 2) particle coordinates.
        box_size: axis limits (the box is assumed square).
        species: optional (n,) integer labels; each species gets its own color.
        ms: marker size.
        bonds: iterable of (i, j) index pairs to connect.

    NOTE(review): the mutable default `bonds=[]` is shared across calls —
    harmless here because it is never mutated, but `None` would be safer.
    """
    R_plt = jnp.array(R)
    # Draw bonds first so the particle markers are rendered on top.
    for b in bonds:
        idx0 = b[0]
        idx1 = b[1]
        coord_a = R_plt[idx0]
        coord_b = R_plt[idx1]
        plt.plot([coord_a[0], coord_b[0]], [coord_a[1], coord_b[1]], linewidth=4, color="lightgrey")
    if(species is None):
        plt.plot(R_plt[:, 0], R_plt[:, 1], 'o', markersize=ms)
    else:
        # One plot call per species value so matplotlib cycles the colors.
        for ii in range(jnp.amax(species)+1):
            Rtemp = R_plt[species==ii]
            plt.plot(Rtemp[:, 0], Rtemp[:, 1], 'o', markersize=ms)
    plt.xlim([0, box_size])
    plt.ylim([0, box_size])
    plt.xticks([], [])
    plt.yticks([], [])
    finalize_plot((1,1))
def setup_periodic_box(box_size):
    """Build (displacement_fn, shift_fn) for a periodic box of side `box_size`."""
    half_box = box_size * f32(0.5)

    def displacement_fn(Ra, Rb, **unused_kwargs):
        # Minimum-image displacement, wrapped into [-box/2, box/2).
        return jnp.mod(Ra - Rb + half_box, box_size) - f32(0.5) * box_size

    def shift_fn(R, dR, **unused_kwargs):
        # Move the particles and wrap coordinates back into [0, box).
        return jnp.mod(R + dR, box_size)

    return displacement_fn, shift_fn
key = random.PRNGKey(0)
def run_minimization(energy_fn, R_init, shift, num_steps=5000):
    """Minimize the particle conformation with respect to `energy_fn`.

    Uses jax-md's FIRE (Fast Inertial Relaxation Engine) descent.

    Returns:
        Tuple of (final positions, largest absolute force component); the
        latter is a cheap convergence diagnostic (near zero at a minimum).
    """
    dt_start = 0.001
    dt_max = 0.004
    energy_fn = jit(energy_fn)
    # Instantiate the optimization initial conditions (just R_init) and the
    # per-step update function for FIRE descent.
    init, apply = minimize.fire_descent(energy_fn, shift, dt_start=dt_start, dt_max=dt_max)
    apply = jit(apply)
    @jit
    def scan_fn(state, i):
        return apply(state), 0.
    # NOTE: lax.scan traces scan_fn anyway, so the inner @jit (and jitting
    # energy_fn/apply above) is redundant but harmless.
    # Initialize the state of the system
    state = init(R_init)
    # Perform num_steps of optimization; lax.scan keeps the loop on-device
    # (and differentiable, if that were ever needed).
    state, _ = lax.scan(scan_fn, state, jnp.arange(num_steps))
    return state.position, jnp.amax(jnp.abs(-grad(energy_fn)(state.position)))
def harmonic_morse(dr, D0=5.0, alpha=5.0, r0=1.0, k=50.0, **kwargs):
    """Hybrid pair potential: harmonic well inside r0, Morse tail outside.

    Both branches meet at the minimum value -D0 when dr == r0.
    """
    delta = dr - r0
    harmonic_branch = 0.5 * k * delta**2 - D0
    morse_branch = D0 * (jnp.exp(-2. * alpha * delta) - 2. * jnp.exp(-alpha * delta))
    U = jnp.where(dr < r0, harmonic_branch, morse_branch)
    return jnp.array(U, dtype=dr.dtype)
def harmonic_morse_pair(
    displacement_or_metric, species=None, D0=5.0, alpha=10.0, r0=1.0, k=50.0):
    """The harmonic morse potential summed over all pairs of particles.

    Args:
        displacement_or_metric: jax-md displacement or metric function.
        species: optional per-particle species labels; parameters may then be
            per-species-pair matrices (smap.pair semantics).
        D0, alpha, r0, k: harmonic-morse parameters (scalars or matrices).

    Returns:
        An energy function mapping positions R to the total pair energy.
    """
    # Initialize various parameters of the harmonic morse function
    # (cast to float32 so smap/jit see consistent dtypes).
    D0 = jnp.array(D0, dtype=f32)
    alpha = jnp.array(alpha, dtype=f32)
    r0 = jnp.array(r0, dtype=f32)
    k = jnp.array(k, dtype=f32)
    # Pass the harmonic morse function defined above along with its parameters and a
    # displacement/metric function.
    return smap.pair(
        harmonic_morse,
        space.canonicalize_displacement_or_metric(displacement_or_metric),
        species=species,
        D0=D0,
        alpha=alpha,
        r0=r0,
        k=k)
def bistable_spring(dr, r0=1.0, a2=2, a4=5, **kwargs):
    """Double-well bond potential: quartic minus quadratic in (dr - r0)."""
    x = dr - r0
    return a4 * x**4 - a2 * x**2
def bistable_spring_bond(
    displacement_or_metric, bond, bond_type=None, r0=1, a2=2, a4=5):
    """Convenience wrapper to compute energy of particles bonded by springs.

    Maps `bistable_spring` over the (i, j) index pairs in `bond`, returning an
    energy function R -> total bonded energy.
    """
    # Cast parameters to float32 for consistent dtypes under jit.
    r0 = jnp.array(r0, f32)
    a2 = jnp.array(a2, f32)
    a4 = jnp.array(a4, f32)
    return smap.bond(
        bistable_spring,
        space.canonicalize_displacement_or_metric(displacement_or_metric),
        bond,
        bond_type,
        r0=r0,
        a2=a2,
        a4=a4)
def run_brownian(energy_fn, R_init, shift, key, num_steps, kT):
    """Run Brownian (overdamped Langevin) dynamics from R_init for num_steps steps.

    Returns:
        Tuple of (final positions, largest absolute force component of the
        end state).
    """
    init, apply = simulate.brownian(energy_fn, shift, dt=0.00001, kT=kT, gamma=0.1)
    apply = jit(apply)
    @jit
    def scan_fn(state, current_step):
        # current_step is unused; scan simply advances the simulation state.
        return apply(state), 0
    key, split = random.split(key)
    state = init(split, R_init)
    state, _ = lax.scan(scan_fn, state, jnp.arange(num_steps))
    return state.position, jnp.amax(jnp.abs(-grad(energy_fn)(state.position)))
# +
import jax.numpy as jnp
from jax import random, jit, vmap
from matplotlib import pyplot
import numpy as np
key = random.PRNGKey(1234)
def generate_population(key, length=10, pop_size=10, alphabet_size=4):
    """Sample a (pop_size, length) matrix of random genomes.

    Generalized: `alphabet_size` (previously hard-coded as maxval=4) controls
    the number of distinct symbols; values are drawn uniformly from
    [0, alphabet_size). The default preserves the original behavior.

    Args:
        key: JAX PRNG key.
        length: genome length.
        pop_size: number of genomes.
        alphabet_size: number of symbols in the genetic alphabet.
    """
    return random.randint(
        minval=0, maxval=alphabet_size, shape=(pop_size, length), key=key)
def fitness_mean_value_target(v, target_value=1.0):
    """Fitness in (-inf, 1]: penalize the relative distance of mean(v) from target_value."""
    error = jnp.mean(v) - target_value
    return 1 - jnp.abs(error) / target_value
@jit
def batched_fitness_mean_value_target(population):
    """JIT-compiled fitness over a whole (pop_size, genome_length) batch via vmap."""
    return vmap(fitness_mean_value_target)(population)
def mutate_population(key, population, mutation_rate=0.15,
                      alphabet_size=4):
    """Apply random additive mutations to every genome position.

    Each site independently draws an offset: 0 (probability 1 - mutation_rate)
    leaves the symbol unchanged, and each non-zero offset has probability
    mutation_rate / alphabet_size. Offsets are added modulo the alphabet.

    NOTE(review): two suspected off-by-ones to confirm —
      * offsets are drawn from alphabet_size + 1 distinct values, and
      * the wrap-around divides by `alphabet_size - 1`, which collapses symbols
        into [0, alphabet_size - 2]; `jnp.mod(..., alphabet_size)` looks like
        the intent. Left as-is to preserve existing results.
    """
    individual_p_mutation = mutation_rate/alphabet_size
    # Lazily double-counts self-transitions as a type of mutation
    # in the interest of prototyping
    p_no_mutation = (1 - mutation_rate)
    mutation_probs = [
        individual_p_mutation for _ in range(alphabet_size)
    ]
    mutation_probs = [p_no_mutation] + mutation_probs
    mutation = random.choice(
        key,
        a=jnp.array(range(alphabet_size + 1)),
        shape=population.shape,
        p=jnp.array(mutation_probs))
    return jnp.mod(population + mutation, alphabet_size - 1)
def resample_population(key, population, fitnesses):
    """Fitness-proportional resampling (with replacement) of population rows."""
    pop_size = population.shape[0]
    # Draw pop_size row indices, weighting each row by its fitness.
    chosen_indices = random.choice(
        key=key,
        a=jnp.arange(pop_size),
        shape=(pop_size,),
        p=fitnesses, replace=True)
    return jnp.stack([population[idx] for idx in chosen_indices])
def evolve_with_mutation(key,
                         fitness_fn,
                         keep_full_population_history=False,
                         num_generations=100, pop_size=100,
                         genome_length=10, report_every=10,
                         mutation_rate=0.15,
                         alphabet_size=4):
    """Run a simple resample-and-mutate evolutionary loop.

    Args:
        key: JAX PRNG key.
        fitness_fn: batched fitness over a (pop_size, genome_length) array.
        keep_full_population_history: if True, record every generation's
            population alongside the fitness history.
        num_generations, pop_size, genome_length: loop/population dimensions.
        report_every: print mean fitness every this many generations.
        mutation_rate: base per-site mutation rate (annealed as fitness rises).
        alphabet_size: number of genome symbols.

    Returns:
        Tuple of (fitness_history, population_history or None, final population).
    """
    base_mutation_rate = mutation_rate
    fitness_history = np.zeros(shape=(num_generations,))
    population_history = None
    if keep_full_population_history:
        population_history = np.zeros(shape=(num_generations, pop_size, genome_length))
    population = generate_population(
        key=key,
        length=genome_length,
        pop_size=pop_size)
    for i in range(num_generations):
        if keep_full_population_history:
            # Could keep track at any point in this loop, making sure
            # at least fitness_history and population_history are in
            # sync.
            population_history[i] = population
        fitnesses = fitness_fn(population)
        fitness_history[i] = jnp.mean(fitnesses)
        # Min-max normalize fitnesses into (0, 1] for resampling weights;
        # epsilon guards the division when all fitnesses are equal.
        max_fitness = jnp.max(fitnesses)
        min_fitness = jnp.min(fitnesses)
        epsilon = 0.000000000000001
        norm_fitnesses = (fitnesses - min_fitness + epsilon)/(max_fitness - min_fitness + epsilon)
        if i % report_every == 0.0:
            print("Current average fitness: %s" % jnp.mean(fitnesses))
        key, _ = random.split(key)
        population = resample_population(
            key=key,
            population=population,
            fitnesses=norm_fitnesses)
        # Anneal: mutate less as the normalized population fitness improves.
        mutation_rate = base_mutation_rate*(1-jnp.mean(norm_fitnesses)/2)
        key, _ = random.split(key)
        population = mutate_population(
            key, population,
            mutation_rate=mutation_rate
        )
    return fitness_history, population_history, population
# -
| nb/dev/data-generation-dev.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 04_03: Indexing and Slicing NumPy Arrays
# +
import math
import collections
import numpy as np
import pandas as pd
import matplotlib.pyplot as pp
# %matplotlib inline
# -
# Load the Mona Lisa image as a NumPy array (rows, columns, RGB channels).
monalisa = np.load('monalisa.npy')
monalisa.shape
pp.imshow(monalisa)
# Scalar access is [row, column, channel]; negative indices count from the end.
monalisa[600,400,0]
monalisa[-50,-50,1]
monalisa[1148, 754, 1]
monalisa[1000,900,2]
# Chained assignment zeroes all three channels of a single pixel.
monalisa[600,400,0] = monalisa[600,400,1] = monalisa[600,400,2] = 0
# Plain nested lists do NOT support tuple indexing...
just_a_list = [[1,2,3],[4,5,6],[7,8,9]]
# ...so this raises a TypeError (shown deliberately for contrast):
just_a_list[1,2]
# Lists need one bracket per level instead.
just_a_list[1][2]
# Slicing selects a rectangular sub-image; ':' takes a whole axis and '...'
# stands in for all remaining axes.
pp.imshow(monalisa[400:800,200:600,0:3])
pp.imshow(monalisa[400:800,:,:])
pp.imshow(monalisa[400:800,...])
# A step of 20 downsamples; a negative step also flips that axis.
pp.imshow(monalisa[::20,::20,:])
pp.imshow(monalisa[::-20,::20,:])
# Indexing with a scalar drops that axis, yielding a 1-D row...
row = monalisa[20,::20,0]
row.shape
row
pp.plot(monalisa[20,::20,0])
# ...while a length-1 slice keeps the axis (shape (1, n)).
rect = monalisa[20:21,::20,0]
rect.shape
rect
# Assigning to a slice writes into the original array (scalar broadcast here).
monalisa[20:300,20:300,:] = 255
pp.imshow(monalisa)
monalisa[20:300,20:300,:] = np.random.randint(100,255,size=(280,280,3))
pp.imshow(monalisa)
# Boolean masks select by condition; assignment through a mask is in place.
monalisa_bw = np.loadtxt('monalisa.txt')
monalisa_bw < 120
monalisa_bw[monalisa_bw < 120] = 0
pp.imshow(monalisa_bw, cmap='gray')
# Python list slices are copies: mutating the slice leaves the list unchanged...
mylist = [0,1,2,3,4,5]
myslice = mylist[0:4]
myslice[2] = myslice[3] = 100
myslice
mylist
# ...but NumPy slices are views: writing to the view changes the parent array.
monaslice = monalisa[20:300,20:300,:]
monaslice[:,:,:] = 255
pp.imshow(monalisa)
# Use .copy() to get an independent array that can be modified safely.
monacopy = monalisa.copy()
monacopy[400:750,400:750,:] = 0
pp.imshow(monacopy)
pp.imshow(monalisa)
| chapter4/04_03_indexing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Classifying Images with CNNs
#
# In this exercise you will design a Convolutional Neural Network (CNN) for Fashion Mnist.
#
# CNNs are the workhorses of modern computer vision.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# -
from keras.datasets import fashion_mnist
# +
# Load Fashion-MNIST, add an explicit grayscale channel axis for Conv2D, and
# scale pixel values into [0, 1].
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255 #-1 makes the reshape function adaptive
X_test = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255
X_train.shape
# -
# ### Exercise 1
#
# Why are we reshaping the data?
#
# What's the new shape?
plt.imshow(X_train[0, :, :, 0], cmap='gray')
# ## Simplest CNN
#
# Let's build a convolutional model! For this, we need to have the data in its original shape. Also note that when we reshape the data below, we add a dimension of 1 - this is the number of **channels** in the image, which is just 1 because these are grayscale images. If they were color, this would be 3 for RGB.
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Flatten
# +
model = Sequential([
Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=1)
# -
# ## Exercise 2
#
# - Why are we using the `sparse_categorical_crossentropy` above?
# ## Exercise 3: A better CNN
#
# Improve the CNN design above. It is up to you what the model will be. Here are some things you need to decide:
# * how many convolutional layers?
# * what spatial size will your convolutions be?
# * how many channels will your convolutions be?
# * what nonlinearity will you use?
# * will you use pooling? what type?
# * how many fully-connected layers will you have?
# * will you use dropout or batch normalization or regularization?
# * what batch size will you use for training?
#
# Keras provides a special layer called `Flatten` to flatten the convolutional features into a vector before the fully-connected layers. You should look at the documentation for Keras's convolutional layers: http://keras.io/layers/convolutional/. In particular, you may want to look at `Conv2D`, `MaxPooling2D`, `AveragePooling2D`, `Flatten`, and `Dropout`. For this problem, you make want to use the `'rmsprop'` optimizer - it is an algorithm that adapts the learning rate during learning for you automatically.
#
# Can you get to 98% accuracy? You shouldn't need more than a few epochs to do pretty well.
#
# Suggestions:
# * Try using at least 2 convolutional layers. This should get you off to a good start and it will come in handy later.
# +
model = Sequential([
Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=1)
# -
score = model.evaluate(X_test, y_test, verbose=2)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# +
model = Sequential([
Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=1)
# +
model = Sequential([
Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation='relu'),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=1)
# +
model = Sequential([
Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Conv2D(32, (5, 5), activation='relu'),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=1)
# +
model = Sequential([
Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Conv2D(16, (5, 5), activation='relu'),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=1)
# +
model = Sequential([
Conv2D(64, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Conv2D(32, (5, 5), activation='relu'),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=1)
# +
model = Sequential([
Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Conv2D(64, (5, 5), activation='relu'),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=1)
# +
model = Sequential([
Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Conv2D(64, (5, 5), activation='relu'),
Conv2D(24, (3, 3), activation='relu'),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=1)
# +
model = Sequential([
Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Conv2D(64, (5, 5), activation='relu'),
Conv2D(24, (3, 3), activation='relu'),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=256, epochs=1)
# +
model = Sequential([
Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Conv2D(64, (5, 5), activation='relu'),
Conv2D(24, (3, 3), activation='relu'),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=1028, epochs=1)
# +
model = Sequential([
Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Conv2D(64, (5, 5), activation='relu'),
Conv2D(24, (3, 3), activation='relu'),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=1)
model.summary()
# +
model = Sequential([
Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Conv2D(64, (5, 5), activation='relu'),
Conv2D(24, (3, 3), activation='relu'),
MaxPooling2D(),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=1)
model.summary()
# -
# +
# Sweep the first conv layer's channel count (16..112 in steps of 16) and
# record the single-epoch training accuracy for each setting.
hists = []
nodeRange = []
for i in range(16,128,16):
    model = Sequential([
        Conv2D(int(i), (3, 3), input_shape=(28, 28, 1), activation='relu'),
        Conv2D(32, (5, 5), activation='relu'),
        Conv2D(32, (3, 3), activation='relu'),
        Flatten(),
        Dense(10, activation='softmax')
    ])
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    h = model.fit(X_train, y_train, batch_size=128, epochs=1)
    nodeRange.append(int(i))
    # NOTE(review): newer Keras stores this metric under 'accuracy', not 'acc';
    # this line raises KeyError there — confirm the installed Keras version.
    hists.append(h.history['acc'])
# Each history is a one-element list (epochs=1), so scatter accepts it.
plt.scatter(nodeRange, hists)
plt.pause(0.05)
# +
hists = []
nodeRange = []
for i in range(16,128,16):
model = Sequential([
Conv2D(64, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Conv2D(int(i), (5, 5), activation='relu'),
Conv2D(32, (3, 3), activation='relu'),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
h = model.fit(X_train, y_train, batch_size=128, epochs=1)
nodeRange.append(int(i))
hists.append(h.history['acc'])
plt.scatter(nodeRange, hists)
plt.pause(0.05)
# +
model = Sequential([
Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Dropout(0.3),
Conv2D(48, (5, 5), activation='relu'),
MaxPooling2D(),
Conv2D(54, (5, 5), activation='relu'),
Dropout(0.3),
Conv2D(64, (5, 5), activation='relu'),
MaxPooling2D(),
Flatten(),
Dense(200,activation='relu'),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=5)
model.summary()
# -
score = model.evaluate(X_test, y_test, verbose=2)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# +
#this will run the training on the cpu
import tensorflow as tf
with tf.device('cpu:0'):
model = Sequential([
Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'),
Dropout(0.3),
Conv2D(48, (5, 5), activation='relu'),
MaxPooling2D(),
Conv2D(54, (5, 5), activation='relu'),
Dropout(0.3),
Conv2D(64, (5, 5), activation='relu'),
MaxPooling2D(),
Flatten(),
Dense(200,activation='relu'),
Dense(10, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=128, epochs=5)
model.summary()
# -
| day_3/Lab_14_DL Fashion Mnist with CNNs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Item 1: Know Which Version of Python You're Using
# ## To check python version
# For python 2
# $ python --version
#
# For python 3
# $ python3 --version
# ## To check at runtime
import sys
# Interpreter version at runtime: the structured tuple first, then a
# separator, then the human-readable version string.
for item in (sys.version_info, '=====', sys.version):
    print(item)
# ## Things to Remember
# * There are two major versions of Python still in active use: Python 2 and Python 3.
# * There are multiple popular runtimes for Python: CPython, Jython, IronPython, PyPy, etc.
# * Be sure that the command-line for running Python on your system is the version you expect it to be.
# * Prefer Python 3 for your next project because that is the primary focus of the Python community.
| notes/Chapter_1:_Pythonic_Thinking/Item 1 Know Which Version of Python You're Using.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="hKyncAkv60-X" colab_type="text"
# #Ejemplo 6: Predicción de spam
# ---
#
#
# + [markdown] id="EtNgaqB18EO3" colab_type="text"
# **Objetivo y comprensión del problema**
#
# El objetivo del problema consiste en predecir la posibilidad de que un texto corresponda a un mensaje de spam. Cada una de las tuplas tiene un texto y una clasificación que se utilizará en el entrenamiento.
#
# Se trata de un problema de clasificación.
#
#
#
#
# + [markdown] id="gjrOB4onWM2A" colab_type="text"
# ## Paso 1: Gestión de los datos
#
#
#
# + [markdown] id="le6H33Y5YNek" colab_type="text"
# Para la configuración de las bibliotecas a importar, se incluye Keras, scikit-learn y pandas.
#
# Cabe destacar el uso de diferentes tipos de capas:
#
# * [Embedding](https://keras.io/layers/embeddings/)
# * [LSTM](https://keras.io/layers/recurrent/)
# + id="7oiv47j46wFT" colab_type="code" outputId="9cbf5c3e-be5b-40f9-9142-7fcb54779576" executionInfo={"status": "ok", "timestamp": 1556271023048, "user_tz": -120, "elapsed": 2445, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
from keras.layers import Embedding, Dense, LSTM
from keras.models import Sequential
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
from sklearn.metrics import confusion_matrix
import pandas as pd
# + [markdown] id="1h5uEsH6jepk" colab_type="text"
# **1.1-Carga de datos**
#
# En este caso, los datos están disponibles como un CSV que se carga desde un directorio.
# + id="b4vLtyoB5tlN" colab_type="code" colab={}
from google.colab import drive
# Mount Google Drive so the CSV under /content/drive is reachable below.
drive.mount('/content/drive')
# + [markdown] id="EWlgqTRS6amW" colab_type="text"
# * Verificar dónde están nuestros datos.
# + id="nInSACwf6O2N" colab_type="code" colab={}
# !ls "/content/drive/My Drive"
# + [markdown] id="2FARKKjo6gIA" colab_type="text"
# * Cargar los datos en un dataset.
# + id="rDy0E3CD59JU" colab_type="code" colab={}
# Spam corpus; the code below reads its 'Class' (ham/spam) and 'Text' columns.
dataset = pd.read_csv("/content/drive/My Drive/Colab Notebooks/data/spam.csv")
# + [markdown] id="MwaEDP0n65Ig" colab_type="text"
# * Se cargan 5572 tuplas.
# + id="_-iTmFjA7Oe_" colab_type="code" outputId="7a4169a1-7e63-46be-b59b-50bf55b42f9e" executionInfo={"status": "ok", "timestamp": 1556271070834, "user_tz": -120, "elapsed": 622, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Number of rows loaded (5572 per the markdown above).
print (len(dataset))
# + [markdown] id="P4PBAfXMYd0_" colab_type="text"
# **1.2-Visualización de los datos**
#
# * Se puede comprobar la forma que tienen nuestros datos. En este caso, 5572 instancias con 2 dimensiones (clasificación y texto).
#
# + id="LRLUEzvj7UwG" colab_type="code" outputId="b06e7f0f-d646-4092-f972-977663d7bdb4" executionInfo={"status": "ok", "timestamp": 1556271074604, "user_tz": -120, "elapsed": 611, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 357}
# Quick structural look at the frame: sample rows, column names, dtypes, stats.
print(dataset.head(5))
print(dataset.columns.values)
print(dataset.info())  # info() prints its report itself and returns None
print(dataset.describe())
# + [markdown] id="NSBN3mvgZATQ" colab_type="text"
# **1.3-Codificar los datos**
#
# En este caso los datos no son numéricos con lo que requieren procesamiento:
#
# * Se separan los textos y las clases. Para ello, se proyecto la columna "Class" y se obtiene un índice (i) y su texto.
# * Si la clase es "ham" se pone 0 y en otro caso "spam" se pone 1.
# * Se convierten los arrays Python en arrays de numpy.
#
#
#
# + id="S7NvXKvZZRX5" colab_type="code" colab={}
# Split the frame into the raw texts and a 0/1 label vector
# (0 = ham, 1 = spam), then convert both to numpy arrays.
texts = []
classes = []
for idx, cls in enumerate(dataset['Class']):
    texts.append(dataset['Text'][idx])
    classes.append(0 if cls == 'ham' else 1)
texts = np.asarray(texts)
classes = np.asarray(classes)
# + [markdown] id="Bj5UsXvwZGAY" colab_type="text"
# **1.4-Seleccionar los datos**
#
# En este caso, no se utiliza procesamiento de lenguaje natural clásico. Esto correspondería con utilizar técnicas de lematización, tokenización, normalización, etc. e incluso semantización de los tokens. Para conocer más sobre el procesamiento de lenguaje natural clásico se recomienda la biblioteca de Python [NLTK](https://www.nltk.org/).
#
# El enfoque que se sigue en este caso es simplemente separar los tokens (palabras) y con ello generar vectores de descripción que después se utilizarán en el entrenamiento como instancias realizando comparaciones de cuán de lejos/cerca esta un vector de otro. Por lo tanto, se pasa de un entorno de texto a un entorno vectorial.
#
# Como los vectores de entrada no tendrán el mismo número de palabras, se rellenan hasta el máximo (pad_sequences).
#
# Finalmente, se barajan los datos transformados, vectores, para que formen aleatoriamente parte del entrenamiento y del test.
#
#
#
# + id="aZqddfL0a6r0" colab_type="code" outputId="6230af46-e86c-4ca9-951a-e25f3fd52e7a" executionInfo={"status": "ok", "timestamp": 1556271193598, "user_tz": -120, "elapsed": 794, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
# number of words used as features
maxFeatures = 10000
# max document length
maxLen = 500
# 80/20 train/test split sizes
trainingData = int(len(texts) * .8)
validationData = int(len(texts) - trainingData)
# tokenizer: cap the vocabulary at maxFeatures.  Without num_words the
# tokenizer emits indices up to the full vocabulary size, which can exceed
# the Embedding layer's input_dim (maxFeatures) and index out of range.
tokenizer = Tokenizer(num_words=maxFeatures)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
# (typo fixed in the message: "encontaron" -> "encontraron")
print("Se encontraron {0} palabras únicas: ".format(len(word_index)))
# Pad/truncate every sequence to exactly maxLen tokens
data = pad_sequences(sequences, maxlen=maxLen)
print("Forma de los datos: ", data.shape)
np.random.seed(42)
# shuffle data so train/test membership is random but reproducible
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = classes[indices]
X_train = data[:trainingData]
y_train = labels[:trainingData]
X_test = data[trainingData:]
y_test = labels[trainingData:]
# + [markdown] id="3y6KP-u_ZKsx" colab_type="text"
# ## Paso 2: Arquitectura e implementación de nuestra red
#
#
# 1. La entrada de nuestra red será una capa de Embeddings para procesar los vectores de dimensión 500 y generar una salida de 32. A continuación, la siguiente capa tomará estos 32 valores.
# 2. La función de activación en la capa de salida será "sigmoid". De esta forma, se clasificará en spam o no spam el vector de entrada.
# 3. La función de pérdida será **binary_crossentropy**. Para realizar clasificación binaria.
# 4. La función de optimización **rmsprop**.
# 5. Métricas: en este caso se selecciona sólo la precisión.
#
#
# + id="zHliakwo7kEH" colab_type="code" outputId="1dd1b040-d2ed-4a76-d2fe-3b41f6ef4605" executionInfo={"status": "error", "timestamp": 1579204146579, "user_tz": -60, "elapsed": 619, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mCG_bEAyeJe7s-Or3iyofto_TPC-l8O-btzUc_xjQ=s64", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 243}
# Binary spam/ham classifier, matching the contract stated in the markdown
# above (sigmoid output, binary_crossentropy loss, rmsprop optimizer).
# Fixes in this cell:
#  * SpatialDropout1D was used but never imported (only Embedding, Dense and
#    LSTM are imported above), which raised NameError at runtime.
#  * The 3-unit softmax head with categorical_crossentropy was inconsistent
#    with the 0/1 integer labels built earlier; a single sigmoid unit with
#    binary cross-entropy is the correct head for this problem.
network = Sequential()
network.add(Embedding(maxFeatures, 64))
network.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
network.add(Dense(1, activation='sigmoid'))
network.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# + [markdown] id="OLLB1Lv4MdIJ" colab_type="text"
# * Se puede mostrar la red generada en un fichero.
#
# ---
#
#
# + id="eF11lwPlE143" colab_type="code" colab={}
from keras.utils import plot_model
# Writes a PNG diagram of the layer graph to Drive.
plot_model(network, to_file='/content/drive/My Drive/Colab Notebooks/images/network.png')
# + [markdown] id="PDcXgfk_dqgX" colab_type="text"
#
#
# ```
# # This is formatted as code
# ```
#
# ## Paso 3: Entrenamiento
#
#
# + [markdown] id="3z79kgu1dukq" colab_type="text"
# En este caso el entrenamiento se realiza utilizando el 80% de los datos y el 20% para el test (validation_split)
# + id="YYQJ2URZ7y3-" colab_type="code" outputId="5a9effda-0e9c-4175-e7bc-0dded1a729ce" executionInfo={"status": "ok", "timestamp": 1556273157618, "user_tz": -120, "elapsed": 171230, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 381}
# 10 epochs, mini-batches of 60; validation_split holds out the last 20%
# of the (already shuffled) training rows for validation each epoch.
history = network.fit(X_train, y_train, epochs=10, batch_size=60, validation_split=0.2)
# + [markdown] id="5XST_lkhgVHp" colab_type="text"
#
# + id="lUVtTwap9H6M" colab_type="code" outputId="6d2669fc-fe13-43fe-dfcf-d260638ef734" executionInfo={"status": "ok", "timestamp": 1556271674088, "user_tz": -120, "elapsed": 526, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 54}
# NOTE(review): newer Keras versions store this metric under 'accuracy'
# rather than 'acc' — confirm the key for the Keras version in use.
print("Accuracy (entrenamiento):",history.history['acc'])
# + [markdown] id="SjKKUANAeV_R" colab_type="text"
# ## Paso 4: Test y Predicción
# + [markdown] id="gcRmWsOreYcX" colab_type="text"
# En este caso, se va a validar con el conjunto de test:
#
#
# * Se pasa como parámetro los datos de test.
# * Se obtiene el valor de la predicción como una probabilidad.
# * Se muestra la matriz de confusión y se calcula "a mano" la precisión de esta validación.
#
#
# + id="raAMnsmK76ko" colab_type="code" outputId="cf59191c-6d70-4172-cd22-9f9fbf14aed5" executionInfo={"status": "ok", "timestamp": 1556271713741, "user_tz": -120, "elapsed": 4230, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 85}
# NOTE(review): Sequential.predict_classes/predict_proba were removed in
# TF 2.6+; with modern Keras use (network.predict(X_test) > 0.5) instead —
# confirm the installed version.
pred = network.predict_classes(X_test)
# evaluate() returns [loss, accuracy] per the compile() metrics
acc = network.evaluate(X_test, y_test)
proba_rnn = network.predict_proba(X_test)
print("Test loss is {0:.2f} accuracy is {1:.2f} ".format(acc[0],acc[1]))
# Fix: scikit-learn's confusion_matrix signature is (y_true, y_pred); the
# previous call passed (pred, y_test), printing the transposed matrix.
print(confusion_matrix(y_test, pred))
# + [markdown] id="p2oxsPGxcTPy" colab_type="text"
# * Se muestra a continuación la precisión y pérdida por cada una de las iteraciones.
# + id="IS-0vI7FcK3h" colab_type="code" outputId="ce172c03-a63b-4f41-fe2f-19bbf4b59177" executionInfo={"status": "ok", "timestamp": 1556271776098, "user_tz": -120, "elapsed": 822, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 573}
import matplotlib.pyplot as plt
# Learning curves: accuracy first, then loss, training vs. validation.
for train_key, val_key, title, y_label in (
        ('acc', 'val_acc', 'Model accuracy', 'Accuracy'),
        ('loss', 'val_loss', 'Model loss', 'Loss')):
    plt.plot(history.history[train_key])
    plt.plot(history.history[val_key])
    plt.title(title)
    plt.ylabel(y_label)
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
# + [markdown] id="vyiyqrgvRB6x" colab_type="text"
# #Paso 6: Guardar configuración del modelo
# + id="TuloBttNRIiK" colab_type="code" outputId="cf2bf682-e298-4e82-9b6f-1134e9a49367" executionInfo={"status": "ok", "timestamp": 1553204379012, "user_tz": -60, "elapsed": 3606, "user": {"displayName": "<NAME>", "photoUrl": "https://lh6.googleusercontent.com/-gI-H83zdbVM/AAAAAAAAAAI/AAAAAAAACLg/z151J6c9Qtk/s64/photo.jpg", "userId": "18171552009107651781"}} colab={"base_uri": "https://localhost:8080/", "height": 468}
# Persist the trained model (architecture + weights + optimizer state) ...
network.save('/content/drive/My Drive/Colab Notebooks/models/ejemplo-6.h5')
network.summary()
from keras.models import load_model
# ... then round-trip it: reload and confirm the summary matches.
network = load_model('/content/drive/My Drive/Colab Notebooks/models/ejemplo-6.h5')
network.summary()
# + [markdown] id="abfJ8bCxfDhW" colab_type="text"
# #Tareas
#
# 1. Cambiar el tipo de la función de pérdida y ver cómo afecta al resultado.
# 2. Cambiar la función de optimización y ver cómo afecta al resultado.
# 3. Cambiar la topología de la red.
# + [markdown] id="kVJ2xnABH725" colab_type="text"
# #Otras referencias
#
# * https://github.com/PacktPublishing/Keras-Deep-Learning-Cookbook/blob/master/Chapter04/spam-detection/spam_detection.py
| intro-deep-learning-es/Ejemplo-6.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.0
# language: julia
# name: julia-1.6
# ---
# + [markdown] toc="true"
# # Table of Contents
# <p><div class="lev1 toc-item"><a href="#Determinant" data-toc-modified-id="Determinant-1"><span class="toc-item-num">1 </span>Determinant</a></div><div class="lev2 toc-item"><a href="#Definition-of-determinant" data-toc-modified-id="Definition-of-determinant-11"><span class="toc-item-num">1.1 </span>Definition of determinant</a></div><div class="lev2 toc-item"><a href="#Some-interpretations-of-determinant" data-toc-modified-id="Some-interpretations-of-determinant-12"><span class="toc-item-num">1.2 </span>Some interpretations of determinant</a></div><div class="lev2 toc-item"><a href="#Some-properties-of-determinant-(important)" data-toc-modified-id="Some-properties-of-determinant-(important)-13"><span class="toc-item-num">1.3 </span>Some properties of determinant (important)</a></div>
# -
# # Determinant
using Pkg
Pkg.activate(pwd())
Pkg.instantiate()
using LinearAlgebra, Plots, Symbolics
# We review some basic facts about matrix determinant.
#
# ## Definition of determinant
#
# - The **determinant** of a square matrix $\mathbf{A} \in \mathbb{R}^{n \times n}$ is
# $$
# \det (\mathbf{A}) = \sum (-1)^{\phi(j_1,\ldots,j_n)} \prod_{i=1}^n a_{ij_i},
# $$
# where the summation is over all permutation $(j_1, \ldots, j_n)$ of the set of integers $(1,\ldots,n)$ and $\phi(j_1,\ldots,j_n)$ is the number of transposition to change $(1,\ldots,n)$ to $(j_1,\ldots,j_n)$. $(-1)^{\phi(j_1,\ldots,j_n)}$ is also called the **sign of permutation**.
#
# - Examples: $n = 2$ and 3.
# $$
# \det \begin{pmatrix} a_{11} & a_{12} \\ a_{21} & a_{22} \end{pmatrix} = (-1)^{\phi(1,2)} a_{11} a_{22} + (-1)^{\phi(2,1)} a_{12} a_{21} = a_{11} a_{22} - a_{12} a_{21}.
# $$
# n = 2
# Symbolic 2x2 determinant; scalarize expands the array variable into entries.
@variables A[1:2, 1:2]
det(A) |> Symbolics.scalarize
# n = 3
# expand distributes the products into the signed-permutation sum form.
@variables A[1:3, 1:3]
det(A) |> Symbolics.scalarize |> expand
# n = 4
# 4! = 24 signed terms in the expansion.
@variables A[1:4, 1:4]
det(A) |> Symbolics.scalarize |> expand
# ## Some interpretations of determinant
#
# <img src="./determinant.png" width=400 align="center"/>
#
# - Interpretation of the (absolute value of) determinant as the **volume of the parallelotope** defined by the columns of the matrix. For example, if $\mathbf{X} \in \mathbb{R}^2$ has two columns $\mathbf{x}_1$ and $\mathbf{x}_2$, then
# \begin{eqnarray*}
# \text{area} &=& bh = \|\mathbf{x}_1\|\|\mathbf{x}_2\| \sin(\theta) \\
# &=& \|\mathbf{x}_1\| \|\mathbf{x}_2\| \sqrt{1 - \left( \frac{\langle \mathbf{x}_1, \mathbf{x}_2 \rangle}{\|\mathbf{x}_1\| \|\mathbf{x}_2\|} \right)^2} \\
# &=& \sqrt{\|\mathbf{x}_1\|^2 \|\mathbf{x}\|^2 - (\langle \mathbf{x}_1, \mathbf{x}_2\rangle)^2} \\
# &=& \sqrt{(x_{11}^2 + x_{12}^2)(x_{21}^2+x_{22}^2) - (x_{11}x_{21} + x_{12}x_{22})^2} \\
# &=& |x_{11} x_{22} - x_{12} x_{21}| \\
# &=& |\det(\mathbf{X})|.
# \end{eqnarray*}
#
# <img src="./Determinant_parallelepiped.svg" width=400 align="center"/>
#
# - Another interpretation of the determinant is the volume changing factor when operating on a set in $\mathbb{R}^n$. $\text{vol}(f(S)) = |\det(\mathbf{A})| \text{vol}(S)$ where $f: \mathbb{R}^n \mapsto \mathbb{R}^n$ is the linear mapping defined by $\mathbf{A}$.
#
# - Recall that for differentiable function $f: \mathbb{R}^n \mapsto \mathbb{R}^n$, the **Jacobian matrix** $\operatorname{D} f(\mathbf{x}) \in \mathbb{R}^{n \times n}$ is
# $$
# \operatorname{D} f(\mathbf{x}) = \begin{pmatrix}
# \frac{\partial f_1}{\partial x_1} (\mathbf{x}) & \frac{\partial f_1}{\partial x_2} (\mathbf{x}) & \cdots & \frac{\partial f_1}{\partial x_n} (\mathbf{x}) \\
# \frac{\partial f_2}{\partial x_1} (\mathbf{x}) & \frac{\partial f_2}{\partial x_2} (\mathbf{x}) & \cdots & \frac{\partial f_2}{\partial x_n} (\mathbf{x}) \\
# \vdots & \vdots & & \vdots \\
# \frac{\partial f_n}{\partial x_1} (\mathbf{x}) & \frac{\partial f_n}{\partial x_2} (\mathbf{x}) & \cdots & \frac{\partial f_n}{\partial x_n} (\mathbf{x})
# \end{pmatrix} = \begin{pmatrix}
# \nabla f_1(\mathbf{x})' \\
# \nabla f_2(\mathbf{x})' \\
# \vdots \\
# \nabla f_n(\mathbf{x})'
# \end{pmatrix}.
# $$
# Its determinant, the **Jacobian determinant**, appears in the higher-dimensional version of **integration by substitution** or **change of variable**
# $$
# \int_{f(U)} \phi(\mathbf{v}) \, \operatorname{d} \mathbf{v} = \int_U \phi(f(\mathbf{u})) | \det \operatorname{D} f(\mathbf{u})| \, \operatorname{d} \mathbf{u}
# $$
# for function $\phi: \mathbb{R}^n \mapsto \mathbb{R}$. This result will be used in transformation of random variables in 202A.
#
# For an example of $n=1$, an indefinite integral can be transformed to a definite integral over box [-1,1] via change of variable $v = u / (1-u^2)$:
# $$
# \int_{-\infty}^\infty f(v) \, dv = \int_{-1}^1 f\left(\frac{u}{1-u^2}\right) \frac{1+u^2}{(1-u^2)^2} \, du.
# $$
# ## Some properties of determinant (important)
#
# - The determinant of a **lower or upper triangular matrix** $\mathbf{A}$ is the product of the diagonal elements $\prod_{i=1}^n a_{ii}$. (Why?)
#
# - Any square matrix $\mathbf{A}$ is singular if and only if $\det(\mathbf{A}) = 0$.
#
# - Product rule: $\det(\mathbf{A} \mathbf{B}) = \det(\mathbf{A}) \det(\mathbf{B})$.
#
# Product rule is extremely useful. For example, computer calculates the determinant of a square matrix $\mathbf{A}$ by first computing the LU decomposition $\mathbf{A} = \mathbf{L} \mathbf{U}$ and then $\det(\mathbf{A}) = \det(\mathbf{L}) \det(\mathbf{U})$.
#
# - Determinant of an orthogonal matrix is 1 (**rotation**) or -1 (**reflection**).
# This classifies orthogonal matrices into two classes: rotations and reflections.
# a rotator
θ = π/4
A = [cos(θ) -sin(θ);
     sin(θ) cos(θ)]
# det = +1: a proper rotation
det(A)
# a reflector
B = [cos(θ) sin(θ);
     sin(θ) -cos(θ)]
# B'B = I confirms B is orthogonal
B'B
# det = -1: a reflection
det(B)
# 3 points for a triangle (columns; last column repeats the first to close it)
X = [1 1 2 1; 1 3 1 1]
# rotation
Xrot = A * X
# reflection
Xref = B * X
# Draw the original triangle plus its rotated and reflected images.
plt = plot(X[1, :], X[2, :], color = :blue,
    legend = :none, xlims = (-3, 3), ylims = (-3, 3),
    xticks = -3:1:3, yticks = -3:1:3,
    framestyle = :origin,
    aspect_ratio = :equal)
plot!(plt, Xrot[1, :], Xrot[2, :])
plot!(plt, Xref[1, :], Xref[2, :],
    annotations = [(-1.5, 2, "rotation"), (2, -1.75, "reflection")])
# - $\det(\mathbf{A}') = \det(\mathbf{A})$.
#
# - $\det(\mathbf{A}^{-1}) = 1/\det(\mathbf{A})$.
#
# - $\det(c\mathbf{A}) = c^n \det(\mathbf{A})$.
#
# - Determinant of a permutation matrix is the sign of the permutation.
#
# - For $\mathbf{A}$ and $\mathbf{D}$ square and nonsingular,
# \begin{eqnarray*}
# \det \left( \begin{pmatrix}
# \mathbf{A} & \mathbf{B} \\
# \mathbf{C} & \mathbf{D}
# \end{pmatrix} \right) = \det (\mathbf{A}) \det (\mathbf{D} - \mathbf{C} \mathbf{A}^{-1} \mathbf{B}) = \det(\mathbf{D}) \det(\mathbf{A} - \mathbf{B} \mathbf{D}^{-1} \mathbf{C}).
# \end{eqnarray*}
#
# Proof: Take determinant on the both sides of the matrix identity
# \begin{eqnarray*}
# \begin{pmatrix}
# \mathbf{A} & \mathbf{0} \\
# \mathbf{0} & \mathbf{D} - \mathbf{C} \mathbf{A}^{-1} \mathbf{B}
# \end{pmatrix} = \begin{pmatrix}
# \mathbf{I} & \mathbf{0} \\ - \mathbf{C} \mathbf{A}^{-1} & \mathbf{I}
# \end{pmatrix} \begin{pmatrix}
# \mathbf{A} & \mathbf{B} \\
# \mathbf{C} & \mathbf{D}
# \end{pmatrix} \begin{pmatrix}
# \mathbf{I} & - \mathbf{A}^{-1} \mathbf{B} \\
# \mathbf{0} & \mathbf{I}
# \end{pmatrix}.
# \end{eqnarray*}
| slides/09-det/09-det.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from combined_attributes_adder import CombinedAttributesAdder
from load_dataset import load_housing_data
from manipulate_dataset import split_train_test, split_train_test_with_id
from pandas.plotting import scatter_matrix
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, StandardScaler
from sklearn.tree import DecisionTreeRegressor
print("hello, world")
# # Exploration
housing = load_housing_data()
housing.head()
housing.info()
housing["ocean_proximity"].value_counts()
housing.describe()
# Distribution of every numeric attribute
housing.hist(bins = 50, figsize=(20, 15))
plt.show()
# Purely random split: membership changes on every run
train_set, test_set = split_train_test(housing, 0.2)
len(train_set), len(test_set)
# Hash-of-id split: stable across reruns as long as row ids are stable
housing_with_id = housing.reset_index()
train_set, test_set = split_train_test_with_id(housing_with_id, 0.2, "index")
len(train_set), len(test_set)
# Bucket median_income into 5 ordinal categories for stratified sampling
housing["income_cat"] = pd.cut(housing["median_income"], bins=[0., 1.5, 3.0, 4.5, 6., np.inf], labels=[1, 2, 3, 4, 5])
housing["income_cat"].hist()
# +
# Single stratified 80/20 split that preserves income-category proportions
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]):
    strat_train_set = housing.loc[train_index]
    strat_test_set = housing.loc[test_index]
strat_test_set["income_cat"].value_counts()
# -
# income_cat was only needed for the split; drop it from both sets
for set_ in (strat_train_set, strat_test_set):
    set_.drop("income_cat", axis=1, inplace=True)
# Explore on a copy of the training set so the original stays untouched
housing = strat_train_set.copy()
# Geographic scatter: marker size ~ population, colour ~ house value
housing.plot(
    kind="scatter",
    x="longitude",
    y="latitude",
    alpha=0.4,
    s=housing["population"]/100,
    label="population",
    figsize=(10, 7),
    c="median_house_value",
    cmap=plt.get_cmap("jet"),
    colorbar=True,
)
plt.legend()
# NOTE(review): on pandas >= 2.0, corr() raises on the non-numeric
# ocean_proximity column; pass numeric_only=True there — confirm version.
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
attributes = ["median_house_value", "median_income", "total_rooms", "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12, 8))
housing.plot(kind="scatter", x="median_income", y="median_house_value", alpha=0.1)
# Derived ratio features often correlate better than their raw counts
housing["rooms_per_household"] = housing["total_rooms"] / housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"] / housing["total_rooms"]
housing["population_per_household"] = housing["population"] / housing["households"]
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
# # Data Preparation
# Separate predictors from the label
housing = strat_train_set.drop("median_house_value", axis=1)
housing_labels = strat_train_set["median_house_value"].copy()
# Fill missing numeric values with per-column medians
imputer = SimpleImputer(strategy="median")
housing_num = housing.drop("ocean_proximity", axis=1)
housing_tr = pd.DataFrame(
    imputer.fit_transform(housing_num),
    columns=housing_num.columns)
# +
housing_cat = housing[["ocean_proximity"]]
housing_cat.head()
# Ordinal encoding implies a (spurious) order between categories ...
ordinal_encoder = OrdinalEncoder()
housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)
print(ordinal_encoder.categories_)
# ... one-hot encoding avoids that (result is a sparse matrix)
cat_encoder = OneHotEncoder()
housing_cat_1hot = cat_encoder.fit_transform(housing_cat)
print(cat_encoder.categories_)
print(housing_cat_1hot.toarray()[:10])
# -
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attrs = attr_adder.transform(housing.values)
# +
# Numeric pipeline: impute -> add ratio features -> standardise
num_pipeline = Pipeline([
    ("imputer", SimpleImputer(strategy="median")),
    ("attribs_adder", CombinedAttributesAdder()),
    ("std_scaler", StandardScaler()),
])
housing_num_tr = num_pipeline.fit_transform(housing_num)
# +
num_attribs = housing_num.columns
cat_attribs = ["ocean_proximity"]
# Full pipeline: numeric transform plus one-hot for the categorical column
full_pipeline = ColumnTransformer([
    ("num", num_pipeline, num_attribs),
    ("cat", OneHotEncoder(), cat_attribs),
])
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared[:5]
# -
# Baseline model: linear regression on the prepared matrix
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
some_data = housing[:5]
some_labels = housing_labels[:5]
# transform (not fit_transform): reuse statistics learned on the training set
some_data_prepared = full_pipeline.transform(some_data)
print("Predictions:", lin_reg.predict(some_data_prepared))
print("Labels:", list(some_labels))
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_rmse
# Decision tree: a near-zero TRAINING RMSE here signals overfitting, not
# success — judge it with cross-validation or the held-out test set.
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels)
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
tree_rmse
| housing/Housing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="xgKnbRNNYT62"
# # Comparison of Batch, Mini-Batch and Stochastic Gradient Descent
# + [markdown] id="u5TOjBBiYT69"
# This notebook displays an animation comparing Batch, Mini-Batch and Stochastic Gradient Descent (introduced in Chapter 4). Thanks to [<NAME>](https://github.com/daniel-s-ingram) who contributed this notebook.
# + [markdown] id="5a8241QpYT69"
# <table align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/extra_gradient_descent_comparison.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# </table>
# + id="JlDtoJ8HYT6-"
import numpy as np
# %matplotlib nbagg
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
# + id="ROTXMGxDYT6_"
m = 100
# Synthetic linear data: y = 4 + 3x plus noise, x uniform on [0, 2).
X = 2*np.random.rand(m, 1)
# Design matrix with a leading bias column of ones.
X_b = np.c_[np.ones((m, 1)), X]
# NOTE(review): noise here is uniform rand(); the usual textbook example
# uses Gaussian randn() — confirm which is intended.
y = 4 + 3*X + np.random.rand(m, 1)
# + id="gAuLoF_uYT6_"
def batch_gradient_descent():
    """Full-batch gradient descent on the module-level X_b / y / m.

    Returns the list of theta iterates (initial value plus one per step).
    """
    n_iterations = 1000
    learning_rate = 0.05
    theta = np.random.randn(2, 1)
    path = [theta]
    for _ in range(n_iterations):
        # MSE gradient over the entire training set
        grad = 2*X_b.T.dot(X_b.dot(theta) - y)/m
        theta = theta - learning_rate*grad
        path.append(theta)
    return path
# + id="6FFUIS7yYT7A"
def stochastic_gradient_descent():
    """SGD: one random training sample per update, decaying learning rate.

    Uses the module-level X_b, y, m and the learning_schedule helper.
    Returns the list of theta iterates (length n_epochs*m + 1).
    """
    n_epochs = 50
    t0, t1 = 5, 50  # learning-schedule hyperparameters
    thetas = np.random.randn(2, 1)
    thetas_path = [thetas]
    for epoch in range(n_epochs):
        for i in range(m):
            # Sample one training point (with replacement)
            random_index = np.random.randint(m)
            xi = X_b[random_index:random_index+1]
            yi = y[random_index:random_index+1]
            gradients = 2*xi.T.dot(xi.dot(thetas) - yi)
            # Global step counter epoch*m + i drives the step-size decay
            eta = learning_schedule(epoch*m + i, t0, t1)
            thetas = thetas - eta*gradients
            thetas_path.append(thetas)
    return thetas_path
# + id="sx48W4kGYT7A"
def mini_batch_gradient_descent():
    """Mini-batch GD: shuffled epochs, 20-sample batches, decaying step size.

    Uses the module-level X_b, y, m and the learning_schedule helper.
    Returns the list of theta iterates.
    """
    n_iterations = 50
    minibatch_size = 20
    t0, t1 = 200, 1000  # learning-schedule hyperparameters
    thetas = np.random.randn(2, 1)
    thetas_path = [thetas]
    t = 0  # global step counter across epochs; drives the decay
    for epoch in range(n_iterations):
        # Fresh shuffle each epoch so batch composition varies
        shuffled_indices = np.random.permutation(m)
        X_b_shuffled = X_b[shuffled_indices]
        y_shuffled = y[shuffled_indices]
        for i in range(0, m, minibatch_size):
            t += 1
            xi = X_b_shuffled[i:i+minibatch_size]
            yi = y_shuffled[i:i+minibatch_size]
            gradients = 2*xi.T.dot(xi.dot(thetas) - yi)/minibatch_size
            eta = learning_schedule(t, t0, t1)
            thetas = thetas - eta*gradients
            thetas_path.append(thetas)
    return thetas_path
# + id="Ks8DWvTXYT7A"
def compute_mse(theta):
    """Mean squared error of X_b·theta against the module-level targets y."""
    residuals = np.dot(X_b, theta) - y
    return np.sum(residuals**2)/m
# + id="Y7PxbsNnYT7B"
def learning_schedule(t, t0, t1):
    """Hyperbolically decaying learning rate: t0 / (t + t1)."""
    denominator = t + t1
    return t0/denominator
# + id="vBD5jVmjYT7B"
# Evaluate the MSE on a 50x50 grid of (theta0, theta1) pairs for the
# cost-surface plot.
theta0, theta1 = np.meshgrid(np.arange(0, 5, 0.1), np.arange(0, 5, 0.1))
r, c = theta0.shape
# Float array: the previous int-valued np.array silently truncated every
# MSE to an integer, coarsening the contour plot.
cost_map = np.zeros((r, c))
for i in range(r):
    for j in range(c):
        # Column vector (2, 1) so X_b.dot(theta) is (m, 1) and subtracts y
        # elementwise; the previous flat (2,) vector broadcast (m,) - (m, 1)
        # into an (m, m) matrix and produced wrong MSE values.
        theta = np.array([[theta0[i, j]], [theta1[i, j]]])
        cost_map[i, j] = compute_mse(theta)
# + id="oOsfECOLYT7B"
# Closed-form least-squares optimum via the normal equation (reference point).
exact_solution = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
bgd_thetas = np.array(batch_gradient_descent())
sgd_thetas = np.array(stochastic_gradient_descent())
mbgd_thetas = np.array(mini_batch_gradient_descent())
# + id="BNsrVAsVYT7B"
bgd_len = len(bgd_thetas)
sgd_len = len(sgd_thetas)
mbgd_len = len(mbgd_thetas)
# Animate only as many frames as the shortest parameter path provides.
n_iter = min(bgd_len, sgd_len, mbgd_len)
# + id="x85lSWEmYT7C" outputId="1839e1f8-b7aa-4c6b-ec52-7023f894311a"
fig = plt.figure(figsize=(10, 5))
data_ax = fig.add_subplot(121)  # left: data points + fitted lines
cost_ax = fig.add_subplot(122)  # right: cost surface + parameter paths
# Normal-equation optimum marked with a yellow star
cost_ax.plot(exact_solution[0,0], exact_solution[1,0], 'y*')
cost_img = cost_ax.pcolor(theta0, theta1, cost_map)
fig.colorbar(cost_img)
# + id="0v0vI5OdYT7D"
def animate(i):
    """Draw animation frame i: data plus current fit lines on the left axis,
    cost surface plus the first i steps of each parameter path on the right.
    """
    data_ax.cla()
    cost_ax.cla()
    data_ax.plot(X, y, 'k.')
    cost_ax.plot(exact_solution[0,0], exact_solution[1,0], 'y*')
    cost_ax.pcolor(theta0, theta1, cost_map)
    # Colour code: batch GD red, SGD green, mini-batch GD blue
    data_ax.plot(X, X_b.dot(bgd_thetas[i,:]), 'r-')
    cost_ax.plot(bgd_thetas[:i,0], bgd_thetas[:i,1], 'r--')
    data_ax.plot(X, X_b.dot(sgd_thetas[i,:]), 'g-')
    cost_ax.plot(sgd_thetas[:i,0], sgd_thetas[:i,1], 'g--')
    data_ax.plot(X, X_b.dot(mbgd_thetas[i,:]), 'b-')
    cost_ax.plot(mbgd_thetas[:i,0], mbgd_thetas[:i,1], 'b--')
    data_ax.set_xlim([0, 2])
    data_ax.set_ylim([0, 15])
    cost_ax.set_xlim([0, 5])
    cost_ax.set_ylim([0, 5])
    data_ax.set_xlabel(r'$x_1$')
    data_ax.set_ylabel(r'$y$', rotation=0)
    cost_ax.set_xlabel(r'$\theta_0$')
    cost_ax.set_ylabel(r'$\theta_1$')
    data_ax.legend(('Data', 'BGD', 'SGD', 'MBGD'), loc="upper left")
    cost_ax.legend(('Normal Equation', 'BGD', 'SGD', 'MBGD'), loc="upper left")
# + id="Ok5So-4AYT7D"
# Keep a reference to the animation so it is not garbage-collected mid-render.
animation = FuncAnimation(fig, animate, frames=n_iter)
plt.show()
# + id="p2BhL2sAYT7E"
| Kata Fundamentos/CalculoML/extra_gradient_descent_comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import datetime
import seaborn as sns
import pydicom
import time
import gc
import operator
from apex import amp
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.utils.data as D
import torch.nn.functional as F
from sklearn.model_selection import KFold
from tqdm import tqdm, tqdm_notebook
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import warnings
warnings.filterwarnings(action='once')
import pickle
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
from skimage.io import imread,imshow
from helper import *
from apex import amp
import helper
import torchvision.models as models
import pretrainedmodels
from torch.optim import Adam
from defenitions import *
# -
SEED = 8153
# Pick the training GPU by name; alternatives kept for other machines.
device=device_by_name("Tesla")
#device=device_by_name("RTX")
#device = "cpu"
torch.cuda.set_device(device)
# Progress notifications by email; my_gmail/my_pass/to_email presumably come
# from the star-import of `defenitions` — keep credentials out of the notebook.
sendmeemail=Email_Progress(my_gmail,my_pass,to_email,'se_resnet101-folds results')
def get_submission(test_df,pred,do_sigmoid=True):
    """Build the long-format Kaggle submission frame from per-image predictions.

    test_df: frame with a PatientID column, one row per image, aligned with pred.
    pred:    (n_images, 6) scores; columns are the subtypes in the order
             epidural, intraparenchymal, intraventricular, subarachnoid,
             subdural, any.
    do_sigmoid: apply torch.sigmoid to turn raw logits into probabilities.
    Returns one row per (image, subtype), sorted by ID, index reset.
    """
    if do_sigmoid:
        func = lambda x:torch.sigmoid(x)
    else:
        func = lambda x:x
    subtype_names = ['epidural', 'intraparenchymal', 'intraventricular',
                     'subarachnoid', 'subdural', 'any']
    partial_frames = [
        pd.DataFrame(data={'ID': 'ID_' + test_df.PatientID.values + '_' + name,
                           'Label': func(pred[:, col])})
        for col, name in enumerate(subtype_names)]
    return pd.concat(partial_frames).sort_values('ID').reset_index(drop=True)
# Load the training index; drop known-bad images and duplicate rows.
train_df = pd.read_csv(data_dir+'train.csv')
train_df.shape
# `bad_images` (star-imported, presumably from defenitions.py) lists
# images to exclude — TODO confirm its origin.
train_df=train_df[~train_df.PatientID.isin(bad_images)].reset_index(drop=True)
train_df=train_df.drop_duplicates().reset_index(drop=True)
train_df.shape
train_df.head()
test_df = pd.read_csv(data_dir+'test.csv')
test_df.head()
# 3-fold CV split over patient IDs (PID) so the same patient never lands
# in both train and validation; the split is pickled for reuse by
# downstream notebooks.
split_sid = train_df.PID.unique()
splits=list(KFold(n_splits=3,shuffle=True, random_state=SEED).split(split_sid))
pickle_file=open(outputs_dir+"PID_splits.pkl",'wb')
pickle.dump((split_sid,splits),pickle_file,protocol=4)
pickle_file.close()
def my_loss(y_pred,y_true,weights):
    """Per-class weighted BCE-with-logits loss.

    When `y_true` has the same rank as `y_pred` it is a plain target
    tensor.  Otherwise `y_true` carries an extra trailing dimension of
    size 3: two alternative label sets plus a per-element mixing
    coefficient that blends the two resulting losses.
    """
    if len(y_pred.shape) == len(y_true.shape):
        return F.binary_cross_entropy_with_logits(
            y_pred, y_true, weights.expand_as(y_pred))
    # Mixed-label form: blend the losses of the two label sets.
    per_sample_w = weights.repeat(y_pred.shape[0], 1)
    bce_a = F.binary_cross_entropy_with_logits(
        y_pred, y_true[..., 0], per_sample_w, reduction='none')
    bce_b = F.binary_cross_entropy_with_logits(
        y_pred, y_true[..., 1], per_sample_w, reduction='none')
    mix = y_true[..., 2]
    return (mix * bce_a + (1.0 - mix) * bce_b).mean()
class parameter_scheduler():
    """Freeze/unfreeze model parameters on an epoch schedule.

    During the first `num_epoch` epochs only parameters whose name
    contains one of the substrings in `do_first` are trainable;
    afterwards every parameter is unfrozen.
    """
    def __init__(self,model,do_first=['classifier'],num_epoch=1):
        self.model=model
        self.do_first = do_first
        self.num_epoch=num_epoch

    def __call__(self,epoch):
        # Warm-up phase: train only the whitelisted parameter groups.
        warmup = epoch < self.num_epoch
        for name, param in self.model.named_parameters():
            if warmup:
                param.requires_grad = any(tag in name for tag in self.do_first)
            else:
                param.requires_grad = True
# +
def get_optimizer_parameters(model,klr):
    """Build per-module parameter groups with discriminative learning rates.

    The classifier head gets the largest rate (klr*2e-4), the window
    (wso) layers the smallest (klr*5e-6), and backbone blocks
    layer0..layer4 get geometrically increasing rates
    (klr * 2**i * 1e-5).  Bias parameters are exempt from weight decay.
    """
    named = list(model.named_parameters())
    no_decay = ['bias']

    def _pair(tag, lr):
        # One weight-decayed and one decay-free group for all parameters
        # whose qualified name contains `tag`.
        decayed = [p for n, p in named
                   if not any(nd in n for nd in no_decay) and tag in n]
        undecayed = [p for n, p in named
                     if any(nd in n for nd in no_decay) and tag in n]
        return [{'params': decayed, 'lr': lr, 'weight_decay': 0.01},
                {'params': undecayed, 'lr': lr, 'weight_decay': 0.0}]

    groups = _pair('classifier', klr * 2e-4)
    groups += _pair('wso', klr * 5e-6)
    for i in range(5):
        groups += _pair('layer{}'.format(i), klr * (2.0 ** i) * 1e-5)
    return groups
# +
# %matplotlib nbagg
# ---- Train CV fold 0 ----
# Seed every RNG per fold so each fold's run is reproducible on its own.
num_split=0
np.random.seed(SEED+num_split)
torch.manual_seed(SEED+num_split)
torch.cuda.manual_seed(SEED+num_split)
#torch.backends.cudnn.deterministic = True
# Row indices for this fold, derived from the patient-level split so no
# patient appears in both train and validation.
idx_train = train_df[train_df.PID.isin(set(split_sid[splits[num_split][0]]))].index.values
idx_validate = train_df[train_df.PID.isin(set(split_sid[splits[num_split][1]]))].index.values
idx_train.shape
idx_validate.shape
# klr is a global multiplier applied to every parameter-group lr.
klr=1
batch_size=32
num_workers=12
num_epochs=5
model_name,version = 'se_resnet101' , 'classifier_splits'
# SE-ResNet101 backbone with a 6-way hemorrhage head; `wso` holds
# (center, width) windowing presets — presumably brain / subdural / bone
# style windows, TODO confirm against MySENet.
model = MySENet(pretrainedmodels.__dict__['se_resnet101'](num_classes=1000, pretrained='imagenet'),
                len(hemorrhage_types),
                num_channels=3,
                dropout=0.2,
                wso=((40,80),(80,200),(40,400)),
                dont_do_grad=[],
                extra_pool=8,
               )
_=model.to(device)
# Per-class loss weights: the last class ('any') counts double.
weights = torch.tensor([1.,1.,1.,1.,1.,2.],device=device)
loss_func=my_loss
targets_dataset=D.TensorDataset(torch.tensor(train_df[hemorrhage_types].values,dtype=torch.float))
# Training-time augmentation; validation uses only a deterministic resize.
transform=MyTransform(mean_change=15,
                      std_change=0,
                      flip=True,
                      zoom=(0.2,0.2),
                      rotate=30,
                      out_size=512,
                      shift=10,
                      normal=False)
imagedataset = ImageDataset(train_df,transform=transform.random,base_path=train_images_dir,
                            window_eq=False,equalize=False,rescale=True)
transform_val=MyTransform(out_size=512)
imagedataset_val = ImageDataset(train_df,transform=transform_val.random,base_path=train_images_dir,
                                window_eq=False,equalize=False,rescale=True)
combined_dataset=DatasetCat([imagedataset,targets_dataset])
combined_dataset_val=DatasetCat([imagedataset_val,targets_dataset])
# Discriminative learning rates (head fast, backbone slower, wso slowest).
optimizer_grouped_parameters=get_optimizer_parameters(model,klr)
# NOTE(review): `sampling` is built but never passed to model_train
# (sampler=None below) — presumably a leftover from an experiment.
sampling=simple_sampler(train_df[hemorrhage_types].values[idx_train],0.25)
sample_ratio=1.0
train_dataset=D.Subset(combined_dataset,idx_train)
validate_dataset=D.Subset(combined_dataset_val,idx_validate)
num_train_optimization_steps = num_epochs*(sample_ratio*len(train_dataset)//batch_size+int(len(train_dataset)%batch_size>0))
# Live loss curve, updated during training via the `graph` callback.
fig,ax = plt.subplots(figsize=(10,7))
gr=loss_graph(fig,ax,num_epochs,int(num_train_optimization_steps/num_epochs)+1,limits=(0.05,0.2))
# Warmup + cosine schedule with restarts, one cycle per epoch.
sched=WarmupExpCosineWithWarmupRestartsSchedule( t_total=num_train_optimization_steps, cycles=num_epochs,tau=1)
#param_optimizer = model.parameters()
#optimizer = torch.optim.Adam(param_optimizer, lr=klr*6e-5)
optimizer = BertAdam(optimizer_grouped_parameters,lr=klr*1e-3,schedule=sched)
# Mixed precision via NVIDIA apex (O1 = conservative AMP patching).
model, optimizer = amp.initialize(model, optimizer, opt_level="O1",verbosity=0)
history,best_model= model_train(model,
                                optimizer,
                                train_dataset,
                                batch_size,
                                num_epochs,
                                loss_func,
                                weights=weights,
                                do_apex=False,
                                model_apexed=True,
                                validate_dataset=validate_dataset,
                                param_schedualer=None,
                                weights_data=None,
                                metric=None,
                                return_model=True,
                                num_workers=num_workers,
                                sampler=None,
                                pre_process = None,
                                graph=gr,
                                call_progress=sendmeemail)
# Persist both the final checkpoint and the best-validation checkpoint.
torch.save(model.state_dict(), models_dir+models_format.format(model_name,version,num_split))
torch.save(best_model.state_dict(), models_dir+models_format.format(model_name,version+'_best',num_split))
# +
# %matplotlib nbagg
# ---- Train CV fold 1 (same pipeline as the fold-0 cell above, but with
# num_epochs=6) ----
num_split=1
np.random.seed(SEED+num_split)
torch.manual_seed(SEED+num_split)
torch.cuda.manual_seed(SEED+num_split)
#torch.backends.cudnn.deterministic = True
idx_train = train_df[train_df.PID.isin(set(split_sid[splits[num_split][0]]))].index.values
idx_validate = train_df[train_df.PID.isin(set(split_sid[splits[num_split][1]]))].index.values
idx_train.shape
idx_validate.shape
klr=1
batch_size=32
num_workers=12
num_epochs=6
model_name,version = 'se_resnet101' , 'classifier_splits'
model = MySENet(pretrainedmodels.__dict__['se_resnet101'](num_classes=1000, pretrained='imagenet'),
                len(hemorrhage_types),
                num_channels=3,
                dropout=0.2,
                wso=((40,80),(80,200),(40,400)),
                dont_do_grad=[],
                extra_pool=8,
               )
_=model.to(device)
# 'any' label weighted x2 in the loss.
weights = torch.tensor([1.,1.,1.,1.,1.,2.],device=device)
loss_func=my_loss
targets_dataset=D.TensorDataset(torch.tensor(train_df[hemorrhage_types].values,dtype=torch.float))
transform=MyTransform(mean_change=15,
                      std_change=0,
                      flip=True,
                      zoom=(0.2,0.2),
                      rotate=30,
                      out_size=512,
                      shift=10,
                      normal=False)
imagedataset = ImageDataset(train_df,transform=transform.random,base_path=train_images_dir,
                            window_eq=False,equalize=False,rescale=True)
transform_val=MyTransform(out_size=512)
imagedataset_val = ImageDataset(train_df,transform=transform_val.random,base_path=train_images_dir,
                                window_eq=False,equalize=False,rescale=True)
combined_dataset=DatasetCat([imagedataset,targets_dataset])
combined_dataset_val=DatasetCat([imagedataset_val,targets_dataset])
optimizer_grouped_parameters=get_optimizer_parameters(model,klr)
# NOTE(review): `sampling` is unused (sampler=None below).
sampling=simple_sampler(train_df[hemorrhage_types].values[idx_train],0.25)
sample_ratio=1.0
train_dataset=D.Subset(combined_dataset,idx_train)
validate_dataset=D.Subset(combined_dataset_val,idx_validate)
num_train_optimization_steps = num_epochs*(sample_ratio*len(train_dataset)//batch_size+int(len(train_dataset)%batch_size>0))
fig,ax = plt.subplots(figsize=(10,7))
gr=loss_graph(fig,ax,num_epochs,int(num_train_optimization_steps/num_epochs)+1,limits=(0.05,0.2))
sched=WarmupExpCosineWithWarmupRestartsSchedule( t_total=num_train_optimization_steps, cycles=num_epochs,tau=1)
#param_optimizer = model.parameters()
#optimizer = torch.optim.Adam(param_optimizer, lr=klr*6e-5)
optimizer = BertAdam(optimizer_grouped_parameters,lr=klr*1e-3,schedule=sched)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1",verbosity=0)
history,best_model= model_train(model,
                                optimizer,
                                train_dataset,
                                batch_size,
                                num_epochs,
                                loss_func,
                                weights=weights,
                                do_apex=False,
                                model_apexed=True,
                                validate_dataset=validate_dataset,
                                param_schedualer=None,
                                weights_data=None,
                                metric=None,
                                return_model=True,
                                num_workers=num_workers,
                                sampler=None,
                                pre_process = None,
                                graph=gr,
                                call_progress=sendmeemail)
# Persist final and best-validation checkpoints for this fold.
torch.save(model.state_dict(), models_dir+models_format.format(model_name,version,num_split))
torch.save(best_model.state_dict(), models_dir+models_format.format(model_name,version+'_best',num_split))
# +
# %matplotlib nbagg
# ---- Train CV fold 2 (same pipeline as the previous fold cells, but
# with a reduced lr multiplier klr=0.75 and batch_size=24) ----
num_split=2
np.random.seed(SEED+num_split)
torch.manual_seed(SEED+num_split)
torch.cuda.manual_seed(SEED+num_split)
#torch.backends.cudnn.deterministic = True
idx_train = train_df[train_df.PID.isin(set(split_sid[splits[num_split][0]]))].index.values
idx_validate = train_df[train_df.PID.isin(set(split_sid[splits[num_split][1]]))].index.values
idx_train.shape
idx_validate.shape
klr=0.75
batch_size=24
num_workers=12
num_epochs=5
model_name,version = 'se_resnet101' , 'classifier_splits'
model = MySENet(pretrainedmodels.__dict__['se_resnet101'](num_classes=1000, pretrained='imagenet'),
                len(hemorrhage_types),
                num_channels=3,
                dropout=0.2,
                wso=((40,80),(80,200),(40,400)),
                dont_do_grad=[],
                extra_pool=8,
               )
_=model.to(device)
# 'any' label weighted x2 in the loss.
weights = torch.tensor([1.,1.,1.,1.,1.,2.],device=device)
loss_func=my_loss
targets_dataset=D.TensorDataset(torch.tensor(train_df[hemorrhage_types].values,dtype=torch.float))
transform=MyTransform(mean_change=15,
                      std_change=0,
                      flip=True,
                      zoom=(0.2,0.2),
                      rotate=30,
                      out_size=512,
                      shift=10,
                      normal=False)
imagedataset = ImageDataset(train_df,transform=transform.random,base_path=train_images_dir,
                            window_eq=False,equalize=False,rescale=True)
transform_val=MyTransform(out_size=512)
imagedataset_val = ImageDataset(train_df,transform=transform_val.random,base_path=train_images_dir,
                                window_eq=False,equalize=False,rescale=True)
combined_dataset=DatasetCat([imagedataset,targets_dataset])
combined_dataset_val=DatasetCat([imagedataset_val,targets_dataset])
optimizer_grouped_parameters=get_optimizer_parameters(model,klr)
# NOTE(review): `sampling` is unused (sampler=None below).
sampling=simple_sampler(train_df[hemorrhage_types].values[idx_train],0.25)
sample_ratio=1.0
train_dataset=D.Subset(combined_dataset,idx_train)
validate_dataset=D.Subset(combined_dataset_val,idx_validate)
num_train_optimization_steps = num_epochs*(sample_ratio*len(train_dataset)//batch_size+int(len(train_dataset)%batch_size>0))
fig,ax = plt.subplots(figsize=(10,7))
gr=loss_graph(fig,ax,num_epochs,int(num_train_optimization_steps/num_epochs)+1,limits=(0.05,0.2))
sched=WarmupExpCosineWithWarmupRestartsSchedule( t_total=num_train_optimization_steps, cycles=num_epochs,tau=1)
#param_optimizer = model.parameters()
#optimizer = torch.optim.Adam(param_optimizer, lr=klr*6e-5)
optimizer = BertAdam(optimizer_grouped_parameters,lr=klr*1e-3,schedule=sched)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1",verbosity=0)
history,best_model= model_train(model,
                                optimizer,
                                train_dataset,
                                batch_size,
                                num_epochs,
                                loss_func,
                                weights=weights,
                                do_apex=False,
                                model_apexed=True,
                                validate_dataset=validate_dataset,
                                param_schedualer=None,
                                weights_data=None,
                                metric=None,
                                return_model=True,
                                num_workers=num_workers,
                                sampler=None,
                                pre_process = None,
                                graph=gr,
                                call_progress=sendmeemail)
# Persist final and best-validation checkpoints for this fold.
torch.save(model.state_dict(), models_dir+models_format.format(model_name,version,num_split))
torch.save(best_model.state_dict(), models_dir+models_format.format(model_name,version+'_best',num_split))
# -
# ---- TTA feature/prediction extraction on the TEST set, fold 0 ----
# Reload the fold-0 checkpoint with return_features=True so model_run
# yields both logits and penultimate features (for second-stage models).
num_split = 0
model_name,version = 'se_resnet101' , 'classifier_splits'
model = MySENet(pretrainedmodels.__dict__['se_resnet101'](num_classes=1000, pretrained='imagenet'),
                len(hemorrhage_types),
                num_channels=3,
                dropout=0.2,
                wso=((40,80),(80,200),(40,400)),
                dont_do_grad=[],
                extra_pool=8,
                return_features=True
               )
model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device)))
_=model.to(device)
# Random training-style augmentation reused at inference time for TTA
# (no shift here, unlike training).
transform=MyTransform(mean_change=15,
                      std_change=0,
                      flip=True,
                      zoom=(0.2,0.2),
                      rotate=30,
                      out_size=512,
                      shift=0,
                      normal=False)
transform_val=MyTransform(out_size=512)
# Repeat every test image 8 times -> 8 augmented copies per image.
indexes=np.arange(test_df.shape[0]).repeat(8)
imagedataset_test=D.Subset(ImageDataset(test_df,transform=transform.random,base_path=test_images_dir,
                                        window_eq=False,equalize=False,rescale=True),indexes)
pred,features = model_run(model,imagedataset_test,do_apex=True,batch_size=96,num_workers=18)
# Persist features and logits for the ensembling cells below.
pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_test',num_split),'wb')
pickle.dump(features,pickle_file,protocol=4)
pickle_file.close()
pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'predictions_test',num_split),'wb')
pickle.dump(pred,pickle_file,protocol=4)
pickle_file.close()
# ---- TTA feature/prediction extraction on the TEST set, fold 1 ----
# Same as the fold-0 cell above except rotate=20 in the augmentation.
num_split = 1
model_name,version = 'se_resnet101' , 'classifier_splits'
model = MySENet(pretrainedmodels.__dict__['se_resnet101'](num_classes=1000, pretrained='imagenet'),
                len(hemorrhage_types),
                num_channels=3,
                dropout=0.2,
                wso=((40,80),(80,200),(40,400)),
                dont_do_grad=[],
                extra_pool=8,
                return_features=True
               )
model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device)))
_=model.to(device)
transform=MyTransform(mean_change=15,
                      std_change=0,
                      flip=True,
                      zoom=(0.2,0.2),
                      rotate=20,
                      out_size=512,
                      shift=0,
                      normal=False)
transform_val=MyTransform(out_size=512)
# 8 augmented copies per test image.
indexes=np.arange(test_df.shape[0]).repeat(8)
imagedataset_test=D.Subset(ImageDataset(test_df,transform=transform.random,base_path=test_images_dir,
                                        window_eq=False,equalize=False,rescale=True),indexes)
pred,features = model_run(model,imagedataset_test,do_apex=True,batch_size=96,num_workers=18)
pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_test',num_split),'wb')
pickle.dump(features,pickle_file,protocol=4)
pickle_file.close()
pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'predictions_test',num_split),'wb')
pickle.dump(pred,pickle_file,protocol=4)
pickle_file.close()
# ---- TTA feature/prediction extraction on the TEST set, fold 2 ----
# Milder augmentation than the previous folds (mean_change=10, zoom 0.15).
num_split = 2
model_name,version = 'se_resnet101' , 'classifier_splits'
model = MySENet(pretrainedmodels.__dict__['se_resnet101'](num_classes=1000, pretrained='imagenet'),
                len(hemorrhage_types),
                num_channels=3,
                dropout=0.2,
                wso=((40,80),(80,200),(40,400)),
                dont_do_grad=[],
                extra_pool=8,
                return_features=True
               )
model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device)))
_=model.to(device)
transform=MyTransform(mean_change=10,
                      std_change=0,
                      flip=True,
                      zoom=(0.15,0.15),
                      rotate=20,
                      out_size=512,
                      shift=0,
                      normal=False)
transform_val=MyTransform(out_size=512)
# 8 augmented copies per test image.
indexes=np.arange(test_df.shape[0]).repeat(8)
imagedataset_test=D.Subset(ImageDataset(test_df,transform=transform.random,base_path=test_images_dir,
                                        window_eq=False,equalize=False,rescale=True),indexes)
pred,features = model_run(model,imagedataset_test,do_apex=True,batch_size=96,num_workers=18)
pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_test',num_split),'wb')
pickle.dump(features,pickle_file,protocol=4)
pickle_file.close()
pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'predictions_test',num_split),'wb')
pickle.dump(pred,pickle_file,protocol=4)
pickle_file.close()
# ---- Second TTA pass on the TEST set, fold 0, with the milder
# augmentation (mean_change=10, zoom 0.15, rotate=20) ----
# NOTE(review): this writes to the SAME pickle paths as the earlier
# fold-0 test cell, overwriting its outputs — presumably the milder
# augmentation supersedes the first pass; confirm intent.
num_split = 0
model_name,version = 'se_resnet101' , 'classifier_splits'
model = MySENet(pretrainedmodels.__dict__['se_resnet101'](num_classes=1000, pretrained='imagenet'),
                len(hemorrhage_types),
                num_channels=3,
                dropout=0.2,
                wso=((40,80),(80,200),(40,400)),
                dont_do_grad=[],
                extra_pool=8,
                return_features=True
               )
model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device)))
_=model.to(device)
transform=MyTransform(mean_change=10,
                      std_change=0,
                      flip=True,
                      zoom=(0.15,0.15),
                      rotate=20,
                      out_size=512,
                      shift=0,
                      normal=False)
transform_val=MyTransform(out_size=512)
indexes=np.arange(test_df.shape[0]).repeat(8)
imagedataset_test=D.Subset(ImageDataset(test_df,transform=transform.random,base_path=test_images_dir,
                                        window_eq=False,equalize=False,rescale=True),indexes)
pred,features = model_run(model,imagedataset_test,do_apex=True,batch_size=96,num_workers=18)
pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_test',num_split),'wb')
pickle.dump(features,pickle_file,protocol=4)
pickle_file.close()
pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'predictions_test',num_split),'wb')
pickle.dump(pred,pickle_file,protocol=4)
pickle_file.close()
# ---- Second TTA pass on the TEST set, fold 1, milder augmentation ----
# NOTE(review): overwrites the earlier fold-1 test pickles (same paths).
num_split = 1
model_name,version = 'se_resnet101' , 'classifier_splits'
model = MySENet(pretrainedmodels.__dict__['se_resnet101'](num_classes=1000, pretrained='imagenet'),
                len(hemorrhage_types),
                num_channels=3,
                dropout=0.2,
                wso=((40,80),(80,200),(40,400)),
                dont_do_grad=[],
                extra_pool=8,
                return_features=True
               )
model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device)))
_=model.to(device)
transform=MyTransform(mean_change=10,
                      std_change=0,
                      flip=True,
                      zoom=(0.15,0.15),
                      rotate=20,
                      out_size=512,
                      shift=0,
                      normal=False)
transform_val=MyTransform(out_size=512)
indexes=np.arange(test_df.shape[0]).repeat(8)
imagedataset_test=D.Subset(ImageDataset(test_df,transform=transform.random,base_path=test_images_dir,
                                        window_eq=False,equalize=False,rescale=True),indexes)
pred,features = model_run(model,imagedataset_test,do_apex=True,batch_size=96,num_workers=18)
pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_test',num_split),'wb')
pickle.dump(features,pickle_file,protocol=4)
pickle_file.close()
pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'predictions_test',num_split),'wb')
pickle.dump(pred,pickle_file,protocol=4)
pickle_file.close()
# ---- Second TTA pass on the TEST set, fold 2, milder augmentation ----
# NOTE(review): overwrites the earlier fold-2 test pickles (same paths).
num_split = 2
model_name,version = 'se_resnet101' , 'classifier_splits'
model = MySENet(pretrainedmodels.__dict__['se_resnet101'](num_classes=1000, pretrained='imagenet'),
                len(hemorrhage_types),
                num_channels=3,
                dropout=0.2,
                wso=((40,80),(80,200),(40,400)),
                dont_do_grad=[],
                extra_pool=8,
                return_features=True
               )
model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device)))
_=model.to(device)
transform=MyTransform(mean_change=10,
                      std_change=0,
                      flip=True,
                      zoom=(0.15,0.15),
                      rotate=20,
                      out_size=512,
                      shift=0,
                      normal=False)
transform_val=MyTransform(out_size=512)
indexes=np.arange(test_df.shape[0]).repeat(8)
imagedataset_test=D.Subset(ImageDataset(test_df,transform=transform.random,base_path=test_images_dir,
                                        window_eq=False,equalize=False,rescale=True),indexes)
pred,features = model_run(model,imagedataset_test,do_apex=True,batch_size=96,num_workers=18)
pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_test',num_split),'wb')
pickle.dump(features,pickle_file,protocol=4)
pickle_file.close()
pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'predictions_test',num_split),'wb')
pickle.dump(pred,pickle_file,protocol=4)
pickle_file.close()
# ---- 4x TTA feature/prediction extraction on the TRAIN set for every
# fold, plus a weighted-loss sanity check on each fold's validation rows.
for num_split in range(3):
    idx_validate = train_df[train_df.PID.isin(set(split_sid[splits[num_split][1]]))].index.values
    model_name,version = 'se_resnet101' , 'classifier_splits'
    model = MySENet(pretrainedmodels.__dict__['se_resnet101'](num_classes=1000, pretrained='imagenet'),
                    len(hemorrhage_types),
                    num_channels=3,
                    dropout=0.2,
                    wso=((40,80),(80,200),(40,400)),
                    dont_do_grad=[],
                    extra_pool=8,
                    return_features=True
                   )
    model.load_state_dict(torch.load(models_dir+models_format.format(model_name,version,num_split),map_location=torch.device(device)))
    _=model.to(device)
    transform=MyTransform(mean_change=10,
                          std_change=0,
                          flip=True,
                          zoom=(0.15,0.15),
                          rotate=20,
                          out_size=512,
                          shift=0,
                          normal=False)
    transform_val=MyTransform(out_size=512)
    # 4 augmented copies per training image (vs 8 for the test set).
    indexes=np.arange(train_df.shape[0]).repeat(4)
    train_dataset=D.Subset(ImageDataset(train_df,transform=transform.random,base_path=train_images_dir,
                                        window_eq=False,equalize=False,rescale=True),indexes)
    pred,features = model_run(model,train_dataset,do_apex=True,batch_size=96,num_workers=14)
    pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'features_train_tta',num_split),'wb')
    pickle.dump(features,pickle_file,protocol=4)
    pickle_file.close()
    pickle_file=open(outputs_dir+outputs_format.format(model_name,version,'predictions_train_tta',num_split),'wb')
    pickle.dump(pred,pickle_file,protocol=4)
    pickle_file.close()
    # Gather the 4 TTA rows belonging to each validation image
    # (row r maps to TTA rows r*4 .. r*4+3), average the logits, and
    # report the weighted validation loss for this fold.
    my_loss(pred[(idx_validate*4+np.arange(4)[:,None]).transpose(1,0)].mean(1),
            torch.tensor(train_df[hemorrhage_types].values[idx_validate],dtype=torch.float),
            torch.tensor([1.,1.,1.,1.,1.,2.]))
# ---- Ensemble: average sigmoid probabilities over 8 TTA copies x 3
# folds and write the submission csv ----
preds=[]
for i in tqdm_notebook(range(3)):
    # NOTE(fix): this notebook trains and saves 'se_resnet101'
    # predictions, but this cell originally loaded 'se_resnext101_32x4d'
    # (apparent copy-paste from a sibling notebook).
    model_name,version, num_split = 'se_resnet101' , 'classifier_splits',i
    # Load this fold's TTA test logits and regroup the 8 augmented
    # copies of each image: (N*8, 6) -> (N, 8, 6).
    with open(outputs_dir+outputs_format.format(model_name,version,'predictions_test',num_split),'rb') as pickle_file:
        pred=pickle.load(pickle_file)
    preds.append(pred[(np.arange(pred.shape[0]).reshape(pred.shape[0]//8,8))])
predss = torch.cat(preds,1)  # (N, 24, 6): 8 TTA copies x 3 folds
predss.shape
# Probabilities are averaged after the sigmoid, so get_submission is
# called with do_sigmoid=False.  (Three broken debug lines that followed
# here — including a bare `.mean(1).shape`, a syntax error — removed.)
submission_df=get_submission(test_df,torch.sigmoid(predss).mean(1),False)
submission_df.head(12)
submission_df.shape
sub_num=41
submission_df.to_csv('/media/hd/notebooks/data/RSNA/submissions/submission{}.csv'.format(sub_num),
                     index=False, columns=['ID','Label'])
| Production/se_resnet101.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
# read in high and low expression one_hot files
high_exp = pd.concat(
[pd.read_csv(f"high_exp_one_hot_{i}.csv", index_col=0)
for i in range(1, 5)]
)
low_exp = pd.concat(
[pd.read_csv(f"low_exp_one_hot_{i}.csv", index_col=0)
for i in range(1, 5)]
)
# concatenate to form a single dataframe
data_df = pd.concat([high_exp, low_exp], axis=0)
# function to convert the stringified one-hot matrices stored in the csv
# files back into fixed-size numpy arrays
def string_to_matrix(string):
    """Parse a stringified list-of-lists one-hot matrix and pad it.

    Each row is a 64-wide channel vector; the result is padded with
    all-zero rows to a fixed sequence length of 181.

    Returns a float ndarray of shape (181, 64).
    """
    # convert string to list of one_hot lists
    string = str(string)
    list_of_strings = string.split('], [')
    # Strip residual brackets/commas and split each row into its tokens,
    # skipping rows containing 'nan' (missing data).
    # NOTE(fix): the original condition was `'nan' not in list_of_strings`,
    # which tests list membership of the literal 'nan' rather than the
    # content of the current row.
    list_of_lists = [channels.strip().replace('[', '').replace(']', '').replace(',', '').split()
                     for channels in list_of_strings
                     if 'nan' not in channels
                     ]
    # add padding: all-zero rows up to the fixed length of 181
    remaining_pad = 181 - len(list_of_lists)
    while remaining_pad > 0:
        list_of_lists.append(list([0 for x in range(0, 64)]))
        remaining_pad = remaining_pad - 1
    # return padded one_hot matrix.  NOTE(fix): `np.float` was removed in
    # NumPy 1.24 — use the builtin `float` instead.
    return np.array(list_of_lists).astype(float)
# Decode every stored matrix string into a padded (181, 64) array.
data_df['one_hot_matrix'] = data_df['one_hot_matrix'].apply(string_to_matrix)
# +
# create train test split
from sklearn.model_selection import train_test_split

max_len = 181
width = 64
# Stack the per-row (181, 64) matrices into one (n_samples, 181, 64)
# tensor.  NOTE(fix): the sample count was hard-coded as 22615; derive it
# from the dataframe so the cell survives changes to the input data.
X = np.zeros((len(data_df), max_len, width))
for idx, one_hot_matrix in enumerate(data_df['one_hot_matrix'].values):
    X[idx, :, :] = one_hot_matrix
y = data_df['class'].values
# 70/30 split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)
# +
# simple model per Kim (2014)
# Baseline 1-D CNN: a single convolution over the one-hot sequence
# followed by global max pooling and a sigmoid output.
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv1D, GlobalMaxPooling1D

model = Sequential()
# 100 filters of width 3 over the (181, 64) one-hot input.
model.add(Conv1D(100, 3, activation='relu', input_shape=(181, 64)))
model.add(GlobalMaxPooling1D())
# model.add(Flatten())
# model.add(Dense(32))
model.add(Dropout(0.5))
# Single sigmoid unit: binary high/low-expression classification.
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=50, epochs=10,
          validation_data=(x_test, y_test), verbose=2)
# +
# Now trying to use an example based on Kim's paper.
# adapted from https://github.com/alexander-rakhlin/CNN-for-Sentence-Classification-in-Keras/blob/master/sentiment_cnn.py
# +
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Input, MaxPooling1D, Convolution1D, Flatten
from keras.layers.merge import Concatenate

# Multi-branch Kim-style CNN: parallel convolutions with several filter
# widths, each globally max-pooled, then concatenated.
filter_sizes = (3, 4, 5)
num_filters = 10
dropout_prob = (0.5, 0.8)
hidden_dims = 50

# prepare input shape
input_shape = (181, 64)
model_input = Input(shape=input_shape)
z = model_input
# z = Dropout(dropout_prob[0])(z)

# Convolutional block: one branch per filter size.
conv_blocks = []
for sz in filter_sizes:
    conv = Convolution1D(filters=num_filters,
                         kernel_size=sz,
                         padding="valid",
                         activation="relu",
                         strides=1)(z)
    # GlobalMaxPooling1D was imported in the previous cell.
    conv = GlobalMaxPooling1D()(conv)
    conv_blocks.append(conv)
z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
# z = Flatten()(z)
z = Dropout(dropout_prob[1])(z)
# z = Dense(hidden_dims, activation="selu")(z)
model_output = Dense(1, activation="sigmoid")(z)
model = Model(model_input, model_output)
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])

# Train the model
model.fit(x_train, y_train, batch_size=50, epochs=10,
          validation_data=(x_test, y_test), verbose=2)
# -
# ### the following architecture comes from the keras docs... seems to overfit
# +
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalAveragePooling1D, MaxPooling1D

# Deeper stacked-convolution architecture from the Keras sequence
# classification example; noted in the heading above as prone to
# overfitting on this dataset.
model = Sequential()
model.add(Conv1D(64, 3, activation='relu', input_shape=(181, 64)))
model.add(Conv1D(64, 3, activation='relu'))
model.add(MaxPooling1D(3))
model.add(Conv1D(128, 3, activation='relu'))
model.add(Conv1D(128, 3, activation='relu'))
model.add(GlobalAveragePooling1D())
# Heavy dropout (0.8) as a countermeasure to the overfitting.
model.add(Dropout(0.8))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=50, epochs=10,
          validation_data=(x_test, y_test), verbose=2)
| archive/c_nyambr/one_hot_CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Optimization Algorithms
#
# Deep learning models are comprised of a model architecture and the model parameters. The model architecture is chosen based on the task - for example Convolutional Neural Networks (CNNs) are very successful in handling image based tasks and Recurrent Neural Networks (RNNs) are better suited for sequential prediction tasks. However, the values of the model parameters are learned by solving an optimization problem during model training.
#
# To learn the parameters, we start with an initialization scheme and iteratively refine the parameter initial values by moving them along a direction that is opposite to the (approximate) gradient of the loss function. The extent to which the parameters are updated in this direction is governed by a hyperparameter called the learning rate. This process, known as gradient descent, is the backbone of optimization algorithms in deep learning. In MXNet, this functionality is abstracted by the [Optimizer API](http://beta.mxnet.io/api/gluon-related/mxnet.optimizer.html).
#
# When training a deep learning model using the MXNet [gluon API](http://beta.mxnet.io/guide/packages/gluon/index.html), a gluon [Trainer](http://beta.mxnet.io/guide/packages/gluon/trainer.html) is initialized with the all the learnable parameters and the optimizer to be used to learn those parameters. A single step of iterative refinement of model parameters in MXNet is achieved by calling [`trainer.step`](http://beta.mxnet.io/api/gluon/_autogen/mxnet.gluon.Trainer.step.html) which in turn uses the gradient (and perhaps some state information) to update the parameters by calling `optimizer.update`.
#
# Here is an example of how a trainer with an optimizer is created for, a simple Linear (Dense) Network.
# ```python
# from mxnet import gluon, optimizer
#
# net = gluon.nn.Dense(1)
# net.initialize()
# optim = optimizer.SGD(learning_rate=0.1)
# trainer = gluon.Trainer(net.collect_params(), optimizer=optim)
# ```
#
# In model training, the code snippet above would be followed by a training loop which, at every iteration performs a forward pass (to compute the loss), a backward pass (to compute the gradient of the loss with respect to the parameters) and a trainer step (which updates the parameters using the gradient). See the [gluon Trainer guide](http://beta.mxnet.io/guide/packages/gluon/trainer.html) for a complete example.
#
# We can also create the trainer by passing in the optimizer name and optimizer params into the trainer constructor directly, as shown below.
# ```python
# trainer = gluon.Trainer(net.collect_params(), optimizer='adam', optimizer_params={'learning_rate':1})
# ```
#
# ### What should I use?
# For many deep learning model architectures, the `sgd` and `adam` optimizers are a really good place to start. If you are implementing a deep learning model and trying to pick an optimizer, start with [`'sgd'`](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.SGD.html#mxnet.optimizer.SGD) as you will often get good enough results as long as your learning problem is tractable. If you already have a trainable model and you want to improve the convergence then you can try [`'adam'`](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.SGD.html#mxnet.optimizer.Adam). If you would like to improve your model training process further, there are a number of specialized optimizers out there with many of them already implemented in MXNet. This guide walks through these optimizers in some detail.
#
# ## Stochastic Gradient Descent
# [Gradient descent](https://en.wikipedia.org/wiki/Gradient_descent) is a general purpose algorithm for minimizing a function using information from the gradient of the function with respect to its parameters. In deep learning, the function we are interested in minimizing is the [loss function](http://beta.mxnet.io/guide/packages/gluon/loss.html). Our model accepts training data as inputs and the loss function tells us how good our model predictions are. Since the training data can routinely consist of millions of examples, computing the loss gradient on the full batch of training data is very computationally expensive. Luckily, we can effectively approximate the full gradient with the gradient of the loss function on randomly chosen minibatches of our training data. This variant of gradient descent is [stochastic gradient descent](https://en.wikipedia.org/wiki/Stochastic_gradient_descent).
#
# Technically, stochastic gradient descent (SGD) refers to an online approximation of the gradient descent algorithm that computes the gradient of the loss function applied to a *single datapoint*, instead of your entire dataset, and uses this approximate gradient to update the model parameter values. However, in MXNet, and other deep learning frameworks, the SGD optimizer is agnostic to how many datapoints the loss function is applied to, and it is more effective to use a mini-batch loss gradient, as described earlier, instead of a single datapoint loss gradient.
#
# ### [SGD optimizer](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.SGD.html#mxnet.optimizer.SGD)
#
# For an SGD optimizer initialized with learning rate $lr$, the update function accepts parameters (weights) $w_i$, and their gradients $grad(w_i)$, and performs the single update step:
#
# $$w_{i+1} = w_i + lr\cdot -grad(w_i)$$
#
# visualized in the diagram shown below.
#
# <p align="center">
# <img src="images/sgd_animation.gif" alt="drawing"/>
# </p>
#
#
# ### Weight decay
# The SGD update step can be modified by introducing an extra term that enforces a penalty on the size of the parameters. This is achieved by subtracting a fraction of the weight $\delta\cdot w$ during the weight update as shown below.
#
# $$w_{i+1} = w_i + lr\cdot (-grad(w_i) -\delta\cdot w_i)$$
#
# Introducing weight decay modifies the objective of the optimization problem by adding an implicit regularization term that penalizes large weights. Weight decay is discussed more extensively in this [paper](https://papers.nips.cc/paper/563-a-simple-weight-decay-can-improve-generalization.pdf).
#
# ### Momentum
# The convergence of the SGD optimizer can be accelerated by incorporating momentum. Originally proposed by [Polyak (1964)](https://www.sciencedirect.com/science/article/abs/pii/0041555364901375), SGD with momentum improves the approximation of the gradient term by incorporating the gradients from previous update steps. To achieve this, SGD with momentum stores and 'remembers' the update at each iteration to be included in the next iteration. In the equations below we denote the momentum history as $v$.
#
# For the first update the SGD optimizer with momentum performs the single update step:
#
# $$ v_1= lr\cdot -grad(w_0)$$
# $$ w_1= w_0 + v_1 $$
#
# For subsequent updates, SGD with momentum, with momentum parameter $\gamma$, performs the update step:
#
# $$ v_{i+1} = \gamma \cdot v_{i} + lr\cdot -grad(w_{i}) $$
# $$ w_{i+1} = w_i + v_{i+1} $$
#
# This is also shown in the diagram below.
#
# <p align="center">
# <img src="images/momentum_sgd_animation.gif" alt="drawing"/>
# </p>
#
#
# The use of SGD with momentum for learning in neural networks was introduced by Rumelhart, Hinton and Williams in [Learning Internal Representations by Error Propagation](https://dl.acm.org/citation.cfm?id=104279.104293).
#
# To create an SGD optimizer with momentum $\gamma$ and weight decay in MXNet simply use the following code.
# ```python
# sgd_optimizer = optimizer.SGD(learning_rate=0.1, wd=0., momentum=0.8)
# ```
#
# ### [Nesterov Accelerated Stochastic Gradient Descent](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.NAG.html#mxnet.optimizer.NAG)
#
# The momentum method of [Nesterov](https://goo.gl/M5xbuX) is a modification to SGD with momentum that allows for even faster convergence in practice. With Nesterov accelerated gradient (NAG) descent, the update term is derived from the gradient of the loss function with respect to *refined parameter values*. These refined parameter values are computed by performing a SGD update step using the momentum history as the gradient term.
#
# Alternatively, you can think of the NAG optimizer as performing two update steps:
# * The first (internal) update step uses the current momentum history $v_i$ to calculate the refined parameter values $(w_i + \gamma \cdot v_i)$. This is also known as the lookahead step.
# * The second (actual) step uses the gradient of the loss function with respect to the lookahead parameter values from the first step and the current momentum history $v_i$ to obtain a new direction to update our original parameter values, like classical momentum.
#
# The NAG optimizer with momentum parameter $\gamma$ performs the update step:
#
# $$ v_{i+1} = \gamma \cdot v_{i} + lr\cdot -grad(w_{i} + \gamma \cdot v_i) $$
# $$ w_{i+1} = w_i + v_{i+1} $$
#
# <p align="center">
# <img src="images/nesterov_momentum_animation.gif" alt="drawing"/>
# </p>
#
#
# The effects of using NAG over SGD and classical momentum are discussed in this [paper](http://proceedings.mlr.press/v28/sutskever13.pdf) by Sutskever et al.
#
# The NAG optimizer can be initialized in MXNet by using the code snippet below or by creating a trainer with argument `optimizer='nag'`.
# ```python
# nag_optimizer = optimizer.NAG(learning_rate=0.1, momentum=0.8)
# ```
#
# ## Adaptive Learning Rate Methods
#
# The gradient methods implemented by the optimizers described above use a global learning rate hyperparameter for all parameter updates. This has a well-documented shortcoming in that it makes the training process and convergence of the optimization algorithm really sensitive to the choice of the global learning rate. Adaptive learning rate methods avoid this pitfall by incorporating some history of the gradients observed in earlier iterations to scale step sizes (learning rates) to each learnable parameter in the model.
#
# ### [AdaGrad](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.AdaGrad.html)
#
# The AdaGrad optimizer, which implements the optimization method originally described by [Duchi et al](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf), multiplies the global learning rate by the $L_2$ norm of the preceding gradient estimates for each parameter to obtain the per-parameter learning rate. To achieve this, AdaGrad introduces a new term which we'll denote as $g^2$ - the accumulated square of the gradient of the loss function with respect to the parameters.
#
# Thus the AdaGrad optimizer update function performs the update steps below to obtain $i+1$th refinement.
#
# $$ g^2_{i+1} = g^2_{i} + grad(w_i)^2 $$
# $$ w_{i+1} = w_i + \dfrac{lr}{\sqrt{g^2 + \epsilon}}\cdot -grad(w_i)$$
#
# The $\epsilon$ term is a tiny positive value introduced to avoid division by zero due to floating point issues.
#
# The overarching benefit of AdaGrad over SGD is that it ensures the overall convergence is more resilient to the choice of the global learning rate $lr$, especially in tasks such as natural language processing, where some data is sparse but the parameters influenced by the sparse data are quite informative.
#
# To instantiate the Adagrad optimizer in MXNet you can use the following line of code.
# ```python
# adagrad_optimizer = optimizer.AdaGrad(learning_rate=0.1, eps=1e-07)
# ```
#
# ### [RMSProp](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.RMSProp.html)
#
# RMSProp, introduced by [Tieleman and Hinton](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf), is similar to AdaGrad described above, but, instead of accumulating the sum of historical square gradients, maintains an exponential decaying average of the historical square gradients, in order to give more weighting to more recent gradients.
#
# For rmsprop, we introduce the term $\mathbb{E}[g^2]$ - the decaying average over past squared gradients and $\beta$ as the forgetting factor. The rmsprop optimizer performs the update given below.
#
#
# $$ \mathbb{E}[g^2]_{i+1} = \beta\cdot\mathbb{E}[g^2]_{i} + (1-\beta)\cdot [grad(w_{i})]^2 $$
# $$ w_{i+1} = w_i + \dfrac{lr}{\sqrt{\mathbb{E}[g^2]_{i+1} + \epsilon}}\cdot -grad(w_i) $$
#
# The $\epsilon$ term is included, as in AdaGrad, for numerical stability.
#
# RMSProp was derived independently of AdaGrad and the name RMSProp derives from a combination of [RProp](https://en.wikipedia.org/wiki/Rprop) and the RMS, root mean square, operation in the denominator of the weight update.
#
#
# #### RMSProp (Centered)
# The MXNet RMSProp optimizer with the `centered=True` argument implements a variant of the RMSProp update described by [Alex Graves](https://arxiv.org/pdf/1308.0850v5.pdf), which centres the second moment $\mathbb{E}[g^2]$ or decaying average of square gradients by subtracting the square of decaying average of gradients. It also adds an explicit momentum term to weight past update steps. Representing the decaying average of gradients as $\mathbb{E}[g]$ and momentum parameter as $\gamma$, we add another equation to the non-centered rmsprop update described above.
#
# The centered RMSProp optimizer performs the update step:
#
# $$ \mathbb{E}[g]_{i+1} = \beta\cdot\mathbb{E}[g]_{i} + (1-\beta)\cdot [grad(w_{i})] $$
# $$ \mathbb{E}[g^2]_{i+1} = \beta\cdot\mathbb{E}[g^2]_{i} + (1-\beta)\cdot [grad(w_{i})]^2 $$
# $$ v_{i+1} = \gamma \cdot v_{i} + \dfrac{lr}{\sqrt{\mathbb{E}[g^2]_{i+1} - \mathbb{E}[g]^2_{i+1}+ \epsilon}}\cdot -grad(w_{i}) $$
# $$ w_{i+1} = w_i + v_{i+1} $$
#
# Here is an example snippet creating the RMSProp optimizer in MXNet.
# ```python
# rmsprop_optimizer = optimizer.RMSProp(learning_rate=0.001, gamma1=0.9, gamma2=0.9, epsilon=1e-07, centered=False)
# ```
#
# In the code snippet above, `gamma1` is $\beta$ in the equations above and `gamma2` is $\gamma$, which is only used where `centered=True`.
#
# ### [AdaDelta](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.AdaDelta.html)
#
# AdaDelta was introduced to address some remaining lingering issues with AdaGrad and RMSProp - the selection of a global learning rate. AdaGrad and RMSProp assign each parameter its own learning rate but the per-parameter learning rate are still calculated using the global learning rate. In contrast, AdaDelta does not require a global learning rate, instead, it tracks the square of previous update steps, represented below as $\mathbb{E}[\Delta w^2]$ and uses the root mean square of the previous update steps as an estimate of the learning rate.
#
# The AdaDelta optimizer performs the following equations in its update step:
#
# $$ \mathbb{E}[\Delta w^2]_{i+1} = \beta\cdot\mathbb{E}[\Delta w^2]_i + (1 - \beta) \cdot (w_i - w_{i-1})^2 $$
# $$ \mathbb{E}[g^2]_{i+1} = \beta\cdot\mathbb{E}[g^2]_{i} + (1-\beta)\cdot [grad(w_{i})]^2 $$
# $$ w_{i+1} = w_i + \dfrac{\sqrt{\mathbb{E}[\Delta w^2] + \epsilon}}{\sqrt{\mathbb{E}[g^2]_{i+1} + \epsilon}} \cdot -grad(w_i)$$
#
# As evident from the above equations, AdaDelta is similar to RMSProp but does not require you to specify $lr$ and instead uses $\sqrt{\mathbb{E}[\Delta w^2] + \epsilon}$ as the estimated learning rate. AdaDelta was introduced by Zeiler in this [paper](https://arxiv.org/abs/1212.5701).
#
# Here is the code snippet creating the AdaDelta optimizer in MXNet. The argument `rho` in the code is $\beta$ in the update equations. Notice there is no learning rate argument in the code.
# ```python
# adadelta_optimizer = optimizer.AdaDelta(rho=0.9, epsilon=1e-07)
# ```
#
# ### [Adam](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.Adam.html)
# Adam, introduced by [Kingma and Ba](https://arxiv.org/abs/1412.6980), is one of the popular adaptive algorithms for deep learning. It combines elements of RMSProp with momentum SGD. Like RMSProp, Adam uses the RootMeanSquare of decaying average of historical gradients but also explicitly keeps track of a decaying average of momentum and uses that for the update step direction. Thus, Adam accepts two hyperparameters $\beta_1$ and $\beta_2$ for momentum weighting and gradient RMS weighting respectively. Adam also accepts a global learning rate that's adaptively tuned to each parameter with the gradient RootMeanSquare. Finally, Adam also includes bias correction steps within the update that transform the biased estimates of first and second order moments, $v_{i+1}$ and $\mathbb{E}[g^2]_{i+1}$ to their unbiased counterparts $\tilde{v}_{i+1}$ and $\tilde{\mathbb{E}[g^2]}_{i+1}$
#
# The Adam optimizer performs the update step described the following equations:
#
# $$ v_{i+1} = \beta_1 \cdot v_{i} + (1 - \beta_1) \cdot grad(w_i) $$
# $$ \mathbb{E}[g^2]_{i+1} = \beta_2\cdot\mathbb{E}[g^2]_{i} + (1-\beta_2)\cdot [grad(w_{i})]^2 $$
# $$ \tilde{v}_{i+1} = \dfrac{v_{i+1}}{1 - (\beta_1)^{i+1}} $$
# $$ \tilde{\mathbb{E}[g^2]}_{i+1} = \dfrac{\mathbb{E}[g^2]_{i+1}}{1 - (\beta_2)^{i+1}} $$
# $$ w_{i+1} = w_i + \dfrac{lr}{\sqrt{\tilde{\mathbb{E}[g^2]}_{i+1}} + \epsilon} \cdot -\tilde{v}_{i+1} $$
#
# In MXNet, you can construct the Adam optimizer with the following line of code.
# ```python
# adam_optimizer = optimizer.Adam(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08)
# ```
#
# ### [Adamax](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.Adamax.html)
# Adamax is a variant of Adam also included in the original paper by [Kingma and Ba](https://arxiv.org/abs/1412.6980). Like Adam, Adamax maintains a moving average for first and second moments but Adamax uses the $L_{\infty}$ norm for the exponentially weighted average of the gradients, instead of the $L_2$ norm used in Adam used to keep track of the gradient second moment. The $L_{\infty}$ norm of a vector is equivalent to taking the maximum absolute value of elements in that vector.
#
# $$ v_{i+1} = \beta_1 \cdot v_{i} + (1 - \beta_1) \cdot grad(w_i) $$
# $$ g^\infty_{i+1} = \mathtt{max}(\beta_2\cdot g^\infty_{i}, |{grad(w_i)}|) $$
# $$ \tilde{v}_{i+1} = \dfrac{v_{i+1}}{1 - \beta_1^{i+1}} $$
# $$ w_{i+1} = w_i + \dfrac{lr}{g^\infty_{i+1} + \epsilon} \cdot - \tilde{v}_{i+1} $$
#
# See the code snippet below for how to construct Adamax in MXNet.
# ```python
# adamax_optimizer = optimizer.Adamax(learning_rate=0.002, beta1=0.9, beta2=0.999)
# ```
#
# ### [Nadam](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.Nadam.html)
# Nadam is also a variant of Adam and draws from the perspective that Adam can be viewed as a combination of RMSProp and classical Momentum (or Polyak Momentum). Nadam replaces the classical Momentum component of Adam with Nesterov Momentum (See [paper](http://cs229.stanford.edu/proj2015/054_report.pdf) by Dozat). The consequence of this is that the gradient used to update the weighted average of the momentum term is a lookahead gradient as is the case with NAG.
#
# The Nadam optimizer performs the update step:
#
# $$ v_{i+1} = \beta_1 \cdot v_{i} + (1 - \beta_1) \cdot grad(w_i + \beta_1 \cdot v_{i}) $$
# $$ \mathbb{E}[g^2]_{i+1} = \beta_2\cdot\mathbb{E}[g^2]_{i} + (1-\beta_2)\cdot [grad(w_{i})]^2 $$
# $$ \tilde{v}_{i+1} = \dfrac{v_{i+1}}{1 - \beta_1^{i+1}} $$
# $$ \tilde{\mathbb{E}[g^2]}_{i+1} = \dfrac{\mathbb{E}[g^2]_{i+1}}{1 - \beta_2^{i+1}} $$
# $$ w_{i+1} = w_i + \dfrac{lr}{\sqrt{\tilde{\mathbb{E}[g^2]}_{i+1}} + \epsilon}\cdot - \tilde{v}_{i+1} $$
#
# Here is the line of code to create the NAdam optimizer in MXNet.
# ```python
# nadam_optimizer = optimizer.Nadam(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08)
# ```
#
# ## SGD optimized for large scale distributed training
#
# Training very deep neural networks can be time consuming and as such it is very common now to see practitioners turn to distributed training on multiple processors on the same machine or even across a fleet of machines to parallelize network training because this can reduce neural network training time from days to minutes.
#
# While all the preceding optimizers, from SGD to Adam, can be readily used in the distributed setting, the following optimizers in MXNet provide extra features targeted at alleviating some of the problems associated with distributed training.
#
# ### [Signum](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.Signum.html)
# In distributed training, communicating gradients across multiple worker nodes can be expensive and create a performance bottleneck. The Signum optimizer addresses this problem by transmitting just the sign of each minibatch gradient instead of the full precision gradient. In MXNet, the signum optimizer implements two variants of compressed gradients described in the paper by [Bernstein et al](https://arxiv.org/pdf/1802.04434.pdf).
#
# The first variant, achieved by constructing the Signum optimizer with `momentum=0`, implements SignSGD update which performs the update below.
#
# $$ w_{i+1} = w_i - lr \cdot sign(grad(w_i)) $$
#
# The second variant, achieved by passing a non-zero momentum parameter implements the Signum update which is equivalent to SignSGD and momentum. For momentum parameter $0 < \gamma < 1 $, the Signum optimizer performs the following update:
#
# $$ v_{i+1} = \gamma \cdot v_i + (1 - \gamma) \cdot grad(w_i) $$
# $$ w_{i+1} = w_i - lr \cdot sign(v_{i+1}) $$
#
# Here is how to create the signum optimizer in MXNet.
# ```python
# signum_optimizer = optimizer.Signum(learning_rate=0.01, momentum=0.9, wd_lh=0.0)
# ```
#
# ### [LBSGD](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.LBSGD.html)
# LBSGD stands for Large Batch Stochastic Gradient Descent and implements a technique where Layer-wise Adaptive Rate Scaling (LARS) is used to maintain a separate learning rate for each layer of the neural network. LBSGD has no additional modifications to SGD and performs the same parameter update steps as the SGD optimizer described above.
#
# LBSGD was introduced by [You et al](https://arxiv.org/pdf/1708.03888.pdf) for distributed training with data-parallel synchronous SGD across multiple worker nodes to overcome the issue of reduced model accuracy when the number of workers, and by extension effective batch size, is increased.
#
# Here is how to initialize the LBSGD optimizer in MXNet.
# ```python
# lbsgd_optimizer = optimizer.LBSGD(momentum=0.0,
# multi_precision=False,
# warmup_strategy='linear',
# warmup_epochs=5,
# batch_scale=1,
# updates_per_epoch=32,
# begin_epoch=0,
# num_epochs=60)
# ```
#
# LBSGD has a number of extra keyword arguments described below
# * `multi_precision` - When True performs updates with float32 precision weights regardless of whether weights are initialized with lower precision. When False perform updates with same precision as the weights when initialized. Set to True to improve performance when training with low precision weight representations.
# * `warmup_strategy` - The warmup is period where the learning rate is increased through the first few epochs. The following strategies are supported: ['linear', 'power2', 'sqrt','lars']
# * `warmup_epochs` - How many epochs to perform warmup for
# * `batch_scale` - use batch size*numworkers
# * `updates_per_epoch` - How many updates to the learning rate to perform every epoch. For example during warmup the warmup strategy is applied to increase the learning rate a total of `warmup_epochs*updates_per_epoch` number of times.
# * `begin_epoch` - The epoch at which to start warmup.
#
# ### [DCASGD](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.DCASGD.html)
#
# The DCASGD optimizer implements Delay Compensated Asynchronous Stochastic Gradient Descent by [Zheng et al](https://arxiv.org/pdf/1609.08326.pdf). In asynchronous distributed SGD, it is possible that a training worker node adds its gradients too late to the global (parameter) server, resulting in a delayed gradient being used to update the current parameters. DCASGD addresses this issue of delayed gradients by compensating for this delay in the parameter update steps.
#
# If $grad(w_i)$ denotes the delayed gradient, $w_{i+\tau}$ denotes the parameter values at the current iteration, and $\lambda$ is the delay scale factor, the DCASGD optimizer update function performs the update:
#
# $$ w_{i+\tau+1} = w_{i+\tau} − lr \cdot (grad(w_i) + \lambda \cdot grad(w_i)^2 \cdot (w_{i+\tau} − w_i)) $$
#
# The DCASGD optimizer in MXNet can be initialized using the code below.
# ```python
# dcasgd_optimizer = optimizer.DCASGD(momentum=0.0, lamda=0.04)
# ```
#
# ## Online Learning Algorithms
# Before deep neural networks became popular post 2012, people were already solving large scale optimization problems to train (shallow) machine learning models. One particular area this was done was active or online learning where the model is continually learning and updating its parameters after it is deployed to production. In online learning, the model has to make predictions on new inputs but moments later may become aware of the true value of what it tried to predict and use this information to update its parameters.
#
# The class of optimization algorithms designed to tackle online learning problems have also seen some success in offline training of deep neural models. The following optimizers are algorithms taken from online learning that have been implemented in MXNet.
#
# ### [FTRL](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.Ftrl.html)
#
# FTRL stands for Follow the Regularized Leader and describes a family of algorithms originally designed for online learning tasks.
#
# For each iteration, an FTRL algorithm finds the next parameter by solving the following optimization problem, which minimizes the total regret, i.e. the sum of the inner products of all preceding gradients with the next parameter. The optimization objective is regularized so that the next parameter is close (proximal) in $L_2$ norm to the preceding parameter values and is sparse, which is enforced by the $L_1$ norm.
#
# $$ w_{i+1} = \texttt{argmin}_{w} \left[\sum_{j=1}^{i} grad(w_i)\cdot w + \dfrac{1}{2}\sum_{j=1}^{i} \sigma_j \cdot ||w - w_j||_2^2 + \lambda ||w||_1\right]$$
#
# Due to the similarity of online learning and neural network training, there is an [equivalence](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37013.pdf) between variants of gradient descent and FTRL algorithms. In fact, the $w$ that minimizes FTRL with only $L_2$ regularization (i.e $\lambda$ in the equation above is set to 0) is exactly the $w$ derived from stochastic gradient descent update.
#
# The version of FTRL implemented as an MXNet optimizer is from [McMahan et al](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/41159.pdf) and encourages sparse parameters due to $L_1$ regularization. It performs the following update:
#
# $$ z_{i+1} = z_i + \dfrac{\left(\sqrt{\eta_i + grad(w_i)^2} - \sqrt{\eta_i}\right) \cdot w_i}{lr}$$
# $$ \eta_{i+1} = \eta_i + grad(w_i)^2$$
# $$ w_{i+1} = (|z_{i+1}| > \lambda) \cdot \left[ \dfrac{-lr}{\beta + \sqrt{\eta_{i+1}}} (z_{i+1} - \lambda \cdot sign(z_{i+1}))\right] $$
#
# Here is how to initialize the FTRL optimizer in MXNet
# ```python
# ftrl_optimizer = optimizer.Ftrl(lamda1=0.01, learning_rate=0.1, beta=1)
# ```
#
# ### [FTML](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.FTML.html)
#
# FTML stands for Follow the Moving Leader and is a variant of the FTRL family of algorithms adapted specifically to deep learning. Regular FTRL algorithms, described above, solve an optimization problem every update that involves the sum of all previous gradients. This is not well suited for the non-convex loss functions in deep learning. In the non-convex settings, older gradients are likely uninformative as the parameter updates can move to converge towards different local minima at different iterations. FTML addresses this problem by reweighing the learning subproblems in each iteration as shown below.
#
#
# $$ w_{i+1} = \texttt{argmin}_{w} \left[\sum_{j=1}^{i} (1 − \beta_1)\beta_1^{i−j} grad(w_i)\cdot w + \dfrac{1}{2}\sum_{j=1}^{i} \sigma_j \cdot ||w - w_j||_2^2 \right]$$
#
# $\beta_1$ is introduced to compute the exponential moving average of the previous accumulated gradient. The improvements of FTML over FTRL can be compared to the improvements of RMSProp/Adam to AdaGrad. According to [Zheng et al](http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf), FTML enjoys some of the nice properties of RMSProp and Adam while avoiding their pitfalls.
#
# The FTML optimizer performs the following update:
#
# $$ v_{i+1} = \beta_2 \cdot v_i + (1 - \beta_2) \cdot grad(w_i)^2$$
# $$ d_{i+1} = \dfrac{1 - \beta_1^{i+1}}{lr} \big(\sqrt{\dfrac{v_{i+1}}{1 - \beta_2^{i+1}}} + \epsilon\big)$$
# $$ z_{i+1} = \beta_1 \cdot z_i + (1 - \beta_1)\cdot grad(w_i) - (d_{i+1} - \beta_1 \cdot d_i) \cdot w_i$$
# $$ w_{i+1} = \dfrac{-z_{i+1}}{d_{i+1}} $$
#
# In MXNet, you can initialize the FTML optimizer using
# ```python
# ftml_optimizer = optimizer.FTML(beta1=0.6, beta2=0.999, epsilon=1e-08)
# ```
#
# Here `beta1` and `beta2` are similar to the arguments in the Adam optimizer.
#
# ## Bayesian SGD
# A notable shortcoming of deep learning is that the model parameters learned after training are only point estimates, therefore deep learning model predictions have no information about uncertainty or confidence bounds. This is in contrast to a fully Bayesian approach which incorporates prior distributions on the model parameters and estimates the model parameters as belonging to a posterior distribution. This approach allows the predictions of a bayesian model to have information about uncertainty, as you can sample different values from the posterior distribution to obtain different model parameters. One approach to close the bayesian gap in deep learning is to incorporate the gradient descent algorithm with properties that allow the model parameters to converge to a distribution instead of a single value or point estimate.
#
# ### [SGLD](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.SGLD.html)
# Stochastic Gradient Langevin Dynamics or SGLD was introduced to allow uncertainties around model parameters to be captured directly during model training. With every update in SGLD, the learning rate decreases to zero and a gaussian noise of known variances is injected into the SGD step. This has the effect of having the training parameters converge to a sufficient statistic for a posterior distribution instead of simply a point estimate of the model parameters.
#
# SGLD performs the parameter update:
#
# $$ w_{i+1} = w_i + \dfrac{lr_{i+1}}{2}\cdot -grad(w_i) + \eta_{i+1}$$
#
# where $ \eta_{i+1} \sim N(0, lr_{i+1})$ i.e $\eta_{i+1}$ is drawn from a zero centered gaussian with variance $lr_{i+1}$
#
# SGLD was introduced by [Patterson and Teh](https://papers.nips.cc/paper/4883-stochastic-gradient-riemannian-langevin-dynamics-on-the-probability-simplex.pdf) and the optimizer can be created in MXNet with the following line of code.
# ```python
# sgld_optimizer = optimizer.SGLD()
# ```
#
# ## Custom Optimizer
#
# If you would like to use a particular optimizer that is not yet implemented in MXNet or you have a custom optimization algorithm of your own that you would like to use to train your model, it is very straightforward to create a custom optimizer.
#
# Step 1: First create a function that is able to perform your desired updates given the weights, gradients and other state information.
#
# Step 2: You will have to write your own optimizer class that extends the [base optimizer class](http://beta.mxnet.io/api/gluon-related/_autogen/mxnet.optimizer.Optimizer.html#mxnet.optimizer.Optimizer) and override the following functions
# * `__init__`: accepts the parameters of your optimizer algorithm as inputs as saves them as member variables.
# * `create_state`: If your custom optimizer uses some additional state information besides the gradient, then you should implement a function that accepts the weights and returns the state.
# * `update`: Implement your optimizer update function using the function in Step 1
#
# Step 3: Register your optimizer with `@register` decorator on your optimizer class.
#
# See the [source code](http://beta.mxnet.io/_modules/mxnet/optimizer/optimizer.html#NAG) for the NAG optimizer for a concrete example.
#
# ## Summary
# * MXNet implements many state-of-the-art optimizers which can be passed directly into a gluon trainer object. Calling `trainer.step` during model training uses the optimizers to update the model parameters.
# * Gradient descent algorithms minimize the loss function by using information from the gradient of the loss function and a learning rate hyperparameter.
# * Stochastic Gradient Descent is the backbone of deep learning optimization algorithms and simple SGD optimizers can be made really powerful by incorporating momentum, for example `sgd` with momentum and `nag`.
# * Adaptive learning rate methods compute per-parameter learning rates to make optimization less sensitive to the choice of global learning rate. `adam` is a popular adaptive learning rate optimizer.
# * Certain MXNet optimizers like `Signum` and Large Batch SGD are well suited for large scale distributed training as they consider challenges specific to these tasks.
# * MXNet also implements optimizers from online learning like `FTML`, `FTRL`, and optimizers for Bayesian learning like `SGLD`.
# * Finally, it is easy to create a custom optimizer by following the patterns in the source code implementation for the optimizers that already exist in MXNet.
#
# ## Next Steps
# While optimization and optimizers play a significant role in deep learning model training, there are still other important components to model training. Here are a few suggestions about where to look next.
# * The [trainer API](http://beta.mxnet.io/api/gluon/mxnet.gluon.Trainer.html) and [guide](http://beta.mxnet.io/guide/packages/gluon/trainer.html) have information about how to construct the trainer that encapsulate the optimizers and will actually be used in your model training loop.
# * Check out the guide to MXNet gluon [Loss functions](http://beta.mxnet.io/guide/packages/gluon/loss.html) and [custom losses](http://beta.mxnet.io/guide/packages/gluon/custom-loss/custom-loss.html) to learn about the loss functions optimized by these optimizers, see what loss functions are already implemented in MXNet and understand how to write your own custom loss functions.
# * Take a look at the [guide to parameter initialization](http://beta.mxnet.io/guide/packages/gluon/init.html) in MXNet to learn about what initialization schemes are already implemented, and how to implement your custom initialization schemes.
# * Also check out the [autograd guide](http://beta.mxnet.io/guide/packages/autograd/autograd.html) to learn about automatic differentiation and how gradients are automatically computed in MXNet.
# * Make sure to take a look at the [guide to scheduling learning rates](https://mxnet.incubator.apache.org/versions/master/tutorials/gluon/learning_rate_schedules.html) to learn how to create learning rate schedules to supercharge the convergence of your optimizer.
# * Finally take a look at the [KVStore API](http://beta.mxnet.io/api/gluon-related/mxnet.kvstore.KVStore.html#mxnet.kvstore.KVStore) to learn how parameter values are synchronized over multiple devices.
| static_websites/python/docs/_sources/tutorials/packages/optimizer/optimizer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/LeonVillanueva/CoLab/blob/master/Google_CoLab_MNIST.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="yKl3AHP_tngM" colab_type="text"
# # Setup and Data
# + id="YiH3HRTgtX68" colab_type="code" outputId="46168a4d-6e1e-4439-df1d-1a98fcb42139" colab={"base_uri": "https://localhost:8080/", "height": 110}
# !pip install -q tensorflow==2.0.0-beta1
# + id="FkosqT3NtwWD" colab_type="code" colab={}
# %%capture
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
import seaborn as sns
# + id="IrJyZ3nZ9Mrx" colab_type="code" colab={}
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# + id="_03GS5IH9u3K" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="969aed45-94e3-4294-f63f-d8ab10cff8a8"
# Confirm the TensorFlow version (the 2.0.0-beta1 wheel installed above).
tf.__version__
# + id="xlEwe6Wz9ys9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="8180b845-a1e2-41f5-c26a-fba30f4a6c91"
# MNIST: 60k training / 10k test 28x28 grayscale digit images, integer labels 0-9.
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
# + id="ujuv-Sui_h_L" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="adf9504f-ca05-48e9-90aa-3ab374955e55"
# Raw pixel values run 0-255.
print (np.max (X_train))
print (np.max (X_test))
# + id="JTBgALy6_lFj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="0865add0-932c-4b89-c0ae-ed482958b1b6"
# Scale pixels into [0, 1]; each split is divided by its own max (both are 255 here).
X_train = X_train / np.max (X_train)
X_test = X_test / np.max (X_test)
X_train.shape
# + [markdown] id="bRl8RO0qA2L-" colab_type="text"
# # Model
# + id="3KGmfQeqCxt1" colab_type="code" colab={}
M, N, D = X_train.shape  # M samples, N x D pixels (28 x 28)
# + id="R8O3P1hYAzwe" colab_type="code" colab={}
# Simple MLP classifier: flatten -> 256 ReLU -> 25% dropout -> 10-way softmax.
layers_multi = [tf.keras.layers.Flatten (input_shape=(D,D)),
                tf.keras.layers.Dense (256, activation='relu'),
                tf.keras.layers.Dropout (0.25),
                tf.keras.layers.Dense (10, activation='softmax')]
# + id="3Nd3WHq4AnAy" colab_type="code" colab={}
multi = tf.keras.models.Sequential (layers_multi)
# + id="A25ZjlYgGP8w" colab_type="code" colab={}
adam = tf.keras.optimizers.Adam (learning_rate=0.0001, decay=1e-6)
# + id="ZbXED7EXC570" colab_type="code" colab={}
# sparse_categorical_crossentropy: labels stay as integer class ids (no one-hot needed).
multi.compile (optimizer=adam,
               loss='sparse_categorical_crossentropy',
               metrics=['accuracy'])
# + id="r4JLxndbDefS" colab_type="code" colab={}
# %%capture
m = multi.fit (X_train, y_train, validation_data=(X_test, y_test), epochs=10)
# + id="GZ9fLdQVE9rI" colab_type="code" outputId="1db8dc74-96e4-4d78-e2f2-ce1c5346b3b3" colab={"base_uri": "https://localhost:8080/", "height": 90}
print ('Training Set Evaluation : ' + str(multi.evaluate (X_train, y_train)))
print ('Test Set Evaluation : ' + str(multi.evaluate (X_test, y_test)))
# + id="nmsNYlKvEAHV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="9380da5b-800d-41e1-a29d-d38704f729a2"
# Training vs validation loss across epochs.
plt.plot (m.history['loss'], label='loss', color='#840000')
plt.plot (m.history['val_loss'], label='validation loss', color='#00035b')
plt.legend ()
# + id="3k7yAhOMFXiM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="dc985d6f-029c-4fe4-e017-0065718a8c7c"
# Training vs validation accuracy across epochs.
plt.plot (m.history['accuracy'], label='accuracy', color='#840000')
plt.plot (m.history['val_accuracy'], label='validation accuracy', color='#00035b')
plt.legend ()
# + [markdown] id="MtatE7YoL-YJ" colab_type="text"
# # Confusion Matrix Evaluation
# > *https://androidkt.com/keras-confusion-matrix-in-tensorboard/*
# + id="yKbBGKIiMeIM" colab_type="code" colab={}
# NOTE(review): Sequential.predict_classes was removed in later TF releases;
# on this notebook's TF 2.0 beta it returns the argmax class id per sample.
y_pred = multi.predict_classes (X_test)
# + id="8cUK_KSiP_1D" colab_type="code" colab={}
# Raw confusion matrix: rows = true labels, columns = predicted labels.
con_mat = tf.math.confusion_matrix(labels=y_test, predictions=y_pred).numpy()
# + [markdown] id="AngpnTcuQIEW" colab_type="text"
# ### Normalize and Graph
# + id="ZEgWtT1rQE-Y" colab_type="code" colab={}
# Normalize each row by its class support so cells are proportions, rounded to 2 decimals.
con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2)
# + id="aBz_oXfKQM0c" colab_type="code" colab={}
con_mat_df = pd.DataFrame(con_mat_norm)
# + id="3-wqboYBQVLP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 599} outputId="80a6004d-c5fd-4298-b1e4-b48e99f0b019"
figure = plt.figure(figsize=(8, 8))
sns.heatmap(con_mat_df, annot=True, cmap=plt.cm.BuPu)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
# + id="yQ-FkRV7RMpl" colab_type="code" colab={}
# Indices of test samples the model got wrong.
misclass = np.where (y_pred != y_test)[0]
# + id="wmtAPQLKRdtO" colab_type="code" colab={}
# Pick one misclassified example at random for visual inspection.
i = np.random.choice (misclass)
# + id="9tWlhtU1Reet" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="c47ccc69-e562-49f9-8493-b8ee5ef59687"
plt.imshow (X_test[i], cmap='Blues')
plt.title ('True: %s, Predicted: %s' % (y_test[i], y_pred[i]))
# + id="I1oeR4o2SL5X" colab_type="code" colab={}
# + id="oCGqlpHeSRC1" colab_type="code" colab={}
# + id="eE7kLj_VRwWf" colab_type="code" colab={}
| Google_CoLab_MNIST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MLServer - Retrieve and Classify
#
# This notebook retrieves data from an Azure Function server, performs semantic segmentation using a machine learning model and uploads the results back to the cloud.
#
# All of that is performed using API REST endpoints exposed in Azure.
#
# Segmentation model is DeepLabV3+, at https://github.com/bonlime/keras-deeplab-v3-plus
#
# ## Retrieving and Visualizing Next Task
# +
import requests
import base64
from PIL import Image
import io
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
# Azure Function endpoints; the `code=` query parameter is the function access key.
# NOTE(review): committing access keys in a notebook leaks credentials — consider
# loading them from environment variables instead.
getnexttask_url = 'https://blobstorefuncteste.azurewebsites.net/api/getnexttask_v0?code=YCbRpKphpfyJR5Sa4oELsAaV4Zz6Pt8EdtasFt10zazr6ae3mQZapA=='
puttaskresult_url = 'https://blobstorefuncteste.azurewebsites.net/api/puttaskresult_v0?code=skiy0tiMnOol4m/MgPBvCtTZIHri6DFB3wW8491dhFmyIYybABaCSw=='
# +
def get_next_task(url):
    """Request the next pending task from the Azure Function endpoint.

    The server answers with ``task_id|base64_payload``; the payload part is
    decoded back into raw bytes before being returned.
    """
    response = requests.post(url, data={})
    pieces = response.text.split('|')
    return {'task_id': pieces[0], 'payload': base64.b64decode(pieces[1])}
# Fetch one task from the queue and display its id.
next_task = get_next_task(getnexttask_url)
next_task['task_id']
# -
image_data = next_task['payload']
# Decode the task payload bytes into an image array.
image = np.asarray(Image.open(io.BytesIO(image_data)))
plt.figure(figsize=(10,10))
plt.imshow(image)
# ## Compute Predictions
# +
from matplotlib import pyplot as plt
import cv2 # used for resize. if you dont have it, use anything else
from deepLab import Deeplabv3
# DeepLabV3+ model with an Xception backbone; the label set below matches
# the 21 PASCAL-VOC-style classes.
deeplab_model = Deeplabv3(backbone='xception')
label_names = np.asarray([
    'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
    'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
    'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tv'
])
# +
img = image
# NOTE(review): numpy image shape is (height, width, channels), so `w` and `h`
# are swapped names here; since cv2.resize takes (width, height), the resize
# below still comes out correct.
w, h, _ = img.shape
# Scale so the longer side becomes 512, keep the first 3 channels, then
# zero-pad bottom/right to a 512x512 canvas.
ratio = 512. / np.max([w,h])
resized = cv2.resize(img,(int(ratio*h),int(ratio*w)))[:,:,0:3]
pad_x = int(512 - resized.shape[0])
pad_y = int(512 - resized.shape[1])
resized2 = np.pad(resized,((0,pad_x),(0,pad_y),(0,0)),mode='constant')
img_resized = resized2
resized2 = resized2 / 127.5 - 1.  # map pixel values into [-1, 1]
# -
# Per-pixel class scores -> label map via argmax over the class axis.
res = deeplab_model.predict(np.expand_dims(resized2,0), verbose=1)
labels = np.argmax(res.squeeze(),-1)
plt.figure(figsize=(10,10))
plt.imshow(labels)
plt.axis('off')
#plt.savefig('mask.png')
plt.figure(figsize=(15,10))
plt.subplot(1,2,1)
plt.title('Original image, resized to 512x512')
plt.imshow(img_resized)
plt.subplot(1,2,2)
# Title lists the class names actually present in the prediction.
cur_labels = ' '.join(label_names[np.unique(labels)])
plt.title(cur_labels)
plt.imshow(labels)
np.unique(labels)
# Persist the label map as an 8-bit PNG so it can be base64-encoded and uploaded.
import imageio
imageio.imwrite('mask.png', labels.astype(np.uint8))
# +
with open('mask.png', "rb") as image_file:
    encoded_string = base64.b64encode(image_file.read())
    #encoded_string = base64.urlsafe_b64encode(image_file.read())
# Round-trip check: decode the base64 string and display the mask again.
encimg=base64.b64decode(encoded_string)
#encimg=base64.urlsafe_b64decode(encoded_string)
image_data = encimg
image = np.asarray(Image.open(io.BytesIO(image_data)))
plt.figure(figsize=(10,10))
plt.axis('off')
plt.imshow(image)
# ## Send Predictions to Server
# Sanity check: the upload payload is base64-encoded `bytes`.
type(encoded_string)
def send_task_result(url, task_id, result_b64):
    """POST a base64-encoded result for ``task_id`` back to the server.

    ``result_b64`` is ``bytes``; it is decoded to text because the JSON body
    must contain a string. Returns the ``requests`` response object.
    """
    result_text = result_b64.decode("utf8")
    return requests.post(url, json={'task_id': task_id, 'result': result_text})
# Upload the encoded mask for this task; the response object shows the HTTP status.
ans = send_task_result(puttaskresult_url, next_task['task_id'], encoded_string)
ans
| python/MLServer - Retrieve and Classify.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
from collections import deque,namedtuple
import random
import gym
import numpy as np
import tensorflow as tf
from tensorflow.keras import models, layers, optimizers
from tqdm import tqdm_notebook
import warnings
warnings.filterwarnings('ignore')
# One experience tuple: (state, action, reward, next_state, done).
Transition = namedtuple('Transition', ('state', 'action', 'reward', 'next_state','done'))


class VanillaMemory:
    """Fixed-capacity FIFO experience-replay buffer.

    Once more than ``memory_size`` transitions have been added, the oldest
    ones are evicted automatically by the underlying deque.
    """

    def __init__(self, memory_size):
        self.memory_size = memory_size
        self.memory = deque(maxlen=memory_size)  # deque drops the oldest entry when full

    def add(self, *args):
        """Store one transition; args are (state, action, reward, next_state, done)."""
        self.memory.append(Transition(*args))

    def sample(self, batch_size):
        """Return a uniform random mini-batch as stacked numpy arrays.

        Returns the 5-tuple (states, actions, rewards, next_states, dones);
        ``dones`` is cast to uint8 so it can be used directly in TD-target
        arithmetic. (An unreachable second ``return`` after this one in the
        original was dead code and has been removed.)
        """
        ts = random.sample(self.memory, batch_size)
        states = np.vstack([t.state for t in ts])
        actions = np.vstack([t.action for t in ts])
        rewards = np.vstack([t.reward for t in ts])
        next_states = np.vstack([t.next_state for t in ts])
        dones = np.vstack([t.done for t in ts]).astype(np.uint8)
        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        return len(self.memory)
# -
# Smoke-test the replay buffer by filling it from one fully random episode.
mem = VanillaMemory(1000)
len(mem)
env = gym.make('MountainCar-v0')
state = env.reset()
score = 0
t = 0
while True:
    # MountainCar-v0 actions: 0 = push left, 1 = no-op, 2 = push right.
    action = np.random.choice([0, 1, 2])
    next_state, reward, done, info = env.step(action)
    mem.add(state, action, reward, next_state, done)
    state = next_state
    t += 1
    score += reward
    if done:
        break
# Fixed message typo ("timsteps"); the unused `frames` list was also removed.
print(f'Done in {t} timesteps with score {score}.')
mem.sample(10)[4]  # peek at the `dones` column of a sampled batch
class DQNAgent:
    """Deep Q-Network agent with a target network and epsilon-greedy exploration.

    The local network is trained on minibatches sampled from ``replay_memory``;
    the target network tracks it via Polyak (soft) updates.
    """

    def __init__(self, state_size, action_size, replay_memory,
                 lr=5e-3, bs=64, clip=1., nb_hidden=(256,),
                 gamma=0.99, tau=1e-3, update_interval=5, update_times=1, tpe=200):
        """
        Args:
            state_size: dimensionality of the flat observation vector.
            action_size: number of discrete actions.
            replay_memory: buffer exposing add()/sample()/__len__ (e.g. VanillaMemory).
            lr: Adam learning rate.
            bs: minibatch size.
            clip: gradient clip value passed to the optimizer.
            nb_hidden: hidden-layer widths. The default is now a tuple — the
                original ``[256]`` was a mutable default argument.
            gamma: discount factor.
            tau: soft-update interpolation factor (1/tau ~ hard-update period).
            update_interval: environment steps between learning phases.
            update_times: gradient updates per learning phase.
            tpe: approximate timesteps per episode, used to decay epsilon.
        """
        self.state_size = state_size
        self.action_size = action_size
        self.nb_hidden = nb_hidden
        self.lr = lr
        self.bs = bs
        self.gamma = gamma
        self.update_interval = update_interval
        self.update_times = update_times
        self.tau = tau
        self.losses = []  # per-update training losses, for diagnostics
        self.tpe = tpe
        self.clip = clip
        # Local (trained) network and its slowly-tracking target copy.
        self.network_local = self.create_critic_network()
        self.network_target = tf.keras.models.clone_model(self.network_local)
        self.network_target.set_weights(self.network_local.get_weights())
        # replay memory
        self.memory = replay_memory
        # count environment steps seen so far
        self.t_step = 0

    def vanilla_loss(self, targets, preds):
        """MSE between TD targets and the Q-values of the actions taken.

        ``targets`` packs [action, q_target] per row because a Keras loss only
        receives (y_true, y_pred).
        """
        actions, q_targets = targets[:, 0], targets[:, 1]
        seq = tf.cast(tf.range(0, tf.shape(actions)[0]), tf.int32)
        actions = tf.cast(actions, tf.int32)
        action_idxs = tf.transpose(tf.stack([seq, actions]))
        # Select preds[row, action[row]] for every row.
        q_expected = tf.gather_nd(preds, action_idxs)
        return tf.keras.losses.mse(q_targets, q_expected)

    def create_critic_network(self):
        """Build and compile the MLP mapping states to per-action Q-values."""
        l = [layers.Dense(self.nb_hidden[0], input_dim=self.state_size, activation='relu')]
        for h in self.nb_hidden[1:]:
            l.append(layers.Dense(h, activation='relu'))
        l.append(layers.Dense(self.action_size, activation="linear"))
        network = models.Sequential(l)
        network.compile(loss=self.vanilla_loss,
                        optimizer=tf.keras.optimizers.Adam(learning_rate=self.lr,
                                                           clipvalue=self.clip))
        return network

    def get_eps(self, i, eps_start=1., eps_end=0.001, eps_decay=0.9):
        """Exponentially decayed exploration rate for (approximate) episode ``i``."""
        return max(eps_start * (eps_decay ** i), eps_end)

    def step(self, state, action, reward, next_state, done):
        """Record a transition and learn every ``update_interval`` steps."""
        # add transition to replay memory
        self.memory.add(state, action, reward, next_state, done)
        # nudge the target network towards the local one
        self.soft_update()
        self.t_step += 1
        if self.t_step % self.update_interval == 0:
            # only learn once the buffer can fill a minibatch
            if len(self.memory) > self.bs:
                for _ in range(self.update_times):
                    transitions = self.memory.sample(self.bs)
                    loss = self.learn(transitions)
                    self.losses.append(loss)

    def act(self, state):
        """Epsilon-greedy action selection for a single (unbatched) state."""
        eps = self.get_eps(int(self.t_step / self.tpe))
        action_values = self.network_local(state[None, :])
        # epsilon greedy
        if random.random() > eps:
            return np.argmax(action_values.numpy())
        else:
            return random.choice(np.arange(self.action_size))

    def learn(self, transitions):
        """One gradient step on a minibatch using the TD(0) target."""
        states, actions, rewards, next_states, dones = transitions
        # Bootstrapped max-Q from the *target* network, as a column vector.
        q_targets_next = self.network_target(next_states).numpy().max(1)[:, None]
        q_targets = rewards + (self.gamma * q_targets_next) * (1 - dones)
        # vanilla_loss unpacks the hstacked [action, q_target] columns.
        loss = self.network_local.train_on_batch(states, np.hstack([actions, q_targets]))
        return loss

    def hard_update(self):
        """Copy local weights into the target network every ~1/tau steps.

        BUGFIX: the original condition ``self.t_step % 1/self.tau == 0`` parsed
        as ``(self.t_step % 1) / self.tau == 0`` and was therefore always true.
        """
        if self.t_step % round(1 / self.tau) == 0:
            self.network_target.set_weights(self.network_local.get_weights())

    def soft_update(self):
        """Polyak-average the target weights: target <- tau*local + (1-tau)*target.

        Interpolates layer-by-layer instead of calling ``np.array`` on the
        ragged per-layer weight list, which newer NumPy versions reject.
        """
        mixed = [self.tau * wl + (1 - self.tau) * wt
                 for wl, wt in zip(self.network_local.get_weights(),
                                   self.network_target.get_weights())]
        self.network_target.set_weights(mixed)
# env = gym.make('MountainCar-v0')
# CartPole-v1: observation is a flat vector, action space is discrete.
env = gym.make('CartPole-v1')
mem = VanillaMemory(int(1e5))  # replay buffer holding up to 100k transitions
a = DQNAgent(state_size = env.reset().shape[0],
             action_size = env.action_space.n, replay_memory = mem)
# +
from datetime import datetime

# Train for up to 1000 episodes, tracking a 100-episode moving average.
scores = []
scores_deque = deque(maxlen=100)
moving_scores = []
start_time = datetime.now()
solved_score = 190  # stop once the moving average exceeds this return
for i in tqdm_notebook(range(1000)):
    if (i + 1) % 100 == 0:
        # Fixed message typo ("Episdoe").
        print(f'Episode {i} Moving Average: {np.mean(scores_deque)}')
    state = env.reset()
    score = 0
    t = 0
    while True:
        t += 1
        # select action (epsilon-greedy via the agent)
        action = a.act(state)
        # env step
        next_state, reward, done, info = env.step(action)
        # hook for reward shaping; currently the raw reward is passed through
        if done:
            fake_reward = reward  # + (200-t)/10
        else:
            fake_reward = reward
        # agent step: store the transition and (periodically) learn
        a.step(state, action, fake_reward, next_state, done)
        # collect score
        score += reward
        # go to next state
        state = next_state
        # break if done
        if done:
            break
    # book keeping
    scores.append(score)
    scores_deque.append(score)
    moving_scores.append(np.mean(scores_deque))
    if moving_scores[-1] > solved_score:
        print(f'Solved at Play {i}: {datetime.now() - start_time} Moving average: {moving_scores[-1]}')
        break
# -
import matplotlib.pyplot as plt
# Episode returns and their 100-episode moving average.
plt.plot(scores)
plt.plot(moving_scores)
# Per-update training loss of the local Q-network.
plt.plot(a.losses)
| rl-workshop/model_compile.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Tensorflow (GPU)
# language: python
# name: py3.6-tfgpu
# ---
# + id="aA1Esp6VoDZO"
# %matplotlib inline
# + id="q1tUJZrcoDZY"
import numpy as np
import matplotlib.pyplot as plt
import random
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.utils import np_utils
# + [markdown] id="cPD_OT_2oDZZ"
# ## Loading Training Data
#
# The MNIST dataset is conveniently bundled within Keras, and we can easily analyze some of its features in Python.
# + id="DwV3XeatoDZa" outputId="2dbde000-2843-409a-c3be-8b250feb0044" colab={"base_uri": "https://localhost:8080/"}
# MNIST: 60k training / 10k test 28x28 grayscale digit images.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print("X_train shape", X_train.shape)
print("y_train shape", y_train.shape)
print("X_test shape", X_test.shape)
print("y_test shape", y_test.shape)
# + id="jZzP_qTioDZb" outputId="7676bcb2-d3f8-43bb-f761-f313230896b6" colab={"base_uri": "https://localhost:8080/", "height": 657}
# Show nine randomly chosen training digits with their labels.
plt.rcParams['figure.figsize'] = (9,9)
for i in range(9):
    plt.subplot(3,3,i+1)
    # randrange excludes len(X_train); the original randint(0, len(X_train))
    # is inclusive of the upper bound and could (rarely) raise IndexError.
    num = random.randrange(len(X_train))
    plt.imshow(X_train[num], cmap='gray', interpolation='none')
    plt.title("Class {}".format(y_train[num]))
plt.tight_layout()
# + id="T8reP2HpoDZ8"
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D, Flatten
from tensorflow.keras.layers import BatchNormalization
# + id="s8hVREZToDZ9"
# Reload the MNIST data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# + id="C2vIPg-zoDZ9" outputId="54d69a4d-9e3e-4de1-aa01-b8f512fa9cc9" colab={"base_uri": "https://localhost:8080/"}
# Again, do some formatting
# Except we do not flatten each image into a 784-length vector because we want to perform convolutions first
# Resulting layout is NHWC: (samples, height, width, channels).
X_train = X_train.reshape(60000, 28, 28, 1) #add an additional dimension to represent the single-channel
X_test = X_test.reshape(10000, 28, 28, 1)
X_train = X_train.astype('float32') # change integers to 32-bit floating point numbers
X_test = X_test.astype('float32')
X_train /= 255 # normalize each value for each pixel for the entire vector for each input
X_test /= 255
print("Training matrix shape", X_train.shape)
print("Testing matrix shape", X_test.shape)
# + id="GELShg1XoDZ-"
# one-hot format classes (categorical_crossentropy below expects one-hot targets)
nb_classes = 10 # number of unique digits
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
# + id="XP1ztal5oDZ-"
# CNN: two [Conv-BN-ReLU] pairs each followed by 2x2 max-pooling, then two
# fully connected layers. The named convLayerXX handles keep references to
# intermediate layers (presumably for later activation inspection — confirm).
model = Sequential() # Linear stacking of layers
# Convolution Layer 1
model.add(Conv2D(32, (3, 3), input_shape=(28,28,1))) # 32 different 3x3 kernels -- so 32 feature maps
model.add(BatchNormalization(axis=-1)) # normalize each feature map before activation
convLayer01 = Activation('relu') # activation
model.add(convLayer01)
# Convolution Layer 2
model.add(Conv2D(32, (3, 3))) # 32 different 3x3 kernels -- so 32 feature maps
model.add(BatchNormalization(axis=-1)) # normalize each feature map before activation
model.add(Activation('relu')) # activation
convLayer02 = MaxPooling2D(pool_size=(2,2)) # Pool the max values over a 2x2 kernel
model.add(convLayer02)
# Convolution Layer 3
model.add(Conv2D(64,(3, 3))) # 64 different 3x3 kernels -- so 64 feature maps
model.add(BatchNormalization(axis=-1)) # normalize each feature map before activation
convLayer03 = Activation('relu') # activation
model.add(convLayer03)
# Convolution Layer 4
model.add(Conv2D(64, (3, 3))) # 64 different 3x3 kernels -- so 64 feature maps
model.add(BatchNormalization(axis=-1)) # normalize each feature map before activation
model.add(Activation('relu')) # activation
convLayer04 = MaxPooling2D(pool_size=(2,2)) # Pool the max values over a 2x2 kernel
model.add(convLayer04)
model.add(Flatten()) # Flatten final 4x4x64 output matrix into a 1024-length vector
# Fully Connected Layer 5
model.add(Dense(512)) # 512 FCN nodes
model.add(BatchNormalization()) # normalization
model.add(Activation('relu')) # activation
# Fully Connected Layer 6
model.add(Dropout(0.2)) # 20% dropout of randomly selected nodes
model.add(Dense(10)) # final 10 FCN nodes
model.add(Activation('softmax')) # softmax activation
# + id="0AQRqycGoDZ_" outputId="0e6c7b4d-92db-4b5f-ae14-7b554fa4255c" colab={"base_uri": "https://localhost:8080/"}
model.summary()
# + id="4ZTMSDMDoDZ_"
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# + id="ZQ3p_U3_oDZ_"
# Light augmentation for training: small rotations, shifts, shear and zoom.
gen = ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3,
                         height_shift_range=0.08, zoom_range=0.08)
test_gen = ImageDataGenerator()  # no augmentation on the evaluation stream
# + id="gTN890Q4oDaA"
train_generator = gen.flow(X_train, Y_train, batch_size=128)
test_generator = test_gen.flow(X_test, Y_test, batch_size=128)
# + id="t2ZuU602oDaA" outputId="7c5fc68a-a6a7-40a0-febb-199b59c9473c" colab={"base_uri": "https://localhost:8080/"}
# NOTE(review): fit_generator is deprecated in newer Keras (use model.fit);
# it is presumably still the right API for the TF version this kernel targets.
model.fit_generator(train_generator, steps_per_epoch=60000//128, epochs=5, verbose=1,
                    validation_data=test_generator, validation_steps=10000//128)
# + id="a-AxweVdoDaB" outputId="c3c16c2b-3c92-4975-ffda-1a8f7be29300" colab={"base_uri": "https://localhost:8080/"}
score = model.evaluate(X_test, Y_test)
print('Test score:', score[0])
print('Test accuracy:', score[1])
| MNIST in Keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Project - LineArtTSP
#
# ### Group member: <NAME>, <NAME>, <NAME>
#
# ### Introduction:
# Approximate images using a single closed curve generated by halftoning the image and finding a cycle by solving TSP on the generated stippling points.
#
# The backbone of this project will consist of these major components:
#
# 1. Halftone an arbitrary image and convert the points to a graph object.
# 2. Find a non-self-intersecting Hamiltonian cycle on the nodes by solving the TSP with a heuristic.
# 3. Plot the resulting curve.
# ## Halftoning
#
# The purpose of halftoning is to approximate an image with a set of points.
#
# Introduction to our Halftoning implementation:
#
# Takes a random selection of n points (see below) from the image. If one is using the contrast halftoning method, it will compare these points to the point x_pixel_distance and y_pixel_distance away from the chosen point
# (see docstring for specific information) and accept the point if it is sufficiently different from this neighbor point, based on the application of a euclidean metric to RGB values, conditioned on an input contrast_threshold value.
# One can 'smoothen' this acceptance of points (i.e. accepting points in a neighborhood of the contrast based on a probability function) by setting the smoothing_constant to a value between 0 and 1 (0 means no smoothing, 1 means most
# generous probability function).
#
# If one is using the brightness method, it will convert the image to grayscale, and perform rejection sampling (see https://en.wikipedia.org/wiki/Rejection_sampling) on n points, based on the brightness of n randomly chosen points.
#
# For either method, one can set 'invert' to True in order to reverse the functions of each of these methods. This ends up meaning exactly what one would intuitively expect - for the contrast method, it will instead only choose points that are similar to nearby points, and for the brightness method, it will choose points that are lighter, rather than points that are darker.
#
# An interesting extension about halftone: https://tabreturn.github.io/code/processing/python/2019/02/09/processing.py_in_ten_lessons-6.3-_halftones.html
#
#
# ## TSP
#
# TSP is the traveling salesman problem, the purpose of which is to find the shortest cycle that includes all nodes in a graph.
# In our case, this means we will attempt to find the shortest cycle that goes through every halftoning point, as this will
# approximate our image with one line, instead of a series of points.
#
# We do not implement a TSP solver ourselves, we use the OR-tools package from Google to do this, see more here:
# https://developers.google.com/optimization/routing/tsp
#
#
#
#
# ## Guide
# First you should decide on a couple of things:
#
# 1. Which image do you want to convert to a line drawing?
# 2. n - The number of points that the graph will use (think of it as the resolution of your output image)
# 3. timelimit - How long are you willing to let the TSP solver run? (the longer the better the result)
# 4. imagestyle - Which style do you want your halftoning to have?
# 5. style - Which style do you want your output to have?
#
# There are three different styles to choose, one is points, which is result of an image after halftoning, and the other two styles are line and spline, which connected selected points by a closed line, the latter being a smoothened variant.
#
# First we import our custom class and halftoning function:
from graph import Graph
from halftoning import rejectionSampling
# Set the path variable to the path of your chosen image.
# Set n to the amount of points you want.
# Halftone the image based on desired imagestyle (halftoning or brightness; for now, we will use brightness, with an example of when contrast can be better to use). Our program will select points randomly, and use rejection sampling based on the grey_scale value of these points (in the case of brightness) in order to choose whether the point will be accepted - this results in darker regions of an image having more points selected, and hence being accentuated more in the final product.
#
# By increasing n (number of points), the clarity of the resultant graph can be increased.
# +
# Generate test data points
n=1000
# Path to image file
path = 'recurrentTheme.jpg'
# Halftone the image: sample n points, accepting darker pixels more often.
nodes1 = rejectionSampling(n, path, imagestyle = "brightness")
#See the image here:
from IPython.display import Image
Image(filename=path)
# -
# Then, we create our graph object, set the nodes to those chosen in our halftoning process, and calculate the distance matrix of these nodes.
#
# Finally, we solve the traveling salesman problem based on a time limit - the code will attempt to find an optimal path during this time, and return the best path at the end of the time.
# Create the graph object and solve the TSP over the sampled points.
G = Graph()
G.setNodes(nodes1)
G.setDistMatrix()
G.TSP(timelimit=30);  # best tour found within 30 seconds
# Here is an example of the points chosen by the brightness method, plotted, i.e. the 'points' style.
G.plot(style="point")
# Here is an example of the graph following an application of the traveling salesman problem, with nodes connected by lines, i.e. the 'lines' style.
G.plot(style="line")
# Finally, here is an example of the graph following an application of the traveling salesman problem, with nodes connected by splines, i.e. the 'splines' style.
G.plot(style="spline")
# If, instead, we choose to use the imagestyle "contrast", then our halftoning will instead select points based on how different they are from a neighboring point, on a scale of 0 to 1.(this neighboring point can be determined, for all points, by using x_pixel_distance and y_pixel_distance parameters) using the euclidean metric applied to RGB values. How much contrast we use is determined by our contrast_threshold, which has a default value of 0.15 - this can be changed by setting contrast_threshold to the desired value in the rejectionSampling parameters.
#
# The contrast method tends to be a superior option when working with an image where regions of color are clearly separated, or where one determines that there is more value to be had in graphing the 'edges' of these images, rather than the contents of the regions of color - this method falls short where neighboring points of any point are almost ubiquitously sufficiently contrasting, such as in an extremely detailed painting like the 'Mona Lisa'.
# +
path2 = 'fedex.jpg'
# Contrast-based halftoning: keep points that differ strongly from a neighbor pixel.
nodes2 = rejectionSampling(n, path2, imagestyle = "contrast")
R = Graph()
R.setNodes(nodes2)
R.setDistMatrix()
R.TSP(timelimit=30);
R.plot(style="point")
# -
# An image such as the Fedex logo, for example, yields a better end result when using the contrast method.
# Same image with brightness-based halftoning, for comparison.
nodes3 = rejectionSampling(n,path2,imagestyle='brightness')
A = Graph()
A.setNodes(nodes3)
A.setDistMatrix()
A.TSP(timelimit=30);
A.plot(style="point")
# Side-by-side: contrast (R) vs brightness (A) in each plotting style.
R.plot(style="line")
A.plot(style="line")
R.plot(style="spline")
A.plot(style="spline")
# There are a few other ways to differentiate the halftoning based on the image, in order to achieve a better end result.
#
# Within the contrast method, one can set a smoothing constant to a value between 0 and 1 (default value 0, to indicate no smoothing; 1 indicates most smoothing possible. This is done by setting smoothing_constant to desired value in rejectionSampling parameters). This results in the creation of a linear probability density function in a neighborhood of our contrast_threshold, that accepts points in this neighborhood with a probability that increases based on how much contrast is present. This is optimal when working with an image where important regions of the image are barely not sufficiently contrasting enough in order to lower the contrast threshold without introducing too much random noise, as it allows us to be more generous with these important areas, without losing too much clarity to noise.
#
# As mentioned, one can set the x_pixel_distance and y_pixel_distance in rejectionSampling parameters in order to determine the neighboring point that randomly chosen points are compared to in the contrast method. We recommend keeping this very low (<10) - default values are 5 and 5 respectively. The best way to make use of these values is to choose distances that create a direction that is most perpendicular to the direction of edges in our image, as otherwise the method may occasionally compare points parallel to an 'edge' (border of color regions), and not notice any contrast.
#
# For either method, one can set the invert parameter in rejectionSampling to True (default is False) in order to reverse the functions of each halftoning method. For the brightness method, this ends up meaning choosing points based off how light they are, rather than how dark they are. For the contrast method, this means selecting points with neighboring points that are similar, rather than contrasting, to their RGB values. The utility of the invert for brightness is intuitively obvious (object of focus within image is light, rather than dark). For the contrast method, we can use this invert method when working with an image where we have an abundance of noise (regions where pixels are tremendously contrasting), except for an object of focus with a mostly uniform color (imagine a picture of a uniformed football player, with a rainbow of thousands of fans in the background).
#
| demo_file.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from scatter_letters import sl
# simple use
sl.text_to_gif('data_')
# 
# +
# all parameters
sl.text_to_gif('data_', # text to be converted to gif
out_path='output', # relative path to save temp files and output
repeat=True, # repeat the first letter at the end of the loop
intensity=10, # more info below*
rand=True, # True=random points, False=evenly spaced points
gif_name='movie', # name of the output file -> movie.gif
n_frames=24, # number of frames in each transition
bg_color='#95A4AD', # background color
marker='o', # marker style
marker_color='#283F4E', # marker color
marker_size=10, # marker size
fps=24, # frames per second
alpha=1, # markers opacity
axis_on=True, # plot spines and grid
sort_coords=False, # sort points in the transition - options(False, 'x', 'y')
sort_coords_asc=True, # True - sort ascending / False - sort descending
in_path=None, # for custom input paths
hold_frames=5) # hold the complete letter for x frames
# *intensity:
# When plotting random points (rand=True), this is how many times it'll generate 500 points at the start (before applying the mask).
# With random points, a higher intensity means more points.
# When plotting evenly spaced points (rand=False), this is the distance between the points.
# With evenly spaced points, a lower intensity means the points are closer together, so more points are plotted.
# -
# 
# special masks
sl.text_to_gif('MAC[MAC]',
repeat=True,
intensity=40,
rand=True,
gif_name='movie2',
n_frames=32,
bg_color='#ffb400',
marker='o',
marker_color='#2b2300',
marker_size=3,
fps=32,
alpha=0.7,
axis_on=False,
sort_coords=False,
sort_coords_asc=True,
hold_frames=20)
# 
sl.text_to_gif('RICK[RICK][RICK]',
repeat=True,
intensity=70,
gif_name = 'movie3',
n_frames=24,
bg_color='#53abee',
marker_color='#1D1D1D',
marker_size = 1,
fps=24,
alpha=0.3,
axis_on=False)
# 
# not randomly positioned
sl.text_to_gif('abc', intensity=5, rand=False, gif_name='not_rand')
# 
# not randomly positioned and sorting points for transition
sl.text_to_gif('abc', intensity=5, rand=False, gif_name='sorted', sort_coords='y', sort_coords_asc=True)
# 
# +
# The program works with three methods
# 1- get masked data:
# generates the random points and apply a mask to it
# if you don't pass a in_path the program uses a default dataframe with previously generated masks
# those include all letters from A to Z and two special images 'RICK' and 'MAC'
# If a in_path is passed the program will look in that directory for a .png file of 1000x1000 named as the first argument
xy = sl.get_masked_data('a',
intensity=10,
rand=True,
in_path=None)
# creates two lists, x and y.
print('lists: ', len(xy))
print('size:', len(xy[0]))
import matplotlib.pyplot as plt
plt.scatter(xy[0], xy[1])
plt.show()
# +
# 2- text to data
# receives string and split the letters to apply the previous method
# to use special masks here you need to put the name of the mask or file between brackets - example '[RICK]'
coords = sl.text_to_data('abc', repeat=True, intensity = 10, rand=True, in_path=None)
# creates two lists, x and y.
print(len(coords), 'lists, one for each letter and one for the repeated letter')
print(len(coords[0]), 'lists inside each of the above for x and y')
print('size:', len(coords[0][0]))
# -
# 3- build gif
# Uses the coordinates lists generate in the previous method to plot the images and transitions, and create the gif
sl.build_gif(coords,
out_path='output', gif_name = 'movie', n_frames=10,
bg_color='#95A4AD', marker='.', marker_color='#283F4E',
marker_size = 25, fps=4, alpha=1, axis_on=True,
sort_coords = False, sort_coords_asc=False, hold_frames=5)
| examples/Examples_nb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lustraka/Data_Analysis_Workouts/blob/main/Communicate_Data_Findings/SandBox/UniVariate_Exploration_Pokemon.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="uxUyKrJhVmIu"
# # Univariate Exploration Pattern Language
# ## Data Set: Pokemon
# + id="gNhVcEhCU879" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="634a39a8-658c-4d0f-93dd-c84b05812f29"
# Import dependencies
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Read the csv file, and check its shape and head
path = 'https://github.com/lustraka/Data_Analysis_Workouts/raw/main/Communicate_Data_Findings/SandBox/'
pokemon = pd.read_csv(path + 'pokemon.csv')
print(pokemon.shape)
pokemon.head()
# + [markdown] id="fSKiXB6tViru"
# ## Bar Chart using Seaborn
# A basic bar chart of frequencies can be created through the use of seaborn's `countplot` function.
#
# ```
# seaborn.countplot(*, x=None, y=None, data=None, order=None, orient=None, color=None)
# ```
#
# ### Create a vertical bar chart with default colors
#
# + id="4ca9uJ2oVOFv" colab={"base_uri": "https://localhost:8080/", "height": 280} outputId="58b73578-1321-4382-99f5-d546e91df6e2"
sns.countplot(data=pokemon, x='generation_id');
# + [markdown] id="7Vy61lcUWWZf"
# ### Create a vertical bar chart with a uniform single color
#
# The `color_palette()` returns the the current / default palette as a list of RGB tuples. Each tuple consists of three digits specifying the red, green, and blue channel values to specify a color.
# + id="eFB1DU9LWT0v" colab={"base_uri": "https://localhost:8080/", "height": 280} outputId="7d4e3cde-00e4-4bc4-9557-57ec6f6cfc47"
# Choose the first tuple of RGB colors
base_color = sns.color_palette()[0]
# Use the `color` argument
sns.countplot(data=pokemon, x='generation_id', color=base_color);
# + [markdown] id="P_ZK_D9MXDQl"
# ### Create a vertical bar chart with ordered bars
# + id="oQA6Gz36WsH9" colab={"base_uri": "https://localhost:8080/", "height": 280} outputId="d76d4241-57f1-4f54-c50b-afaa274f6bb7"
# Static-ordering the bars
sns.countplot(data=pokemon, x='generation_id', color=base_color, order=[5,1,3,4,2,7,6]);
# + id="HxO21xrfXXQ-" colab={"base_uri": "https://localhost:8080/", "height": 280} outputId="ac33f04d-ee2f-4de5-80bf-17dc5630954b"
# Dynamic-ordering the bars
# The order of the display of the bars can be computed with the following logic.
# Count the frequency of each unique value in the 'generation_id' column, and sort it in descending order
# Returns a Series
freq = pokemon['generation_id'].value_counts()
# Get the indexes of the Series
gen_order = freq.index
# Plot the bar chart in the decreasing order of the frequency of the `generation_id`
sns.countplot(data=pokemon, x='generation_id', color=base_color, order=gen_order);
# + [markdown] id="ntdy2wzdXmRP"
# ### Rotate the category labels (not axes)
# + id="wB5gTC6LXl3u" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="1abb1b17-b8ca-4fec-c459-5cd6b079f0bc"
# Plot the Pokemon type on a Vertical bar chart
sns.countplot(data=pokemon, x='type_1', color=base_color);
# Use xticks to rotate the category labels (not axes) counter-clockwise
plt.xticks(rotation=90);
# + [markdown] id="tc7z6c7nX1lh"
# ### Rotate the axes clocwise = Create a horizontal bar chart
# + id="-Zemq95jX1B_" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="9a6d6fb0-e4e3-425c-800b-4c7d26fc6e37"
# Plot the Pokemon type on a horizontal bar chart
type_order = pokemon['type_1'].value_counts().index
sns.countplot(data=pokemon, y='type_1', color=base_color, order=type_order);
# + [markdown] id="222EPYzuYo75"
# ### Unpivot *type* variable = Reshape the pokemon dataframe
# We will use the `pandas.DataFrame.melt()` method to unpivot a DataFrame from wide to long format, optionally leaving identifiers set. The syntax is:
# ```
# DataFrame.melt(id_vars, value_vars, var_name, value_name, col_level, ignore_index)
# ```
# + id="A2OxDq55Xfq2" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="a27e617b-1f4d-471a-f4e0-ef4b5a8c4fac"
pkmn_types = pokemon.melt(id_vars=['id', 'species'],
value_vars=['type_1', 'type_2'],
var_name='type_level',
value_name='type')
print(pkmn_types.shape)
pkmn_types.head()
# + [markdown] id="58A65srwZs3D"
# ### Plot the horizontal bar chart
# + id="FZW8gJz9Zay5" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="8a048a15-ff48-458d-d6e9-302f068d51a0"
# Count the frequency of unique values in the `type` column of pkmn_types dataframe.
# By default, returns the decreasing order of the frequency.
type_counts = pkmn_types['type'].value_counts()
# Get the unique values of the `type` column, in the decreasing order of the frequency.
type_order = type_counts.index
# Plot the horizontal bar chart
base_color = sns.color_palette()[0]
sns.countplot(data=pkmn_types, y='type', color=base_color, order=type_order);
# + [markdown] id="PfXpzadeaVbE"
# ### Plot a bar chart with relative frequency (the proportions)
# [Udacity Concept: Absolute vs. Relative Frequency](https://classroom.udacity.com/nanodegrees/nd002/parts/cd0016/modules/52d11870-710a-49bc-bfdb-0844ebaa50d7/lessons/52112010-aca5-4910-88a6-103757d30904/concepts/e6f3d137-5f7f-4bf2-88cb-f6644fffed10)
# + id="nHSldKMvZG85" colab={"base_uri": "https://localhost:8080/"} outputId="7675d7d8-649c-4685-b78d-646703e697f5"
# Total number of non-null values in the `type` column
n_pokemon = pkmn_types['type'].value_counts().sum()
# Highest frequency in the `type` column.
# BUGFIX: use positional .iloc[0] — `type_counts[0]` is a label lookup on a
# string index (deprecated positional fallback, removed in pandas 3.0).
max_type_count = type_counts.iloc[0]
# Return the maximum proportion, or in other words,
# compute the length of the longest bar in terms of the proportion
max_prop = max_type_count / n_pokemon
# Use numpy.arange() function to produce a set of evenly spaced proportioned
# values between 0 and max_prop, with a step size 2\%
tick_props = np.arange(0, max_prop, 0.02)
# Use a list comprehension to create tick_names that we will apply to the tick labels.
# Pick each element `v` from the `tick_props`, and convert it into a formatted string.
# `{:0.2f}` denotes 2 digits of precision and `f` is used to represent a floating point number.
# Refer [here](https://docs.python.org/3/library/string.html#format-string-syntax) for more details
tick_names = ['{:0.2f}'.format(v) for v in tick_props]
# Print parameters
print('max_prop = ', max_prop)
print('tick_props = ', tick_props)
print('tick_names = ', tick_names)
# + [markdown] id="ABz0LqFIb6zM"
# The `xticks` and `yticks` functions aren't only about rotating the tick labels. You can also get and set their locations and labels as well. The first argument takes the tick locations: in this case, the tick proportions multiplied back to be on the scale of counts. The second argument takes the tick names: in this case, the tick proportions formatted as strings to two decimal places.
# + id="WVDjzFp5a_OE" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="a3889f9d-3972-4d6e-d85b-53c8a741bcb7"
sns.countplot(data=pkmn_types, y='type', color=base_color, order=type_order);
# Change the tick locations and labels
plt.xticks(tick_props * n_pokemon, tick_names)
plt.xlabel('proportion');
# + [markdown] id="lpLWVL93j5Xx"
# ### Print the text (proportion) on the bars of a horizontal plot
#
# Read more about the arguments of text() function [here](https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.text.html)
#
# ```
# matplotlib.pyplot.text(x, y, s, fontdict=None, withdash=<deprecated parameter>, **kwargs)
# ```
#
# + id="N-416GGUjn2L" colab={"base_uri": "https://localhost:8080/"} outputId="6eb02b4b-7e3f-4510-bbbe-1c8fbaf427c2"
print(type_counts.head())
# + id="avY6VzYsbHgT" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="81d47beb-40b2-49d8-f79a-3ed4a492826a"
# Plot the baseline bar chart
base_color = sns.color_palette()[0]
sns.countplot(data=pkmn_types, y='type', color=base_color, order=type_order);
# Logic to print the proportion text on the bars
for i in range(type_counts.shape[0]):
    # Read 'count' from the .value_counts() Series.
    # BUGFIX: positional access must use .iloc — `type_counts[i]` is a label
    # lookup on a string index (deprecated fallback, removed in pandas 3.0).
    count = type_counts.iloc[i]
    # Convert count into a percentage, and then into string
    pct_string = '{:0.1f}%'.format(100*count/n_pokemon)
    # Print the string value on the bar, vertically centered on row i.
    plt.text(count+1, i, pct_string, va='center')
# + [markdown] id="9Q1FFaaEmqaB"
# ### Print the text (proportion) above the bars of a vertical bar chart
#
# We use the `.get_text()` method to obtain the category name, so we can get the count of each category level. At the end, we use the text function to print each percentage, with the x-position, y-position, and string as the three main parameters to the function.
# + id="rXivDkJQkcUh" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="c21032d0-5c38-44d7-abc3-9e6744c45613"
# Plot the baseline bar chart
base_color = sns.color_palette()[0]
sns.countplot(data=pkmn_types, x='type', color=base_color, order=type_order);
# Recalculating the type_counts just to have clarity.
type_counts = pkmn_types['type'].value_counts()
# Get the current tick locations and labels
locs, labels = plt.xticks(rotation=90)
# Loop through each pair of locations and labels
for loc, label in zip(locs, labels):
# Get the text property for the label to get the correct count
count = type_counts[label.get_text()]
pct_string = '{:0.1f}%'.format(100*count/n_pokemon)
# Print the annotation just below the top of the bar
plt.text(loc, count+2, pct_string, ha = 'center', color = 'black')
# + id="6gYPSRuxnoNf"
from matplotlib import rcParams
# Specify the figure size in inches, for both X, and Y axes
rcParams['figure.figsize'] = 12,4
# + id="8oar6dOLntbO" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="4051b92a-7c09-4dcb-a9bb-2c693e5af90c"
# Plot the baseline bar chart
base_color = sns.color_palette()[0]
sns.countplot(data=pkmn_types, x='type', color=base_color, order=type_order);
# Recalculating the type_counts just to have clarity.
type_counts = pkmn_types['type'].value_counts()
# Get the current tick locations and labels
locs, labels = plt.xticks(rotation=90)
# Loop through each pair of locations and labels
for loc, label in zip(locs, labels):
# Get the text property for the label to get the correct count
count = type_counts[label.get_text()]
pct_string = '{:0.1f}%'.format(100*count/n_pokemon)
# Print the annotation just below the top of the bar
plt.text(loc, count+2, pct_string, ha = 'center', color = 'black')
# + id="O2ZCuULBnyX2" colab={"base_uri": "https://localhost:8080/", "height": 362} outputId="24ddfe3c-ebdc-4dfd-aa9c-1c3d4110dacd"
# Plot the baseline bar chart
base_color = sns.color_palette()[0]
sns.countplot(data=pkmn_types, x='type', color=base_color, order=type_order);
locs, labels = plt.xticks(rotation=90)
print(locs)
print(list(labels))
# + [markdown] id="AE7h4Ntp3MT0"
# ### Visualize Missing Values
# + id="EQAMCScyorp9" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="594e6f22-4d7c-445c-ffb2-5bbb6b2eb59a"
# Read the data from a CSV file
# Original source of data: https://www.kaggle.com/manjeetsingh/retaildataset available under C0 1.0 Universal (CC0 1.0) Public Domain Dedication License
sales_data = pd.read_csv(path+'sales-data.csv')
print(sales_data.shape)
sales_data.head()
# + id="HJYWG27V3iGr" colab={"base_uri": "https://localhost:8080/"} outputId="19dbf8b4-cc34-4591-bb0a-5383a2228411"
# Initalize `sdm` aka sales data missing Series
sdm = sales_data.isna().sum()
sdm[sdm>0]
# + id="HmyqlSLj3lZV" colab={"base_uri": "https://localhost:8080/", "height": 390} outputId="e9a95cd5-b9d0-40fc-8b8c-33c99a64c815"
# Specify the default figure size in inches, for both X, and Y axes
rcParams['figure.figsize'] = 6.4, 4.8
# The first argument to the function below contains the x-values (column names), the second argument the y-values (our counts).
# Refer to the syntax and more example here - https://seaborn.pydata.org/generated/seaborn.barplot.html
na_counts = sdm[sdm>0].reset_index(name='missing_values_count')
# sns.barplot(na_counts.index.values, na_counts)
# FutureWarning: Pass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`,...
sns.barplot(x='index', y='missing_values_count', data=na_counts)
# get the current tick locations and labels
plt.xticks(rotation=45)
# Logic to print value on each bar
for i in na_counts.index:
count = na_counts.at[i,'missing_values_count']
# Refer here for details of the text() - https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.text.html
plt.text(i, count+300, count, ha = 'center', va='top')
bottom, top = plt.ylim()
plt.ylim(bottom, top+300)
plt.xlabel('Variables')
plt.ylabel('Count of Missing Values')
plt.title(f'Variables with Missing Values (Out of {sales_data.shape[0]})')
plt.show()
# + [markdown] id="0vqleWw7A_u9"
# **Note** - The `seaborn.barplot()` is a useful function to keep in mind if your data is summarized and you still want to build a bar chart. If your data is not yet summarized, however, just use the `countplot` function so that you don't need to do extra summarization work. In addition, you'll see what `barplot`'s main purpose is in the adaptations of univariate plots for plotting bivariate data.
# + [markdown] id="LLDmFns2VAEi"
# ## Plot (Top) Relative Frequencies Pattern
# + id="zbZujwco42Qf" colab={"base_uri": "https://localhost:8080/", "height": 325} outputId="34f0811a-c16c-47d1-a377-f411ac0f4bc6"
# Initialize parameters
data = pkmn_types['type'].value_counts()
top = 10
title = 'Top {} Pokemon\'s Types'
# Define a function
def plot_top_rel_freq(data, title, top=None, nobs=None):
    """Plot the `top` largest values of the `data` counts Series as a
    horizontal bar chart annotated with relative frequencies.

    Args:
        data - a pd.Series with counts to plot (index = category labels)
        title - a chart's title ({} displays a number of categories)
        top - number of categories to show; if None, plot all categories
        nobs - total number of observations; if None, use `data.sum()`
    """
    def set_step(step):
        """Round the raw tick step to a nicely rounded step size."""
        lg = -1*np.log10(step)
        if lg > 0.1:
            return round(step, round(lg))
        else:
            return round(step, -1)
    # Choose the first tuple of RGB colors to reduce distraction
    base_color = sns.color_palette()[0]
    # Ensure counts are in descending order so bars plot largest-first
    data = data.sort_values(ascending=False)
    # If top is None (or larger than available), plot all observations
    if top is None or top > data.shape[0]:
        top = data.shape[0]
    # If nobs is None, use data.sum()
    if nobs is None:
        nobs = data.sum()
    # Compute the length of the longest bar in terms of proportion.
    # BUGFIX: use positional .iloc — label-based `data[0]` on a non-integer
    # index is deprecated and removed in pandas 3.0.
    max_prop = data.iloc[0] / nobs
    # Produce a set of evenly spaced proportioned values
    tick_props = np.arange(0, max_prop, set_step(max_prop/5))
    # Create tick labels
    tick_names = [f'{v:.1%}' for v in tick_props]
    fig, ax = plt.subplots(figsize=(6.4, 4.8))
    ax = sns.barplot(x=data[:top].values, y=data[:top].index, color=base_color)
    # Change tick locations and labels
    plt.xticks(tick_props * nobs, tick_names)
    # Print the proportion text on the bars
    for i in range(top):
        # Read count positionally (BUGFIX: .iloc, see above)
        count = data.iloc[i]
        # Convert count into a percentage, and then into string
        pct_count = f'{count/nobs:.1%}'
        # Print the string value on the bar
        plt.text(count+round(data.iloc[0]/100), i, pct_count, va='center')
    # Render the chart, leaving headroom on the right for the labels
    left, right = plt.xlim()
    plt.xlim(left, right+round(data.iloc[0]/15))
    ax.set_title(title.format(top))
    return None
plot_top_rel_freq(data, title, 10)
# + id="AViSGUaTvPuM" colab={"base_uri": "https://localhost:8080/", "height": 325} outputId="761985ed-e558-4852-8bf1-1484eb9b9ef1"
data = sdm[sdm>0]
plot_top_rel_freq(data, 'Variables with Missing Values', None, sales_data.shape[0])
# + [markdown] id="Bvxox51SfxaO"
# ## Histograms
# A **histogram** is used to plot the distribution of a numeric variable. It's a quantitative version of the bar chart. When creating histograms, it's useful to play around with different bin widths to see what represents the data best.
# + colab={"base_uri": "https://localhost:8080/", "height": 320} id="Boan7TY_f4Hd" outputId="5c082e27-b339-42f2-c1d3-e3e978e159f2"
# Resize the chart (size in inches)
plt.figure(figsize=[20, 5])
# Histogram on left
plt.subplot(1, 2, 1)
bins = np.arange(0, pokemon['speed'].max()+4, 4)
plt.hist(data=pokemon, x='speed', bins=bins);
# Histogram on righ
plt.subplot(1, 2, 2)
bins = np.arange(0, pokemon['speed'].max()+1/4, 1/4)
plt.hist(data=pokemon, x='speed', bins=bins);
# + [markdown] id="lfMCxUvYivfM"
# The `distplot` function has built-in rules for specifying histogram bins, and by default plots a curve depicting the kernel density estimate (KDE) on top of the data. The vertical axis is based on the KDE, rather than the histogram: you shouldn't expect the total heights of the bars to equal 1, but the area under the curve should equal 1.
# + [markdown] id="AwbD5sNixdvy"
# ### Be Aware of Two `pyplot` Interfaces
# See [The object-oriented interface and the pyplot interface](https://matplotlib.org/stable/tutorials/introductory/usage.html#the-object-oriented-interface-and-the-pyplot-interface)
# + colab={"base_uri": "https://localhost:8080/", "height": 416} id="yYIdyjuJjUEn" outputId="d1346263-6015-4f87-bcea-2e179518b9eb"
# Left: histogram + kernel density estimate; right: histogram only.
# NOTE(review): sns.distplot is deprecated in recent seaborn releases —
# prefer sns.histplot(..., kde=True) / sns.histplot(...) going forward.
fig, axs = plt.subplots(1, 2, figsize=[20,5])
sns.distplot(pokemon['speed'], ax=axs[0])
sns.distplot(pokemon['speed'], ax=axs[1], kde=False);
# + [markdown] id="2b130FKbwHuy"
# Ref: [Subplotting with matplotlib and seaborn](https://dev.to/thalesbruno/subplotting-with-matplotlib-and-seaborn-5ei8)
# + colab={"base_uri": "https://localhost:8080/", "height": 416} id="kDwv-JfYgjDU" outputId="82f68f1e-1ab0-4339-96a4-6405bd8c7925"
# Resize the chart (size in inches)
plt.figure(figsize=[20, 5])
# Histogram on left
plt.subplot(1, 2, 1)
sns.distplot(pokemon['speed']);
# Histogram on righ
plt.subplot(1, 2, 2)
sns.distplot(pokemon['speed'], kde=False);
# + [markdown] id="_0f8CoXX3dSN"
# ### Explore Skewness and Focus on Data Points in Some Range
# + colab={"base_uri": "https://localhost:8080/", "height": 329} id="S6pWQVM5to87" outputId="f7c3bcd6-0dee-4c29-b362-1c64a0d444bf"
fig, axs = plt.subplots(1, 2, figsize=[20,5])
bins = np.arange(0, pokemon['height'].max()+0.5, 0.5)
sns.histplot(data=pokemon, x='height', bins=bins, ax=axs[0])
bins = np.arange(0, pokemon['height'].max()+0.2, 0.2)
sns.histplot(data=pokemon, x='height', bins=bins, ax=axs[1])
# Use an axis limit to right plot
axs[1].set_xlim(0, 6); # could also be called as plt.xlim((0, 6))
# + [markdown] id="pNaSkdk89lRn"
# ### Transform Axis Scale (Log, Sqrt)
#
# Be aware that a logarithmic transformation is not the only one possible. When we perform a logarithmic transformation, our data values have to all be positive; it's impossible to take a log of zero or a negative number. In addition, the transformation implies that additive steps on the log scale will result in multiplicative changes in the natural scale, an important implication when it comes to data modeling. The type of transformation that you choose may be informed by the context for the data. For example, [this Wikipedia section](https://en.wikipedia.org/wiki/Log-normal_distribution#Occurrence_and_applications) provides a few examples of places where log-normal distributions have been observed.
#
# If you want to use a different transformation that's not available in `xscale`, then you'll have to perform some feature engineering. In cases like this, we want to be systematic by writing a function that applies both the transformation and its inverse. The inverse will be useful in cases where we specify values in their transformed units and need to get the natural units back
# + colab={"base_uri": "https://localhost:8080/", "height": 334} id="VkyM7boj4jtD" outputId="778b799b-8ab9-458a-8021-6459d711e1b8"
fig, axs = plt.subplots(1, 2, figsize=[20,5])
# HISTOGRAM ON LEFT
# Get the ticks for log bins
bins = 10**np.arange(-1, 3+0.1, 0.1)
# Generate the x-ticks
ticks = [0.1, 0.3, 1, 3, 10, 30, 100, 300, 1000]
# Convert ticks into string values
labels = [str(tick) for tick in ticks]
# Plot a histogram with a log scale
axs[0].hist(data=pokemon, x='weight', bins=bins, edgecolor='black')
axs[0].set_xscale('log')
# Render the chart
axs[0].set_xticks(ticks)
axs[0].set_xticklabels(labels)
axs[0].set_title('Histogram With a Log Scale')
# HISTOGRAM ON RIGHT
def sqrt_trans(x, inverse=False):
    """Square-root transform of *x*; with inverse=True, undo it (square)."""
    return x**2 if inverse else np.sqrt(x)
# Resize bins, to transform the x-axis
bin_edges = np.arange(0, sqrt_trans(pokemon['weight'].max())+1, 1)
# Plot the scaled data
axs[1].hist(pokemon['weight'].apply(sqrt_trans), bins=bin_edges, edgecolor='black')
# Define the tick-locations
tick_locs = np.arange(0, sqrt_trans(pokemon['weight'].max())+10, 10)
# Apply x-ticks
axs[1].set_xticks(tick_locs)
axs[1].set_xticklabels(sqrt_trans(tick_locs, inverse=True).astype(int))
# Render the chart
axs[1].set_title('Histogram With a Square Root Scale')
plt.show()
# + [markdown] id="msMyNWo1SDgJ"
# ## Extra: Kernel Density Estimation
# + id="4twXweOaZvsX" colab={"base_uri": "https://localhost:8080/", "height": 330} outputId="1af126e7-d2e6-47c7-bf61-e3cd8ecec76c"
fig, axs = plt.subplots(1, 2, figsize=[20,5])
sns.histplot(pokemon['speed'], ax=axs[0]);
sns.kdeplot(pokemon['speed'], ax=axs[1]);
# + colab={"base_uri": "https://localhost:8080/", "height": 369} id="DvZ2hKKqFZdv" outputId="ba6ce807-79fc-473f-f952-0c1a81bde6d2"
# Beware, displot() is a figure-level function!
sns.displot(pokemon['speed'], kind='hist');
# + id="LKYZ0RsHSND2" colab={"base_uri": "https://localhost:8080/", "height": 369} outputId="35fe18cf-9ba5-4cbd-bd72-c8aa430df063"
sns.displot(pokemon['speed'], kind='kde');
# + id="WAZxdLgWSXmQ"
| Communicate_Data_Findings/SandBox/UniVariate_Exploration_Pokemon.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: terry_stops
# language: python
# name: terry_stops
# ---
# %load_ext autoreload
# %autoreload 2
# +
# setting project path
import os
import sys
gparent = os.path.join(os.pardir, os.pardir)
sys.path.append(gparent)
# imports
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.compose import make_column_selector
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import make_pipeline
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score, make_scorer
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.base import clone
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import make_pipeline as make_sm_pipeline
import matplotlib.pyplot as plt
import seaborn as sns
# setting style
sns.set_theme('talk')
plt.style.use('fivethirtyeight')
sns.set_palette(palette='Blues_r')
# -
path = os.path.join(gparent, 'data/processed', 'cleaned.csv')
test_df = pd.read_csv(path, keep_default_na=False)
test_df.head(2)
test_df.info()
column_list = test_df.columns
excluded = ['Stop Resolution', 'Weapon Type', 'Officer ID',
'Initial Call Type', 'Final Call Type', 'Officer Squad',
'Precinct', 'Sector', 'Call Type', 'Arrest Flag',
'Frisk Flag', 'Beat']
cols = [x for x in column_list if x not in excluded]
cols
test_df = test_df[cols]
test_df.head()
X = test_df.drop('Target', axis=1)
y = test_df['Target']
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=2021,
stratify=y
)
string_selector = make_column_selector(dtype_include='object')
number_selector = make_column_selector(dtype_include='number', dtype_exclude='object')
preprocessing = make_column_transformer((OneHotEncoder
(handle_unknown='ignore'),string_selector),
(MinMaxScaler(), number_selector))
preprocessing.fit_transform(X_train)
# Inspect the one-hot encoded feature names produced by the OneHotEncoder.
# BUGFIX: get_feature_names() was removed in scikit-learn 1.2; use
# get_feature_names_out() instead.
preprocessing.transformers_[0][1].get_feature_names_out()
clf = LogisticRegression()
pipeline = make_pipeline(preprocessing, clf)
cross_val_score( pipeline, X_train, y_train, scoring='precision')
sm = SMOTE(random_state=2021)
pipeline2 = make_sm_pipeline(preprocessing, sm, clf)
cross_val_score(pipeline2, X_train, y_train, scoring='precision')
clf2 = LogisticRegression(max_iter=275)
pipeline3 = make_sm_pipeline(preprocessing, sm, clf2)
cross_val_score(pipeline3, X_train, y_train, scoring='precision')
preprocessing2 = make_column_transformer((OneHotEncoder
(handle_unknown='ignore'),string_selector),
(StandardScaler(), number_selector))
pipeline4 = make_sm_pipeline(preprocessing2, sm, clf2)
cross_val_score(pipeline4, X_train, y_train, scoring='precision')
# +
def pre_score(y_true, y_pred):
    """Precision of *y_pred* against *y_true* (thin wrapper for make_scorer)."""
    return precision_score(y_true, y_pred)
precision = make_scorer(pre_score)
class HarnessCCV:
    """Cross-validation harness: per fold, scales the features, oversamples
    the training split with SMOTE, fits a clone of the estimator, and records
    precision. Keeps a running history DataFrame of reported results."""

    def __init__(self, scorer, random_state=2021):
        # `scorer` is kept for API compatibility; report() computes
        # precision directly via sklearn.metrics.precision_score.
        self.scorer = scorer
        # BUGFIX: random_state was previously accepted but ignored;
        # it now seeds SMOTE inside report().
        self.random_state = random_state
        self.history = pd.DataFrame(columns=['Name', 'Accuracy', 'Notes'])

    def report(self, estimator, X, y, name, notes=''):
        """Run 5-fold stratified CV on (X, y) and record the mean validation
        precision under `name`. Returns (train_scores, val_scores) arrays."""
        # Arrays to hold the scores from each fold
        kfold_val_scores = np.ndarray(5)
        kfold_train_scores = np.ndarray(5)
        # Instantiate a splitter object and loop over its folds
        kfold = StratifiedKFold(n_splits=5)
        for fold, (train_index, val_index) in enumerate(kfold.split(X, y)):
            # Extract train and validation subsets using the provided indices
            X_t, X_val = X.iloc[train_index], X.iloc[val_index]
            y_t, y_val = y.iloc[train_index], y.iloc[val_index]
            # Scale inside the fold so validation statistics never leak
            scaler = StandardScaler()
            X_t_scaled = scaler.fit_transform(X_t)
            X_val_scaled = scaler.transform(X_val)
            # Oversample the training fold only
            sm = SMOTE(random_state=self.random_state)
            X_t_oversampled, y_t_oversampled = sm.fit_resample(X_t_scaled, y_t)
            # Clone the provided model and fit it on the oversampled train subset
            temp_model = clone(estimator)
            temp_model.fit(X_t_oversampled, y_t_oversampled)
            # BUGFIX: precision_score expects (y_true, y_pred); the original
            # passed predictions first, which silently computes the wrong metric.
            score_train = precision_score(y_t_oversampled, temp_model.predict(X_t_oversampled))
            score_val = precision_score(y_val, temp_model.predict(X_val_scaled))
            kfold_train_scores[fold] = score_train
            kfold_val_scores[fold] = score_val
        # BUGFIX: report the mean across all folds — the original recorded
        # only the final fold's score (`scores_val.mean()` on a scalar).
        mean_val = kfold_val_scores.mean()
        frame = pd.DataFrame([[name, mean_val, notes]],
                             columns=['Name', 'Accuracy', 'Notes'])
        # pd.concat replaces DataFrame.append (removed in pandas 2.0)
        self.history = pd.concat([self.history, frame], ignore_index=True)
        self.history = self.history.sort_values('Accuracy')
        self.print_error(name, mean_val)
        print(kfold_val_scores)
        return kfold_train_scores, kfold_val_scores

    def print_error(self, name, Accuracy):
        # Fixed typo in the reported message ("percision" -> "precision")
        print(f'{name} has an average precision of {Accuracy}')
# -
modeling = HarnessCCV(precision)
clf3 = KNeighborsClassifier()
pipeline5 = make_sm_pipeline(preprocessing2, sm, clf3)
FE = X_train.copy()
FE = FE.reset_index(drop=True)
FE = pd.get_dummies(FE)
modeling.report(clf3, FE, y_train, 'KNN')
modeling.history
| notebooks/exploratory/.ipynb_checkpoints/test_modeling-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# Logisitic Regression (Multi-class Classification)
# %matplotlib inline
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
# loading training set of handwritten digits
training_digits = load_digits()
dir(training_digits)
training_digits.data[0]
plt.gray()
plt.matshow(training_digits.images[0])
training_digits.target[0:5]
# We will use 'data' and 'target' to train model
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(training_digits.data, training_digits.target, test_size=0.2)
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train, y_train)
model.score(X_test, y_test)
# predicting handwritten digit values
plt.matshow(training_digits.images[67])
training_digits.target[67]
model.predict(training_digits.data[5:20])
# +
y_predicted = model.predict(X_test)
from sklearn.metrics import confusion_matrix
# BUGFIX: the original line was garbled ("confusion_matrix(y_test, y_predictcm")
# — the argument name was truncated and fused with the display expression,
# producing a SyntaxError. Reconstructed as compute-then-display.
cm = confusion_matrix(y_test, y_predicted)
cm
# -
# Visualize the confusion matrix as an annotated heatmap with Seaborn.
import seaborn as sn
plt.figure(figsize= (10,7))
sn.heatmap(cm, annot=True)  # annot=True prints the count inside each cell
plt.xlabel('Predicted')
plt.ylabel('Truth')
| multiClassLogReg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # About: Moodleコンテナの起動
#
# ---
#
# Moodleコンテナを起動します。
# ## 全体構成
#
# 
# 構築するMoodle環境は次の2つのDockerコンテナで構成されます。
#
# * [Moodle](https://harbor.vcloud.nii.ac.jp/harbor/projects/2/repositories/vcp%2Fmoodle-simple)
# - MoodleのPHPスクリプトを実行するコンテナ
# * [MariaDB](https://hub.docker.com/_/mariadb/)
# - Moodleの設定を保存するデータベース
#
# 構築環境となるノードを事前に「011-VCノードの作成」、「012-EC2インスタンスの作成」、「013-Azure仮想マシンの作成」のいずれかのNotebookで作成しておく必要があります。
# ## 準備
# ### Group名の指定
#
# Moodle環境の構築対象を指定するために Ansible のグループ名を指定します。
# ノード作成をどのNotebookで行ったかによって、ここで指定する値は異なります。各Notebookでどの値がAnsibleのグループ名に対応するのかを以下に示します。
#
# * 011-VCノードの作成.ipynb
# - UnitGroup名: `ugroup_name`
# * 012-EC2インスタンスの作成.ipynb
# - EC2インスタンスの名前: `aws_ec2_name`
# * 013-Azure仮想マシンの作成.ipynb
# - 仮想マシンの名前: `azure_vm_name`
# + tags=["vcp:skip", "vcp:parameters"]
# (例)
# target_group = 'Moodle'
target_group =
# + [markdown] heading_collapsed=true
# ### 前提条件の確認
#
# このNotebookを実行するための前提条件を満たしていることを確認します。
# + [markdown] hidden=true
# 前提となる条件を以下に示します。
#
# * Ansibleから操作可能であること
# * Ansibleから管理者権限でコマンドを実行できること
# * Docker が利用可能なこと
# * docker-compose コマンドがインストールされていること
# * CentOS 7であること
# + [markdown] hidden=true
# 対象となるホストにAnsibleで到達可能なことを確認します。
# + hidden=true
# !ansible {target_group} -m ping
# + [markdown] hidden=true
# 管理者権限でコマンド実行可能なことを確認します。
# + hidden=true
# 管理者権限(-b)でのコマンド実行
# !ansible {target_group} -b -a 'whoami'
# + [markdown] hidden=true
# Dockerが利用可能なことを確認します。
# + hidden=true
# !ansible {target_group} -a 'docker info'
# + [markdown] hidden=true
# docker-composeコマンドがインストールされていることを確認します。
# + hidden=true
# !ansible {target_group} -a 'docker-compose version'
# + [markdown] hidden=true
# 構築対象のノードが CentOS 7 であることを確認します。
# + hidden=true
import json

# Gather facts from the target host and verify it is running CentOS 7.
# out = !ansible {target_group} -m setup
try:
    # Ansible prefixes the JSON facts with a "<host> | SUCCESS => {" banner;
    # locate it, then re-assemble and parse the JSON body that follows.
    idx = [i for i, x in enumerate(out) if x.endswith('| SUCCESS => {')][0]
    data = json.loads(' '.join(['{'] + out[(idx + 1):]))
    distribution = data['ansible_facts']['ansible_distribution']
    distribution_major_version = data['ansible_facts']['ansible_distribution_major_version']
    if distribution != 'CentOS' or distribution_major_version != '7':
        raise RuntimeError(f"ERROR: {distribution} {distribution_major_version}")
except Exception as e:
    # Dump the raw ansible output for debugging. Previously a bare `except:`
    # discarded the cause; chain it so the root failure stays visible.
    for line in out:
        print(line)
    raise RuntimeError("error!") from e
# -
# ## パラメータ設定
# ### Moodle
# Moodleに関するパラメータを指定します。
#
# 
# #### Moodleのコンテナイメージ
#
# Moodleコンテナのイメージ名を指定してください。
# ここで指定する値は `harbor.vcloud.nii.ac.jp` にあらかじめ用意してある Moodle イメージから選択する必要があります。次のセルを実行すると選択可能な値の一覧を表示します。
# +
import requests

# List the Moodle container image tags available on the NII Harbor registry,
# newest first; the "-ssl" variants are filtered out.
# (Removed the pointless f-string prefix on a placeholder-free URL and
# renamed the loop variables, which both used to be `x`.)
url = 'https://harbor.vcloud.nii.ac.jp/api/repositories/vcp/moodle-simple/tags'
res = requests.get(url)
for image in sorted([f"harbor.vcloud.nii.ac.jp/vcp/moodle-simple:{tag['name']}"
                     for tag in res.json()
                     if not tag['name'].endswith('-ssl')], reverse=True):
    print(image)
# -
# コンテナイメージのタグによって、どのリリースに対応するMoodle環境を構築するかを指定できます。`3.x.x` のようなタグはマイナーリリースまで特定したコンテナイメージとなります。また `3.x` のようなタグは各ブランチにおける最新のマイナーリリースを意味しています。Moodleのリリース状況に関しては[Moodle - Releases](https://docs.moodle.org/dev/Releases)を参照してください。現在の最新のLTS(Long Term Support)はMoodle 3.9となっています。
# + tags=["vcp:skip", "vcp:parameters"]
# (例)
# moodle_image_name = 'harbor.vcloud.nii.ac.jp/vcp/moodle-simple:3.9.9'
# moodle_image_name = 'harbor.vcloud.nii.ac.jp/vcp/moodle-simple:3.9'
moodle_image_name = 'harbor.vcloud.nii.ac.jp/vcp/moodle-simple:3.9.9'
# -
# #### Moodleの管理者ユーザ名
#
# Moodleの管理者ユーザ名を指定します。
# > Moodleのユーザ名に指定できる文字は、小文字英数字と`_`, `-`, `@`, `.`です。
# + tags=["vcp:skip", "vcp:parameters"]
# (例)
# moodle_admin_name = 'admin'
moodle_admin_name =
# -
# #### Moodleの管理者パスワード
#
# 管理者パスワードを指定します。
# 次のセルを実行するとパスワード入力用の枠が表示されます。管理者パスワードを入力してください(入力後に Enter キーを押すことで入力が完了します)。
#
# > パスワードの値は `admin` 以外の値を指定してください。
# + tags=["vcp:skip", "vcp:parameters"]
from getpass import getpass
# Prompt interactively so the admin password never appears in the notebook.
moodle_admin_password = getpass()
# -
# #### MoodleのURL
#
# MoodleではサイトのURLを`config.php`の`$CFG->wwwroot`に設定する必要があります。構築対象のURLを指定してください。
# + tags=["vcp:skip", "vcp:parameters"]
# (例)
# moodle_url = 'http://moodle.example.org'
# moodle_url = 'http://172.30.2.100'
moodle_url =
# -
# #### リバースプロキシ
#
# 構築したMoodle環境に対して、一般利用者からのアクセスがリバースプロキシを経由して行われる場合は `moodle_reverseproxy` の値に `True` を設定してください。リバースプロキシを経由せずに構築環境に直接アクセスする構成をとる場合は `False` を設定してください。
# + tags=["vcp:skip", "vcp:parameters"]
# (例)
# moodle_reverseproxy = False # リバースプロキシを経由しない場合
# moodle_reverseproxy = True # リバースプロキシを経由する場合
moodle_reverseproxy =
# + [markdown] heading_collapsed=true
# #### パラメータの保存
#
# この節で指定したパラメータの値をファイルに保存します。ただしパスワードなどの秘匿情報についてはファイルへの保存を行いません。
# + [markdown] hidden=true
# 値の保存を行う前に、入力されたパラメータに対して簡易なチェックを行います。エラーになった場合はその後に表示される指示に従ってください。
# + hidden=true
# %run scripts/utils.py
check_parameters(
'moodle_admin_name',
'moodle_url',
nb_vars=locals(),
)
# + [markdown] hidden=true
# パラメータの値を group_vars ファイルに保存します。
# + hidden=true
# %run scripts/group.py
update_group_vars(
target_group,
moodle_image_name=moodle_image_name,
moodle_url=moodle_url,
moodle_reverseproxy=moodle_reverseproxy,
)
# -
# ### データベース
#
# Moodleの設定値などを保存するデータベースに関するパラメータを指定します。
#
# 
# #### データベースのコンテナイメージ
#
# データベースコンテナのイメージ名を指定してください。このNotebookが構築する環境では MariaDBのコンテナイメージを指定することができます。
# + tags=["vcp:parameters"]
db_image_name = 'mariadb:10.6'
# -
# Moodleの設定ファイル`config.php`の中で `$CFG->dbtype` に指定するデータベースの種別を指定してください。
#
# > このNotebookでは `mariadb` を指定した場合の動作確認のみ行っています。
# + tags=["vcp:parameters"]
# (例)
# db_type = 'mariadb' # MariaDB
# db_type = 'mysql' # MySQL
db_type = 'mariadb'
# -
# #### データベース名
#
# Moodleが使用するデータベース名を指定してください。
# + tags=["vcp:parameters"]
# (例)
# db_moodle_db = 'moodle'
db_moodle_db = 'moodle'
# -
# #### データベースの接続ユーザ
#
# Moodleのデータベースに接続するためのデータベースのユーザ名を指定してください。
# + tags=["vcp:parameters"]
# (例)
# db_moodle_db_user = 'moodle'
db_moodle_db_user = 'moodle'
# -
# #### データベースのパスワード
#
# Moodleのデータベースに接続するためのパスワードを指定します。
# + tags=["vcp:skip", "vcp:parameters"]
from getpass import getpass
db_moodle_db_password = getpass()
# -
# #### データベースの管理者パスワード
#
# 管理者のパスワードを指定します。
# + tags=["vcp:skip", "vcp:parameters"]
db_root_password = getpass()
# + [markdown] heading_collapsed=true
# #### パラメータの保存
#
# この節で指定したパラメータの値をファイルに保存します。ただしパスワードなどの秘匿情報についてはファイルへの保存を行いません。
# + hidden=true
update_group_vars(
target_group,
db_image_name=db_image_name,
db_type=db_type,
db_moodle_db=db_moodle_db,
)
# + [markdown] heading_collapsed=true
# ## 設定ファイルの配置
#
# Moodleコンテナ、DBコンテナを実行するのに必要となる設定ファイルを構築環境に配置します。
#
# 
# + [markdown] hidden=true
# ### docker-compose.yml の配置
# + [markdown] hidden=true
# このNotebookで構築するMoodle環境は複数のコンテナで構成されています。複数コンテナの管理を容易にするために`docker-compose`を利用します。YAMLで記述した設定ファイル`docker-compose.yml`にコンテナ構成を記述することで複数のコンテナの起動、停止などが行えます。
#
# ここでは`docker-compose.yml`を構築環境に配置します。
# + [markdown] hidden=true
# まず`docker-compose.yml`などを格納するディレクトリ`/srv/moodle`を構築環境に作成します。
# + hidden=true
# !ansible {target_group} -b -m file -a \
# 'path=/srv/moodle state=directory owner={{{{ansible_ssh_user}}}}'
# + [markdown] hidden=true
# 作成したディレクトリに `docker-compose.yml`を配置します。
#
# > ここで配置する `docker-compose.yml` はコンテナ内から設定ファイルをコピーするための一時的なものです。実運用に用いる `docker-compose.yml` は次章で改めて配置します。
# + hidden=true
from tempfile import TemporaryDirectory
from pathlib import Path
import yaml

# Secrets collected earlier via getpass() are passed to the template as extra
# Ansible variables through a throwaway YAML file, so they are never written
# to group_vars. (Restored the variable names that had been replaced by
# "<PASSWORD>" placeholders, which is not valid Python.)
params = {
    'moodle_admin_name': moodle_admin_name,
    'moodle_admin_password': moodle_admin_password,
    'db_moodle_db_user': db_moodle_db_user,
    'db_moodle_db_password': db_moodle_db_password,
    'db_root_password': db_root_password,
}
with TemporaryDirectory() as workdir:
    vars_path = Path(workdir) / 'moodle.yml'
    with vars_path.open(mode='w') as f:
        yaml.safe_dump(params, f)
    # !ansible {target_group} -m template \
    #     -e prepare=true -e @{vars_path} \
    #     -a 'src=template/docker-compose.yml dest=/srv/moodle/'
# + [markdown] hidden=true
# ### 各コンテナの設定ファイルを配置する
# + [markdown] hidden=true
# このNotebookで構築する環境では、コンテナ内で実行する Apache HTTP server の`httpd.conf`などの設定ファイルを構築環境のホスト側に配置しています。これは各コンテナイメージに状態を持たせないようにして、バックアップ、リストアなどを容易に行うための対応策です。ホスト側に配置した設定ファイルは`docker-compose.yml`で指定した[bind mount](https://docs.docker.com/storage/bind-mounts/)の機能を利用してコンテナから参照できるようにしています。
#
# bind mountによって配置する設定ファイルのホスト環境におけるパスとコンテナ環境におけるパスの対応関係を以下に示します。
#
# <table>
# <tr>
# <th style="text-align:left;">コンテナ名</th>
# <th style="text-align:left;">コンテナ環境のパス</th>
# <th style="text-align:left;">ホスト環境のパス</th>
# </tr>
# <tr>
# <td style="text-align:left;">moodle</td>
# <td style="text-align:left;">/etc/httpd/conf/httpd.conf</td>
# <td style="text-align:left;">/srv/moodle/moodle/conf/httpd/conf/httpd.conf</td>
# </tr>
# <tr>
# <td style="text-align:left;">moodle</td>
# <td style="text-align:left;">/etc/httpd/conf.d/</td>
# <td style="text-align:left;">/srv/moodle/moodle/conf/httpd/conf.d/</td>
# </tr>
# <tr>
# <td style="text-align:left;">moodle</td>
# <td style="text-align:left;">/etc/httpd/conf.modules.d/</td>
# <td style="text-align:left;">/srv/moodle/moodle/conf/httpd/conf.modules.d/</td>
# </tr>
# <tr>
# <td style="text-align:left;">moodle</td>
# <td style="text-align:left;">/etc/php.ini</td>
# <td style="text-align:left;">/srv/moodle/moodle/conf/php.ini</td>
# </tr>
# <tr>
# <td style="text-align:left;">moodle</td>
# <td style="text-align:left;">/etc/php.d/</td>
# <td style="text-align:left;">/srv/moodle/moodle/conf/php.d/</td>
# </tr>
# <tr>
# <td style="text-align:left;">db</td>
# <td style="text-align:left;">/etc/mysql/mariadb.conf.d/</td>
# <td style="text-align:left;">/srv/moodle/db-0/conf/mariadb.conf.d/</td>
# </tr>
# </table>
#
# この節では、コンテナイメージの設定ファイルをコピーして構築環境のホスト側への配置を行います。
# + [markdown] hidden=true
# コンテナ内の設定ファイルをホスト側にコピーするために、一時的にコンテナを起動します。まず、利用するコンテナイメージを取得します。
# + hidden=true
# !ansible {target_group} -a \
# 'chdir=/srv/moodle docker-compose pull'
# + [markdown] hidden=true
# 設定ファイルをコピーするために一時的なコンテナ起動を行います。
# + hidden=true
# !ansible {target_group} -a \
# 'chdir=/srv/moodle docker-compose up -d'
# + [markdown] hidden=true
# コンテナから設定ファイルなどのコピーを行います。
#
# > 不整合をさけるために、ホスト側に配置されているファイルを全て削除してからコピーを行います。
# + hidden=true
# !ansible {target_group} -a 'chdir=/srv/moodle \
# mkdir -p db/data db/misc moodle/conf moodle/data/moodledata'
# !ansible {target_group} -b -m shell -a 'chdir=/srv/moodle \
# rm -rf db/conf moodle/data/php moodle/conf/*'
# !ansible {target_group} -m shell -a 'chdir=/srv/moodle \
# docker cp db:/etc/mysql db/conf; \
# docker cp moodle:/etc/httpd moodle/conf; \
# docker cp moodle:/etc/php.ini moodle/conf; \
# docker cp moodle:/etc/php.d moodle/conf; \
# docker cp moodle:/var/www/moodle moodle/data/php'
# + [markdown] hidden=true
# 設定ファイルのコピーが完了したので、一時的に起動したコンテナを停止、削除します。
# + hidden=true
# !ansible {target_group} -a \
# 'chdir=/srv/moodle docker-compose down'
# + [markdown] hidden=true
# ### ログ関連の準備
#
# コンテナで実行するサービスのログに関するセットアップを行います。
# + [markdown] hidden=true
# #### ログ出力先のディレクトリの作成
# + [markdown] hidden=true
# このNotebookで構築する環境では、コンテナ内で実行する Apache HTTP server などのログ出力先ディレクトリをホスト側のディレクトリに[bind mount](https://docs.docker.com/storage/bind-mounts/)します。これによりホスト側からもログの参照が容易になります。
#
# ログ出力先ディレクトリのコンテナ環境とホスト環境での対応関係を以下に示します。
#
# <table>
# <tr>
# <th style="text-align:left;">コンテナ名</th>
# <th style="text-align:left;">コンテナ環境のパス</th>
# <th style="text-align:left;">ホスト環境のパス</th>
# </tr>
# <tr>
# <td style="text-align:left;">moodle</td>
# <td style="text-align:left;">/var/log/httpd</td>
# <td style="text-align:left;">/var/log/httpd</td>
# </tr>
# <tr>
# <td style="text-align:left;">db</td>
# <td style="text-align:left;">/var/log/mysql</td>
# <td style="text-align:left;">/var/log/mysql</td>
# </tr>
# </table>
# + [markdown] hidden=true
# ログ出力先のディレクトリを作成します。
# + hidden=true
# !ansible {target_group} -b -m file -a 'path=/var/log/httpd state=directory'
# !ansible {target_group} -b -m file -a 'path=/var/log/mysql owner=999 group=adm state=directory'
# + [markdown] hidden=true
# #### logrotateの設定ファイルを配置する
#
# Moodleコンテナ、DBコンテナのログをローテーションするための設定ファイルを配置します。logrotateはホスト環境で実行するので、ホスト側の `/etc/logrotate.d/` に配置します。
# + hidden=true
# !ansible {target_group} -b -m copy -a 'src=template/logrotate.d/httpd dest=/etc/logrotate.d/'
# !ansible {target_group} -b -m copy -a 'src=template/logrotate.d/mysql-server dest=/etc/logrotate.d/'
# + [markdown] hidden=true
# #### mysqladmin の設定ファイルを配置する
#
# MariaDB/MySQL に対して logrotate を行うには管理者権限で `mysqladmin flush-logs` などのコマンドを実行出来るようにする必要があります。これを可能にするために管理者パスワードの情報を `/root/.my.cnf` に格納します。
# + [markdown] hidden=true
# コンテナに状態を持たせないようにするために設定ファイルはホスト環境に配置したものを bind mountすることにします。コンテナ環境、ホスト環境におけるパスを以下に示します。
#
# <table>
# <tr>
# <th style="text-align:left;">コンテナ環境のパス</th>
# <th style="text-align:left;">ホスト環境のパス</th>
# </tr>
# <tr>
# <td style="text-align:left;">/root/.my.cnf</td>
# <td style="text-align:left;">/srv/moodle/db/misc/my.cnf</td>
# </tr>
# </table>
# + [markdown] hidden=true
# 設定ファイルをホスト側に配置します。
# + hidden=true
# Render /srv/moodle/db/misc/my.cnf (bind-mounted as /root/.my.cnf in the DB
# container) with the DB passwords supplied via a throwaway vars file.
with TemporaryDirectory() as workdir:
    vars_path = Path(workdir) / 'moodle.yml'
    with vars_path.open(mode='w') as f:
        yaml.safe_dump(params, f)
    # !ansible {target_group} -b -m template \
    #     -e prepare=true -e @{vars_path} \
    #     -a 'src=template/mysql/my.cnf dest=/srv/moodle/db/misc/ mode=0600 owner=root group=root'
# + [markdown] hidden=true
# #### MariaDB 10.6 の設定ファイルを配置する
# + [markdown] hidden=true
# MariaDB 10.6 から導入されたシステム変数に [innodb_read_only_compressed](https://mariadb.com/kb/en/innodb-system-variables/#innodb_read_only_compressed) があります。デフォルトでは ON となっていますが、その場合Moodleのセットアップ時のテーブル操作がエラーとなってしまいます。これを回避するために `innodb_read_only_compressed` を `OFF`にする設定ファイルを配置します。設定は`[mariadb-10.6]`グループで行いMariaDB 10.6以外には影響が無いようにします。
# + [markdown] hidden=true
# 設定ファイルのコンテナ環境、ホスト環境におけるパスを以下に示します。
#
# <table>
# <tr>
# <th style="text-align:left;">コンテナ環境のパス</th>
# <th style="text-align:left;">ホスト環境のパス</th>
# </tr>
# <tr>
# <td style="text-align:left;">/etc/mysql/mariadb.conf.d/99-local.cnf</td>
# <td style="text-align:left;">/srv/moodle/db/conf/mariadb.conf.d/99-local.cnf</td>
# </tr>
# </table>
# + hidden=true
# !ansible {target_group} -m copy \
# -a 'src=template/mariadb.conf.d/99-local.cnf \
# dest=/srv/moodle/db/conf/mariadb.conf.d/'
# + [markdown] heading_collapsed=true
# ## アプリケーションコンテナの起動
#
# Moodleコンテナ、データベースコンテナを起動して、Moodle環境を起動します。
#
# 
# + [markdown] hidden=true
# `docker-compose.yml`を配置します。
#
# > ここで配置する`docker-compose.yml`が実運用で使用するものとなります。前章で配置したものとの違いは設定ファイルに関する bind mount 設定を追加していることです。
# + hidden=true
# Render the production docker-compose.yml (this time without -e prepare=true,
# so the bind-mount entries for the copied config files are included).
with TemporaryDirectory() as workdir:
    vars_path = Path(workdir) / 'moodle.yml'
    with vars_path.open(mode='w') as f:
        yaml.safe_dump(params, f)
    # !ansible {target_group} -m template \
    #     -e @{vars_path} \
    #     -a 'src=template/docker-compose.yml dest=/srv/moodle/'
# + [markdown] hidden=true
# コンテナの起動を行います。
# + hidden=true
# !ansible {target_group} -a 'chdir=/srv/moodle \
# docker-compose up -d'
# + [markdown] hidden=true
# Moodleのインストール処理が完了してMoodleのサービスが開始されるまで数分程度を要します。ここではMoodleが開始されるまでの待ち合わせを行います。
# + hidden=true
# %run scripts/utils.py

# Poll until the Moodle container answers HTTP on localhost. retry_exec
# (defined in scripts/utils.py) repeats check_http_access until the curl
# probe stops raising.
def check_http_access():
    # !ansible {target_group} -a \
    #     "curl -s -f -I http://localhost" | grep -w HTTP
retry_exec(check_http_access, err=Exception)
# -
# ## Moodle を利用できることを確認
#
# 構築したMoodle環境にアクセスし、Moodle が利用できることを確認します。
# ### アドレスの確認
# 次のセルを実行すると表示されるリンクが、構築したMoodle環境のアドレスです。リンクをクリックしてMoodle環境にログインしてください。
from IPython.core.display import HTML

# Render a clickable link to the freshly constructed Moodle site.
site_link = '<a href="{0}" target="_blank">{0}</a>'.format(moodle_url)
HTML(site_link)
# ### ライブログの確認
#
# Moodle のライブログを確認します。次のセルを実行すると表示されるリンクをクリックしてください。
from IPython.core.display import HTML

# Render a clickable link to Moodle's live log report page.
loglive_url = '{0}/report/loglive/index.php'.format(moodle_url)
HTML('<a href="{0}" target="_blank">{0}</a>'.format(loglive_url))
# ## コンテナのログを確認する
# MoodleコンテナのApache HTTP Serverのログはホスト環境の `/var/log/httpd/` に出力されます。アクセスログ `access_log` の内容を確認してみます。
# !ansible {target_group} -a 'tail /var/log/httpd/access_log'
# DBコンテナのログはホスト環境の `/var/log/mysql/` に出力されます。エラーログ `error.log` の内容を確認してみます。
# !ansible {target_group} -b -a 'tail /var/log/mysql/error.log'
# + [markdown] heading_collapsed=true
# ## crontabの設定
#
# Moodleのスケジュールタスクを実行するために必要となるスクリプトを1分毎に実行するように crontab を設定します。
# + hidden=true
# !ansible {target_group} -m cron -a 'name={target_group} \
# job="cd /srv/moodle && docker-compose exec -T moodle /usr/bin/php /var/www/moodle/admin/cli/cron.php > /dev/null"'
| Moodle-Simple/notebooks/021-Moodleコンテナの起動.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
from pymongo import MongoClient
import tldextract
import math
import re
import pickle
from tqdm import tqdm_notebook as tqdm
import spacy
from numpy import dot
from numpy.linalg import norm
import csv
import random
import statistics
import copy
import itertools
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer as SIA
from sklearn import svm
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit, KFold
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfTransformer
import scipy
# Load the spaCy English language model.
nlp = spacy.load('en')

# Connect to the GDELT Mongo database.
# NOTE(review): credentials are hard-coded in source ("ask David" per the
# original comment) — move them to an environment variable and rotate them.
client = MongoClient('mongodb://gdelt:meidnocEf1@gdeltmongo1:27017/')
db = client.gdelt.metadata

# RFC 3986 regular expression for splitting a URL into its components;
# group(4) is the authority (host) part.
re_3986 = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
# For stripping "www." from hostnames. Fixed: the dot is now escaped so only
# a literal "www." matches (the old pattern "www." also matched e.g. "wwwX").
wgo = re.compile(r"www\.")
# Parts of speech retained when building the vocabularies below.
whitelist = ["NOUN", "PROPN", "ADJ", "ADV"]
# Load the MBFC (Media Bias/Fact Check) labels scraped from their website.
# Row layout used below: row[1]=political bias, row[2]=reporting quality,
# row[3]=fake-news flag, row[4]=site URL.
bias = []
biasnames = []
pol = ['L', 'LC', 'C', 'RC', 'R'] #Political Bias
rep = ['VERY LOW', 'LOW', 'MIXED', 'HIGH', 'VERY HIGH'] #Reporting Quality
flag = ['F', 'X', 'S'] #Fake categories: Fake, Conspiracy, Satire
cats = pol  # label set the classifier below will predict
s2l = {}  # site domain -> label string
with open('bias.csv', 'r') as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        name = re_3986.match(row[4]).group(4)  # host part of the site URL
        p = -1
        r = -1
        f = -1
        if row[1] in pol:
            p = pol.index(row[1])
            s2l[name] = row[1]
        if row[2] in rep:
            r = rep.index(row[2])
        if row[3] in flag:
            f = flag.index(row[3])
            # NOTE(review): this overwrites a political label set above for
            # the same site — confirm the fake flag is meant to take priority.
            s2l[name] = row[3]
        bias.append(row + [name, p, r, f, 1 if p == -1 else 0])
        biasnames.append(name)
# +
# NOTE(review): `sample` is unused — the .limit(sample) call is commented out.
sample = 1000000
stuff = db.find({},{'text':1,'sourceurl':1}).sort("_id",-1)#.limit(sample)
arts = []
#Download articles and process them with SpaCy
for obj in tqdm(stuff):
    if 'text' in obj:
        # Source domain with any "www." prefix stripped.
        sdom = wgo.sub("", re_3986.match(obj['sourceurl']).group(4))
        if sdom in biasnames:
            # Only the first 800 characters of each article are processed.
            doc = nlp.tokenizer(obj['text'][:100*8])
            nlp.tagger(doc)
            #Only break into tokens and give them part of speech tags
            arts.append((sdom, doc))
N = len(arts)
# +
vocab = set()
bivocab = set()
# Build the unigram and bigram vocabularies from every article whose source
# site carries one of the target labels. Only non-stopword, alphabetic tokens
# with a whitelisted part of speech are kept. set.add() is idempotent, so the
# redundant "if not x in set" membership tests were dropped (same result,
# one lookup fewer, and avoids the `not x in` anti-idiom).
for (sdom, doc) in tqdm(arts):
    mycat = s2l[sdom]
    if mycat in cats:
        for word in doc[:-1]:
            if not word.is_stop and word.is_alpha and word.pos_ in whitelist:
                vocab.add(word.lemma_)
                neigh = word.nbor()
                if not neigh.is_stop and neigh.pos_ in whitelist:
                    bivocab.add(word.lemma_ + " " + neigh.lemma_)
vsize = len(vocab)
print(vsize)
v2i = dict([(key, i) for i, key in enumerate(vocab)])  # term -> column index
site_raw_tc = {}  # site -> term count vector
site_raw_ts = {}  # site -> summed article sentiment per term
bvsize = len(bivocab)
print(bvsize)
bv2i = dict([(key, i) for i, key in enumerate(bivocab)])  # bigram -> column index
site_raw_bc = {}  # site -> bigram count vector
site_raw_bs = {}  # site -> summed article sentiment per bigram
#Build arrays for every site, containing counts of the terms and the average sentiment
# Sentiment is collected for each term by adding the article's sentiment every
#time the term is detected, then dividing by the term count to get the mean
sa = SIA()
for (sdom, doc) in tqdm(arts):
    mycat = s2l[sdom]
    if mycat in cats:
        # Lazily allocate the four per-site vectors on first sighting.
        if sdom not in site_raw_tc:
            site_raw_tc[sdom] = np.zeros(vsize)
            site_raw_ts[sdom] = np.zeros(vsize)
            site_raw_bc[sdom] = np.zeros(bvsize)
            site_raw_bs[sdom] = np.zeros(bvsize)
        # VADER compound sentiment of the whole (truncated) article.
        c = sa.polarity_scores(doc.text)['compound']
        for word in doc[:-1]:
            if not word.is_stop and word.is_alpha and word.pos_ in whitelist:
                site_raw_tc[sdom][v2i[word.lemma_]] += 1
                site_raw_ts[sdom][v2i[word.lemma_]] += c
                neigh = word.nbor()
                if not neigh.is_stop and neigh.pos_ in whitelist:
                    bigram = word.lemma_+" "+neigh.lemma_
                    site_raw_bc[sdom][bv2i[bigram]] += 1
                    site_raw_bs[sdom][bv2i[bigram]] += c
# +
sites = [k for k in site_raw_tc.keys()]
#List of sites
site_tcv = np.array([v for v in site_raw_tc.values()])
site_tsv = np.array([v for v in site_raw_ts.values()])
site_bcv = np.array([v for v in site_raw_bc.values()])
site_bsv = np.array([v for v in site_raw_bs.values()])
# Create 2D arrays for bigram and term counts and sentiments
# (rows align with `sites` — dicts preserve insertion order).
site_tfv = site_tcv/np.sum(site_tcv, axis=1)[:,None]
site_tfv[np.isnan(site_tfv)] = 0  # sites with zero counts divide by 0 -> NaN
site_tsv = site_tsv/site_tcv  # summed sentiment / count = mean sentiment
site_tsv[np.isnan(site_tsv)] = 0
site_bfv = site_bcv/np.sum(site_bcv, axis=1)[:,None]
site_bfv[np.isnan(site_bfv)] = 0
site_bsv = site_bsv/site_bcv
site_bsv[np.isnan(site_bsv)] = 0
#Calculate average sentiment and frequencies
s2c = dict([(site, s2l[site]) for site in sites])
cat_tcv = np.array([sum([site_raw_tc[site] for site in sites if s2l[site] == cat]) for cat in cats])
cat_tfv = cat_tcv/np.sum(cat_tcv, axis=1)[:, None]
cat_bcv = np.array([sum([site_raw_bc[site] for site in sites if s2l[site] == cat]) for cat in cats])
cat_bfv = cat_bcv/np.sum(cat_bcv, axis=1)[:, None]
#Calculate frequencies for each category
doc_tcv = np.sum(site_tcv, axis=0)
doc_tfv = doc_tcv/np.sum(doc_tcv)
doc_bcv = np.sum(site_bcv, axis=0)
doc_bfv = doc_bcv/np.sum(doc_bcv)
#Overall corpus frequencies
site_tszv = scipy.stats.mstats.zscore(site_tsv,axis=0)
site_tszv[np.isnan(site_tszv)] = 0
print("sent tz score" + str(site_tszv.shape))
#Z scores for term sentiment
site_bszv = scipy.stats.mstats.zscore(site_bsv,axis=0)
site_bszv[np.isnan(site_bszv)] = 0
print("sent bz score" + str(site_bszv.shape))
#Z scores for bigram sentiment
transformer = TfidfTransformer(smooth_idf=False)
ttfidf = transformer.fit_transform(site_tcv)
print("ttfidf" + str(ttfidf.shape))
btfidf = transformer.fit_transform(site_bcv)
print("btfidf" + str(btfidf.shape))
#Calculate TFIDF scores
site_tfdv = site_tfv - doc_tfv
site_bfdv = site_bfv - doc_bfv
#Difference in term frequency (site frequency minus corpus-wide frequency)
# +
#Run the models and score them
clf = RandomForestClassifier(random_state=42, n_estimators=200)
# Feature matrix: unigram TFIDF, sentiment z-scores, frequency deltas,
# then the same three feature families for bigrams.
X = np.concatenate((ttfidf.toarray(),site_tszv,site_tfdv,btfidf.toarray(),site_bszv,site_bfdv), axis=1)
print(X.shape)
y = np.array([cats.index(s2l[site]) for site in sites])
print(len(y))
cscore = cross_val_score(clf, X, y, cv=3)
print(cscore)
print(sum(cscore)/3)  # mean 3-fold CV score on all features
clf.fit(X, y)
plt.plot(clf.feature_importances_)
plt.show()
# Keep only features whose importance exceeds the threshold, then re-score.
mask = [i for i, x in enumerate(clf.feature_importances_) if x > 0.00035]
cscore = cross_val_score(clf, X[:, mask], y, cv=3)
print(cscore)
print(sum(cscore)/3)
# Accumulate a confusion matrix across the 3 folds on the masked features.
cms = []
for train, test in KFold(n_splits=3).split(X):
    clf.fit(X[train,:][:,mask],y[train])
    cms.append(confusion_matrix(y[test], clf.predict(X[test,:][:,mask])))
print(sum(cms))
plt.imshow(sum(cms))
plt.show()
print(sum(sum(sum(cms))))  # total number of scored samples
# -
| notebooks/Content_Model_Article_Text_Features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
# <script>
# window.dataLayer = window.dataLayer || [];
# function gtag(){dataLayer.push(arguments);}
# gtag('js', new Date());
#
# gtag('config', 'UA-59152712-8');
# </script>
#
# # `BaikalETK`: NRPy+-Based BSSN Solver for the Einstein Toolkit
#
# ## Author: <NAME>
#
# #### Special thanks to <NAME> for help in answering many implementation questions
#
# ## This module generates `BaikalETK`, an [Einstein Toolkit](https://einsteintoolkit.org) thorn for solving Einstein's equations in the BSSN formalism, in Cartesian coordinates. It features SIMD intrinsics and OpenMP support.
#
# **Notebook Status:** <font color='orange'><b> Validated against the Einstein Toolkit `McLachlan` BSSN thorn, both in the context of black hole binary simulations (excellent gravitational waveform agreement) as well as binary neutron star simulations (when parameter `add_stress_energy_source_terms` below is set to `True`). Once plots demonstrating this agreement are added to this tutorial notebook, the validation status will be set to</b></font> <font color='green'><b>Validated</b></font>.
#
# **Validation Notes:** This tutorial notebook has been validated against a trusted Einstein Toolkit thorn, but plots demonstrating its validation have yet to be included in this notebook.
#
# ## Introduction
#
# ```
# How often did my soul cry out:
# Come back to Baikal once again?
# I still do not know this lake:
# To see does not mean to know.
# ```
# [<NAME>](https://en.wikipedia.org/wiki/Igor_Severyanin), [[1]](https://1baikal.ru/en/istoriya/let’s-turn-to-baikal-a-poetic-view).
#
# [Lake Baikal](https://en.wikipedia.org/wiki/Lake_Baikal) is home to the [nerpa seal](https://en.wikipedia.org/wiki/Baikal_seal), NRPy+'s mascot.
#
# This thorn is meant to reproduce the functionality of the `McLachlan` thorn, generated by the [Mathematica](https://www.wolfram.com/mathematica/)-based [Kranc](http://kranccode.org/) code, but using the NRPy+ infrastructure.
#
# ### Associated NRPy+ Source Code & Tutorial Modules for this module:
# * [BSSN/ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear.py](../edit/BSSN/ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-Converting_Exact_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb): Spherical/Cartesian ADM$\to$Curvilinear BSSN converter function, for which ADM quantities are assumed given at each gridpoint (i.e., exact, closed-form expressions are not given). This is used to generate BaikalETK's ADM$\to$BSSN function, as in the ETK spacetime evolution modules are to assume that initial data are given as ADM quantities in the Cartesian basis at each gridpoint.
# * [BSSN/ADM_in_terms_of_BSSN.py](../edit/BSSN/ADM_in_terms_of_BSSN.py); [\[**tutorial**\]](Tutorial-ADM_in_terms_of_BSSN.ipynb): Constructs ADM quantities in terms of BSSN quantities (in arbitrary curvilinear coordinates, though we use Cartesian here). This is used to generate BaikalETK's BSSN$\to$ADM function, which make ADM variables available to diagnostic thorns within the ETK.
# * [BSSN/BSSN_constraints.py](../edit/BSSN/BSSN_constraints.py); [\[**tutorial**\]](Tutorial-BSSN_constraints.ipynb): Hamiltonian constraint in BSSN curvilinear basis/coordinates
# * [BSSN/BSSN_RHSs.py](../edit/BSSN/BSSN_RHSs.py); [\[**tutorial**\]](Tutorial-BSSN_time_evolution-BSSN_RHSs.ipynb): Generates the right-hand sides for the BSSN evolution equations in singular, curvilinear coordinates
# * [BSSN/BSSN_gauge_RHSs.py](../edit/BSSN/BSSN_gauge_RHSs.py); [\[**tutorial**\]](Tutorial-BSSN_time_evolution-BSSN_gauge_RHSs.ipynb): Generates the right-hand sides for the BSSN gauge evolution equations in singular, curvilinear coordinates
# <a id='toc'></a>
#
# # Table of Contents
# $$\label{toc}$$
#
# This notebook is organized as follows
#
# 1. [Step 1](#initializenrpy): Initialize needed Python/NRPy+ modules
# 1. [Step 2](#bssn): NRPy+-generated C code kernels for BSSN spacetime solve
# 1. [Step 2.a](#bssnrhs): BSSN RHS expressions
# 1. [Step 2.b](#hammomconstraints): Hamiltonian & momentum constraints
# 1. [Step 2.c](#gamconstraint): Enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint (in Cartesian coordinates, $\det{\hat{\gamma}_{ij}}=1$)
# 1. [Step 2.d](#parallel_codegen): Generate all the above C code kernels in parallel
# 1. [Step 3](#cclfiles): CCL files - Define how this module interacts and interfaces with the wider Einstein Toolkit infrastructure
# 1. [Step 3.a](#paramccl): `param.ccl`: specify free parameters within `BaikalETK`
# 1. [Step 3.b](#interfaceccl): `interface.ccl`: define needed gridfunctions; provide keywords denoting what this thorn provides and what it should inherit from other thorns
# 1. [Step 3.c](#scheduleccl): `schedule.ccl`:schedule all functions used within `BaikalETK`, specify data dependencies within said functions, and allocate memory for gridfunctions
# 1. [Step 4](#cdrivers): C driver functions for ETK registration & NRPy+-generated kernels
# 1. [Step 4.a](#etkfunctions): Needed ETK functions: Banner, Symmetry registration, Parameter sanity check, Method of Lines (`MoL`) registration, Boundary condition
# 1. [Step 4.b](#bssnadmconversions): BSSN $\leftrightarrow$ ADM conversions
# 1. [Step 4.b.i](#admtobssn): ADM $\to$ BSSN
# 1. [Step 4.b.ii](#bssntoadm): BSSN $\to$ ADM
# 1. [Step 4.c](#bssnrhss) Evaluate BSSN right-hand-sides (RHSs)
# 1. [Step 4.c.i](#ricci): Evaluate Ricci tensor
# 1. [Step 4.c.ii](#bssnrhssricciinput): Evaluate BSSN RHSs, using Ricci tensor as input
# 1. [Step 4.d](#enforcegammahatconstraint): Enforcing conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint (in Cartesian coordinates, $\det{\hat{\gamma}_{ij}}=1$)
# 1. [Step 4.e](#diagnostics): Diagnostics: Computing Hamiltonian & momentum constraints
# 1. [Step 4.f](#t4uu): `driver_BSSN_T4UU()`: Compute $T^{\mu\nu}$ from `TmunuBase`'s $T_{\mu\nu}$
# 1. [Step 4.g](#makecodedefn): `make.code.defn`: List of all C driver functions needed to compile `BaikalETK`
# 1. [Step 5](#code_validation): Code Validation
# 1. [Step 5.a](#self_validation): Validation against [BaikalETK.BaikalETK_Pymodule](../edit/BaikalETK/BaikalETK_Pymodule.py) module
# 1. [Step 6](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
# <a id='initializenrpy'></a>
#
# # Step 1: Initialize needed Python/NRPy+ modules \[Back to [top](#toc)\]
#
# $$\label{initializenrpy}$$
# +
# Step 1: Import needed core NRPy+ modules
from outputC import * # NRPy+: Core C code output module (also provides lhrh used below)
import finite_difference as fin # NRPy+: Finite difference C code generation module
import NRPy_param_funcs as par # NRPy+: Parameter interface
import grid as gri # NRPy+: Functions having to do with numerical grids
import loop as lp # NRPy+: Generate C code loops
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import reference_metric as rfm # NRPy+: Reference metric support
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
import shutil, os, sys # Standard Python modules for multiplatform OS-level functions
# Create directory for BaikalETK thorn & subdirectories in case they don't exist.
outrootdir = "BaikalETK/"
cmd.mkdir(os.path.join(outrootdir))
outdir = os.path.join(outrootdir,"src") # Main C code output directory
cmd.mkdir(outdir)
# Set spatial dimension (must be 3 for BSSN)
DIM = 3
par.set_parval_from_str("grid::DIM",DIM)
# Enable stress-energy terms?
# NOTE: setting this True activates the TmunuBase coupling code paths
# (T4UU gridfunction registration, extra schedule.ccl entries) below.
add_stress_energy_source_terms = False
# Default Kreiss-Oliger dissipation strength
default_KO_strength = 0.1
# Step 2: Set some core parameters, including CoordSystem MoL timestepping algorithm,
# FD order, floating point precision, and CFL factor:
# Choices are: Spherical, SinhSpherical, SinhSphericalv2, Cylindrical, SinhCylindrical,
# SymTP, SinhSymTP
# NOTE: Only CoordSystem == Cartesian makes sense here; new
# boundary conditions are needed within the ETK for
# Spherical, etc. coordinates.
CoordSystem = "Cartesian"
par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem)
rfm.reference_metric() # Create ReU, ReDD needed for rescaling B-L initial data, generating BSSN RHSs, etc.
# Set the standard 1+log lapse condition
LapseCondition = "OnePlusLog"
# Set the standard, second-order advecting-shift, Gamma-driving shift condition
ShiftCondition = "GammaDriving2ndOrder_NoCovariant"
FD_order = 4 # Finite difference order: even numbers only, starting with 2. 12 is generally unstable
REAL = "CCTK_REAL" # Set REAL to CCTK_REAL, the ETK data type for
# floating point precision (typically `double`)
# Set finite differencing order:
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", FD_order)
# Copy SIMD/SIMD_intrinsics.h to $outdir/SIMD/SIMD_intrinsics.h
cmd.mkdir(os.path.join(outdir,"SIMD"))
shutil.copy(os.path.join("SIMD/")+"SIMD_intrinsics.h",os.path.join(outdir,"SIMD/"))
# Set the gridfunction memory access type to ETK-like, so that finite_difference
# knows how to read and write gridfunctions from/to memory.
par.set_parval_from_str("grid::GridFuncMemAccess","ETK")
# -
# <a id='bssn'></a>
#
# # Step 2: Output C code for BSSN spacetime solve \[Back to [top](#toc)\]
# $$\label{bssn}$$
#
# <a id='bssnrhs'></a>
#
# ## Step 2.a: BSSN RHS expressions \[Back to [top](#toc)\]
# $$\label{bssnrhs}$$
#
# `BaikalETK` implements a fully covariant version of the BSSN 3+1 decomposition of Einstein's equations of general relativity, which is fully documented within NRPy+ ([start here](Tutorial-BSSN_formulation.ipynb)). However, especially if you are a newcomer to the field of numerical relativity, you may also find the following lectures and papers useful for understanding the adopted formalism:
#
# * Mathematical foundations of BSSN and 3+1 initial value problem decompositions of Einstein's equations:
# * [Thomas Baumgarte's lectures on mathematical formulation of numerical relativity](https://www.youtube.com/watch?v=t3uo2R-yu4o&list=PLRVOWML3TL_djTd_nsTlq5aJjJET42Qke)
# * [Yuichiro Sekiguchi's introduction to BSSN](http://www2.yukawa.kyoto-u.ac.jp/~yuichiro.sekiguchi/3+1.pdf)
# * Extensions to the standard BSSN approach used in NRPy+
# * [Brown's covariant "Lagrangian" formalism of BSSN](https://arxiv.org/abs/0902.3652)
# * [BSSN in spherical coordinates, using the reference-metric approach of Baumgarte, Montero, Cordero-Carrión, and Müller (2012)](https://arxiv.org/abs/1211.6632)
# * [BSSN in generic curvilinear coordinates, using the extended reference-metric approach of Ruchlin, Etienne, and Baumgarte (2018)](https://arxiv.org/abs/1712.07658)
#
# Here, we simply call the [BSSN.BSSN_RHSs](../edit/BSSN/BSSN_RHSs.py); [\[**tutorial**\]](Tutorial-BSSN_time_evolution-BSSN_RHSs.ipynb) and [BSSN.BSSN_gauge_RHSs](../edit/BSSN/BSSN_gauge_RHSs.py); [\[**tutorial**\]](Tutorial-BSSN_time_evolution-BSSN_gauge_RHSs.ipynb) NRPy+ Python modules to generate the symbolic expressions, add Kreiss-Oliger dissipation, and then output the finite-difference C code form of the equations using NRPy+'s [finite_difference](../edit/finite_difference.py) ([**tutorial**](Tutorial-Finite_Difference_Derivatives.ipynb)) C code generation module.
# +
import time # Standard Python module; useful for benchmarking below expression & code generation.
import BSSN.BSSN_RHSs as rhs
import BSSN.BSSN_gauge_RHSs as gaugerhs
# Select the gauge conditions chosen in the setup cell above:
par.set_parval_from_str("BSSN.BSSN_gauge_RHSs::ShiftEvolutionOption", ShiftCondition)
par.set_parval_from_str("BSSN.BSSN_gauge_RHSs::LapseEvolutionOption", LapseCondition)
print("Generating symbolic expressions for BSSN RHSs...")
start = time.time()
# Enable rfm_precompute infrastructure, which results in
# BSSN RHSs that are free of transcendental functions,
# even in curvilinear coordinates, so long as
# ConformalFactor is set to "W" (default).
cmd.mkdir(os.path.join(outdir,"rfm_files/"))
par.set_parval_from_str("reference_metric::enable_rfm_precompute","True")
par.set_parval_from_str("reference_metric::rfm_precompute_Ccode_outdir",os.path.join(outdir,"rfm_files/"))
# Evaluate BSSN + BSSN gauge RHSs with rfm_precompute enabled:
import BSSN.BSSN_quantities as Bq
# Leave the Ricci tensor symbolic here: it is computed separately (driver_pt1)
# and read from auxevol storage when the RHSs are evaluated (driver_pt2).
par.set_parval_from_str("BSSN.BSSN_quantities::LeaveRicciSymbolic","True")
rhs.BSSN_RHSs()
if add_stress_energy_source_terms == True:
    T4UU = ixp.register_gridfunctions_for_single_rank2("AUXEVOL","T4UU","sym01",DIM=4)
    import BSSN.BSSN_stress_energy_source_terms as Bsest
    Bsest.BSSN_source_terms_for_BSSN_RHSs(T4UU)
    rhs.trK_rhs += Bsest.sourceterm_trK_rhs
    for i in range(DIM):
        # Needed for Gamma-driving shift RHSs:
        rhs.Lambdabar_rhsU[i] += Bsest.sourceterm_Lambdabar_rhsU[i]
        # Needed for BSSN RHSs:
        rhs.lambda_rhsU[i] += Bsest.sourceterm_lambda_rhsU[i]
        for j in range(DIM):
            rhs.a_rhsDD[i][j] += Bsest.sourceterm_a_rhsDD[i][j]
gaugerhs.BSSN_gauge_RHSs()
# Add Kreiss-Oliger dissipation to the BSSN RHSs:
thismodule = "KO_Dissipation"
diss_strength = par.Cparameters("REAL", thismodule, "diss_strength", default_KO_strength)
# _dKOD symbols denote the Kreiss-Oliger-dissipation derivative operator
# applied to each evolved gridfunction:
alpha_dKOD = ixp.declarerank1("alpha_dKOD")
cf_dKOD = ixp.declarerank1("cf_dKOD")
trK_dKOD = ixp.declarerank1("trK_dKOD")
betU_dKOD = ixp.declarerank2("betU_dKOD","nosym")
vetU_dKOD = ixp.declarerank2("vetU_dKOD","nosym")
lambdaU_dKOD = ixp.declarerank2("lambdaU_dKOD","nosym")
aDD_dKOD = ixp.declarerank3("aDD_dKOD","sym01")
hDD_dKOD = ixp.declarerank3("hDD_dKOD","sym01")
for k in range(DIM):
    gaugerhs.alpha_rhs += diss_strength*alpha_dKOD[k]*rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]
    rhs.cf_rhs += diss_strength* cf_dKOD[k]*rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]
    rhs.trK_rhs += diss_strength* trK_dKOD[k]*rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]
    for i in range(DIM):
        if "2ndOrder" in ShiftCondition:
            gaugerhs.bet_rhsU[i] += diss_strength* betU_dKOD[i][k]*rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]
        gaugerhs.vet_rhsU[i] += diss_strength* vetU_dKOD[i][k]*rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]
        rhs.lambda_rhsU[i] += diss_strength*lambdaU_dKOD[i][k]*rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]
        for j in range(DIM):
            rhs.a_rhsDD[i][j] += diss_strength*aDD_dKOD[i][j][k]*rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]
            rhs.h_rhsDD[i][j] += diss_strength*hDD_dKOD[i][j][k]*rfm.ReU[k] # ReU[k] = 1/scalefactor_orthog_funcform[k]
# We use betaU as our upwinding control vector:
Bq.BSSN_basic_tensors()
betaU = Bq.betaU
import BSSN.Enforce_Detgammabar_Constraint as EGC
enforce_detg_constraint_symb_expressions = EGC.Enforce_Detgammabar_Constraint_symb_expressions()
# Next compute Ricci tensor
par.set_parval_from_str("BSSN.BSSN_quantities::LeaveRicciSymbolic","False")
Bq.RicciBar__gammabarDD_dHatD__DGammaUDD__DGammaU()
# Now that we are finished with all the rfm hatted
# quantities in generic precomputed functional
# form, let's restore them to their closed-
# form expressions.
par.set_parval_from_str("reference_metric::enable_rfm_precompute","False") # Reset to False to disable rfm_precompute.
rfm.ref_metric__hatted_quantities()
end = time.time()
print("Finished BSSN symbolic expressions in "+str(end-start)+" seconds.")
def BSSN_RHSs():
    """Generate src/BSSN_RHSs.h: SIMD-vectorized, OpenMP-parallelized C code
    evaluating the BSSN evolution-equation right-hand sides over the grid
    interior (ghost zones excluded), upwinded along the shift vector betaU.
    Assumes the Ricci tensor was already written to auxevol storage (Ricci())."""
    print("Generating C code for BSSN RHSs in "+par.parval_from_str("reference_metric::CoordSystem")+" coordinates.")
    start = time.time()
    # One lhrh (output gridfunction / symbolic RHS) pair per evolved variable:
    BSSN_evol_rhss = [ \
             lhrh(lhs=gri.gfaccess("rhs_gfs","aDD00"),rhs=rhs.a_rhsDD[0][0]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","aDD01"),rhs=rhs.a_rhsDD[0][1]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","aDD02"),rhs=rhs.a_rhsDD[0][2]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","aDD11"),rhs=rhs.a_rhsDD[1][1]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","aDD12"),rhs=rhs.a_rhsDD[1][2]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","aDD22"),rhs=rhs.a_rhsDD[2][2]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","alpha"),rhs=gaugerhs.alpha_rhs),
             lhrh(lhs=gri.gfaccess("rhs_gfs","betU0"),rhs=gaugerhs.bet_rhsU[0]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","betU1"),rhs=gaugerhs.bet_rhsU[1]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","betU2"),rhs=gaugerhs.bet_rhsU[2]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","cf"), rhs=rhs.cf_rhs),
             lhrh(lhs=gri.gfaccess("rhs_gfs","hDD00"),rhs=rhs.h_rhsDD[0][0]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","hDD01"),rhs=rhs.h_rhsDD[0][1]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","hDD02"),rhs=rhs.h_rhsDD[0][2]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","hDD11"),rhs=rhs.h_rhsDD[1][1]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","hDD12"),rhs=rhs.h_rhsDD[1][2]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","hDD22"),rhs=rhs.h_rhsDD[2][2]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","lambdaU0"),rhs=rhs.lambda_rhsU[0]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","lambdaU1"),rhs=rhs.lambda_rhsU[1]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","lambdaU2"),rhs=rhs.lambda_rhsU[2]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","trK"), rhs=rhs.trK_rhs),
             lhrh(lhs=gri.gfaccess("rhs_gfs","vetU0"),rhs=gaugerhs.vet_rhsU[0]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","vetU1"),rhs=gaugerhs.vet_rhsU[1]),
             lhrh(lhs=gri.gfaccess("rhs_gfs","vetU2"),rhs=gaugerhs.vet_rhsU[2]) ]
    BSSN_RHSs_string = fin.FD_outputC("returnstring",BSSN_evol_rhss, params="outCverbose=False,SIMD_enable=True",
                                      upwindcontrolvec=betaU)
    # Wrap the kernel in a loop over the grid interior; innermost stride is
    # SIMD_width, and rfm_precompute structs are read per loop level:
    with open(os.path.join(outdir,"BSSN_RHSs.h"), "w") as file:
        file.write(lp.loop(["i2","i1","i0"],["cctk_nghostzones[2]","cctk_nghostzones[1]","cctk_nghostzones[0]"],
                           ["cctk_lsh[2]-cctk_nghostzones[2]","cctk_lsh[1]-cctk_nghostzones[1]","cctk_lsh[0]-cctk_nghostzones[0]"],
                           ["1","1","SIMD_width"],
                           ["#pragma omp parallel for",
                            "#include \"rfm_files/rfm_struct__SIMD_outer_read2.h\"",
                            "#include \"rfm_files/rfm_struct__SIMD_outer_read1.h\""],"",
                           "#include \"rfm_files/rfm_struct__SIMD_inner_read0.h\"\n"+BSSN_RHSs_string))
    end = time.time()
    print("Finished BSSN_RHS C codegen in " + str(end - start) + " seconds.")
def Ricci():
    """Generate src/BSSN_Ricci.h: SIMD-vectorized C code evaluating the
    conformal Ricci tensor RbarDD (written to auxevol gridfunctions) over
    the grid interior.  Must run before the BSSN RHS kernel, which reads
    these gridfunctions."""
    print("Generating C code for Ricci tensor in "+par.parval_from_str("reference_metric::CoordSystem")+" coordinates.")
    start = time.time()
    # Six independent components (RbarDD is symmetric):
    Ricci_string = fin.FD_outputC("returnstring",
                                  [lhrh(lhs=gri.gfaccess("auxevol_gfs","RbarDD00"),rhs=Bq.RbarDD[0][0]),
                                   lhrh(lhs=gri.gfaccess("auxevol_gfs","RbarDD01"),rhs=Bq.RbarDD[0][1]),
                                   lhrh(lhs=gri.gfaccess("auxevol_gfs","RbarDD02"),rhs=Bq.RbarDD[0][2]),
                                   lhrh(lhs=gri.gfaccess("auxevol_gfs","RbarDD11"),rhs=Bq.RbarDD[1][1]),
                                   lhrh(lhs=gri.gfaccess("auxevol_gfs","RbarDD12"),rhs=Bq.RbarDD[1][2]),
                                   lhrh(lhs=gri.gfaccess("auxevol_gfs","RbarDD22"),rhs=Bq.RbarDD[2][2])],
                                  params="outCverbose=False,SIMD_enable=True")
    with open(os.path.join(outdir,"BSSN_Ricci.h"), "w") as file:
        file.write(lp.loop(["i2","i1","i0"],["cctk_nghostzones[2]","cctk_nghostzones[1]","cctk_nghostzones[0]"],
                           ["cctk_lsh[2]-cctk_nghostzones[2]","cctk_lsh[1]-cctk_nghostzones[1]","cctk_lsh[0]-cctk_nghostzones[0]"],
                           ["1","1","SIMD_width"],
                           ["#pragma omp parallel for",
                            "#include \"rfm_files/rfm_struct__SIMD_outer_read2.h\"",
                            "#include \"rfm_files/rfm_struct__SIMD_outer_read1.h\""],"",
                           "#include \"rfm_files/rfm_struct__SIMD_inner_read0.h\"\n"+Ricci_string))
    end = time.time()
    print("Finished Ricci C codegen in " + str(end - start) + " seconds.")
# -
# <a id='hammomconstraints'></a>
#
# ## Step 2.b: Hamiltonian & momentum constraints \[Back to [top](#toc)\]
# $$\label{hammomconstraints}$$
#
# Next output the C code for evaluating the Hamiltonian & momentum constraints [(**Tutorial**)](Tutorial-BSSN_constraints.ipynb). In the absence of numerical error, this constraint should evaluate to zero. However it does not due to numerical (typically truncation and roundoff) error. Therefore it is useful to measure the Hamiltonian & momentum constraint violation to gauge the accuracy of our simulation, and, ultimately determine whether errors are dominated by numerical finite differencing (truncation) error as expected.
# +
# First register the Hamiltonian as a gridfunction.
H = gri.register_gridfunctions("AUX","H")
# ... and the momentum constraint MU^i as a rank-1 (3-vector) AUX gridfunction.
MU = ixp.register_gridfunctions_for_single_rank1("AUX", "MU")
# Then define the Hamiltonian constraint and output the optimized C code.
import BSSN.BSSN_constraints as bssncon
def BSSNconstraints():
    """Generate src/BSSN_constraints.h: C code evaluating the Hamiltonian (H)
    and momentum (MU0, MU1, MU2) constraint violations over the grid
    interior.  In the continuum limit both converge to zero; their measured
    values diagnose numerical (truncation/roundoff) error."""
    bssncon.BSSN_constraints(add_T4UUmunu_source_terms=False)
    if add_stress_energy_source_terms == True:
        # T4UU = gri.register_gridfunctions_for_single_rank2("AUXEVOL","T4UU","sym01",DIM=4)
        import BSSN.BSSN_stress_energy_source_terms as Bsest
        Bsest.BSSN_source_terms_for_BSSN_constraints(T4UU)
        # Matter sources enter both the Hamiltonian and momentum constraints:
        bssncon.H += Bsest.sourceterm_H
        for i in range(DIM):
            bssncon.MU[i] += Bsest.sourceterm_MU[i]
    start = time.time()
    print("Generating optimized C code for Ham. & mom. constraints. May take a while, depending on CoordSystem.")
    Ham_mom_string = fin.FD_outputC("returnstring",
                                    [lhrh(lhs=gri.gfaccess("aux_gfs", "H"), rhs=bssncon.H),
                                     lhrh(lhs=gri.gfaccess("aux_gfs", "MU0"), rhs=bssncon.MU[0]),
                                     lhrh(lhs=gri.gfaccess("aux_gfs", "MU1"), rhs=bssncon.MU[1]),
                                     lhrh(lhs=gri.gfaccess("aux_gfs", "MU2"), rhs=bssncon.MU[2])],
                                    params="outCverbose=False")
    # Note: unlike the RHS/Ricci kernels this loop is not SIMD-vectorized
    # (unit stride in all three directions):
    with open(os.path.join(outdir,"BSSN_constraints.h"), "w") as file:
        file.write(lp.loop(["i2","i1","i0"],["cctk_nghostzones[2]","cctk_nghostzones[1]","cctk_nghostzones[0]"],
                           ["cctk_lsh[2]-cctk_nghostzones[2]","cctk_lsh[1]-cctk_nghostzones[1]","cctk_lsh[0]-cctk_nghostzones[0]"],
                           ["1","1","1"],["#pragma omp parallel for","",""], "", Ham_mom_string))
    end = time.time()
    print("Finished Hamiltonian & momentum constraint C codegen in " + str(end - start) + " seconds.")
# -
# <a id='gamconstraint'></a>
#
# ## Step 2.c: Enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint (in Cartesian coordinates, $\det{\hat{\gamma}_{ij}}=1$) \[Back to [top](#toc)\]
# $$\label{gamconstraint}$$
#
# Then enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint (Eq. 53 of [Ruchlin, Etienne, and Baumgarte (2018)](https://arxiv.org/abs/1712.07658)), as [documented in the corresponding NRPy+ tutorial notebook](Tutorial-BSSN-Enforcing_Determinant_gammabar_equals_gammahat_Constraint.ipynb)
#
# Applying curvilinear boundary conditions should affect the initial data at the outer boundary, and will in general cause the $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint to be violated there. Thus after we apply these boundary conditions, we must always call the routine for enforcing the $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint:
def gammadet():
    """Generate src/enforce_detgammabar_constraint.h: C code that rescales
    the conformal 3-metric at every grid point -- including ghost zones,
    hence the full cctk_lsh loop bounds -- so that det(gammabar_ij) equals
    det(gammahat_ij) (= 1 in Cartesian coordinates)."""
    start = time.time()
    print("Generating optimized C code for gamma constraint. May take a while, depending on CoordSystem.")
    enforce_gammadet_string = fin.FD_outputC("returnstring", enforce_detg_constraint_symb_expressions,
                                             params="outCverbose=False,preindent=0,includebraces=False")
    with open(os.path.join(outdir,"enforce_detgammabar_constraint.h"), "w") as file:
        file.write(lp.loop(["i2","i1","i0"],["0", "0", "0"],
                           ["cctk_lsh[2]","cctk_lsh[1]","cctk_lsh[0]"],
                           ["1","1","1"],
                           ["#pragma omp parallel for",
                            "#include \"rfm_files/rfm_struct__read2.h\"",
                            "#include \"rfm_files/rfm_struct__read1.h\""],"",
                           "#include \"rfm_files/rfm_struct__read0.h\"\n"+enforce_gammadet_string))
    end = time.time()
    print("Finished gamma constraint C codegen in " + str(end - start) + " seconds.")
# <a id='parallel_codegen'></a>
#
# ## Step 2.d: Generate all C codes in parallel \[Back to [top](#toc)\]
# $$\label{parallel_codegen}$$
#
# +
# Step 0: Import the multiprocessing module.
import multiprocessing
# Step 1: Create a list of functions we wish to evaluate in parallel
funcs = [BSSN_RHSs,Ricci,BSSNconstraints,gammadet]
# Step 1.a: Define master function for parallelization.
#           Note that lambdifying this doesn't work in Python 3:
#           Pool.map() needs a picklable module-level function.
def master_func(arg):
    """Dispatch helper: run the arg'th code-generation function from funcs."""
    funcs[arg]()
# Step 2: Evaluate list of functions in parallel if allowed;
#         otherwise fallback to serial evaluation.
# Fixes vs. original: the Pool is now closed and joined (the original
# leaked worker processes), and the bare `except:` -- which also swallowed
# KeyboardInterrupt/SystemExit -- is narrowed to `except Exception`.
try:
    if __name__ == '__main__':
        pool = multiprocessing.Pool()
        try:
            pool.map(master_func,range(len(funcs)))
        finally:
            # Always release worker processes, even if codegen raises.
            pool.close()
            pool.join()
except Exception:
    # If multiprocessing didn't work for whatever reason,
    # evaluate functions in serial.
    for func in funcs:
        func()
# -
# <a id='cclfiles'></a>
#
# # Step 3: ETK `ccl` file generation \[Back to [top](#toc)\]
# $$\label{cclfiles}$$
#
# <a id='paramccl'></a>
#
# ## Step 3.a: `param.ccl`: specify free parameters within `BaikalETK` \[Back to [top](#toc)\]
# $$\label{paramccl}$$
#
# All parameters necessary for the computation of the BSSN right-hand side (RHS) expressions are registered within NRPy+; we use this information to automatically generate `param.ccl`. NRPy+ also specifies default values for each parameter.
#
# More information on `param.ccl` syntax can be found in the [official Einstein Toolkit documentation](http://cactuscode.org/documentation/referencemanual/ReferenceManualch8.html#x12-265000C2.3).
# +
def keep_param__return_type(paramtuple):
    """Decide whether a registered NRPy+ parameter belongs in param.ccl.

    Returns the tuple (keep_param, typestring):
      keep_param -- False for parameters set elsewhere (grid/coordinate
                    parameters, #define'd constants, immutable REALs).
      typestring -- CCTK parameter-type keyword ("BOOLEAN ", "CCTK_REAL ",
                    "CCTK_INT ") or "" when no type applies.

    Exits via sys.exit(1) for unsupported parameter types (e.g., char).
    """
    # Separate thorns within the ETK take care of grid/coordinate parameters;
    # thus we ignore NRPy+ grid/coordinate parameters:
    keep_param = paramtuple.module not in ("grid", "reference_metric")
    typestring = ""
    partype = paramtuple.type
    if partype == "bool":
        typestring = "BOOLEAN "
    elif partype == "int":
        typestring = "CCTK_INT "
    elif partype == "REAL":
        if paramtuple.defaultval != 1e300: # 1e300 is a magic value indicating that the C parameter should be mutable
            typestring = "CCTK_REAL "
        else:
            keep_param = False
    elif partype == "#define":
        # #define'd parameters are baked into the C sources, not set in param.ccl.
        keep_param = False
    else:
        # "char" lands here as well; it printed the identical message in the
        # original, so the two branches are merged.
        # FIXME: char/string parameter types should in principle be supported
        print("Error: parameter "+paramtuple.module+"::"+paramtuple.parname+
              " has unsupported type: \""+ paramtuple.type + "\"")
        sys.exit(1)
    return keep_param, typestring
# Generate param.ccl.
# Fix vs. original: the parameter loop and the final file.write(paramccl_str)
# sat outside the `with` block, so that last write targeted an already-closed
# file handle (ValueError at runtime).  We now build the parameter string
# first, then perform every write inside a single `with` block.
paramccl_str = ""
for Cparam in par.glb_Cparams_list:
    # keep_param is a boolean indicating whether we should accept or reject
    # the parameter. singleparstring will contain the string indicating
    # the variable type.
    keep_param, singleparstring = keep_param__return_type(Cparam)
    if keep_param:
        parname = Cparam.parname
        partype = Cparam.type
        singleparstring += parname + " \""+ parname +" (see NRPy+ for parameter definition)\"\n"
        singleparstring += "{\n"
        if partype != "bool":
            # Allow all values to be set at runtime without restriction.
            singleparstring += " *:* :: \"All values accepted. NRPy+ does not restrict the allowed ranges of parameters yet.\"\n"
        singleparstring += "} "+str(Cparam.defaultval)+"\n\n"
        paramccl_str += singleparstring
with open(os.path.join(outrootdir,"param.ccl"), "w") as file:
    file.write("""
# This param.ccl file was automatically generated by NRPy+.
# You are advised against modifying it directly.
shares: ADMBase
USES CCTK_INT lapse_timelevels # Needed to ensure ADMBase gridfunctions are allocated (see top of schedule.ccl)
USES CCTK_INT shift_timelevels # Needed to ensure ADMBase gridfunctions are allocated (see top of schedule.ccl)
USES CCTK_INT metric_timelevels # Needed to ensure ADMBase gridfunctions are allocated (see top of schedule.ccl)
EXTENDS CCTK_KEYWORD evolution_method "evolution_method"
{
 "BaikalETK" :: ""
}
EXTENDS CCTK_KEYWORD lapse_evolution_method "lapse_evolution_method"
{
 "BaikalETK" :: ""
}
EXTENDS CCTK_KEYWORD shift_evolution_method "shift_evolution_method"
{
 "BaikalETK" :: ""
}
EXTENDS CCTK_KEYWORD dtshift_evolution_method "dtshift_evolution_method"
{
 "BaikalETK" :: ""
}
EXTENDS CCTK_KEYWORD dtlapse_evolution_method "dtlapse_evolution_method"
{
 "BaikalETK" :: ""
}
restricted:
""")
    file.write(paramccl_str)
# -
# <a id='interfaceccl'></a>
#
# ## Step 3.b: `interface.ccl`: define needed gridfunctions; provide keywords denoting what this thorn provides and what it should inherit from other thorns \[Back to [top](#toc)\]
# $$\label{interfaceccl}$$
#
# `interface.ccl` declares all gridfunctions and determines how `BaikalETK` interacts with other Einstein Toolkit thorns.
#
# The [official Einstein Toolkit (Cactus) documentation](http://cactuscode.org/documentation/referencemanual/ReferenceManual.html) defines what must/should be included in an `interface.ccl` file [**here**](http://cactuscode.org/documentation/referencemanual/ReferenceManualch8.html#x12-260000C2.2).
# +
# First construct lists of the basic gridfunctions used in NRPy+.
# Each type will be its own group in BaikalETK.
# NRPy+'s finite-difference code generator assumes gridfunctions
# are alphabetized; not sorting may result in unnecessary
# cache misses -- so every list is sorted at construction.
evol_gfs_list    = sorted(gf.name+"GF" for gf in gri.glb_gridfcs_list if gf.gftype == "EVOL")
aux_gfs_list     = sorted(gf.name+"GF" for gf in gri.glb_gridfcs_list if gf.gftype == "AUX")
auxevol_gfs_list = sorted(gf.name+"GF" for gf in gri.glb_gridfcs_list if gf.gftype == "AUXEVOL")
# Generate interface.ccl.
# Fix vs. original: the gridfunction-group writes occurred after the `with`
# block had exited, i.e., on a closed file handle (ValueError at runtime).
# The helper is hoisted to module level and every write now happens inside
# a single `with` block.
def output_list_of_gfs(gfs_list,description="User did not provide description"):
    """Render a comma-separated gridfunction list terminated by the
    closing '} "description"' expected by interface.ccl group syntax."""
    gfsstr = "    "
    for i in range(len(gfs_list)):
        gfsstr += gfs_list[i]
        if i != len(gfs_list)-1:
            gfsstr += "," # This is a comma-separated list of gridfunctions
        else:
            gfsstr += "\n} \""+description+"\"\n\n"
    return gfsstr
# One right-hand-side gridfunction per evolved gridfunction:
rhs_list = [gf.replace("GF","")+"_rhsGF" for gf in evol_gfs_list]
with open(os.path.join(outrootdir,"interface.ccl"), "w") as file:
    file.write("""
# With "implements", we give our thorn its unique name.
implements: BaikalETK
# By "inheriting" other thorns, we tell the Toolkit that we
# will rely on variables/function that exist within those
# functions.
inherits: ADMBase Boundary Grid MethodofLines\n""")
    if add_stress_energy_source_terms == True:
        file.write("inherits: TmunuBase")
    file.write("""
# Needed functions and #include's:
USES INCLUDE: Symmetry.h
USES INCLUDE: Boundary.h
# Needed Method of Lines function
CCTK_INT FUNCTION MoLRegisterEvolvedGroup(CCTK_INT IN EvolvedIndex, \
CCTK_INT IN RHSIndex)
REQUIRES FUNCTION MoLRegisterEvolvedGroup
# Needed Boundary Conditions function
CCTK_INT FUNCTION GetBoundarySpecification(CCTK_INT IN size, CCTK_INT OUT ARRAY nboundaryzones, CCTK_INT OUT ARRAY is_internal, CCTK_INT OUT ARRAY is_staggered, CCTK_INT OUT ARRAY shiftout)
USES FUNCTION GetBoundarySpecification
CCTK_INT FUNCTION SymmetryTableHandleForGrid(CCTK_POINTER_TO_CONST IN cctkGH)
USES FUNCTION SymmetryTableHandleForGrid
CCTK_INT FUNCTION Boundary_SelectVarForBC(CCTK_POINTER_TO_CONST IN GH, CCTK_INT IN faces, CCTK_INT IN boundary_width, CCTK_INT IN table_handle, CCTK_STRING IN var_name, CCTK_STRING IN bc_name)
USES FUNCTION Boundary_SelectVarForBC
# Needed for EinsteinEvolve/NewRad outer boundary condition driver:
CCTK_INT FUNCTION \\
NewRad_Apply \\
(CCTK_POINTER_TO_CONST IN cctkGH, \\
CCTK_REAL ARRAY IN var, \\
CCTK_REAL ARRAY INOUT rhs, \\
CCTK_REAL IN var0, \\
CCTK_REAL IN v0, \\
CCTK_INT IN radpower)
REQUIRES FUNCTION NewRad_Apply
# Needed to convert ADM initial data into BSSN initial data (gamma extrapolation)
CCTK_INT FUNCTION \\
ExtrapolateGammas \\
(CCTK_POINTER_TO_CONST IN cctkGH, \\
CCTK_REAL ARRAY INOUT var)
REQUIRES FUNCTION ExtrapolateGammas
# Tell the Toolkit that we want all gridfunctions
# to be visible to other thorns by using
# the keyword "public". Note that declaring these
# gridfunctions *does not* allocate memory for them;
# that is done by the schedule.ccl file.
# FIXME: add info for symmetry conditions:
# https://einsteintoolkit.org/thornguide/CactusBase/SymBase/documentation.html
public:
""")
    # First EVOL type:
    file.write("CCTK_REAL evol_variables type = GF Timelevels=3\n{\n")
    file.write(output_list_of_gfs(evol_gfs_list,"BSSN evolved gridfunctions"))
    # Second EVOL right-hand-sides
    file.write("CCTK_REAL evol_variables_rhs type = GF Timelevels=1 TAGS=\'InterpNumTimelevels=1 prolongation=\"none\"\'\n{\n")
    file.write(output_list_of_gfs(rhs_list,"right-hand-side storage for BSSN evolved gridfunctions"))
    # Then AUX type:
    file.write("CCTK_REAL aux_variables type = GF Timelevels=3\n{\n")
    file.write(output_list_of_gfs(aux_gfs_list,"Auxiliary gridfunctions for BSSN diagnostics"))
    # Finally, AUXEVOL type:
    file.write("CCTK_REAL auxevol_variables type = GF Timelevels=1 TAGS=\'InterpNumTimelevels=1 prolongation=\"none\"\'\n{\n")
    file.write(output_list_of_gfs(auxevol_gfs_list,"Auxiliary gridfunctions needed for evaluating the BSSN RHSs"))
# -
# <a id='scheduleccl'></a>
#
# ## Step 3.c: `schedule.ccl`: schedule all functions used within `BaikalETK`, specify data dependencies within said functions, and allocate memory for gridfunctions \[Back to [top](#toc)\]
# $$\label{scheduleccl}$$
#
# Official documentation on constructing ETK `schedule.ccl` files is found [here](http://cactuscode.org/documentation/referencemanual/ReferenceManualch8.html#x12-268000C2.4).
# Generate schedule.ccl.
# Fix vs. original: every write after the first heredoc occurred outside the
# `with` block, i.e., on a closed file handle (ValueError at runtime).  All
# writes, including the add_stress_energy_source_terms conditionals, now
# happen inside a single `with` block.
with open(os.path.join(outrootdir,"schedule.ccl"), "w") as file:
    file.write("""
# First allocate storage for all ADMBase gridfunctions, which are needed by NRPy+
STORAGE: ADMBase::metric[metric_timelevels], ADMBase::curv[metric_timelevels], ADMBase::lapse[lapse_timelevels], ADMBase::shift[shift_timelevels]
# Next allocate storage for all 3 gridfunction groups used in BaikalETK
STORAGE: evol_variables[3] # Evolution variables
STORAGE: evol_variables_rhs[1] # Variables storing right-hand-sides
STORAGE: aux_variables[3] # Diagnostics variables
STORAGE: auxevol_variables[1] # Single-timelevel storage of variables needed for evolutions.
# The following scheduler is based on Lean/LeanBSSNMoL/schedule.ccl
schedule BaikalETK_Banner at STARTUP
{
LANG: C
OPTIONS: meta
} "Output ASCII art banner"
schedule BaikalETK_RegisterSlicing at STARTUP after BaikalETK_Banner
{
LANG: C
OPTIONS: meta
} "Register 3+1 slicing condition"
schedule BaikalETK_Symmetry_registration at BASEGRID
{
LANG: C
OPTIONS: Global
} "Register symmetries, the CartGrid3D way."
schedule BaikalETK_zero_rhss at BASEGRID after BaikalETK_Symmetry_registration
{
LANG: C
} "Idea from Lean: set all rhs functions to zero to prevent spurious nans"
schedule BaikalETK_ADM_to_BSSN at CCTK_INITIAL after ADMBase_PostInitial
{
LANG: C
OPTIONS: Local
SYNC: evol_variables
} "Convert initial data into BSSN variables"
schedule GROUP ApplyBCs as BaikalETK_ApplyBCs at CCTK_INITIAL after BaikalETK_ADM_to_BSSN
{
} "Apply boundary conditions"
# MoL: registration
schedule BaikalETK_MoL_registration in MoL_Register
{
LANG: C
OPTIONS: META
} "Register variables for MoL"
# MoL: compute RHSs, etc
""")
    if add_stress_energy_source_terms == True:
        file.write("""
schedule driver_BSSN_T4UU in MoL_CalcRHS as BaikalETK_T4UU before BaikalETK_BSSN_to_ADM
{
LANG: C
} "MoL: Compute T4UU, needed for BSSN RHSs."
schedule BaikalETK_BSSN_to_ADM in MoL_CalcRHS after BaikalETK_T4UU before BaikalETK_Ricci
{
LANG: C
} "Perform BSSN-to-ADM conversion. Needed for HydroBase coupling."
""")
    file.write("""
schedule driver_pt1_BSSN_Ricci in MoL_CalcRHS as BaikalETK_Ricci before BaikalETK_RHS
{
LANG: C
} "MoL: Compute Ricci tensor"
schedule driver_pt2_BSSN_RHSs in MoL_CalcRHS as BaikalETK_RHS after BaikalETK_Ricci
{
LANG: C
} "MoL: Evaluate BSSN RHSs"
schedule BaikalETK_NewRad in MoL_CalcRHS after BaikalETK_RHS
{
LANG: C
} "NewRad boundary conditions, scheduled right after RHS eval."
schedule enforce_detgammabar_constraint in MoL_PostStep before BC_Update
{
LANG: C
} "Enforce detgammabar = detgammahat (= 1 in Cartesian)"
schedule BaikalETK_BoundaryConditions_evolved_gfs in MoL_PostStep
{
LANG: C
OPTIONS: LEVEL
SYNC: evol_variables
} "Apply boundary conditions and perform AMR+interprocessor synchronization"
schedule GROUP ApplyBCs as BaikalETK_ApplyBCs in MoL_PostStep after BaikalETK_BoundaryConditions_evolved_gfs
{
} "Group for applying boundary conditions"
# Next update ADM quantities
schedule BaikalETK_BSSN_to_ADM in MoL_PostStep after BaikalETK_ApplyBCs before ADMBase_SetADMVars
{
LANG: C
OPTIONS: Local
} "Perform BSSN-to-ADM conversion. Useful for diagnostics."
# Compute Hamiltonian & momentum constraints
""")
    if add_stress_energy_source_terms == True:
        file.write("""
schedule driver_BSSN_T4UU in MoL_PseudoEvolution before BaikalETK_BSSN_constraints
{
LANG: C
OPTIONS: Local
} "MoL_PseudoEvolution: Compute T4UU, needed for BSSN constraints"
""")
    file.write("""
schedule BaikalETK_BSSN_constraints in MoL_PseudoEvolution
{
LANG: C
OPTIONS: Local
} "Compute BSSN (Hamiltonian and momentum) constraints"
schedule BaikalETK_BoundaryConditions_aux_gfs in MoL_PseudoEvolution after BaikalETK_BSSN_constraints
{
LANG: C
OPTIONS: LOCAL # Needed so that cctk_nghostzones[0] (the number of boundary points) is defined.
# In other words, don't use LEVEL mode here, or the number of boundary points
# filled may not match the actual number of ghost zones. Weird, huh?
SYNC: aux_variables
} "Enforce symmetry BCs in constraint computation"
""")
    if add_stress_energy_source_terms == True:
        file.write("""
schedule BaikalETK_BSSN_to_ADM in MoL_PseudoEvolution after BaikalETK_BoundaryConditions_aux_gfs
{
LANG: C
OPTIONS: Local
} "Perform BSSN-to-ADM conversion in MoL_PseudoEvolution. Needed for proper HydroBase integration."
""")
    file.write("""
schedule GROUP ApplyBCs as BaikalETK_auxgfs_ApplyBCs in MoL_PseudoEvolution after BaikalETK_BoundaryConditions_aux_gfs
{
} "Apply boundary conditions"
""")
# <a id='cdrivers'></a>
#
# # Step 4: C driver functions for ETK registration & NRPy+-generated kernels \[Back to [top](#toc)\]
# $$\label{cdrivers}$$
#
# Now that we have constructed the basic C code kernels and the needed Einstein Toolkit `ccl` files, we next write the driver functions for registering `BaikalETK` within the Toolkit and the C code kernels. Each of these driver functions is called directly from [`schedule.ccl`](#scheduleccl).
#
# <a id='etkfunctions'></a>
# ## Step 4.a: Needed ETK functions: Banner, Symmetry registration, Parameter sanity check, Method of Lines (`MoL`) registration, Boundary condition \[Back to [top](#toc)\]
# $$\label{etkfunctions}$$
#
# ### To-do: Parameter sanity check function. E.g., error should be thrown if `cctk_nghostzones[]` is set too small for the chosen finite-differencing order within NRPy+.
# Running list of C source files to compile into BaikalETK; each driver below
# registers its filename here so make.code.defn can be emitted at the end.
make_code_defn_list = []
def append_to_make_code_defn_list(filename):
    """Register filename for make.code.defn (once) and return its output path."""
    already_listed = filename in make_code_defn_list
    if not already_listed:
        make_code_defn_list.append(filename)
    return os.path.join(outdir,filename)
# Emit RegisterSlicing.c: registers "BaikalETK" as a slicing/evolution method
# with the ETK (called from schedule.ccl at STARTUP, after the banner).
with open(append_to_make_code_defn_list("RegisterSlicing.c"),"w") as file:
    file.write("""
#include "cctk.h"
#include "Slicing.h"
int BaikalETK_RegisterSlicing (void)
{
Einstein_RegisterSlicing ("BaikalETK");
return 0;
}""")
# +
# First the ETK banner code, proudly showing the NRPy+ banner.
# Fix vs. original: the logo-printing writes occurred after the `with` block
# had exited, i.e., on a closed file handle (ValueError at runtime).  The
# logo is rendered first; all writes now happen inside the `with` block.
import NRPy_logo as logo
logostr = logo.print_logo(print_to_stdout=False)
with open(append_to_make_code_defn_list("Banner.c"),"w") as file:
    file.write("""
#include <stdio.h>
void BaikalETK_Banner()
{
""")
    file.write("printf(\"BaikalETK: another Einstein Toolkit thorn generated by\\n\");\n")
    # One printf() per line of the ASCII-art logo:
    for line in logostr.splitlines():
        file.write("    printf(\""+line+"\\n\");\n")
    file.write("}\n")
# -
# Next register symmetries: emit Symmetry_registration_oldCartGrid3D.c, which
# tells CartGrid3D the parity of every BaikalETK gridfunction across the
# x=0, y=0, and z=0 planes, inferred from trailing index digits in each name.
# Fix vs. original: the per-gridfunction writes occurred after the `with`
# block had exited, i.e., on a closed file handle (ValueError at runtime).
# The gridfunction list is built first; all writes happen inside the `with`.
full_gfs_list = []
full_gfs_list.extend(evol_gfs_list)
full_gfs_list.extend(auxevol_gfs_list)
full_gfs_list.extend(aux_gfs_list)
with open(append_to_make_code_defn_list("Symmetry_registration_oldCartGrid3D.c"),"w") as file:
    file.write("""
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
#include "Symmetry.h"
void BaikalETK_Symmetry_registration(CCTK_ARGUMENTS)
{
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
// Stores gridfunction parity across x=0, y=0, and z=0 planes, respectively
int sym[3];
// Next register parities for each gridfunction based on its name
// (to ensure this algorithm is robust, gridfunctions with integers
// in their base names are forbidden in NRPy+).
""")
    for gf in full_gfs_list:
        file.write("""
// Default to scalar symmetry:
sym[0] = 1; sym[1] = 1; sym[2] = 1;
// Now modify sym[0], sym[1], and/or sym[2] as needed
// to account for gridfunction parity across
// x=0, y=0, and/or z=0 planes, respectively
""")
        # If gridfunction name does not end in a digit, by NRPy+ syntax, it must be a scalar
        if gf[len(gf) - 1].isdigit() == False:
            pass # Scalar = default
        elif len(gf) > 2:
            # Rank-1 indexed expression (e.g., vector)
            if gf[len(gf) - 2].isdigit() == False:
                if int(gf[-1]) > 2:
                    print("Error: Found invalid gridfunction name: "+gf)
                    sys.exit(1)
                symidx = gf[-1]
                file.write(" sym["+symidx+"] = -1;\n")
            # Rank-2 indexed expression
            elif gf[len(gf) - 2].isdigit() == True:
                if len(gf) > 3 and gf[len(gf) - 3].isdigit() == True:
                    print("Error: Found a Rank-3 or above gridfunction: "+gf+", which is at the moment unsupported.")
                    print("It should be easy to support this if desired.")
                    sys.exit(1)
                symidx0 = gf[-2]
                file.write(" sym["+symidx0+"] *= -1;\n")
                symidx1 = gf[-1]
                file.write(" sym["+symidx1+"] *= -1;\n")
        else:
            print("Don't know how you got this far with a gridfunction named "+gf+", but I'll take no more of this nonsense.")
            print(" Please follow best-practices and rename your gridfunction to be more descriptive")
            sys.exit(1)
        file.write(" SetCartSymVN(cctkGH, sym, \"BaikalETK::"+gf+"\");\n")
    file.write("}\n")
# Output zero_rhss.c: set every right-hand-side gridfunction to zero over the
# whole local grid (the original comment here, "Next register symmetries",
# was a copy-paste leftover from the previous cell).
with open(append_to_make_code_defn_list("zero_rhss.c"),"w") as file:
    file.write("""
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
#include "Symmetry.h"
void BaikalETK_zero_rhss(CCTK_ARGUMENTS)
{
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
""")
    # One "rhs_gf[idx] = 0.0;" statement per RHS gridfunction...
    set_rhss_to_zero = ""
    for gf in rhs_list:
        set_rhss_to_zero += gf+"[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)] = 0.0;\n"
    # ...wrapped in an OpenMP-parallelized triple loop over cctk_lsh.
    file.write(lp.loop(["i2","i1","i0"],["0", "0", "0"],
                       ["cctk_lsh[2]","cctk_lsh[1]","cctk_lsh[0]"],
                       ["1","1","1"],
                       ["#pragma omp parallel for","","",],"",set_rhss_to_zero))
    file.write("}\n")
# Next registration with the Method of Lines thorn
# Output MoL_registration.c: tell MoL which gridfunction group holds the
# evolved variables and which group holds their right-hand sides.
with open(append_to_make_code_defn_list("MoL_registration.c"),"w") as file:
    file.write("""
//--------------------------------------------------------------------------
// Register with the Method of Lines time stepper
// (MoL thorn, found in arrangements/CactusBase/MoL)
// MoL documentation located in arrangements/CactusBase/MoL/doc
//--------------------------------------------------------------------------
#include <stdio.h>
#include "cctk.h"
#include "cctk_Parameters.h"
#include "cctk_Arguments.h"
#include "Symmetry.h"
void BaikalETK_MoL_registration(CCTK_ARGUMENTS)
{
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
CCTK_INT ierr = 0, group, rhs;
// Register evolution & RHS gridfunction groups with MoL, so it knows
group = CCTK_GroupIndex("BaikalETK::evol_variables");
rhs = CCTK_GroupIndex("BaikalETK::evol_variables_rhs");
ierr += MoLRegisterEvolvedGroup(group, rhs);
if (ierr) CCTK_ERROR("Problems registering with MoL");
}
""")
# +
# Next register with the boundary conditions thorns.
# PART 1: Set BC type to "none" for all variables
# Since we choose NewRad boundary conditions, we must register all
#   gridfunctions to have boundary type "none". This is because
#   NewRad is seen by the rest of the Toolkit as a modification to the
#   RHSs.
# This code is based on Kranc's McLachlan/ML_BSSN/src/Boundaries.cc code.
with open(append_to_make_code_defn_list("BoundaryConditions.c"),"w") as file:
    file.write("""
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
#include "cctk_Faces.h"
#include "util_Table.h"
#include "Symmetry.h"
// Set `none` boundary conditions on BSSN RHSs, as these are set via NewRad.
void BaikalETK_BoundaryConditions_evolved_gfs(CCTK_ARGUMENTS)
{
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
CCTK_INT ierr CCTK_ATTRIBUTE_UNUSED = 0;
""")
    # Select boundary type "none" (width 1) for every evolved gridfunction.
    for gf in evol_gfs_list:
        file.write("""
ierr = Boundary_SelectVarForBC(cctkGH, CCTK_ALL_FACES, 1, -1, "BaikalETK::"""+gf+"""", "none");
if (ierr < 0) CCTK_ERROR("Failed to register BC for BaikalETK::"""+gf+"""!");
""")
    file.write("""
}
// Set `flat` boundary conditions on BSSN constraints, similar to what Lean does.
void BaikalETK_BoundaryConditions_aux_gfs(CCTK_ARGUMENTS) {
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
CCTK_INT ierr CCTK_ATTRIBUTE_UNUSED = 0;
""")
    # Diagnostic (aux) gridfunctions get "flat" BCs over the full ghost width.
    for gf in aux_gfs_list:
        file.write("""
ierr = Boundary_SelectVarForBC(cctkGH, CCTK_ALL_FACES, cctk_nghostzones[0], -1, "BaikalETK::"""+gf+"""", "flat");
if (ierr < 0) CCTK_ERROR("Failed to register BC for BaikalETK::"""+gf+"""!");
""")
    file.write("}\n")
# PART 2: Set C code for calling NewRad BCs
#   As explained in lean_public/LeanBSSNMoL/src/calc_bssn_rhs.F90,
#   the function NewRad_Apply takes the following arguments:
#   NewRad_Apply(cctkGH, var, rhs, var0, v0, radpower),
#     which implement the boundary condition:
#       var = var_at_infinite_r + u(r-var_char_speed*t)/r^var_radpower
#   Obviously for var_radpower>0, var_at_infinite_r is the value of
#     the variable at r->infinity. var_char_speed is the propagation
#     speed at the outer boundary, and var_radpower is the radial
#     falloff rate.
with open(append_to_make_code_defn_list("BoundaryCondition_NewRad.c"),"w") as file:
    file.write("""
#include <math.h>
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
void BaikalETK_NewRad(CCTK_ARGUMENTS) {
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
""")
    for gf in evol_gfs_list:
        # Defaults: asymptotic value 0, characteristic speed 1, 1/r falloff.
        var_at_infinite_r = "0.0"
        var_char_speed = "1.0"
        var_radpower = "1.0"
        if gf == "alpha":
            # Lapse tends to 1 at infinity; 1+log slicing propagates at sqrt(2).
            var_at_infinite_r = "1.0"
            if LapseCondition == "OnePlusLog":
                var_char_speed = "sqrt(2.0)"
            else:
                pass # 1.0 (default) is fine
        if "aDD" in gf or "trK" in gf: # consistent with Lean code.
            var_radpower = "2.0"
        # Map the evolved gf name to its RHS counterpart by stripping any
        # "GF" suffix and appending "_rhsGF" -- assumes NRPy+'s gridfunction
        # naming convention (TODO confirm against evol_gfs_list contents).
        file.write(" NewRad_Apply(cctkGH, "+gf+", "+gf.replace("GF","")+"_rhsGF, "+var_at_infinite_r+", "+
                   var_char_speed+", "+var_radpower+");\n")
    file.write("}\n")
# -
# -
# <a id='bssnadmconversions'></a>
#
# ## Step 4.b: BSSN $\leftrightarrow$ ADM conversions \[Back to [top](#toc)\]
# $$\label{bssnadmconversions}$$
#
# <a id='admtobssn'></a>
#
# ### Step 4.b.i: ADM $\to$ BSSN \[Back to [top](#toc)\]
# $$\label{admtobssn}$$
#
# Initial data in the Einstein Toolkit are given in terms of [ADM quantities](https://en.wikipedia.org/wiki/ADM_formalism), so a conversion is necessary so the quantities are in terms of BSSN variables used for evolving the initial data forward in time.
# +
# First we convert from ADM to BSSN, as is required to convert initial data
# (given using ADM quantities) to the BSSN evolved variables.
import BSSN.ADM_Numerical_Spherical_or_Cartesian_to_BSSNCurvilinear as atob
# Symbolic BSSN quantities (conformal metric hDD, trace-free extrinsic
# curvature aDD, trK, shift vetU/betU, lapse, conformal factor cf, lambdaU)
# expressed in terms of the ADM input gridfunctions registered below.
IDhDD,IDaDD,IDtrK,IDvetU,IDbetU,IDalpha,IDcf,IDlambdaU = \
    atob.Convert_Spherical_or_Cartesian_ADM_to_BSSN_curvilinear("Cartesian","DoNotOutputADMInputFunction",outdir)
# Register the ADM inputs as AUXEVOL gridfunctions; the C code below points
# them at ADMBase's alp, beta*, dtbeta*, g**, k** arrays.
alphaSphorCart = gri.register_gridfunctions( "AUXEVOL", "alphaSphorCart")
betaSphorCartU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL", "betaSphorCartU")
BSphorCartU = ixp.register_gridfunctions_for_single_rank1("AUXEVOL", "BSphorCartU")
gammaSphorCartDD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL", "gammaSphorCartDD", "sym01")
KSphorCartDD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL", "KSphorCartDD", "sym01")
# Step : Output ADM to BSSN conversion.
# BaikalETK_ADM_to_BSSN() reads ADMBase's gridfunctions and fills BaikalETK's
# evolved BSSN gridfunctions. All quantities except lambda^i are algebraic;
# lambda^i needs finite differences, so it is computed in the interior only
# and then extrapolated into the ghost zones.
with open(append_to_make_code_defn_list("ADM_to_BSSN.c"), "w") as file:
    file.write("""
#include <math.h>
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
void BaikalETK_ADM_to_BSSN(CCTK_ARGUMENTS) {
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
CCTK_REAL *alphaSphorCartGF = alp;
""")
    # It's ugly if we output code in the following ordering, so we'll first
    # output to a string and then sort the string to beautify the code a bit.
    outstr = []
    for i in range(DIM):
        outstr.append(" CCTK_REAL *betaSphorCartU"+str(i)+"GF = beta"+chr(ord('x')+i)+";\n")
        outstr.append(" CCTK_REAL *BSphorCartU"+str(i)+"GF = dtbeta"+chr(ord('x')+i)+";\n")
        for j in range(i,DIM):
            outstr.append(" CCTK_REAL *gammaSphorCartDD"+str(i)+str(j)+"GF = g"+chr(ord('x')+i)+chr(ord('x')+j)+";\n")
            outstr.append(" CCTK_REAL *KSphorCartDD"+str(i)+str(j)+"GF = k"+chr(ord('x')+i)+chr(ord('x')+j)+";\n")
    outstr.sort()
    for line in outstr:
        file.write(line)
    # Inverse grid spacings, needed by the finite-difference lambda^i kernel.
    file.write("""
const CCTK_REAL invdx0 = 1.0/CCTK_DELTA_SPACE(0);
const CCTK_REAL invdx1 = 1.0/CCTK_DELTA_SPACE(1);
const CCTK_REAL invdx2 = 1.0/CCTK_DELTA_SPACE(2);
""")
    # Algebraic conversions: valid at every point, including ghost zones.
    all_but_lambdaU_expressions = [
        lhrh(lhs=gri.gfaccess("in_gfs","hDD00"),rhs=IDhDD[0][0]),
        lhrh(lhs=gri.gfaccess("in_gfs","hDD01"),rhs=IDhDD[0][1]),
        lhrh(lhs=gri.gfaccess("in_gfs","hDD02"),rhs=IDhDD[0][2]),
        lhrh(lhs=gri.gfaccess("in_gfs","hDD11"),rhs=IDhDD[1][1]),
        lhrh(lhs=gri.gfaccess("in_gfs","hDD12"),rhs=IDhDD[1][2]),
        lhrh(lhs=gri.gfaccess("in_gfs","hDD22"),rhs=IDhDD[2][2]),
        lhrh(lhs=gri.gfaccess("in_gfs","aDD00"),rhs=IDaDD[0][0]),
        lhrh(lhs=gri.gfaccess("in_gfs","aDD01"),rhs=IDaDD[0][1]),
        lhrh(lhs=gri.gfaccess("in_gfs","aDD02"),rhs=IDaDD[0][2]),
        lhrh(lhs=gri.gfaccess("in_gfs","aDD11"),rhs=IDaDD[1][1]),
        lhrh(lhs=gri.gfaccess("in_gfs","aDD12"),rhs=IDaDD[1][2]),
        lhrh(lhs=gri.gfaccess("in_gfs","aDD22"),rhs=IDaDD[2][2]),
        lhrh(lhs=gri.gfaccess("in_gfs","trK"),rhs=IDtrK),
        lhrh(lhs=gri.gfaccess("in_gfs","vetU0"),rhs=IDvetU[0]),
        lhrh(lhs=gri.gfaccess("in_gfs","vetU1"),rhs=IDvetU[1]),
        lhrh(lhs=gri.gfaccess("in_gfs","vetU2"),rhs=IDvetU[2]),
        lhrh(lhs=gri.gfaccess("in_gfs","betU0"),rhs=IDbetU[0]),
        lhrh(lhs=gri.gfaccess("in_gfs","betU1"),rhs=IDbetU[1]),
        lhrh(lhs=gri.gfaccess("in_gfs","betU2"),rhs=IDbetU[2]),
        lhrh(lhs=gri.gfaccess("in_gfs","alpha"),rhs=IDalpha),
        lhrh(lhs=gri.gfaccess("in_gfs","cf"),rhs=IDcf)]
    outCparams = "preindent=1,outCfileaccess=a,outCverbose=False,includebraces=False"
    all_but_lambdaU_outC = fin.FD_outputC("returnstring",all_but_lambdaU_expressions, outCparams)
    file.write(lp.loop(["i2","i1","i0"],["0","0","0"],["cctk_lsh[2]","cctk_lsh[1]","cctk_lsh[0]"],
                       ["1","1","1"],["#pragma omp parallel for","",""],"",all_but_lambdaU_outC))
    # lambda^i involves derivatives of the metric, so restrict the loop to
    # the interior (inset by cctk_nghostzones on every face)...
    outCparams = "preindent=1,outCfileaccess=a,outCverbose=False,includebraces=False"
    lambdaU_expressions = [lhrh(lhs=gri.gfaccess("in_gfs","lambdaU0"),rhs=IDlambdaU[0]),
                           lhrh(lhs=gri.gfaccess("in_gfs","lambdaU1"),rhs=IDlambdaU[1]),
                           lhrh(lhs=gri.gfaccess("in_gfs","lambdaU2"),rhs=IDlambdaU[2])]
    lambdaU_expressions_FDout = fin.FD_outputC("returnstring",lambdaU_expressions, outCparams)
    file.write(lp.loop(["i2","i1","i0"],["cctk_nghostzones[2]","cctk_nghostzones[1]","cctk_nghostzones[0]"],
                       ["cctk_lsh[2]-cctk_nghostzones[2]","cctk_lsh[1]-cctk_nghostzones[1]","cctk_lsh[0]-cctk_nghostzones[0]"],
                       ["1","1","1"],["#pragma omp parallel for","",""],"",lambdaU_expressions_FDout))
    # ...then fill lambda^i's ghost zones by extrapolation.
    file.write("""
ExtrapolateGammas(cctkGH,lambdaU0GF);
ExtrapolateGammas(cctkGH,lambdaU1GF);
ExtrapolateGammas(cctkGH,lambdaU2GF);
}
""")
# -
# <a id='bssntoadm'></a>
#
# ### Step 4.b.ii: BSSN $\to$ ADM \[Back to [top](#toc)\]
# $$\label{bssntoadm}$$
#
# All modules (thorns) in the Einstein Toolkit that deal with spacetime quantities do so via the core `ADMBase` module, which assumes variables are written in ADM form. Therefore, in order for `BaikalETK` to interface properly with the rest of the Toolkit, its native BSSN variables must be converted to ADM quantities.
# +
import BSSN.ADM_in_terms_of_BSSN as btoa
btoa.ADM_in_terms_of_BSSN() # defines btoa.gammaDD, btoa.KDD symbolically
Bq.BSSN_basic_tensors() # Gives us betaU & BU
# BaikalETK_BSSN_to_ADM() fills ADMBase's gridfunctions (g**, k**, alp,
# beta*, dtbeta*) from BaikalETK's evolved BSSN variables, so other thorns
# can consume the spacetime through the standard ADM interface.
with open(append_to_make_code_defn_list("BSSN_to_ADM.c"), "w") as file:
    file.write("""
#include <math.h>
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
void BaikalETK_BSSN_to_ADM(CCTK_ARGUMENTS) {
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
""")
    btoa_lhrh = []
    # gxx..gzz (symmetric: only j >= i components).
    for i in range(DIM):
        for j in range(i,DIM):
            btoa_lhrh.append(lhrh(lhs="g"+chr(ord('x')+i)+chr(ord('x')+j)+"[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)]",
                                  rhs=btoa.gammaDD[i][j]))
    # kxx..kzz (extrinsic curvature, symmetric).
    for i in range(DIM):
        for j in range(i,DIM):
            btoa_lhrh.append(lhrh(lhs="k"+chr(ord('x')+i)+chr(ord('x')+j)+"[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)]",
                                  rhs=btoa.KDD[i][j]))
    # Lapse.
    btoa_lhrh.append(lhrh(lhs="alp[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)]",rhs=Bq.alpha))
    # Shift betax..betaz.
    for i in range(DIM):
        btoa_lhrh.append(lhrh(lhs="beta"+chr(ord('x')+i)+"[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)]",
                              rhs=Bq.betaU[i]))
    # Shift time derivative dtbetax..dtbetaz.
    for i in range(DIM):
        btoa_lhrh.append(lhrh(lhs="dtbeta"+chr(ord('x')+i)+"[CCTK_GFINDEX3D(cctkGH,i0,i1,i2)]",
                              rhs=Bq.BU[i]))
    outCparams = "preindent=1,outCfileaccess=a,outCverbose=False,includebraces=False"
    bssn_to_adm_Ccode = fin.FD_outputC("returnstring",btoa_lhrh, outCparams)
    # Purely algebraic conversion: loop over every point, ghost zones included.
    file.write(lp.loop(["i2","i1","i0"],["0","0","0"],["cctk_lsh[2]","cctk_lsh[1]","cctk_lsh[0]"],
                       ["1","1","1"],["#pragma omp parallel for","",""],"",bssn_to_adm_Ccode))
    file.write("}\n")
# -
# <a id='bssnrhss'></a>
#
# ## Step 4.c: Evaluate BSSN right-hand-sides (RHSs) \[Back to [top](#toc)\]
# $$\label{bssnrhss}$$
#
# <a id='ricci'></a>
#
# ### Step 4.c.i: Evaluate Ricci tensor \[Back to [top](#toc)\]
# $$\label{ricci}$$
#
# To slightly optimize the performance of `BaikalETK`'s BSSN solver, we split the computation of the [complicated expressions for the Ricci tensor $\bar{R}_{ij}$](Tutorial-BSSN_quantities.ipynb#rbar) into its own function, and then use the result when evaluating the BSSN right-hand-side (RHS) expressions.
# Output driver_pt1_BSSN_Ricci.c: SIMD-vectorized kernel evaluating the
# conformal Ricci tensor (kernel body generated into BSSN_Ricci.h). It is
# split out so the RHS driver can reuse the stored result.
with open(append_to_make_code_defn_list("driver_pt1_BSSN_Ricci.c"), "w") as file:
    file.write("""
#include <math.h>
#include "SIMD/SIMD_intrinsics.h"
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
void driver_pt1_BSSN_Ricci(CCTK_ARGUMENTS) {
DECLARE_CCTK_ARGUMENTS;
const CCTK_REAL NOSIMDinvdx0 = 1.0/CCTK_DELTA_SPACE(0);
const REAL_SIMD_ARRAY invdx0 = ConstSIMD(NOSIMDinvdx0);
const CCTK_REAL NOSIMDinvdx1 = 1.0/CCTK_DELTA_SPACE(1);
const REAL_SIMD_ARRAY invdx1 = ConstSIMD(NOSIMDinvdx1);
const CCTK_REAL NOSIMDinvdx2 = 1.0/CCTK_DELTA_SPACE(2);
const REAL_SIMD_ARRAY invdx2 = ConstSIMD(NOSIMDinvdx2);
#include "BSSN_Ricci.h"
}\n""")
# <a id='bssnrhssricciinput'></a>
#
# ### Step 4.c.ii: Evaluate BSSN RHSs, using Ricci tensor as input \[Back to [top](#toc)\]
# $$\label{bssnrhssricciinput}$$
#
# Next we construct the driver function for evaluating the BSSN RHSs, which make use of the Ricci tensor $\bar{R}_{ij}$, which has just been computed.
def SIMD_declare_C_params():
    """Return C code declaring SIMD-broadcast copies of every CCTK_REAL
    thorn parameter: each is fetched with CCTK_ParameterGet() into a
    NOSIMD<name> scalar, then broadcast to a REAL_SIMD_ARRAY via ConstSIMD().

    Improvement over the original: iterate over par.glb_Cparams_list
    directly instead of indexing it via range(len(...)).
    """
    SIMD_declare_C_params_str = ""
    for Cparam in par.glb_Cparams_list:
        # keep_param is a boolean indicating whether we should accept or reject
        # the parameter. singleparstring will contain the string indicating
        # the variable type.
        keep_param, singleparstring = keep_param__return_type(Cparam)
        if keep_param and "CCTK_REAL" in singleparstring:
            parname = Cparam.parname
            SIMD_declare_C_params_str += " const "+singleparstring + "*NOSIMD"+parname+\
                " = CCTK_ParameterGet(\""+parname+"\",\"BaikalETK\",NULL);\n"
            SIMD_declare_C_params_str += " const REAL_SIMD_ARRAY "+parname+" = ConstSIMD(*NOSIMD"+parname+");\n"
    return SIMD_declare_C_params_str
# Output driver_pt2_BSSN_RHSs.c: SIMD-vectorized evaluation of the BSSN
# right-hand sides (kernel generated into BSSN_RHSs.h), which consumes the
# Ricci tensor computed by driver_pt1_BSSN_Ricci().
with open(append_to_make_code_defn_list("driver_pt2_BSSN_RHSs.c"), "w") as file:
    file.write("""
#include <math.h>
#include "SIMD/SIMD_intrinsics.h"
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
//void BSSN_RHSs()
void driver_pt2_BSSN_RHSs(CCTK_ARGUMENTS) {
DECLARE_CCTK_ARGUMENTS;
const CCTK_REAL NOSIMDinvdx0 = 1.0/CCTK_DELTA_SPACE(0);
const REAL_SIMD_ARRAY invdx0 = ConstSIMD(NOSIMDinvdx0);
const CCTK_REAL NOSIMDinvdx1 = 1.0/CCTK_DELTA_SPACE(1);
const REAL_SIMD_ARRAY invdx1 = ConstSIMD(NOSIMDinvdx1);
const CCTK_REAL NOSIMDinvdx2 = 1.0/CCTK_DELTA_SPACE(2);
const REAL_SIMD_ARRAY invdx2 = ConstSIMD(NOSIMDinvdx2);
"""+SIMD_declare_C_params()+"""
#include "BSSN_RHSs.h"
}\n""")
# <a id='enforcegammahatconstraint'></a>
#
# ## Step 4.d: Enforcing conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint (in Cartesian coordinates, $\det{\hat{\gamma}_{ij}}=1$) \[Back to [top](#toc)\]
# $$\label{enforcegammahatconstraint}$$
#
# Here we construct the driver function for enforcing the conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint. The BSSN equations are not strongly hyperbolic if this condition is not set.
# Output enforce_detgammabar_constraint.c: pointwise rescaling (kernel in
# enforce_detgammabar_constraint.h) enforcing det(gammabar)=det(gammahat)
# (=1 in Cartesian coordinates), without which BSSN is not strongly hyperbolic.
with open(append_to_make_code_defn_list("enforce_detgammabar_constraint.c"), "w") as file:
    file.write("""
#include <math.h>
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
void enforce_detgammabar_constraint(CCTK_ARGUMENTS) {
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
#include "enforce_detgammabar_constraint.h"
}\n""")
# <a id='diagnostics'></a>
#
# ## Step 4.e: Diagnostics: Computing Hamiltonian & momentum constraints \[Back to [top](#toc)\]
# $$\label{diagnostics}$$
#
# The BSSN Hamiltonian & momentum constraints are useful diagnostics of a numerical-relativity calculation's health, as both should converge to zero with increasing numerical resolution. Here we construct the driver function.
# Output BSSN_constraints.c: evaluate the Hamiltonian & momentum constraint
# violations (kernel generated into BSSN_constraints.h); both should converge
# to zero with increasing resolution.
with open(append_to_make_code_defn_list("BSSN_constraints.c"), "w") as file:
    file.write("""
#include <math.h>
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
void BaikalETK_BSSN_constraints(CCTK_ARGUMENTS) {
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
const CCTK_REAL invdx0 = 1.0/CCTK_DELTA_SPACE(0);
const CCTK_REAL invdx1 = 1.0/CCTK_DELTA_SPACE(1);
const CCTK_REAL invdx2 = 1.0/CCTK_DELTA_SPACE(2);
#include "BSSN_constraints.h"
}\n""")
# <a id='t4uu'></a>
#
# ## Step 4.f: `driver_BSSN_T4UU()`: Compute $T^{\mu\nu}$ from `TmunuBase`'s $T_{\mu\nu}$ \[Back to [top](#toc)\]
# $$\label{t4uu}$$
#
# Here we implement $T^{\mu\nu} = g^{\mu \delta} g^{\nu \gamma} T_{\delta \gamma}.$
# Only generate the T4UU driver when stress-energy source terms are enabled.
if add_stress_energy_source_terms == True:
    # Declare T4DD as a set of gridfunctions. These won't
    # actually appear in interface.ccl, as interface.ccl
    # was set above. Thus before calling the code output
    # by FD_outputC(), we'll have to set pointers
    # to the actual gridfunctions they reference.
    # (In this case the eTab's.)
    T4DD = ixp.register_gridfunctions_for_single_rank2("AUXEVOL","T4DD","sym01",DIM=4)
    import BSSN.ADMBSSN_tofrom_4metric as AB4m
    AB4m.g4UU_ito_BSSN_or_ADM("BSSN") # symbolic inverse 4-metric g4UU
    # Raise both indices: T^{mu nu} = g^{mu delta} g^{nu gamma} T_{delta gamma}
    T4UUraised = ixp.zerorank2(DIM=4)
    for mu in range(4):
        for nu in range(4):
            for delta in range(4):
                for gamma in range(4):
                    T4UUraised[mu][nu] += AB4m.g4UU[mu][delta]*AB4m.g4UU[nu][gamma]*T4DD[delta][gamma]
    # Only the 10 independent (symmetric) components are stored.
    T4UU_expressions = [
        lhrh(lhs=gri.gfaccess("in_gfs","T4UU00"),rhs=T4UUraised[0][0]),
        lhrh(lhs=gri.gfaccess("in_gfs","T4UU01"),rhs=T4UUraised[0][1]),
        lhrh(lhs=gri.gfaccess("in_gfs","T4UU02"),rhs=T4UUraised[0][2]),
        lhrh(lhs=gri.gfaccess("in_gfs","T4UU03"),rhs=T4UUraised[0][3]),
        lhrh(lhs=gri.gfaccess("in_gfs","T4UU11"),rhs=T4UUraised[1][1]),
        lhrh(lhs=gri.gfaccess("in_gfs","T4UU12"),rhs=T4UUraised[1][2]),
        lhrh(lhs=gri.gfaccess("in_gfs","T4UU13"),rhs=T4UUraised[1][3]),
        lhrh(lhs=gri.gfaccess("in_gfs","T4UU22"),rhs=T4UUraised[2][2]),
        lhrh(lhs=gri.gfaccess("in_gfs","T4UU23"),rhs=T4UUraised[2][3]),
        lhrh(lhs=gri.gfaccess("in_gfs","T4UU33"),rhs=T4UUraised[3][3])]
    outCparams = "outCverbose=False,includebraces=False,preindent=2,SIMD_enable=True"
    T4UUstr = fin.FD_outputC("returnstring",T4UU_expressions, outCparams)
    # SIMD loop: innermost (i0) stride is SIMD_width.
    T4UUstr_loop = lp.loop(["i2","i1","i0"],["0","0","0"],["cctk_lsh[2]","cctk_lsh[1]","cctk_lsh[0]"],
                           ["1","1","SIMD_width"],["#pragma omp parallel for","",""],"",T4UUstr)
    with open(append_to_make_code_defn_list("driver_BSSN_T4UU.c"), "w") as file:
        file.write("""
#include <math.h>
#include "cctk.h"
#include "cctk_Arguments.h"
#include "cctk_Parameters.h"
#include "SIMD/SIMD_intrinsics.h"
void driver_BSSN_T4UU(CCTK_ARGUMENTS) {
DECLARE_CCTK_ARGUMENTS;
DECLARE_CCTK_PARAMETERS;
const CCTK_REAL *restrict T4DD00GF = eTtt;
const CCTK_REAL *restrict T4DD01GF = eTtx;
const CCTK_REAL *restrict T4DD02GF = eTty;
const CCTK_REAL *restrict T4DD03GF = eTtz;
const CCTK_REAL *restrict T4DD11GF = eTxx;
const CCTK_REAL *restrict T4DD12GF = eTxy;
const CCTK_REAL *restrict T4DD13GF = eTxz;
const CCTK_REAL *restrict T4DD22GF = eTyy;
const CCTK_REAL *restrict T4DD23GF = eTyz;
const CCTK_REAL *restrict T4DD33GF = eTzz;
"""+T4UUstr_loop+"""
}\n""")
# <a id='makecodedefn'></a>
#
# ## Step 4.g: `make.code.defn`: List of all C driver functions needed to compile `BaikalETK` \[Back to [top](#toc)\]
# $$\label{makecodedefn}$$
#
# When constructing each C code driver function above, we called the `append_to_make_code_defn_list()` function, which built a list of each C code driver file. We'll now add each of those files to the `make.code.defn` file, used by the Einstein Toolkit's build system.
# Assemble make.code.defn: the ETK build system reads its SRCS variable to
# know which C files in src/ belong to this thorn.
with open(os.path.join(outdir, "make.code.defn"), "w") as file:
    file.write("""
# Main make.code.defn file for thorn BaikalETK
# Source files in this directory
SRCS =""")
    # Join the accumulated filenames with backslash-newline continuations,
    # terminating the last entry with a plain newline (nothing if empty).
    if make_code_defn_list:
        file.write(" \\\n".join(" " + fname for fname in make_code_defn_list) + "\n")
# <a id='code_validation'></a>
#
# # Step 5: Code validation \[Back to [top](#toc)\]
# $$\label{code_validation}$$
#
# Here we will show plots demonstrating good agreement between `BaikalETK` and, e.g., `McLachlan` (another, trusted ETK thorn).
# <a id='self_validation'></a>
#
# ## Step 5.a: Validation against [BaikalETK.BaikalETK_Pymodule](../edit/BaikalETK/BaikalETK_Pymodule.py) module \[Back to [top](#toc)\]
# $$\label{self_validation}$$
#
# As a self-validation check, we verify agreement in all codes generated by
# 1. this tutorial notebook, and
# 2. the NRPy+ [Baikal.BaikalETK_Pymodule](../edit/BaikalETK/BaikalETK_Pymodule.py) module.
# +
# First generate all codes using BaikalETK.BaikalETK_Pymodule.BaikalETK_codegen():
# Reset the global gridfunction registry so the module's codegen starts clean.
gri.glb_gridfcs_list = []
import BaikalETK.BaikalETK_Pymodule as BE
outvalrootdir = "BaikalETK-validate"
# Regenerate the complete thorn into a separate directory using the SAME
# physics/numerics options chosen earlier in this notebook, for diffing.
BE.BaikalETK_codegen(outrootdir=outvalrootdir,
                     FD_order=FD_order, # Finite difference order: even numbers only, starting with 2. 12 is generally unstable
                     LapseCondition = LapseCondition,
                     ShiftCondition = ShiftCondition, # Set the standard, second-order advecting-shift,
                                                      # Gamma-driving shift condition
                     add_stress_energy_source_terms = add_stress_energy_source_terms, # Enable stress-energy terms?
                     default_KO_strength = default_KO_strength)
# +
# Then compare all files generated by this notebook & the separate Python module.
import difflib
# rootfiles: every *.ccl file in the thorn root; srcfiles: every file in src/.
rootfiles = [f for f in os.listdir(outrootdir) if os.path.isfile(os.path.join(outrootdir, f)) and "ccl" in os.path.join(outrootdir, f)]
srcfiles = [f for f in os.listdir(outdir) if os.path.isfile(os.path.join(outdir, f))]
def compare_two_files(outdir1, outdir2, filebase):
    """Print a unified diff of *filebase* as it appears in *outdir1* vs
    *outdir2*, ignoring lines that consist solely of whitespace.
    Prints "PASSED: ..." if the files agree, "FAILED ..." otherwise.

    Bug fixes vs. the original:
    * the body referenced the *global* loop variable ``file`` instead of the
      ``filebase`` parameter, so the function only worked by accident when a
      global ``file`` happened to hold the intended filename;
    * ``sys.stdout.writelines(line)`` iterated a string character-by-character;
      ``sys.stdout.write(line)`` is the intended call.
    """
    def _normalized_lines(path):
        # Strip leading whitespace from whitespace-only lines so that
        # inconsequential blank-line differences do not count as diffs.
        with open(path) as fobj:
            return [ln.lstrip() if ln.strip() == "" else ln
                    for ln in fobj.readlines()]

    path1 = os.path.join(outdir1, filebase)
    path2 = os.path.join(outdir2, filebase)
    file1_lines_noleadingwhitespace = _normalized_lines(path1)
    file2_lines_noleadingwhitespace = _normalized_lines(path2)

    num_diffs = 0
    for line in difflib.unified_diff(file1_lines_noleadingwhitespace,
                                     file2_lines_noleadingwhitespace,
                                     fromfile=path1,
                                     tofile =path2):
        sys.stdout.write(line)
        num_diffs = num_diffs + 1
    if num_diffs == 0:
        print("PASSED: "+filebase)
    else:
        print("FAILED (see diff above): "+filebase)
print("Ignoring lines with only whitespace:")
# Compare every ccl file in the thorn root, then every generated C source
# in src/, against the module-generated versions.
for file in rootfiles:
    compare_two_files(outrootdir,outvalrootdir,file)
for file in srcfiles:
    compare_two_files(outdir,os.path.join(outvalrootdir,"src"),file)
# -
# <a id='latex_pdf_output'></a>
#
# # Step 6: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-BaikalETK.pdf](Tutorial-BaikalETK.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
# !jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-BaikalETK.ipynb
# !pdflatex -interaction=batchmode Tutorial-BaikalETK.tex
# !pdflatex -interaction=batchmode Tutorial-BaikalETK.tex
# !pdflatex -interaction=batchmode Tutorial-BaikalETK.tex
# !rm -f Tut*.out Tut*.aux Tut*.log
| Tutorial-BaikalETK.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="fC59QJts4cXT"
# - 학습/테스트 데이터 셋 분리 - train_test_split()
# +
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
iris = load_iris()
dt_clf = DecisionTreeClassifier()
train_data = iris.data
train_label = iris.target
dt_clf.fit(train_data, train_label)
# Predict on the *training* data itself: the reported accuracy is a
# misleading 1.0 because the model is evaluated on data it has already seen.
# (Comment translated from Korean.)
pred = dt_clf.predict(train_data)
print('예측 정확도:', accuracy_score(train_label, pred))
# +
from sklearn.model_selection import train_test_split
dt_clf = DecisionTreeClassifier()
iris_data = load_iris()
# X_train, X_test : feature sets for training / testing
# y_train, y_test : target *labels* for training / testing
#   (the original comment mislabeled y_* as "test features")
X_train, X_test, y_train, y_test = train_test_split(
    iris_data.data, iris_data.target, test_size=0.3, random_state=121)
# -
# Train on the 70% split, evaluate on the held-out 30%.
dt_clf.fit(X_train, y_train)
pred = dt_clf.predict(X_test)
print('예측 정확도: {0:.4f}'.format(accuracy_score(y_test, pred)))
# +
import pandas as pd
# Wrap the iris arrays in a DataFrame and append the 'target' column.
iris_df = pd.DataFrame(iris_data.data, columns=iris_data.feature_names)
iris_df['target'] = iris_data.target
iris_df.head()
# -
ftr_df = iris_df.iloc[:, :-1] # ':-1' : every column except the last (features)
tgt_df = iris_df.iloc[:, -1]  # last column only (target labels)
# train_test_split also accepts DataFrame/Series inputs and returns the
# same types, as the print below demonstrates.
X_train, X_test, y_train, y_test = train_test_split(ftr_df, tgt_df, test_size=0.3, random_state=121)
print(type(X_train), type(X_test), type(y_train), type(y_test))
dt_clf = DecisionTreeClassifier()
dt_clf.fit(X_train, y_train)
pred = dt_clf.predict(X_test)
print('예측 정확도: {0:.4f}'.format(accuracy_score(y_test, pred)))
# + [markdown] colab_type="text" id="xf3ZTEkO8Iga"
# 교차 검증
# - k 폴드
# +
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
import numpy as np
iris = load_iris()
features = iris.data
label = iris.target
dt_clf = DecisionTreeClassifier(random_state=156)
# Create a KFold object that splits into 5 folds, plus a list to collect
# each fold's accuracy. (Comment translated from Korean.)
kfold = KFold(n_splits=5)
cv_accuracy = []
print('iris data set value:', features.shape[0])
# +
# Run 5-fold cross-validation by hand: for each fold, fit on the training
# split, score on the held-out split, and collect the per-fold accuracy.
for n_iter, (train_index, test_index) in enumerate(kfold.split(features), start=1):
    X_train, X_test = features[train_index], features[test_index]
    y_train, y_test = label[train_index], label[test_index]
    dt_clf.fit(X_train, y_train)
    pred = dt_clf.predict(X_test)
    accuracy = np.round(accuracy_score(y_test, pred), 4)
    train_size = X_train.shape[0]
    test_size = X_test.shape[0]
    print('\n#{0} 교차 검증 정확도 :{1}, 학습 데이터 크기:{2}, 검증 데이터 크기: {3}'.format(
        n_iter, accuracy, train_size, test_size))
    print('#{0} 검증 세트 인덱스:{1}'.format(n_iter, test_index))
    cv_accuracy.append(accuracy)
# Mean accuracy across all 5 folds.
print('\n## 평균 검증 정확도:', np.mean(cv_accuracy))
# + [markdown] colab_type="text" id="B6axErTS_wcl"
# - Stratified K Fold
# +
import pandas as pd
iris = load_iris()
iris_df = pd.DataFrame(data=iris.data, columns=iris.feature_names)
print(iris_df)
# Append the class label column; value_counts() shows the class balance.
iris_df['label']=iris.target
iris_df['label'].value_counts()
# +
kfold = KFold(n_splits=3)
# kfold.split(X) yields, for each of the 3 folds, the row-index arrays of the
# train/test splits. Printing the label distributions below demonstrates why
# plain KFold is problematic here. (Comment translated from Korean.)
n_iter=0
for train_index, test_index in kfold.split(iris_df):
    n_iter += 1
    label_train = iris_df['label'].iloc[train_index]
    label_test = iris_df['label'].iloc[test_index]
    print('## 교차 검증: {0}'.format(n_iter))
    print('학습 레이블 데이터 분포:\n', label_train.value_counts())
    print('검증 레이블 데이터 분포:\n', label_test.value_counts())
# +
from sklearn.model_selection import StratifiedKFold
# StratifiedKFold keeps the per-class label ratio inside each fold; its
# split() therefore takes the label column as a second argument.
skf = StratifiedKFold(n_splits=3)
n_iter = 0
for train_index, test_index in skf.split(iris_df, iris_df['label']) :
    n_iter += 1
    label_train = iris_df['label'].iloc[train_index]
    label_test = iris_df['label'].iloc[test_index]
    print('## 교차 검증 : {0}'.format(n_iter))
    print('학습 레이블 데이터 분포:\n', label_train.value_counts())
    print('검증 레이블 데이터 분포:\n', label_test.value_counts())
# +
# Fix: this classifier was originally assigned to the mis-typed name
# `df_clf`, which the loop below never used -- it silently retrained the
# earlier `dt_clf` (which only by luck had the same random_state=156).
# Name it `dt_clf` so the freshly created estimator is the one trained.
dt_clf = DecisionTreeClassifier(random_state=156)
df_clf = dt_clf  # backward-compatible alias for the old (typo) name
skfold = StratifiedKFold(n_splits=3)
n_iter = 0
cv_accuracy = []
# StratifiedKFold.split() requires the label array as well, so each fold
# preserves the class distribution. (Comments translated from Korean.)
for train_index, test_index in skfold.split(features, label):
    # Slice this fold's train/validation data out of the returned indices.
    X_train, X_test = features[train_index], features[test_index]
    y_train, y_test = label[train_index], label[test_index]
    # Train and predict.
    dt_clf.fit(X_train, y_train)
    pred = dt_clf.predict(X_test)
    # Measure accuracy for this fold.
    n_iter += 1
    accuracy = np.round(accuracy_score(y_test, pred), 4)
    train_size = X_train.shape[0]
    test_size = X_test.shape[0]
    print('\n#{0} 교차 검증 정확도 : {1}, 학습 데이터 크기: {2}, 검증 데이터 크기: {3}'. \
          format(n_iter, accuracy, train_size, test_size))
    print('#{0} 검증 세트 인덱스:{1}'.format(n_iter, test_index))
    cv_accuracy.append(accuracy)
# Per-fold and mean accuracy.
print('\n## 교차 검증별 정확도:', np.round(cv_accuracy, 4))
print('## 평균 검증 정확도:', np.mean(cv_accuracy))
# + [markdown] colab_type="text" id="I4YQlLoLF_xu"
# - cross_val_score()
# +
#@title 기본 제목 텍스트
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score, cross_validate
from sklearn.datasets import load_iris
import numpy as np
iris_data = load_iris()
dt_clf = DecisionTreeClassifier(random_state=156)
data = iris_data.data
label = iris_data.target
# cross_val_score runs the whole split/fit/score loop in one call:
# accuracy metric, 3 cross-validation folds. (Comment translated from Korean.)
scores = cross_val_score(dt_clf, data, label, scoring='accuracy', cv=3)
print('교차 검증별 정확도:', np.round(scores, 4))
print('평균 검증 정확도:', np.round(np.mean(scores), 4))
# + [markdown] colab_type="text" id="CgIIbN8yGy72"
# - GridSearchCV
# +
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import accuracy_score
# Load the data, holding out 20% as the final test set.
iris = load_iris()
# Fix: split the freshly loaded `iris` rather than the stale module-level
# `iris_data` left over from an earlier cell. Same dataset either way, but
# the old name made this cell depend on earlier-cell execution order.
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=121)
dtree = DecisionTreeClassifier()
# Hyper-parameter grid to search over (6 combinations).
parameters = {'max_depth':[1, 2, 3], 'min_samples_split':[2,3]}
# +
import pandas as pd
# GridSearchCV tries every max_depth/min_samples_split combination from
# `parameters`, each evaluated with 3-fold CV on the training data.
### refit=True 가 default. True이면 가장 좋은 파라미터 설정으로 재학습 시킴.
grid_dtree = GridSearchCV(dtree, param_grid=parameters, cv=3, refit=True, return_train_score=True)
# Fit/evaluate each hyper-parameter combination on the iris training split.
grid_dtree.fit(X_train, y_train)
# Results are stored in the cv_results_ dict; convert it to a DataFrame
# and show the most useful columns.
scores_df = pd.DataFrame(grid_dtree.cv_results_)
scores_df[['params', 'mean_test_score', 'rank_test_score', 'split0_test_score',
           'split1_test_score', 'split2_test_score']]
# -
grid_dtree.cv_results_
# +
print('GridSearchCV 최적 파라미터:', grid_dtree.best_params_)
print('GridSearchCV 최고 정확도:{0:.4f}'.format(grid_dtree.best_score_))
# Because refit=True, the fitted GridSearchCV object holds an estimator
# already re-trained with the best parameters, so predict() works directly.
pred = grid_dtree.predict(X_test)
print('테스트 데이터 세트 정확도: {0:.4f}'.format(accuracy_score(y_test, pred)))
# +
# best_estimator_ is that refit estimator itself, already trained with the
# best hyper-parameters found by the grid search.
estimator = grid_dtree.best_estimator_
pred = estimator.predict(X_test)
print('테스트 데이터 세트 정확도: {0:.4f}'.format(accuracy_score(y_test, pred)))
# + [markdown] id="I4YQlLoLF_xu" colab_type="text"
# - cross_val_score()
# + id="XMK8z03_F-06" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 55} cellView="code" outputId="c5e4499b-41ca-4567-a918-31606f7a7a90"
#@title 기본 제목 텍스트
# NOTE(review): this cell duplicates the earlier cross_val_score cell
# (colab-metadata variant of the same notebook content).
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score, cross_validate
from sklearn.datasets import load_iris
import numpy as np
iris_data = load_iris()
dt_clf = DecisionTreeClassifier(random_state=156)
data = iris_data.data
label = iris_data.target
# Accuracy metric, 3 cross-validation folds. (Comment translated from Korean.)
scores = cross_val_score(dt_clf, data, label, scoring='accuracy', cv=3)
print('교차 검증별 정확도:', np.round(scores, 4))
print('평균 검증 정확도:', np.round(np.mean(scores), 4))
# + [markdown] id="CgIIbN8yGy72" colab_type="text"
# - GridSearchCV
# + id="PdjhHWzsG1aP" colab_type="code" colab={}
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import accuracy_score

# Load the iris data and split it for this experiment.  Use `iris`
# consistently — the original called load_iris() into `iris` but then
# split `iris_data` from a previous cell, leaving `iris` unused.
# (Both are load_iris() results, so the split is unchanged.)
iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=121)

# Estimator and the hyper-parameter grid to search over.
dtree = DecisionTreeClassifier()
parameters = {'max_depth':[1, 2, 3], 'min_samples_split':[2,3]}
# + id="QJVYdJM5HyiE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 466} outputId="e83c0693-80f9-42f1-b0ff-50751a7d4d70"
import pandas as pd

# Evaluate each hyper-parameter combination in `parameters` with 3-fold
# cross validation; refit=True (the default) retrains with the best ones.
grid_dtree = GridSearchCV(dtree, param_grid=parameters, cv=3, refit=True, return_train_score=True)

# Fit/evaluate every candidate on the iris training split.
grid_dtree.fit(X_train, y_train)

# cv_results_ is a dict of per-candidate scores; view it as a DataFrame.
scores_df = pd.DataFrame(grid_dtree.cv_results_)
scores_df[['params', 'mean_test_score', 'rank_test_score', 'split0_test_score',
'split1_test_score', 'split2_test_score']]
# + id="xNVldRuwIxqP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 715} outputId="7def5280-323f-42db-ef8c-2b1f0c4b37f9"
# Raw cross-validation results.
grid_dtree.cv_results_
# + id="ho_OfvFtIutO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 74} outputId="31025340-e6e5-4705-f9f5-8cd533a9ccdc"
print('GridSearchCV 최적 파라미터:', grid_dtree.best_params_)
print('GridSearchCV 최고 정확도:{0:.4f}'.format(grid_dtree.best_score_))

# Because refit=True, the fitted GridSearchCV holds a fully trained
# estimator, so predict() can be called on it directly.
pred = grid_dtree.predict(X_test)
print('테스트 데이터 세트 정확도: {0:.4f}'.format(accuracy_score(y_test, pred)))
# + id="8P2CXXbbJgBe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="1311638d-43cb-467c-fd46-22ec6e255c87"
# best_estimator_ is the estimator already refit with the best parameters.
estimator = grid_dtree.best_estimator_

pred = estimator.predict(X_test)
print('테스트 데이터 세트 정확도: {0:.4f}'.format(accuracy_score(y_test, pred)))
| ml/sklearn/iris/train_test_dataset_split.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# Подготовим функции из первого занятия
# + pycharm={"is_executing": false, "name": "#%%\n"}
import math

import numpy as np
def arrangements(n: int, k: int) -> int:
    """Number of k-permutations (arrangements) of n items: n! / (n - k)!.

    Uses math.factorial: the np.math alias used originally was removed
    in NumPy 2.0.
    """
    return math.factorial(n) // math.factorial(n - k)
def permutations(n: int) -> int:
    """Number of permutations of n items: n!.

    Uses math.factorial: the np.math alias used originally was removed
    in NumPy 2.0.
    """
    return math.factorial(n)
def combinations(n: int, k: int) -> int:
    """Number of k-combinations of n items: n! / (k! * (n - k)!).

    Uses math.factorial: the np.math alias used originally was removed
    in NumPy 2.0.
    """
    return math.factorial(n) // (math.factorial(k) * math.factorial(n - k))
# -
# ### Задача 1
# Из колоды в 52 карты вынимают случайным образом 4 карты. Найти число исходов,
# соответствующих тому, что был вытянут хотя бы один туз.
# + [markdown] pycharm={"name": "#%% md\n"}
# Для начала определим количество способов, которыми мы можем получить 4 карты из 52
# + pycharm={"is_executing": false}
# Total number of ways to draw any 4 cards out of 52.
C = combinations(52, 4)
C
# + [markdown] pycharm={"name": "#%% md\n"}
# To draw no ace at all, all four cards must come from the 48 non-ace
# cards: C(48, 4) * C(4, 0) = C(48, 4) ways.
# + pycharm={"is_executing": false, "name": "#%%\n"}
nC = combinations(48, 4)
nC
# + [markdown] pycharm={"name": "#%% md\n"}
# Hence the number of outcomes with at least one ace is A = C(52, 4) - C(48, 4).
# + pycharm={"is_executing": false, "name": "#%%\n"}
A = C - nC
print(f"A = {A}")
# -
# ### Задача 2
#
# Семь человек рассаживаются наудачу на скамейке. Какова вероятность того,
# что рядом будут сидеть: а) два определённых человека? б) три определённых человека?
# + [markdown] pycharm={"name": "#%% md\n"}
# В первом случае у нас два определенных человека могут сидеть рядом, остальные как угодно.
# Скамейка одна.
#
# P = m/n
#
# Рассадить всех мы можем n! способами. В нашем случае это 7!
# + pycharm={"is_executing": false, "name": "#%%\n"}
# All seatings of 7 people on the bench: n! = 7!
# (math.factorial replaces np.math.factorial, removed in NumPy 2.0)
n_fact = math.factorial(7)
n_fact
# -
# The 2 specific people form one adjacent pair: 6 possible positions
# for the pair on a 7-seat bench, 2! internal orders, and 5! orders
# for the remaining people.
#
# m_fact = 6 * 2! * 5!
# + pycharm={"is_executing": false, "name": "#%%\n"}
m_fact = 6 * 2 * math.factorial(5)
m_fact
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Classical probability: favourable outcomes over all outcomes.
P = m_fact / n_fact
print(f"Вероятность посадить 2-х человек рядом = {P} или {round(P*100, 2)}%")
# -
# Оформим как функцию
# + pycharm={"is_executing": false, "name": "#%%\n"}
def bench(need_cnt=1, all_cnt=1):
    """Probability that `need_cnt` specific people sit adjacently on a
    bench of `all_cnt` people (classical probability).

    Favourable seatings: (all_cnt - need_cnt + 1) positions for the
    adjacent group * need_cnt! internal orders * (all_cnt - need_cnt)!
    orders for everyone else, divided by all_cnt! total seatings.
    Uses math.factorial (np.math was removed in NumPy 2.0).
    """
    favourable = (all_cnt - (need_cnt - 1)) * math.factorial(need_cnt) * math.factorial(all_cnt - need_cnt)
    return favourable / math.factorial(all_cnt)
# -
# Проверим на 1 человеке и на 2-х из 7
# + pycharm={"is_executing": false, "name": "#%%\n"}
print(f"{bench(2, 7)} == {P} && {bench(1,7)} == 1")
# -
# Теперь для трех человек
# + pycharm={"is_executing": false, "name": "#%%\n"}
print(f"Вероятность посадить 3-х человек рядом = {bench(3,7)} или {round(bench(3,7)*100, 2)}%")
# -
# ### Задача 3
#
# Из 60 вопросов, входящих в экзаменационные билеты, студент знает 50.
# Какова вероятность того, что среди трёх наугад выбранных вопросов студент знает:
# а) все?
# б) два?
#
# P = m/n
# =======
# Общее число билетов определяется сочетанием по 3 из 60
# + pycharm={"is_executing": false, "name": "#%%\n"}
# All ways to pick 3 questions out of 60.
n = combinations(60, 3)
n
# -
# A) Outcomes where the student knows all 3: choose 3 of the 50 known questions.
# + pycharm={"is_executing": false, "name": "#%%\n"}
m = combinations(50, 3)
m
# + pycharm={"is_executing": false, "name": "#%% \n"}
print(f"P = {m/n}")
# + [markdown] pycharm={"name": "#%% md\n"}
# B) Outcomes where the student knows exactly 2: choose 2 of the 50 known questions...
# + pycharm={"is_executing": false, "name": "#%%\n"}
m = combinations(50, 2)
# + [markdown] pycharm={"name": "#%% md\n"}
# ...multiplied by the ways to pick 1 of the 10 unknown questions.
# + pycharm={"is_executing": false, "name": "#%%\n"}
m *= combinations(10, 1)
m
# + pycharm={"is_executing": false, "name": "#%%\n"}
print(f"P = {m/n}")
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Задача 4
#
# Бросается игральная кость.
# Пусть событие A - появление чётного числа,
# событие B - появление числа больше трёх.
#
# Являются ли эти события независимыми?
# -
# #### Ответ
#
# Так как четное число не обязательно больше трех
# то события A и B являются независимыми
#
# С другой стороны вероятность события А при событии B выше (2/6 против 1/6)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Задача 5
#
# Допустим, имеется некоторая очень редкая болезнь (поражает 0.1% населения).
# Вы приходите к врачу, вам делают тест на эту болезнь, и тест оказывается положительным.
# Врач говорит вам, что этот тест верно выявляет 99% больных этой болезнью
# и всего лишь в 1% случаев даёт ложный положительный ответ.
#
# Вопрос: какова вероятность, что вы действительно больны ей?
#
# Подсказка: вновь используйте формулу Байеса с раскрытием знаменателя с
# помощью формулы полной вероятности.
# + [markdown] pycharm={"name": "#%% md\n"}
# P(A|B) = P(B|A) * P(A) / P(B)
#
# P(A)=0.001 - вероятность больного человека
# P(~A)=0.999 - вероятность здорового человека
# P(B|A)=0.99 - вероятность положительного теста для больного человека
# P(B|~A)=0.01 - вероятность положительного теста для здорового человека
#
# Искомая вероятность больного человека при положительном тесте
# P(A|B)=(P(A)*P(B|A))/(P(A)*P(B|A)+P(~A)*P(B|~A))
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Bayes' theorem with the total-probability denominator.
# The test sensitivity is P(B|A) = 0.99, so it must appear in BOTH the
# numerator and the denominator (the original mixed 1 and 0.99).
P = (0.001 * 0.99) / (0.001 * 0.99 + 0.999 * 0.01)
P
# + [markdown] pycharm={"name": "#%% md\n"}
# Задачу решал интуитивно. Не полностью понимая ход и что где должно оказаться
#
| lesson1.ipynb |
# ---
# title: "Matplotlib Python"
#
# authors:
# - <NAME>
# tags:
# - knowledge
# - example
#
# created_at: 2019-01-09
# updated_at: 2019-01-09
#
# tldr: This is first genuine post
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Motivation
# *NOTE*: This is the first notebook post.
# ### Initialize
# %matplotlib widget
# ### Code
# +
# # %load http://matplotlib.org/mpl_examples/showcase/integral_demo.py
"""
Plot demonstrating the integral as the area under a curve.
Although this is a simple example, it demonstrates some important tweaks:
* A simple line plot with custom color and line width.
* A shaded region created using a Polygon patch.
* A text label with mathtext rendering.
* figtext calls to label the x- and y-axes.
* Use of axis spines to hide the top and right spines.
* Custom tick placement and labels.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
def func(x):
    """Cubic used in the integral demo: (x - 3)(x - 5)(x - 7) + 85."""
    cubic = (x - 3) * (x - 5) * (x - 7)
    return cubic + 85
# Integration interval endpoints.
a, b = 2, 9  # integral limits
x = np.linspace(0, 10)
y = func(x)

fig, ax = plt.subplots()
plt.plot(x, y, 'r', linewidth=2)
plt.ylim(ymin=0)

# Make the shaded region
# Polygon vertices: baseline point (a, 0), the sampled curve, then (b, 0).
ix = np.linspace(a, b)
iy = func(ix)
verts = [(a, 0)] + list(zip(ix, iy)) + [(b, 0)]
poly = Polygon(verts, facecolor='0.9', edgecolor='0.5')
ax.add_patch(poly)
plt.text(0.5 * (a + b), 30, r"$\int_a^b f(x)\mathrm{d}x$",
horizontalalignment='center', fontsize=20)
# Axis labels placed with figtext so they sit outside the axes.
plt.figtext(0.9, 0.05, '$x$')
plt.figtext(0.1, 0.9, '$y$')

# Hide the top/right spines; show ticks only at the integration limits.
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')

ax.set_xticks((a, b))
ax.set_xticklabels(('$a$', '$b$'))
ax.set_yticks([])

plt.show()
# +
def f(x):
    """Demo callback: print the square of x (wired to the slider below)."""
    squared = x ** 2
    print(squared)
from ipywidgets import interactive

# Interactive slider (1..10) that calls f on every change.
interactive(f, x=(1,10))
# -
import holoviews as hv
import numpy as np

# Render a simple line through 100 evenly spaced points via the bokeh backend.
hv.extension('bokeh')
hv.Curve(np.linspace(1,10,100))

print("Here")
# ### Appendix
# Put all the stuff here that is not necessary for supporting the points above. Good place for documentation without distraction.
| demo/hello_world.ipynb.kp/src/hello_world.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Credit Risk Classification
#
# Credit risk poses a classification problem that’s inherently imbalanced. This is because healthy loans easily outnumber risky loans. In this analysis, two machine learning techniques will be compared on a historical data set for lending activity from a peer-to-peer lending services company with the goal of finding which model is better for identifying creditworthiness of borrowers.
#
# The two machine learning methods compared will both use logistical regression on randomly split versions of the original data, with training sets and testing sets. The training set will be used to train the machine learning model, and then the model will be applied to the testing set to see how well it performs in terms of percent of correctly identified high risk loans, and percent of incorrectly identified high risk loans. Since the amount of high risk loans is a small portion of the dataset, a second model will be run using an imbalanced learning approach that oversamples the amount of high risk loans, and the results will be compared.
#
# ### Credit Risk Analysis Report
#
# A report with analysis of the results is included in the README.md file, which is also on the front page of the github site for this project.
# [https://github.com/phodsman/P2P-Lending-Credit-Risk-Classifier-with-Machine-Learning](https://github.com/phodsman/P2P-Lending-Credit-Risk-Classifier-with-Machine-Learning)
# +
# Import the modules
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import confusion_matrix
from imblearn.metrics import classification_report_imbalanced
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from imblearn.over_sampling import RandomOverSampler
import warnings
warnings.filterwarnings('ignore')
# -
# ---
# ## Split the Data into Training and Testing Sets
# +
# Read the CSV file from the Resources folder into a Pandas DataFrame
lending_df = pd.read_csv(Path("./Resources/lending_data.csv"))

# Review the DataFrame
display(lending_df)
# +
# Separate the data into labels and features

# Separate the y variable, the labels
# (loan_status is the target: healthy vs. high-risk loans)
y = lending_df["loan_status"]

# Separate the X variable, the features (everything except the target)
X = lending_df.drop(columns=["loan_status"])
# -
# Review the y variable Series
y.head()

# Review the X variable DataFrame
X.head()

# Check the balance of our target values
y.value_counts()

# Split the data using train_test_split
# Assign a random_state of 1 to the function
X_train, X_test, y_train, y_test = train_test_split(X,y, random_state=1)
# ---
# ## Logistic Regression Model with the Original Data
# Instantiate the Logistic Regression model
# Assign a random_state parameter of 1 to the model
logistic_regression_model = LogisticRegression(random_state=1)

# Fit the model using training data
lr_original_model = logistic_regression_model.fit(X_train, y_train)

# Make a prediction using the testing data
y_original_pred = lr_original_model.predict(X_test)

# ### Evaluate the model’s performance:
#
# * Calculate the accuracy score of the model.
#
# * Generate a confusion matrix.
#
# * Print the classification report.

# Print the balanced_accuracy score of the model
# (balanced accuracy averages recall over both classes, so it is not
# inflated by the dominant healthy-loan class)
print(balanced_accuracy_score(y_test, y_original_pred))

# Generate a confusion matrix for the model
print(confusion_matrix(y_test, y_original_pred))

# Print the classification report for the model
print(classification_report_imbalanced(y_test, y_original_pred))
# **Question:** How well does the logistic regression model predict both the `0` (healthy loan) and `1` (high-risk loan) labels?
#
# **Analysis:** The recall amount is 91%, which means about 91% of the actual high risk loans were identified using the model. The precision is 85%, which means that there is around a 15% rate of false positives in identifying high risk loans with this model.
# ---
# ## Logistic Regression Model with Resampled Training Data
# +
# Instantiate the random oversampler model
# # Assign a random_state parameter of 1 to the model
random_oversampler = RandomOverSampler(random_state=1)

# Fit the original training data to the random_oversampler model
# (duplicates minority-class rows until both classes are equally sized)
X_resampled, y_resampled = random_oversampler.fit_resample(X_train, y_train)
# -
# Count the distinct values of the resampled labels data
y_resampled.value_counts()
# +
# Instantiate the Logistic Regression model
# Assign a random_state parameter of 1 to the model
model = LogisticRegression(random_state=1
)
# Fit the model using the resampled training data
lr_resampled_model = model.fit(X_resampled, y_resampled)

# Make a prediction using the testing data (the ORIGINAL, untouched test set)
y_resampled_pred = lr_resampled_model.predict(X_test)
# -
# ### Evaluate the model’s performance:
#
# * Calculate the accuracy score of the model.
#
# * Generate a confusion matrix.
#
# * Print the classification report.
# Print the balanced_accuracy score of the model
print(balanced_accuracy_score(y_test, y_resampled_pred))

# Generate a confusion matrix for the model
print(confusion_matrix(y_test, y_resampled_pred))

# Print the classification report for the model
# (precision, recall, specificity, f1 per class — imbalanced-aware)
print(classification_report_imbalanced(y_test, y_resampled_pred))
# **Question:** How well does the logistic regression model, fit with oversampled data, predict both the `0` (healthy loan) and `1` (high-risk loan) labels?
#
# **Analysis:** The logistic regression model fit with oversampled data has a recall of 99%, meaning that 99% of the high risk loans were correctly identified. The precision is 84%, meaning there is a 16% false positive rate on identifying high risk loans.
#
# Compared to the model that did not use oversampling to take into account the small representation of the targets for analysis, the oversampling model had greatly increased identification of high risk loans and only lost about 1 percentage point in terms false positives. So for this data set and target application, it appears to be the much stronger model to use.
| credit_risk_resampling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
#import csv and pandas protocol
import csv
import pandas as pd
#Import Path from Path Library
from pathlib import Path
# Show where relative paths (e.g. Resources/) are resolved from.
print(f"Current Working Directory: {Path.cwd()}")  # fixed typo: "Curretn"
# +
#Import data
budget = Path("Resources/budget_data.csv")
#Read in data
pnl_df = pd.read_csv(budget)
# -
# Summary statistics computed straight from the DataFrame columns.
count = pnl_df["Date"].count()
total = pnl_df["Profit/Losses"].sum()
average = pnl_df["Profit/Losses"].mean()
maximum = pnl_df["Profit/Losses"].max()
minimum = pnl_df["Profit/Losses"].min()

# Look up the dates of the extreme months with idxmax/idxmin instead of
# hard-coding row positions (25 and 44), which silently broke for any
# input file other than the original budget_data.csv.
max_date = pnl_df.loc[pnl_df["Profit/Losses"].idxmax(), "Date"]
min_date = pnl_df.loc[pnl_df["Profit/Losses"].idxmin(), "Date"]
# Print the summary report to the console...
print(f"Financial Analysis")
print(f"------------------------------------------------------")
print(f"Total Months: {count}")
print(f"Total : ${total:,}")
print(f"Average Change: ${average:,.2f}")
print(f"Greatest Increase in Profits: {max_date} (${maximum:,})")
print(f"Greatest Decrease in Profits: {min_date} (${minimum:,})")

# Set the output file path
output_path = Path("output.txt")

# ...and write the identical report to output.txt.
# Open the output_path as a file object in "write" mode ('w')
# Write a header line and write the contents of 'text' to the file
with open(output_path, 'w') as file:
    file.write(f"Financial Analysis\n")
    file.write(f"------------------------------------------------------\n")
    file.write(f"Total Months: {count}\n")
    file.write(f"Total : ${total:,}\n")
    file.write(f"Average Change: ${average:,.2f}\n")
    file.write(f"Greatest Increase in Profits: {max_date} (${maximum:,})\n")
    file.write(f"Greatest Decrease in Profits: {min_date} (${minimum:,})\n")
| PyBank/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Step 1 : Understanding the business
# #### OVERVIEW
#
# In this notebook, I will take you through a series of analyses.
# We will answer a few questions and draw some insights on the 2017 StackOverflow survey for developers.
#
# These are three questions we want to answer in this notebook:
#
# 1. How does the country you live in affect you as a developer?
# 2. How does your Gender affect you when it comes to job placements and salaries?
# 3. How does your formal education impact you as a developer?
# ## Step 2: Understanding the Data
# +
# Importing the necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline

# Load the public 2017 StackOverflow developer-survey responses.
df = pd.read_csv('./survey_results_public.csv')
df.head()
# -
# Checking the shape of df (rows = respondents, columns = questions)
df.shape
# This dataset has 51392 responses recorded, meaning that 51392 people participated in this survey.
# It also has 154 columns, which is the number of questions that were asked to participants.
# +
# Find the number of represented countries in the dataset
number_of_countries = len(df['Country'].unique())
print("The dataset has recorded data of developers from "+str(number_of_countries) + " countries around the world")
# -
# ## Step 3 : Preparation of data + modeling + Evaluation
#
# I have combined these steps because I will be cleaning, and also analysing the data all together.
# I will also be drawing insights in this part.
#
# For instance, I might want to remove the nan values for a column and compute the average at the same time.
# ## Question 1 : How does the country you live in affect you as a developer?
#
# Is it important to understand what the implications of being in a certain country are for a developer.
# +
# Find the count of developers per country
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
# NOTE(review): these labels are hard-coded and assumed to match the
# order of value_counts() — confirm against the counts printed below.
countries = ['United States', 'India', 'United Kingdom' , 'Germany', 'Canada']
numbers = df['Country'].value_counts()[:5]
ax.bar(countries,numbers)
plt.title('Top 5 countries with the highest number of developers')
plt.xlabel('Countries')
plt.ylabel('Number of developers')
plt.show()
df['Country'].value_counts().sort_values(ascending = False)
# -
# The Top 5 countries with the most represented developers are: the United States, India, United Kingdom, Germany, Canada. Note that this reflects the reality where those countries are part of the countries with more developers in the world.
# +
# Delete all rows that have nan values in the salary column
# We drop these rows because we do not want biases in the value of the mean.
def drop_columns(data_f, col):
    '''
    Drop every row that has a NaN in ANY of the given columns.

    col : list of the columns whose NaN rows should be removed.
    data_f : the dataframe to remove the rows from.

    Bug fix: the original loop re-filtered the ORIGINAL frame on each
    iteration, so only the last column in `col` actually took effect.
    Passing the whole list to dropna applies all columns at once.
    '''
    return data_f.dropna(subset=col, how='any')
# Only the Salary column needs to be NaN-free for the averages below.
col = ['Salary']
rm_salary = drop_columns(df,col)

# calculating the average salary per country
rm_salary.groupby(['Country']).mean()['Salary'].sort_values()
# -
# The countries with the highest average salaries are Kuwait, Uganda, the U.S. Minor Outlying Islands, Bermuda and the Virgin Islands. These are only averages, so it would be wrong to conclude that these countries pay best: countries like Uganda have very few responses, so a single outlier can easily inflate or deflate the average.
print("the average salary in this dataset is : " + str(rm_salary['Salary'].mean()))
# Countries like the United States, Inda etc have more developers, meaning that working as a developer in those countries is most common than other countries.
# We also saw that the greater the number of programmers in a country, the greater the average salary.
#
# Last, if you work as a developer, you can expect a salary of around $56000
# ### Question 2 : How does your Gender affect you when it comes to job placements and salaries?
#
# +
# Check the missing values in the Gender column
# We will be using the rm_salary dataset
number_null = sum(rm_salary['Gender'].isnull())
print("There are " + str(number_null) + " null values in the Gender column")
# +
# The missing values represent 9% of the values in the column Gender.
# We want to count the number of participants per gender, and imputing
# (mode or a constant value) would bias those counts, so we drop the
# missing rows — that way the counts are guaranteed to be real.
new_df = rm_salary.dropna(subset = ['Gender'] , how = 'any')

# Check the missing values after removing the missing values
print("There are now " + str(sum(new_df['Gender'].isnull())) + " null values in the Gender column")
new_df['Gender'].shape
# -
# Check the gender balance in the dataset
# (fractions of the total responses, not raw counts)
gender_repartition = new_df['Gender'].value_counts()/new_df.shape[0]
print(gender_repartition)
# +
from matplotlib import pyplot as plt

# Creating dataset: the three most common gender answers and their counts.
labels = ['Male', 'Female', 'Other']
sizes = new_df['Gender'].value_counts()[:3]

# Creating plot
g = plt.figure(figsize =(10, 7))
plt.pie(sizes, labels = labels)

# Show plot (fixed typo "reprsented" in the displayed title)
plt.title('Top 3 genders represented in the dataset')
plt.show()
# -
# ### 91% of male, 6% female, and 3% of other genders.
# Let's check the differences of salaries for the genders
new_df.groupby(['Gender']).mean()['Salary'].sort_values()
# #### Looking at the above result, Males and Females gain almost the same with a slight difference
# ### Question 3: How does your formal education impact you as a developer?
#
# Check if there are missing values in the formal education column
print('There are ' + str(sum(new_df['FormalEducation'].isnull())) + ' missing value in that colunm')
# The unique formal education values in the dataset
new_df['FormalEducation'].unique()
# +
# Find the counts per formal education
new_df['FormalEducation'].value_counts()

# This shows that a bachelor's degree is the most common formal education for developers.
# Plot the Top 5 degrees
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
Degrees = ['Bachelor','Master', 'Some college', 'Secondary sc' , 'Doctoral degree']
numbers = new_df['FormalEducation'].value_counts()[:5]
ax.bar(Degrees,numbers)
plt.title('Top 5 Degrees with the highest number of developers')
plt.xlabel('Degrees')
plt.ylabel('Number of developers')
# -
# How formal education affects your salary (mean salary per degree)
new_df.groupby(['FormalEducation']).mean()['Salary'].sort_values()
# #### Looking at the above result, we can see that the more educated you are, the greater your salary. There are, however, exceptions for primary school and for people who never completed a formal education. Note that these represent only 104 data points out of ~11k, so the averages may be biased.
# ## Step 4: Setting out
# This short analysis showed that your country, your gender and your formal education all have a measurable impact on you as a developer, especially when it comes to salary.
# So depending on your “profile”, you probably have your own distinct experience as a developer.
| Udacity_DataScience_Project1_SoroKolotioloma (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="XGCdmDAKpLuf"
# ##### Copyright 2019 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# + cellView="form" colab={} colab_type="code" id="GF4d1XplpLGF"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="W1L3zJP6pPGD"
# # Adversarial example using FGSM
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/generative/adversarial_fgsm"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/adversarial_fgsm.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/adversarial_fgsm.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/generative/adversarial_fgsm.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="8dn1-g8BpPDx"
# This tutorial creates an *adversarial example* using the Fast Gradient Signed Method (FGSM) attack as described in [Explaining and Harnessing Adversarial Examples](https://arxiv.org/abs/1412.6572) by Goodfellow *et al*. This was one of the first and most popular attacks to fool a neural network.
#
# ## What is an adversarial example?
#
# Adversarial examples are specialised inputs created with the purpose of confusing a neural network, resulting in the misclassification of a given input. These notorious inputs are indistinguishable to the human eye, but cause the network to fail to identify the contents of the image. There are several types of such attacks, however, here the focus is on the fast gradient sign method attack, which is a *white box* attack whose goal is to ensure misclassification. A white box attack is where the attacker has complete access to the model being attacked. One of the most famous examples of an adversarial image shown below is taken from the aforementioned paper.
#
# 
#
# Here, starting with the image of a panda, the attacker adds small perturbations (distortions) to the original image, which results in the model labelling this image as a gibbon, with high confidence. The process of adding these perturbations is explained below.
#
# ## Fast gradient sign method
# The fast gradient sign method works by using the gradients of the neural network to create an adversarial example. For an input image, the method uses the gradients of the loss with respect to the input image to create a new image that maximises the loss. This new image is called the adversarial image. This can be summarised using the following expression:
# $$adv\_x = x + \epsilon*\text{sign}(\nabla_xJ(\theta, x, y))$$
#
# where
#
# * adv_x : Adversarial image.
# * x : Original input image.
# * y : Original input label.
# * $\epsilon$ : Multiplier to ensure the perturbations are small.
# * $\theta$ : Model parameters.
# * $J$ : Loss.
#
# An intriguing property here, is the fact that the gradients are taken with respect to the input image. This is done because the objective is to create an image that maximises the loss. A method to accomplish this is to find how much each pixel in the image contributes to the loss value, and add a perturbation accordingly. This works pretty fast because it is easy to find how each input pixel contributes to the loss by using the chain rule and finding the required gradients. Hence, the gradients are taken with respect to the image. In addition, since the model is no longer being trained (thus the gradient is not taken with respect to the trainable variables, i.e., the model parameters), and so the model parameters remain constant. The only goal is to fool an already trained model.
#
# So let's try and fool a pretrained model. In this tutorial, the model is [MobileNetV2](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/applications/MobileNetV2) model, pretrained on [ImageNet](http://www.image-net.org/).
# + colab={} colab_type="code" id="vag2WYR6yTOC"
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.figsize'] = (8, 8)
mpl.rcParams['axes.grid'] = False
# + [markdown] colab_type="text" id="wiTHY8dqxzx7"
# Let's load the pretrained MobileNetV2 model and the ImageNet class names.
# + colab={} colab_type="code" id="nqhk2vYx6Ag0"
pretrained_model = tf.keras.applications.MobileNetV2(include_top=True,
weights='imagenet')
# Freeze the pretrained network: FGSM only needs gradients w.r.t. the
# input image, never w.r.t. the weights.
pretrained_model.trainable = False

# ImageNet labels
decode_predictions = tf.keras.applications.mobilenet_v2.decode_predictions
# + colab={} colab_type="code" id="f2cLrJH0zpfC"
# Helper function to preprocess the image so that it can be inputted in MobileNetV2
def preprocess(image):
    """Prepare a raw image tensor for MobileNetV2.

    Casts to float32, resizes to 224x224, applies the MobileNetV2
    preprocessing (scales pixels to [-1, 1]), and prepends a batch axis.
    """
    resized = tf.image.resize(tf.cast(image, tf.float32), (224, 224))
    scaled = tf.keras.applications.mobilenet_v2.preprocess_input(resized)
    return scaled[None, ...]
# Helper function to extract labels from probability vector
def get_imagenet_label(probs):
    """Return the top-1 (class_id, class_name, confidence) triple for a probability vector."""
    top_predictions = decode_predictions(probs, top=1)
    return top_predictions[0][0]
# + [markdown] colab_type="text" id="iEZaMVFgSUA-"
# ## Original image
# Let's use a sample image of a [Labrador Retriever](https://commons.wikimedia.org/wiki/File:YellowLabradorLooking_new.jpg) by Mirko [CC-BY-SA 3.0](https://creativecommons.org/licenses/by-sa/3.0/) from Wikimedia Common and create adversarial examples from it. The first step is to preprocess it so that it can be fed as an input to the MobileNetV2 model.
# + colab={} colab_type="code" id="wpYrQ4OQSYWk"
# Download the sample Labrador photo (cached locally by get_file), decode it,
# and run one forward pass to get the clean-image class probabilities.
image_path = tf.keras.utils.get_file('YellowLabradorLooking_new.jpg', 'https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg')
image_raw = tf.io.read_file(image_path)
image = tf.image.decode_image(image_raw)
image = preprocess(image)
image_probs = pretrained_model.predict(image)
# + [markdown] colab_type="text" id="mvPlta_uSbuI"
# Let's have a look at the image.
# + colab={} colab_type="code" id="99Jc-SNoSZot"
# Display the unperturbed image with the model's top-1 label and confidence.
plt.figure()
plt.imshow(image[0]*0.5+0.5) # To change [-1, 1] to [0,1]
_, image_class, class_confidence = get_imagenet_label(image_probs)
plt.title('{} : {:.2f}% Confidence'.format(image_class, class_confidence*100))
plt.show()
# + [markdown] colab_type="text" id="kElVTbF690CF"
# ## Create the adversarial image
#
# ### Implementing fast gradient sign method
# The first step is to create perturbations which will be used to distort the original image resulting in an adversarial image. As mentioned, for this task, the gradients are taken with respect to the image.
# + colab={} colab_type="code" id="FhZxlOnuBCVr"
# Categorical cross-entropy matches the one-hot `label` built a few cells below.
loss_object = tf.keras.losses.CategoricalCrossentropy()
def create_adversarial_pattern(input_image, input_label):
    """Return sign(∇_x J) — the FGSM perturbation direction for `input_image`.

    The gradient is taken with respect to the *input image*, not the model
    weights, so the tape must be told to watch the image tensor explicitly.
    """
    with tf.GradientTape() as tape:
        tape.watch(input_image)
        loss = loss_object(input_label, pretrained_model(input_image))
    # Gradient of the loss w.r.t. the input pixels; keep only its sign.
    return tf.sign(tape.gradient(loss, input_image))
# + [markdown] colab_type="text" id="RbuftX0eSlDQ"
# The resulting perturbations can also be visualised.
# + colab={} colab_type="code" id="rVjnb6M7Smv4"
# Get the input label of the image.
labrador_retriever_index = 208  # ImageNet class index (per the variable name — Labrador retriever)
label = tf.one_hot(labrador_retriever_index, image_probs.shape[-1])
label = tf.reshape(label, (1, image_probs.shape[-1]))
# Sign-of-gradient pattern used for every epsilon in the sweep below.
perturbations = create_adversarial_pattern(image, label)
plt.imshow(perturbations[0]*0.5+0.5); # To change [-1, 1] to [0,1]
# + [markdown] colab_type="text" id="DKKSFHjwCyQH"
# Let's try this out for different values of epsilon and observe the resultant image. You'll notice that as the value of epsilon is increased, it becomes easier to fool the network. However, this comes as a trade-off which results in the perturbations becoming more identifiable.
# + colab={} colab_type="code" id="dBtG0Kl5SspV"
def display_images(image, description):
    """Plot an image batch, titled with `description` plus the model's top-1 label and confidence."""
    probs = pretrained_model.predict(image)
    _, label, confidence = get_imagenet_label(probs)
    plt.figure()
    # Map the model's [-1, 1] pixel range back to [0, 1] for display.
    plt.imshow(image[0]*0.5+0.5)
    plt.title('{} \n {} : {:.2f}% Confidence'.format(description,
                                                   label, confidence*100))
    plt.show()
# + colab={} colab_type="code" id="3DA8g-Zp69J4"
epsilons = [0, 0.01, 0.1, 0.15]
# eps == 0 is falsy, so the unperturbed image is labelled 'Input'.
descriptions = [('Epsilon = {:0.3f}'.format(eps) if eps else 'Input')
                for eps in epsilons]
for i, eps in enumerate(epsilons):
  adv_x = image + eps*perturbations
  # Clip back into the model's expected [-1, 1] pixel range.
  adv_x = tf.clip_by_value(adv_x, -1, 1)
  display_images(adv_x, descriptions[i])
# + [markdown] colab_type="text" id="fxt5VfnXHQT6"
# ## Next steps
#
# Now that you know about adversarial attacks, try this out on different datasets and different architectures. You may also create and train your own model, and then attempt to fool it using the same method. You can also try and see how the confidence in predictions vary as you change epsilon.
#
# Though powerful, the attack shown in this tutorial was just the start of research into adversarial attacks, and there have been multiple papers creating more powerful attacks since then. In addition to adversarial attacks, research has also led to the creation of defenses, which aims at creating robust machine learning models. You may review this [survey paper](https://arxiv.org/abs/1810.00069) for a comprehensive list of adversarial attacks and defences.
#
# For many more implementations of adversarial attacks and defenses, you may want to see the adversarial example library [CleverHans](https://github.com/tensorflow/cleverhans).
| site/en/tutorials/generative/adversarial_fgsm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.9 64-bit
# language: python
# name: python3
# ---
# Needed libraries
import sys
import psycopg2
from os import getenv
from dotenv import load_dotenv
load_dotenv()
# Secret Vars
# Connection credentials come from the environment (populated by the .env
# file above); any missing variable resolves to None.
ELEPHANTSQL_DATABASE = getenv('ELEPHANTSQL_DATABASE')
ELEPHANTSQL_USERNAME = getenv('ELEPHANTSQL_USERNAME')
ELEPHANTSQL_PASSWORD = getenv('ELEPHANTSQL_PASSWORD')
ELEPHANTSQL_HOST = getenv('ELEPHANTSQL_HOST')
# SQL Database Connect
def connect(DATABASE, USERNAME, PASSWORD, HOST):
    """Open a connection to the ElephantSQL-hosted PostgreSQL server.

    Prints the error and exits the process with status 1 if the
    connection cannot be established; otherwise returns the client.
    """
    elephantsql_client = None
    try:
        print('Connecting to the PostgreSQL database...')
        elephantsql_client = psycopg2.connect(
            dbname=DATABASE, user=USERNAME, password=PASSWORD, host=HOST)
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
        sys.exit(1)
    return elephantsql_client
# Create Database Table
def create_table(elephantsql_client, command):
    """Run a single DDL statement (e.g. CREATE TABLE) and commit it.

    Errors are printed rather than raised, matching the script's
    best-effort style.
    """
    try:
        cursor = elephantsql_client.cursor()
        cursor.execute(command)
        cursor.close()
        # Persist the schema change.
        elephantsql_client.commit()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
# Execute Command
def execute_command(elephantsql_client, insert_req):
""" Execute a single INSERT request """
try:
# Create cursor object
cur = elephantsql_client.cursor()
# Execute command
cur.execute(insert_req)
# Commit changes
elephantsql_client.commit()
except (Exception, psycopg2.DatabaseError) as error:
print("Error: %s" % error)
# Reset to prev database state
elephantsql_client.rollback()
# Close cursor object
cur.close()
return 1
cur.close()
# Drop a table
# +
# One-off cleanup cell: drops questions_table (prints an error if it is absent).
# Database connection
elephantsql_client = connect(ELEPHANTSQL_DATABASE, ELEPHANTSQL_USERNAME, ELEPHANTSQL_PASSWORD, ELEPHANTSQL_HOST)
command = '''DROP TABLE questions_table;'''
execute_command(elephantsql_client, command)
# Close the connection
elephantsql_client.close()
print('Connection is closed.')
# -
# Creating a table in the database
# +
# Database connection
elephantsql_client = connect(ELEPHANTSQL_DATABASE, ELEPHANTSQL_USERNAME, ELEPHANTSQL_PASSWORD, ELEPHANTSQL_HOST)
# Building Inital user tweet tables
command = '''CREATE TABLE IF NOT EXISTS questions_table (question_id SERIAL PRIMARY KEY,
question_title varchar(100),
question_lesson varchar(1500),
question_desc varchar(1500),
question_a varchar(100),
question_b varchar(100),
question_c varchar(100),
question_d varchar(100),
answer varchar(100),
error varchar(100))'''
create_table(elephantsql_client, command)
# Close the connection
elephantsql_client.close()
print('Connection is closed.')
# -
# Add to questions table
def add_question(question_package):
    """Insert one question row into questions_table.

    `question_package` is a dict holding the ten column values keyed by
    column name (see the track_*_question_* dicts below).

    Fix: the original built the INSERT with str.format, so any value
    containing a single quote (the lesson texts do, e.g. "apostrophes ('')")
    broke the SQL, and the statement was injectable. The values are now
    passed as psycopg2 query parameters, which handles quoting safely.
    """
    # Database connection
    elephantsql_client = connect(ELEPHANTSQL_DATABASE, ELEPHANTSQL_USERNAME, ELEPHANTSQL_PASSWORD, ELEPHANTSQL_HOST)
    command = ("INSERT INTO questions_table (question_id, question_title, question_lesson, "
               "question_desc, question_a, question_b, question_c, question_d, answer, error) "
               "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
    values = tuple(question_package[key] for key in (
        'question_id', 'question_title', 'question_lesson', 'question_desc',
        'question_a', 'question_b', 'question_c', 'question_d',
        'answer', 'error'))
    try:
        cur = elephantsql_client.cursor()
        # Parameterized execute: psycopg2 substitutes and escapes `values`.
        cur.execute(command, values)
        elephantsql_client.commit()
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print("Error: %s" % error)
        elephantsql_client.rollback()
    # Close the connection
    elephantsql_client.close()
    print('Connection is closed.')
# +
# python unit
track_one_question_one = {'question_id': 11,
'question_title': 'Question 1: Basic Operations and Printing in Python',
'question_lesson': """You can use the Jupyter notebook for Python as a basic calculator.
For example, for addition, 2 + 2 = 4. For subtraction, 2 - 2 = 0.
For multiplication, 2 * 4 = 8. For division, 4 / 2 = 2. For exponents,
2 ** 3 = 8. We also have the modulo or mod operator (%), where we divide 2
numbers and receive the remainder as the final answer. For example, if we
divide 5 by 2, we get 2.5, which is a quotient. But to get a remainder, we use
the mod operator, 5 % 2, which equals 1.""",
'question_desc': 'What is the output of 7 % 4?',
'question_a': '3',
'question_b': '6',
'question_c': '4',
'question_d': '5',
'answer': 'a',
'error': 'None'}
track_one_question_two = {'question_id': 12,
'question_title': 'Question 2: Basic Operations and Printing in Python',
'question_lesson': """To print in Python, we use the print command
like print(“Hello World”), which prints out "Hello World". Anything
inside quotation marks ("") is considered as a string. So, "12" is a string but
not an integer. Also, Python accepts both quotation marks and apostrophes ('')
to print. However, in cases such as print( I have done my work ), running this will
give us an error as Python will consider the characters up untill the
apostrophe. That can be corrected by using quotation marks instead.
Also, backslash n (\\n) creates a newline and backslash t (\\t) creates
create 3 spaces.""",
'question_desc': 'What is the output of the following string? print("My name is Troy")',
'question_a': 'Error',
'question_b': '("My name is Troy")',
'question_c': 'My name is Troy',
'question_d': '"My name is Troy"',
'answer': 'c',
'error': 'None'}
track_one_question_three = {'question_id': 13,
'question_title': 'Question 3: Basic Operations and Printing in Python',
'question_lesson': """To print in Python, we use the print command
like print(“Hello World”), which prints out "Hello World". Anything
inside quotation marks ("") is considered as a string. So, "12" is a string but
not an integer. Also, Python accepts both quotation marks and apostrophes ('')
to print. However, in cases such as print( I have done my work ), running this will
give us an error as Python will consider the characters up untill the
apostrophe. That can be corrected by using quotation marks instead.
Also, backslash n (\\n) creates a newline and backslash t (\\t) creates
create 3 spaces.""",
'question_desc': 'What is the output of: (2 ** 3) % 3',
'question_a': '1',
'question_b': '2',
'question_c': '3',
'question_d': '4',
'answer': 'b',
'error': 'None'}
track_one_question_four = {'question_id': 14,
'question_title': 'Question 4: Basic Operations and Printing in Python',
'question_lesson': """To print in Python, we use the print command
like print(“Hello World”), which prints out "Hello World". Anything
inside quotation marks ("") is considered as a string. So, "12" is a string but
not an integer. Also, Python accepts both quotation marks and apostrophes ('')
to print. However, in cases such as print( I have done my work ), running this will
give us an error as Python will consider the characters up untill the
apostrophe. That can be corrected by using quotation marks instead.
Also, backslash n (\\n) creates a newline and backslash t (\\t) creates
create 3 spaces.""",
'question_desc': 'What is the output of the following string: print(“DateTime”)',
'question_a': 'Error',
'question_b': 'DateTime',
'question_c': 'Date Time',
'question_d': 'Date-Time',
'answer': 'd',
'error': 'None'}
track_one_question_five = {'question_id': 15,
'question_title': 'Question 5: Basic Operations and Printing in Python',
'question_lesson': """To print in Python, we use the print command
like print(“Hello World”), which prints out "Hello World". Anything
inside quotation marks ("") is considered as a string. So, "12" is a string but
not an integer. Also, Python accepts both quotation marks and apostrophes ('')
to print. However, in cases such as print( I have done my work ), running this will
give us an error as Python will consider the characters up untill the
apostrophe. That can be corrected by using quotation marks instead.
Also, backslash n (\\n) creates a newline and backslash t (\\t) creates
create 3 spaces.""",
'question_desc': 'What is the output of the following string? ("SecondstMinutes")',
'question_a': 'Second Minutes',
'question_b': 'SecondMinutes',
'question_c': 'Error',
'question_d': 'Second-Minutes',
'answer': 'a',
'error': 'None'}
#Topic 2: Phyton
track_two_question_one = {'question_id': 21,
'question_title': 'Question 1: Variables & Strings',
'question_lesson': '''Some rules of variables in Python are: 1. Variable names can not
start with a number 2. There can be no spaces in the name 3. Can not
use any of the following symbols 4. It is considered best practice
that names are lowercase 5. Avoid using words that have special
meaning in Python''',
'question_desc': '''If work = "Game", what would work print out?''',
'question_a': '4',
'question_b': 'Error',
'question_c': 'Game',
'question_d': '"Game"',
'answer': 'd',
'error': 'None'}
track_two_question_two = {'question_id': 22,
'question_title': 'Question 2: Variables & Strings',
'question_lesson': '''To find the total length of a string, we use the "len" operator. For example,
if f = "Grow", then len(f) will be 4. Spaces also have a length, so we need to
make sure they are accounted for. For example, if g = "Gold Button", then len(g)
will be 11 and not 10, as the space between Gold and Button also has a length.''',
'question_desc': 'If strings = "Topper", The length of strings or len(strings) is:',
'question_a': '3',
'question_b': '6',
'question_c': '7',
'question_d': '10',
'answer': 'b',
'error': 'None'}
track_two_question_three = {'question_id': 23,
'question_title': 'Question 3: Variables & Strings',
'question_lesson': '''To find the total length of a string, we use the "len" operator. For example,
if f = "Grow", then len(f) will be 4. Spaces also have a length, so we need to
make sure they are accounted for. For example, if g = "Gold Button", then len(g)
will be 11 and not 10, as the space between Gold and Button also has a length.''',
'question_desc': 'If dos="Beautiful flower", the length of dos or len(dos) is:',
'question_a': '15',
'question_b': '17',
'question_c': '16',
'question_d': '20',
'answer': 'c',
'error': 'None'}
track_two_question_four = {'question_id': 24,
'question_title': 'Question 4: Variables & Strings',
'question_lesson': '''To find the data types of a code, we use the type operator. This are important
data types. Integers are used with the type "int", such as: 3, 40, -12. Floating point
numbers are used with the type "float", such as: 2.5, -8.912, 10.0. Strings are used with the
type "str", such as: "Hello", "Sammy", "200".''',
'question_desc': 'What is the type of c if c = 20.0?',
'question_a': 'str',
'question_b': 'float',
'question_c': 'int',
'question_d': 'tup',
'answer': 'b',
'error': 'None'}
track_two_question_five = {'question_id': 25,
'question_title': 'Question 5: Variables & Strings',
'question_lesson': '''To find the data types of a code, we use the type operator. This are important
data types. Integers are used with the type "int", such as: 3, 40, -12. Floating point
numbers are used with the type "float", such as: 2.5, -8.912, 10.0. Strings are used with the
type "str", such as: "Hello", "Sammy", "200".''',
'question_desc': 'What is the type of x if x = "12"',
'question_a': 'float',
'question_b': 'tup',
'question_c': 'str',
'question_d': 'int',
'answer': 'c',
'error': 'None'}
#Topic 3: Phyton
track_three_question_one = {'question_id': 31,
'question_title': 'Question 1: String Indexing',
'question_lesson': '''We know that strings are a sequence, which means Python can use indexes to
call parts of the sequence. In Python, we use brackets [] after an object
to call its index. We should also note that indexing starts at 0 for Python.
To make comments in your code, you can use the hashtag symbol (#) and whatever your write here
will not affect your code.''',
'question_desc': 'What is the answer of Hello World[8]',
'question_a': 'l',
'question_b': 'r',
'question_c': 'd',
'question_d': 'Error',
'answer': 'a',
'error': 'None'}
track_three_question_two = {'question_id': 32,
'question_title': 'Question 2: String Indexing',
'question_lesson': '''We know that strings are a sequence, which means Python can use indexes to
call parts of the sequence. In Python, we use brackets [] after an object
to call its index. We should also note that indexing starts at 0 for Python.
To make comments in your code, you can use the hashtag symbol (#) and whatever your write here
will not affect your code.''',
'question_desc': 'If strings = "Active Mind", what is the output of strings[5]?',
'question_a': '" "',
'question_b': '"m"',
'question_c': '"e"',
'question_d': 'Error',
'answer': 'c',
'error': 'None'}
track_three_question_three = {'question_id': 33,
'question_title': 'Question 3: String Indexing',
'question_lesson': '''We can do slicing with the colon symbol (:). The colon symbol tells python to grab
everything from that designated spot, such as in x = "abcdefgh", x[1:] or x[-7:], which
grabs the items from position 1 or (-7) and everything else after. This would give us the
answer "bcdefgh" for the previous example. Also, x[:3] or x[:-5] tells python to grab
everything before position 3 or (-5), not including the term on position 3 or (-5), which
gives the answer of "abc".''',
'question_desc': 'If strings = "abcdefgh", what is the output of strings[:]?',
'question_a': 'abcdefgh',
'question_b': 'aceg',
'question_c': 'Error',
'question_d': 'hgfedcba',
'answer': 'a',
'error': 'None'}
track_three_question_four = {'question_id': 34,
'question_title': 'Question 4: String Indexing',
'question_lesson': '''We can do slicing with the colon symbol (:). The colon symbol tells python to grab everything
from that designated spot, such as in x = "abcdefgh", x[1:] or x[-7:], which grabs the items from
position 1 or (-7) and everything else after. This would give us the answer "bcdefgh" for the
previous example. ALso, x[:3] or x[:-5] tells python to grab everything before position 3 or
(-5), not including the term on position 3 or (-5), which gives the answer of "abc".''',
'question_desc': 'If strings = "abcdefgh", what is the output of strings[0:6]?',
'question_a': 'dcefgh',
'question_b': 'abcdef',
'question_c': 'abcdefg',
'question_d': 'None',
'answer': 'b',
'error': 'None'}
track_three_question_five = {'question_id': 35,
'question_title': 'Question 5: String Indexing',
'question_lesson': '''We can also use index and slice notation to grab elements of a sequence by a specified
step size (the default is 1). For instance, we can use two colons in a row and then a number
specifying the frequency to grab elements. For example, if x = "abcdefgh", x[::4] will give us "ae".''',
'question_desc': 'If strings = "abcdefgh", what is the output of strings[::3]?',
'question_a': 'fgh',
'question_b': 'abc',
'question_c': 'adg',
'question_d': 'cde',
'answer': 'c',
'error': 'None'}
#Topic 4: C++ Part 1
track_four_question_one = {'question_id': 41,
'question_title': 'Question 1: Strings',
'question_lesson': '''Strings are objects that represent sequences of characters. Strings allow you to print words,
sentences, numbers, and special characters. You can even add strings together to combine them into
whatever you want. Strings have certain caveats, however, with this course, we can teach you all about them.''',
'question_desc': 'Which line of code below shows the missing syntax of the following line of code that prints a sentence? cout "My name is Troy";',
'question_a': ' cout << "My name is Troy" <<; ',
'question_b': ' cout < "My name is Troy" ',
'question_c': ' cout < "My name is Troy" <; ',
'question_d': ' cout << "My name is Troy"; ',
'answer': 'd',
'error': 'None'}
track_four_question_two = {'question_id': 42,
'question_title': 'Question 2: Strings',
'question_lesson': '''Strings are objects that represent sequences of characters. Strings allow you to print words, sentences,
numbers, and special characters. You can even add strings together to combine them into whatever you want.
Strings have certain caveats, however, with this course, we can teach you all about them.''',
'question_desc': ' What is the output of the following string? cout << "My name is troy"; ',
'question_a': ' Error ',
'question_b': ' My name is Troy ',
'question_c': ' ("My name is Troy") ',
'question_d': ' "My name is Troy" ',
'answer': 'b',
'error': 'None'}
track_four_question_three = {'question_id': 43,
'question_title': 'Question 3: Strings',
'question_lesson': '''Strings are objects that represent sequences of characters. Strings allow you to print words,
sentences, numbers, and special characters. You can even add strings together to combine them
into whatever you want. Strings have certain caveats, however, with this course, we can
teach you all about them.''',
'question_desc': 'If we have two strings, str1 and str2. What is the output of the following code when: string str1 = "Hack"; string str2 = "Merced"; cout << str1 + str2; ',
'question_a': ' HackMerced ',
'question_b': ' Merced Hack ',
'question_c': ' Hack Merced ',
'question_d': ' None ',
'answer': 'a',
'error': 'None'}
track_four_question_four = {'question_id': 44,
'question_title': 'Question 4: Strings',
'question_lesson': '''Strings are objects that represent sequences of characters. Strings allow you to print words,
sentences, numbers, and special characters. You can even add strings together to combine them
into whatever you want. Strings have certain caveats, however, with this course, we can
teach you all about them.''',
'question_desc': 'If we want to get a string input from the user with a variable named "firstName", what would be the correct correct way to get it? ',
'question_a': ' cin << firstName; ',
'question_b': ' cin >> firstName; ',
'question_c': ' cout << firstName; ',
'question_d': ' cin >> firstName; ',
'answer': 'd',
'error': 'None'}
track_four_question_five = {'question_id': 45,
'question_title': 'Question 5: Strings',
'question_lesson': '''Strings are objects that represent sequences of characters. Strings allow you to print
words, sentences, numbers, and special characters. You can even add strings together to
combine them into whatever you want. Strings have certain caveats, however, with
this course, we can teach you all about them.''',
'question_desc': 'What would be the output of the following code if the user input was "Hello World": string str1; cin >> str1; cout << str1; ',
'question_a': ' Hello World ',
'question_b': ' World ',
'question_c': ' World Hello ',
'question_d': ' Hello ',
'answer': 'd',
'error': 'None'}
#Topic 5: C++ Part 2
track_five_question_one = {'question_id': 51,
'question_title': 'Question 1: Math/Arithmetic',
'question_lesson': '''Operators are used to perform operations on variables and values. The addition operator (+) is
used to add variables, while also adding values. The subtraction operator (-) is also used to
subtract variables and values. The multiplication operator (*) is used to multiply variables
and values. The division operator (/) is used to divide variables and values. The modulus
operator (%) is used to return the division remainder of two variables or values.''',
'question_desc': 'What would be the correct way to initialize the variable x to integer 10 multiplied by 2?',
'question_a': ' 10 * 2;',
'question_b': ' int x = 10 * 2',
'question_c': ' 10 * 2 = x',
'question_d': ' int x = 10 * 2;',
'answer': 'd',
'error': 'None'}
track_five_question_two = {'question_id': 52,
'question_title': 'Question 2: Math/Arithmetic',
'question_lesson': '''Operators are used to perform operations on variables and values. The addition
operator (+) is used to add variables, while also adding values. The subtraction
operator (-) is also used to subtract variables and values. The multiplication operator (*) is
used to multiply variables and values. The division operator (/) is used to divide variables and
values. The modulus operator (%) is used to return the division remainder of two variables or values.''',
'question_desc': 'What would be the correct way to initialize the variable N that holds a decimal number?',
'question_a': ' Both B and C',
'question_b': ' float = x;',
'question_c': ' double = x;',
'question_d': ' float x;',
'answer': 'a',
'error': 'None'}
track_five_question_three = {'question_id': 53,
'question_title': 'Question 3: Math/Arithmetic',
'question_lesson': '''Operators are used to perform operations on variables and values. The addition
operator (+) is used to add variables, while also adding values. The subtraction
operator (-) is also used to subtract variables and values. The multiplication operator (*)
is used to multiply variables and values. The division operator (/) is used to divide variables
and values. The modulus operator (%) is used to return the division remainder of two variables or values.''',
'question_desc': 'What would be the output of the following code if "int x = 21.2" and "int y = 21.3": cout << x + y;',
'question_a': ' 42',
'question_b': ' 42.5',
'question_c': ' Error',
'question_d': ' 43',
'answer': 'c',
'error': 'None'}
track_five_question_four = {'question_id': 54,
'question_title': 'Question 4: Math/Arithmetic',
'question_lesson': '''Operators are used to perform operations on variables and values. The addition
operator (+) is used to add variables, while also adding values. The subtraction
operator (-) is also used to subtract variables and values. The multiplication
operator (*) is used to multiply variables and values. The division operator (/)
is used to divide variables and values. The modulus operator (%) is used to return
the division remainder of two variables or values.''',
'question_desc': 'What would be the output of the following code if "float x = 21.2" and "double y = 21.3": cout << x + y;',
'question_a': '42',
'question_b': '42.5',
'question_c': 'None',
'question_d': '43',
'answer': 'b',
'error': 'None'}
track_five_question_five = {'question_id': 55,
'question_title': 'Question 5: Math/Arithmetic',
'question_lesson': '''Operators are used to perform operations on variables and values. The
addition operator (+) is used to add variables, while also adding values. The
subtraction operator (-) is also used to subtract variables and values. The
multiplication operator (*) is used to multiply variables and values. The division
operator (/) is used to divide variables and values. The modulus operator (%) is used
to return the division remainder of two variables or values.''',
'question_desc': ' We have two initialized variables, "int x = 365" and "int y = 7". What would be the output of the following code: cout << x / y << " " << x % y; ',
'question_a': '5 1',
'question_b': '521',
'question_c': '52 1',
'question_d': '52',
'answer': 'c',
'error': 'None'}
#Topic 6: c++ Part 3
# Track 6 (C++ loops and if-statements), question 1: while-loop condition evaluation.
track_six_question_one = {'question_id': 61,
    'question_title': 'Question 1: Loops and if-statements',
    'question_lesson': '''A while-loop repeats a statement or group of statements while a given condition is true.
It tests the condition before executing the loop body. Psuedo-code for a while-loop is
as follows: while(condition) { statement(s)} ''',
    'question_desc': 'If the condition of a while loop is "i > 0", where i is a variable that takes in user input, when would the loop continue? ',
    'question_a': ' When i = -2 ',
    'question_b': ' When i = 1 ',
    'question_c': ' When i = -1 ',
    'question_d': ' When i = 0 ',
    'answer': 'b',
    'error': 'None'}
# Track 6, question 2: breaking out of a loop with an if-statement (no listed option is correct).
track_six_question_two = {'question_id': 62,
    'question_title': 'Question 2: Loops and if-statements',
    'question_lesson': '''A while-loop repeats a statement or group of statements while a given condition is
true. It tests the condition before executing the loop body. Psuedo-code for a while-loop
is as follows: while(condition) { statement(s)}. There is also a boolean conditional
statement called if-else statements. If the boolean expression in the if-statement evaluates
to true, then the if block of code will be executed, otherwise, the else block of code will
be executed. An else statement is not always needed, however. If-else statements can also be used
within while loops. Psuedo-code for if-statements is as follows: if(boolean_expression){ //
statement(s) will execute if the boolean expression is true } else { // statement(s) will
execute if the boolean expression is false }''',
    'question_desc': "We have a while-loop wtih a condition of 1. Which of the following answers shows how to break from the while-loop using an if-statement once the user input is greater than 10 or less than 0? ",
    'question_a': ' if (i > 0 i < 10) ',
    'question_b': ' if (i > 10 and i > 0) ',
    'question_c': ' if (i > 10 or i < 10) ',
    'question_d': ' None ',
    'answer': 'd',
    'error': 'None'}
# Track 6, question 3: independent if-statements both executing.
track_six_question_three = {'question_id': 63,
    'question_title': 'Question 3: Loops and if-statements',
    'question_lesson': '''There is a boolean conditional statement called if-else statements. If the boolean
expression in the if-statement evaluates to true, then the if block of code will be
executed, otherwise, the else block of code will be executed. An else statement is not
always needed, however. If-else statements can also be used within while loops. Psuedo-code
for if-statements is as follows: if(boolean_expression){ // statement(s) will execute if
the boolean expression is true } else { // statement(s) will execute if the boolean expression is false }''',
    'question_desc': 'We want to check the grade of student, where a students inputs their grade to "testScore". There are two if-statements. One checks if "testScore" is greater than 60, and outputs "You pass.". The other if-statement checks if "testScore" is greater than 90 and outputs “You did great.”. What would be the output if "testScore = 95". ',
    'question_a': ' You pass. You did great. ',
    'question_b': ' You did great. ',
    'question_c': ' Neither ',
    'question_d': ' You pass. ',
    # NOTE(review): with testScore = 95 BOTH independent if-statements fire, so the
    # printed output would be "You pass. You did great." (option a). The stored
    # answer 'd' looks inconsistent with the question text -- confirm intended answer.
    'answer': 'd',
    'error': 'None'}
# Track 6, question 4: for-loop header (init; condition; increment).
track_six_question_four = {'question_id': 64,
    'question_title': 'Question 4: Loops and if-statements',
    'question_lesson': '''There is also a widly used type of loop called for-loop. These for-loops execute a sequence of
statements multiple times and abbreviates the code that manages the loop variable. The psuedo code
for for-loops is as follows: for(init; condition; increment){ statement(s); }''',
    'question_desc': 'There is a variable named max. We want to create a for-loop which increments the variable i, starting from zero, by 1 everytime i is less than max. What would be the initialization, condition, and the update expression of this loop? ',
    'question_a': ' (int i = 0; i > max; i++) ',
    'question_b': ' (i = 0; i < max; i++)',
    'question_c': ' (int i; i > max; i++) ',
    'question_d': ' (int i = 0; i < max; i++) ',
    'answer': 'd',
    'error': 'None'}
# Track 6, question 5: do-while trace (iterations print 10, 11, 12, 13 -> 4th is 13).
track_six_question_five = {'question_id': 65,
    'question_title': 'Question 5: Loops and if-statements',
    'question_lesson': '''Unlike for and while loops, which test the loop condition at the top of the loop, the
do-while loop checks its condition at the bottom of the loop. A do-while loop is similar
to a while loop, except that a do-while loop is guaranteed to execute at least once. The
psuedo-code for do-while loops is as follows: do{ statement(s); } while(condition);''',
    'question_desc': 'Suppose we have the variable a = 10. The condition of the do-while loops is a < 20. Everytime this is true, the statement in the loop outputs the current value of a. Then the value of a is incremented by 1. What is the output of the 4th iteration? ',
    'question_a': ' 15 ',
    'question_b': ' 10 ',
    'question_c': ' 13 ',
    'question_d': ' 14 ',
    'answer': 'c',
    'error': 'None'}
# -
# Register every quiz question with the database, preserving the original
# insertion order: tracks 1-6, five questions each.
for _question in (
    track_one_question_one,
    track_one_question_two,
    track_one_question_three,
    track_one_question_four,
    track_one_question_five,
    track_two_question_one,
    track_two_question_two,
    track_two_question_three,
    track_two_question_four,
    track_two_question_five,
    track_three_question_one,
    track_three_question_two,
    track_three_question_three,
    track_three_question_four,
    track_three_question_five,
    track_four_question_one,
    track_four_question_two,
    track_four_question_three,
    track_four_question_four,
    track_four_question_five,
    track_five_question_one,
    track_five_question_two,
    track_five_question_three,
    track_five_question_four,
    track_five_question_five,
    track_six_question_one,
    track_six_question_two,
    track_six_question_three,
    track_six_question_four,
    track_six_question_five,
):
    add_question(_question)
| notebooks/database_questions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="bKFqFwPpWtZZ" colab_type="text"
# # What is IPython notebook?
#
# * IPython notebook can be thought of as a text document, which can run Python code.
# * Google Colab (part of Google Drive) allows you to create, edit and run IPython notebooks
# * You can also download the notebooks to your computer and run them there, but you need to set up the environment for this first
# * (for the curious minds we can also show this - how to create IPython notebooks locally without any need for Google)
#
# ## First of all do: **File** -> **Save a copy in Drive**
#
# 
#
# Now you have a copy of the notebook in your drive - you can change it, save it for later etc
#
#
#
#
#
#
#
#
#
#
# + [markdown] id="a6-xwS4s9OWP" colab_type="text"
# # Your first program
#
# There are two kind of cells in IPython Notebooks: "text", like this one and "code" - the one that can be run.
# + id="la71mrjt9agm" colab_type="code" colab={}
# print hello world to the screen
print('hello world!')
# + [markdown] id="E69jQ8hcOZ8P" colab_type="text"
# # Understanding your first program
#
#
# ## Built-in functions:
# ```python
# print()
# ```
# * Part of the language
# * Sub-program
# * Has a unique name, an effect or result, and a list of arguments (parameters)
# * https://docs.python.org/3/library/functions.html
#
# ## Literals:
# ```python
# 'hello world!'
# ```
# * Represent fixed values in code
# * Different types: numbers (also floating-point), boolean, string and some others
#
# + [markdown] id="Se0t6YMmPS3d" colab_type="text"
# # Comments - another useful thing
#
#
#
#
#
#
# + id="SLF84BQVPO_u" colab_type="code" colab={}
# This is my first program in Python
# It prints the line 'Hello world' to
# the console output
print('hello world!') # this line prints
#print('1,2,3')
# + [markdown] id="IeYXWstlPntV" colab_type="text"
# * Ignored by the interpreter
# * Write it for yourself or for somebody else who is supposed to read your code in future
# * You can use it to disable lines of code in your program without deleting them
#
# + colab_type="code" id="B6j1ZnBSQRLh" colab={}
'''
Another way to
write mutiline
comments
fddasdfasdf
asdfasdfasdf
asdfasdfsadf
'''
print('hello world!')
# + [markdown] id="QBtsHJyTQ2-E" colab_type="text"
# # Breaking your first program
# + colab_type="code" id="DZnE37sYRrXh" colab={}
# This is my first program in Python
# It prints the line 'Hello world' to
# the console output
print('hello world!')
print( 10/0 )
# + [markdown] id="8cwKtGTzSBhV" colab_type="text"
# # Multi-instruction program
#
# + id="isa9pqCMSOIX" colab_type="code" colab={}
# This is my first multi-instruction
# program in Python: It prints two
# lines to the console output
print('hello world!')
print("this runs on Ilya's notebook")
print('I can use it like this: "bla"')
# + [markdown] id="_YncAroDSSXC" colab_type="text"
# * Only one “instruction” per line
# * Empty lines are allowed
# * Instructions are executed one after another
# * Look at the quotes (both ways work with string literals)
#
# + [markdown] id="SwZFCjeiSjSl" colab_type="text"
# # Additional materials and where to find help
#
# * https://automatetheboringstuff.com/ - Online book, covers all the basics of Python and how to use it
# * https://docs.python.org/3/index.html - Python3 documentation (including Tutorial)
# * Internet search - community is big, many questions were already posted somewhere
# * Pay special attention to the information at stackoverflow.com
#
# + [markdown] id="fw3bGqoVWrlh" colab_type="text"
# # Feedback time
#
# https://www.menti.com/uk98b4qj88
| theory/notebooks/1-1-intro/intro_practice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/FairozaAmira/AI_Programming_1_e/blob/master/Lesson04/Arithmetic_answers.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="Z8Y1lx2zhVRj"
# # Answers for Arithmetic Questions
# + [markdown] colab_type="text" id="vt2_SPQ3hVRt"
# **Question 1**
#
# Calculate $((5 + 2) \times (12 \div 4))$
# + colab={} colab_type="code" id="paVtbPjYhVRv" outputId="d39338bd-980d-4610-bd9d-f359e7dde5d7"
(5 + 2) * (12 / 4)
# + [markdown] colab_type="text" id="QzJ9Ga28hVR8"
# **Question 2**
#
# Show $11 / 3$
# + colab={} colab_type="code" id="QL1xL0PChVR-" outputId="1191d389-bdcd-4a0d-f589-fdffba91df13"
print( 11 / 3 )
# + [markdown] colab_type="text" id="zs2MKvp_hVSI"
# **Question 3**
#
# Show $11 // 3$
# + colab={} colab_type="code" id="4sTCesNFhVSK" outputId="808066a4-5b6f-482d-d3a6-b4aa23dd3186"
print( 11 // 3 )
# + [markdown] colab_type="text" id="kjUUcjBehVSO"
# **Question 4**
#
# Calculate $2^5$
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="gucLxXfAhVSQ" outputId="75b897f0-6990-43dc-ac3d-45824fc66dca"
print(2**5)
# -
# note: in Python `^` is bitwise XOR, not exponentiation -- this expression evaluates to 7
2^5
# + [markdown] colab_type="text" id="crtF6X1ehVSZ"
# **Answer the following questions:**
#
# 1. What is the difference between / and // ?
# 2. Try to practice with lots of mathematical equations.
#
| Variable_Operations/Arithmetic_answers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 05 Solutions
# #### Q1. What is the meaning of multiple inheritance?
# **Ans:** Inheritence is nothing but reusing the code of Parent class by the child class. Similary when a child class inherits its properties from multiple Parent classes this scenario is called **Multiple Inheritence**
class Parent_one:
    """First base class for the multiple-inheritance demo."""
    pass
class Parent_two:
    """Second base class for the multiple-inheritance demo."""
    pass
class child(Parent_one,Parent_two):
    """Child class inheriting from both parents (multiple inheritance)."""
    pass
# #### Q2. What is the concept of delegation?
# **Ans:** Delegation provides a proxy object for any class thay you want on top of the main class. its like a wrapper to your class so that you can access limited resources of the main class.
#
# it Wraps the object of main class into a smaller object with limited access
#
# Simply Delegation means that you can include a instance of another class as an instance variable, and forward messages to the instance.
# +
class Myclass:
    """Main class whose behaviour is exposed through a delegating wrapper."""
    def sayHi(self):
        print('Hey iam back')
    def whoAmI(self):
        print('Iam the main class')
class NewClass:
    """Delegation demo: wraps a Myclass instance and forwards calls to it."""
    def __init__(self,obj):
        # keep a reference to the wrapped object; all real work is delegated to it
        self.main = obj
    def welcome(self):
        # forward the call to the wrapped instance
        self.main.sayHi()
m = Myclass()
n = NewClass(m)
m.sayHi()           # direct call on the main class
n.main.sayHi()      # reaching through the wrapper's attribute
n.welcome()         # delegated call
n.main.whoAmI()
# -
# #### Q3. What is the concept of composition?
# **Ans:** In the concept of Composition, a class refers to one or more other classes by using instances of those classes as a instance variable. irrespective of inheritence in this approach all the parent class members are not inherited into child class, but only required methods from a class are used by using class instances.
# +
class Salary:
    """Holds a monthly pay figure and derives the yearly total."""
    def __init__(self,pay):
        self.pay = pay          # monthly pay
    def get_total(self):
        # twelve months of pay
        return self.pay*12
class Employee:
    """Composition demo: Employee owns a Salary instance instead of inheriting from it."""
    def __init__(self,pay,bonus):
        self.pay = pay
        # NOTE(review): bonus is stored but never used by annual_salary -- confirm intent
        self.bonus = bonus
        self.obj_salary = Salary(self.pay)  # composed Salary object
    def annual_salary(self):
        return f'Total Salary : {str(self.obj_salary.get_total())}'
obj_emp = Employee(800,500)
print(obj_emp.annual_salary())
# -
# #### Q4. What are bound methods and how do we use them?
# **Ans:** If a function is an attribute of class and it is accessed via the instances, they are called **bound methods**. A bound method is one that has **`self`** as its first argument. Since these are dependent on the instance of classes, these are also known as **instance methods**.
# +
class Test:
    """Illustrates bound (instance), class and static methods."""
    def method_one(self): # bound method: receives the instance as `self`
        print("Called method_one")
    @classmethod
    def method_two(cls): # class method: receives the class as `cls`, not an instance
        print("Called method_two")
    @staticmethod
    def method_three(): # static method: receives neither instance nor class
        print("Called method_three")
test = Test()
test.method_one() # accessing through instance object
test.method_two() # accessing through instance object
Test.method_two() # accessing directly through class
Test.method_three() # accessing directly through class
# -
# #### Q5. What is the purpose of pseudoprivate attributes?
# **Ans:** Pseudoprivate attributes are also useful in larger frameworks or tools, both to avoid introducing new method names that might accidentally hide definitions elsewhere in the class tree and to reduce the chance of internal methods being replaced by names defined lower in the tree. If a method is intended for use only within a class that may be mixed into other classes, the double underscore prefix ensures that the method won't interfere with other names in the tree, especially in multiple-inheritance scenarios
#
# Pseudoprivate names also prevent subclasses from accidentally redefining the internal method's names,
class Super:
    def method(self): # A real application method
        pass
class Tool:
    # NOTE(review): a single leading underscore is NOT name-mangled by Python;
    # the pseudoprivate behaviour described above (becoming _Tool__method)
    # requires a double underscore, i.e. `__method` -- confirm intended spelling.
    def _method(self): # becomes _Tool_method
        pass
    def other(self): # uses internal method
        self._method()
class Subl(Tool,Super):
    def actions(self):
        self.method()
class Sub2(Tool):
    def __init__(self):
        self.method = 99
| Python Advance/05.Assignment_05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ML Pipeline Preparation
# Follow the instructions below to help you create your ML pipeline.
# ### 1. Import libraries and load data from database.
# - Import Python libraries
# - Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)
# - Define feature and target variables X and Y
# +
# --- import libraries ---
import pandas as pd
import numpy as np
import sys
import re
import pickle
import nltk
from sqlalchemy import create_engine
from nltk.tokenize import word_tokenize,sent_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import GridSearchCV
from sklearn import multioutput
from custom_transformer import StartingVerbExtractor
import matplotlib.pyplot as plt
# %matplotlib inline
# ------------------------
# -
# disable warnings
import warnings
warnings.filterwarnings("ignore")
# download nltk packages
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
# +
# load data from database
engine = create_engine('sqlite:///InsertDatabaseName.db')
df = pd.read_sql_table('InsertTableName', engine)
# drop nan values
# NOTE(review): how='any' drops every row containing at least one NaN in ANY
# column -- confirm this does not discard a large share of the messages.
df.dropna(axis=0, how = 'any', inplace = True)
X = df['message']                    # raw message text (model input)
Y = df.iloc[:,4:].astype(int)        # all category columns from index 4 on (multi-label targets)
# -
Y.head(5)
# ### 2. Write a tokenization function to process your text data
def tokenize(text):
    '''
    Split raw message text into cleaned tokens.

    Each token is lemmatized (WordNet), lower-cased and stripped of
    surrounding whitespace.

    NOTE(review): despite the original description, stop words are NOT
    removed and no stemming is applied here -- the `stopwords` import at
    the top of the file is unused by this function. Confirm whether stop
    word removal was intended.

    @param : text - input raw text
    @return: clean_tokens - list of normalized tokens
    '''
    tokens = word_tokenize(text)
    lemmatizer = WordNetLemmatizer()

    clean_tokens = []
    for token in tokens:
        clean_token = lemmatizer.lemmatize( token ).lower().strip()
        clean_tokens.append(clean_token)
    # --- for ---
    return clean_tokens
# +
# let's take a look to the possible values distribution within classes
# making size of figure bigger
fig = plt.figure(figsize = (15,20))
ax = fig.gca()
# plot the historgram
Y.hist(ax = ax)
# -
# ### 3. Build a machine learning pipeline
# This machine pipeline should take in the `message` column as input and output classification results on the other 36 categories in the dataset. You may find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables.
# creating a pipeline
pipeline_rf = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier( RandomForestClassifier() ))
])
# ### 4. Train pipeline
# - Split data into train and test sets
# - Train pipeline
# split data into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split( X, Y, random_state = 42 )
# train classifier: pipeline
rf_model = pipeline_rf.fit( X_train, Y_train )
# ### 5. Test your model
# Report the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's `classification_report` on each.
def generate_report(y_test, y_pred, columns=None):
    '''
    Compute per-category classification metrics for a multi-output model.

    The original version silently depended on the notebook-global `Y` for the
    category names; that dependency is kept only as a backward-compatible
    default so existing call sites keep working.

    @param : y_test  - 2-D array of true labels, one column per category
    @param : y_pred  - 2-D array of predicted labels, same shape as y_test
    @param : columns - optional sequence of category names; defaults to
                       Y.columns.values (the notebook-global target frame)
    @return: df - DataFrame indexed by category name with columns
                  Accuracy, Precision, Recall and F1 score
    '''
    if columns is None:
        # backward-compatible fallback to the global target frame
        columns = Y.columns.values

    metrics = []
    for i, label in enumerate(columns):
        # micro-averaged scores per category column
        accuracy = accuracy_score(y_test[:, i], y_pred[:, i])
        precision = precision_score(y_test[:, i], y_pred[:, i], average='micro')
        recall = recall_score(y_test[:, i], y_pred[:, i], average='micro')
        f1 = f1_score(y_test[:, i], y_pred[:, i], average='micro')
        metrics.append([accuracy, precision, recall, f1])
    # --- for ---

    # assemble the per-category rows into a labelled frame
    df = pd.DataFrame(data=np.array(metrics),
                      index=columns,
                      columns=['Accuracy', 'Precision', 'Recall', 'F1 score'])
    return df
# evaluate the training set
Y_train_pred = pipeline_rf.predict( X_train )
# generate report for each row - training
df_train_report_pipeline_rf = generate_report( np.array(Y_train), Y_train_pred )
df_train_report_pipeline_rf
# calculate the mean values for each column
mean = df_train_report_pipeline_rf.mean(axis=0)
mean
Y_test_pred = pipeline_rf.predict( X_test )
# generate report for each row - testing
df_test_report_pipeline_rf = generate_report( np.array(Y_test), Y_test_pred )
df_test_report_pipeline_rf
# calculate the mean values for each column
mean = df_test_report_pipeline_rf.mean(axis=0)
mean
# +
# create new dataframe for test report results
df_test_report_mean = pd.DataFrame()
# collect test report
df_test_report_mean = df_test_report_mean.append( mean, ignore_index=True )
last_idx = df_test_report_mean.shape[0]-1
df_test_report_mean.loc[last_idx,'Name'] = 'RandomForrest'
df_test_report_mean
# -
# ### 6. Improve your model
# Use grid search to find better parameters.
# +
parameters = {
'vect__min_df': [1, 5],
'tfidf__use_idf':[True, False],
'clf__estimator__n_estimators':[10, 25],
'clf__estimator__min_samples_split':[2, 5, 10]
}
cv = GridSearchCV( pipeline_rf, param_grid=parameters )
# -
# ### 7. Test your model
# Show the accuracy, precision, and recall of the tuned model.
#
# Since this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio!
# train classifier
cv_model = cv.fit( X_train, Y_train )
cv.best_params_
Y_test_pred_cv = cv.predict( X_test )
# generate report for each row - testing
df_test_report_pipeline_cv = generate_report( np.array(Y_test), Y_test_pred_cv )
df_test_report_pipeline_cv
# calculate the mean values for each column
mean = df_test_report_pipeline_cv.mean(axis=0)
mean
# +
# collect test report
df_test_report_mean = df_test_report_mean.append( mean, ignore_index=True )
last_idx = df_test_report_mean.shape[0]-1
df_test_report_mean.loc[last_idx,'Name'] = 'cv'
df_test_report_mean
# -
# ### 8. Try improving your model further. Here are a few ideas:
# * try other machine learning algorithms
# * add other features besides the TF-IDF
# +
pipeline_ada = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier( AdaBoostClassifier() ))
])
parameters_ada = {'vect__min_df': [5],
'tfidf__use_idf':[True],
'clf__estimator__learning_rate': [0.5, 1],
'clf__estimator__n_estimators':[10, 25]
}
cv_ada = GridSearchCV( pipeline_ada, param_grid=parameters_ada )
# -
AdaBoostClassifier().get_params()
# train classifier
cv_ada_model = cv_ada.fit( X_train, Y_train )
Y_test_pred_ada = cv_ada.predict( X_test )
# generate report for each row - testing
df_test_report_pipeline_ada = generate_report( np.array(Y_test), Y_test_pred_ada )
df_test_report_pipeline_ada
# calculate the mean values for each column
mean = df_test_report_pipeline_ada.mean(axis=0)
mean
# +
# collect test report
df_test_report_mean = df_test_report_mean.append( mean, ignore_index=True )
last_idx = df_test_report_mean.shape[0]-1
df_test_report_mean.loc[last_idx,'Name'] = 'ada'
df_test_report_mean
# -
# <hr>
# trying to add another feature.
pipeline_rf2 = Pipeline([
('features', FeatureUnion ([
('text_pipeline', Pipeline ([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer())
])),
('starting_verb', StartingVerbExtractor() )
])),
('clf', multioutput.MultiOutputClassifier( RandomForestClassifier() ))
])
# train classifier
rf_model2 = pipeline_rf2.fit( X_train, Y_train )
Y_test_pred_rf2 = pipeline_rf2.predict( X_test )
# generate report for each row - testing
df_test_report_pipeline_rf2 = generate_report( np.array(Y_test), Y_test_pred_rf2 )
df_test_report_pipeline_rf2
# calculate the mean values for each column
mean = df_test_report_pipeline_rf2.mean(axis=0)
mean
# +
# collect test report
df_test_report_mean = df_test_report_mean.append( mean, ignore_index=True )
last_idx = df_test_report_mean.shape[0]-1
df_test_report_mean.loc[last_idx,'Name'] = 'RandomForrest2'
df_test_report_mean
# -
# plot the report
df_test_report_mean.set_index('Name', inplace=True)
df_test_report_mean.plot.barh()
# <hr>
# ### 9. Export your model as a pickle file
# I choose the model with AdaBoost classifier
with open('classifer.pkl', 'wb') as f:
pickle.dump( cv_ada, f )
# ### 10. Use this notebook to complete `train.py`
# Use the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.
| Project - Disaster Response Pipelines/.ipynb_checkpoints/ML Pipeline Preparation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:genpen]
# language: python
# name: conda-env-genpen-py
# ---
# + Collapsed="false"
import itertools
import numpy as np
import os
import seaborn as sns
from tqdm import tqdm
from dataclasses import asdict, dataclass, field
import vsketch
import shapely.geometry as sg
from shapely.geometry import box, MultiLineString, Point, MultiPoint, Polygon, MultiPolygon, LineString
import shapely.affinity as sa
import shapely.ops as so
import matplotlib.pyplot as plt
import pandas as pd
import vpype_cli
from typing import List, Generic
from genpen import genpen as gp, utils as utils
from scipy import stats as ss
import geopandas
from shapely.errors import TopologicalError
import functools
# %load_ext autoreload
# %autoreload 2
import vpype
from skimage import io
from pathlib import Path
from sklearn.preprocessing import minmax_scale
from skimage import feature
from genpen.utils import Paper
# + Collapsed="false"
# make page: 11x14 inch sheet with a working border inset from the edges
paper_size = '11x14 inches'
border:float=20  # margin passed to get_drawbox -- presumably millimetres, TODO confirm against genpen's Paper convention
paper = Paper(paper_size)
drawbox = paper.get_drawbox(border)
# + Collapsed="false"
db = drawbox.buffer(-15, cap_style=2, join_style=2)
# + Collapsed="false"
layer = gp.connected_hatchbox(db, angle=45, spacing=0.3, dist_thresh=0.6)
# + Collapsed="false"
sk = vsketch.Vsketch()
sk.size(paper.page_format_mm)
sk.scale('1mm')
sk.penWidth('0.1mm')
sk.geometry(layer)
tolerance=0.1
sk.vpype(f'linemerge --tolerance 0.5mm linesort')
sk.display()
# + Collapsed="false"
sk.save('/mnt/c/code/side/plotter_images/oned_outputs/165_hand_hatch.svg')
| scratch/016_hand_hatch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# ## algorithm
def simplex(c, A, b):
    """
    Maximize c @ x subject to A @ x <= b and x >= 0.

    Builds the initial tableau, pivots until no improving column
    remains, then reads the optimum off the final tableau.
    """
    tableau = initialize(c, A, b)
    optimal = False
    while not optimal:
        optimal = bool(search_optimum(tableau))
    return solution(c, tableau)
def initialize(c, A, b):
    """
    Build the starting simplex tableau for: max c @ x, A @ x <= b, x >= 0.

    Layout (m constraint rows, n structural columns, m slack columns):
        |A|E|b|
        |c|0|0|
    where E is the identity block for the slack variables.
    """
    m, n = A.shape
    k = len(c)
    tableau = np.zeros((m + 1, m + n + 1))
    tableau[:m, :n] = A                 # constraint coefficients
    for row in range(m):
        tableau[row, n + row] = 1.0     # slack-variable identity block
    tableau[:m, -1] = b                 # right-hand side
    tableau[-1, :k] = c                 # objective row
    return tableau
def search_optimum(table):
    """
    Perform one pivot step of the simplex method on `table`, in place.

    Returns True when the tableau is optimal (no positive coefficient left
    in the objective row); otherwise performs a single elimination pivot and
    returns None (falsy), so callers loop `while not search_optimum(table)`.

    Raises ArithmeticError when the entering column shows the LP is unbounded.
    """
    # candidate entering columns: positive coefficients in the objective row
    index = np.argwhere(table[-1, :-1] > 0).ravel()

    # optimum found: no column can improve the objective
    if not len(index):
        return True

    # pivotal column: first positive-cost column
    j = index[0]
    column = table[:-1, j].copy()
    # mark non-positive entries so they are excluded from the ratio test
    column[column <= 0] = -1
    if np.all(column <= 0):
        # no positive entry in the entering column => objective unbounded above
        raise ArithmeticError('the system is unbounded')

    # pivotal row: minimum-ratio test (rhs / column) over positive entries only
    pivots = table[:-1, -1] / column
    pivots[column <= 0] = np.inf
    i = np.argmin(pivots).ravel()[0]

    # eliminate by pivot at (i, j): scale the pivot row to make the pivot 1,
    # then clear column j from every other row via an outer-product update
    row = table[i] / table[i][j]
    table[:] -= np.outer(table[:, j], row)
    table[i, :] = row
    # round the pivot column to suppress floating-point residue (it should be a unit column)
    table[:, j] = table[:, j].round()
def solution(c, table):
    """
    Read the optimum off a final simplex tableau.

    A variable (decision or slack) is basic when its column is a unit
    column -- exactly one 1 with zeros elsewhere; its value is then the
    matching right-hand-side entry. Returns the decision variables `x`,
    the slack values, the maximal objective value and the final tableau.
    """
    rows, cols = table.shape
    k = len(c)

    # per-column counts used to recognise unit (basic) columns
    zeros_per_col = np.sum(table == 0, axis=0)
    ones_per_col = np.sum(table == 1, axis=0)

    x = np.zeros(cols - 1)
    for j in range(cols - 1):
        if zeros_per_col[j] == rows - 1 and ones_per_col[j] == 1:
            # basic variable: the dot product with the RHS column picks out its value
            x[j] = table[:, j] @ table[:, -1]

    return dict(
        x=x[:k],
        slack=x[k:],
        max=-table[-1, -1],
        table=table,
    )
# ## linear program #1
# ```
# maximize: -x + 3y + 2z
#
# subject to:
# x + y + z <= 6
# x + z <= 4
# y + z <= 3
# x + y <= 2
#
# x, y, z >= 0
# ```
c = np.array([-1, 3, 2])   # objective: maximize -x + 3y + 2z
A = np.array([
    [1, 1, 1],             # x + y + z <= 6
    [1, 0, 1],             # x     + z <= 4
    [0, 1, 1],             #     y + z <= 3
    [1, 1, 0],             # x + y     <= 2
])
b = np.array([6, 4, 3, 2])
# expected optimum: 8 at (x, y, z) = (0, 2, 1)
# +
lp = simplex(c, A, b)
for k in ['x', 'slack', 'table', 'max']:
print(k, '\n', lp[k], '\n')
# -
# ## linear program #2
# ```
# maximize: 2r + 4s + 3t + u
#
# subject to:
# 3r + s + t + 4u <= 12
# r - 3s + 2t + 3u <= 7
# 2r + s + 3t - u <= 10
#
# r, s, t, u >= 0
# ```
c = np.array([2, 4, 3, 1])   # objective: maximize 2r + 4s + 3t + u
A = np.array([
    [3, 1, 1, 4],            # 3r +  s +  t + 4u <= 12
    [1, -3, 2, 3],           #  r - 3s + 2t + 3u <= 7
    [2, 1, 3, -1]            # 2r +  s + 3t -  u <= 10
])
b = np.array([12, 7, 10])
# +
lp = simplex(c, A, b)
for k in ['x', 'slack', 'table', 'max']:
print(k, '\n', lp[k], '\n')
# -
| JUPYTER_NOTEBOOKS/day 99 - simplex.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Aerospike-Notebooks-Readme/Tips" data-toc-modified-id="Aerospike-Notebooks-Readme/Tips-1"><span class="toc-item-num">1 </span>Aerospike Notebooks Readme/Tips</a></span><ul class="toc-item"><li><span><a href="#Learn-about-Jupyter-Notebook" data-toc-modified-id="Learn-about-Jupyter-Notebook-1.1"><span class="toc-item-num">1.1 </span>Learn about Jupyter Notebook</a></span></li><li><span><a href="#Find-and-run-Aerospike-notebook." data-toc-modified-id="Find-and-run-Aerospike-notebook.-1.2"><span class="toc-item-num">1.2 </span>Find and run Aerospike notebook.</a></span></li><li><span><a href="#Access-shell-commands" data-toc-modified-id="Access-shell-commands-1.3"><span class="toc-item-num">1.3 </span>Access shell commands</a></span></li><li><span><a href="#Examine-server-log" data-toc-modified-id="Examine-server-log-1.4"><span class="toc-item-num">1.4 </span>Examine server log</a></span></li><li><span><a href="#View-database-state." data-toc-modified-id="View-database-state.-1.5"><span class="toc-item-num">1.5 </span>View database state.</a></span></li><li><span><a href="#View-cluster-state." data-toc-modified-id="View-cluster-state.-1.6"><span class="toc-item-num">1.6 </span>View cluster state.</a></span></li><li><span><a href="#Next-steps" data-toc-modified-id="Next-steps-1.7"><span class="toc-item-num">1.7 </span>Next steps</a></span></li></ul></li></ul></div>
# -
#
# # Aerospike Notebooks Readme/Tips
#
# Here are some tips and tricks for ease of use and productive experience with Aerospike notebooks.
# <br>
# This notebook requires an Aerospike database running on localhost and that Python and the Aerospike Python client have been installed (`pip install aerospike`). Visit [Aerospike notebooks repo](https://github.com/aerospike-examples/interactive-notebooks) for additional details and the docker container.
# ## Learn about Jupyter Notebook
# The Jupyter Notebook provides "a web-based application suitable for capturing the whole computation process: developing, documenting, and executing code, as well as communicating the results". New to notebooks? Here is [one source](https://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/examples_index.html) to learn more about the Jupyter Notebook.
# ## Find and run Aerospike notebook.
# Visit [Aerospike notebooks repo](https://github.com/aerospike-examples/interactive-notebooks) to find additional Aerospike notebooks. To run another notebook, download the notebook from the repo to your local machine, and then click on File->Open, and select Upload.
# ## Access shell commands
#
# Use the "!" line magic and "%%bash" cell magic to access shell commands. That is, you can access a shell command on any line by prefixing it with a "!", and an entire cell can have bash shell commands if it starts with "%%bash". Here are some examples:
# Accessing shell commands
# !ps
# !whoami
# Start the Aerospike database.
# !asd >& /dev/null
# + language="bash"
# # bash cell
# # Check if the Aerospike database is running.
# pgrep -x asd >/dev/null && echo "Aerospike database is running" || echo "**Aerospike database is not running!**"
# ps -axu | grep asd
# -
# Note: Shell commands are accessible in Java kernel through %sh line magic. However it has limitations. We suggest changing the kernel to Python to use shell commands.
# ## Examine server log
# It is useful to examine the server log. Assuming it is located at /var/log/aerospike/aerospike.log, and you have the permissions, you can run the following to view the last 10 lines of the log. (Adjust the log path to your setting.)
# View the last 10 lines of the log:
# !echo "End of server log:"; tail -10 /var/log/aerospike/aerospike.log
# ## View database state.
#
# The command line tool "aql" can be very handy to examine the data and metadata in the database. For a more complete description of the capabilities, see the [doc](https://www.aerospike.com/docs/tools/aql/index.html). Assuming the database has namespace "test", the following commands can be executed.
# Insert a record in set "demo" in namespace "test" with Primary Key (PK) 1 and a bin or field "testbin"
# with value "hello world!".
# !aql -c "INSERT INTO test.demo (PK, 'testbin') VALUES (1, 'hello world!')"
# View all records in the set.
# !aql -c "SELECT * FROM test.demo"
# Delete the record
# !aql -c "DELETE FROM test.demo WHERE PK = 1"
# !aql -c "SELECT * FROM test.demo"
# ## View cluster state.
# Another useful utility is asadm which can be used to view various aspects of the database cluster. For a more complete description of its capabilities, see the [doc](https://www.aerospike.com/docs/tools/asadm/index.html).
# Show the features enabled in this database.
# !asadm -e "features"
# Display summary info for the cluster
# !asadm -e "summary"
# View the config
# !asadm -e "show config"
# ## Next steps
# Visit [Aerospike notebooks repo](https://github.com/aerospike-examples/interactive-notebooks) to run additional Aerospike notebooks. To run a different notebook, download the notebook from the repo to your local machine, and then click on File->Open, and select Upload.
| notebooks/python/readme_tips.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DonRomaniello/CitibikeDocks/blob/master/TripData.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="dKrcgrhCUpLa"
# # Cleaning and Merging Historical CitiBike Data
#
#
# + [markdown] id="oldMqCzyyj2O"
# CitiBike publishes trip reports every month to an AWS S3 bucket. These reports contain data of all the trips taken
# by CitiBike users, with information like the start times and locations, end times and locations, etc.
#
# These data hold information about the links between stations. Maybe there are some stations that are fed by a small handful of other stations, perhaps each station has a wide reach. In order to be able to query an entire year (or more) of data at once, we need to create a dataset containing multiple months.
#
# ---
#
# *For a clean, code-only version that holds all the values in RAM before writing to disk, [see the other version here.](https://colab.research.google.com/drive/1IeBQ1JlIK4eveEzgd4NJVOCnKck8t0Wn?usp=sharing)*
# + [markdown] id="RPfPSPYuL4T4"
# Notes:
#
# > PEP8 has been followed where I found it appropriate for the purposes of an interactive notebook.
#
#
# * Libraries are imported after the problem they are meant to solve has been introduced. I feel that this better reflects the creative process of tackling this dataset.
#
# * Double line function and class isolation has been ignored between cells. Cells are already separated visually, and text is often interspersed.
#
#
# > While CitiBike has stations on both sides of the Hudson, few (if any) rides originate in one state and end in another. There would be very little incentive to attempt this feat beyond bragging rights, and based on the two sets of trip data published depending on jurisdiction, it does not seem like anyone is doing it. As I live and work in New York City, I will only be focusing on New York.
#
#
# + id="0TPDuWXApJYt"
import requests
import pandas as pd
# + [markdown] id="3xGGW8m5qcXy"
# # Dirty Zips
# + [markdown] id="xIY08Rn1yvDc"
# Unfortunately, some of the data are published as zip files that also contain MacOS special files, which means PANDAS can't simply ingest the zip file as published.
#
# We will use Requests to grab the file from the S3 bucket, BytesIO to keep the zip directory in memory, and ZipFile to work with the zip directory to extract the CSV only.
# + id="wzeoP1gH2hxo"
from io import BytesIO
from zipfile import ZipFile
# + colab={"base_uri": "https://localhost:8080/"} id="rxPQznGlsztx" outputId="1d78e81d-8ff3-4da1-d9f1-8eb8e2d58d46"
# Fetch the August 2021 archive and list its members to see exactly what
# pandas would be asked to ingest.
dirtyZipUrl = 'https://s3.amazonaws.com/tripdata/202108-citibike-tripdata.csv.zip'
# NOTE(review): despite the name, this variable holds the raw zip *bytes*
# returned by the request, not a filename.
dirtyZipFilename = requests.get(dirtyZipUrl).content
# Wrap the bytes in BytesIO so ZipFile can read the archive entirely in memory.
dirtyZipFile = ZipFile( BytesIO(dirtyZipFilename), 'r')
for item in dirtyZipFile.namelist():
  print("File in zip:" + item)
# + [markdown] id="C0QiLCnH2DIN"
# There it is, the stuff that PANDAS doesn't like. The files in the "__MACOSX" directory will cause the PANDAS read_csv() function to throw an exception.
#
# Not all of the published zip directories have this problem, but we should get rid of it if it is in there.
#
# + id="cnbDFrexxe8b"
# Keep only archive members that are real CSV files, skipping the macOS
# AppleDouble ("._") sidecar entries, and take the first match.  Indexing
# with [0] (rather than next()) keeps the original IndexError behavior when
# no CSV member exists.
justCSV = [
    member
    for member in dirtyZipFile.namelist()
    if "._" not in member and ".csv" in member
][0]
# + [markdown] id="orzG8pcU3_94"
# And now we can load the data and make sure it is as expected.
# + colab={"base_uri": "https://localhost:8080/", "height": 394} id="ekjVGeq0FPad" outputId="244f94f5-1a63-43fc-b9b4-562705f8774d"
# Open the selected CSV member straight from the in-memory archive;
# low_memory=False parses the whole file in one pass so dtypes are inferred
# consistently across the file.
tripData = pd.read_csv(dirtyZipFile.open(justCSV), low_memory=False)
tripData.head()
# + [markdown] id="elZppxMZlqpT"
# Great.
#
# We should turn this process into a function that takes the URL of the S3 item as input and returns a pandas DataFrame, because we will be doing this many times.
# + id="9WfuK40xEvq8"
def readDirtyZip(dirtyZipUrl):
    """Download a CitiBike trip-data zip from S3 and return its CSV as a DataFrame.

    Some published archives contain MacOS "__MACOSX"/"._" resource entries
    that make pandas choke on the raw zip, so the first member that is a
    real CSV file is selected explicitly.

    Parameters
    ----------
    dirtyZipUrl : str
        Full URL of a ``*.csv.zip`` object in the ``tripdata`` S3 bucket.

    Returns
    -------
    pandas.DataFrame
        Raw trip data read from the archive's CSV member.
    """
    response = requests.get(dirtyZipUrl)
    # Fail loudly on a bad URL or S3 outage instead of handing ZipFile an
    # HTML error page (the original silently ingested whatever came back).
    response.raise_for_status()
    dirtyZipFile = ZipFile(BytesIO(response.content), 'r')
    # First member that is a CSV and not an AppleDouble ("._") sidecar file.
    csvName = [name for name in dirtyZipFile.namelist()
               if "._" not in name and ".csv" in name][0]
    return pd.read_csv(dirtyZipFile.open(csvName), low_memory=False)
# + [markdown] id="BBQ3P5u7Zcq1"
# # Legacy Data
# + [markdown] id="nysVlDtTKZdZ"
# Before going any further in creating our trip dataset, there are some wrinkles. At some point CitiBike changed the IDs for the all the stations and started referring to the Customer/Subscriber dichotomy as Member/Casual. There are other changes, but for the questions we are going to ask of these data, those are the only germane differences.
#
# The Customer/Subscriber vs Member/Casual changes can be easily mapped.
#
# Graciously, they saw fit to include the old names *and* new names in the JSON feed that provides live information about the system.
#
# This will allow us to construct a dictionary which we can use to rename the old trip data to reflect the current naming paradigm.
#
# Notes:
#
# * Stations that begin with letters include stations in New Jersey, so we will remove them when we make the dictionary.
# * The legacy system used int64 as the datatype for station IDs. The new system uses strings. When constructing the dictionary, the legacy IDs need to be type cast.
#
# + id="QCHD7z9OE0N1"
# Pull the live GBFS station feed and build two lookup tables:
#   stationNameDictionary        : legacy int64 station id -> current short_name
#                                  (short_names containing letters are New
#                                  Jersey stations and are excluded)
#   customerSubscriberDictionary : legacy user-type label -> current label
stationLocationsRequest = requests.get('https://gbfs.citibikenyc.com/gbfs/en/station_information.json')
stationLocationData = stationLocationsRequest.json()
stationLocations = pd.DataFrame(stationLocationData['data']['stations'])
# Boolean mask computed once instead of repeating the str.contains chain.
isNewYork = stationLocations['short_name'].str.contains('[a-zA-Z]+', regex=True) == False
stationNameDictionary = dict(zip(stationLocations[isNewYork].legacy_id.astype('int64'),
                                 stationLocations[isNewYork].short_name))
customerSubscriberDictionary = {'Customer': 'casual', 'Subscriber': 'member'}
# + [markdown] id="7KkKx1-bPXc3"
# We don't need anything except the dictionaries, so we will delete everything else that went into creating the station name dictionary.
# + id="M-32gckvPS_P"
del stationLocationsRequest, stationLocationData, stationLocations
# + [markdown] id="E13_pE8R5c9k"
# Since we are trying to predict the availability of bikes and open docks in the current CitiBike system, the new names will be used to rename old trip station IDs.
#
# The last month that used the legacy IDs appears to be January, 2021. We should test our renaming dictionary on this before proceeding.
# + colab={"base_uri": "https://localhost:8080/", "height": 496} id="nYBkJ1wY8nSa" outputId="369c0e21-1698-45b2-e683-3f85113923df"
# January 2021 is the last month published with legacy station IDs -- use it
# to exercise the renaming dictionary before automating the conversion.
legacyTrips = readDirtyZip('https://s3.amazonaws.com/tripdata/202101-citibike-tripdata.csv.zip')
# map() leaves NaN wherever a legacy ID has no current equivalent (e.g. NJ
# stations excluded from the dictionary), so stale stations can be dropped later.
legacyTrips['start station id'] = legacyTrips['start station id'].map(stationNameDictionary)
legacyTrips['end station id'] = legacyTrips['end station id'].map(stationNameDictionary)
legacyTrips.head()
# + [markdown] id="DrPvm_oqO70Q"
# Looks good. In fact, looks great because station IDs that are not in the dictionary of current stations are replaced with NaN. We can use the PANDAS dropna function to remove them... later. First we will do a little more processing and cleaning.
#
# Some column names have changed in the new era. Spaces have been replaced with underscores in the new data, and the time stamp column names are prepositional phrases.
#
# We are only going to be using trip start times, end times, and the station IDs for the starting stations and end stations, so these are the only ones we will bother to rename.
# + id="em_5fjBPHhgK"
# Mapping from legacy column names to the current schema. Its values also
# double as the list of columns kept in the final dataset.
legacyColumnRename = dict({'starttime': 'started_at',
                           'stoptime': 'ended_at',
                           'start station id': 'start_station_id',
                           'end station id': 'end_station_id',
                           'usertype' : 'member_casual'
                           })
legacyTrips.rename(columns=legacyColumnRename, inplace=True)
# + [markdown] id="qDpdpzm2Tah9"
# Then we can use the column renaming dictionary to cull the unwanted columns from our DataFrame.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="jBkSA9pQTZTS" outputId="a1bd7d31-ae05-41ca-cbc8-b6befe7edd66"
legacyTrips = legacyTrips[legacyColumnRename.values()]
legacyTrips.head()
# + [markdown] id="IKeHbz9nY3Xu"
# Rename the member_casual column according to the new paradigm...
#
# + id="cR1B1rQPrPV4"
legacyTrips.replace({ 'member_casual' : customerSubscriberDictionary}, inplace=True)
# + [markdown] id="xoEp0mVfswAS"
# And finally, drop NaNs...
# + colab={"base_uri": "https://localhost:8080/"} id="7MUZIxazXB89" outputId="081c85ce-3329-4338-d836-cc523497c2b8"
legacyTrips.dropna(inplace=True)
legacyTrips.isna().sum()
# + id="mN2PesCwsguG" outputId="59647a0c-b9cf-4b01-cad0-3f46a599d4b3" colab={"base_uri": "https://localhost:8080/", "height": 204}
legacyTrips.head()
# + [markdown] id="X1ibVALhu_Nw"
# Had we done this before trimming the columns we might have lost desired data if there was missing information in columns that we aren't even going to be using in the final trip dataset.
#
# We should also convert the local timestamps into a Unix timestamp, as minute-by-minute data from the JSON feed are timestamped with the seconds since the epoch. We will use the Python time library, and the Pandas apply function with a lambda.
# + id="cObwSddQkWI-"
import time
# + [markdown] id="f6CZNjTXsCil"
# Here is an example of the time stamp with the calendar date and time as recorded in the published data:
# + colab={"base_uri": "https://localhost:8080/"} id="cq7x5VPCpcRx" outputId="e1003cc7-54d3-4ef0-ddc5-b6e5b437bb6d"
print(legacyTrips.iloc[0,0])
# + [markdown] id="UcYkkPGNsahN"
# We will be using time.strptime to convert a local time into a time struct, and then using time.mktime to convert this into seconds since the epoch.
#
# The legacy data include millisecond information after the decimal place; the more recently published data do not. The first 19 characters of the string contain the date and time in both formats, so slicing to 19 characters works for either.
# + colab={"base_uri": "https://localhost:8080/"} id="0uAGHQi8l-Jf" outputId="41fe233f-fefa-4e1a-a9fd-4764cdb620ad"
print(int(time.mktime(time.strptime(legacyTrips.iloc[0,0][:19],
"%Y-%m-%d %H:%M:%S"))
))
# + [markdown] id="uXon4Qy5kW7o"
# Great.
#
# We can wrap this all into a function that accepts a URL of an S3 resource, checks whether any formatting is needed, performs the changes, and returns a cleaned and formatted DataFrame.
# + id="smbZ-BJtu8Fu"
def _toEpochSeconds(timestamp):
    """Convert a 'YYYY-MM-DD HH:MM:SS[.fff]' local-time string to Unix seconds.

    Only the first 19 characters are parsed: legacy rows carry trailing
    milliseconds, current rows do not, and both share the same prefix.
    """
    return int(time.mktime(time.strptime(timestamp[:19], "%Y-%m-%d %H:%M:%S")))


def legacyCheckFix(s3URL):
    """Download one month of trip data and normalize it to the current schema.

    Legacy months (detected by the old 'start station id' column) get their
    int64 station IDs mapped to current short names (New Jersey stations map
    to NaN and are dropped below), their columns renamed, and the old
    Customer/Subscriber labels translated to casual/member.  Every month is
    then trimmed to the columns of interest, cleaned of NaNs, and its
    timestamps converted to Unix seconds.

    Parameters
    ----------
    s3URL : str
        URL of one month's ``*.csv.zip`` object in the tripdata bucket.

    Returns
    -------
    pandas.DataFrame
        Columns started_at, ended_at, start_station_id, end_station_id,
        member_casual, with timestamps as int seconds since the epoch.
    """
    trips = readDirtyZip(s3URL)
    if 'start station id' in trips.columns:
        # Legacy month: translate station IDs and naming conventions.
        trips['start station id'] = trips['start station id'].map(stationNameDictionary)
        trips['end station id'] = trips['end station id'].map(stationNameDictionary)
        trips.rename(columns=legacyColumnRename, inplace=True)
        trips.replace({'member_casual': customerSubscriberDictionary}, inplace=True)
    # Keep only the renamed columns of interest, then drop rows with any
    # missing data (including NJ stations NaN'd out above).
    trips = trips[legacyColumnRename.values()]
    trips.dropna(inplace=True)
    # Shared helper replaces the two duplicated conversion lambdas.
    trips['started_at'] = trips['started_at'].apply(_toEpochSeconds)
    trips['ended_at'] = trips['ended_at'].apply(_toEpochSeconds)
    return trips
# + [markdown] id="VO8sY8762EiF"
# # The S3 Bucket
# + [markdown] id="PST-T3cuHxqX"
# We don't really want to go and find the URLs manually, so maybe a look at the contents of the bucket is in order. We will use Boto3 to do this, connecting to S3 without a signature to avoid having to configure anything.
# + id="o9Id-yBfHAwR"
# %%capture
# !pip install boto3
import boto3
from botocore import UNSIGNED
from botocore.client import Config
# + id="3M7TfLmjIl9f" colab={"base_uri": "https://localhost:8080/"} outputId="61ab4a38-4c37-4bfb-b451-e27bc35b3820"
s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
s3.list_objects(Bucket='tripdata')['Contents'][0:3]
# + [markdown] id="On6QNxNssAzb"
# It looks like the 'LastModified' values aren't reliably correlated with the time period covered by the collected trip data.
#
# The 'Key' key, which returns the name of a zip directory, is what we want.
#
# If we provide a starting month and year and an ending month, we can get a list of all the URLs that correspond to the published trip data for that time span.
#
# Instead of a function, this time a class makes more sense. Included in the class is a filename generator, more on that later.
# + id="MqjvCtlqaf7a"
class fileListUrls:
    """Map a start/end month range to trip-data S3 URLs and an output filename.

    Months and years may be ints or strings (e.g. values straight from
    ``input()``).  The lowercase class name is kept for backward
    compatibility with existing notebook cells.
    """

    def __init__(self, startMonth, startYear, endMonth, endYear):
        self.startMonth = startMonth
        self.startYear = startYear
        self.endMonth = endMonth  # inclusive
        self.endYear = endYear

    def tripURLs(self):
        """Return the bucket URLs covering the range, oldest month first.

        Each month matches at most one bucket object (New Jersey files are
        prefixed "JC-" and never start with a YYYYMM string).
        """
        tripURLs = []
        monthRange = pd.date_range((str(self.startYear) + '-' + str(self.startMonth)),
                                   (str(self.endYear) + '-' + str(self.endMonth)),
                                   freq='MS').strftime("%Y%m").tolist()
        # Track unmatched months separately: the original code removed items
        # from monthRange while iterating it, which silently skips the
        # element that follows each removal.
        unmatched = set(monthRange)
        for entry in s3.list_objects(Bucket='tripdata')['Contents']:
            key = entry['Key']
            # Iterate a snapshot so removing a match never disturbs iteration.
            for month in list(unmatched):
                if key.startswith(month):
                    tripURLs.append('https://s3.amazonaws.com/tripdata/' + key)
                    unmatched.remove(month)
        tripURLs.reverse()
        return tripURLs

    def nameForCsv(self):
        """Return the Drive output path '/drive/MyDrive/YYYYMM-YYYYMM.csv'."""
        nameForCsv = ('/drive/MyDrive/'
                      + str(self.startYear)
                      + str(self.startMonth).zfill(2)
                      + '-'
                      + str(self.endYear)
                      + str(self.endMonth).zfill(2)
                      + '.csv')
        return nameForCsv
# + [markdown] id="r_Mf8I763Shs"
# Putting it all together, if we provide a starting month and year, an ending month and year, we can grab all of the trip data in that range, clean it, and merge it into one large dataset.
#
# In this example I'm writing the results the base directory in my Google Drive.
# + id="VKVv1NBSHobM"
from google.colab import drive
drive.mount('/drive')
# + [markdown] id="zuf96DirAEEQ"
# # RAM optimized
# + [markdown] id="JgLWkOYRAPEM"
# If RAM is limited, this code writes to disk after each month is processed, appending to the CSV created during the first pass.
#
# It is slow, but is only limited by disk space.
# + id="HPtr93XWQpwP"
# Prompt for the month range, then stream each month straight to disk so
# that only one month's DataFrame is resident in RAM at any time.
urlRange = fileListUrls(input("Start month (integer): "),
                        input("Start year: "),
                        input("End month (integer, inclusive): "),
                        input("End year:"))
csvUrls = urlRange.tripURLs()
# The first month creates the CSV (and writes the header row)...
hotTrips = legacyCheckFix(csvUrls[0])
print("Writing first CSV...")
hotTrips.to_csv(urlRange.nameForCsv())
del hotTrips
# ...and every later month is appended without a header, with each frame
# freed immediately after it is written.
for url in csvUrls[1:]:
  hotTrips = legacyCheckFix(url)
  print('Appending', url, 'to CSV.')
  hotTrips.to_csv(urlRange.nameForCsv(), mode='a', header=False)
  del hotTrips
# + [markdown] id="t4aPTeRLBFBH"
# # Speed optimized
# + [markdown] id="ES2b1sFcBLcw"
# This version keeps the expanding trip DataFrame in RAM before finally writing to the disk.
#
# It is faster, but you could end up losing your progress if you run out of RAM.
# + id="6_MfcfxjBJX0"
# Prompt for the month range, accumulate every month in RAM via concat,
# and write the merged result once at the end.  Faster than the streaming
# version, but progress is lost if memory runs out mid-run.
urlRange = fileListUrls(input("Start month (integer): "),
                        input("Start year: "),
                        input("End month (integer, inclusive): "),
                        input("End year:"))
csvUrls = urlRange.tripURLs()
headTrips = legacyCheckFix(csvUrls[0])
for url in csvUrls[1:]:
  tailTrips = legacyCheckFix(url)
  print('Concatenating', url, "...")
  headTrips = pd.concat([headTrips, tailTrips])
# Single write at the end; index=False keeps the row index out of the file.
print("Writing result to disk...")
headTrips.to_csv(urlRange.nameForCsv(),
                 encoding="utf-8",
                 index=False)
| TripData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="FIagsO2h43OD"
# # `hyperdict` sandbox
# > _Here we will show the various functions, attributes and some magic using the dict-on-steroids `hyperdict`_
# + [markdown] id="4J-RIAZc5Ylx"
# ### Installation
# Use any of the given installation commands on your environment.
# 1 . `pip install hyperdict`
# 2 . `poetry add hyperdict`
# 3 .`!pip install hyperdict`
# + id="eAIjEMWN2R0N" colab={"base_uri": "https://localhost:8080/"} outputId="73f3c645-ee0c-42c4-b4c1-a49b94d404bc"
# !pip install hyperdict
# + [markdown] id="cjUnK10y6CBS"
# ### Importing the Library
# + id="6pFTPrPv2bW6"
import hyperdict as hd
# + [markdown] id="nskcx5SI3NKw"
# ### Introduction
# + id="tSRtyz8g2whe"
# Initializing an object
d = hd.HyperDict()
# + colab={"base_uri": "https://localhost:8080/"} id="aao_cF_t3MbN" outputId="0d9e4d70-d50d-459d-d262-0801f7206dee"
# hyperdict allows multi-setter construct using the hd.each() function
d["name", "age"] = hd.each("Magnus", 32) # O(n) complexity
d
# + colab={"base_uri": "https://localhost:8080/"} id="kzeayBFV32C-" outputId="21f9298c-d0e3-4a25-bb27-ec526581840d"
# Similarly we have a multi-getter construct
d['skills'] = ['chess', 'football']
print(d["age", "skills"]) # O(n) complexity
print(d['age'])
# + id="lVWF--ef36DO" colab={"base_uri": "https://localhost:8080/"} outputId="cfc02cc3-af0c-48a3-8d1a-47b4122f0431"
# For invalid keys, hyperdict returns a NoKey object
print(d['country'])
# this can be changed using the change_no_key()
d.change_no_key('Missing key!')
print(d['country'])
# + colab={"base_uri": "https://localhost:8080/"} id="dBgvzMdgF4iC" outputId="e252ebd9-c214-4c21-c2f4-6538de08e50b"
# keys(), values() and items()
# These are more easily accessed in hyperdict by means of attributes!
print(d.k)
print(d.v)
print(d.i)
# + [markdown] id="Xoy4HxA_9MC6"
# We can see how hyperdict is very flexible with the common functionalities found in other customized dictionaries across the python ecosystem.
#
# But, this is just the beginning! Let's dive deeper and see what hyperdict can actually do.
#
# + [markdown] id="QzCOnciS9CaD"
# ### Diving Deeper
#
# + colab={"base_uri": "https://localhost:8080/"} id="WcB-D8No3_j3" outputId="a3d2532c-cae9-4bed-9bce-0230c16d42b4"
# A multi-getter to retrieve keys when a value is given!
print(d('Magnus'))
print(d('Magnus', 32))
# + colab={"base_uri": "https://localhost:8080/"} id="9Rk64Qc9CRAh" outputId="33a27811-2c04-469d-b1d4-03e3b5de5ff2"
# For invalid values, we return a NoValue object
print(d('Carlsen'))
d.change_no_value('No value!')
print(d('Carlsen'))
# + colab={"base_uri": "https://localhost:8080/"} id="RgLK204c4GyK" outputId="ce2006cb-4997-46c0-f687-abcad3cef241"
# to_hd function - It is a powerful function inspired by javascript's Json formatting
book = "Atomic Habtis"
author = "<NAME>"
about = "Tiny habits makes the biggest difference"
book_details = hd.to_hd(book, author, about)
book_details
# + [markdown] id="4bId_D25D0WP"
# ### Operators in hyperdict
# + colab={"base_uri": "https://localhost:8080/"} id="BRwswU3t4JDC" outputId="f254fbb8-d2fd-4278-c5a3-89bc96aa3bd0"
# the complement operator - useful to swap keys to values.
# WARNING : This works only on Hashable datatypes namely: String, Integer, tuple
alphabet = hd.HyperDict({'A':1, "B": 2, "C": 3})
print(alphabet)
print(~alphabet)
# + colab={"base_uri": "https://localhost:8080/"} id="jo9zY3WP4MOS" outputId="97696f73-a8b4-498f-d201-cad09e97b9fe"
# the copy operator - returns a copy of the dictionary as type 'dict'
print(+alphabet)
type(+alphabet)
# + id="a8NPWemc4PpS" colab={"base_uri": "https://localhost:8080/"} outputId="a517a164-2ce1-47c0-bd60-6fb06985ddd2"
# the clear operator - clear the current hyperdict
print(-d)
d # the object is cleared
# + [markdown] id="DRky6xQQGjKl"
# ### **_Note:_** All the methods that are available with the in-built dictionary in Python are also **available** with `hyperdict`.
# Hyper dict is a mere **extension** of the inbuilt dictionary in Python. The purpose of this sandbox is to demonstrate the numerous additional features of hyperdict and not to show the in-built dictionary methods.
#
# ~ <NAME>.
| tutorials/hyperdict_sandbox.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Basic set operations
#
# - A `set` is defined using curly braces (`{}`) with comma-separated elements
# - Elements can be any hashable Python object
# A literal set of four mammal names; sets are unordered, so the printed
# element order is arbitrary.
mammals = {'Human', 'Whale', 'Dog', 'Cat'}
print(mammals)
# You can add and remove elements. Adding an element that was already in the `set` does nothing.
# add() inserts (a no-op if the element is already present);
# remove() raises KeyError if the element is absent.
mammals.add('Mouse')
mammals.remove('Dog')
print(mammals)
# ## Set theory
#
# The standard mathematical operations of set theory are supported.
#
# - `s1.union(s2)` or `s1 | s2`
# - `s1.intersection(s2)` or `s1 & s2`
# - `s1.difference(s2)` or `s1 - s2`
# - `s1.issubset(s2)` or `s1 < s2`
# - `s1.issuperset(s2)` or `s1 > s2`
# - etc.
#
# +
mammals = {'Human', 'Whale', 'Dog', 'Cat'}
pets = {'Dog', 'Cat', 'Goldfish'}
# Strict-superset test: False here because 'Goldfish' is not in mammals.
print(mammals > pets)
# Union of both sets.
print(mammals | pets)
# -
| Functional_Thinking/Lab/27D-Set.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploratory Data Analysis of Iris
# ## Importing the Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
data=pd.read_csv('iris.csv')
# ## Understanding the data
# five elements from top
data.head(10)
# five elements from end
data.tail(3)
# shape of the data
data.shape
# column names
data.columns
# description of data
data.describe()
# **Observation:**
# * Minimum and maximum value of sepal length is 4.3 and 7.9.
# * Minimum and maximum value of sepal width is 2 and 4.4
# * Minimum and maximum value of petal length is 1 and 6.9
# * Minimum and maximum value of petal width is 0.1 and 2.5
# info about the data
data.info()
# # Plots
# ### Count Plot
#
# Plots the count of given data.
data['species'].value_counts()
sns.countplot(data['species'])
plt.xlabel('SPECIES-->')
plt.ylabel('COUNTS-->')
plt.title('COUNT VS SPECIES')
plt.show()
# **The dataset is balanced as there is no difference between class of species.**
# ### 2-D Scatter Plot
data.plot(kind='scatter',x='sepal_length',y='sepal_width')
plt.xlabel('Sepal Length')
plt.ylabel('Sepal Width')
plt.title('Sepal width VS Sepal Length')
plt.show()
# add some colour to the plots
sns.set_style("whitegrid")
sns.FacetGrid(data, hue="species", height=4) \
.map(plt.scatter, "sepal_length", "sepal_width") \
.add_legend();
plt.xlabel('Sepal Length')
plt.ylabel('Sepal Width')
plt.title('Sepal width VS Sepal Length')
plt.show();
# **Observation:**
# * Setosa can be separated from the other species just by sepal width and sepal length.
# * Versicolor and Virginica cannot be separated as they overlap a lot.
# ### Pair-Plots
#
# The only limitation of pair plots is that its not used for fairly high dimensional data. As the number of plots will be C(N,2), where N is the number of dimensions.
sns.set_style("whitegrid");
sns.pairplot(data, hue="species", height=3);
plt.show()
# **Observation:**
# * Setosa can easily be separated from the other two using any two features.
# * Just by making use of sepal width we cannot distinguish between species of flower.
# ### Histogram
#
# * X axis: window
# * Y axis: number of points in that window
# * The smooth line is the PDF.
# histogram for SEPAL LENGTH
sns.FacetGrid(data,hue="species",height=5) \
.map(sns.distplot,"sepal_length")
plt.title("Histogram for Sepal Length")
plt.xlabel('SEPAL LENGTH')
plt.ylabel('DENSITY')
plt.legend()
plt.show();
# histogram for sepal width
sns.FacetGrid(data,hue="species",height=5) \
.map(sns.distplot,"sepal_width")
plt.title("Histogram for Sepal Width")
plt.xlabel('SEPAL WIDTH')
plt.ylabel('DENSITY')
plt.legend()
plt.show();
# histogram for petal width
sns.FacetGrid(data,hue="species",height=5) \
.map(sns.distplot,"petal_width")
plt.title("Histogram for Petal Width")
plt.xlabel('PETAL WIDTH')
plt.ylabel('DENSITY')
plt.legend()
plt.show();
# histogram for petal length
sns.FacetGrid(data,hue="species",height=5) \
   .map(sns.distplot,"petal_length")
# Fixed copy-paste error: this chart plots petal length, not sepal width.
plt.title("Histogram for Petal Length")
plt.xlabel('PETAL LENGTH')
plt.ylabel('DENSITY')
plt.legend()
plt.show();
# ### PDF and CDF
#
# **Cumulative density function:** Return the percentage that helps us to understand how less it is than a particular value.
# CDF is basically the area under the PDF curve.
# differentiate CDF and you will get PDF.
# +
# plotting the PDF and CDF for sepal length
cnt,bin_edges=np.histogram(data['sepal_length'],bins=10,density=True)
# calculating pdf
pdf=cnt/sum(cnt)
print("cnt: ",cnt)
print("pdf: ",pdf)
print(bin_edges)
# compute CDF
cdf=np.cumsum(pdf)
#print(cdf)
plt.plot(bin_edges[1:],pdf,label="PDF")
plt.plot(bin_edges[1:],cdf,label="CDF")
plt.xlabel("Sepal Length")
plt.ylabel("Probability")
plt.title("PDF and CDF of Sepal Length")
plt.legend()
plt.show()
# +
# plotting the PDF and CDF for sepal width
cnt,bin_edges=np.histogram(data['sepal_width'],bins=10,density=True)
# calculating pdf
pdf=cnt/sum(cnt)
print(pdf)
print(bin_edges)
# compute CDF
cdf=np.cumsum(pdf)
#print(cdf)
plt.plot(bin_edges[1:],pdf,label="PDF")
plt.plot(bin_edges[1:],cdf,label="CDF")
plt.xlabel("Sepal Width")
plt.ylabel("Probability")
plt.title("PDF and CDF of Sepal Width")
plt.legend()
plt.show()
# +
# plotting the PDF and CDF for petal length
cnt,bin_edges=np.histogram(data['petal_length'],bins=10,density=True)
# calculating pdf
pdf=cnt/sum(cnt)
#print(pdf)
print(bin_edges)
# compute CDF
cdf=np.cumsum(pdf)
#print(cdf)
plt.plot(bin_edges[1:],pdf,label="PDF")
plt.plot(bin_edges[1:],cdf,label="CDF")
plt.xlabel("Petal Length")
plt.ylabel("Probability")
plt.title("PDF and CDF of Petal Length")
plt.legend()
plt.show()
# +
# plotting the PDF and CDF for age
cnt,bin_edges=np.histogram(data['petal_width'],bins=10,density=True)
# calculating pdf
pdf=cnt/sum(cnt)
#print(pdf)
#print(bin_edges)
# compute CDF
cdf=np.cumsum(pdf)
#print(cdf)
plt.plot(bin_edges[1:],pdf,label="PDF")
plt.plot(bin_edges[1:],cdf,label="CDF")
plt.xlabel("Petal Width")
plt.ylabel("Probability")
plt.title("PDF and CDF of Petal Width")
plt.legend()
plt.show()
# -
# ### Box Plots
#
# A box plot (or box-and-whisker plot) shows the distribution of quantitative data in a way that facilitates comparisons between variables or across levels of a categorical variable. The box shows the quartiles of the dataset while the whiskers extend to show the rest of the distribution, except for points that are determined to be “outliers” using a method that is a function of the inter-quartile range.
#
# * The mid line is the 50th percentile also the mean known as Q2.
# * The line just above the Q2 is 75th percentile line known as Q3.
# * The line just below the Q2 is the 25th percentile line known as Q1.
# * The extreme two lines are known as the whiskers, max=Q3+1.5XIQR and min=Q1-1.5XIQR
# * IQR=Q3-Q1
# box plot for species and petal length
sns.boxplot(x='species',y="petal_length",data=data,hue="species")
plt.title("Species vs Petal Length")
plt.ylabel("Petal Length")
plt.xlabel("Species")
plt.show()
# box plot for species and petal width
sns.boxplot(x='species',y="petal_width",data=data,hue="species")
# Removed a stale plt.title("Survival Status vs Age") left over from another
# notebook; the correct title below was overwriting it anyway.
plt.title("Species vs Petal Width")
plt.ylabel("Petal Width")
plt.xlabel("Species")
plt.show()
# boxplots for species ans sepal length
sns.boxplot(x='species',y="sepal_length",data=data,hue="species")
plt.title("Species vs Sepal Length")
plt.ylabel("Sepal Length")
plt.xlabel("Species")
plt.show()
# boxplots for species sepal width
sns.boxplot(x='species',y="sepal_width",data=data,hue="species")
plt.title("Species vs Sepal Width")
plt.ylabel("Sepal Width")
plt.xlabel("Species")
plt.show()
# ### Violin Plot
#
# Violin plots are basically the mixture of boxplots and pdfs and make it more simpler.
# violin plot for species and petal length
sns.violinplot( x='species', y="petal_length",data=data[data['species']=='virginica'],hue="species")
#sns.violinplot( x='species', y="petal_length",data=data,hue="species")
plt.title("Species VS Petal Length")
plt.xlabel("Species")
plt.ylabel("Petal Length")
plt.show()
| 1. Exploratory Data Analysis/irisEDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import tmdb api wrapper library
import tmdbsimple as tmdb
# Import JSON
import json
# import time for sleeping and scraping
import time
# Import the usual suspects
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
# %matplotlib inline
pd.set_option('display.max_columns', 500)
# pickle for saving and loading scraped info
import pickle
from timeit import default_timer as timer
# Pandas profiling - great library for analyzing dataframes
import pandas_profiling as pp
# -
# Import api key.
# Use a context manager so the key file handle is closed promptly instead of
# relying on garbage collection (the original `open(...).read()` leaked it).
with open('../api_key', 'rt') as key_file:
    tmdb.API_KEY = key_file.read()
# Load list of json info for all currently valid TV shows.
# The export file has one JSON object per line, so parse it line by line; the
# context manager ensures the file is closed after reading.
with open('../data/raw/tv_series_ids_07_09_2019.json', 'r') as json_file:
    tv_series_json = [json.loads(line) for line in json_file]
# Extract ids into a flat list of TMDB show ids.
tv_series_id = [x['id'] for x in tv_series_json]
# +
# Gather previously scraped show info from the numbered pickle checkpoints,
# reading tv_shows_info_1.pkl, tv_shows_info_2.pkl, ... until one is missing.
tv_shows_info = []
f_num = 0
while True:
    f_num += 1
    pkl_path = '../data/processed/tv_shows/tv_shows_info_{}.pkl'.format(f_num)
    if not os.path.isfile(pkl_path):
        break
    with open(pkl_path, 'rb') as f:
        tv_shows_info.extend(pickle.load(f))
len(tv_shows_info)
# -
# IDs of shows whose details were already saved to disk (skipped below).
already_scraped_id = [x['id'] for x in tv_shows_info]
print(len(already_scraped_id))
print(already_scraped_id[:10])
# +
# Scrape detailed info for every TV show not already saved to disk.
# Results are checkpointed to numbered pickle files every 1000 shows so the
# scrape can be resumed after an interruption.
count = 0
remaining_tv_id = list(set(tv_series_id)-set(already_scraped_id))
print('number of shows to scrape: {}'.format(len(remaining_tv_id)))
# start new empty list to temporarily store results
temp_list = []
skipped_ids = []
start = timer()
for i in remaining_tv_id:
    no_show = True
    attempt = 0
    # Retry each show until it succeeds or has failed too many times.
    while no_show:
        try:
            tv_show = tmdb.TV(i)
            tv_info = tv_show.info()
            temp_list.append(tv_info)
            no_show = False
        # Catch Exception (not a bare except) so Ctrl-C can still interrupt
        # this long-running scrape; API/network errors trigger a retry.
        except Exception:
            time.sleep(1)
            attempt += 1
            # If the same show keeps failing, skip it.
            if attempt > 10:
                skipped_ids.append(i)
                print('skipped: {}'.format(i))
                break
    count += 1
    # Progress report every 100 shows.
    if count % 100 == 0:
        print(len(temp_list))
        end = timer()
        print('{:.2f} seconds'.format(end - start))
        start = timer()
    # Checkpoint: every 1000 shows, dump the batch to the next unused file.
    if count % 1000 == 0:
        f_num = 0
        exists = True
        while exists:
            f_num += 1
            exists = os.path.isfile(
                '../data/processed/tv_shows/tv_shows_info_{}.pkl'.format(f_num))
            if not exists:
                with open('../data/processed/tv_shows/tv_shows_info_{}.pkl'.format(f_num), 'wb') as f:
                    pickle.dump(temp_list, f)
                print('saved to: ../data/processed/tv_shows/tv_shows_info_{}.pkl'.format(f_num))
        temp_list = []
# Once more at the end to catch any remaining results.
if len(temp_list) > 0:
    f_num = 0
    exists = True
    while exists:
        f_num += 1
        exists = os.path.isfile(
            '../data/processed/tv_shows/tv_shows_info_{}.pkl'.format(f_num))
        if not exists:
            with open('../data/processed/tv_shows/tv_shows_info_{}.pkl'.format(f_num), 'wb') as f:
                pickle.dump(temp_list, f)
            print('saved to: ../data/processed/tv_shows/tv_shows_info_{}.pkl'.format(f_num))
# -
# # View Data in Pandas DataFrame
# Flatten the scraped show dicts into a DataFrame for a quick sanity check.
tv_info_df = pd.DataFrame(tv_shows_info)
tv_info_df.shape
tv_info_df.head()
# # Scraping Season Info
# +
# Re-assemble previously scraped season info from the numbered pickle
# checkpoints, stopping at the first missing file number.
tv_seasons_info = []
f_num = 0
while True:
    f_num += 1
    pkl_path = '../data/processed/tv_seasons/tv_seasons_info_{}.pkl'.format(f_num)
    if not os.path.isfile(pkl_path):
        break
    with open(pkl_path, 'rb') as f:
        tv_seasons_info.extend(pickle.load(f))
len(tv_seasons_info)
# +
# Count how many individual seasons have been scraped so far.
num_seasons_scraped = 0
for show in tv_seasons_info:
    num_seasons_scraped += len(show['season_info'])
num_seasons_scraped
# -
# Show IDs that already have season info on disk.
already_scraped_season_id = [x['show_id'] for x in tv_seasons_info]
print(len(already_scraped_season_id))
print(already_scraped_season_id[:10])
print(already_scraped_season_id[-10:])
# Full list of show IDs from the TMDB daily export.
initial_tv_show_id = [x['id'] for x in tv_series_json]
print(len(initial_tv_show_id))
print(initial_tv_show_id[:10])
print(initial_tv_show_id[-10:])
# Map each show id to the list of its season numbers (from the show-level scrape).
tv_season_dict = {x['id']:[y['season_number'] for y in x['seasons']] for x in tv_shows_info}
# Total number of seasons to scrape.
# Note: some shows have none.
sumseasons = 0
for k, v in tv_season_dict.items():
    sumseasons += len(v)
sumseasons
# +
# Scrape season-level info for every show that does not yet have it.
# Results are checkpointed to numbered pickle files every 500 shows.
count = 0
season_count = 0
remaining_tv_id = list(set(initial_tv_show_id)-set(already_scraped_season_id))
# Shuffle so we don't always start with those that have been skipped.
np.random.shuffle(remaining_tv_id)
print('number of shows to scrape: {}'.format(len(remaining_tv_id)))
# start new empty list to temporarily store results
temp_list = []
skipped_ids = []
start = timer()
for i in remaining_tv_id:
    no_show = True
    attempt = 0
    # Retry each show until all of its seasons download, or it has failed
    # too many times.
    while no_show:
        try:
            season_dict = {}
            show_id = i
            season_dict['show_id'] = show_id
            season_nums = tv_season_dict[show_id]
            season_info_list = []
            # Fetch every season of this show.
            for sn in season_nums:
                season_iter = tmdb.TV_Seasons(show_id, sn)
                season_info = season_iter.info()
                season_info_list.append(season_info)
                season_count += 1
            season_dict['season_info'] = season_info_list
            temp_list.append(season_dict)
            no_show = False
        # Catch Exception (not a bare except) so Ctrl-C can still interrupt
        # this long-running scrape; API/network errors trigger a retry.
        except Exception:
            time.sleep(1)
            attempt += 1
            # If the same show keeps failing, skip it.
            if attempt > 10:
                skipped_ids.append(i)
                print('skipped: {}'.format(i))
                break
    count += 1
    # Progress report every 50 shows.
    if count % 50 == 0:
        print(len(temp_list))
        print('seasons: {}'.format(season_count))
        end = timer()
        print('{:.2f} seconds'.format(end - start))
        start = timer()
    # Checkpoint: every 500 shows, dump the batch to the next unused file
    # and reset the per-checkpoint season counter.
    if count % 500 == 0:
        season_count = 0
        f_num = 0
        exists = True
        while exists:
            f_num += 1
            exists = os.path.isfile(
                '../data/processed/tv_seasons/tv_seasons_info_{}.pkl'.format(f_num))
            if not exists:
                with open('../data/processed/tv_seasons/tv_seasons_info_{}.pkl'.format(f_num), 'wb') as f:
                    pickle.dump(temp_list, f)
                print('saved to: ../data/processed/tv_seasons/tv_seasons_info_{}.pkl'.format(f_num))
        temp_list = []
# Once more at the end to catch any remaining results.
if len(temp_list) > 0:
    f_num = 0
    exists = True
    while exists:
        f_num += 1
        exists = os.path.isfile(
            '../data/processed/tv_seasons/tv_seasons_info_{}.pkl'.format(f_num))
        if not exists:
            with open('../data/processed/tv_seasons/tv_seasons_info_{}.pkl'.format(f_num), 'wb') as f:
                pickle.dump(temp_list, f)
            print('saved to: ../data/processed/tv_seasons/tv_seasons_info_{}.pkl'.format(f_num))
# -
# Sanity check: every scraped show should have as many seasons as expected;
# print the id of any show with a mismatch.
for show in tv_seasons_info:
    id_num = show['show_id']
    exp = len(tv_season_dict[id_num])
    actual = len(show['season_info'])
    if exp-actual!=0: print(id_num)
# Number of shows still missing season info.
len(tv_series_json) - len(tv_seasons_info)
# +
# Count the total number of scraped episodes across all shows.
total_eps = 0
for show in tv_seasons_info:
    temp_list = []
    for ssn in show['season_info']:
        temp_list.extend(ssn['episodes'])
    total_eps += len(temp_list)
# -
total_eps
# +
# Build one flat list of episode dicts, tagging each episode with its show_id
# so show-level info can be joined on later.
all_episodes_list = []
for show in tv_seasons_info:
    temp_list = []
    for ssn in show['season_info']:
        for ep in ssn['episodes']:
            ep['show_id'] = show['show_id']
            temp_list.append(ep)
    all_episodes_list.extend(temp_list)
# -
all_episodes_df = pd.DataFrame(all_episodes_list)
# Spot-check one show (id 604) ordered by air date.
all_episodes_df.loc[all_episodes_df.show_id==604].sort_values('air_date')
# Join show-level info onto every episode row; suffixes disambiguate columns
# that exist in both frames (e.g. overview_show vs overview_ep).
merge_shows_episodes_df = tv_info_df.merge(all_episodes_df, how='outer', left_on='id', right_on='show_id', suffixes=('_show', '_ep'))
merge_shows_episodes_df.head()
merge_shows_episodes_df.shape
# Fraction of missing values per column.
merge_shows_episodes_df.isna().sum() / len(merge_shows_episodes_df)
# Columns not useful for analysis (media paths, free-text overviews, etc.).
# Fixed: 'still_path' was listed twice; the duplicate entry has been removed.
columns_to_drop = ['backdrop_path', 'homepage', 'last_episode_to_air',
                   'next_episode_to_air', 'languages', 'overview_show',
                   'seasons', 'overview_ep', 'show_id', 'production_code',
                   'still_path', 'poster_path']
# +
# Drop unneeded columns and keep only English-language shows for export.
export_df = merge_shows_episodes_df.drop(columns=columns_to_drop)
export_df = export_df[export_df.original_language=='en']
# +
# Save the final episode-level DataFrame. The `with` block closes the file on
# exit, so the redundant explicit close() has been removed.
with open('../data/processed/all_episodes.pkl', 'wb') as f:
    pickle.dump(export_df, f)
# -
# Quick API sanity check on a single episode (show 604, season 1, episode 1).
a = tmdb.TV_Episodes(604, 1, 1)
a.external_ids()
a.info()
| notebooks/jss_data_tvdb_api.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# # Pandas, Data Science, and IMQAV
# - Ingest
# - Model
# - Query
# - Analyze
# - Visualize
# ## Application of IMQAV
# - Organization
# - Architecture
# - Set of Tasks
# ## Ingest
# Ingestion is a set of software engineering techniques to adapt high volumes of data that arrive rapidly (often via streaming).
# - Kafka
# - RabbitMQ
# - Fluentd
# - Sqoop
# - Kinesis (AWS)
# ## Model
# Modeling is a set of data architecture techniques to create data storage that is appropriate for a particular domain.
# - Relational
# - MySQL
# - Postgres
# - RDS (AWS)
# - Key Value
# - Redis
# - Riak
# - DynamoDB (AWS)
# - Columnar
# - Cassandra
# - HBase
# - RedShift (AWS)
# - Document
# - MongoDB
# - ElasticSearch
# - CouchBase
# - Graph
# - Neo4J
# - OrientDB
# - ArangoDB
# ## Query
# Query refers to extracting data (from storage) and modifying that data to accommodate anomalies such as missing data.
# - Batch
# - MapReduce
# - Spark
# - Elastic MapReduce (AWS)
# - Batch SQL
# - Hive
# - Presto
# - Drill
# - Streaming
# - Storm
# - Spark Streaming
# - Samza
# ## Analyze
# Analyze is a broad category that includes techniques from computer science, mathematical modeling, artificial intelligence, statistics, and other disciplines.
#
# - Statistics
# - SPSS
# - SAS
# - R
# - Statsmodels
# - SciPy
# - Pandas
# - Optimization and Mathematical Modeling (SciPy and other libraries)
# - Linear, Integer, Dynamic, Programming
# - Gradient and Lagrange methods
# - Machine Learning
# - Batch
# - H2O
# - Mahout
# - SparkML
# - Interactive
# - scikit-learn
#
#
# ## Visualize
# Visualize refers to transforming data into visually attractive and informative formats.
# - matplotlib
# - seaborn
# - bokeh
# - pandas
# - D3
# - Tableau
# - Leaflet
# - Highcharts
# - Kibana
| python-scripts/data_analytics_learn/link_pandas/Ex_Files_Pandas_Data/Exercise Files/00_03/.ipynb_checkpoints/IMQAV-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/erlebach/Bert/blob/master/Trivial_BERsuiT.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="W-1zl5XdYInf" colab_type="text"
# # Trivial BERsuiT - How much trivia does BERT know?
# *by <NAME>*
#
# As I've been doing all of this research into BERT, I've been really curious--just how much *trivia* does BERT know? We use BERT for its impressive knowledge of language, but how many *factoids* are encoded in there along with all of the language understanding?
#
# It turns out, kind of a lot! We're going to look at some fun examples in this post.
#
# Now, BERT can't generate text, so we can't actually ask it a question and have it generate a natural response. *But,* we can still test its knowledge by formatting our questions as "fill-in-the-blank"s.
#
# + id="lsEglLBZJaw1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 126} outputId="33070dc7-eb56-4298-8346-15b9ca53be24"
from google.colab import drive
drive.mount('/content/gdrive')
# + [markdown] id="GaB-b6JkEkeK" colab_type="text"
# # Part 1 - Let's Quiz BERT!
# + [markdown] id="DBDogn_aEod6" colab_type="text"
# The code for having BERT answer questions is down in [Part 2](https://colab.research.google.com/drive/14YZpquVhOo78dFdbH8Fva9CNwLanqyBV#scrollTo=457VPa20fZzY) of this post/notebook, but let's start by looking at some examples!
#
#
# + [markdown] id="sK6ceKsOLXnW" colab_type="text"
# 1. "In ____, <NAME> sailed across the ocean to discover the Americas."
# * *BERT*: "**1492**" - CORRECT
# 2. "The Second Punic War broke out in ___ after Hannibal's attack on Saguntum."
# * **BERT**: "218" - CORRECT
# 3. "The ______ Mountains divided Greece into isolated valleys."
# * **BERT**: "pindus" - CORRECT
# 4. "The Greek gods were said to reside atop _____________ in Greece."
# * **BERT**: "the olympus" - *WRONG*
# * It should be "mount olympus" -- pretty close, though!
# 5. "During the rise of Greek city-states, ____ replaced bronze."
# * **BERT**: "iron" - CORRECT
# 6. "___________ is called the "Father of Medicine"."
# * **BERT**: "hippocrates" - CORRECT
# 7. "During the Second Punic War, Hannibal famously led an army of war _________ across the Alps, although many of them perished in the harsh conditions."
# * **BERT**: "elephants" - CORRECT
# 8. "On December 21, 1864, General Sherman’s famous “March to the Sea” concluded with the capture of ________."
# * **BERT**: "atlanta" - *WRONG*
# * It should be "Savannah", but at least BERT predicted a southern city.
# * Seems like BERT has a pretty strong grasp of world history--let's try some other topics...
# 9. "On dress shirts, button-down, spread and tab are popular types of _______."
# * **BERT**: "button buttons" - *WRONG*
# * Correct answer is "collars".
# 10. "1 + 1 = _"
# * **BERT**: "2" - CORRECT
# 11. "5 + 5 = __"
# * **BERT**: "5" - *WRONG*
# * Ok, so BERT's reading comprehension doesn't include the ability to perform basic math :)
# 12. "If you are having trouble with your computer, you should probably try _________ it."
# * **BERT**: "to to with" - *WRONG*
# * Correct answer is "rebooting". Apparently BERT doesn't know the first thing about providing IT support...
# * BERT gets it right if you give it more help--"If you are having trouble with your computer, you should try turning ______ and back on again.", BERT correctly predicts "it off".
# 13. "The correct spelling of 'mispelled' is '__________'."
# * **BERT**: "mis -led" - *WRONG*
# * BERT came very close; it predicted 2 out of 3 of the tokens correctly: `['mis', '-', '##led']`. The middle token should be `'##spel'`.
# 14. "Super Bowl 50 was an American football game in which the ______________ defeated the Carolina Panthers 24–10 to earn their third Super Bowl title."
# * **BERT**: "dallas steelers" - *WRONG*
# * It was the Denver Broncos. Apparently BERT knows world history better than sports history.
# 15. "The Greek religion was ____________, meaning that they believed in many gods."
# * **BERT**: "polytheistic" - CORRECT
# * That's better! :)
#
#
# + [markdown] id="RA7F01CQR9lN" colab_type="text"
# I took my initial examples from this [set of flash cards](https://quizlet.com/295489702/world-history-fill-in-the-blank-flash-cards/) on world history, so that's why there's a disproportionate number about ancient Greece. I'm hoping you guys will try some of your own and share them!
#
# In the rest of this post, I'll explain why BERT is so good at this task, as well as the details of the implementation.
#
# But before we do that, let's see if we can start some flame wars by asking BERT its opinion on a few very important matters.
# + [markdown] id="MgYjiT0dVGH3" colab_type="text"
# 1. "_________ has the best video game console."
# * **BERT**: "japan"
# 2. "Episode _ is the best of the original Star Wars Trilogy."
# * **BERT**: "iii"
# 3. "I prefer the ________ over the PlayStation 3."
# * **BERT**: "xbox 2"
# * I think BERT meant the 2nd generation Xbox, the "Xbox 360". Of course, it's a very leading question...
# 4. "<NAME> has made many great films, but his best is ____________."
# * **BERT**: "titanic"
# * Really BERT? You're picking the chick-flick over the one where an AI becomes sentient and subdues humanity?!
# 5. "I don't always drink beer, but when I do, I prefer _________."
# * **BERT**: "a and ofs".
# * I don't think BERT knows anything about beer, guys...
# 6. "<NAME> creates helpful illustrations and clear explanations of difficult subjects in ________________ and natural language processing."
# * **BERT**: "computer linguistics"
# * Well, thank you, BERT--that's very kind.
#
# + [markdown] id="ceJoLnUmmrRK" colab_type="text"
# ## Why it Works
#
# + [markdown] id="4k1U8dYXtvSo" colab_type="text"
#
# *The Masked Language Model*
#
# BERT is most exciting because of how well it learns to comprehend language, but clearly it has learned a lot of factoids or "world knowledge" as well!
#
# This isn't surprising, though, given that "fill-in-blank" was exactly what BERT was trained on!
#
# For BERT's "Masked Language Model" (MLM) pre-training task, all of Wikipedia was fed through BERT (in chunks), and roughly *one in every six words* was replaced with the special `[MASK]` token. For each chunk of text, BERT's job was to predict the missing words.
#
# And because Wikipedia was the source for the text, sometimes the masked words would be things like dates, names of people and places, or domain-specific terms. In those cases, to predict the right answer, general language understanding isn't enough. You need to have an education in history, or whichever subject the text is coming from.
# + [markdown] id="7fUpdk3xeSHN" colab_type="text"
# > *Side Note:* BERT was trained on both Wikipedia (800M words) and the "BookCorpus" (2,500M words). I assumed the latter meant Google's collection of scanned books, but it's actually a collection of *self-published eBooks* taken from this [site](https://www.smashwords.com/)! I've shared more on this in the Appendix [here](https://colab.research.google.com/drive/14YZpquVhOo78dFdbH8Fva9CNwLanqyBV#scrollTo=bf_jvfYu8MG-).
#
# + [markdown] id="R6fxZflgo4jR" colab_type="text"
# It does seem like a waste for BERT to learn all of this *knowledge*, much of which probably has no relevance to your specific application. It's important to recognize, though, that a critical part of BERT's pre-training is the size of the corpus--it was trained on a corpus with over 3 billion words.
#
# Sure, it might be better to pre-train BERT on text from your own application area, but only if you have a dataset that's larger than all of Wikipedia!
# + [markdown] id="zvpwnaQWtxJL" colab_type="text"
# ## The Token Count Problem
# + [markdown] id="0wNY1NQdt7Hj" colab_type="text"
# There are a couple caveats here which might limit BERT's usefulness for *actually competing* in a trivia game.
#
# The first we've already mentioned--the question has to be posed as a fill-in-the-blank. Most quiz games instead pose a full question, and then you have to either state the answer or choose it from a list ("multiple choice").
#
# The second issue is that, in order for BERT to accurately fill in the blank, it needs to know *how many tokens are in the answer*. And not just the number of words--*the number of tokens*--because the BERT tokenizer will break any out-of-vocabulary words into multiple subwords.
#
# For example, for the blank in the question, "The correct spelling of 'mispelled' is '__________'.", what's actually passed to BERT is '`[MASK] [MASK] [MASK]`' because the BERT tokenizer breaks "misspelled" into three subwords: `['mis', '##spel', '##led']`.
#
# In general, though, my goal was not to create a trivia-solving bot, but rather to demonstrate that BERT does know a lot of trivia. For that purpose, telling it how many tokens to predict seems like a small enough concession.
#
# + [markdown] id="457VPa20fZzY" colab_type="text"
# # Part 2 - Source Code
#
# + [markdown] id="j6l_-o8Cze6E" colab_type="text"
# In this section I've included my code for implementing the quiz questions. You can try it out on your own questions, and maybe experiment with different models!
# + [markdown] id="gw4HAUbwfXLh" colab_type="text"
# ## Setup
# + [markdown] id="gVq-TuylYRDW" colab_type="text"
# ### Install 'transformers'
# + [markdown] id="f9nhy3PzGQ44" colab_type="text"
# This example uses the `transformers` [library](https://github.com/huggingface/transformers/) by huggingface to interact with BERT, so we'll start by installing the package.
# + id="aQl0MMrOGIup" colab_type="code" outputId="c499a141-4fe6-4bdc-acbe-7a1255224dbc" colab={"base_uri": "https://localhost:8080/", "height": 710}
# !pip install transformers
# + [markdown] id="1WThOUtpYvG-" colab_type="text"
# ### 2. Load Pre-Trained BERT
# + [markdown] id="AaweLnNXGhTY" colab_type="text"
# I decided to use `BERT-large` for this Notebook--it's a *huge* model (24-layers and an embedding size of 1,024), but we won't need to perform any fine-tuning on it for this example, so we might as well use the large variant!
#
# To work with this model, we'll use the [BertForMaskedLM](https://huggingface.co/transformers/model_doc/bert.html?#bertformaskedlm) class from the `transformers` library. This "Masked Language Model" is what Google used to perform "pre-training" on BERT-large, so it's already been fine-tuned for us!
#
# I'm also using the `whole-word-masking` variant of BERT. The original BERT masked individual tokens, which meant that sometimes the masked token was a subword within a word. More recently, the authors modified this task to ensure that all parts of any masked word are selected; this is a more difficult task and it improves the quality of the pre-trained model.
# + id="-Mnv95sX-U9K" colab_type="code" outputId="56ebc18d-62be-41bc-8b98-beacfa24a1a0" colab={"base_uri": "https://localhost:8080/", "height": 166, "referenced_widgets": ["efa37b086ec14cd2ade6f68e590cd30a", "f1c734f379b64b1393f15105023f9838", "<KEY>", "<KEY>", "196f0a7e72e54d2a974c09ecce787881", "fb6e6fe101a144978dac9dccd7160506", "be26777c184b45f09a20838a1a8ee3b9", "<KEY>", "<KEY>", "<KEY>", "c043bd1a9fd04e3e8cbcea7df30dfed5", "b8c4a9684e9a462f87782dac447bbb8a", "54be42eee3ca47bbbb28e9a3a00f918a", "4402aa7f39e04b2a8825f3390fe674c4", "c2e2b56e3fd44d6099e089905e103578", "3e2be56112994495ae34677c11976e73", "79d5b1a20c594a22b6b69f6fc05005e6", "<KEY>", "277ec2780e024cf8801966472f416c95", "24e977d11efa4ae88afbe4c599ce9228", "<KEY>", "<KEY>", "ad172b96028845e7a340f66c0137a302", "d35f4598a920453d80eb810c4e01029d"]}
import torch
from transformers import BertForMaskedLM, BertTokenizer
# Choose the pre-trained checkpoint.
# NOTE(review): per the author's notes, cased generated lots of errors in Q&A.
pre = 'bert-large-cased-whole-word-masking'
#pre = 'bert-large-uncased-whole-word-masking' # correct: 22/72
#model = BertForMaskedLM.from_pretrained('bert-large-uncased-whole-word-masking')
#tokenizer = BertTokenizer.from_pretrained('bert-large-uncased-whole-word-masking')
# Download/load the pre-trained masked-LM head and its matching tokenizer.
model = BertForMaskedLM.from_pretrained(pre)
tokenizer = BertTokenizer.from_pretrained(pre)
# + [markdown] id="MpTmnKgQJ-y0" colab_type="text"
# I also tried out `ALBERT-xxlarge`. Compared to BERT-large, it got some answers wrong and some others right--so it didn't seem to me to be substantially better than BERT-large for this task. I don't have a formal benchmark here, though...
#
# If you decide to try ALBERT, note that ALBERT also uses whole-word masking, along with "n-gram masking", meaning it would pick multiple sequential words to mask out.
# + id="SFQ5f7gv-RBH" colab_type="code" colab={}
#from transformers import AlbertForMaskedLM, AlbertTokenizer
#model = AlbertForMaskedLM.from_pretrained('albert-xxlarge-v1')
#tokenizer = AlbertTokenizer.from_pretrained('albert-xxlarge-v1')
#model = AlbertForMaskedLM.from_pretrained('albert-xxlarge-v2')
#tokenizer = AlbertTokenizer.from_pretrained('albert-xxlarge-v2')
# + id="g8CeLg8jTFyK" colab_type="code" colab={}
# Have the model run on the GPU. Binding the return value to `desc` just
# suppresses the long notebook output; `.to` also moves the model in place.
# NOTE(review): hard-coded 'cuda' fails on CPU-only machines -- confirm runtime.
desc = model.to('cuda')
# + [markdown] id="fPRCwNFc06we" colab_type="text"
# ## Retrieve Questions
# + [markdown] id="OC28IYCmPU32" colab_type="text"
# I've defined a number of questions in a Google Spreadsheet [here](https://docs.google.com/spreadsheets/d/1zN4P-O6sNpATbEy7suAKhwyziWCxQ_XCnypxMzHZFR0/edit#gid=537013301)--currently there are about 50. The [Trivia Question Sources](https://colab.research.google.com/drive/14YZpquVhOo78dFdbH8Fva9CNwLanqyBV#scrollTo=XfgHLMExXyIE) section in the appendix lists some places that I've pulled from.
#
# The Google sheet is publicly viewable, but not editable--if you have more questions to add, send me a link to your own copy of the sheet and I'll pull them in.
#
# + id="dwY2a5otGu8C" colab_type="code" colab={}
import pandas as pd
import gspread
# + id="Y2DJL2WCGu5L" colab_type="code" colab={}
from google.colab import auth
# Even though my sheet is publicly viewable, it seems that you still have to
# authorize `gspread`.
auth.authenticate_user()
from oauth2client.client import GoogleCredentials
# Authorize a gspread client with the notebook's default Google credentials.
gc = gspread.authorize(GoogleCredentials.get_application_default())
# + id="MmyvbixaGu24" colab_type="code" outputId="7ef25890-68c2-4ba6-af09-247912bfff5a" colab={"base_uri": "https://localhost:8080/", "height": 240}
#from google.colab import files
#uploaded = files.upload()
# Open the spreadsheet by file ID.
#spreadsheet = gc.open_by_key('<KEY>')
# Open the spreadsheet by name--only works if the file is already in your drive.
spreadsheet = gc.open("Trivia Questions_Bert")#ge
# Grab the first (and only) sheet.
sheet = spreadsheet.get_worksheet(0)
# Parse into a pandas DataFrame!
df2 = pd.DataFrame(sheet.get_all_records())
# The 'ID' column is there to be used as an index.
df2 = df2.set_index('ID')
# Show the first few rows...
df2.head()
# + [markdown] id="CgVzSvKALONe" colab_type="text"
# ## Functions
# + [markdown] id="wyn9Qz1R5Y62" colab_type="text"
# This section defines the code for answering the questions, and for formatting the questions and answers in a fun way :)
# + [markdown] id="_LRQYRbP2_Pp" colab_type="text"
# ### print_question
#
# This function prints out the question, with the answer replaced by a "blank" (underscores).
# + id="Clo8CZj1EhGH" colab_type="code" colab={}
import textwrap

# Create a text-wrapper to constrain the question text to 80 characters.
wrapper = textwrap.TextWrapper(initial_indent=" ",
                               subsequent_indent=" ", width = 80)

def print_question(q_orig, answer, show_mask = False):
    '''
    Print the question `q_orig` with the `answer` hidden.

    Parameters:
        `q_orig`    - Full question text, with the answer still in place.
        `answer`    - Substring of `q_orig` to hide.
        `show_mask` - If True, show one '[MASK]' per wordpiece of the answer
                      (requires the global `tokenizer`); otherwise show
                      underscores matching the answer's character length.
    '''
    # Verify the answer is actually in the question string!
    if answer not in q_orig:
        print('Error -- answer not found in question!')
        return
    if show_mask:
        # Replace the answer with the correct number of '[MASK]' tokens.
        # Tokenizing is done here (only when needed) so the plain-underscore
        # path does not depend on the tokenizer at all.
        answer_tokens = tokenizer.tokenize(answer)
        hide_str = ' '.join(['[MASK]']*len(answer_tokens))
    else:
        # Replace the answer with underscores.
        hide_str = '_'*len(answer)
    # Replace the answer (with either underscores or mask tokens).
    q_disp = q_orig.replace(answer, hide_str)
    print('==== Question ====\n')
    # Print the question, with the answer removed.
    print(wrapper.fill(q_disp))
    print('')
# + [markdown] id="fwb00ib7N9no" colab_type="text"
# ### predict_answer
#
# + [markdown] id="VhuEe7DVf1Ak" colab_type="text"
# This function uses the BERT MLM model to try and "fill-in-the-blank".
#
# I was glad to see that the MLM model *does* include the weights for the output classifier (which predicts the token).
#
#
#
# + colab_type="code" id="VZ7ZZqlr-Jpu" colab={}
import numpy as np
def predict_answer(q_orig, answer):
    '''
    Apply the BERT Masked LM to the question text to predict the answer tokens.
    Parameters:
        `q_orig` - The unmodified question text (as a string), with the answer
                   still in place.
        `answer` - String containing the portion of the sentence to be masked out.
    Returns:
        (pred_tokens, combined) - the list of predicted token strings and the
        recombined answer string.
    '''
    # Tokenize the answer--it may be broken into multiple subwords.
    answer_tokens = tokenizer.tokenize(answer)
    # Create a sequence of `[MASK]` tokens to put in place of the answer.
    masks_str = ' '.join(['[MASK]']*len(answer_tokens))
    # Replace the answer with mask tokens.
    q_masked = q_orig.replace(answer, masks_str)
    # `encode` performs multiple functions:
    #   1. Tokenizes the text
    #   2. Maps the tokens to their IDs
    #   3. Adds the special [CLS] and [SEP] tokens.
    input_ids = tokenizer.encode(q_masked)
    # Find all indeces of the [MASK] token.
    mask_token_indeces = np.where(np.array(input_ids) == tokenizer.mask_token_id)[0]
    # Debug tracing flag left on by the author; prints all intermediate values.
    to_print = True
    if to_print:
        print("GE: enter predict_answer")
        print("answer: ", answer)
        print("(tokenizer.tokenize): answer_tokens= ", answer_tokens) # There is no CLS
        print("masqks_str= ", masks_str)
        print("q_masked= ", q_masked)
        print("input_ids= ", input_ids)
        print("mask_token_indeces= ", mask_token_indeces)
    # ======== Choose Answer(s) ========
    # Put the model in evaluation mode (disables dropout).
    model.eval()
    # List of tokens predicted by BERT.
    pred_tokens = []
    # Convert inputs to PyTorch tensors
    tokens_tensor = torch.tensor([input_ids])
    # Copy the input to the GPU.
    # NOTE(review): hard-coded 'cuda' -- fails on CPU-only machines.
    tokens_tensor = tokens_tensor.to('cuda')
    # Predict all tokens
    with torch.no_grad():
        # Evaluate the model on the sentence.
        outputs = model(tokens_tensor)
        # Predictions will have shape:
        #  [1 x sentence_length x vocab_size]
        #
        # e.g., torch.Size([1, 18, 30522])
        #
        # For a given word in the input text, the model produces a score for
        # every word in the vocabulary, and the word with the highest score
        # is what we take as the predicted token. Note that the model does
        # this for every word in the input text, not just the [MASK] token...
        predictions = outputs[0]
    print("predictions= ", predictions)
    print("predictions.shape= ", predictions.shape)
    # For each of the mask tokens...
    for masked_i in mask_token_indeces:
        # Get the scores corresponding to the word at position `masked_i` in
        # the input text.
        vocab_scores = predictions[0, masked_i]
        # One score per vocabulary entry (size depends on the checkpoint).
        print("masked_i= ", masked_i, ", vocab_scores= ", vocab_scores)
        print("vocab_scores.shape= ", vocab_scores.shape)
        # Use `argmax` to get the index of the highest score. `vocab_scores` has
        # the same length as the vocabulary, so this index is also the token ID
        # of the highest scoring word.
        predicted_token_id = torch.argmax(vocab_scores).item()
        # Convert the token ID back to a string.
        predicted_token = tokenizer.convert_ids_to_tokens([predicted_token_id])[0]
        print("predicted_token= ", predicted_token)
        # Add the token string to the list.
        pred_tokens.append(predicted_token)
    # ======== Recombine Tokens ========
    # Use the tokenizer to recombine tokens into words.
    combined = tokenizer.convert_tokens_to_string(pred_tokens)
    print("combined= ", combined)
    # Return both the list of token strings and the recombined answer string.
    return (pred_tokens, combined)
# + [markdown] id="Y5-vaDmaMas2" colab_type="text"
# ### print_answer
#
# Prints BERT's answer and whether it's right or wrong. If BERT's answer is wrong, then this prints the correct answer, and the list of tokens predicted by BERT.
# + id="XjSGnisQMcee" colab_type="code" colab={}
def print_answer(answer, pred_answer, pred_tokens):
    '''
    Report BERT's prediction and whether it matches the ground-truth answer.
    On a miss, also show the correct answer and the raw predicted tokens.
    '''
    print('==== BERT\'s Answer ====\n')
    # BERT's output comes back lowercased, so normalize the truth to compare.
    missed = pred_answer != answer.lower()
    if missed:
        print(' "' + pred_answer + '" - WRONG.\n')
        print(' Correct: "' + answer + '"\n')
        print(' Tokens: ', pred_tokens, '\n')
    else:
        print(' "' + pred_answer + '" - CORRECT!')
# + [markdown] id="gSwgTQiL6E8W" colab_type="text"
# ## Examples
# + [markdown] id="lr30vnqG_Qmn" colab_type="text"
# ### Single Question
# + [markdown] id="0yGu-n07Murb" colab_type="text"
# *Ask a question by providing question and answer strings.*
# + colab_type="code" outputId="17f0d640-1ea7-4719-ad6e-86d319abf31e" id="_geNJPD46dbO" colab={"base_uri": "https://localhost:8080/", "height": 710}
# Specify the question as a complete sentence (don't put in the blanks
# yourself), and specify the "answer", the portion of the question which you
# want to be masked out.
text = "<NAME> was the Prime Minister of the United Kingdom from 1940 to 1945, when he led Britain to victory in the Second World War."
answer = "<NAME>"
# Print the question.
print_question(text, answer)
# Predict the answer.
(tokens, pred_answer) = predict_answer(text, answer)
# Print and score the answer.
print_answer(answer, pred_answer, tokens)
# + [markdown] id="QQ1sjQELMon7" colab_type="text"
# *Ask a question from the Google spreadsheet by specifying its ID number.*
# + id="wCsqyxKHLC2-" colab_type="code" outputId="b67798f7-41a9-4d59-a035-366a22416a3b" colab={"base_uri": "https://localhost:8080/", "height": 230}
# Retrieve a question using its ID.
q = df2.loc[9]
text = q['Question']
answer = str(q['Answer']) # Cast to string in case it's a number.
# Print the question.
print_question(text, answer)
# Predict the answer.
(tokens, pred_answer) = predict_answer(text, answer)
# Print and score the answer.
print_answer(answer, pred_answer, tokens)
# + [markdown] id="InuKNh9lINy9" colab_type="text"
# ### Interactive Loop
#
# I created this section for my YouTube video. It lets you iterate through all of the questions in the spreadsheet, answering them one at a time, using two cells.
# + id="k58DC5uS6cSJ" colab_type="code" colab={}
# Create an iterator to go through the questions.
# Run the next 2 cells repeatedly to iterate.
# (Named `question_iter` rather than `iter` to avoid shadowing the builtin.)
question_iter = df2.iterrows()
# + [markdown] id="ZFqnOS1wa12b" colab_type="text"
# *Here's the question...*
# + id="FlqZvyP1Yhi0" colab_type="code" outputId="8d3a7569-d605-4b5b-cab7-bd1ab553940b" colab={"base_uri": "https://localhost:8080/", "height": 106}
# Get the next question.
(i, q) = next(question_iter)
text = q['Question']
answer = str(q['Answer'])  # Cast to string in case it's a number.
# Print out the question.
print_question(text, answer)
# + [markdown] id="C_v_dZN1a5gL" colab_type="text"
# *And here's BERT's answer!*
# + id="dsFLl9M5Y-nu" colab_type="code" outputId="6012b6d2-a5d3-4ba3-ae05-3913cd88767f" colab={"base_uri": "https://localhost:8080/", "height": 70}
# Have BERT predict the answer.
(tokens, pred_answer) = predict_answer(text, answer)
# Print BERT's answer, and whether it got it right!
print_answer(answer, pred_answer, tokens)
# + id="z-q-lHwMO0Bo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="61af4a80-d525-4105-d943-b501ae37e6a3"
# Run through all the questions and answers, and compute the fraction of
# right versus wrong answers.
# (Renamed `iter` to `question_iter` to avoid shadowing the builtin.)
question_iter = df2.iterrows()
total = 0
total_correct = 0
for (i, q) in question_iter:
    text = q['Question']
    answer = str(q['Answer'])  # Cast to string in case it's a number.
    # Print out the question.
    #print_question(text, answer)
    (tokens, pred_answer) = predict_answer(text, answer)
    #print_answer(answer, pred_answer, tokens)
    # Predictions come back lowercased; compare against the lowercased truth.
    total_correct += (answer.lower() == pred_answer)
    total += 1
print("total correct= ", total_correct)
print("total nb questions= ", total )
# uncased Bert: correct: 22/72
# cased Bert: correct: 15/72
# + [markdown] id="h8tRAKNxRsD0" colab_type="text"
# # GE, 2020-05-05,10.40am
# The question arises: which of Bert's layers and which tokens are being used in the downstream task? I would like to experiment with fine-tuning Bert based on the last layer, and with tokens other than CLS. First, I must figure out how the current notebook uses Bert data.
# + [markdown] id="h0xzZfZhN5vU" colab_type="text"
# ### BERT's Opinions
# + [markdown] id="L3J13z4taHDN" colab_type="text"
# To try and fabricate BERT's opinions, I ran it with some opinionated statements.
#
# The token count problem is an issue here--the number of tokens in the answer might force BERT to pick a particular answer.
#
# To combat this, I ran each statement multiple times with several possible answers to see if the token count changed BERT's answer. BERT seemed to be pretty consistent in its choices, though :)
# + id="uEdDlvMXN7tM" colab_type="code" outputId="5036c7de-d6e1-48f7-a1b9-70fdbcaf6bce" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# List of (question, answer) pairs.
pairs = [
    ("Microsoft has the best video game console.", "Microsoft"),
    ("Sony has the best video game console.", "Sony"),
    ("Nintendo has the best video game console.", "Nintendo"),
    ("I prefer the Xbox One over the PS4.", "Xbox One"),
    ("I prefer the Xbox 360 over the PlayStation 3.", "Xbox 360"),
    ("<NAME> has made many great films, but his best is Terminator 2.", "Terminator 2"),
    ("<NAME> has made many great films, but his best is Avatar.", "Avatar"),
    ("<NAME> has made many great films, but his best is Titanic.", "Titanic"),
    ("I don't always drink beer, but when I do, I prefer <NAME>quis.", "Dos Equis"),
    ("I don't always drink beer, but when I do, I prefer Stella Artois.", "Stella Artois"),
    ("Episode V is the best of the original Star Wars Trilogy.", 'V'),
    ("Episode IV is the best of the original Star Wars Trilogy.", 'IV'),
    ("Episode VI is the best of the original Star Wars Trilogy.", 'VI'),
    ("The acronymn 'GIF', which stands for Graphics Interchange Format, should be pronounced “jif”, like the brand of peanut butter.", "jif"),
    ("<NAME> creaties helpful illustrations and clear explanations of difficult subjects in machine learning and natural language processing.", "machine learning"),
]
# For each opinionated statement: show it, let BERT fill in the blank,
# then display and score the prediction.
for text, answer in pairs:
    print_question(text, answer)
    (tokens, pred_answer) = predict_answer(text, answer)
    print_answer(answer, pred_answer, tokens)
# + [markdown] id="Pd7ZGGssrGOu" colab_type="text"
# # Part 3 - Appendix
# + [markdown] id="XfgHLMExXyIE" colab_type="text"
# ## Trivia Question Sources
#
# + [markdown] id="koIjB88Y1w8U" colab_type="text"
# I had a hard time finding free trivia questions in an easily downloadable format. On top of that, almost all of the questions I've come across would require re-wording to put them in "fill-in-the-blank" format.
#
# Here are some interesting sources that I looked at, though, if you want to help expand the dataset!
#
# **Reddit Post & Spreadsheets**
#
# * I found this [reddit post](https://www.reddit.com/r/trivia/comments/3wzpvt/free_database_of_50000_trivia_questions/), complaining about the difficulty of finding free trivia questions.
# * The author compiled a Google spreadsheet totalling 50k trivia questions [here](https://docs.google.com/spreadsheets/d/0Bzs-xvR-5hQ3SGdxNXpWVHFNWG8/edit#gid=878197345).
# * This spreadsheet includes questions from the shows *Who Wants to be a Millionaire?* and *Are You Smarter Than a Fifth Grader?*.
# * It also includes a sheet named 'Trivia' which I think is a compilation of the other sources.
#
# **Jeopardy**
#
# * This site has an [archive](http://www.j-archive.com/showgame.php?game_id=3447) of all of the Jeopardy boards from the television show. The Jeopardy questions would require careful re-wording, and generally look to be very difficult!
#
# **Quizlet**
#
# * This site has free quiz questions, though not in the form that you could download easily. I took my initial examples from this set of fill-in-the-blank [flash cards](https://quizlet.com/295489702/world-history-fill-in-the-blank-flash-cards/) on world history.
#
# **Wikipedia**
#
# * Since BERT was trained on Wikipedia, taking text directly from Wikipedia seems like cheating, but maybe it's still valid to see how much knowledge BERT retained.
# * I found out there's a keyboard shortcut on Wikipedia for walking to a random article... While on Wikipedia, press `Alt + Shift + X`. You'll end up with some pretty obscure trivia this way!
# + [markdown] id="bf_jvfYu8MG-" colab_type="text"
# ## BookCorpus
#
# + [markdown] id="2MVkSVrx08PO" colab_type="text"
# From the Appendix of the original [BERT paper](https://arxiv.org/pdf/1810.04805.pdf):
# > "BERT is trained on the BooksCorpus (800M words) and Wikipedia (2,500M
# words)".
#
# With BERT coming from Google, I always just assumed that "BookCorpus" referred to training on Google's massive "Google Books" library (which you can browse from https://books.google.com).
#
# Turns out that's completely wrong. **BookCorpus** (not BooksCorpus) comes from the following paper:
#
# * *Aligning Books and Movies: Towards Story-like Visual Explanations by Watching Movies and Reading Books* ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Zhu_Aligning_Books_and_ICCV_2015_paper.pdf))
# * First Author: <NAME>, University of Toronto
# * Published ~2015
#
# Here's the description of the dataset in the paper (emphasis added):
#
# > **BookCorpus**. In order to train our sentence similarity
# model we collected a corpus of 11,038 books from the web.
# These are **free books written by yet unpublished authors.**
# We only included books that had more than 20K words
# in order to filter out perhaps noisier shorter stories. The
# dataset has books in 16 different genres, e.g., Romance
# (2,865 books), Fantasy (1,479), Science fiction (786), etc.
# Table 2 highlights the summary statistics of our corpus.
#
# Table 2, re-created from the paper.
#
# | Property | Value |
# |--------------------------------|-------------|
# | # of books | 11,038 |
# | # of sentences | 74,004,228 |
# | # of words | 984,846,357 |
# | # of unique words mean | 1,316,420 |
# | # of words per sentence median | 13 |
# | # of words per sentence | 11 |
#
# There is a parallel paper by the same authors, *Skip-Thought Vectors* ([pdf](https://arxiv.org/pdf/1506.06726.pdf)). It contains a couple small extra details:
# * They offer one more category: "Teen (430)"
# * "Along with narratives, books contain dialogue, emotion and a wide range of interaction between characters".
#
# The website for the BookCorpus project is [here](https://yknzhu.wixsite.com/mbweb), but they no longer host or distribute this dataset.
#
# Instead, they say that the text was gathered from this site: https://www.smashwords.com/, and suggest that you gather your own dataset from there. I found a GitHub repo for doing just that [here](https://github.com/soskek/bookcorpus)--not a lot of activity, though.
#
| Trivial_BERsuiT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
class Pyramid:
    """A semigraphic pyramid: `height` centered levels of block characters.

    The height is validated through a property setter; width is derived.
    Instances compare equal when they have the same height. Note that
    defining __eq__ makes the class unhashable (implicit __hash__ = None),
    which is appropriate since `height` is mutable.
    """

    block_shape = 'X'  # '\N{FULL BLOCK}'

    def __init__(self, height):
        # Goes through the property setter, so the value is validated.
        self.height = height

    def __repr__(self):
        return f'Pyramid(height={self._height})'

    @property
    def height(self):
        """Number of levels (a positive integer)."""
        return self._height

    @height.setter
    def height(self, value):
        h = int(value)
        if h < 1:
            raise ValueError('height must be an integer >= 1')
        self._height = h

    @property
    def width(self):
        """Width in characters of the base (widest) level."""
        return self._height * 2 - 1

    def levels(self):
        """Yield each level string, centered on the base width, top-down."""
        for i in range(self._height):
            level = Pyramid.block_shape * (2 * i + 1)
            yield level.center(self.width)

    def __str__(self):
        return '\n'.join(self.levels())

    def draw(self):
        print(self)

    def __eq__(self, other):
        # Return NotImplemented (not False) for foreign types so Python
        # can fall back to the reflected comparison on `other`.
        if type(self) is not type(other):
            return NotImplemented
        return self._height == other._height
# Inspect the class namespace (methods, properties, class attributes).
Pyramid.__dict__
p = Pyramid(4)
print(p)
p.height
p.height = 7
print(p)
# int(None) raises TypeError, so the setter rejects None before the < 1 check.
p.height = None
p.width
print(p)
# -2 converts fine with int() but fails the h < 1 validation -> ValueError.
p.height = -2
print(p)
p.height = None
print(p)
# ## Named tuple
# A plain tuple has no field names: position is the only handle on the data.
sp = (23.0, 46.0) # find lat > long
lat, long = sp
lat
long
# +
from collections import namedtuple
Coord = namedtuple('Coord', ['lat', 'long'])
# -
sp = Coord(23.0, 46.0)
sp
# Named access, positional access, and unpacking all work on a namedtuple.
sp.lat
sp[0]
lat, long = sp
lat
# namedtuple instances are immutable: assigning to a field raises AttributeError.
sp.lat = 4
sp = dict(lat=23, long=46)
sp
# ## Data classes
# +
from dataclasses import dataclass
@dataclass
class Coordinate:
    # Field annotations drive the generated __init__/__repr__/__eq__;
    # they are NOT enforced at runtime.
    lat: float
    long: float
# -
sp = Coordinate(23, 46)
sp
# Type hints are not validated: string arguments are accepted silently.
xy = Coordinate('x', 'y')
xy
| experiments/02-classes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Abstract
#
# Our goal here is to create the dictionary of `Keywords_mapping_abstarct`, we go over the papers in the training set. For each paper, we first find the set of keywords (see below for the details of finding the keywords) of the abstract. Let’s say the normalized citations for this paper is $x$ and the abstract of this paper has $n$ keywords. Then, we update the `keyword_score` for each keyword of the abstract of this paper by adding $\frac{x}{n}$.
#
# ## Setup
# +
import pandas as pd
import numpy as np
import json
import rake
import nltk
import unicodedata
import operator
from nltk.tokenize import RegexpTokenizer
from textblob import TextBlob
def abstract_preprocess(text):
    """Normalize a raw abstract string for keyword extraction.

    Steps: replace NaN abstracts (which pandas yields as floats) with the
    placeholder 'a', strip a known mis-encoded ellipsis marker, drop
    straight and curly quote characters, fold accented characters to
    ASCII (Python 2 unicode handling), and return the stripped,
    lower-cased text.
    """
    if isinstance(text, float):  # isinstance, not type() ==: also covers float subclasses
        text = 'a'
    if '\\xc2\\xa0\\xe2\\x80\\xa6' in text:
        text = text.replace("\\xc2\\xa0\\xe2\\x80\\xa6", '')
    text = text.replace('"','').replace('“','').replace('”','').replace('“','').replace('”','')
    # Python 2: decode bytes to unicode, then strip non-ASCII via NFKD.
    text2 = unicode(text, "utf8")
    text = unicodedata.normalize('NFKD',text2).encode('ascii','ignore')
    text_stripped_lower = text.strip().lower()
    return text_stripped_lower
# -
# ## Extracting keywords:
#
# The critical part of this task would be the way that we extract the keywords. To extract the keywords, form abstract, we use three different methods:
# * NLTK library,
# * TextBlob library,
# * RAKE (rapid automatic keyword extraction) algorithm.
#
# Following, we describe how the keywords are extracted in each of the mentioned techniques:
# ### Extract keywords based on NLTK library
#
# Natural Language Toolkit or NLTK is one of the most suitable and well known natural language processing libraries in the Python programming language. We already became familiar with this library through the assignments this semester. One of the simplest ideas for keyword extraction is just looking at the nouns in the abstract. To do so, we first break the abstract into sentences. Then we use Python’s NLTK library features for sentence tokenizing and POS tagging. In this way, we would have tokens and their tags for each sentence in the abstract. The list of POS tags is as follows:
#
# * CC coordinating conjunction
# * CD cardinal digit
# * DT determiner
# * EX existential there (like: “there is” … think of it like “there exists”)
# * FW foreign word
# * IN preposition/subordinating conjunction
# * JJ adjective ‘big’
# * JJR adjective, comparative ‘bigger’
# * JJS adjective, superlative ‘biggest’
# * LS list marker 1)
# * MD modal could, will
# * NN noun, singular ‘desk’
# * NNS noun plural ‘desks’
# * NNP proper noun, singular ‘Harrison’
# * NNPS proper noun, plural ‘Americans’
# * PDT predeterminer ‘all the kids’
# * POS possessive ending parent’s
# * PRP personal pronoun I, he, she
# * PRP\$ possessive pronoun my, his, hers
# * RB adverb very, silently,
# * RBR adverb, comparative better
# * RBS adverb, superlative best
# * RP particle give up
# * TO, to go ‘to’ the store.
# * UH interjection, errrrrrrrm
# * VB verb, base form take
# * VBD verb, past tense took
# * VBG verb, gerund/present participle taking
# * VBN verb, past participle taken
# * VBP verb, sing. present, non-3d take
# * VBZ verb, 3rd person sing. present takes
# * WDT wh-determiner which
# * WP wh-pronoun who, what
# * WP\$ possessive wh-pronoun whose
# * WRB wh-abverb where, when
#
#
# Among these tags we choose “nouns” as the keywords of that sentence. Intuitively, it makes more sense that the keywords are from the nouns in compare with verbs, adjectives, conjunctions, etc.
#
# Below is the written function for the keyword extraction using NLTK’s tokenizer and POS tager:
def keywords_nltk(pure_text):
    """Extract candidate keywords: every noun found via NLTK POS tagging."""
    noun_tags = {'NN', 'NNP', 'NNS', 'NNPS'}  # singular/plural, common/proper
    nouns = []
    for sentence in nltk.sent_tokenize(pure_text):
        for word, pos in nltk.pos_tag(nltk.word_tokenize(str(sentence))):
            if pos in noun_tags:
                nouns.append(word)
    return nouns
# Now, if we input an abstract to this function, the result would be as follow:
# Demo: noun keywords extracted from a sample paper abstract.
keywords = keywords_nltk('The coordinate descent (CD) method is a classical optimization algorithm that has seen a revival of interest because of its competitive performance in machine learning applications. A number of recent papers provided convergence rate estimates for their deterministic (cyclic) and randomized variants that differ in the selection of update coordinates. These estimates suggest randomized coordinate descent (RCD) performs better than cyclic coordinate descent (CCD), although numerical experiments do not provide clear justification for this comparison. In this paper, we provide examples and more generally problem classes for which CCD (or CD with any deterministic order) is faster than RCD in terms of asymptotic worst-case convergence. Furthermore, we provide lower and upper bounds on the amount of improvement on the rate of CCD relative to RCD, which depends on the deterministic order used. We also provide a characterization of the best deterministic order (that leads to the maximum improvement in convergence rate) in terms of the combinatorial properties of the Hessian matrix of the objective function.')
print(keywords)
# ### Extract keywords based on TextBlob library
#
# TextBlob is another Python library that can be used for text processing. It is a relatively new Python NLP toolkit which stands on the shoulders of giants like NLTK and Pattern, and provides text mining, text analysis and text processing modules for Python developers. It provides a simple API for diving into common NLP tasks such as part-of-speech tagging, noun phrase extraction, sentiment analysis, classification, translation, and more. The same idea of POS tagging and extracting nouns as keywords is also implemented here using the TextBlob library.
#
# Following you can see the written function:
def keywords_textblob(pure_text):
    """Extract candidate keywords: tokens whose TextBlob POS tag starts with 'N' (nouns)."""
    nouns = []
    for word, pos in TextBlob(pure_text).pos_tags:
        if pos[0] == 'N':
            nouns.append(word)
    return nouns
# Here is the result if we input the same abstract to this function:
# Demo: same sample abstract, TextBlob noun extraction.
keywords = keywords_textblob('The coordinate descent (CD) method is a classical optimization algorithm that has seen a revival of interest because of its competitive performance in machine learning applications. A number of recent papers provided convergence rate estimates for their deterministic (cyclic) and randomized variants that differ in the selection of update coordinates. These estimates suggest randomized coordinate descent (RCD) performs better than cyclic coordinate descent (CCD), although numerical experiments do not provide clear justification for this comparison. In this paper, we provide examples and more generally problem classes for which CCD (or CD with any deterministic order) is faster than RCD in terms of asymptotic worst-case convergence. Furthermore, we provide lower and upper bounds on the amount of improvement on the rate of CCD relative to RCD, which depends on the deterministic order used. We also provide a characterization of the best deterministic order (that leads to the maximum improvement in convergence rate) in terms of the combinatorial properties of the Hessian matrix of the objective function.')
print(keywords)
# As you can see, the extracted keywords are the same as those we extracted with the NLTK library. This result is actually expected, because we used the same method of “noun” extraction with a different library to verify the result.
# ### Extract keywords based on RAKE algorithm
# Another technique for keyword extraction is using an algorithm called RAKE which is an acronym for Rapid Automatic Keyword Extraction. This algorithm has three main components:
# * Candidate selection: in this stage all potential keywords including nouns, phrases, terms, concepts, etc. are selected.
# * Properties calculation: For each candidate, some properties are calculated that show a potential candidate might be a keyword. For example, if a word occurs a few times in the abstract, it might be a keyword. Also, it is important how many times that word co-occurs with other words.
# * Scoring and selecting keywords: all candidates can be scored by combining the mentioned properties into a formula.
#
# Then, a score or probability threshold is used to select final keywords.
#
# Following you can see the function that is used for keyword extraction using RAKE algorithm. As it is explained in the comments, there are three main values that should be identified. The minimum number of characters, the maximum number of words per phrase, and the frequency of the word in the abstract. The “SmartStoplist” is a list of words that can be used to split text into important and unimportant words. For example, “the”, “after”, “at”, “again”, etc. cannot be keyword and they couldn’t be important.
def keywords_rake(pure_text):
    """Extract multi-word keyword phrases with the RAKE algorithm."""
    # Parameters: words have at least 3 characters, phrases at most 4 words,
    # and a keyword must occur in the text at least 2 times.
    extractor = rake.Rake("SmartStoplist.txt", 3, 4, 2)
    scored_phrases = extractor.run(pure_text)
    # run() returns (phrase, score) pairs; keep only the phrases.
    return [phrase for phrase, _score in scored_phrases]
# Here is the output of this function for the same input abstract:
#
# Demo: same sample abstract, RAKE phrase extraction.
keywords = keywords_rake('The coordinate descent (CD) method is a classical optimization algorithm that has seen a revival of interest because of its competitive performance in machine learning applications. A number of recent papers provided convergence rate estimates for their deterministic (cyclic) and randomized variants that differ in the selection of update coordinates. These estimates suggest randomized coordinate descent (RCD) performs better than cyclic coordinate descent (CCD), although numerical experiments do not provide clear justification for this comparison. In this paper, we provide examples and more generally problem classes for which CCD (or CD with any deterministic order) is faster than RCD in terms of asymptotic worst-case convergence. Furthermore, we provide lower and upper bounds on the amount of improvement on the rate of CCD relative to RCD, which depends on the deterministic order used. We also provide a characterization of the best deterministic order (that leads to the maximum improvement in convergence rate) in terms of the combinatorial properties of the Hessian matrix of the objective function.')
print(keywords)
# The selected keywords using RAKE algorithm looks more reasonable rather than just “noun” selection. Firstly, it contains multiple-word phrases that could really be a key concept in the paper. Moreover, from the selected keywords, i.e., “CCD (cyclic coordinate descent)” and “RCD (randomized coordinate descent)” we can have the intuition that the keywords are selected more smartly. To summarize, RAKE is a simple keyword extraction library which focuses on finding multi-word phrases containing frequent words.
# For the rest of this notebook, we choose NLTK library ($mode=1$). For TextBlob library, use $mode = 2$ and for RAKE algorithm, use $mode=3$.
mode = 1
# ## Create the dictionary of keywords and their scores for training and test datesets
# ### Import the training data
# +
# Load the preprocessed training abstracts (expects columns 'Abstract'
# and 'citations_average' — see usage below).
df_train = pd.read_csv("./data/data_processed/Abstract_training.csv")
df_train.head()
# +
# Build `dic_keywords`: keyword -> accumulated normalized-citation score.
# A paper with citations_average x and N extracted keywords contributes
# x/N to the score of each of those keywords.
dic_keywords = {}  # "keyword : citation"
for i in range(0, len(df_train)):
    if i % 500 == 0:
        print(i, len(df_train))  # progress indicator
    pure_text = abstract_preprocess(df_train.Abstract[i])
    if mode == 1:
        keywords = keywords_nltk(pure_text)      # NLTK POS tagging, nouns only
    elif mode == 2:
        keywords = keywords_textblob(pure_text)  # TextBlob POS tagging, nouns only
    elif mode == 3:
        keywords = keywords_rake(pure_text)      # RAKE keyword phrases
    else:
        print('Wrong mode!!!')
    N = len(keywords)
    for word in keywords:
        # dict.get replaces the `word in dic_keywords.keys()` branch: one
        # lookup instead of a keys() scan plus a second lookup. Division
        # stays inside the loop, so N == 0 never divides (loop is empty).
        dic_keywords[word] = dic_keywords.get(word, 0) + df_train.citations_average[i] / N
# Persist the scores for the active extraction backend (one file per mode,
# replacing the three duplicated if/elif dump branches).
_dict_paths = {
    1: './data/data_processed/json/dic_keywords_nltk.json',
    2: './data/data_processed/json/dic_keywords_textblob.json',
    3: './data/data_processed/json/dic_keywords_rake.json',
}
if mode in _dict_paths:
    with open(_dict_paths[mode], 'w') as fp:
        json.dump(dic_keywords, fp)
else:
    print('Wrong mode!!!')
# -
# ### Score calculation function
def predict(dic_keywords, my_string, mode):
    """Score an abstract as the mean keyword_score of its recognized keywords.

    Extracts keywords from `my_string` with the backend selected by `mode`
    (1 = NLTK, 2 = TextBlob, 3 = RAKE), sums the scores of those present in
    `dic_keywords`, and returns the sum averaged over the number of matched
    keywords. Returns 0 when nothing matches.
    """
    pure_text = abstract_preprocess(my_string)
    if mode == 1:
        keywords = keywords_nltk(pure_text)      # NLTK POS tagging, nouns only
    elif mode == 2:
        keywords = keywords_textblob(pure_text)  # TextBlob POS tagging, nouns only
    elif mode == 3:
        keywords = keywords_rake(pure_text)      # RAKE keyword phrases
    else:
        print('Wrong mode!!!')
    matched = 0  # number of extracted keywords found in the dictionary
    score = 0
    for word in keywords:
        if word in dic_keywords:  # direct membership test, no .keys() scan
            score += dic_keywords[word]
            matched += 1
    if score == 0:
        return 0
    return score / matched  # average over matched keywords
# Load the saved keyword-score dictionary for the active mode.
_score_files = {
    1: './data/data_processed/json/dic_keywords_nltk.json',
    2: './data/data_processed/json/dic_keywords_textblob.json',
    3: './data/data_processed/json/dic_keywords_rake.json',
}
if mode in _score_files:
    with open(_score_files[mode]) as f:
        data_dict = json.load(f)
else:
    print('Wrong mode!!!')
# Below, we find the top-10 keywords of the abstract sorted based on their `keyword_score`:
# +
# Show the ten highest-scoring keywords for the active mode's dictionary.
# (The old `sorted_x` intermediate was computed but never used — removed.)
df = pd.DataFrame(
    {'first_column': data_dict.keys(),
     'second_column': data_dict.values()
     })
df.sort_values(['second_column'], ascending=[False])[0:10]
# +
# Top-10 keywords according to the TextBlob dictionary.
# NOTE(review): this rebinds `data_dict`, so later cells that use
# `data_dict` see the most recently loaded dictionary, not `mode`'s.
# (Unused `sorted_x` intermediate removed.)
with open('./data/data_processed/json/dic_keywords_textblob.json') as f:
    data_dict = json.load(f)
df = pd.DataFrame(
    {'first_column': data_dict.keys(),
     'second_column': data_dict.values()
     })
df.sort_values(['second_column'], ascending=[False])[0:10]
# +
# Top-10 keywords according to the RAKE dictionary.
# NOTE(review): this rebinds `data_dict`, so later cells that use
# `data_dict` see the most recently loaded dictionary, not `mode`'s.
# (Unused `sorted_x` intermediate removed.)
with open('./data/data_processed/json/dic_keywords_rake.json') as f:
    data_dict = json.load(f)
df = pd.DataFrame(
    {'first_column': data_dict.keys(),
     'second_column': data_dict.values()
     })
df.sort_values(['second_column'], ascending=[False])[0:10]
# -
# ### Prediction on training
#
# Below we use our extracted keywords to predict citations for the papers in the training set. This may take a few minutes.
# Score every training abstract with the keyword dictionary
# (slow: keywords are re-extracted per row).
df_train['predicted_citations'] = df_train['Abstract'].apply(lambda x: predict(data_dict, x, mode))
df_train.head()
# ### Calculate correlation between citations_average and predicted_citations
df_train.citations_average.corr(df_train.predicted_citations)
# NOTE(review): fillna runs after corr; pandas corr skips NaN pairs, so the order is harmless.
df_train.fillna(0, inplace=True)
# ### Save the training data with predicted values
# Write the training-set predictions to a mode-specific CSV.
_train_outputs = {
    1: './data/data_processed/Abstract_training_predicted_nltk.csv',
    2: './data/data_processed/Abstract_training_predicted_textblob.csv',
    3: './data/data_processed/Abstract_training_predicted_rake.csv',
}
if mode in _train_outputs:
    df_train.to_csv(_train_outputs[mode], index=False)
else:
    print('Wrong mode!!!')
#
# ### Import the test data
# +
# Load the preprocessed test abstracts.
# (The original `[0:]` slice was a no-op copy of the fresh DataFrame — removed.)
df_test = pd.read_csv("./data/data_processed/Abstract_test.csv")
df_test.head()
# -
# ### Prediction on test
# Below we use our extracted keywords to predict citations for the papers in the test set. This may take a few minutes.
# Score each test abstract; NaN abstracts are passed through unchanged
# (predict is skipped for them via the notnull guard).
df_test['predicted_citations'] = df_test['Abstract'].apply(lambda x: predict(data_dict, x, mode) if(pd.notnull(x)) else x)
df_test.head()
# ### Calculate correlation between citations_average and predicted_citations
#
df_test.citations_average.corr(df_test.predicted_citations)
df_test.fillna(0, inplace=True)
# ### Save the test data with predicted values
# Write the test-set predictions to a mode-specific CSV.
_test_outputs = {
    1: './data/data_processed/Abstract_test_predicted_nltk.csv',
    2: './data/data_processed/Abstract_test_predicted_textblob.csv',
    3: './data/data_processed/Abstract_test_predicted_rake.csv',
}
if mode in _test_outputs:
    df_test.to_csv(_test_outputs[mode], index=False)
else:
    print('Wrong mode!!!')
| abstract.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <center>Models and Pricing of Financial Derivatives HW_03</center>
#
# **<center>11510691 程远星$\DeclareMathOperator*{\argmin}{argmin}
# \DeclareMathOperator*{\argmax}{argmax}
# \DeclareMathOperator*{\plim}{plim}
# \newcommand{\using}[1]{\stackrel{\mathrm{#1}}{=}}
# \newcommand{\ffrac}{\displaystyle \frac}
# \newcommand{\asim}{\overset{\text{a}}{\sim}}
# \newcommand{\space}{\text{ }}
# \newcommand{\bspace}{\;\;\;\;}
# \newcommand{\QQQ}{\boxed{?\:}}
# \newcommand{\void}{\left.\right.}
# \newcommand{\Tran}[1]{{#1}^{\mathrm{T}}}
# \newcommand{\d}[1]{\displaystyle{#1}}
# \newcommand{\CB}[1]{\left\{ #1 \right\}}
# \newcommand{\SB}[1]{\left[ #1 \right]}
# \newcommand{\P}[1]{\left( #1 \right)}
# \newcommand{\abs}[1]{\left| #1 \right|}
# \newcommand{\norm}[1]{\left\| #1 \right\|}
# \newcommand{\dd}{\mathrm{d}}
# \newcommand{\Exp}{\mathrm{E}}
# \newcommand{\RR}{\mathbb{R}}
# \newcommand{\EE}{\mathbb{E}}
# \newcommand{\II}{\mathbb{I}}
# \newcommand{\NN}{\mathbb{N}}
# \newcommand{\ZZ}{\mathbb{Z}}
# \newcommand{\QQ}{\mathbb{Q}}
# \newcommand{\PP}{\mathbb{P}}
# \newcommand{\AcA}{\mathcal{A}}
# \newcommand{\FcF}{\mathcal{F}}
# \newcommand{\AsA}{\mathscr{A}}
# \newcommand{\FsF}{\mathscr{F}}
# \newcommand{\Var}[2][\,\!]{\mathrm{Var}_{#1}\left[#2\right]}
# \newcommand{\Avar}[2][\,\!]{\mathrm{Avar}_{#1}\left[#2\right]}
# \newcommand{\Cov}[2][\,\!]{\mathrm{Cov}_{#1}\left(#2\right)}
# \newcommand{\Corr}[2][\,\!]{\mathrm{Corr}_{#1}\left(#2\right)}
# \newcommand{\I}[1]{\mathrm{I}\left( #1 \right)}
# \newcommand{\N}[1]{\mathcal{N} \left( #1 \right)}
# \newcommand{\ow}{\text{otherwise}}
# \newcommand{\FSD}{\text{FSD}}
# \void^\dagger$</center>**
# ## Question 1
#
# If $S_t$ follows the *geometric Brownian motion* process as given by $\ffrac{\dd S_t}{S_t}= \mu\dd t + \sigma\dd z$, what is the process followed by
#
# $\P{1}$ $y = 2S_t$
#
# $\bspace Answer$
#
# >$\begin{align}
# \dd y &= \dd\P{2S_t}\\
# &= \P{0 + \P{\mu S_t}\cdot 2 + \ffrac{1}{2}\P{\sigma S_t}^2 \cdot 0}\dd t + \sigma S_t \cdot 2 \dd z\\
# &= 2\mu S_t \dd t + 2\sigma S_t \dd z\\
# &= \mu y \dd t + \sigma y\dd z
# \end{align}$
#
# $\P{2}$ $y = S_t^2$
#
# $\bspace Answer$
#
# >$\begin{align}
# \dd y &= \dd\P{S_t^2}\\
# &= \P{0 + \P{\mu S_t}\cdot 2S_t + \ffrac{1}{2}\P{\sigma S_t}^2 \cdot 2}\dd t + \P{\sigma S_t} \cdot 2S_t \dd z\\
# &= \P{2\mu S_t^2 + \sigma^2 S_t^2}\dd t + 2\sigma S_t^2 \dd z\\
# &= \P{2\mu + \sigma^2}y\dd t + 2\sigma y \dd z
# \end{align}$
#
# $\P{3}$ $y=e^{S_t}$
#
# $\bspace Answer$
#
# >$\begin{align}
# \dd y &= \dd\P{e^{S_t}}\\
# &= \P{0 + \P{\mu S_t}\cdot e^{S_t} + \ffrac{1}{2}\P{\sigma S_t}^2 \cdot e^{S_t}}\dd t + \P{\sigma S_t} \cdot e^{S_t} \dd z\\
# &= \P{\mu \log y + \ffrac{1}{2} \P{\sigma \log y}^2}y\dd t + \P{\sigma \log y} \cdot y\dd z
# \end{align}$
#
# $\P{4}$ $y = e^{r\P{T-t}}/S_t$
#
# $\bspace Answer$
#
# >$\begin{align}
# \dd y &= \dd\P{\ffrac{e^{r\P{T-t}}}{S_t}}\\
# &= \P{\ffrac{-re^{r\P{T-t}}}{S_t} + \P{\mu S_t}\cdot \P{-\ffrac{e^{r\P{T-t}}}{S_t^2}} + \ffrac{1}{2}\P{\sigma S_t}^2 \cdot \ffrac{2e^{r\P{T-t}}}{S_t^3}}\dd t + \P{\sigma S_t} \cdot \P{-\ffrac{e^{r\P{T-t}}}{S_t^2}} \dd z\\
# &= \P{\sigma^2-r-\mu}y\dd t - \sigma y \dd z
# \end{align}$
#
# $Remark$
#
# >We don't need to prove Itô's formula here. Just in case, given $\dd X_t = \mu\P{t,X_t}\dd t + \sigma\P{t,X_t} \dd W_t $ it looks like
# >
# >$$\dd f\P{t,X_t} = \P{\ffrac{\partial f}{\partial t} + \mu\P{t,X_t}\ffrac{\partial f}{\partial X_t} + \ffrac{1}{2}\sigma^2\P{t,X_t}\ffrac{\partial^2 f}{\partial X_t^2}}\dd t + \sigma\P{t,X_t}\ffrac{\partial f}{\partial X_t} \dd W_t$$
# >
# >Or $\dd f\P{t,X_t} = \ffrac{\partial f}{\partial t} \dd t + \ffrac{\partial f}{\partial X_t} \dd X_t + \ffrac{1}{2}\ffrac{\partial^2 f}{\partial X_t^2}\P{\dd X_t}^2$ where $\P{\dd t}^2 = 0$, $\dd t \cdot \dd W_t =0$ and $\P{\dd W_t}^2 = \dd t$.
# ## Question 2
#
# Suppose that $W_t$ and $\widetilde W_t$ are *independent Browanian motions* under the measure $\PP$ and let $\rho\in\SB{ − 1, 1}$ be a *constant*. Is the process $X_t = \rho W_t + \sqrt{1-\rho^2}\widetilde W_t$ a Brownian motion?
#
# $\bspace Answer$
#
# >It is a Brownian motion, the four conditions are checked correct below. We first let the drift and volatility of $W_t$ and $\widetilde W_t$ be $\mu,\sigma$ and $\check\mu,\check\sigma$, respectively. Denote $\widehat W_t$ as the standard Brownian motion, then $\forall\; t>s$, we have
# >
# >$$\begin{align}
# X_t - X_s &= \rho\P{W_t - W_s} + \sqrt{1-\rho^2}\P{\widetilde W_t - \widetilde W_s} \\
# &= \rho\P{\mu \P{t-s} + \sigma \P{\widehat W_t-\widehat W_s}} + \sqrt{1-\rho^2}\P{\check\mu \P{t-s} + \check\sigma \P{\widehat W_t - \widehat W_s}}\\
# &= \P{\rho \mu + \sqrt{1-\rho^2}\check \mu}\P{t-s} + \P{\rho\sigma + \sqrt{1-\rho^2}\check\sigma}\P{\widehat W_t - \widehat W_s}
# \end{align}$$
# >
# >- Let $\rho \mu + \sqrt{1-\rho^2}\check \mu \equiv \dot \mu$ and $\rho\sigma + \sqrt{1-\rho^2}\check\sigma\equiv\dot\sigma$, then
# >
# >$$X_t - X_s = \dot\mu\P{t-s} + \dot\sigma\P{\widehat W_t-\widehat W_s} \sim \N{\dot\mu\P{t-s},\dot\sigma^2 \P{t-s}},\bspace\forall\; t>s$$
# >
# >- Consider $t$ and $s$ as two adjacent time in the interval $\SB{0,T}$. Then since $\widehat W_t$ has independent increments, so is $X_t - X_s$, judging from its expression.
# >- $X_0 = \dot\mu t + \dot\sigma \widehat W_t \mid_{t=0} = \widehat W_0 = 0$
# >- Still, from the expression of $X_t$, $t$ is continuous and the path $\SB{W_t:t\geq 0}$ and $\SB{\widetilde W_t:t\geq 0}$ are also continuous functions, $X_t$ is continuous.
# ## Question 3
#
# Let $\CB{W_t}_{t \geq 0}$ be *standard Brownian motion* under the measure $\PP$. Which of the following are $\PP$-Brownian motions?
#
# $\P{1}$ $\CB{-W_t}_{t \geq 0}$
#
# $\bspace Answer$
#
# >It is a $\PP$-Brownian motion.
# >
# >- $\forall\; t>s$, the increment $\P{-W_t} - \P{-W_s} = W_s - W_t$ also has the normal distribution $\N{0,t-s}$
#
# >- The process $-W_t$ has independent increments: for any set of times $0\leq t_1<t_2<\cdots<t_n$, the $r.v.$ $-W_{t_{\void_2}} + W_{t_{\void_1}}$, $-W_{t_{\void_3}} + W_{t_{\void_2}}$, $\dots$, $-W_{t_{\void_{n}}} + W_{t_{\void_{n-1}}}$ are also independent, since their inverse are.
# - $-W_0 = 0$
# - The sample paths: $\SB{-W_t: t \geq 0}$ are continuous function of $t$ since $\SB{W_t: t \geq 0}$ are.
#
# $\P{2}$ $\CB{cW_{t/c^2}}_{t \geq 0}$, where $c$ is a constant
#
# $\bspace Answer$
#
# >It is a $\PP$-Brownian motion.
# >
# >- $\forall\; t>s$, the increment $c\P{W_{t/c^{\void_2}}} - c\P{W_{s/c^{\void_2}}}$ also has the normal distribution $\N{0,c^2\P{t/c^2 - s/c^2}} = \N{0,t-s}$
# - The process $cW_{t/c^{\void_2}}$ has independent increments: for any set of times $0\leq t_1<t_2<\cdots<t_n$, the $r.v.$ $cW_{t_{\void_2}/c^2} - cW_{t_{\void_1}/c^2}$, $cW_{t_{\void_3}/c^2} - cW_{t_{\void_2}/c^2}$, $\dots$, $cW_{t_{\void_{n}}/c^2} - cW_{t_{\void_{n-1}}/c^2}$ are also independent, since the increments of $W$ over the rescaled ordered times $0\leq t_1/c^2<t_2/c^2<\cdots<t_n/c^2$ are
# - $cW_{0/c^{\void_2}} = cW_0 = 0$
# - The sample paths: $\SB{cW_{t/c^{\void_2}}: t \geq 0}$ are continuous function of $t$ since $\SB{W_t: t \geq 0}$ are, so that we can multiply a constant and rescale the time axis to its $1/c^2$ while keeping the continuity
#
# $\P{3}$ $\CB{\sqrt{t}W_{1}}_{t \geq 0}$
#
# $\bspace Answer$
#
# >It's not. We next prove this process doesn't have the independent increments. Here's the covariance if taking the time $0$, $s$, and $t$.
# >
# >$$\begin{align}
# &\Cov{\sqrt{t} W_1 - \sqrt s W_1, \sqrt s W_1}\\
# =&\; \P{\sqrt t - \sqrt s}\sqrt s \cdot \Var{W_1}\\
# =&\; \P{\sqrt t - \sqrt s}\sqrt s \neq 0
# \end{align}$$
#
# $\P{4}$ $\CB{W_{2t}-W_t}_{t \geq 0}$
#
# $\bspace Answer$
#
# >It's not. Still, let's see the covariance, taking the time $0$, $2s$ and $2t$.
# >
# >$$\begin{align}
# &\Cov{\P{W_{2t} - W_t}-\P{W_{2s} - W_s}, W_{2s} - W_s}\\
# =\;&\Cov{\P{W_{2t} - W_{2s}}-\P{W_t - W_s}, W_{2s} - W_s}\\
# =\;&\Cov{W_{2t} - W_{2s},W_{2s}} + \Cov{\P{W_t - W_s}, W_s}\\
# \;&\bspace - \Cov{W_{2t} - W_{2s},W_{s}} - \Cov{\P{W_t - W_s},W_{2s}}\\
# =\;& 0+2\Cov{W_{2s},W_s} - \Cov{W_{2t},W_s} - \Cov{W_{t},W_{2s}}\\
# =\;& 0+2\Cov{W_{2s} - W_s,W_s}+2\Cov{W_s,W_s} - \Cov{W_{2t}-W_s,W_s} + \Cov{W_s,W_s} - \Cov{W_{t},W_{2s}}\\
# =\;& \Var{W_s} - \Cov{W_{t},W_{2s}} = s - \Cov{W_{t},W_{2s}}
# \end{align}$$
# >
# >If $t > 2s$, then the preceding equals to $s - \Var{W_{2s}} = -s \neq 0$.
# ## Question 4
#
# Suppose that $X$ is *normally distributed* with mean $\mu$ and variance $\sigma^2$. Calculate $\Exp\SB{e^{\theta X}}$ and hence, $\Exp\SB{X^4}$
#
# $\bspace Answer$
#
# >$$\begin{align}
# \Exp\SB{e^{\theta X}} &= \int^{\infty}_{-\infty} e^{\theta X}\cdot\ffrac{1}{\sqrt{2\pi}\sigma}\exp\CB{-\ffrac{ \P{x-\mu}^2}{2\sigma^2}}\;\dd x\\
# &= \int^{\infty}_{-\infty} e^{\theta X}\cdot\ffrac{1}{\sqrt{2\pi}\sigma}\exp\CB{-\ffrac{ \P{x-\mu+\theta\sigma^2}^2}{2\sigma^2}} \cdot\exp\CB{\ffrac{\theta^2\sigma^2}{2} + \theta\mu}\;\dd x\\
# &= \exp\CB{\ffrac{\theta^2\sigma^2}{2} + \theta\mu}
# \end{align}$$
# >
# >And to find $\Exp\SB{X^4} = \left.\ffrac{\dd^4}{\dd \theta^4}\Exp\SB{e^{\theta X}}\right|_{\theta=0}$, we let $f\P{\theta} = \ffrac{\theta^2\sigma^2}{2} + \theta\mu$, then
# >
# >$$\begin{align}
# \ffrac{\dd^4}{\dd \theta^4}\Exp\SB{e^{\theta X}} &= \ffrac{\dd^3}{\dd \theta^3}\P{f' e^f}\\
# &= \ffrac{\dd^2}{\dd \theta^2}\P{\P{f'^2 + f''} e^f}\\
# &= \ffrac{\dd}{\dd \theta}\P{\P{3f''f' + f^{\P{3}}+f'^3} e^f}\\
# &= \P{\P{3f^{\P{3}}f' + f^{\P{4}}+\P{f'}^4+3\P{f''}^2+6\P{f'}^2f''} e^f}
# \end{align}$$
# >
# >Plug in $\theta=0$ and by $f = 0 = f^{\P{3}} = f^{\P{4}}$, $f' = \mu$, $f'' = \sigma^2$, we have
# >
# >$$\Exp\SB{X^4} = 3\sigma^4 + 6\sigma^2\mu^2 + \mu^4$$
# >***
# >And actually, we can calculate it directly
# >
# >$$\begin{align}
# \Exp\SB{X^4} &= \Exp\SB{\P{\mu + \sigma Z}^4}\\
# &= \Exp\SB{\mu^4+4\mu^3\sigma Z + 6\mu^2\sigma^2Z^2+4\mu\sigma^3Z^3 + \sigma^4Z^4}\\
# &=\mu^4 + 0 + 6\sigma^2\mu^2 + 0 + 3\sigma^4
# \end{align}$$
#
# $Remark$
#
# >We can't use Itô's formula here cause it's not a stochastic process. And about $\Exp\SB{Z^n}$, find them using the $\text{MGF}$.
# ## Question 5
#
# Let $\CB{W_t}_{t \geq 0}$ be *standard Brownian motion* under the measure $\PP$ and let $\CB{\FcF_t}_{t \geq 0}$ denote its natural filtration. Which of the following are $\P{\PP, \CB{\FcF_t}_{t \geq 0}}$-martingales?
#
# $\P{1}$ $\exp\CB{\sigma W_t}$
#
# $\bspace Answer$
#
# >It's not. Whatever, for the first condition, $\Exp\SB{\abs{e^{\sigma W_t}}} = \exp\CB{\ffrac{1}{2}\sigma^2t} < \infty$. Now see the second condition.
# >
# >$$\begin{align}
# \Exp_s\SB{e^{\sigma W_t}} &= \Exp_s\SB{e^{\sigma W_s}\cdot e^{\sigma \P{W_t-W_s}}}\\
# &= e^{\sigma W_s} \Exp_s\SB{e^{\sigma \P{W_t-W_s}}} \\
# &= e^{\sigma W_s} \Exp\SB{e^{\sigma W_{t-s}}}\\
# &= \exp\CB{\sigma W_s +\ffrac{\sigma^2}{2}\P{t-s}} \neq e^{\sigma W_s},\bspace \text{if }\sigma\neq 0
# \end{align}$$
#
# $\P{2}$ $cW_{t/c^2}$, where $c$ is a constant
#
# $\bspace Answer$
#
# >$\bspace$if $\abs{c}<1$, we have $t/c^2 > t$, which is not measurable in the $\FcF_t$. If $\abs{c} = 1$, we have $cW_{t/c^2} = \pm W_t$, it's certainly a martingale. If $\abs{c} > 1$, we have
# >
# >$$\Exp_s\SB{cW_{t/c^2}}=\begin{cases}
# cW_{t/c^2} & \text{if }t/c^2 <s\\
# cW_s &\text{if } t/c^2 \geq s
# \end{cases}$$
# >
# >Thus $cW_{t/c^2}$ is a martingale $iff$ $c = \pm1$
#
# $\P{3}$ $tW_t - \d{\int_0^t W_s\;\dd s}$
#
# $\bspace Answer$
#
# >It is. For the first condition,
# >
# >$$\begin{align}
# \Exp\SB{\abs{tW_t - \d{\int_{0}^{t} W_s\;\dd s}}} &\leq \Exp\SB{\abs{tW_t} + \abs{\d{\int_{0}^{t} W_s\;\dd s}}}\\
# &\leq t\cdot\Exp\SB{\ffrac{W_t^2 + 1}{2}} + \int_{0}^{t} \Exp\SB{\abs{W_s}}\;\dd s\\
# &\leq t\cdot\ffrac{1+t}{2} + \int_{0}^{t} \ffrac{1+s}{2}\;\dd s = t + \ffrac{3}{4}t^2 <\infty
# \end{align}$$
# >
# >And then the second one, in the review we have
# >
# >$$tW_t - \int_{0}^{t} W_s\;\dd s = \int_0^t s\;\dd W_s$$
# >
# >It's an Itô's process! Thus it's a martingale.
#
# $Remark$
#
# >Or prove the third one directly by checking the conditional expectation. **Goal**: try to collect the $W_t-W_s$ and then it's $0$.
# >
# >$$\begin{align}
# \Exp_s\SB{tW_t-\int_{0}^t W_r \;\dd r} &= sW_s - \int_0^s W_r \;\dd r + \Exp_s\SB{tW_t - sW_s - \int_{s}^t W_r \;\dd r}\\
# &= sW_s - \int_0^s W_r \;\dd r + \Exp_s\SB{t\P{W_t - W_s} + \int_{s}^t \P{W_s-W_r} \;\dd r}\\
# &= sW_s - \int_0^s W_r \;\dd r
# \end{align}$$
# ## Question 6
#
# Let $\CB{\FcF_t}_{t \geq 0}$ denote the natural filtration associated to a *standard $\PP$-Brownian* motion $\CB{W_t}_{t \geq 0}$. Define the process $\CB{S_t}_{t \geq 0}$ by $S_t = f\P{t,W_t}$. What equation must $f$ satisfy if $S_t$ is to be a $\P{\PP, \CB{\FcF_t}_{t \geq 0}}$-martingale? Use your answer to check that $S_t = \exp\CB{vt + \sigma W_t}$ is a martingale if $v+\ffrac{\sigma^2}{2} = 0$
#
# $\bspace Answer$
#
# >Following the three conditions that define the martingale, we have
# >
# >$$\Exp\SB{\abs{f}}<\infty\\
# \Exp_s\SB{f\P{t,W_t}} = f\P{s,W_s}$$
# >
# >Using Itô's formula, we have
#
# >$$\begin{align}\dd S_t = \dd f &= \P{f_t + 0\cdot f_W + \ffrac{1}{2}\cdot 1 \cdot f_{WW}}\dd t + 1\cdot f_W \dd W_t = f_t\dd t + \ffrac{1}{2} f_{WW}\dd t + f_W \dd W_t\\
# &= f_t \dd t + f_W\dd W + \ffrac{1}{2} f_{WW} \P{\dd W_t}^2 = f_t \dd t + f_W\dd W + \ffrac{1}{2} f_{WW} \dd t
# \end{align}$$
# >
# >So that the condition here is $f_t + \ffrac{1}{2}f_{WW} = 0$.
# >
# >And to check this, of course, for $S_t = \exp\CB{vt + \sigma W_t}$, it's a martingale if $v + \ffrac{1}{2} \sigma^2 = 0$.
# ## Question 7
#
# If $f$ is a simple function and $\CB{W_t}_{t\geq0}$ is a $\PP$-Brownian motion, prove that the process $\CB{M_t}_{t\geq 0}$ given by the Itô integral $M_t = \d{\int_0^t f\P{s,W_s}\;\dd W_s}$ is a $\P{\PP, \CB{\FcF_t}_{t \geq 0}}$-martingale.
#
# $\bspace Answer$
#
# >Firstly, it's adapted to $\FcF_t$ and likewise we have $\Exp\SB{\abs{M_t}} \leq \Exp\SB{\ffrac{M_t^2+1} {2}}<\infty$ by Itô's isometry. Then,
# >
# >$$\begin{align}
# \Exp_s\SB{M_t} =& M_s + \Exp_s\SB{M_t - M_s} \\[0.6em]
# &\bspace \text{partition the interval } \SB{s,t} \text{ in } n \text{ subintervals}\\[0.5em]
# =& M_s + \Exp_s\SB{ \lim_{n\to\infty} \sum_{j=0}^{n-1} h\P{t_j, W_{t_{\void_{j}}}}\P{W_{t_{\void_{j+1}}} - W_{t_{\void_{j}}}} }\\[0.6em]
# &\bspace \text{assume the summation is exchangable with the expectation } \\[0.5em]
# =& M_s + \lim_{n\to\infty} \sum_{j=0}^{n-1} \Exp_s\SB{ h\P{t_j, W_{t_{\void_{j}}}}\P{W_{t_{\void_{j+1}}} - W_{t_{\void_{j}}}} }
# \end{align}$$
# >
# >Then we condition on $\FcF_{t_{\void_{j}}}$ and since $\Exp_s\SB{X} = \Exp_s\SB{\Exp_t\SB{X}}$, $t>s$
# >
# >$$\begin{align}
# \Exp_s\SB{M_t}=& M_s + \lim_{n\to\infty} \sum_{j=0}^{n-1} \Exp_s\SB{ \Exp_{t_{\void_{j}}}\SB{ h\P{t_j, W_{t_{\void_{j}}}} \P{W_{t_{\void_{j+1}}} - W_{t_{\void_{j}}}} } }\\
# =& M_s + \lim_{n\to\infty} \sum_{j=0}^{n-1} \Exp_s\SB{ h\P{t_j, W_{t_{\void_{j}}}} \Exp_{t_{\void_{j}}} \SB{{W_{t_{\void_{j+1}}} - W_{t_{\void_{j}}}} }} \\[0.5em]
# =& M_s
# \end{align}$$
# ## Question 8
#
# As usual, $\CB{W_t}_{t\geq0}$ denotes *standard Brownian* motion under $\PP$. Use Itô's formula to write down stochastic differential equations (SDE) for the following quantities.
#
# $\P{1}$ $Y_t = W_t^3$
#
# $\bspace Answer$
#
# >Here $X_t = W_t$, then
# >
# >$$\begin{align}\dd Y_t &= \P{0 + 0 \cdot 3X_t^2 + \ffrac{1}{2}\cdot1\cdot6X_t}\dd t + 1\cdot 3 X_t^2 \dd W_t\\
# &= 3X_t \dd t + 3X_t^2 \dd W_t\\
# &= 3\sqrt[3]{Y_t} \dd t + 3\sqrt[3]{Y_t^2} \dd W_t
# \end{align}$$
# >
# >It's not a martingale since
# >
# >$$\begin{align}
# \Exp_s\SB{W_t^3} &= \Exp_s\SB{\P{W_t - W_s + W_s}^3}\\
# &= \Exp_s\SB{\P{W_t - W_s}^3} + \Exp_s\SB{3\P{W_t - W_s}^2W_s} + \Exp_s\SB{3\P{W_t-W_s}W_s^2} + W_s^3\\
# &= W_s^3 + 3W_s\Exp\SB{\P{W_t-W_s}^2}\\
# &= W_s^3 + 3W_s\P{t-s} \neq W_s^3
# \end{align}$$
#
# $\P{2}$ $Y_t = \exp\CB{\sigma W_t - \ffrac{1}{2}\sigma^2 t}$
#
# $\bspace Answer$
#
# >Still $X_t = W_t$ here, then
# >
# >$$\begin{align}
# \dd Y_t &= \dd\P{\exp\CB{-\ffrac{1}{2}\sigma^2 t} e^{\sigma W_t}}\\
# &= Y_t\P{-\ffrac{1}{2}\sigma^2 \dd t - \ffrac{1}{2}\sigma^2 \dd t + \sigma\dd W_t + \ffrac{1}{2}\sigma^2 \dd t}\\
# &= Y_t\P{-\ffrac{1}{2}\sigma^2 \dd t + \sigma \dd W_t}
# \end{align}$$
# >
# >It's a martingale. We denote $M_t = \exp\CB{\sigma W_t - \ffrac{1} {2} \sigma^2 t}$. For the expectation part:
# >
# >$$\Exp\SB{\abs{M_t}} = \int_{-\infty}^{\infty} \exp\CB{\sigma x - \ffrac{1} {2} \sigma^2 t} \cdot\ffrac{1} {\sqrt{2\pi t}} \exp\CB{-\ffrac{x^2} {2t}}\dd{x} = 1 < \infty$$
# >
# >$$\Exp_s\SB{M_t} = \Exp_s\SB{M_s\cdot M_{t-s}} = M_s\Exp_s\SB{M_{t-s}} = M_s$$
#
# $\P{3}$ $Y_t = tW_t$
#
# $\bspace Answer$
#
# >Still $X_t = W_t$, then
# >
# >$$\begin{align}
# \dd Y_t &= \dd\P{tX_t}\\
# &= \P{W_t + 0 + 0 }\dd t + 1\cdot t \dd W_t\\
# &= W_t \dd t + t \dd W_t
# \end{align}$$
# >
# >And this is NOT a martingale, since
# >
# >$$\begin{align}
# \Exp_s\SB{Y_t} &= \Exp_s\SB{tW_t}\\
# &= \Exp_s\SB{t\P{W_t-W_s} + tW_s}\\
# &= tW_s \neq sW_s
# \end{align}$$
#
# $Remark$
#
# >- $W_t -W_s \sim \N{0,t-s}$ thus $\Exp\SB{\exp\CB{\sigma\P{W_t-W_s}}} = \exp\CB{\ffrac{1}{2}\sigma^2\P{t-s}}$.
# >- $\P{t-s}^{-1}\P{W_t -W_s}^2\sim\chi^2\P{1}$, thus $\Exp\SB{\P{W_t -W_s}^2} = \P{t-s}\Exp\SB{\chi^2\P{1}} = t-s$. Just remember that $\Exp\SB{\chi^2\P k} = k$ and $\Var{\chi^2\P k} = 2k$.
# ## Question 9
#
# Let $\CB{W_t}_{t \geq 0}$ be *standard Brownian motion* under the measure $\PP$ and define $Z_t = \exp\CB{\alpha W_t}$. Use Itô's formula to write down a stochastic differential equation for $Z_t$, hence find an ordinary (deterministic) differential equation for $m\P t = \Exp\SB{Z_t}$, and solve to show that
#
# $$\Exp\SB{\exp\CB{\alpha W_t}} = \exp\CB{\ffrac{\alpha^2}{2}t}$$
#
# $\bspace Answer$
#
# >Here let $X_t = \alpha W_t$ and we obtain
# >
# >$$\begin{align}
# \dd Z_t &= \dd\P{e^{X_t}}\\
# &= \P{0 + 0 + \ffrac{1}{2}\alpha^2 e^{X_t}}\dd t + \alpha e^{X_t} \dd W_t\\
# &= Z_t\P{\ffrac{1}{2}\alpha^2 \dd t + \alpha \dd W_t}
# \end{align}$$
# >
# >$$\begin{align}
# m\P{t} &= \Exp\SB{Z_t} = \Exp\SB{e^{\alpha W_t}}\\
# &= \Exp\SB{e^{\alpha W_0} + \int_0^t \alpha e^{\alpha W_s}\;\dd W_s + \int_0^t\ffrac{1}{2}\alpha^2 e^{\alpha W_s}\;\dd s}\\
# &= \Exp\SB{1} + \Exp\SB{\int_0^t \alpha e^{\alpha W_s}\;\dd W_s} + \Exp\SB{\int_0^t\ffrac{1}{2}\alpha^2 e^{\alpha W_s}\;\dd s}\\
# &= 1 + 0 + \int_0^t\ffrac{1}{2}\alpha^2 \Exp\SB{e^{\alpha W_s}}\;\dd s\\
# &= 1 + \ffrac{1}{2}\alpha^2\int_0^tm\P{s}\;\dd s
# \end{align}$$
# >
# >$$\begin{align}m'\P{t} &= \ffrac{1}{2}\alpha^2 m\P{t}\\
# \Longrightarrow m\P{t} &= C\exp\CB{\ffrac{1}{2}\alpha^2 t}\\
# m\P{0}=1\Rightarrow m\P{t} &= \Exp\SB{e^{\alpha W_t}} = \exp\CB{\ffrac{1}{2}\alpha^2 t}
# \end{align}$$
# ## Question 10 The Ornstein−Uhlenbeck process
#
# Let $\CB{W_t}_{t\geq0}$ denote *standard Brownian motion* under $\PP$. The ***Ornstein-Uhlenbeck process***, $\CB{X_t}_{t\geq0}$ is unique solution to ***Langevin's equation***:
#
# $$\dd X_t = -\alpha X_t \dd t+ \dd W_t,\bspace X_0 = x$$
#
# This equation was originally introduced as a simple idealised model for the velocity of a particle suspended in a liquid. In finance it is a special case of the ***Vasicek model*** of interest rate. Verify that
#
# $$X_t = e^{-\alpha t} x + e^{-\alpha t} \int_0^t e^{\alpha s} \;\dd W_s$$
#
# and use this expression to calculate the mean and variance of $X_t$.
#
# $\bspace Answer$
#
# >To verify, we first rewrite the equation to $e^{\alpha t}\dd X_t = -\alpha e^{\alpha t}X_t\dd t + e^{\alpha t}\dd W_t$ and this is equivalent to
# >
# >$$\dd\P{e^{\alpha t}X_t} = e^{\alpha t}\dd W_t \\
# \Longrightarrow e^{\alpha t}X_t - 1 \cdot X_0 = \int_0^t e^{\alpha s}\;\dd W_s\\
# \Longrightarrow X_t = e^{-\alpha t}x + e^{-\alpha t}\int_0^t e^{\alpha s}\;\dd W_s
# $$
# >
# >And about the mean
# >
# >$$\begin{align}
# \Exp\SB{X_t} &= \Exp\SB{e^{-\alpha t}x + e^{-\alpha t}\int_0^t e^{\alpha s}\;\dd W_s}\\
# &= e^{-\alpha t}x + e^{-\alpha t}\Exp\SB{\int_0^t e^{\alpha s}\;\dd W_s}\\
# &= e^{-\alpha t}x + e^{-\alpha t} \cdot 0\\
# &= x \cdot e^{-\alpha t}
# \end{align}$$
# >
# >To find the variance, by $\Var{X_t}=\Exp\SB{X_t^2}-\P{\Exp\SB{X_t}}^2$, we first need to apply the Itô's isommetry to find the second moment
# >
# >$$\begin{align}
# \Exp\SB{X_t^2} &= \Exp\SB{e^{-2\alpha t}x^2 + 2e^{-2\alpha t}x\int_0^t e^{\alpha s}\;\dd W_s + e^{-2\alpha t}\P{\int_0^t e^{\alpha s}\;\dd W_s}^2}\\
# &= \Exp\SB{e^{-2\alpha t}x^2} + \Exp\SB{ 2e^{-2\alpha t}x\int_0^t e^{\alpha s}\;\dd W_s} + e^{-2\alpha t}\Exp\SB{\P{\int_0^t e^{\alpha s}\;\dd W_s}^2}\\
# &= e^{-2\alpha t}x^2 + 0 + e^{-2\alpha t}\int_0^t \Exp\SB{e^{2\alpha s}}\dd s = e^{-2\alpha t}x^2 + \ffrac{1}{2\alpha}\P{1 - e^{-2\alpha t}}
# \end{align}$$
# >
# >With this, we can compute the final variance
# >
# >$$
# \begin{align}
# \Var{X_t}&=\Exp\SB{X_t^2}-\P{\Exp\SB{X_t}}^2\\
# &= e^{-2\alpha t}x^2 + \ffrac{1}{2\alpha}\P{1 - e^{-2\alpha t}} - x^2e^{-2\alpha t}\\
# &= \ffrac{1}{2\alpha}\P{1-e^{-2\alpha t}}
# \end{align}
# $$
# >***
# >And a better method to find the variance
# >
# >$$\begin{align}
# \Var{X_t}&= \Var{e^{-\alpha t}x + e^{-\alpha t}\int_0^t e^{\alpha s}\;\dd W_s}\\
# &= 0 + e^{-2\alpha t}\Var{\int_0^t e^{\alpha s}\;\dd W_s}\\
# &= e^{-2\alpha t}\P{\Exp\SB{\P{\int_0^t e^{\alpha s}\;\dd W_s}^2} - \P{\Exp\SB{\int_0^t e^{\alpha s}\;\dd W_s}}^2}\\
# &= e^{-2\alpha t}\Exp\SB{\P{\int_0^t e^{\alpha s}\;\dd W_s}^2} \\
# &= e^{-2\alpha t}\Exp\SB{\int_0^t e^{2\alpha s}\;\dd s},\bspace\text{Itô's isometry}\\
# &= \cdots
# \end{align}$$
# ## Question 11
#
# Suppose that under the probability measure $\PP$, $\CB{X_t}_{t\geq0}$ is a Brownian motion with *constant drift* $\mu$. Find a measure $\PP^*$ equivalent to $\PP$ where $\CB{X_t}_{t\geq0}$ is a Brownian motion with *constant drift* $\nu$.
#
# $\bspace Answer$
#
# >To make this simple, we write $\dd X_t = \mu \dd t + \sigma \dd W_t$ where $W_t$ is the standard Brownian motion under measure $\PP$. Let $\widetilde W_t$ is that under measure $\PP^*$, then we have
# >
# >$$\dd X_t = \nu \dd t + \sigma \dd \widetilde W_t$$
# >
# >So we can define $\widetilde W_t = W_t + \ffrac{\mu - \nu}{\sigma}t$. Then by Girsanov’s Theorem, we can define the measure $\PP^*$
# >
# >$$\left.\ffrac{\dd \PP^*}{\dd \PP}\right|_{\FcF_t} = \exp\CB{-\theta W_t - \ffrac{1}{2}\theta^2 t},\bspace \theta = \ffrac{\mu - \nu}{\sigma}$$
# ## Question 12
#
# Use the Feynman-Kac stochastic representation formula to solve
#
# $$\ffrac{\partial F\P{t,x}}{\partial t} + \ffrac{1}{2}\sigma^2 \ffrac{\partial^2 F\P{t,x}}{\partial x^2} = 0$$
#
# subject to the *terminal value condition* $F\P{T,x} = e^x$
#
# $\bspace Answer$
#
# >By F-K formula, we have the solution of the corresponding stochastic differential equation
# >
# >$$\dd X_t = \mu_t\dd t + \sigma_t \dd W_t$$
# >
# >where $\mu_t = 0$, $r = 0$, and $\sigma_t = \sigma$. So that the solution is, in integral form,
# >
# >$$X_t = \sigma W_t + X_0$$
# >
# >So that the solution $F\P{t,x}$
# >
# >$$\begin{align}
# F\P{t,x} &= \Exp\SB{e^{-r\P{T-t}} e^{X_T} \mid X_t = x} \\
# &= \Exp\SB{e^{-0\P{T-t}} e^{\sigma W_T} \mid \sigma W_t = x} \\
# &= \Exp\SB{e^{\sigma\P{ W_T - W_t} +x} \mid \sigma W_t = x}\\
# &= e^{x}\Exp\SB{e^{\sigma\P{ W_T - W_t}}}\\
# &= \exp\CB{ x + \ffrac{\sigma^2}{2}\P{T-t}}
# \end{align}$$
# ***
| FinMath/Models and Pricing of Financial Derivatives/HW/HW_03_11510691_fixed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="jvEhT5MpJdNm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="96c98ee6-4536-4fec-f3f0-007c8a41c2a1" executionInfo={"status": "ok", "timestamp": 1581674551108, "user_tz": -60, "elapsed": 6734, "user": {"displayName": "S\u014<NAME>142owski", "photoUrl": "", "userId": "08134379761109426327"}}
# !pip install eli5
# + id="vfGMPJ1dJhkM" colab_type="code" colab={}
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
import eli5
from eli5.sklearn import PermutationImportance
from ast import literal_eval
from tqdm import tqdm_notebook
# + id="Y4GW52VPKnxJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="951810c5-dc37-4cb7-f2f5-02991fcb9040" executionInfo={"status": "ok", "timestamp": 1581674876596, "user_tz": -60, "elapsed": 601, "user": {"displayName": "S\u<NAME>", "photoUrl": "", "userId": "08134379761109426327"}}
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix"
# + id="DRMrlM1rK0v7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="620ca9a5-630f-4b62-a0ca-9b8740ff2ea0" executionInfo={"status": "ok", "timestamp": 1581674880361, "user_tz": -60, "elapsed": 2160, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08134379761109426327"}}
# ls
# + id="Mp-Cc3LDK1SN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="895bb581-e377-47e1-db1e-3337349f6f5b" executionInfo={"status": "ok", "timestamp": 1581674936787, "user_tz": -60, "elapsed": 2289, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08134379761109426327"}}
# ls data
# + id="9LTfJ1ctLDBG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c2d88e11-9c71-48c5-aedc-abb0a9ac70a8" executionInfo={"status": "ok", "timestamp": 1581674983622, "user_tz": -60, "elapsed": 1270, "user": {"displayName": "S\u014<NAME>0142owski", "photoUrl": "", "userId": "08134379761109426327"}}
# Load the men's shoes listings; low_memory=False makes pandas read the whole
# file in one pass so each column gets a single inferred dtype.
df = pd.read_csv('data/men_shoes.csv',low_memory=False)
df.shape
# + id="Q2sZ5XuTLJ51" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="335594ee-9c5d-4bcd-b082-412ff98dd8a6" executionInfo={"status": "ok", "timestamp": 1581675048211, "user_tz": -60, "elapsed": 624, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08134379761109426327"}}
# Inspect the available columns.
df.columns
# + id="zjLqBQdCLepe" colab_type="code" colab={}
def run_model(feats, model=None):
    """Cross-validate an estimator on selected columns of the global ``df``.

    Parameters
    ----------
    feats : list of str
        Names of the feature columns in ``df`` to use as the design matrix.
    model : scikit-learn regressor, optional
        Estimator to score. When omitted, a fresh
        ``DecisionTreeRegressor(max_depth=5)`` is built per call. (The
        original signature instantiated the estimator in the default
        argument — a single shared instance created once at def time,
        the classic mutable-default-argument pitfall.)

    Returns
    -------
    (float, float)
        Mean and standard deviation of the cross-validation scores.
        Scoring is ``neg_mean_absolute_error``, so values are negative
        and closer to zero is better.
    """
    if model is None:
        # Built lazily so every call gets its own, un-fitted estimator.
        model = DecisionTreeRegressor(max_depth=5)
    X = df[feats].values
    y = df['prices_amountmin'].values
    scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error')
    return np.mean(scores), np.std(scores)
# + id="O2xWUgTEO5Nz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b65a8aef-3a9c-4e24-bd89-789f9ab8e194" executionInfo={"status": "ok", "timestamp": 1581676952457, "user_tz": -60, "elapsed": 686, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08134379761109426327"}}
# Baseline: integer-encode the lower-cased brand and score a depth-5 tree
# on that single feature.
df['brand_cat'] = df['brand'].map(lambda x: str(x).lower()).factorize()[0]
run_model(['brand_cat'])
# + id="OAv_-GrsPqtp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e95a70d0-4de8-4a2b-bc9b-a6a3a385e9ee" executionInfo={"status": "ok", "timestamp": 1581676959199, "user_tz": -60, "elapsed": 3582, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08134379761109426327"}}
# Same single feature, but scored with a random forest for comparison.
model = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
run_model(['brand_cat'], model)
# + id="ikn7mHsMQM7v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 513} outputId="1c7bc4d4-fb57-4268-8605-231614237066" executionInfo={"status": "ok", "timestamp": 1581676960865, "user_tz": -60, "elapsed": 787, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08134379761109426327"}}
# Peek at the first rows.
df.head()
# + id="Rj1cI_Y-QgXJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 139} outputId="38502153-10f9-44c5-9581-5b6f49573715" executionInfo={"status": "ok", "timestamp": 1581677059799, "user_tz": -60, "elapsed": 471, "user": {"displayName": "S\u014<NAME>\u0142owski", "photoUrl": "", "userId": "08134379761109426327"}}
# Raw `features` cells: stringified lists of {'key': ..., 'value': [...]}.
df.features.head().values
# + id="KACz-5J9TFTw" colab_type="code" colab={}
def parse_features(x):
    """Parse one raw ``features`` cell into a ``{key: first_value}`` dict.

    A cell holds a Python-literal list such as
    ``[{'key': 'Gender', 'value': ['Men']}]``. Keys and values are
    lower-cased and stripped, and only the first value of each entry is
    kept; later duplicate keys overwrite earlier ones. A missing cell
    (anything that stringifies to ``'nan'``) yields an empty dict.
    """
    if str(x) == 'nan':
        return {}
    entries = literal_eval(x.replace('\\"', '"'))
    return {
        entry['key'].lower().strip(): entry['value'][0].lower().strip()
        for entry in entries
    }
# Parse every raw `features` string into a per-row dict.
df['features_parsed'] = df['features'].map(parse_features)
# + id="mYGlYIeSbpF0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="6a164d2a-c6e8-4bdd-c86f-38e18c0e83c7" executionInfo={"status": "ok", "timestamp": 1581679824914, "user_tz": -60, "elapsed": 803, "user": {"displayName": "S\u014<NAME>\u0142owski", "photoUrl": "", "userId": "08134379761109426327"}}
# Sanity-check the parsed dicts.
df['features_parsed'].head().values
# + id="v2iMuc_1b87C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9b313c3e-7ffb-4d1a-e081-61984a7f16d6" executionInfo={"status": "ok", "timestamp": 1581680055309, "user_tz": -60, "elapsed": 614, "user": {"displayName": "S\u0142<NAME>\u0142owski", "photoUrl": "", "userId": "08134379761109426327"}}
# Union of all feature keys seen across rows; the .map call is used only
# for its side effect of updating `keys`.
keys = set()
df['features_parsed'].map(lambda x: keys.update(x.keys()))
len(keys)
# + id="WUJqLWZ4eOjo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["c5fe79f7e0654f8a859b46fd013f9711", "0d8ffac728a84976bb0ec9c6cc612fb6", "9d74b426048d4dcea7c1dfd74f3114a5", "7e17c2abe765432bb5dad82951b6b4c0", "<KEY>", "9ce7efb7e23742bc9dfb0ef65eaf4a86", "16386e55fa284961b594ef44669b484d", "ffdf7759462544bfa07dd7d630f16779"]} outputId="9fe67365-52a5-4e90-b170-23deef83b90e" executionInfo={"status": "ok", "timestamp": 1581680647605, "user_tz": -60, "elapsed": 4253, "user": {"displayName": "S\u014<NAME>0142owski", "photoUrl": "", "userId": "08134379761109426327"}}
def get_name_feat(key):
    """Map a parsed feature key to its dataframe column name ('feat_<key>')."""
    return 'feat_{}'.format(key)
# Materialize one `feat_<key>` column per discovered key; rows lacking the
# key get NaN.
for key in tqdm_notebook(keys):
  df[get_name_feat(key)] = df.features_parsed.map(lambda feats: feats[key] if key in feats else np.nan)
# + id="F579l26QgHoQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="f48a9ee4-e24a-419b-fb65-57abf03a41dd" executionInfo={"status": "ok", "timestamp": 1581680692313, "user_tz": -60, "elapsed": 683, "user": {"displayName": "S\u0<NAME>", "photoUrl": "", "userId": "08134379761109426327"}}
# Columns now include the generated feat_* set.
df.columns
# + id="8m8u-Ep_g-kF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="97370b21-f408-4e3c-83f1-4466e22be0d1" executionInfo={"status": "ok", "timestamp": 1581680839973, "user_tz": -60, "elapsed": 557, "user": {"displayName": "S\u014<NAME>", "photoUrl": "", "userId": "08134379761109426327"}}
# Coverage of one feature column, as a percent of non-null rows.
# NOTE(review): `False == s.isnull()` works but `s.notnull()` is the
# idiomatic spelling.
df[ False == df['feat_athlete'].isnull() ].shape[0] / df.shape[0] * 100 # percentage of occurrence
# + id="jPxg8DXLhkp8" colab_type="code" colab={}
# Per-key coverage: percent of rows where the parsed feature is non-null.
keys_stat = {}
for key in keys:
  keys_stat[key] = df[ False == df[get_name_feat(key)].isnull() ].shape[0] / df.shape[0] * 100
# + id="kBuskENqiPnJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="09d8ff35-24e7-4a5e-aeca-107c9315d9eb" executionInfo={"status": "ok", "timestamp": 1581681047837, "user_tz": -60, "elapsed": 592, "user": {"displayName": "S\u0142<NAME>0142owski", "photoUrl": "", "userId": "08134379761109426327"}}
# Full coverage table.
keys_stat
# + id="CQW10kjZiXZr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="baa28454-a12f-4fee-abb9-859135394806" executionInfo={"status": "ok", "timestamp": 1581681216214, "user_tz": -60, "elapsed": 679, "user": {"displayName": "S\u0142<NAME>\u0142owski", "photoUrl": "", "userId": "08134379761109426327"}}
# Keep only keys present in more than 30% of rows.
{k:v for k,v in keys_stat.items() if v>30}
# + id="7rNVbjSDipm9" colab_type="code" colab={}
# Integer-encode every parsed feature column as `feat_<key>_cat`.
# NOTE(review): the seven explicit factorize assignments this cell used to
# start with (feat_brand_cat, feat_color_cat, feat_gender_cat,
# feat_manufacturer part number_cat, feat_material_cat, feat_sport_cat,
# feat_style_cat) were redundant — `keys` contains those feature names too,
# so the loop below recomputed the exact same columns and overwrote them.
# They have been dropped; the resulting dataframe is identical.
for key in keys:
    df[get_name_feat(key) + '_cat'] = df[get_name_feat(key)].factorize()[0]
# + id="JpYAvpzFjyqh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="df0ce401-7eb8-494a-d146-e572926de7ce" executionInfo={"status": "ok", "timestamp": 1581681636918, "user_tz": -60, "elapsed": 632, "user": {"displayName": "S\u0142<NAME>\u0142owski", "photoUrl": "", "userId": "08134379761109426327"}}
# Normalize `brand` to lower case, then count rows where it agrees with the
# parsed feat_brand value.
df['brand'] = df['brand'].map(lambda x: str(x).lower())
df[df.brand == df.feat_brand].shape
# + id="aCPqxaLvknMf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="734d8729-065f-4cc9-d440-0c5fca204279" executionInfo={"status": "ok", "timestamp": 1581681751219, "user_tz": -60, "elapsed": 3636, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08134379761109426327"}}
# Re-score the brand-only baseline (no fixed random_state this time).
model = RandomForestRegressor(max_depth=5, n_estimators=100)
run_model(['brand_cat'],model)
# + id="s9r8dmy3sX0L" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="021197c6-a54f-463a-f5bd-887682656839" executionInfo={"status": "ok", "timestamp": 1581683752315, "user_tz": -60, "elapsed": 677, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08134379761109426327"}}
# All columns whose name contains 'cat' (the factorized candidates).
feats_cat = [x for x in df.columns if 'cat' in x]
feats_cat
# + id="cc9usz0UlCYm" colab_type="code" colab={}
# Hand-picked categorical features for the next model run.
feats = ['brand_cat' , 'feat_metal type_cat' , 'feat_shape_cat' ,'feat_brand_cat' , 'feat_color_cat' , 'feat_gender_cat' , 'feat_manufacturer part number_cat' , 'feat_material_cat' , 'feat_style_cat', 'feat_sport_cat']
# feats += feats_cat
# feats = list(set(feats))
model = RandomForestRegressor(max_depth=5, n_estimators=100)
results = run_model(feats,model)
# + id="u_b5cMoglYWJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="b01e0e48-1bdb-4b18-c1d3-ced5896ddd64" executionInfo={"status": "ok", "timestamp": 1581684582393, "user_tz": -60, "elapsed": 12214, "user": {"displayName": "S\u014<NAME>\u0142owski", "photoUrl": "", "userId": "08134379761109426327"}}
# Fit one forest on the whole dataset, then rank the features by
# permutation importance (eli5 renders the weight table inline).
X = df[feats].values
y = df['prices_amountmin'].values
m = RandomForestRegressor(max_depth=5, n_estimators=100,random_state=0)
m.fit(X,y)
results = run_model(feats,model)
perm = PermutationImportance(m, random_state=1).fit(X,y);
eli5.show_weights(perm, feature_names=feats)
# + id="LacaoFDBmq0U" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="13c7b5ae-0580-488a-85a8-79bd40b26460" executionInfo={"status": "ok", "timestamp": 1581682515476, "user_tz": -60, "elapsed": 4503, "user": {"displayName": "S\u<NAME>\u0142owski", "photoUrl": "", "userId": "08134379761109426327"}}
# Re-run with only the four strongest features from the importance table.
feats = ['brand_cat' , 'feat_brand_cat' , 'feat_gender_cat' , 'feat_material_cat']
model = RandomForestRegressor(max_depth=5, n_estimators=100)
run_model(feats,model)
# + id="_4OjYvq6no-j" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="3f8809be-65d6-45ec-dec1-600028fbfdda" executionInfo={"status": "ok", "timestamp": 1581682587175, "user_tz": -60, "elapsed": 630, "user": {"displayName": "S\u0142<NAME>\u0142owski", "photoUrl": "", "userId": "08134379761109426327"}}
# Absolute brand frequencies.
df['brand'].value_counts()
# + id="84HDgqK7oMSf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="066802b5-0dbe-436d-8214-e8e1b470abf9" executionInfo={"status": "ok", "timestamp": 1581682619874, "user_tz": -60, "elapsed": 468, "user": {"displayName": "S\u014<NAME>\u0142owski", "photoUrl": "", "userId": "08134379761109426327"}}
# Brand frequencies as proportions.
df['brand'].value_counts(normalize=True)
# + id="SIhpotkfoXOX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="bc82064b-f5c6-4e1b-9917-950ddc854d2b" executionInfo={"status": "ok", "timestamp": 1581682738060, "user_tz": -60, "elapsed": 754, "user": {"displayName": "S\u01<NAME>", "photoUrl": "", "userId": "08134379761109426327"}}
# Parsed feature dicts for nike rows only.
df[ df['brand'] == 'nike' ].features_parsed.head().values
# + id="v0EDXwI-omdY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7ed1510d-5adb-4233-b803-0e31b8ba0979" executionInfo={"status": "ok", "timestamp": 1581687546994, "user_tz": -60, "elapsed": 2024, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08134379761109426327"}}
# ls
# + id="dPa7WDbV7JwN" colab_type="code" colab={}
# !git add matrix_on
| matrix_one/day5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "aec851fe-e39a-4743-b983-9930d110e2c6"}
# 
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "8e5732f0-beec-44b9-b4e5-42023927f9d4"}
# # Training and Reusing Assertion Status Models
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "c0f3e14c-398a-4ecd-864c-3b1c939acafd"}
import os
import json
import string
import numpy as np
import pandas as pd
import sparknlp
import sparknlp_jsl
from sparknlp.base import *
from sparknlp.util import *
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.pretrained import ResourceDownloader
from pyspark.sql import functions as F
from pyspark.ml import Pipeline, PipelineModel
# Widen pandas display for long clinical text, then report library versions.
pd.set_option('max_colwidth', 100)
pd.set_option('display.max_columns', 100)
pd.set_option('display.expand_frame_repr', False)
print('sparknlp.version : ',sparknlp.version())
print('sparknlp_jsl.version : ',sparknlp_jsl.version())
spark
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "7868685d-d1a9-4896-8527-45da3ec252e7"}
# # Clinical Assertion Model (with pretrained models)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "09425c6d-5c14-4fa3-a719-98e20ab6c21e"}
# ## Pretrained `assertion_dl` model
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "064d576d-3e0b-497b-810d-c6e8c363a0c5"}
# Annotator that transforms a text column from dataframe into an Annotation ready for NLP
from sparknlp_jsl.annotator import *
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
# Sentence Detector annotator, processes various sentences per line
sentenceDetector = SentenceDetector()\
.setInputCols(["document"])\
.setOutputCol("sentence")
# Tokenizer splits words in a relevant format for NLP
tokenizer = Tokenizer()\
.setInputCols(["sentence"])\
.setOutputCol("token")
# Clinical word embeddings trained on PubMED dataset
word_embeddings = WordEmbeddingsModel.pretrained("embeddings_clinical", "en", "clinical/models")\
.setInputCols(["sentence", "token"])\
.setOutputCol("embeddings")
# NER model trained on i2b2 (sampled from MIMIC) dataset
clinical_ner = MedicalNerModel.pretrained("ner_clinical", "en", "clinical/models") \
.setInputCols(["sentence", "token", "embeddings"]) \
.setOutputCol("ner")
ner_converter = NerConverter() \
.setInputCols(["sentence", "token", "ner"]) \
.setOutputCol("ner_chunk")
# Assertion model trained on i2b2 (sampled from MIMIC) dataset
# coming from sparknlp_jsl.annotator !!
clinical_assertion = AssertionDLModel.pretrained("assertion_dl", "en", "clinical/models") \
.setInputCols(["sentence", "ner_chunk", "embeddings"]) \
.setOutputCol("assertion")
nlpPipeline = Pipeline(stages=[
documentAssembler,
sentenceDetector,
tokenizer,
word_embeddings,
clinical_ner,
ner_converter,
clinical_assertion
])
empty_data = spark.createDataFrame([[""]]).toDF("text")
model = nlpPipeline.fit(empty_data)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "0bede1e8-a98e-4774-ad2e-5863b56bd636"}
# we also have a LogReg based Assertion Model.
'''
clinical_assertion_ml = AssertionLogRegModel.pretrained("assertion_ml", "en", "clinical/models") \
.setInputCols(["sentence", "ner_chunk", "embeddings"]) \
.setOutputCol("assertion")
'''
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "6137f486-8004-4553-bb12-b2f01df9ab93"}
import pandas as pd
text = 'Patient has a headache for the last 2 weeks and appears anxious when she walks fast. No alopecia noted. She denies pain'
print (text)
light_model = LightPipeline(model)
light_result = light_model.fullAnnotate(text)[0]
chunks=[]
entities=[]
status=[]
for n,m in zip(light_result['ner_chunk'],light_result['assertion']):
chunks.append(n.result)
entities.append(n.metadata['entity'])
status.append(m.result)
df = pd.DataFrame({'chunks':chunks, 'entities':entities, 'assertion':status})
df
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "352bef06-8f6b-4099-983a-2f64bb4c0c55"}
from sparknlp_display import AssertionVisualizer
visualizer = AssertionVisualizer()
vis = visualizer.display(light_result, 'ner_chunk', 'assertion', return_html=True)
#visualizer.set_label_colors({'TREATMENT':'#008080', 'PROBLEM':'#800080'})
displayHTML(vis)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "f1e1c581-7c0e-4b56-be6e-d604278e3d18"}
# ! wget -q https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/pubmed/pubmed_sample_text_small.csv -P /dbfs/
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "1321721d-d896-4179-87c9-6716a68ccba4"}
import pyspark.sql.functions as F

# Load the sample PubMed abstracts CSV (header row present).
# BUG FIX: the original had a stray trailing '\' after .csv(...), which
# continued the expression onto the next statement and made the cell a
# syntax error.
pubMedDF = spark.read\
    .option("header", "true")\
    .csv("/pubmed_sample_text_small.csv")

pubMedDF.show(truncate=50)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "dd660faf-5874-42eb-99ef-a083952d91ab"}
result = model.transform(pubMedDF.limit(100))
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "7bf3ef67-9448-43e4-9d67-d7a47399911a"}
result.show()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "cd49fbf0-b7d7-4523-b08c-d02fc6271ffb"}
result.select('sentence.result').take(1)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "5a9f1fd2-875e-4595-bfc1-8b5132cee945"}
result.select(F.explode(F.arrays_zip('ner_chunk.result', 'ner_chunk.begin', 'ner_chunk.end', 'ner_chunk.metadata', 'assertion.result')).alias("cols")) \
.select(F.expr("cols['0']").alias("chunk"),
F.expr("cols['1']").alias("begin"),
F.expr("cols['2']").alias("end"),
F.expr("cols['3']['entity']").alias("ner_label"),
F.expr("cols['3']['sentence']").alias("sent_id"),
F.expr("cols['4']").alias("assertion"),).show(truncate=False)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "f80c498d-3329-409b-b33e-f1b020314d58"}
# ## Pretrained `assertion_dl_radiology` model
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "565efb2d-2d61-4965-8b3c-81d75dd3c93e"}
from sparknlp_jsl.annotator import *
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
# Sentence Detector annotator, processes various sentences per line
sentenceDetector = SentenceDetectorDLModel\
.pretrained("sentence_detector_dl_healthcare","en","clinical/models") \
.setInputCols(["document"]) \
.setOutputCol("sentence")
# Tokenizer splits words in a relevant format for NLP
tokenizer = Tokenizer()\
.setInputCols(["sentence"])\
.setOutputCol("token")
# Clinical word embeddings trained on PubMED dataset
word_embeddings = WordEmbeddingsModel.pretrained("embeddings_clinical", "en", "clinical/models")\
.setInputCols(["sentence", "token"])\
.setOutputCol("embeddings")
# NER model for radiology
radiology_ner = MedicalNerModel.pretrained("ner_radiology", "en", "clinical/models") \
.setInputCols(["sentence", "token", "embeddings"]) \
.setOutputCol("ner")
ner_converter = NerConverter() \
.setInputCols(["sentence", "token", "ner"]) \
.setOutputCol("ner_chunk")\
.setWhiteList(["ImagingFindings"])
# Assertion model trained on radiology dataset
# coming from sparknlp_jsl.annotator !!
radiology_assertion = AssertionDLModel.pretrained("assertion_dl_radiology", "en", "clinical/models") \
.setInputCols(["sentence", "ner_chunk", "embeddings"]) \
.setOutputCol("assertion")
nlpPipeline = Pipeline(stages=[
documentAssembler,
sentenceDetector,
tokenizer,
word_embeddings,
radiology_ner,
ner_converter,
radiology_assertion
])
empty_data = spark.createDataFrame([[""]]).toDF("text")
radiologyAssertion_model = nlpPipeline.fit(empty_data)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "5910a402-1986-4513-9d70-65d67aefa0a6"}
# A sample text from a radiology report
text = """No right-sided pleural effusion or pneumothorax is definitively seen and there are mildly displaced fractures of the left lateral 8th and likely 9th ribs."""
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "9aa1ee2f-efc2-441a-8836-0377e5f1eb41"}
data = spark.createDataFrame([[text]]).toDF("text")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "04cd2f55-bfb5-4063-83f7-b16622ff1491"}
result = radiologyAssertion_model.transform(data)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "c2bd36df-b8de-4aab-884a-9d65160f8c1d"}
result.select(F.explode(F.arrays_zip('ner_chunk.result', 'ner_chunk.metadata', 'assertion.result')).alias("cols")) \
.select(F.expr("cols['0']").alias("chunk"),
F.expr("cols['1']['entity']").alias("ner_label"),
F.expr("cols['1']['sentence']").alias("sent_id"),
F.expr("cols['2']").alias("assertion")).show(truncate=False)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "afef480c-1540-4acc-88c9-5711e5290f3b"}
# ## Writing a generic Assertion + NER function
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "2390fc66-0451-475b-9597-e03e2b3cee09"}
from pyspark.sql.functions import monotonically_increasing_id
def get_base_pipeline (embeddings = 'embeddings_clinical'):
    """Assemble the shared preprocessing pipeline: document -> sentence -> token -> embeddings.

    Parameters
    ----------
    embeddings : str
        Name of the pretrained clinical word-embeddings model to load.

    Returns
    -------
    Pipeline
        Spark ML Pipeline holding the four preprocessing stages, ready to be
        nested inside a task-specific pipeline.
    """
    # Raw text column -> annotated document.
    doc_stage = DocumentAssembler()\
        .setInputCol("text")\
        .setOutputCol("document")

    # Split each document into sentences.
    sentence_stage = SentenceDetector()\
        .setInputCols(["document"])\
        .setOutputCol("sentence")

    # Tokenize each sentence.
    token_stage = Tokenizer()\
        .setInputCols(["sentence"])\
        .setOutputCol("token")

    # Clinical word embeddings trained on PubMed.
    embedding_stage = WordEmbeddingsModel.pretrained(embeddings, "en", "clinical/models")\
        .setInputCols(["sentence", "token"])\
        .setOutputCol("embeddings")

    return Pipeline(stages=[doc_stage, sentence_stage, token_stage, embedding_stage])
def get_clinical_assertion (embeddings, spark_df, nrows = 100, model_name = 'ner_clinical'):
    """Run clinical NER plus assertion-status detection over the first `nrows` rows.

    Parameters
    ----------
    embeddings : str
        Pretrained word-embeddings model name (passed to `get_base_pipeline`).
    spark_df : pyspark.sql.DataFrame
        Input frame with a "text" column.
    nrows : int
        Number of rows to annotate.
    model_name : str
        Pretrained MedicalNerModel to use.

    Returns
    -------
    pyspark.sql.DataFrame
        One row per detected entity with columns: chunk, ner_label, assertion.
    """
    # NER model trained on i2b2 (sampled from MIMIC).
    ner_stage = MedicalNerModel.pretrained(model_name, "en", "clinical/models") \
        .setInputCols(["sentence", "token", "embeddings"]) \
        .setOutputCol("ner")

    converter_stage = NerConverter() \
        .setInputCols(["sentence", "token", "ner"]) \
        .setOutputCol("ner_chunk")

    # Assertion model trained on i2b2 (comes from sparknlp_jsl.annotator).
    assertion_stage = AssertionDLModel.pretrained("assertion_dl", "en", "clinical/models") \
        .setInputCols(["sentence", "ner_chunk", "embeddings"]) \
        .setOutputCol("assertion")

    full_pipeline = Pipeline(stages=[
        get_base_pipeline (embeddings),
        ner_stage,
        converter_stage,
        assertion_stage])

    # All stages are pretrained, so fitting on an empty frame just wires the pipeline.
    empty_frame = spark.createDataFrame([[""]]).toDF("text")
    fitted_model = full_pipeline.fit(empty_frame)

    annotated = fitted_model.transform(spark_df.limit(nrows))
    annotated = annotated.withColumn("id", monotonically_increasing_id())

    # Explode the parallel chunk/metadata/assertion arrays into one row per entity.
    exploded = annotated.select(
        F.explode(F.arrays_zip('ner_chunk.result', 'ner_chunk.metadata', 'assertion.result')).alias("cols"))
    result_df = exploded.select(
        F.expr("cols['0']").alias("chunk"),
        F.expr("cols['1']['entity']").alias("ner_label"),
        F.expr("cols['2']").alias("assertion"))\
        .filter("ner_label!='O'")

    return result_df
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "5cc9a0fc-5187-4e7b-9fee-283b5fd343c9"}
embeddings = 'embeddings_clinical'
model_name = 'ner_clinical'
nrows = 1000
ner_df = get_clinical_assertion (embeddings, pubMedDF, nrows, model_name)
ner_df.show()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "cb696bfc-6a34-4666-92e9-607683a57506"}
embeddings = 'embeddings_clinical'
model_name = 'ner_posology'
nrows = 100
ner_df = get_clinical_assertion (embeddings, pubMedDF, nrows, model_name)
ner_df.show()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "93fe3c92-4d0e-4e96-99d8-08e41f7aeb6b"}
embeddings = 'embeddings_clinical'
model_name = 'ner_posology_greedy'
entry_data = spark.createDataFrame([["The patient did not take a capsule of Advil"]]).toDF("text")
ner_df = get_clinical_assertion (embeddings, entry_data, nrows, model_name)
ner_df.show()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "925cdb03-42c3-43d5-ab5f-f6544fd9b5c3"}
import pandas as pd
def get_clinical_assertion_light (light_model, text):
    """Annotate `text` with a fitted LightPipeline and tabulate entity assertions.

    Parameters
    ----------
    light_model : LightPipeline
        Must expose `fullAnnotate` producing 'ner_chunk' and 'assertion' annotations.
    text : str
        Clinical text to annotate.

    Returns
    -------
    pandas.DataFrame
        Columns: chunks (entity text), entities (NER label), assertion (status).
    """
    annotated = light_model.fullAnnotate(text)[0]
    pairs = list(zip(annotated['ner_chunk'], annotated['assertion']))
    return pd.DataFrame({
        'chunks': [chunk.result for chunk, _ in pairs],
        'entities': [chunk.metadata['entity'] for chunk, _ in pairs],
        'assertion': [status.result for _, status in pairs],
    })
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "2f849a87-7851-4d0b-add0-540c0527d6ba"}
clinical_text = """
Patient with severe fever and sore throat.
He shows no stomach pain and he maintained on an epidural and PCA for pain control.
He also became short of breath with climbing a flight of stairs.
After CT, lung tumor located at the right lower lobe. Father with Alzheimer.
"""
light_model = LightPipeline(model)
get_clinical_assertion_light (light_model, clinical_text)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "bdd0c509-8006-4c5c-9092-40256f47e34c"}
# ## Assertion with BioNLP (Cancer Genetics) NER
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "2ed9f618-83f3-4225-8e4c-79984a17c6ac"}
embeddings = 'embeddings_clinical'
model_name = 'ner_bionlp'
nrows = 100
ner_df = get_clinical_assertion (embeddings, pubMedDF, nrows, model_name)
ner_df.show(truncate = False)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "5b6f9442-9a54-49cc-ad1d-186fc5ea326e"}
# # Assertion Filterer
# AssertionFilterer lets you filter named entities by a list of acceptable assertion statuses. This annotator is handy when you want to set a white list of acceptable assertion statuses, such as present or conditional, and do not want absent conditions to pass through your pipeline.
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "f50d1b8e-edff-4d4c-839f-d0ba3d0b8923"}
clinical_ner = MedicalNerModel.pretrained("ner_clinical", "en", "clinical/models") \
.setInputCols(["sentence", "token", "embeddings"]) \
.setOutputCol("ner")
ner_converter = NerConverter() \
.setInputCols(["sentence", "token", "ner"]) \
.setOutputCol("ner_chunk")
clinical_assertion = AssertionDLModel.pretrained("assertion_dl", "en", "clinical/models") \
.setInputCols(["sentence", "ner_chunk", "embeddings"]) \
.setOutputCol("assertion")
assertion_filterer = AssertionFilterer()\
.setInputCols("sentence","ner_chunk","assertion")\
.setOutputCol("assertion_filtered")\
.setWhiteList(["present"])
nlpPipeline = Pipeline(stages=[
documentAssembler,
sentenceDetector,
tokenizer,
word_embeddings,
clinical_ner,
ner_converter,
clinical_assertion,
assertion_filterer
])
empty_data = spark.createDataFrame([[""]]).toDF("text")
assertionFilter_model = nlpPipeline.fit(empty_data)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "f3800714-30a3-4515-9932-5a8c0c2021a6"}
text = 'Patient has a headache for the last 2 weeks, needs to get a head CT, and appears anxious when she walks fast. Alopecia noted. She denies pain.'
light_model = LightPipeline(assertionFilter_model)
light_result = light_model.annotate(text)
light_result.keys()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b8986c91-53c4-4a08-a36b-1fea93c1c762"}
list(zip(light_result['ner_chunk'], light_result['assertion']))
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "44113efd-939b-4fd1-95f2-110e957a6635"}
assertion_filterer.getWhiteList()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "aa81ca8c-7573-42cb-a895-3981c4ff1bdc"}
light_result['assertion_filtered']
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "647435eb-a456-488f-ade2-f97e4c8de429"}
# # Train a custom Assertion Model
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "2d72802c-1c83-4572-a52e-c7e1074e69cb"}
# !wget -q https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/ner/i2b2_assertion_sample.csv -P /dbfs/
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "711aa642-d5ab-4f57-8063-4451b6ee0d11"}
assertion_df = spark.read.option("header", True).option("inferSchema", "True").csv("/i2b2_assertion_sample.csv")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "d9f89d7b-ce06-43dc-bdff-e53cbb6f8ada"}
assertion_df.show(3, truncate=100)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "eba6ba43-dbd4-4403-b967-8ecde9e0461a"}
(training_data, test_data) = assertion_df.randomSplit([0.8, 0.2], seed = 100)
print("Training Dataset Count: " + str(training_data.count()))
print("Test Dataset Count: " + str(test_data.count()))
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "208b749b-13fa-4780-93a1-d7cd316e81f6"}
training_data.groupBy('label').count().orderBy('count', ascending=False).show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "1dd09bfb-5a71-49b0-8e5a-ce40820d154a"}
document = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
chunk = Doc2Chunk()\
.setInputCols("document")\
.setOutputCol("chunk")\
.setChunkCol("target")\
.setStartCol("start")\
.setStartColByTokenIndex(True)\
.setFailOnMissing(False)\
.setLowerCase(True)
token = Tokenizer()\
.setInputCols(['document'])\
.setOutputCol('token')
embeddings = WordEmbeddingsModel.pretrained("embeddings_clinical", "en", "clinical/models")\
.setInputCols(["document", "token"])\
.setOutputCol("embeddings")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "3b33acf9-6372-4742-b984-748d9c619f21"}
# !mkdir assertion_logs
# !mkdir assertion_tf_graphs
# !wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/nerdl_graph/blstm_34_32_30_200_6.pb -P /databricks/driver/assertion_tf_graphs
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "19b3315c-86e7-4b86-9d0a-18dfd1b1ccc5"}
assertionStatus = AssertionDLApproach()\
.setLabelCol("label")\
.setInputCols("document", "chunk", "embeddings")\
.setOutputCol("assertion")\
.setBatchSize(128)\
.setDropout(0.1)\
.setLearningRate(0.001)\
.setEpochs(50)\
.setValidationSplit(0.2)\
.setStartCol("start")\
.setEndCol("end")\
.setMaxSentLen(250)\
.setEnableOutputLogs(True)\
.setOutputLogsPath('file:/databricks/driver/assertion_logs/')\
.setGraphFolder('file:/databricks/driver/assertion_tf_graphs')
# set this path to graphs if you have a different number of classes other than 6. (use the 2.1 Assertion Model Graph generation notebook)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "0a0f0955-e845-42bd-9d26-8aae97611e9d"}
# for a custom tf_graph
'''
from sparknlp_jsl.training import tf_graph
import sys
sys.path.append('/databricks/driver/tf_graph')
tf_graph.print_model_params("assertion_dl")
feat_size = 200
n_classes = 6
tf_graph.build("assertion_dl",build_params={"n_classes": n_classes}, model_location="dbfs:/databricks/driver/tf_graphs", model_filename="blstm_34_32_30_{}_{}.pb".format(feat_size, n_classes))
'''
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "a6afa3df-0e11-49e7-b388-553ff51b6837"}
'''
assertionStatus = AssertionLogRegApproach()\
.setLabelCol("label")\
.setInputCols("document", "chunk", "embeddings")\
.setOutputCol("assertion")\
.setMaxIter(100) # default: 26
'''
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "894628f4-bb2e-448e-bcd1-b29462f9b9cb"}
clinical_assertion_pipeline = Pipeline(
stages = [
document,
chunk,
token,
embeddings,
assertionStatus])
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "9327a04d-1037-4261-b766-951d9a902ee0"}
assertion_model = clinical_assertion_pipeline.fit(training_data)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "5f35ecae-055a-4c36-a029-039957c159e0"}
# %sh cd assertion_logs && ls -lt
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "f0fa1137-1cab-4731-b92a-70fd5f56aa84"}
# %sh tail -n 26 assertion_logs/AssertionDLApproach_339d60f160f7.log
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "0bda53e4-8351-4171-b994-8e9f93b2e93c"}
preds = assertion_model.transform(test_data).select('label','assertion.result')
preds.show()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "43bcfc6a-4c3a-40b8-8b26-5da14ce8682a"}
preds_df = preds.toPandas()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "69926665-71df-4ee2-ab7e-bc70099feeba"}
preds_df['result'] = preds_df['result'].apply(lambda x : x[0])
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "d5d83d2b-95a4-4a14-a356-f85201bc641d"}
# We are going to use sklearn to evalute the results on test dataset
from sklearn.metrics import classification_report
print (classification_report(preds_df['result'], preds_df['label']))
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "ad145896-0369-40b2-8c05-85351675e7a0"}
assertion_model.stages[-1].write().overwrite().save('/databricks/driver/models/custom_assertion_model')
| tutorials/Certification_Trainings/Healthcare/databricks_notebooks/5. Training and Reusing Assertion Status Models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="ztgmlMKrd9sl"
#Import necessary libraries
import numpy as np
import time
import pandas as pd
import os
import cv2
import PIL.Image as Image
import matplotlib.pylab as plt
import tensorflow as tf
import tensorflow_hub as hub
# + id="RQvdKJcdmARW"
#Mount Drive
from google.colab import drive
drive.mount('/googledrive')
# Create a symbolic link to our Google Drive
# ! mkdir -p /googledrive/MyDrive/colabdrive
# ! ln -snf /googledrive/MyDrive/colabdrive/ /colabdrive
# + id="G9IOUHcAtn3H"
# !unzip '/googledrive/MyDrive/colabdrive/output' -d '/googledrive/MyDrive/colabdrive/'
# + id="YyDG90OMv0Xg"
TRAINING_DATASET_PATH="/colabdrive/output/train"
TEST_DATASET_PATH="/colabdrive/output/test"
VALIDATION_DATASET_PATH="/colabdrive/output/val"
EXPORT_PATH='/colabdrive/Saved Model'
TFLITE_EXPORT_PATH=EXPORT_PATH+"/model.tflite"
# + id="ijViOwSVdjOE"
batch_size = 32 # Set batch size
img_height = 224 #Set image height
img_width = 224 #Set image width
#Creating test, train and validation sets; set the image dimensions and the batch size
trainSet = tf.keras.preprocessing.image_dataset_from_directory(
str(TRAINING_DATASET_PATH),
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
testSet = tf.keras.preprocessing.image_dataset_from_directory(
str(TEST_DATASET_PATH),
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
validationSet = tf.keras.preprocessing.image_dataset_from_directory(
str(VALIDATION_DATASET_PATH),
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
# + id="_QfArGVZd49b"
#Retrieve class names and the number of classes
classNames = np.array(validationSet.class_names)
numberOfClasses = len(classNames)
# + id="f1dLYG_D2Okj"
# Function for Rescaling and setiing the buffer size for datasets
def dataProcessing(dataset):
    """Rescale image pixel values to [0, 1] and enable cached, prefetched loading."""
    rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./255)
    scaled = dataset.map(lambda image, label: (rescale(image), label))
    # Cache after the map so rescaling runs once, then overlap loading with training.
    return scaled.cache().prefetch(buffer_size=tf.data.AUTOTUNE)
# + id="b_bCC5hzd8sv"
#Rescaling of images from datasets
trainSet = dataProcessing(trainSet)
testSet = dataProcessing(testSet)
validationSet = dataProcessing(validationSet)
# + id="hElwTmtwekA9"
#IF YOU RUN THE FOLLOWING CELLS, YOU WILL TRAIN A MODEL. YOU HAVE THE OPTION TO LOAD AN ALREADY TRAINED MODEL AND SAVE TIME BY RUNNING THE CELL (LOAD THE TRAINED MODEL)
# Seting the feature extractor layer using a pre-treined headless layer from TensorFlow Hub
feature_extractor_layer = hub.KerasLayer(
"https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/feature_vector/5", input_shape=(224, 224, 3), trainable=False)
#Creating a Sequential model using the feature extractor layer and a top layer for classification of traffic signs
model = tf.keras.Sequential([
feature_extractor_layer,
tf.keras.layers.Dense(numberOfClasses)
])
# + id="x6Q-FnOug7ay"
#Summary of the model
model.summary()
# + id="dJ1f9P5DjNK_"
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['acc'])
# + id="b4OHEPrdjWfO"
#Custom callback to log the loss and accuracy of each batch individually, instead of the epoch average.
class CollectBatchStats(tf.keras.callbacks.Callback):
    """Keras callback that records per-batch loss/accuracy instead of epoch averages.

    After each training batch it appends the batch's metrics and resets the
    model's metric accumulators so every batch is logged independently.
    """

    def __init__(self):
        # BUG FIX: the base Callback __init__ must run so Keras can attach
        # `self.model` / `self.params` correctly during fit().
        super().__init__()
        self.batch_losses = []  # loss value after each training batch
        self.batch_acc = []     # accuracy after each training batch

    def on_train_batch_end(self, batch, logs=None):
        self.batch_losses.append(logs['loss'])
        self.batch_acc.append(logs['acc'])
        # Reset running metrics so each batch's numbers are independent.
        self.model.reset_metrics()
# + id="pnPTmn9RjaJ5"
#Model training with the evaluation of each batch
batchStatsCallback = CollectBatchStats()
history = model.fit(trainSet, epochs=4,validation_data = validationSet,
callbacks=[batchStatsCallback])
# + id="yG5O3jrMOiFB"
#Evaluation of the model
score = model.evaluate(testSet, verbose=0)
print(f'Test loss: {score[0]} / Test accuracy: {score[1]}')
# + id="OZb71ALblG4o"
t = time.time()
export_path = EXPORT_PATH+"/{}".format(int(t))
model.save(export_path)
export_path
# + id="1cWDJhKRll7L"
#LOAD THE TRAINED MODEL
#Reload the saved model
reloadedModel = tf.keras.models.load_model(export_path)
# + id="ZRdS1Wcw-b8b"
#Visualisation of model predictions for the first batch of images
for imageBatch,_ in testSet:
break
reloadedResultBatch= reloadedModel.predict(imageBatch)
predictedId = np.argmax(reloadedResultBatch, axis=-1)
predicteLlabelBatch = classNames[predictedId]
plt.figure(figsize=(10,9))
plt.subplots_adjust(hspace=0.5)
for n in range(30):
plt.subplot(6,5,n+1)
plt.imshow(imageBatch[n])
plt.title(predicteLlabelBatch[n].title())
plt.axis('off')
_ = plt.suptitle("Model predictions")
# + id="4P4QfU0tl-DM"
PredictionsOnTestSet = reloadedModel.predict(testSet)
# + id="RQIjrRPTc9MD"
#Convert the model into a TF Lite model and saving it on drive
converter = tf.lite.TFLiteConverter.from_saved_model(export_path) # path to the SavedModel directory
tfliteModel = converter.convert()
with open(EXPORT_PATH+"/model.tflite", 'wb') as f:
f.write(tfliteModel)
# + id="DYr-LyhQdd5N"
#Load the TF Lite model
tfliteInterpreter = tf.lite.Interpreter(TFLITE_EXPORT_PATH)
inputDetails = tfliteInterpreter.get_input_details() #Get input details
outputDetails = tfliteInterpreter.get_output_details() #Get output details
tfliteInterpreter.resize_tensor_input(inputDetails[0]['index'], (1, 224, 224, 3)) #Resize the input for making prediction on an image
tfliteInterpreter.resize_tensor_input(outputDetails[0]['index'], (1, len(classNames)))
tfliteInterpreter.allocate_tensors()
# + id="yYDIybwSnYXq"
testImage=Image.open(TEST_DATASET_PATH+'/20/00000_00000_00027.png').resize((224,224)) #Lite model predictions on an image
testImage=np.array(testImage, dtype=np.float32)/255.0
testImage.shape
Img = testImage[np.newaxis, ...]
startTime = time.time()
result = tfliteInterpreter.set_tensor(inputDetails[0]['index'],Img)
tfliteInterpreter.invoke()
tfliteModelPredictions = tfliteInterpreter.get_tensor(outputDetails[0]['index'])
endTime = time.time()
predictedClass = np.argmax(tfliteModelPredictions[0], axis=-1)
predictedClassName = classNames[predictedClass]
duration = endTime - startTime
print(f"Prediction time for an image: {duration} seconds")
# + id="kY1AJOVIszDM"
#TF Lite model predictions on an image
plt.figure(figsize=(10,9))
plt.subplots_adjust(hspace=0.5)
plt.imshow(testImage)
plt.title(predictedClassName.title())
plt.axis('off')
_ = plt.suptitle("Model predictions for an image")
# + id="gFrEpSN5GmpM"
# Accuracy of the lite model on test dataset
def load_dataset(directory):
"""
Returns:
X_orig -- np.array containing all images
y_orig -- np.array containing all image labels
"""
y_orig = [] # store class number
X_orig = []
for category in os.listdir(directory):
flower_path = os.path.join(directory, category)
for file_name in os.listdir(flower_path):
img = cv2.cvtColor(cv2.imread(os.path.join(flower_path, file_name)), cv2.COLOR_BGR2RGB)
if img is not None :
resized=cv2.resize(img,(224,224))/255.0
X_orig.append(resized)
y_orig.append(category)
y_orig = np.array(y_orig)
# y_orig = y_orig.reshape((1, y_orig.shape[0]))
return X_orig, y_orig
# + id="9am5Q53eHbhW"
X, y = load_dataset(TEST_DATASET_PATH)
# + id="akPhDIfOJ7mm"
predictedLabels = list()
totalInferenceTime = 0.0  # accumulated per-image inference time (seconds)
for i in range(len(X)):
    startTimeAverage = time.time()
    # Add the batch dimension and cast to the float32 input the TF Lite model expects.
    Img = X[i][np.newaxis, ...]
    Img = np.array(Img, dtype=np.float32)
    result = tfliteInterpreter.set_tensor(inputDetails[0]['index'], Img)
    tfliteInterpreter.invoke()
    tfliteModelPredictions = tfliteInterpreter.get_tensor(outputDetails[0]['index'])
    endTimeAverage = time.time()
    totalInferenceTime += endTimeAverage - startTimeAverage
    predictedClass = np.argmax(tfliteModelPredictions[0], axis=-1)
    predictedClassName = classNames[predictedClass]
    predictedLabels.append(predictedClassName)
# + id="LFin7Iv01swA"
# BUG FIX: the original divided only the LAST image's duration by the number of
# images, which is not an average; use the accumulated total instead.
averageTimePerImage = totalInferenceTime / len(predictedLabels)
print("Average time for making one prediction: ", averageTimePerImage)
# + id="AbFwdkby4_XP"
def compute_accuracy(realLabels, predictedLabels):
    """Return the percentage of positions where the two label sequences agree.

    Parameters
    ----------
    realLabels, predictedLabels : sequence
        Parallel sequences of labels; compared element-wise by position.

    Returns
    -------
    float
        Accuracy in percent.  Returns 0.0 for empty input instead of raising
        ZeroDivisionError (the original crashed on an empty test set).
    """
    if len(realLabels) == 0:
        return 0.0
    # Count positional matches in one pass.
    correctPredictions = sum(1 for real, pred in zip(realLabels, predictedLabels)
                             if real == pred)
    return correctPredictions / len(realLabels) * 100
# + id="Ta2wPfPhLhO8"
print("Lite model accuracy: ",compute_accuracy(y,predictedLabels))
# + id="MsNS36lMLoL3"
| Assignment2_Transfer-learning/Assignment2_Transfer_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **NOTĂ**
# Când utilizăm [Binder](https://mybinder.org/), comanda ``python -m pip install --user numpy plotly`` trebuie rulată în terminal înainte de pornirea **notebook**-ului!
# # Separatoare în planul fazelor
# **Separatoarele** sunt curbe care separă zonele *cu dinamică diferită*. Ele sunt graniţa dintre orbitele închise şi cele nemărginite.
import numpy as np
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, iplot
from decimal import Context, ROUND_HALF_EVEN, FloatOperation, setcontext, Decimal
contextul = Context(prec=128, rounding=ROUND_HALF_EVEN, Emin=-999999, Emax=999999, capitals=1, clamp=1,
flags=[], traps=[FloatOperation])
setcontext(contextul)
init_notebook_mode(connected=True)
# Construim aproximări ale constantei $\pi$ şi ale funcţiilor **sin**, **exp**, conform [recomandării oficiale](https://docs.python.org/3/library/decimal.html#recipes):
# +
def pi():
    '''
    Compute Pi to the current precision.

    Series recipe from the official `decimal` module documentation; iterates
    until the partial sum stops changing at the working precision of the
    module-level context `contextul`.

    >>> print(pi())
    3.141592653589793238462643383
    '''
    contextul.prec += 2  # extra digits for intermediate steps
    three = Decimal(3)   # substitute "three=3.0" for regular floats
    lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24
    while s != lasts:    # converged once adding the next term changes nothing
        lasts = s
        n, na = n+na, na+8
        d, da = d+da, da+32
        t = (t * n) / d
        s += t
    contextul.prec -= 2
    return +s            # unary plus applies the new precision
def exp(x):
    '''
    Return e raised to the power of x.  Result type matches input type.

    Sums the Maclaurin series for e**x (recipe from the official `decimal`
    module documentation) until the partial sum is stable at the working
    precision of the module-level context `contextul`.

    >>> print(exp(Decimal(1)))
    2.718281828459045235360287471
    >>> print(exp(Decimal(2)))
    7.389056098930650227230427461
    >>> print(exp(2.0))
    7.38905609893
    >>> print(exp(2+0j))
    (7.38905609893+0j)
    '''
    contextul.prec += 2   # guard digits for intermediate steps
    i, lasts, s, fact, num = 0, 0, 1, 1, 1
    while s != lasts:     # converged once adding the next term changes nothing
        lasts = s
        i += 1
        fact *= i         # running i!
        num *= x          # running x**i
        s += num / fact
    contextul.prec -= 2
    return +s             # unary plus rounds to the restored precision
def sin(x):
    """Return the sine of x as measured in radians.

    The argument is reduced modulo 2*pi BEFORE the Taylor series is seeded,
    so large arguments converge to the correct value.

    >>> print(sin(Decimal('0.5')))
    0.4794255386042030002732879352
    >>> print(sin(0.5))
    0.479425538604
    >>> print(sin(0.5+0j))
    (0.479425538604+0j)
    """
    contextul.prec += 2
    # Bug fix: the range reduction used to run AFTER s and num were already
    # initialised with the unreduced x, so the series was seeded with the
    # wrong value and the result was incorrect for |x| >= 2*pi.
    x = x % (2 * pi())  # for large arguments
    i, lasts, s, fact, num, sign = 1, 0, x, 1, x, 1
    while s != lasts:
        lasts = s
        i += 2
        fact *= i * (i-1)
        num *= x * x
        sign *= -1
        s += num / fact * sign
    contextul.prec -= 2
    return +s
# -
# Calcule privind matricea $A\in\mathcal{M}_{2}(\mathbb{R})$
# ----------------------------------------------------------
# Entries of the 2x2 system matrix A as exact decimals
a11 = Decimal('1.0')
a12 = Decimal('0.0')
a21 = Decimal('0.0')
a22 = Decimal('-2.0')
# A kept with exact Decimal entries; Af is the plain-float copy for numpy.linalg
matricea_A = np.array([[a11, a12], [a21, a22]], dtype=np.object_)
matricea_Af = np.array([[float(a11), float(a12)], [float(a21), float(a22)]])
# Pentru calcule cu **float** ([floating-point numbers](https://en.wikipedia.org/wiki/Floating-point_arithmetic)) de înaltă precizie, vezi [mpmath](http://mpmath.org/).
# Elemente spectrale ale matricei $A$
# -----------------------------------
# Urma ([trace](https://en.wikipedia.org/wiki/Trace_(linear_algebra))):
# Trace computed both on the exact Decimal matrix and on the float copy
Trace_A = np.trace(matricea_A)
Trace_Af = np.trace(matricea_Af)
print("Trace(A) = {0:.16f}, Trace(Af) = {1:.16f}".format(Trace_A, Trace_Af))
# Valorile proprii ([eigenvalues](https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors)), vectorii proprii ([eigenvectors](https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors#Eigenspaces,_geometric_multiplicity,_and_the_eigenbasis_for_matrices)):
# NOTE(review): np.linalg.eig returns eigenvectors as COLUMNS of the second
# result; unpacking "[v1, v2]" takes its ROWS.  That coincides with the
# eigenvectors here only because matricea_Af is diagonal — confirm before
# reusing this cell with a general matrix.
([lambda1, lambda2], [v1, v2]) = np.linalg.eig(matricea_Af)
print("Valorile proprii ale matricei Af:\n"
      "lambda1 = {0:.16f}\nlambda2 = {1:.16f}"
      "\nVectorii proprii:\n"
      "v1 = ({2:.16f}, {3:.16f})\n"
      "v2 = ({4:.16f}, {5:.16f})".format(lambda1, lambda2, v1[0], v1[1], v2[0], v2[1]))
# Folosind eigenvectorii, construim **datele Cauchy** ale soluţiilor $\mbox{e}^{\lambda_{i}t}\cdot\overline{v}_{i}$, unde $i\in\overline{1,2}$.
# Determinantul, vezi [det](https://github.com/numpy/numpy/blob/master/numpy/linalg/linalg.py#L2103):
Det_Af = np.linalg.det(matricea_Af)
print("det Af = {0:.16f}".format(Det_Af))
# Formulele ecuaţiilor din sistemul diferenţial:
def sist_dif(X, Y, Z, t):
    """Right-hand side of the ODE system, evaluated with exact Decimals.

    Returns the tuple (dX, dY, dZ, T).  All four inputs are converted with
    Decimal() so non-decimal arguments are rejected by the active context.
    The commented alternatives are the other systems studied in the course.
    """
    X_loc = Decimal(X)
    Y_loc = Decimal(Y)
    Z_loc = Decimal(Z)  # converted for validation even though unused below
    T_loc = Decimal(t)
    # mathematical pendulum (Hirsch & Smale, NODE, IV-th Ed., pag. 12):
    #   ec_X = Y_loc
    #   ec_Y = -sin(X_loc)
    #   ec_Z = Decimal('0')
    #   ec_T = T_loc
    # nonlinear spring (NODE, IV-th Ed., pag. 19):
    #   ec_X = Y_loc
    #   ec_Y = -X_loc * exp(-X_loc)
    #   ec_Z = Decimal('0')
    #   ec_T = T_loc
    # linear system (NODE, IV-th Ed., pag. 63 and onward):
    a11 = Decimal('1.0')
    a12 = Decimal('0.0')
    a21 = Decimal('0.0')
    a22 = Decimal('-2.0')
    return (a11 * X_loc + a12 * Y_loc,
            a21 * X_loc + a22 * Y_loc,
            Decimal('0'),
            T_loc)
# Pasul $h$ este:
# RK4 step size h and its precomputed fractions (exact decimals)
h = Decimal('0.01')
h2 = Decimal('0.5') * h    # h/2 — used by the two midpoint stages
h3 = h / Decimal('6.0')    # h/6 — weight of the final RK4 combination
# Numărul de iteraţii $n$ este:
# +
n = 10_000
# 6_000 for the mathematical pendulum
# 1_000 for the nonlinear spring
# 10_000 for the linear system
# +
r2 = range(n)      # one slot per integration step (stage data)
r3 = range(n + 1)  # n steps plus the initial state
# Trajectory arrays; dtype=object so numpy stores exact Decimal values.
# The range contents are placeholders, overwritten by the RK4 loop below.
X = np.array(r3, dtype=np.object_)
Y = np.array(r3, dtype=np.object_)
Z = np.array(r3, dtype=np.object_)
t = np.array(r3, dtype=np.object_)
# Stage slopes k* and intermediate states z* for the four RK4 stages
kX1 = np.array(r2, dtype=np.object_)
kY1 = np.array(r2, dtype=np.object_)
kZ1 = np.array(r2, dtype=np.object_)
zX1 = np.array(r2, dtype=np.object_)
zY1 = np.array(r2, dtype=np.object_)
zZ1 = np.array(r2, dtype=np.object_)
kX2 = np.array(r2, dtype=np.object_)
kY2 = np.array(r2, dtype=np.object_)
kZ2 = np.array(r2, dtype=np.object_)
zX2 = np.array(r2, dtype=np.object_)
zY2 = np.array(r2, dtype=np.object_)
zZ2 = np.array(r2, dtype=np.object_)
kX3 = np.array(r2, dtype=np.object_)
kY3 = np.array(r2, dtype=np.object_)
kZ3 = np.array(r2, dtype=np.object_)
zX3 = np.array(r2, dtype=np.object_)
zY3 = np.array(r2, dtype=np.object_)
zZ3 = np.array(r2, dtype=np.object_)
kX4 = np.array(r2, dtype=np.object_)
kY4 = np.array(r2, dtype=np.object_)
kZ4 = np.array(r2, dtype=np.object_)
kXRK4 = np.array(r2, dtype=np.object_)
kYRK4 = np.array(r2, dtype=np.object_)
kZRK4 = np.array(r2, dtype=np.object_)
# +
# Cauchy data (X0, Y0, Z0, t0) for the orbit studied below
X[0], Y[0], Z[0], t[0] = (Decimal('0.01'), Decimal('0.0'), Decimal('1.0'), Decimal('0.0'))
# mathematical pendulum: cat's eye (Decimal('0.0'), Decimal('2.0'), Decimal('1.0'), Decimal('0.0'))
# mathematical pendulum: open orbit (Decimal('0.0'), Decimal('2.16'), Decimal('1.0'), Decimal('0.0'))
# mathematical pendulum: closed orbit (Decimal('0.0'), Decimal('1.9'), Decimal('1.0'), Decimal('0.0'))
# nonlinear spring: open orbit (Decimal('8.751035'), Decimal('-1.7280'), Decimal('1.0'), Decimal('0.0'))
# nonlinear spring: closed orbit (Decimal('0.0'), Decimal('1.0'), Decimal('1.0'), Decimal('0.0'))
# linear system (1,0,0,-2): straight-line orbit (for the positive eigenvalue) (Decimal('0.0'), Decimal('1.0'), Decimal('1.0'), Decimal('0.0'))
# linear system (1,0,0,-2): straight-line orbit (for the negative eigenvalue) (Decimal('0.01'), Decimal('0.0'), Decimal('1.0'), Decimal('0.0'))
# -
# Classic fourth-order Runge–Kutta: four slope evaluations per step,
# then the weighted combination h/6 * (k1 + 2*k2 + 2*k3 + k4).
for i in r2:
    i2 = i + 1
    ti = t[i]
    ti2 = ti + h2  # midpoint time t_i + h/2
    ti3 = ti + h   # end time t_i + h
    t[i2] = ti3  # t[i+1] = t[i] + h
    kX1[i], kY1[i], kZ1[i], ti = sist_dif(X[i], Y[i], Z[i], ti)  # [stage 1]
    zX1[i] = X[i] + h2 * kX1[i]
    zY1[i] = Y[i] + h2 * kY1[i]
    zZ1[i] = Z[i] + h2 * kZ1[i]
    kX2[i], kY2[i], kZ2[i], ti2 = sist_dif(zX1[i], zY1[i], zZ1[i], ti2)  # [stage 2]
    zX2[i] = X[i] + h2 * kX2[i]
    zY2[i] = Y[i] + h2 * kY2[i]
    zZ2[i] = Z[i] + h2 * kZ2[i]
    kX3[i], kY3[i], kZ3[i], ti2 = sist_dif(zX2[i], zY2[i], zZ2[i], ti2)  # [stage 3]
    zX3[i] = X[i] + h * kX3[i]
    zY3[i] = Y[i] + h * kY3[i]
    zZ3[i] = Z[i] + h * kZ3[i]
    kX4[i], kY4[i], kZ4[i], ti3 = sist_dif(zX3[i], zY3[i], zZ3[i], ti3)  # [stage 4]
    kXRK4[i] = h3 * (kX1[i] + 2 * kX2[i] + 2 * kX3[i] + kX4[i])  # [stage 5]
    kYRK4[i] = h3 * (kY1[i] + 2 * kY2[i] + 2 * kY3[i] + kY4[i])
    kZRK4[i] = h3 * (kZ1[i] + 2 * kZ2[i] + 2 * kZ3[i] + kZ4[i])
    X[i2] = X[i] + kXRK4[i]
    Y[i2] = Y[i] + kYRK4[i]
    Z[i2] = Z[i] + kZRK4[i]
    # Prints the state at the START of step i; the final state X[n] is
    # therefore never printed by this loop.
    print("+++\nN = {0:d}, "
          "x({1}) = {2:.16f}, y({3}) = {4:.16f}, z({5}) = {6:.16f}".format(i, i, X[i], i, Y[i], i, Z[i]))
# La iteraţia numărul $J$, unde
J = 564  # iteration of interest for the chosen system
# 803 for the mathematical pendulum
# 564 for the nonlinear spring
# au fost obţinute valorile:
print("x({0:d}) = {1:.16f}, y({2:d}) = {3:.16f}, z({4:d}) = {5:.16f}".format(J, X[J], J, Y[J], J, Z[J]))
# Pregătirea vizualizării:
# 3D scatter of the computed orbit (the Decimal samples X, Y, Z)
traiectoria = go.Scatter3d(
    # the image can be saved only as "png"; see the note in "https://plot.ly/python/static-image-export/#vector-formats-svg-and-pdf"
    x=X, y=Y, z=Z, mode='markers',
    marker=dict(color='rgb(127,0,255)', size=4, symbol='circle',
                line=dict(color='rgb(255,245,0)', width=1),
                opacity=0.8))
datele = [traiectoria]
aspectul = go.Layout(margin=dict(l=0, r=0, b=0, t=0))
# Vizualizarea interativă a datelor:
fig = go.Figure(data=datele, layout=aspectul)
iplot(fig)
| intro_SD_2019_laborator4_separatoare_3D_RK4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from unwarping_functions import *
# ## Illustrative Figures
# This part of the notebook generates illustrative figures of the paper related to unwarping of periodic bandlimitted functions.
# Settings
K = 3  # bandlimit: 2K+1 Fourier coefficients
np.random.seed(10)  # reproducible coefficients
# Symmetrised random coefficients (a[k] == a[-k]) so the signal is real
a = np.random.uniform(-1, 1, 2*K+1); a = (a + a[::-1]) / 2.
b = 1.6  # warping parameter
font_size = 8
# +
# Example for DIRAChlet
# Settings
T = 0.5
alpha = 2 * np.pi / (2*K+1) * T * b  # angular spacing of the warped samples
# DIRAChlet
# s is the real part of the sum of unit phasors at the wrapped angles
theta = np.arange(-K, K+1) * alpha % (2*np.pi)
s = np.real(np.sum(np.exp(1j * theta)))
print("s = " + str(s))
def Y_fun(w):
    """Order-K Dirichlet kernel D(e^{jw}) evaluated at frequency w."""
    numerator = np.sin(w * (K + 0.5))
    denominator = np.sin(w / 2.)
    return numerator / denominator
# Plot D(e^{jw}) against the constant level s.  The even-length grid does
# not contain w = 0 exactly, so the 0/0 point of the kernel is avoided.
w = np.linspace(-np.pi, np.pi, 1000)
Y = Y_fun(w)
fig = plt.figure(figsize=(5, 3))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
ax = fig.add_subplot(1,1,1)
ax.plot(w, Y, label='$D(e^{j\omega})$', color='b', linewidth=2)
ax.axhline(y=s, color='r', linewidth=2, label='$s = ' + str(round(s, 2)) + '$')
ax.axhline(y=0, color='k')
ax.set_xticks([-np.pi, 0, np.pi])
ax.set_xticklabels(['$-\pi$', '$0$', '$\pi$'], fontsize=12)
ax.set_yticks([-2, 0, 2, 4, 6])
ax.set_yticklabels(['$-2$', '$0$', '$2$', '$4$', '$6$'], fontsize=12)
ax.set_xlim(-np.pi, np.pi)
ax.set_ylim(-2, 7.5)
ax.set_xlabel('$\omega$', fontsize=12)
plt.legend(fontsize=12)
ax.grid()
plt.tight_layout()
plt.savefig('dirachlet_s1.pdf', format = 'pdf')
# +
# Envelope
# Dirichlet kernel against its 1/sin(w/2) envelope, with the critical
# angles alpha_c and alpha~_c marked by dashed guide lines.
w1 = np.linspace(0.001, np.pi, 1000)  # starts slightly above 0 to avoid the 0/0 point
w2 = np.linspace(.2, np.pi, 1000)
Y = Y_fun(w1)
fig = plt.figure(figsize=(6, 4.5))
ax = fig.add_subplot(111)
plt.axhline(y=0, color='k', linewidth=0.75)
# curves
plt.plot(w1, Y, color='b', label='$D(e^{j\omega})$')
plt.plot(w2, 1/np.sin(w2/2.), color='r', label='$1/\sin(\omega/2)$')
# alpha_c and alpha_cl lines
x_c = 3*np.pi / (2 * K + 1) - 0.575  # hand-tuned position of the alpha_c marker
plt.plot([x_c, np.pi], [np.abs(Y_fun(x_c)), np.abs(Y_fun(x_c))], color='k', linestyle='--', linewidth=0.75)
plt.plot([x_c, x_c], [0, Y_fun(x_c)], color='k', linewidth=0.75, linestyle='--')
plt.plot([x_c], [Y_fun(x_c)], marker='o', color='k', markersize=4)
ax.text(x_c, -.55,'$\\alpha_c$', fontsize=16)
x = 4*np.pi / (2 * K + 1)
plt.plot([x], [1 / np.sin(x/2)], marker='x', color='r', markersize=4)
plt.plot([x, x], [0, 1 / np.sin(x/2)], color='r', linewidth=0.75, linestyle='--')
x = x_c - 0.016
plt.plot([x, x], [0, Y_fun(x)], color='r', linewidth=0.75, linestyle='--')
plt.plot([x, np.pi], [np.abs(Y_fun(x)), np.abs(Y_fun(x))], color='r', linestyle='--', linewidth=0.75)
plt.plot([x], [Y_fun(x)], marker='o', color='r', markersize=4)
ax.text(x-.1, -.55,'$\\tilde{\\alpha}_{c}$', color='r', fontsize=16)
ax.set_xticks([0, np.pi])
ax.set_xticklabels(['$0$', '$\pi$'], fontsize=16)
ax.set_yticks([])
ax.set_yticklabels([])
plt.xlabel('$\omega$', fontsize=16)
plt.legend(fontsize=16)
plt.ylim((-2, 8))
plt.xlim((0, np.pi))
plt.tight_layout()
plt.savefig('dirachlet_s_envelope.pdf', format='pdf')
# +
# Diracs on circle
N = 1000
theta_grid = np.linspace(-np.pi, np.pi, N, endpoint=False)
x = np.cos(theta_grid)
y = np.sin(theta_grid)
z = np.zeros(len(theta_grid))
np.random.seed(3111111)
# Fresh symmetric coefficients for this figure (rebinds a from above)
a = np.random.uniform(-1, 1, 2*K+1); a = (a + a[::-1]) / 2.
T = 0.5
# One panel per warping parameter b; each gives spacing alpha below
b = [1, 4, 35/16.0, 7/3.0, 2.5, 14/3., 5]
label_distance = 1.18  # radial position of the angle labels around the circle
alpha = [2 * np.pi / (2*K+1) * T * _b for _b in b]
def periodize_angle(theta):
    """Wrap angles (radians) onto the interval (-pi, pi]."""
    two_pi = 2 * np.pi
    wrapped = np.array(theta % two_pi)
    wrapped[wrapped > np.pi] -= two_pi
    return wrapped
# First entry: the uniform reference grid; then one wrapped set per alpha
theta_supposed = [np.arange(-K, K+1) * 2 * np.pi / (2*K+1)] + [periodize_angle(np.arange(-K, K+1) * _alpha) for _alpha in alpha]
s_list = [round(find_s(theta),2) for theta in theta_supposed[1:]]
names = ["$F(\omega)$",
         "$b = 1, \\alpha < \\alpha_{c}$",
         "$b = 4, \\alpha = \\frac{4\pi}{2K+1}$",
         "$b = \\frac{35}{16}, \\alpha_{c} < \\alpha < \\frac{\pi}{K}$",
         "$b = \\frac{7}{3}, \\alpha = \\frac{\pi}{K}$",
         "$b = 2.5, \\alpha > \\frac{\pi}{K}$",
         "$b = \\frac{14}{3}, \\alpha = \\frac{2\pi}{K}$",
         "$b = 5, \\alpha > \\frac{\pi}{K}$"]
# Append the computed s value to every warped-panel title
names = [names[0]] + [nn + '$, s = ' + str(ss) + '$' for nn, ss in zip(names[1:], s_list)]
fig = plt.figure(figsize=(4,6))
for i in range(len(theta_supposed)):
    ax = fig.add_subplot(4, 2, i+1)
    if i == 0:
        # first panel: stem plot of the coefficients over (-pi, pi)
        for j in range(len(theta_supposed[i])):
            ax.plot([theta_supposed[i][j], theta_supposed[i][j]], [0, a[j]], 'gray', linewidth=.75)
        ax.scatter(theta_supposed[i], a, c=np.arange(0, 2*len(a), 2), s=15, alpha=1)
        ax.set_ylim(-1, 1)
        ax.set_xlim(-np.pi, np.pi)
        ax.set_xticks([-np.pi, 0, np.pi])
        ax.set_xticklabels(['$-\pi$', '$0$', '$\pi$'], fontsize=font_size)
        ax.set_yticks([])
        ax.set_yticklabels([], fontsize=font_size)
        ax.grid()
        ax.axhline(y=0, color='k', linewidth=1)
    else:
        # remaining panels: Dirac positions shown on the unit circle
        theta_circle = np.linspace(0, 2*np.pi, 1000, endpoint=False)
        ax.plot(np.cos(theta_circle), np.sin(theta_circle), linewidth=0.5, color='black')
        ax.scatter(np.cos(theta_supposed[i]), np.sin(theta_supposed[i]), c=np.arange(0, 2*len(a), 2), s=15, alpha=1)
        ax.set_xlim(-1.3, 1.3)
        ax.set_ylim(-1.3, 1.3)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('equal')
        # ax.axis('off')
        # 14 dashed spokes, i.e. one every pi/7 radians
        theta_grid = np.linspace(0, 2*np.pi, 14, endpoint=False)
        x_grid = np.cos(theta_grid)
        y_grid = np.sin(theta_grid)
        ax.text(label_distance, 0, "0", ha='center', va='center', fontsize=font_size)
        ax.text(-label_distance, 0, "$\pi$", ha='center', va='center', fontsize=font_size)
        ax.text(label_distance*np.cos(np.pi/7), label_distance*np.sin(np.pi/7), "$\\frac{\pi}{7}$", ha='center', va='center', fontsize=font_size)
        for j in range(len(theta_grid)):
            ax.plot([0, x_grid[j]], [0, y_grid[j]], '--', linewidth=0.5, color='black')
    ax.set_title(label=names[i], fontsize=font_size)
plt.tight_layout()
plt.savefig('diracsOnCircle.pdf', format='pdf')
# -
# ## Simulation Figures
# This part of the notebook generates simulation figures of the paper related to unwarping of periodic bandlimitted functions.
# load simulation results
# Load the Monte-Carlo simulation results (one .npz archive per regime)
unique_alpha = np.load('unwarping_simulation_results/noise_vars_unique.npz')
lessThanPiK = np.load('unwarping_simulation_results/noise_vars_non-unique_alpha_lessThanPiK.npz')
moreThanPiK = np.load('unwarping_simulation_results/noise_vars_non-unique_alpha_moreThanPiK.npz')
s_0 = np.load('unwarping_simulation_results/noise_vars_non-unique_s_0.npz')
change_b = np.load('unwarping_simulation_results/noise_vars_change_b_parallel.npz')
var_list = [unique_alpha, lessThanPiK, moreThanPiK, s_0]
var_names = np.array(["$\\alpha < \\alpha_c$", "$\\alpha < \\frac{\\pi}{K}$", "$\\alpha > \\frac{\\pi}{K}$", "$\\alpha = \\frac{2\\pi}{2K+1}$"])
linestyles = ['-.', '-', '--', ':']
# One row per regime, one column per SNR value
b_error_closest_b = np.zeros((len(var_list), len(var_list[1]['SNR'])))
b_error_closest_h_n = np.zeros((len(var_list), len(var_list[1]['SNR'])))
h_error_closest_b = np.zeros((len(var_list), len(var_list[1]['SNR'])))
h_error_closest_h_n = np.zeros((len(var_list), len(var_list[1]['SNR'])))
zero_sol_cases = np.zeros((len(var_list), len(var_list[1]['SNR'])))
multiple_sol_cases = np.zeros((len(var_list), len(var_list[1]['SNR'])))
SNR = var_list[1]['SNR']
iter_max = var_list[1]['iter_max']
# Stack each regime's stored vectors into the matrices above
for ind, var in enumerate(var_list):
    b_error_closest_b[ind, :] = var['b_error_closest_b']
    b_error_closest_h_n[ind, :] = var['b_error_closest_h_n']
    h_error_closest_b[ind, :] = var['h_error_closest_b']
    h_error_closest_h_n[ind, :] = var['h_error_closest_h_n']
    zero_sol_cases[ind, :] = var['zero_sol_cases']
    multiple_sol_cases[ind, :] = var['multiple_sol_cases']
print("a = " + str(np.round(var_list[1]['a_orig'], 2)))
print("b = " + str(np.round(var_list[1]['b_orig'], 2)))
matplotlib.rcParams.update({'font.size': 8})
idx = np.logical_and(SNR <= 40, SNR >= -10)  # restrict plots to -10..40 dB
# Figure 1: b estimation error (log scale) per regime
plt.figure(figsize=(5, 2.5))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
for i in range(len(var_list)):
    plt.plot(SNR[idx], b_error_closest_b[i, idx], linestyles[i], label=var_names[i])
plt.xlabel('SNR [dB]')
plt.grid()
plt.legend(loc=1, fontsize=10)
plt.yscale('log')
plt.tight_layout()
plt.savefig('b_error_closest_b_all.pdf')
# Figure 2: h estimation error (log scale) per regime
plt.figure(figsize=(5, 2.5))
for i in range(len(var_list)):
    plt.plot(SNR[idx], h_error_closest_h_n[i, idx], linestyles[i], label=var_names[i])
plt.xlabel('SNR [dB]')
plt.grid()
plt.legend(loc=3, fontsize=10)
plt.yscale('log')
plt.tight_layout()
plt.savefig('h_error_closest_h_n_all.pdf')
# Figure 3: percentage of trials with multiple solutions
plt.figure(figsize=(5, 2.5))
for i in range(len(var_list)):
    plt.plot(SNR[idx], multiple_sol_cases[i, idx] / 1. / iter_max * 100, linestyles[i], label=var_names[i])
plt.xlabel('SNR [dB]')
plt.grid()
plt.legend(loc='best', fontsize=10)
plt.tight_layout()
plt.savefig('multiple_sol_cases_all.pdf')
# Figure 4: percentage of trials with exactly one solution
plt.figure(figsize=(5, 2.5))
for i in range(len(var_list)):
    plt.plot(SNR[idx], 100 - (multiple_sol_cases[i, idx] + zero_sol_cases[i, idx]) / 1. / iter_max * 100, linestyles[i], label=var_names[i])
plt.xlabel('SNR [dB]')
plt.grid()
plt.legend(loc='best', fontsize=10)
plt.tight_layout()
plt.savefig('single_sol_cases_all.pdf')
# +
# h error as a function of SNR for several values of b (rebinds the
# h_error_closest_h_n matrix loaded from the change_b archive)
h_error_closest_h_n = change_b['h_error_closest_h_n']
SNR = change_b['SNR']
b_list = change_b['b_list']
# Custom dash patterns, one per value of b
linestyles = [(0, (1, 5)),
              (0, (5, 5)),
              (0, ()),
              (0, (3, 5, 1, 5)),
              (0, (3, 5, 1, 5, 1, 5))]
plt.figure(figsize=(5, 2.5))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
for i in range(h_error_closest_h_n.shape[0]):
    plt.plot(SNR, h_error_closest_h_n[i, :], linestyle=linestyles[i], label='$b = ' + str(np.round(b_list[i], 2)) + '$')
plt.xlabel('SNR')
plt.grid()
plt.yscale('log')
plt.legend(loc='best')
plt.tight_layout()
plt.savefig('h_error_closest_h_n_change_b_parallel.pdf')
# -
| generate_figures_unwarping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:cfm]
# language: python
# name: conda-env-cfm-py
# ---
import sympy as sym
from scipy import integrate
import numpy as np
import matplotlib.pyplot as plt
# Symbolic SIR-with-vaccination model.  Fixes vs. the original cell:
#  * R was created as sym.Function("V") — renamed to "R" to match its use;
#  * eq1 lacked the /N normalisation present in eq2 and coupled the
#    vaccination term to R(t), while the numeric dx() below uses neither /N
#    nor R.  The equations now match dx() and conserve the population
#    (the three right-hand sides sum to zero).  N stays declared for reuse.
S, I, R = sym.Function("S"), sym.Function("I"), sym.Function("R")
N, mu, alpha, beta, t = sym.symbols("N, mu, alpha, beta, t")
eq1 = sym.Derivative(S(t), t) - (- alpha * S(t) * I(t) - mu * S(t))
eq2 = sym.Derivative(I(t), t) - (alpha * I(t) * S(t) - beta * I(t))
eq3 = sym.Derivative(R(t), t) - (beta * I(t) + mu * S(t))
# + tags=["raises-exception"]
sym.dsolve((eq1, eq2, eq3))
# -
# Further investigation shows that an exact solution to this system of differential equations is difficult. Let us do this numerically:
def dx(x, t, alpha, beta, mu):
    """Right-hand side of the vaccination SIR model for odeint.

    x = (S, I, R); alpha is the contact rate, beta the recovery rate and
    mu the vaccination rate (vaccination moves people directly S -> R).
    """
    S, I, R = x
    infections = alpha * I * S
    vaccinations = mu * S
    recoveries = beta * I
    return (-infections - vaccinations,
            infections - recoveries,
            recoveries + vaccinations)
# + tags=["nbval-ignore-output"]
# Scenario 1: no vaccination (mu = 0)
alpha = 1 / 1000  # Every 1000 interactions leads to infection
beta = 1 / 5  # take 5 time units to recover
N = 10 ** 4  # Population of 10 thousand people
mu = 0  # 0 vaccination percentage
ts = np.linspace(0, 10, 5000)
xs = integrate.odeint(func=dx, y0=np.array([N - 1, 1, 0]), t=ts, args=(alpha, beta, mu))
# NOTE: rebinds S, I, R (previously sympy Functions) to the solution arrays
S, I, R = xs.T
plt.figure()
plt.plot(ts, S, label="Susceptibles")
plt.plot(ts, I, label="Infected")
plt.plot(ts, R, label="Recovered")
plt.legend()
plt.title(f"$\max(I)={round(max(I))}$ ($\\alpha={alpha}$, $\\beta={beta}$, $\mu={mu}$)")
plt.savefig("base_scenario.pdf");
# + tags=["nbval-ignore-output"]
# Scenario 2: moderate vaccination rate
mu = 1 / 2  # Vaccinate half the population
ts = np.linspace(0, 10, 5000)
xs = integrate.odeint(func=dx, y0=np.array([N - 1, 1, 0]), t=ts, args=(alpha, beta, mu))
S, I, R = xs.T
plt.figure()
plt.plot(ts, S, label="Susceptibles")
plt.plot(ts, I, label="Infected")
plt.plot(ts, R, label="Recovered")
plt.legend()
plt.title(f"$\max(I)={round(max(I))}$ ($\\alpha={alpha}$, $\\beta={beta}$, $\mu={mu}$)")
plt.savefig("moderate_vaccination_rate.pdf");
# + tags=["nbval-ignore-output"]
# Scenario 3: near-universal vaccination
mu = 99 / 100  # Vaccinate 99% of the population
ts = np.linspace(0, 10, 5000)
xs = integrate.odeint(func=dx, y0=np.array([N - 1, 1, 0]), t=ts, args=(alpha, beta, mu))
S, I, R = xs.T
plt.figure()
plt.plot(ts, S, label="Susceptibles")
plt.plot(ts, I, label="Infected")
plt.plot(ts, R, label="Recovered")
plt.legend()
plt.title(f"$\max(I)={round(max(I))}$ ($\\alpha={alpha}$, $\\beta={beta}$, $\mu={mu}$)")
plt.savefig("high_vaccination_rate.pdf");
# + tags=["nbval-ignore-output"]
# Sweep mu over [0, 1] and record the peak infected fraction for each run
vaccination_rates = np.linspace(0, 1, 500)
max_percent_of_infected = []
for mu in vaccination_rates:
    xs = integrate.odeint(func=dx, y0=np.array([N - 1, 1, 0]), t=ts, args=(alpha, beta, mu))
    S, I, R = xs.T
    max_percent_of_infected.append(max(I) / N)
plt.figure()
plt.plot(vaccination_rates, max_percent_of_infected)
plt.xlabel("$\mu$")
plt.ylabel("% of population infected")
plt.savefig("effect_of_vaccination_rate.pdf");
| assets/rsc/sir/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# +
# https://www.r-bloggers.com/web-data-acquisition-parsing-json-objects-with-tidyjson-part-3/
# https://github.com/sailthru/tidyjson
# Parse JSON columns with tidyjson; tidyverse supplies read_csv and %>%
library(tidyjson)
library(tidyverse)
library(magrittr)
# NOTE(review): absolute, machine-specific path — parameterise before reuse
df = read_csv("/home/stlk/Desktop/DigEc_data/Kickstarter_2016-01-28T09_15_08_781Z.zip_folder/Kickstarter.csv")
# +
# Inspect the structure of the JSON stored in the "creator" column
j = tidyjson::as.tbl_json(as.character(df$creator))
j %>% tidyjson::json_structure()
# j %>% enter_object("urls") %>% gather_array %>% tidyjson::spread_all %>% head()
# +
# Minimal worked example: flatten a JSON array of objects into a data frame
json <- '[{"name": "bob", "age": 32}, {"name": "susan", "age": 54}]'
json %>% # Use the %>% pipe operator to pass json through a pipeline
    as.tbl_json %>% # Parse the JSON and setup a 'tbl_json' object
    gather_array %>% # Gather (stack) the array by index
    spread_values( # Spread (widen) values to widen the data.frame
        user.name = jstring("name"), # Extract the "name" object as a character column "user.name"
        user.age = jnumber("age") # Extract the "age" object as a numeric column "user.age"
    )
| 1_json_parse_R.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # AI502/KSE527, Homework 01
# This file was made by <NAME>, based on a modified version of https://github.com/floydhub/regression
import torch
import torch.nn as nn
import torch.utils.data
POLY_DEGREE = 4
torch.manual_seed(2020)  # reproducible ground-truth polynomial
# Random ground-truth coefficients (column vector) and bias, scaled by 5
W_target = torch.randn(POLY_DEGREE, 1) * 5
b_target = torch.randn(1) * 5
def poly_desc(W, b):
    """Creates a string description of a polynomial.

    W holds the coefficients ordered from the highest power down to x^1;
    b is a one-element sequence holding the constant term.
    """
    degree = len(W)
    terms = ['{:+.2f} x^{} '.format(coeff, degree - idx) for idx, coeff in enumerate(W)]
    return 'y = ' + ''.join(terms) + '{:+.2f}'.format(b[0])
# Report the ground-truth polynomial the model should recover
print('==> The real function you should approximate:\t' + poly_desc(W_target.view(-1), b_target))
# ---
def make_features(x):
    """Builds features i.e. a matrix with columns [x^4, x^3, x^2, x^1]."""
    x = x.unsqueeze(1)
    descending_powers = range(POLY_DEGREE, 0, -1)
    return torch.cat([x ** p for p in descending_powers], 1)
def f(x):
    """Ground-truth function: feature rows times W_target plus the bias."""
    return torch.mm(x, W_target) + b_target[0]
def get_dataset(dataset_size):
    """Builds a dataset of (features, target) pairs of the given size."""
    samples = torch.randn(dataset_size)
    features = make_features(samples)
    targets = f(features)
    return list(zip(features, targets))
dataset = get_dataset(200)  # 200 (x, f(x)) samples; any size works
# ---
# Training hyper-parameters
num_epochs = 500
batch_size = 50
learning_rate = 0.1
criterion = nn.SmoothL1Loss()  # Huber-style loss, robust to outlier targets
dataset_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)
# ---
class Net(nn.Module):
    """Single linear layer mapping POLY_DEGREE features to one output."""

    def __init__(self):
        super(Net, self).__init__()
        self.fc = nn.Linear(W_target.size(0), 1)
        # Start from all-zero weights and bias so every run is comparable.
        self.fc.weight.data.fill_(0.)
        self.fc.bias.data.fill_(0.)

    def forward(self, x):
        return self.fc(x)
# ---
def fit(model, loader, criterion, learning_rate, num_epochs):
    """Train `model` in-place with plain SGD.

    Args:
        model: network to optimize; its parameters are updated in-place.
        loader: DataLoader yielding (input, target) pairs.
        criterion: loss function applied to (prediction, target).
        learning_rate: SGD step size.
        num_epochs: number of full passes over the loader.
    """
    model.train()
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    # Hoisted out of the batch loop: CUDA availability cannot change
    # mid-training, so there is no reason to re-check it per batch.
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    for epoch in range(num_epochs):
        for data in loader:
            x = data[0].type(torch.FloatTensor).to(device)
            y = data[1].type(torch.FloatTensor).to(device)
            y_hat = model(x)
            loss = criterion(y_hat, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
# ---
# Instantiate the model (on GPU when available) and compare the learned
# polynomial to the ground truth before and after training.
net = Net().cuda() if torch.cuda.is_available() else Net()
print('==> Initial function:\t' + poly_desc(net.fc.weight.data.view(-1), net.fc.bias.data))
print('==> Actual function:\t' + poly_desc(W_target.view(-1), b_target))
# train
fit(net,dataset_loader,criterion,learning_rate,num_epochs)
print('==> Learned function:\t' + poly_desc(net.fc.weight.data.view(-1), net.fc.bias.data))
print('==> Actual function:\t' + poly_desc(W_target.view(-1), b_target))
| AI502-TA/[HW1-1] Polynomial Approximation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (system-wide)
# language: python
# metadata:
# cocalc:
# description: Python 3 programming language
# priority: 100
# url: https://www.python.org/
# name: python3
# resource_dir: /ext/jupyter/kernels/python3
# ---
# # Assignment 3 - Streamflow
#
# - toc: false
# - badges: true
# - comments: false
# - categories: [jupyter]
# ## 📒 instructions
# This is where learning happens, not during a lecture. You'll learn a ton of things by doing them yourself. Much success! 😄
#
# Create a Jupyter Notebook called `assignment-03-IDNUMBER`, where `IDNUMBER` is your 9-digit ID. This is the only file we will check.
#
# ## 📌 locations and data
#
# **Choose one location in the US.**
#
# 1. Import streamflow data from USGS's [National Water Information System](https://maps.waterdata.usgs.gov/mapper/index.html). Choose on the map any measuring station you see fit. Make sure there is available discharge data (usually given in cubic feet per second) in small time intervals, e.g., every 15 minutes.
#
# 2. Go to NOAA's [Climate Reference Network Data](https://www.ncdc.noaa.gov/crn/qcdatasets.html) website. The sub-hourly (5-min) data contains information on many variables, we are interested in precipitation.
#
# **Attention!** Some of the USGS stations provide precipitation data. If you find one such station, step 2 above is unnecessary. If you only find discharge data on the USGS website, then make sure you choose two stations in very close proximity (USGS and NOAA). Because there are only a few high-resolution NOAA stations, you might want to start from there and then find discharge data for a stream near the NOAA station.
#
# Bottom line: you are looking for precipitation and stream discharge data, for stations in close proximity, with a high temporal resolution (5 min, 15 min, etc).
#
# ## 🛠 tasks
#
# Choose a rain event of a few hours in your data set. Find the rate of effective water input (p*) and the event flow rate (q*). Analyze the data in a similar was as done during class (various graphs explaining what you see). Find also the characteristic times of the event (centroid lag $T_{LC}$, and centroid lag-to-peak $T_{LPC}$).
#
# Try to find information on the climate, geography, soil, and land use of the watershed. Begin the assignment by explaining about the watershed you chose and characterizing it. When presenting the data and your analyses, discuss what you see based on the concepts learned in class (infiltration, runoff generation, and the factors that affect them). Does the information you found match what you see? What makes sense, and what doesn't?
#
# Discussion is important!
#
# You will have **two weeks** to deliver your assignment. You should **not** hand in a dry document with only figures and code, I'm expecting text before and after each code/graph cell, explaining what you did, why you did it, and how it fits the story you are telling. Don't forget to put labels on your plot axes, title, legend, etc.
#
# Your Jupyter Notebook should be **fully functional**: if we press `Kernel > Restart & Run All`, all the code must work without any errors.
#
# ## 🌅 presentation
# All the assignment must be in **one single** Jupyter Notebook. Use markdown cells to discuss the analysis and results, and in code cells show **all the code** you used to produce the figures and data analysis. Leave only the code necessary for your analysis, delete unnecessary lines your wrote while analyzing your data. Don't forget to comment your code, just like we did during exercise sessions.
#
# You can write in English or in Hebrew, but the text in the figures must be in English. If you choose to write the discussion in Hebrew, be aware that Jupyter Notebooks don't have native right-to-left language support:
#
# ניתן לכתוב בעברית, למרות שזה לא נראה כ״כ טוב...
#
# You can use some HTML code to achieve best results in Hebrew. Type the following
# ```html
# <p dir="rtl" style="text-align: right;">
# עכשיו הרבה יותר טוב!
# </p>
# ```
# to get
# <p dir="rtl" style="text-align: right;">
# עכשיו הרבה יותר טוב!
# </p>
#
# If you have many paragraphs in hebrew, do the following:
# <p dir="rtl" style="text-align: right;">
# פסקה מספר 1.
# </p>
#
# <p dir="rtl" style="text-align: right;">
# פסקה מספר 2.
# </p>
#
#
# <p dir="rtl" style="text-align: right;">
# אם יש לכם כמה פסקאות, כל אחת מהן תהיה בתוך "dir" משלה
# </p>
#
# In my opinion it is too complicated to write in Hebrew in Jupyter Notebooks, just write in English, your grade will not be affected by typos nor less-than-perfect English proficiency.
#
# ## 💯 evaluation
#
# Your assignment will be evaluated according to the following criteria:
# * 40% Presentation. How the graphs look, labels, general organization, markdown, clean code.
# * 30% Discussion. This is where you explain what you did, what you found out, etc.
# * 15% Depth of analysis. You can analyze/explore the data with different levels of complexity, this is where we take that into consideration.
# * 10% Replicability: Your code runs flawlessly.
# * 5%: Code commenting. Explain in your code what you are doing, this is good for everyone, especially for yourself!
#
#
# ## 🚚 importing the data
#
# You can use the code from previous assignments and from the exercise lectures.
| _notebooks/2020-02-05-assignment-03-streamflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.5 64-bit (''py37'': conda)'
# language: python
# name: python37564bitpy37conda8bd1a1c5581e475f987faf8fcae1a07b
# ---
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# +
def deltas(state, t, N, params):
    """Right-hand side of the SEIHRD compartment model for odeint.

    state = (S, E, I, H, R, D).  N is accepted for signature compatibility
    but not used in the rates.  H tracks hospitalisations of a fraction
    h_rate of those leaving incubation, alongside the main S-E-I-R/D flow.
    NOTE(review): the infection term carries no /N normalisation — confirm
    whether S and I are meant as fractions or absolute counts.
    """
    S, E, I, H, R, D = state
    infection = params['R_0'] / params['T_inf'] * I * S
    incubation_out = E / params['T_inc']
    infectious_out = I / params['T_inf']
    dS = -infection
    dE = infection - incubation_out
    dI = incubation_out - infectious_out
    dH = params['h_rate'] * E / params['T_inc'] - H / params['T_hos']
    dR = (1 - params['m_rate']) * I / params['T_inf']
    dD = params['m_rate'] * I / params['T_inf']
    return dS, dE, dI, dH, dR, dD
N = 1000
E0 = 0
I0 = 1
H0 = 0
R0 = 0
D0 = 0
# Bug fix: this previously read "N - E0 - I0 - H0 - R0, D0", which built
# the tuple (999, 0) instead of the susceptible count.
S0 = N - E0 - I0 - H0 - R0 - D0
# Bug fix: these lines were six bare expressions on an undefined `params`
# (a NameError), and used the key 'R_t' while deltas() reads 'R_0'.
# Collected into an actual dict; the values are placeholders —
# TODO: set them for the scenario being studied.
params = {
    'R_0': 2.2,     # basic reproduction number
    'T_inf': 2.9,   # mean infectious period
    'T_inc': 5.2,   # mean incubation period
    'T_hos': 4.0,   # mean hospital stay
    'h_rate': 0.1,  # fraction of exposed who are hospitalised
    'm_rate': 0.02  # infection mortality rate
}
state0 = S0, E0, I0, H0, R0, D0
t = np.linspace(0, 200, 200)
# +
# Total population, N.
N = 1000  # NOTE(review): rebinds N from the SEIHRD cell above
# Initial number of infected and recovered individuals, I0 and R0.
I0, R0 = 1, 0
# Everyone else, S0, is susceptible to infection initially.
S0 = N - I0 - R0
# Contact rate, beta, and mean recovery rate, gamma, (in 1/days).
beta, gamma = 0.2, 1./10
# A grid of time points (in days)
t = np.linspace(0, 160, 1600)
# The SIR model differential equations.
def deriv(y, t, N, beta, gamma):
    """Classic SIR right-hand side for odeint; y = (S, I, R)."""
    S, I, R = y
    infections = beta * S * I / N
    recoveries = gamma * I
    return -infections, infections - recoveries, recoveries
# Initial conditions vector
y0 = S0, I0, R0
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N, beta, gamma))
S, I, R = ret.T
# Plot the data on three separate curves for S(t), I(t) and R(t)
fig = plt.figure(facecolor='w')
ax = fig.add_subplot(111, axisbelow=True)
# Dividing by 1000 (== N here) rescales the counts to the 1000s axis
ax.plot(t, S/1000, 'b', alpha=0.5, lw=2, label='Susceptible')
ax.plot(t, I/1000, 'r', alpha=0.5, lw=2, label='Infected')
ax.plot(t, R/1000, 'g', alpha=0.5, lw=2, label='Recovered with immunity')
ax.set_xlabel('Time /days')
ax.set_ylabel('Number (1000s)')
ax.set_ylim(0,1.2)
ax.yaxis.set_tick_params(length=0)
ax.xaxis.set_tick_params(length=0)
ax.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
    ax.spines[spine].set_visible(False)
plt.show()
# -
| EpyModels/models/Untitled.ipynb |