code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from glob import glob
from pathlib import Path
import pandas as pd
import re
# -
# ## LOAD PATTERNS
# Load the pattern table: one named regular expression per row.
df_patterns = pd.read_excel(Path().absolute().parent.parent / "data/patterns.xlsx")
dic_patterns = {
    record["name"]: r"{}".format(record["expression"])
    for record in df_patterns.to_dict("records")
}
# ## FILES TO EXTRACT REGULAR EXPRESSIONS FROM
PATH_FILES = str(Path().absolute().parent.parent / "data/regular_expressions_mock_data")
files = glob(PATH_FILES + "/**/Data*.xlsx", recursive=True)
# ## CREATING A BOOLEAN REPORT OF THE TEXTS
COL_ID = "court"
COL_TEXT = "text"
NAME_REPORT = str(Path().absolute().parent.parent / "reports/mock_data_regex_court.xlsx")
# Count, per court, how many texts match each named pattern.
dic_results = {}
for file_path in files:
    for row in pd.read_excel(file_path).to_dict("records"):
        court = row[COL_ID]
        if court not in dic_results:
            dic_results[court] = dict.fromkeys(dic_patterns, 0)
        for name, exp in dic_patterns.items():
            # Case-insensitive; re.S lets '.' span newlines in long texts.
            if re.search(exp, row[COL_TEXT], flags=re.I | re.S):
                dic_results[court][name] += 1
# One report row per court: the id column plus one count column per pattern.
rows = [{COL_ID: court, **counts} for court, counts in dic_results.items()]
df_report = pd.DataFrame(rows)
df_report.to_excel(NAME_REPORT, index=False)
| notebooks/12. Regular expressions/Extracting regex.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
#
# # Plotting topographic maps of evoked data
#
#
# Load evoked data and plot topomaps for selected time points.
#
#
#
# +
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
print(__doc__)
# Locate (downloading if needed) the MNE sample dataset on disk.
path = sample.data_path()
fname = path + '/MEG/sample/sample_audvis-ave.fif'
# load evoked and subtract baseline
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
# set time instants in seconds (from 50 to 150ms in a step of 10ms)
times = np.arange(0.05, 0.15, 0.01)
# If times is set to None only 10 regularly spaced topographies will be shown
# plot magnetometer data as topomaps
evoked.plot_topomap(times, ch_type='mag')
# compute a 50 ms bin to stabilize topographies
evoked.plot_topomap(times, ch_type='mag', average=0.05)
# plot gradiometer data (plots the RMS for each pair of gradiometers)
evoked.plot_topomap(times, ch_type='grad')
# plot magnetometer data as an animation
evoked.animate_topomap(ch_type='mag', times=times, frame_rate=10)
# plot magnetometer data as topomap at 1 time point : 100 ms
# and add channel labels and title
evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False,
                    size=6, res=128, title='Auditory response')
# Shrink the figure margins so the channel-name labels are not clipped.
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88)
| 0.14/_downloads/plot_evoked_topomap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # DEMO3 Inverse problem solving
# ## Most of the codes are the same as DEMO2, but slightly different.
# - This script will format graph databases
#
#
# +
import sys
sys.path.append("../MIGraph/GraphConv/")
from ValueTransformer import ValueTransformer
from ConvGraphScript import drawGraph,checkGraphList
from AutoParameterScaling import AutoParameterScaling
from ConvGraphmlToGraph import loadGraphCSV
from PrepGraphScript import PrepGraphScript
import glob
import os
import joblib
from tqdm import tqdm
import numpy as np
import random
os.chdir("praparingGraphs")
# +
# Load PEDOT-PSS files: each data folder is expected to contain
# "<folder_name>.csv" plus a "graph/" subdirectory.
folderList = glob.glob("input/PEDOTPSS/*")
CSVPathList = [folder + "/" + os.path.basename(folder) + ".csv" for folder in folderList]
graphPathList = [folder + "/graph/" for folder in folderList]
# -
# # convert graph-type PEDOT-PSS file
# Convert each PEDOT-PSS CSV (plus its graph folder) into a list of graph
# objects and persist it as a .graphbin file under temporary/.
VT=ValueTransformer()
for CSVPath,graphPath in zip(CSVPathList,graphPathList):
    print(CSVPath)
    gList=loadGraphCSV(CSVPath,graphPath)
    #convert unit etc
    gList=VT.convertGraphList(gList)
    # presumably validates the converted graphs — confirm in ConvGraphScript
    checkGraphList(gList)
    filename=os.path.basename(CSVPath)
    outname="temporary/"+filename+".graphbin"
    print("saving...", outname)
    # compress=3: moderate compression keeps the .graphbin files small.
    joblib.dump(gList,outname,compress=3)
# +
#convert wikipedia file
#you can add other compound csv files in additional_simple_comps
csvList = glob.glob("input/additional_simple_comps/*.csv")
print(len(csvList))
# BUG FIX: sorted() returns a new list; the original called it as a bare
# statement and discarded the result, so the files were processed unsorted.
csvList = sorted(csvList)

def conv(filename):
    # Convert one compound CSV into graph records (fragmenting disabled).
    pgs = PrepGraphScript(filename)
    pgs.doFragment = False
    pgs.prapareGraphList(numOfMaxFragments=2000)

for num, filename in tqdm(enumerate(csvList)):
    print(num, "file: ", filename)
    conv(filename)
# -
# # combine compound databases
# +
import pandas as pd
# In the case of this PEDOT-PSS_txt project only one compound file is
# available, but normally there are many.
allCompundsPath = "output/allcompounds.csv.gz"
csvList = glob.glob("../convCSVtoGraph/temp/output/*.csv")
csvList2 = glob.glob("input/*.csv")
csvgzList = glob.glob("input/*.csv.gz")
# De-duplicate the paths and fix the order so reruns concatenate identically.
compPathList = sorted(set(csvList) | set(csvgzList) | set(csvList2))
print(compPathList)
# +
CompColumns = ["ID", "SMILES"]
# PERF FIX: read every file first and concatenate once. The original called
# pd.concat inside the loop, copying the accumulated frame each iteration
# (quadratic in total rows).
frames = []
for filePath in compPathList:
    print(filePath)
    frames.append(pd.read_csv(filePath)[CompColumns])
df = pd.concat(frames, axis=0)
df = df.drop_duplicates("ID")
# NOTE(review): reset_index() keeps the old index as an extra "index" column
# in the saved CSV — use reset_index(drop=True) if that is unintended.
df = df[CompColumns].reset_index()
df.to_csv(allCompundsPath, index=False)
df
# -
# # delete broken compounds and their graphs
# +
from rdkit import Chem
from rdkit.Chem import AllChem

# Map compound ID -> SMILES from the combined compound table.
compIDtoSMILES = dict(zip(df["ID"], df["SMILES"]))
graphbinList1 = glob.glob("temporary/*.graphbin")
graphbinList2 = glob.glob("../convCSVtoGraph/temp/output/*.graphbin")
graphbinList = sorted(set(graphbinList1) | set(graphbinList2))

for graphbin in tqdm(graphbinList):
    gList = joblib.load(graphbin)
    ngList = []  # graphs whose compounds all yield valid RDKit molecules
    for g in gList:
        # Compound nodes are labelled "C_<ID>"; all other nodes are ignored.
        compIDList = [g.nodes[node]["label"] for node in g.nodes
                      if str(g.nodes[node]["label"])[:2] == "C_"]
        if np.nan in compIDList:
            compIDList = ["none"]
            print("nan")
        if "C_nan" in compIDList:
            compIDList = ["none"]
        # Keep the graph only if every SMILES parses and a Morgan fingerprint
        # can be computed for it.
        try:
            SMILESList = [compIDtoSMILES[i[2:]] for i in compIDList]
            molList = [Chem.MolFromSmiles(smiles) for smiles in SMILESList]
            for mol in molList:
                morgan_fps = AllChem.GetMorganFingerprintAsBitVect(mol, 2, 20)
                bit = morgan_fps.ToBitString()
        except Exception as e:
            # BUG FIX: was a bare `except:` (also caught KeyboardInterrupt),
            # and printed SMILESList which is unbound/stale when the ID lookup
            # itself raised — report the compound IDs and the error instead.
            print("error", compIDList, e)
        else:
            ngList.append(g)
    # Overwrite the bin file with only the valid graphs.
    joblib.dump(ngList, graphbin)
# +
#standardizing values (this is not necessary for PEDOT-PSS project) and finalize graphs
#** standardizing was done at step 1, because graphs made from automatic text parsing have slightly different forms
#, and standardizing cannot be done by this code. (i.e., developed for "normal graphs" )
graphbinList1=glob.glob("temporary/*.graphbin")
graphbinList2=glob.glob("../convCSVtoGraph/temp/output/*.graphbin")
graphbinList=sorted(list(set(graphbinList1)|set(graphbinList2)))
print(graphbinList)
# Fit the parameter scaler on all graph bins, persist it for later reuse,
# then apply the transform to the same bins.
AutoSC=AutoParameterScaling()
AutoSC.initialize(graphbinList)
joblib.dump(AutoSC,"output/AutoSC.scaler",compress=3)
# presumably rewrites the graph bins in place with scaled values — confirm
# in AutoParameterScaling
AutoSC.autoTransform(graphbinList)
# -
# # check graphs
# +
# Reload every finalized graph bin and merge them into a single list.
graphbinList = glob.glob("output/*.graphbin")
gList = []
for binPath in tqdm(graphbinList):
    print(binPath)
    gList.extend(joblib.load(binPath))
print(len(gList), " plots")
# +
# Draw one example graph and inspect its node labels.
number = 0
#draw
drawGraph(gList[number])
g = gList[number]
nodeVals = [g.nodes[n]["label"] for n in g.nodes]
nodeVals
# -
| codes/DEMO3_Inverse_problem/step1_a_FormatGraphs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# Import the NumPy package under its conventional alias np
import numpy as np
# Limit the number of digits printed after the decimal point
np.set_printoptions(precision=3, suppress=True)
# NumPy arrays can be created from flat lists
np.array([2, 5, 0])
# as well as from nested ones
np.array([[2, 5, 0],
          [3, 5, 1],
          [3, 7, 8]])
# Use zeros() to create vectors and matrices filled with zeros
np.zeros(3)
np.zeros((3, 3))
# ones() is the analogous function for all-ones matrices.
np.ones(3)
np.ones((3, 3))
# eye() creates an identity matrix of the required size
np.eye(3)
# arange() is convenient for generating ranges of numbers
np.arange(10)
# Its parameters set the bounds of the half-open interval, the step and the dtype.
# BUG FIX: the `np.float` alias was removed in NumPy 1.24 (AttributeError);
# use the builtin `float` instead.
np.arange(2, 10, 2, dtype=float)
# For an evenly spaced grid it is more convenient to use linspace()
np.linspace(1.0, 4.0, 10)
# For examples it is often handy to quickly create a matrix of random numbers.
np.random.rand(3, 3)
# Slices are among the most powerful tools for working with multidimensional arrays
x = np.random.rand(3, 3)
x
x[:, 0]
x[1, :]
# Slices also allow row- or column-wise assignment
x[:, 0] = x[:, 1]
x
# transpose() (or the .T attribute) transposes a matrix
np.transpose(x)
x.T
# Matrices can be saved to and loaded from text files with the matching functions
x = np.random.rand(10, 2)
np.savetxt("x.csv", x, delimiter=',', fmt="%.5g")
y = np.loadtxt("x.csv", delimiter=',')
y
# Note that the ordinary arithmetic operators perform element-wise operations
a = np.random.rand(3)
b = np.random.rand(3)
a + b
a - b
a * b
a / b
# BUG FIX: `X` was displayed before it was assigned (NameError at L321 of the
# original); define it first, then display it.
X = np.random.rand(3, 3)
X
a
# Broadcasting: the vector is multiplied element-wise into every row
X * a
# For a true matrix multiplication use the dot() function
np.dot(X, a)
# or the @ operator
X @ a
# solve() solves a linear system given in matrix form
A = np.random.rand(3, 3)
b = np.random.rand(3)
x = np.linalg.solve(A, b)
A @ x - b
# inv() computes the matrix inverse
A = np.random.rand(3, 3)
Ainv = np.linalg.inv(A)
Ainv @ A
# Consider a simple application of matrix inversion: solving a linear
# regression problem with the method of least squares.
#
# First generate the input data with linspace() and add noise with np.random.randn
X = np.linspace(-2, 3, 30)
X = X + np.random.randn(len(X)) * 0.1
y = 41 * X + 11
y = y + np.random.randn(len(y)) * 10
# Visualize the generated data as a scatter plot.
# +
import matplotlib.pyplot as plt
# %matplotlib inline
plt.scatter(X, y)
plt.grid()
# -
# Following the least squares method, compute the regression coefficients as
# $$
# b = (X^T X)^{-1}X^T y
# $$
# where $X$ is the extended (design) matrix
#
# $$
# X = \begin{pmatrix}
# x_1 & 1 \\
# x_2 & 1 \\
# \vdots \\
# x_n & 1
# \end{pmatrix}
# $$
X1 = np.hstack((np.ones((len(X), 1)), X.reshape(len(X), 1)))
b = np.linalg.pinv(X1.T @ X1) @ X1.T @ y
# Compute points of the fitted line for visualization.
# BUG FIX: the original `np.linspace(min(X), max(X), 3, 100)` passed 100 as
# the `endpoint` argument and produced only 3 points; a 100-point grid was
# intended.
xx = np.linspace(min(X), max(X), 100)
yy = xx*b[1] + b[0]
# and draw everything together
plt.plot(xx, yy)
plt.scatter(X, y)
plt.grid()
| Practics/NumpyExamples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=false editable=false id="bx-ZeOJAQGQS" nbgrader={"cell_type": "markdown", "checksum": "d7142079a02673557fd4d7c62d09fb35", "grade": false, "grade_id": "cell-59e5add4db8ca4cf", "locked": true, "schema_version": 3, "solution": false, "task": false}
# # Part 1: Optimising functions
#
# In this lab we will play with some of the optimisation methods we learned in the lecture by exploring how they work on some analytic functions (both convex and non-convex).
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "14fde3e4d56930cde83320df869ce52a", "grade": false, "grade_id": "cell-2c171e030a9385fc", "locked": true, "schema_version": 3, "solution": false, "task": false} id="d_AJtlznStL9"
import torch
import torch.optim as optim
# + [markdown] deletable=false editable=false id="cG3yqv0BQQVx" nbgrader={"cell_type": "markdown", "checksum": "d6d5a1e2037a78f4a83c58ba02f204b6", "grade": false, "grade_id": "cell-3c9fb8c6c798fa3e", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ## A Simple Function
#
# For this first task, we are going to try to optimise the following using Stochastic Gradient Descent:
#
# \begin{equation}
# min_{\textbf{x}} (\textbf{x}[0] - 5)^2 + \textbf{x}[1]^2 + (\textbf{x}[2] - 1)^2\; ,
# \end{equation}
#
# Use the following block the write down the analytic minima of the above function:
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "62407a53e2c8038e31413b6e939f397c", "grade": false, "grade_id": "cell-31897742431a78ea", "locked": true, "schema_version": 3, "solution": false, "task": false} id="6ElwchnAStMA"
# ### Implement the function
#
# First, complete the following code block to implement the above function using PyTorch:
# + deletable=false id="F4BwVbzTQNv6" nbgrader={"cell_type": "code", "checksum": "82360d8bcdfd9ec6dccea7295fd4440d", "grade": false, "grade_id": "cell-205644c1f2119166", "locked": false, "schema_version": 3, "solution": true, "task": false}
def function(x):
    """Return (x[0]-5)^2 + x[1]^2 + (x[2]-1)^2, minimised at x = (5, 0, 1).

    x: indexable with at least three numeric elements (e.g. a 1-D tensor).
    """
    # BUG FIX: removed the unreachable `raise NotImplementedError()` left
    # over from the assignment template after the return statement.
    return (x[0] - 5)**2 + x[1]**2 + (x[2] - 1)**2
# + [markdown] deletable=false editable=false id="hfTJ2GI_Qcej" nbgrader={"cell_type": "markdown", "checksum": "7480042c8d67ef7613ee4bfea5d4971c", "grade": false, "grade_id": "cell-3216df41fb235ae6", "locked": true, "schema_version": 3, "solution": false, "task": false}
# ### Optimising
#
# We need two more things before we can start optimising.
# We need our initial guess - which we've set to [2.0, 1.0, 10.0] and we need to how many epochs to take.
# + deletable=false editable=false id="vvRXozg6QgNV" nbgrader={"cell_type": "code", "checksum": "c117216da51f29eddbad68c8be917e46", "grade": false, "grade_id": "cell-5d4d7fa7ed77956f", "locked": true, "schema_version": 3, "solution": false, "task": false}
p = torch.tensor([2.0, 1.0, 10.0], requires_grad=True)
epochs = 5000
# + [markdown] deletable=false editable=false id="nGNScu9QQgbZ" nbgrader={"cell_type": "markdown", "checksum": "1de659ee2ace2ae86f8601c28e1033e7", "grade": false, "grade_id": "cell-a71c41511464a13e", "locked": true, "schema_version": 3, "solution": false, "task": false}
# We define the optimisation loop in the standard way:
# + deletable=false editable=false id="vkmkfXkaQhou" nbgrader={"cell_type": "code", "checksum": "fb907848ef17ec996ee54c0f8aeb932c", "grade": false, "grade_id": "cell-3ae9b149f3a8a224", "locked": true, "schema_version": 3, "solution": false, "task": false}
# Plain SGD on the analytic function above; over enough epochs p should
# approach the analytic minimum (5, 0, 1).
opt = optim.SGD([p], lr=0.001)
for i in range(epochs):
    opt.zero_grad()       # clear gradients accumulated by the previous step
    output = function(p)
    output.backward()     # populate p.grad
    opt.step()            # p <- p - lr * p.grad
# + [markdown] deletable=false editable=false id="hFXAQAXqQi3H" nbgrader={"cell_type": "markdown", "checksum": "55c0aa9e2acd143f369fd00849faa6c5", "grade": false, "grade_id": "cell-f63a8e54eca095d1", "locked": true, "schema_version": 3, "solution": false, "task": false}
# Use the following block to print out the final value of `p`. Does it match the value you expected?
# + deletable=false id="6DEGWh_rQkJV" nbgrader={"cell_type": "code", "checksum": "b4d7857d166ffa2f5634a181ce807851", "grade": false, "grade_id": "cell-7124ebe0293e33c9", "locked": false, "schema_version": 3, "solution": true, "task": false} colab={"base_uri": "https://localhost:8080/"} outputId="bec648b8-0308-4a54-a1f1-996f9fe5fec6"
print(p)
#raise NotImplementedError()
# + [markdown] id="nDg-0zB9StMM"
# ## Visualising Himmelblau's Function
#
# We'll now have a go at a more complex example, which we also visualise, with multiple optima; [Himmelblau's function](https://en.wikipedia.org/wiki/Himmelblau%27s_function). This is defined as:
#
# \begin{equation}
# f(x, y) = (x^2 + y - 11)^2 + (x + y^2 - 7)^2\; ,
# \end{equation}
# and has minima at
# \begin{equation}
# f(3, 2) = f(-2.805118, 3.131312) = f(-3.779310, -3.283186) = f(3.584428, -1.848126) = 0\; .
# \end{equation}
#
# Use the following block to first define the function (the inputs $x, y$ are packed into a vector as for the previous quadratic function above):
# + deletable=false nbgrader={"cell_type": "code", "checksum": "1cb1ab20f7bc9cdcd7a221a5134578af", "grade": false, "grade_id": "cell-05f43ebf2fd7e68c", "locked": false, "schema_version": 3, "solution": true, "task": false} id="ebNVCSNCStMN"
def himm(x):
    """Himmelblau's function f(x, y) = (x^2 + y - 11)^2 + (x + y^2 - 7)^2.

    x: indexable pair; x[0] is the x coordinate, x[1] the y coordinate.
    Has four global minima where f = 0, e.g. f(3, 2) = 0.
    """
    # BUG FIX: removed the unreachable `raise NotImplementedError()` template
    # leftover after the return statement.
    x, y = x[0], x[1]
    return (x**2 + y - 11)**2 + (x + y**2 - 7)**2
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "a87861c51de11fc596ef3de86b950c70", "grade": false, "grade_id": "cell-fc81ef599d739b56", "locked": true, "schema_version": 3, "solution": false, "task": false} id="hV8cHQPQStMO"
# The following will plot its surface:
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "b6f21db5449a4ad97911d24936609a48", "grade": false, "grade_id": "cell-7d401c3457a5f7e9", "locked": true, "schema_version": 3, "solution": false, "task": false} id="x9IIvmYHStMO" colab={"base_uri": "https://localhost:8080/", "height": 303} outputId="e315b611-77bf-4f1b-d7fe-7cc9f9fc621e"
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import LogNorm
# Evaluate Himmelblau's function on a regular grid covering [-5, 5]^2.
xmin, xmax, xstep = -5, 5, .2
ymin, ymax, ystep = -5, 5, .2
x, y = np.meshgrid(np.arange(xmin, xmax + xstep, xstep), np.arange(ymin, ymax + ystep, ystep))
# himm() is written with torch ops, so wrap the grid in a tensor and convert back.
z = himm(torch.tensor([x, y])).numpy()
fig = plt.figure(figsize=(8, 5))
ax = plt.axes(projection='3d', elev=50, azim=-50)
# Log-normalised colours make the four shallow minima visible despite the
# large dynamic range of the function.
ax.plot_surface(x, y, z, norm=LogNorm(), rstride=1, cstride=1,
                edgecolor='none', alpha=.8, cmap=plt.cm.jet)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_zlabel('$z$')
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax))
plt.show()
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "1b6e3379a1d9fb21879d0845ca7ba072", "grade": false, "grade_id": "cell-f938244581085c83", "locked": true, "schema_version": 3, "solution": false, "task": false} id="yLcqPDqPStMP"
# Check that the above plot looks correct by comparing to the picture on the [Wikipedia page](https://en.wikipedia.org/wiki/Himmelblau%27s_function).
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "198bab34933b991f896a519e2f4162fd", "grade": false, "grade_id": "cell-574019e663811dcc", "locked": true, "schema_version": 3, "solution": false, "task": false} id="0gls-0ruStMQ"
# ### Optimising
#
# Let's see how it looks for a few different optimisers from a range of starting points
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "f52952455140dbe3b789e48da231adb9", "grade": false, "grade_id": "cell-2ab57db6f9c3ff31", "locked": true, "schema_version": 3, "solution": false, "task": false} id="SdBeaIKMStMQ" colab={"base_uri": "https://localhost:8080/", "height": 514} outputId="aae2cb13-66ed-49db-f2a7-4db06dd82b51"
# Contour plot of Himmelblau's function with the plain-SGD trajectory overlaid.
xmin, xmax, xstep = -5, 5, .2
ymin, ymax, ystep = -5, 5, .2
x, y = np.meshgrid(np.arange(xmin, xmax + xstep, xstep), np.arange(ymin, ymax + ystep, ystep))
z = himm(torch.tensor([x, y])).numpy()
fig, ax = plt.subplots(figsize=(8, 8))
ax.contourf(x, y, z, levels=np.logspace(0, 5, 35), norm=LogNorm(), cmap=plt.cm.gray)
# Optimise from the origin; p is a 2x1 tensor holding (x; y).
p = torch.tensor([[0.0],[0.0]], requires_grad=True)
opt = optim.SGD([p], lr=0.01)
# Record the starting position, then the position after every update,
# giving a 2 x (steps+1) array of trajectory points.
path = np.empty((2,0))
path = np.append(path, p.data.numpy(), axis=1)
for i in range(50):
    opt.zero_grad()
    output = himm(p)
    output.backward()
    opt.step()
    path = np.append(path, p.data.numpy(), axis=1)
ax.plot(path[0], path[1], color='red', label='SGD', linewidth=2)
ax.legend()
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax))
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "a7281ca078d692fbe12ddfe12a0bd51c", "grade": false, "grade_id": "cell-f92c8fbe7bc00ba2", "locked": true, "schema_version": 3, "solution": false, "task": false} id="ANjAcdpNStMR"
# Use the following block to run SGD with momentum (lr=0.01, momentum=0.9) from the same initial point, saving the position at each timestep into a variable called `path_mom`.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "5a2c8c4d77435e085753249bb03dd44a", "grade": false, "grade_id": "cell-8beae2091cb7be41", "locked": false, "schema_version": 3, "solution": true, "task": false} id="5Hgn8y2xStMR"
# SGD with momentum from the same starting point as the plain-SGD run.
p = torch.tensor([[0.0],[0.0]], requires_grad=True)
opt = optim.SGD([p], lr=0.01, momentum=0.9)
# BUG FIX: the task asks to save the position at *each* timestep into
# `path_mom`, but the original seeded it from `path` (the plain-SGD
# trajectory) and appended only the final point. Track this run from scratch.
path_mom = np.empty((2, 0))
path_mom = np.append(path_mom, p.data.numpy(), axis=1)
for i in range(50):
    opt.zero_grad()
    output = himm(p)
    output.backward()
    opt.step()
    path_mom = np.append(path_mom, p.data.numpy(), axis=1)
#raise NotImplementedError()
#raise NotImplementedError()
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "f581993736855e0316c24d86dafede92", "grade": false, "grade_id": "cell-1a3d5f0a22670713", "locked": true, "schema_version": 3, "solution": false, "task": false} id="LKx3UMbEStMS"
# The following will plot the path taken when momentum was used, as well as the original plain SGD path:
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "ef449c500d6e15915d34f0ae82f7474f", "grade": false, "grade_id": "cell-b5ee1281d6ac6ec8", "locked": true, "schema_version": 3, "solution": false, "task": false} id="hf_AzQ41StMS" colab={"base_uri": "https://localhost:8080/", "height": 497} outputId="c149907a-7357-44f3-f481-8f6c59c54704"
ax.plot(path_mom[0], path_mom[1], color='yellow', label='SGDM', linewidth=2)
ax.legend()
fig
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "8ce2cd5108842b8589b0a7c0e789a54e", "grade": false, "grade_id": "cell-654d173df302005c", "locked": true, "schema_version": 3, "solution": false, "task": false} id="O7Eirr03StMT"
# Now explore what happens when you start from different points. What effect do you get with different optimisers?
# + deletable=false nbgrader={"cell_type": "code", "checksum": "98b599e2b344f4d5643487f8bb78ed5d", "grade": false, "grade_id": "cell-c95af05c3c20a927", "locked": false, "schema_version": 3, "solution": true, "task": false} id="Xz-6pQmKStMT" colab={"base_uri": "https://localhost:8080/", "height": 514} outputId="3a5a40b0-aa27-4445-ff25-96f44213b400"
# Explore SGDM from a different starting point (2, 4).
p = torch.tensor([[2.0],[4.0]], requires_grad=True)
opt = optim.SGD([p], lr=0.01, momentum=0.9)
# BUG FIX: track this run's trajectory from its own starting point; the
# original reused `path` (the plain-SGD run from the origin) and recorded
# only the final position.
path_mom = np.empty((2, 0))
path_mom = np.append(path_mom, p.data.numpy(), axis=1)
for i in range(50):
    opt.zero_grad()
    output = himm(p)
    output.backward()
    opt.step()
    path_mom = np.append(path_mom, p.data.numpy(), axis=1)
ax.plot(path_mom[0], path_mom[1], color='yellow', label='SGDM', linewidth=2)
ax.legend()
fig
| Pytorch Practical Tasks/3_1_FuntionOptimisation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## A Numpy Primer
#
# `Numpy` is a `Python` package for scientific computing. It supports N-dimensional arrays and has many linear algebra functions built in.
#To use numpy, you first need to import the package. You can do it in two ways:
# To use numpy, you first need to import the package. You can do it in two ways:
import numpy
# or
import numpy as np
# The second form is generally preferable so that we always know which package
# each function comes from; you then write np.<your_function> to call it.
# Let's create two numpy arrays from lists of numbers:
a = np.array([2,3,4])
a
b = np.array([3,5,1])
b
# Some common arithmetic cannot be performed on lists, only on numpy arrays.
# For example, multiplying every element of a list by 2:
[2,3,4]*2
# This is not what we expected. But if we do this on the numpy array `a`:
a*2
# That looks good!
#
# There are many math functions on numpy arrays. For example, the dot product
# of two arrays:
np.dot(a,b)
# alternatively
a.dot(b)
# We can initialize an array with all zeros:
np.zeros(5)
# Or a range using the `arange` function:
np.arange(5)
# 2D arrays are created similarly, with two numbers as the dimensions.
# Here we create a 2 by 3 matrix of random numbers (seeded for reproducibility):
np.random.seed(2)
randmat = np.random.rand(2,3)
randmat
# We can take a look at some information about the array:
print(randmat.shape)
print(randmat.size)
print(randmat.dtype)
# Indexing and slicing are very similar to lists:
randmat[0,2]
randmat[:,2]
randmat[0,:2]
# Now we can build `pandas` dataframes from a dictionary of numpy arrays:
import pandas as pd
# BUG FIX: renamed the variable `dict` -> `data`; the original name shadowed
# the builtin dict type.
data = {'first': np.random.rand(4),
        'second': np.random.rand(4)}
df = pd.DataFrame(data)
df
| intro_to_python/6_Numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/GoogleCloudPlatform/training-data-analyst/blob/master/courses/fast-and-lean-data-science/07_Keras_Flowers_TPU_playground.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Yd4z24wngWau" colab_type="text"
# You can try this notebook on a GPU but you will quickly be switching to a TPU. For this model it's worth it.
# + [markdown] id="89B27-TGiDNB" colab_type="text"
# ## Imports
# + id="9u3d4Z7uQsmp" colab_type="code" outputId="983d6988-63f9-4479-944a-9c516286a637" colab={"base_uri": "https://localhost:8080/", "height": 34}
import os
import json
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
print("Tensorflow version " + tf.__version__)
# little wrinkle: Keras models do not yet work on TPU if eager mode is enabled
# tf.enable_eager_execution()
# + colab_type="code" cellView="form" id="tMy0zz6FXnJY" colab={}
#@title display utilities [RUN ME]
def dataset_to_numpy_util(dataset, N):
    """Materialise the first N (images, labels, one_hot_labels) of `dataset`
    as numpy arrays, working in both eager and TF1 graph mode."""
    dataset = dataset.batch(N)
    if tf.executing_eagerly():
        # In eager mode, iterate in the Dataset directly.
        for images, labels, one_hot_labels in dataset:
            numpy_images = images.numpy()
            numpy_labels = labels.numpy()
            numpy_one_hot_labels = one_hot_labels.numpy()
            break;
    else:  # In non-eager mode, must get the TF node that
           # yields the next item and run it in a tf.Session.
        get_next_item = dataset.make_one_shot_iterator().get_next()
        with tf.Session() as ses:
            (numpy_images,
             numpy_labels,
             numpy_one_hot_labels) = ses.run(get_next_item)
    return numpy_images, numpy_labels, numpy_one_hot_labels
def title_from_label_and_one_hot(label, one_hot_label):
    """Build a display title: the decoded label followed by its one-hot vector."""
    text = label.decode("utf-8")
    return "{} {}".format(text, one_hot_label)
def title_from_label_and_target(label, correct_label):
    """Return (title, correct) for a predicted class.

    label, correct_label: byte strings. When the prediction is wrong the
    title also names the expected class, e.g. "roses [False, should be tulips]".
    """
    correct = (label == correct_label)
    # BUG FIX: corrected the "shoud be" typo in the displayed title.
    return "{} [{}{}{}]".format(label.decode("utf-8"), str(correct), ', should be ' if not correct else '',
                                correct_label.decode("utf-8") if not correct else ''), correct
def display_one_flower(image, title, subplot, red=False):
    """Draw one image into the given 3-digit subplot slot and return the next slot."""
    plt.subplot(subplot)
    plt.axis('off')
    plt.imshow(image)
    # Red title marks an incorrect prediction.
    title_color = 'red' if red else 'black'
    plt.title(title, fontsize=16, color=title_color)
    return subplot + 1
def display_9_images_from_dataset(dataset):
    """Show the first 9 images of `dataset` in a 3x3 grid, titled with their labels."""
    subplot=331  # matplotlib code: 3 rows, 3 cols, start at slot 1
    plt.figure(figsize=(13,13))
    images, labels, one_hot_labels = dataset_to_numpy_util(dataset, 9)
    for i, image in enumerate(images):
        title = title_from_label_and_one_hot(labels[i], one_hot_labels[i])
        subplot = display_one_flower(image, title, subplot)
        if subplot > 339:
            # Grid is full after slot 339.
            break;
    plt.tight_layout()
    plt.subplots_adjust(wspace=0.1, hspace=0.1)
    plt.show()
def display_9_images_with_predictions(images, predictions, labels):
    """Show 9 images titled with the predicted class; wrong predictions shown in red."""
    subplot=331
    plt.figure(figsize=(13,13))
    # argmax over the class axis maps prediction vectors to class names.
    classes = np.array(CLASSES)[np.argmax(predictions, axis=-1)]
    for i, image in enumerate(images):
        title, correct = title_from_label_and_target(classes[i], labels[i])
        subplot = display_one_flower(image, title, subplot, not correct)
        if subplot > 339:
            break;
    plt.tight_layout()
    plt.subplots_adjust(wspace=0.1, hspace=0.1)
    plt.show()
def display_training_curves(training, validation, title, subplot):
    """Plot train/validation curves for one metric into the given subplot slot."""
    is_first_subplot = subplot % 10 == 1
    if is_first_subplot:
        # Create the shared figure once, on the first slot of the grid.
        plt.subplots(figsize=(10, 10), facecolor='#F0F0F0')
        plt.tight_layout()
    axes = plt.subplot(subplot)
    axes.set_facecolor('#F8F8F8')
    axes.plot(training)
    axes.plot(validation)
    axes.set_title('model ' + title)
    axes.set_ylabel(title)
    axes.set_xlabel('epoch')
    axes.legend(['train', 'valid.'])
# + [markdown] id="Lzd6Qi464PsA" colab_type="text"
# ## Colab-only auth
# + id="MPx0nvyUnvgT" colab_type="code" cellView="both" colab={}
# Authenticate against Google Cloud only when running on a Colab backend.
IS_COLAB_BACKEND = 'COLAB_GPU' in os.environ # this is always set on Colab, the value is 0 or 1 depending on GPU presence
if IS_COLAB_BACKEND:
    from google.colab import auth
    auth.authenticate_user() # not necessary to access a public bucket but you will probably want to access your private buckets too
# + [markdown] id="UaKGHPjWkcVj" colab_type="text"
# ## TPU detection
# + id="tmv6p137kgob" colab_type="code" colab={}
# TPUClusterResolver() automatically detects a connected TPU on all Gooogle's
# platforms: Colaboratory, AI Platform (ML Engine), Kubernetes and Deep Learning
# VMs created through the 'ctpu up' utility. If auto-detection is not available,
# you can pass the name of your TPU explicitly:
# tf.contrib.cluster_resolver.TPUClusterResolver('MY_TPU_NAME')
# tip: on a VM created with "ctpu up" the TPU has the same name as the VM.
# Try to attach to a TPU; fall back to GPU/CPU when none is reachable.
try:
    tpu = tf.contrib.cluster_resolver.TPUClusterResolver() # TPU detection
    print('Running on TPU ', tpu.cluster_spec().as_dict()['worker'])
except ValueError:
    # TPUClusterResolver raises ValueError when no TPU address can be resolved.
    print("Running on GPU or CPU")
    tpu = None
# + [markdown] id="w9S3uKC_iXY5" colab_type="text"
# ## Configuration
# + id="M3G-2aUBQJ-H" colab_type="code" colab={}
GCS_PATTERN = 'gs://flowers-public/tfrecords-jpeg-192x192/*.tfrec'
IMAGE_SIZE = [192, 192]
if tpu:
    BATCH_SIZE = 128 # On TPU in Keras, this is the per-core batch size. The global batch size is 8x this.
else:
    BATCH_SIZE = 32 # On Colab/GPU, a higher batch size does not help and sometimes does not fit on the GPU (OOM)
VALIDATION_SPLIT = 0.19 # This will set aside three files of data for validation, 13 for training
CLASSES = [b'daisy', b'dandelion', b'roses', b'sunflowers', b'tulips'] # do not change, maps to the labels in the data (folder names)
# splitting data files between training and validation
filenames = tf.gfile.Glob(GCS_PATTERN)
split = int(len(filenames) * VALIDATION_SPLIT)
training_filenames = filenames[split:]
validation_filenames = filenames[:split]
print("Pattern matches {} data files. Splitting dataset into {} training files and {} validation files".format(len(filenames), len(training_filenames), len(validation_filenames)))
# steps = (images per file) * (file count) // batch size, floored to whole batches
validation_steps = int(3670 // len(filenames) * len(validation_filenames)) // BATCH_SIZE # 3670: number of images in dataset
steps_per_epoch = int(3670 // len(filenames) * len(training_filenames)) // BATCH_SIZE
print("With a batch size of {}, there will be {} batches per training epoch and {} batch(es) per validation run.".format(BATCH_SIZE, steps_per_epoch, validation_steps))
# + [markdown] id="kvPXiovhi3ZZ" colab_type="text"
# ## Read images and labels from TFRecords
# + id="LtAVr-4CP1rp" colab_type="code" colab={}
def read_tfrecord(example):
    """Parse one serialized TFRecord example into (image, label, one_hot_label)."""
    features = {
        "image": tf.FixedLenFeature((), tf.string), # tf.string means byte string
        "label": tf.FixedLenFeature((), tf.string),
        "one_hot_label": tf.FixedLenFeature((), tf.string)
    }
    example = tf.parse_single_example(example, features)
    image = tf.image.decode_jpeg(example['image'])
    image = tf.cast(image, tf.float32) / 255.0 # convert image to floats in [0, 1] range
    image = tf.reshape(image, [*IMAGE_SIZE, 3])
    one_hot_label = tf.io.decode_raw(example['one_hot_label'], out_type=tf.uint8) # 'decode' byte string into byte list
    one_hot_label = tf.cast(one_hot_label, tf.float32) # convert one hot labels to floats
    one_hot_label = tf.reshape(one_hot_label, [5]) # explicit fixed size needed on TPU
    label = example['label'] # byte string
    return image, label, one_hot_label
def load_dataset(filenames):
    """Build a tf.data pipeline that reads and parses the given TFRecord files."""
    # Read from several GCS files concurrently, then parse records in parallel.
    return tf.data.TFRecordDataset(filenames, num_parallel_reads=32).map(
        read_tfrecord, num_parallel_calls=32)
# + id="xb-b4PRz-V6O" colab_type="code" colab={}
display_9_images_from_dataset(load_dataset(training_filenames))
# + [markdown] id="22rVDTx8wCqE" colab_type="text"
# ## training and validation datasets
# + id="7wxKyCklR4Gh" colab_type="code" colab={}
def features_and_targets(image, label, one_hot_label):
    """Drop the raw string label, keeping only the (features, targets) pair.

    A Keras model trained with fit() expects exactly two items per element:
    the input features (the image) and the target (the one-hot label).
    """
    return image, one_hot_label
def get_batched_dataset(filenames):
    """Full input pipeline: parse, cache, repeat, batch and prefetch.

    Note the operation order: cache() is applied before repeat(), so the
    parsed records are cached once and then replayed for every epoch.
    """
    dataset = load_dataset(filenames)
    dataset = dataset.map(features_and_targets, num_parallel_calls=32)
    dataset = dataset.cache() # This dataset fits in RAM
    dataset = dataset.repeat()
    dataset = dataset.batch(BATCH_SIZE, drop_remainder=True) # drop_remainder needed on TPU
    dataset = dataset.prefetch(-1) # prefetch next batch while training (-1: autotune prefetch buffer size)
    # should shuffle too but this dataset was well shuffled on disk already
    return dataset
def get_training_dataset():
    """Batched, cached, infinitely-repeating dataset over the training files."""
    return get_batched_dataset(training_filenames)
def get_validation_dataset():
    """Batched, cached, infinitely-repeating dataset over the validation files."""
    return get_batched_dataset(validation_filenames)
# load 160 images as a numpy array so that we can play around with them in this notebook
some_flowers, some_labels, some_one_hot_labels = dataset_to_numpy_util(load_dataset(validation_filenames), 8*20)
# + [markdown] id="ALtRUlxhw8Vt" colab_type="text"
# ## Model [WORK REQUIRED]
# 1. train the model as it is, with a single convolutional layer
# * Accuracy 55%... Not great.
# 2. add additional convolutional layers interleaved max-pooling layers. Try also adding a second dense layer. For example:<br/>
# **`conv 3x3, 16 filters, relu`**<br/>
# **`conv 3x3, 30 filters, relu`**<br/>
# **`max pool 2x2`**<br/>
# **`conv 3x3, 50 filters, relu`**<br/>
# **`max pool 2x2`**<br/>
# **`conv 3x3, 70 filters, relu`**<br/>
# **`flatten`**<br/>
# **`dense 90 relu, relu`**<br/>
# **`dense 10 softmax`**<br/>
# * Accuracy 62%... slightly better. But this model is more than 3M parameters and it overfits dramatically (overfitting = eval loss goes up instead of down).
# 3. Try replacing the Flatten layer by Global average pooling.
# * Accuracy 68% The model is back to a modest 50K parameters, works better than before and does not overfit anymore
# 4. Try experimenting with 1x1 convolutions too. They typically follow a 3x3 convolution and decrease the filter count. You can also add dropout between the dense layers. For example:
# **`conv 3x3, 20 filters, relu`**<br/>
# **`conv 3x3, 50 filters, relu`**<br/>
# **`max pool 2x2`**<br/>
# **`conv 3x3, 70 filters, relu`**<br/>
# **`conv 1x1, 50 filters, relu`**<br/>
# **`max pool 2x2`**<br/>
# **`conv 3x3, 100 filters, relu`**<br/>
# **`conv 1x1, 70 filters, relu`**<br/>
# **`max pool 2x2`**<br/>
# **`conv 3x3, 120 filters, relu`**<br/>
# **`conv 1x1, 80 filters, relu`**<br/>
# **`max pool 2x2`**<br/>
# **`global average pooling`**<br/>
# **`dense 70 relu, relu`**<br/>
# **`dense 10 softmax`**<br/>
# * accuracy 73%
# 5. The goal is 80% accuracy! Good luck. (You might want to train for 20 epochs to get there. See your training curves to see if it is worth training longer.)
# + id="XLJNVGwHUDy1" colab_type="code" colab={}
# Baseline model for the exercise: a single conv layer feeding a softmax
# classifier over the 5 flower classes. The student is expected to extend it.
model = tf.keras.Sequential([
    # little wrinkle: specifying the input shape as a Keras InputLayer does not
    # work on TPU yet. Please add an input shape on your first layer instead.
    # l.InputLayer(input_shape=[*IMAGE_SIZE, 3]),
    ###
    tf.keras.layers.Conv2D(kernel_size=3, filters=20, padding='same', activation='relu', input_shape=[*IMAGE_SIZE, 3]),
    #
    # YOUR LAYERS HERE
    #
    # LAYERS TO TRY:
    # Conv2D(kernel_size=3, filters=30, padding='same', activation='relu')
    # MaxPooling2D(pool_size=2)
    # GlobalAveragePooling2D() / Flatten()
    # Dense(90, activation='relu')
    #
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(5, activation='softmax')
    ###
])
# categorical_crossentropy matches the float one-hot targets produced by the pipeline
model.compile(
  optimizer='adam',
  loss= 'categorical_crossentropy',
  metrics=['accuracy'])
model.summary()
# + [markdown] id="dMfenMQcxAAb" colab_type="text"
# ## Training
# + id="H7QwBi6_ri4x" colab_type="code" colab={}
# Convert the Keras model to a TPU model when a TPU runtime was detected
# (TF1 contrib API; `tpu` is expected to be set earlier in the notebook).
if tpu:
  # tpu = tf.contrib.cluster_resolver.TPUClusterResolver()
  strategy = tf.contrib.tpu.TPUDistributionStrategy(tpu)
  tpu_model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)
# + id="M-ID7vP5mIKs" colab_type="code" colab={}
EPOCHS = 10
if tpu:
  # NOTE: the dataset *functions* (not dataset objects) are passed on TPU —
  # see the wrinkle comment below; on CPU/GPU the datasets are passed directly.
  history = tpu_model.fit(get_training_dataset, steps_per_epoch=steps_per_epoch, epochs=EPOCHS,
                          validation_data=get_validation_dataset, validation_steps=validation_steps)
  # Little wrinkle: reading directly from dataset object not yet implemented
  # for Keras/TPU. Please use a function that returns a dataset.
else:
  history = model.fit(get_training_dataset(), steps_per_epoch=steps_per_epoch, epochs=EPOCHS,
                      validation_data=get_validation_dataset(), validation_steps=validation_steps)
# + id="VngeUBIdyJ1T" colab_type="code" colab={}
print(history.history.keys())
display_training_curves(history.history['acc'], history.history['val_acc'], 'accuracy', 211)
display_training_curves(history.history['loss'], history.history['val_loss'], 'loss', 212)
# + [markdown] id="MKFMWzh0Yxsq" colab_type="text"
# ## Predictions
# + id="ExtZuDlh2Lem" colab_type="code" colab={}
# Run inference on the 160 validation images loaded earlier into numpy arrays.
inference_model = model
if tpu:
  inference_model.set_weights(tpu_model.get_weights()) # this copies the weights from TPU to CPU
# + colab_type="code" id="ehlsvY46Hs9z" colab={}
# randomize the input so that you can execute multiple times to change results
permutation = np.random.permutation(8*20)
some_flowers, some_labels, some_one_hot_labels = (some_flowers[permutation], some_labels[permutation], some_one_hot_labels[permutation])
predictions = inference_model.predict(some_flowers, batch_size=16)
evaluations = inference_model.evaluate(some_flowers, some_one_hot_labels, batch_size=16)
# argmax over the softmax outputs gives the predicted class index per image
print(np.array(CLASSES)[np.argmax(predictions, axis=-1)].tolist())
print('[val_loss, val_acc]', evaluations)
# + [markdown] id="SVY1pBg5ydH-" colab_type="text"
# ## License
# + [markdown] id="hleIN5-pcr0N" colab_type="text"
#
#
# ---
#
#
# author: <NAME><br>
# twitter: @martin_gorner
#
#
# ---
#
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# ---
#
#
# This is not an official Google product but sample code provided for an educational purpose
#
| courses/fast-and-lean-data-science/07_Keras_Flowers_TPU_playground.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# Load the regression sample data and save two plots as PNG files:
# a raw scatter plot and the same scatter with a fitted regression line.
csv_data<-read.csv("regrex1.csv")
print(csv_data)
# Scatter plot of y vs x, written to R_Scatter.png
png('R_Scatter.png')
plot(x = csv_data$x,
     y = csv_data$y,
     xlab = "x",
     ylab = "y",
     col = "black",
     pch = 4,
     main = "regrex1")
dev.off()
print('Regrex Data')
# +
# Same scatter plot plus the least-squares line from lm(), written to R_linReg.png
png('R_linReg.png')
plot(x = csv_data$x,
     y = csv_data$y,
     xlab = "x",
     ylab = "y",
     col = "black",
     pch = 4,
     main = "Regrex Data")
abline(lm(csv_data$y ~ csv_data$x,
          data = csv_data), col = "black")
print('Printing Scatterplot')
# -
print('Done')
| R_Png.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Econophysics I
# ## Exercise 02 - H05
#
# ### <NAME>
# ### Universität Duisburg-Essen
# 28.04.2020
# +
# Modules
from math import factorial
# -
# ## Exercise 02. Homework 05. Point 03
#
# Calculate the expected profit after 500 laps. What is the maximum profit after 500 laps and how likely is it?
# +
# Expected profit after 500 laps: sum the contribution of each possible
# number of winning rounds n.
rounds = 500
bet_0 = 100
exp_profit = 0
win_factor = 1.5
lose_factor = 0.6
for n in range(rounds + 1):
    # NOTE(review): the `n * rounds` weight looks suspicious; the commented-out
    # factor below suggests a binomial weighting was originally intended — confirm.
    exp_profit += bet_0 * win_factor ** n * lose_factor ** (rounds - n) * n * rounds  # * ((factorial(rounds)) / (factorial(n) * factorial(rounds - n))) * 0.5 ** rounds
# Bug fix: the print previously referenced the undefined name `exp_profit_500`
# (NameError) — the accumulator here is `exp_profit`.
print(f'The expected profit after 500 rounds is {exp_profit:.7e} Euros')
# +
# Alternative estimate: assume a constant 5% expected return per round
# and compound the initial bet over 500 rounds.
rounds = 500
bet_0 = 100
exp_profit = 0.05  # expected return per round (5%)
exp_profit_500 = bet_0 * (1 + exp_profit) ** rounds
print(f'The expected profit after 500 rounds is {exp_profit_500:.7e} Euros')
# +
# Maximum possible profit: winning every one of the 500 rounds, and the
# probability of that outcome with a fair (p = 0.5) win chance per round.
rounds = 500
bet_0 = 100
win_factor = 1.5
prob_win = 0.5
# Consistency fix: use the `rounds` variable instead of a duplicated literal 500.
max_profit = bet_0 * win_factor ** rounds
prob_max_profit = prob_win ** rounds
print(f'The largest possible win is {max_profit:.7e} Euros')
print(f'The probability to obtain the largest possible win is {prob_max_profit:.7e}')
| week_2/Exercise02_H05_03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Intro
#
# 파이썬으로 데이터를 다루기 위해 가장 기본적으로 알아야 하는 것이 데이터의 형태입니다. 데이터는 숫자, 문자, 배열, 집합, 순서쌍 등 다양한 형태를 가질 수 있습니다. 파이썬에는 몇 가지 기본적인 자료형들이 정의되어 있으며, 각각의 자료형마다 다른 연산과 조작을 할 수 있습니다. 이번 장의 목표는 **내가 다루는 데이터가 어떤 형태인지를 알고, 자료형에 맞는 연산과 조작을 할 수 있는 능력**을 키우는 것입니다.
#
# ## 기본적인 파이썬 자료형
#
# 다음은 파이썬에서 사용되는 기본적인 자료형들의 예시입니다. 자세한 내용은 앞으로 차근차근 다룰 예정이니 눈으로만 봐두시면 됩니다.
#
# - `>>>` 표시가 붙은 부분은 파이썬 코드입니다.
# - `#` 표시가 붙은 부분은 코드에 대한 주석으로, 실제 실행되는 파이썬 코드가 아닙니다.
# 숫자
a = 365 # 정수
b = 3.14 # 소수
c = 1.25 #소수
# 문자열
d = 'Hello world!'
e = "DataScience Lab"
# 불리언
f = True
g = False
# 리스트
h = [1.1, 2.2, 3.3, 4.4, 5.5]
i = ["a","b","c","d"]
# 튜플
j = (1,2,3,4,5)
k = (True,False,False,True)
# 셋
l = set([1,2,3])
m = set({"one", "two", "three"})
# 딕셔너리
n = {"김연아":"피겨", "손흥민":"축구"}
o = {1:"짜장면", 2:"짬뽕"}
# ## 기본적인 함수와 연산자
#
# 다음은 이번 장에서 사용할 간단한 함수 및 연산자들입니다. **비교연산자 `==`와 할당연산자 `=`의 차이에 유의하시기 바랍니다.** 할당연산자 `=`는 두 대상이 같은지 비교하는 연산자가 아니라, 변수에 값을 할당하는 할당연산자입니다. 즉 `a=5`은 "a는 5와 같다"가 아니라, "a에 5를 할당하라"는 명령입니다!
#
# 함수/연산자|기능|예시 코드|실행결과
# ---|---|---|---
# `print`|대상을 출력|`print("Hello world!")`|`Hello world!`
# `type`|대상의 자료형을 반환|`type(5)`|`int`
# `len`|대상의 길이를 반환|`len("abc")`|`3`
# `==`|"두 대상이 같다"를 판단|`'Python'=='R'`|`False`
# `!=`|"두 대상이 같지 않다"를 판단|`'Python'!='R'`|`True`
# `=`|왼쪽 변수에 오른쪽 값을 할당|`a=5`|
#
# # 1. 숫자
#
# ## 1.1. 정수와 소수
# 파이썬의 숫자는 **정수**와 **소수**로 나뉘며, 각각 **`int`**와 **`float`**으로 표기합니다. 아래는 정수 `2`와 소수 `3.14`의 자료형을 확인하는 코드와 결과입니다.
type(2)
type(3.14)
# ## 1.2. 자료형의 변환
#
# **`int` 함수는 숫자나 문자열을 정수 자료형으로 바꿔주고, `float` 함수는 숫자나 문자열을 소수 자료형으로 바꿔줍니다.** 소수점을 가진 숫자 혹은 문자열을 정수로 변환하면 소수점 아래는 버림됩니다.
#
# 함수|기능|예시 코드|결과
# ---|---|---|---
# `int`|대상을 정수로 변환|`int(4.8)`|`4`
# `float`|대상을 소수로 변환|`float("3")`|`3.0`
#
# ## 1.2 파이썬으로 계산하기
#
# 숫자들 간에는 정수와 소수 구분 없이 다음의 연산이 가능합니다. 사칙연산과 제곱을 제외한 연산자들은 사용하는 빈도가 많지 않으니, 이런 연산이 있다고만 알아두시면 됩니다.
#
# 연산자|기능|예시 코드|실행결과|결과의 자료형
# ---|---|---|---|---
# `a + b`|덧셈|`1+1`|`2`|`int`
# `a - b`|뺄셈|`3-2.5`|`0.5`|`float`
# `a * b`|곱셉|`2*4.5`|`9.0`|`float`
# `a / b`|나눗셈|`0.9/0.3`|`3.0`|`float`
# `a ** b`| a의 b제곱|`5**2`|`25`|`int`
# `a // b`|a를 b로 나눌 때의 몫|`8//3`|`2`|`int`
# `a % b`|a를 b로 나눌 때의 나머지|`8%3`|`2`|`int`
print(1+1) # 덧셈
print(3 - 2.5) # 뺄셈
print(2 * 4.5) # 곱셉
print(0.9/ 0.3) # 나눗셈
# # 2. 문자열
#
# ## 2.1. Hello World
#
# 문자열은 말 그대로 문자들의 나열을 의미하며, `str`로 표기합니다. 작은 따옴표 `''`, 혹은 큰 따옴표 `""` 를 사용해서 만들 수 있습니다. 둘 중 무엇을 사용하든 상관은 없습니다. 프로그래밍 언어를 배울 때는 `"Hello World"`를 출력해보는 것이 관례입니다. 다같이 인사해보세요!
print("Hello world")
type("Hello world")
# **예제 2-1. 다음 코드의 실행 결과가 True일지 False일지 판단하세요**
365 == "365"
# **풀이**
#
# `365`는 숫자이고, `"365"`는 따옴표로 둘러싸여 있으므로 문자열입니다. 문자열 `"365"`와 숫자 `365`가 같은지 테스트해보면, 양쪽의 자료형이 다르기 때문에 `False`가 반환됩니다.
type("365")
365 == "365" # 365 는 "365"와 같다: 거짓이므로 False를 반환합니다.
# ## 2.2. 자료형의 변환
#
# `str` 함수를 사용해서 숫자를 문자열로 바꿔줄 수 있습니다.
str(3.14)
# ## 2.3. 문자열 연산자
#
# `in` 과 `not in` 연산은 문자열 뿐 아니라 리스트 등의 자료형에서도 자주 쓰이는 연산이니 알아두면 좋습니다.
#
# 연산자|기능|예시 코드|결과
# ---|---|---|---
# `a + b`|a 문자열과 b 문자열을 합침|`"A" + "B"`|`"AB"`
# `a * b`|a 문자열을 b 회 반복|`"A" * 3`|`"AAA"`
# `a in b`|"a가 b에 포함된다"를 판단|`"A" in "ABCDEFG"`|`True`
# `a not in b`|"a가 b에 포함되지 않는다"를 판단|`"A" not in "ABCDEFG"`|`False`
#
#
# **예제 2-2. 다음 코드의 실행 결과를 판단하세요(True/False/Error)**
"Data" in "DatascienceLab"
1 in "123"
# **풀이**
#
# `"DatascienceLab"`이 `"Data"`라는 문자열을 포함하기 때문에 첫 줄의 실행 결과는 `True`입니다.
"Data" not in "DatascienceLab"
# `1`은 숫자이고, `"123"`은 문자열이기 때문에 둘째 줄의 결과는 에러입니다. 숫자와 문자열 간에는 `in`과 `not in`이라는 관계 연산이 정의되지 않습니다.
1 in "123"
# ## 2.4. 인덱싱 & 슬라이싱
#
# **문자열은 문자를 순서대로 나열한 데이터입니다. 문자열을 구성하는 하나하나의 문자에는 순서대로 번호를 매길 수 있으며, 이 번호를 인덱스라고 부릅니다.** 아래 그림은 `"Hello World"`라는 문자열에 앞쪽부터 번호를 매긴 결과입니다. 파이썬의 인덱스는 `0`부터 시작하기 때문에, 가장 먼저 나온 문자인 `"H"`의 인덱스는 `0`이 됩니다. 그 이후 순차적으로 `1, 2, ... 10` 까지의 번호가 매겨졌습니다. 공백 역시 문자열에 포함된다는 사실을 유의해주세요! 파이썬의 인덱스는 뒤에서부터 부여할 수도 있습니다. 인덱스를 뒤에서부터 매기면 `-1, -2, ...` 와 같이 음수가 됩니다.
#
# 문자열|H|e|l|l|o| |w|o|r|l|d
# :---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:
# 인덱스|0|1|2|3|4|5|6|7|8|9|10
# -인덱스|-11|-10|-9|-8|-7|-6|-5|-4|-3|-2|-1
#
# ### 인덱싱
#
# 문자열에 부여된 **인덱스를 통해서 개별 문자를 뽑아낼 수 있고, 이것을 인덱싱이라고 합니다.** 비유하면, 여러분과 파이썬이 다음과 같은 대화를 한다고 생각하시면 됩니다.
#
# > You: "Hello world"의 0번 문자를 찾아줘!
#
# > Python: "Hello world"의 0번 문자는 "H" 입니다.
#
# 그렇다면 `"Hello world"`의 0번 문자를 찾아줘!'라는 명령을 어떻게 파이썬에 전달할 수 있을까요? 문자열 바로 뒤에 대괄호`[]`를 적고, 대괄호 안에 원하는 인덱스 숫자를 넣어주면 됩니다. 실제 파이썬 코드를 실행하면 다음과 같은 결과를 얻을 수 있습니다.
"Hello world"[0]
type("Hello world"[0])
# 문자열|인덱스|예시 코드|결과
# ---|---|---|---
# `"Python"`|`0`|`"Python"[0]`|`'P'`
# `"DataScience Lab"`|`12`|`"DataScience Lab"[12]`|`'L'`
# `"010-5782-xxxx"`|`7`|`"010-5782-xxxx"[7]`|`'2'`
# `"You need Python."`|`-1`|`"You need Python."[-1]`|`'.'`
#
#
# **예제 2-3. 다음 코드의 실행 결과를 판단하세요**
mystring = "Python is too slow"
mystring[-2] != mystring[4]
# **풀이**
#
# `"Python is too slow"`라는 문자열을 `mystring`이라는 변수에 할당했습니다. 따라서 `mystring[-2]`와 `'mystring[4]'`는 모두 `'o'`입니다. 두 값이 같으므로 `!=` 연산의 결과는 `False`입니다.
mystring = "Python is too slow"
mystring[-2] != mystring[4]
# ### 슬라이싱
#
# **슬라이싱은 말 그대로 문자열의 일정 구간을 잘라내는 조작입니다.** 일정 구간에 걸쳐서 인덱싱을 실행한다고 생각하시면 편합니다. 역시 대화로 표현하면 다음과 같습니다.
#
# > You: "Hello world"의 0번부터 4번까지의 문자를 찾아줘!
#
# > Python: "Hello world"의 0번부터 4번까지 문자는 "Hello" 입니다.
#
# 슬라이싱을 할 때는, `[a:b]`와 같이 대괄호 안에 구간을 입력해주면 됩니다. 구간을 입력할 때는 주의사항이 있습니다. 예시와 함께 보겠습니다.
"Hello world"[0:4]
# `[0:4]`의 구간을 입력했으므로 0, 1, 2, 3, 4번 문자가 출력되어야 할 것 같은데, 이상하게 `'Hell'`까지만 출력되었습니다. 이는 구간 `[a:b]`가 a는 포함하지만, b는 포함하지 않기 때문입니다. 즉 0번부터 4번까지를 슬라이싱 하려면 구간을 `[0:5]`와 같이 입력해야 합니다.
"Hello world"[0:5]
# 슬라이싱의 구간 표현법을 정리하면 다음과 같습니다.
#
# 구간|설명|예시 코드|실행 결과
# ---|---|---|---
# `[a:b]`|a번 문자부터 b번 문자 직전까지 슬라이싱|`'www.naver.com'[4:9]`|`'naver'`
# `[:]`|문자열의 전 구간을 슬라이싱|`"Super Awesome Code"[:]`|`'Super Awesome Code'`
# `[a:]`|a번 문자부터 끝까지 슬라이싱|`"Avengers: Endgame"[-7:]`|`'Endgame'`
# `[:b]`|처음부터 b번 문자 직전까지 슬라이싱|`"서울특별시 서대문구"[:5]`|`'서울특별시'`
#
# **예제 2-4. 다음 문자열에서 이메일 주소를 뽑아내세요**
mystring = "학회|데이터사이언스랩|yonseidslab.github.io|<EMAIL>"
# **풀이**
#
# 이메일 주소가 가장 뒤에 위치하므로 인덱스를 뒤쪽부터 세는게 편해 보입니다. `len` 함수를 활용하여 문자열의 길이를 파악하였습니다. 음수 인덱스를 활용하여 `-21`번 인덱스부터 끝까지 슬라이싱하면 이메일 주소를 뽑아낼 수 있습니다.
mystring = "학회|데이터사이언스랩|<EMAIL>|<EMAIL>"
len("<EMAIL>") # len: 문자열의 길이를 반환하는 함수
mystring[-21:]
# ## 2.5. 문자열의 메소드
#
# 메소드는 일종의 함수이며, 대상 뒤에 점`.` 을 찍고 사용할 수 있습니다. 함수이므로 반드시 `()` 와 함께 사용합니다.
#
# 메소드|기능
# ---|---
# `s.strip()`|`s` 문자열 양쪽의 공백을 제거
# `s.replace(a,b)`|`s` 문자열 내의 `a` 문자열을 `b` 문자열로 교체
# `a.split(b)`|`b` 문자를 기준으로 `a` 문자열을 쪼갬
# `a.join(b)`|`b` 문자열 사이사이에 `a` 문자열을 삽입
#
# ### **strip**
#
# **`strip` 메소드는 문자열 양쪽의 공백을 모두 제거합니다. 원하는 문자열 뒤에 점을 찍어서 `"문자열".strip()`과 같이 적어주면 됩니다.** 문자열 `" Hello world "`는 양쪽에 공백이 있고, 가운데에도 한 칸의 공백이 있습니다. `strip` 메소드를 활용하면 가운데의 띄어쓰기는 남겨두고 양 쪽의 공백만을 제거할 수 있습니다.
s = " Hello world "
s.rstrip()
# 사실 `s.strip()`은 s를 실제로 변화시키는 것이 아니라, 변화한 상태를 일시적으로 보여주는 것입니다. `s`를 출력해보면 양쪽의 공백이 그대로 남아있습니다. 공백을 제거한 결과를 `s`에 저장하고 싶다면, `s = s.strip()`과 같이 다시 할당을 해주어야 합니다. `=` 는 수학적인 의미의 등호가 아니라, 할당을 뜻하는 연산자이므로, **`s = s.strip()`이라는 코드는 "`s.strip()`을 실행하고 이 결과를 s에 할당하라"**는 의미입니다. s 와 s.strip() 이 같다는 의미가 아닙니다!
s
s = s.strip() # s.strip()을 실행하고 이 결과를 s에 할당하라
s
# **탭을 의미하는 `\t` 문자열과 개행을 의미하는 `\n` 문자열 역시 공백으로 인식됩니다.** 아래 예시 코드를 보면, `strip` 메소드가 `\t`와 `\n`을 공백으로 인식하여 삭제하였음을 확인할 수 있습니다.
s = "\n\n\nHello world\t\t\t"
s = s.strip()
s
# ### **replace**
#
# **`replace` 메소드는 문자열 내의 특정 문자를 다른 문자로 모두 교체합니다. `s.replace(a,b)`와 같이 적어주면 `s` 문자열 내의 `a` 문자열을 모두 `b` 문자열로 교체합니다.** 아래는 문자열 `"You need Python"` 에서 `"Python"`을 `"R"`로 바꾸어준 예제입니다. 역시 `s.replace("Python","R")`를 실행하는것만으로는 변수에 할당된 값이 변화하지 않습니다.
s = 'You need Python' # 문자열을 변수 s에 할당
s.replace("Python", "R") # s에서 Python -> R로 교체
s # s는 변화하지 않음
s = s.replace("Python", "R") # Python을 R로 교체한 후 이 결과를 s에 할당
s
s = s.replace(" ", ",") # 모든 " "를 ","로 교체
s
# **예제 2-5. a를 b와 같은 문자열로 변형하고 결과를 검증하세요. 단, 문자열 조작하는 과정에는 한 줄의 코드만 사용할 수 있습니다.**
a = " C GO Java Python PHP "
b = "C|GO|Java|Python|PHP"
# **풀이**
#
# **1) 문자열의 양쪽 공백을 제거해준 후, 2) 중간중간의 공백을 `|`로 교체**해주면 문제를 해결할 수 있습니다. 얼핏 보면 다음과 같이 두 줄의 코드가 필요할 것 같습니다. 먼저 양쪽의 공백을 지운 결과를 `tmp`라는 임시 변수에 저장합니다. 이후 `tmp`에서 공백을 `|`로 교체하고 이 결과를 `result`라는 변수에 최종적으로 저장합니다. `b`와 `result`를 비교해보면 두 값이 같다는 사실을 알 수 있습니다.
a = " C GO Java Python PHP "
b = "C|GO|Java|Python|PHP"
tmp = a.strip()
result = tmp.replace(" ","|")
b == result
# 위의 코드가 틀린 것은 아니지만, `tmp`라는 임시 변수를 사용하지 않고 코드를 더 간소화할 수 있습니다. 아래 풀이에서는 **메소드 체이닝, 즉 메소드를 연결하는 코드를 통해서 두 번의 조작을 한 줄의 코드로 끝냈습니다.** `a.strip()`의 결과는 양쪽 공백이 제거된 `"C GO Jave Python PHP"` 입니다. 따라서 `a.strip().replace(" ","|")`는 결국 `"C GO Jave Python PHP".replace(" ","|")`와 같습니다.
a = " C GO Java Python PHP "
b = "C|GO|Java|Python|PHP"
result = a.strip().replace(" ","|")
b == result
# ### **split()**
#
# **`split` 메소드는 특정 문자를 기준으로 문자열을 쪼개어 리스트를 반환합니다. `s.split(a)`와 같이 적어주면 a 문자열을 기준으로 s 문자열을 쪼개어 리스트를 반환합니다.** 아무런 문자도 주어지지 않는다면 공백을 기준으로 문자열을 쪼갭니다.
s = "Hello world"
s.split(" ") # " " 를 기준으로 문자열을 쪼갬 > 이 결과를 리스트로 반환
s = "one/two/three/four"
s.split("/")
# **예제 2-6. 다음 코드의 실행 결과를 예상해보세요**
a = "<EMAIL>"
a.replace("@",".").split(".")
# **풀이**
#
# 문자열에서 `@`를 `.`으로 교체해준 후, `.`을 기준으로 나누었으므로 결과는 다음과 같습니다.
a = "<EMAIL>"
a.replace("@",".").split(".")
# ### **join**
#
# **`join` 메소드는 문자열 사이사이에 다른 문자를 삽입합니다. `a.join(b)`와 같이 적어주면 b 문자열 사이사이에 a 문자열을 삽입한니다.** `join` 메소드는 문자열보다는 리스트에 대해 사용하는 경우가 많습니다
alphabet = "ABCDEFG"
"|".join(alphabet)
mylist = ['a','b','c']
','.join(mylist)
# ## 2.6. 문자열 포매팅
#
# 문자열 포매팅은 문자열 안에 변수를 포함하는 기능입니다. 유사한 패턴의 문자열에서 특정 부분만 변형해서 사용해야 하는 경우, 문자열 포매팅이 유용합니다. 여기에서는 `f` 포매팅에 대해서 배워보겠습니다. `f` 포매팅을 사용할 때는 문자열을 감싼 따옴표 앞에 `f`를 적어줍니다. 이후 변수를 사용할 위치에 중괄호`{}`를 쓰고, 중괄호 안에 원하는 변수를 입력해주면 됩니다.
ID = "yonseidslab"
domain = "naver"
address = f"{<EMAIL>"
address
# # 3. 불리언
#
# ## 3.1. 불리언 자료형
#
# 불리언은 참/거짓을 나타내는 자료형이며, 각각 `True`, `False`로 씁니다. 크게 주의할 것은 없고, 파이썬에서 `0`이 `False`를 의미한다는 것은 알고 넘어가면 좋습니다.
True
False
0 == False
# ## 3.2. 논리 연산자
#
# 불리언 자료형에서 가장 중요한 것은 논리 연산입니다. 크게 복잡한 것은 없지만 조건문에서 자주 활용되는 연산이므로 알아두시면 좋습니다.
#
# 연산자|기능|예시 코드|실행 결과
# ---|---|---|---
# `a & b`|"a와 b가 모두 참이다"를 판단|`True & True`|`True`
# `a and b`|"a와 b가 모두 참이다"를 판단|`True and False`|`False`
# `a \| b`|"a, b 중 최소한 하나가 참이다"를 판단|`True | False`|`True`
# `a or b`|"a, b 중 최소한 하나가 참이다"를 판단|`False or False`|`False`
# **예제 3.1. 다음 연산의 실행 결과를 판단하세요**
(10 < 10) or (10 > 10)
("Python" != "R") and ("Life" == "short")
# **풀이**
#
# 첫 줄은 `(10 < 10)`과 `(10 > 10)` 이라는 두 개의 명제로 이루어져 있습니다. 두 명제 모두 거짓이므로 `False or False`가 되고, 결과는 `False`입니다. 둘째 줄은 `("Python" != "R")`과 `("Life"=="short")` 라는 두 개의 명제로 이루어져 있습니다. 첫 명제는 참이지만 둘째 명제는 거짓이므로 `True and False`가 되고, 결과는 `False`입니다.
(10 < 10) or (10 > 10)
("Python" != "R") and ("Life" == "short")
# ## 3.3. 비교연산자
#
# 비교연산자 `==`, `!=`, `>`, `<` 등은 연산 결과로 불리언 자료형을 반환합니다.
"Python" != "R"
"You" == "Me"
1 > 0
2 <= 2
# # 4. 변수와 함수
#
# ## 4.1. 변수
#
# 변수는 정보를 담는 공간입니다. 우리가 `x=3` 이라는 할당을 시행하면 컴퓨터는 메모리 어딘가에 3이라는 정보를 기록하고, x라는 라벨을 붙입니다. 이후 코드에서 x라는 변수를 사용하면, 컴퓨터는 x라는 라벨이 붙은 정보를 찾아 계산에 사용하게 되는 것입니다. 변수가 메모리 어디에 저장되어 있는지는 `id` 함수를 사용하면 알 수 있습니다.
x = 3
id(x)
meanSquaeredError
# - 변수는 숫자로 시작할 수 없다
# - 하나의 변수명에는 하나의 객체가 대응한다
# - 하나의 변수명에는 하나의 객체가 대응한다
#
# ## 4.2. 함수
#
# ### 함수의 구조
#
# 
#
# 우리가 흔히 알고 있는 함수는 y = f(x)와 같은 형태입니다. x라는 입력이 들어가면, f(x)라는 함수값이 출력됩니다. 파이썬의 함수 역시 이와 유사하게 작동합니다. **파이썬에서는 x를 인자(argument) 또는 매개변수(parameter)라고 부르고, y를 반환(return)이라고 부릅니다.**
def 함수명(매개변수1,매개변수2,...):
매개변수들을 활용한 계산 # 들여쓰기 필수!!!! 스페이스 네 개 !!!!
return 결과물
# 파이썬의 함수는 `def` 문을 통해 만들 수 있으며, `def` 문의 구조를 일반화하면 위와 같습니다. `def` 다음 함수명을 적고, 괄호 안에 필요한 매개변수들을 적어줍니다. 이후 다음 줄로 넘어가 들여쓰기(스페이스 네 개)된 상태에서 계산을 진행하면 됩니다. 대부분의 편집기에서는 자동으로 들여쓰기를 적용해줍니다. **계산이 끝나면 `return` 절에 반환할 결과물을 적어줍니다.**
#
def add(x,y):
    """Return the sum of the two arguments."""
    return x + y
add(x=4,y=5)
add(2,3)
# `add` 함수는 `x`와 `y`라는 두 개의 인자를 받아서 더하고, 이 결과를 반환합니다. 함수를 사용할 때는 함수 이름을 적고, 등호 `=`를 사용해서 필요한 인자들을 전달해줍니다. 인자를 순서대로 전달했다면, 매개변수명을 생략할 수 있습니다.
#
# **함수 안에서 사용되는 변수들은 지역 변수(local variable)로, 함수 안에서만 유효합니다.** 즉 함수 밖에서는 함수 안의 변수들에 접근할 수 없습니다. 예를 들어 우리가 만든 `add` 함수에 `x=4, y=5`를 전달하고 실행하면 함수 안에서는 다음과 같은 일이 일어날 것입니다.
# 함수 작동의 예시로, 실행 가능한 코드가 아닙니다
value = 4 + 5
return value
# 하지만 함수를 실행한 후 `result`를 출력하면 변수 `result`가 선언되지 않았다는 에러가 발생합니다. 함수 안에서 사용되는 변수는 지역 변수(local variables)로, 함수 안에서만 유효하기 때문입니다. 따라서 함수의 실행 결과를 저장하려면 이를 별도의 변수에 할당해주어야 합니다.
add(4,5)
print(value)
a = add(4,5)
print(a)
# **예제 4-1. 이메일 문자열이 gmail 주소이면 True, 아니면 False를 반환하는 함수를 작성하세요. 다음과 같이 작동하면 됩니다.**
# 아직 함수를 작성하지 않았으므로 실행하면 오류납니다!
my_function("<EMAIL>")
# 아직 함수를 작성하지 않았으므로 실행하면 오류납니다!
my_function("<EMAIL>")
# **풀이**
#
# 주어진 문자열에 `"@<EMAIL>"`이 포함되는지를 판단하면 되는 문제입니다. 함수의 이름을 `my_function`으로 정하고, `address`라는 매개변수로 문자열을 받았습니다. 이제부터 `address`는 어떤 문자열을 가리키는 변수라고 상상하면서 코딩을 해주면 됩니다. `in` 연산을 통해 `address`에 `"@gmail.com"`이 포함되는지 판단할 수 있습니다. 이 결과를 `result`라는 변수에 저장하고, `return result`로 결과를 반환합니다.
def my_function(address):
    """Return True when *address* is a gmail address, False otherwise."""
    # A simple substring membership test on the domain suffix decides the result.
    return "@gmail.com" in address
my_function("<EMAIL>")
my_function("<EMAIL>")
# 연산이 간단할 때에는 다음과 같이 적는 것도 가능합니다. 한 줄의 코드로 끝나는 연산이기 때문에 별도의 변수를 사용하지 않고 즉시 반환하였습니다.
def my_function(address):
    """Return True when *address* contains the gmail domain, False otherwise."""
    is_gmail = "@gmail.com" in address
    return is_gmail
# ### 인자와 반환
#
# 사실 파이썬의 함수는 일반적인 함수와 달리 x와 y가 없이도 작동할 수 있습니다. 즉 파이썬의 함수는 다음과 같은 네 가지 경우로 구분할 수 있습니다.
#
# 1. 매개변수와 반환이 모두 있는 함수
# 2. 매개변수가 없고 반환이 있는 함수
# 3. 매개변수가 있고 반환이 없는 함수
# 4. 매개변수와 반환이 모두 없는 함수
# **예제 4-2. 다음 함수들이 1,2,3,4 중 어디에 해당하는지 판단하세요.**
def my_function(x,y):
print(x+y)
def my_function2():
print("Hello world")
def my_function3():
return("Hello world")
def my_function4(x,y):
return("Hello world")
# **풀이**
#
# 1. x, y 두 개의 매개변수가 있지만 `return`이 없으므로 3에 해당
# 2. 매개변수가 없고 `return`이 없으므로 4에 해당
# 3. 매개변수가 없고 `return`이 있으므로 2에 해당
# 4. x, y 두 개의 매개변수가 있고 `return`이 있으므로 1에 해당
# ### lambda 함수
#
# 지금까지는 `def` 문을 통해서 함수를 만드는 방법을 배웠습니다. 하지만 한 줄 정도의 간단한 함수를 만들 때는 `lambda` 문을 사용하는 것이 편리합니다. `lambda` 뒤에 괄호를 사용하지 않고 매개변수들을 나열해준 후, 콜론 다음에 계산식을 적어주면 됩니다. `lambda` 문에서는 `return`을 사용하지 않으며, 콜론 이후 코드의 실행 결과가 즉시 반환됩니다. `lambda` 문을 변수에 할당하면 해당 변수명으로 함수가 생성됩니다.
add = lambda x,y: x+y
add(4,5)
# `lambda` 문의 구조를 일반화하면 다음과 같습니다.
함수명 = lambda 인자1, 인자2, : 계산식
# # 5. 리스트
#
# ## 5.1. 리스트 생성
#
# 리스트는 `[요소1, 요소2, 요소3, ...]`과 같이 여러 요소들을 묶어놓은 자료형입니다. 대괄호 혹은 `list` 함수를 사용해서 리스트를 생성할 수 있습니다. 각 요소들은 쉼표 ,로 구분합니다. 리스트는 거의 모든 자료형을 요소로 가질 수 있으며, 요소들의 자료형이 모두 같을 필요도 없습니다. 다음은 모두 리스트의 예시입니다.
a = [] # 빈 리스트
b = [1,2,3,4,5] # 숫자 리스트
c = ['a','b','c','d','e'] # 문자열 리스트
d = ['a','b',3,4,'d',False] # 여러 자료형을 포함한 리스트
e = [[1,2,3],[4,5,6]] # 리스트의 리스트
f = list([1,2,3]) # list() 함수를 사용해서 리스트 생성
# ## 5.2. 리스트 연산자
#
# 연산자|기능|예시 코드|결과
# ---|---|---|---
# `+`|두 리스트를 합침|`[1,2,3] + [4,5,6]`|`[1,2,3,4,5,6]`
# `*`|리스트의 요소들을 반복|`[1,2,3]*2`|`[1,2,3,1,2,3]`
# `a in b`|"a 요소가 b 리스트 안에 포함된다"를 판단|`1 in [1,2,3]`|`True`
# `a not in b`|"a 요소가 b 리스트 안에 포함되지 않는다"를 판단|`1 not in [1,2,3]`|`False`
#
# `in` 연산자와 `not in` 연산자는 기억하고 넘어가시기 바랍니다.
"Apple" in ["Apple", " Banana", "code"]
[1,2,3] in [[1,2,3],[4,5,6]]
# **예제 5-1. 다음 코드의 실행 결과를 판단하세요(True/False)**
[1,2,3] not in [1,2,3,[1,2,3]]
1 in [[1,2,3]]
# **풀이**
#
# `[1,2,3,[1,2,3]]`의 요소는 `1, 2, 3, [1,2,3]` 이므로 `[1,2,3]`은 이 리스트의 요소입니다. 따라서 첫 줄의 결과는 `False`입니다. `[[1,2,3]]`의 요소는 `[1,2,3]`뿐이므로 둘째 줄의 결과도 `False`입니다.
# Fix: the solution cell must repeat the exercise's first line (L-shaped list
# containing [1,2,3] as an element); since [1,2,3] IS an element, the result is False.
[1,2,3] not in [1,2,3,[1,2,3]]
1 in [[1,2,3]]
# ## 5.3. 리스트 인덱싱 & 슬라이싱
#
# 리스트는 요소들의 순차적인 배열입니다. 따라서 문자열과 마찬가지로, 각각의 요소들에 번호를 매길 수 있습니다. 번호를 매기는 방식 역시 문자열과 동일합니다. 즉 앞에서부터 0, 1, 2 ... 순으로 인덱스가 붙고, 뒤에서부터 -1,-2,-3 ... 순으로 인덱스가 붙습니다. 예를 들면 다음과 같습니다.
["Life", "is", "short", "you", "need", "Python"]
# 요소|"Life"|"is"|"short"|"you"|"need"|"Python"
# :---:|:---:|:---:|:---:|:---:|:---:|:---:
# 인덱스|0|1|2|3|4|5
# 인덱스|-6|-5|-4|-3|-2|-1
#
# ### 인덱싱
#
# 문자열의 인덱싱과 슬라이싱에 익숙해졌다면, 리스트의 인덱싱과 슬라이싱 역시 어렵지 않을 겁니다. 리스트의 인덱싱 역시 문자열과 마찬가지로 대괄호 []를 사용합니다.
#
# 리스트|인덱스|예시 코드|결과
# ---|---|---|---
# `[0.25, 0.5, 0.75, 1.0]`|`1`|`[0.25, 0.5, 0.75, 1.0][1]`|`0.5`
# `["You", "need", "Python"]`|`-1`|`["You", "need", "Python"][-1]`|`'Python'`
# `[[1,2,3],[4,5,6],[7,8,9]]`|`2`|`[[1,2,3],[4,5,6],[7,8,9]][2]`|`[7,8,9]`
[0.25, 0.5, 0.75, 1.0][1]
["You", "need", "Python"][-1]
# Fix: the index contained a stray backtick (SyntaxError); index 3 selects
# the fourth sub-list, [10, 11, 12].
[[1,2,3],[4,5,6],[7,8,9],[10,11,12]][3]
# 인덱싱을 사용하여 개별 요소를 추출하였다면, 해당 요소에 대해 다시 조작을 가하는 것도 가능합니다. 아래는 리스트에서 개별 요소를 추출한 후, 해당 요소에 대해 다시 인덱싱/슬라이싱을 적용한 예제입니다.
mylist = ["Life", "is", "short", "you", "need", "Python"] # 리스트 생성
mylist
element = mylist[0] # 0번 요소인 "Life" 추출
element
element[:2] # "Life"에서 2번 문자까지 슬라이싱
element[:2] == mylist[0][:2] # 위 과정을 붙여서 쓴 코드
# **예제 5-2. 다음 리스트에서 "Wally"를 추출하세요.**
WhereIsWally = [["wally","<NAME>"],[["waly"],"Wally","Wallmart"]]
WhereIsWally[1][1]
# **풀이**
#
# "Wally"는 두 번째 서브리스트의 두 번째 요소입니다. 따라서 다음과 같이 두 번의 인덱싱으로 손쉽게 월리를 찾아낼 수 있습니다.
WhereIsWally = [["wally","<NAME>"],[["waly"],"Wally","Wallmart"]]
WhereIsWally[1][1]
# **예제 5-3. 다음 주소에서 문자열 인덱싱을 사용하지 않고 동을 추출하세요.**
address = "서울특별시 서대문구 신촌동 연세로 50"
address.split(" ")[2]
# **풀이**
#
# 주어진 문자열은 띄어쓰기 기준으로 스플릿할 수 있습니다. 스플릿을 실행하면 각 어절이 리스트의 요소로 들어갈 것이고, "신촌동"은 리스트의 두 번째 요소가 될 것입니다. 이 결과를 코드로 정리해주면 됩니다.
address = "서울특별시 서대문구 신촌동 연세로 50"
address.split(" ")[2]
# ### 슬라이싱
#
# 리스트 슬라이싱 역시 문자열 슬라이싱과 다르지 않습니다. 대괄호 안에 콜론을 써서 구간을 표현해주시면 됩니다.
['one', 'two', 'three', 'four', 'five'][:]
["Apple","Banana", "Computer", "Data"][1:3]
[1, 2, 3, 4, 5][3:-1]
# 이번에는 특정 요소들을 건너뛰는 슬라이싱을 배워보겠습니다. 예를 들어 리스트 안에서 홀수 인덱스를 가진 요소들만 추출할고 싶을 때, 이 방법을 활용할 수 있습니다. **기본적인 구간 표현법은 `start:end:step`과 같습니다. 이는 start부터 end까지 step만큼 이동하면서 가져오라는 의미입니다.** step이 1이면 바로 다음 요소로 가는 것이므로 해당 구간의 모든 요소들을 가져올 것이고, step이 2이면 한 칸씩 건너뛰면서 요소들을 가져올 것입니다.
#
# 예제와 함께 보겠습니다. 첫 예제에서 사용한 구간 `1:8:2`는 1번 인덱스부터 8번 인덱스까지 2칸씩 진행하면서 요소들을 가져오라는 뜻입니다. 즉 따라서 1, 3, 5, 7번 인덱스에 해당하는 1, 3, 5, 7이 추출되었습니다. 두 번째 예제에서는 start와 end를 생략하여 리스트의 전 구간을 표현하였고, step은 2로 주었습니다. 따라서 0, 2, 4, 6, 8번 인덱스에 해당하는 0, 2, 4, 6, 8이 추출되었습니다.
[0, 1, 2, 3, 4, 5, 6, 7, 8][0:8:3]
[0, 1, 2, 3, 4, 5, 6, 7, 8][::2]
# ## 5.4. 리스트 메소드
#
# 메소드|기능
# ---|---
# `a.append(b)`|리스트 a에 요소 b를 추가함
# `a.index(b)`|리스트 a에서 요소 b의 인덱스를 찾음
#
# ### append
#
# append 메소드는 특정 리스트에 요소를 추가하는 메소드입니다. **이 메소드는 리스트를 즉시 변형시키므로 주의해서 사용해야 합니다.** 즉 `a.append(b)`를 실행하면 즉시 리스트에 요소가 추가되고, 다시 할당을 할 필요가 없습니다.
#
myList = [1,2,3]
a = [1,2,3]
a.append(4)
a
# ### index
a = ["A","B","C"]
a.index("B")
# ## 5.5. 리스트의 요소들에 함수 적용하기: map
#
# `map` 함수는 리스트 내의 요소들에 일괄적으로 함수를 적용하는 함수입니다. 만약 리스트 안의 모든 요소들에 2를 곱하고 싶다면 어떻게 코드를 짜야 할까요? 리스트의 사칙연산은 숫자와 다르게 작동하므로, [1,2,3] * 2 와 같은 코드로는 원하는 결과를 얻을 수 없습니다. 이 때 `map` 함수를 사용하면 쉽게 해결이 가능합니다. `map`은 함수와 반복 가능한 객체 하나를 인자로 받습니다. 반복 가능한 객체는 리스트와 같이 각 요소에 차례대로 접근할 수 있는 객체를 말합니다.
map(함수, 리스트)
# 아래 예시 코드를 보겠습니다. a는 리스트이고, f는 x라는 인자를 받아서 x의 제곱을 반환하는 함수입니다. `map(f,a)`는 a 리스트의 각 요소에 f 함수를 적용하는 코드입니다. 하지만 `map(f,a)`까지만 실행하면 계산이 실제로 실행되지는 않으며, 맵 객체라는 이상한 결과가 반환됩니다. 이 객체는 실제 계산을 실행하기 위해 대기중인 상태입니다. 실제 계산 결과를 반환받으려면 `list` 로 다시 한 번 감싸주어야 합니다.
mylist = [1,2,3]
f = lambda x: x ** 2
# **예제 5-4. address_list는 주소 문자열을 요소로 갖는 리스트입니다. address_list 각 요소들에서 시/도를 추출하여 리스트를 만드세요. 다음과 같은 결과를 얻으시면 됩니다.**
address_list = [
'서울 서대문구 연세로00길 00',
'경기도 성남시 분당구 불정로 0',
'부산 남구 유엔평화로 00-00'
]
f = lambda x: x.split(" ")[0]
sample = '서울 서대문구 연세로00길 00'
sample.split(" ")[0]
['서울', '경기도', '부산']
# **풀이**
#
#
# **1. 주소 문자열로부터 시도를 분리할 수 있는 패턴 찾기**
#
# - 주어진 문자열들은 띄어쓰기를 기준으로 분리할 수 있고, 각 문자열의 첫 번째 어절이 시/도를 나타냅니다. 따라서 띄어쓰기를 기준으로 문자열을 스플릿한 후, 이렇게 생성된 리스트의 첫 요소를 추출하면 시/도를 추출할 수 있을 것입니다.
#
#
# **2. 1의 과정을 함수화하기**
#
# - x라는 인자를 문자열로 생각하면, x.split(" ") 메소드를 활용해 x를 띄어쓰기 기준으로 분리 가능합니다. 이 결과는 하나의 리스트일 것이고, 리스트의 첫 번째 요소를 추출하기 위해서 [0]과 같이 인덱싱을 하면 됩니다. 이 과정을 붙여 쓰면 x.split(" ")[0]이 되고, 이를 lambda문에 넣어서 함수화하는 과정을 my_function = lambda x: x.split(" ")[0] 과 같이 쓸 수 있습니다.
#
#
# **3. 리스트에 함수 적용하기**
#
# - map을 활용해 함수를 적용하고 list()를 씌워서 결과를 출력하면 끝입니다!
address_list = [
'서울 서대문구 연세로00길 00',
'경기도 성남시 분당구 불정로 0',
'부산 남구 유엔평화로 00-00'
]
my_func = lambda x: x.split(" ")[0]
list(map(my_func, address_list))
# # 6. 튜플
#
# 튜플은 순서쌍이며, 소괄호를 사용하여 만들 수 있습니다. 요소들의 순서가 있는 배열이라는 점에서 리스트와 거의 비슷하지만, **튜플은 요소를 수정할 수 없으며 요소들의 자료형이 모두 같아야 합니다.** 자주 쓰는 자료형은 아니지만, 특징을 알아둘 필요가 있습니다.
a = (1,2,3)
b = tuple((1,2,3))
# # 7. 셋
#
# ## 7.1. 집합
#
# 셋은 집합이며, 중괄호`{}` 를 사용하거나 `set` 함수를 사용해서 만들 수 있습니다.
mySet={1,2,3}
mylist = [1,2,3]
set(mylist)
# **셋의 중요한 특징들은 다음과 같습니다.**
#
# - 셋은 중복을 허용하지 않는다
# - 셋에는 순서가 없다
# - 합집합, 교집합, 차집합 등의 집합 연산이 가능하다
#
# ## 7.1. 집합으로 중복 제거하기
#
# 중복을 허용하지 않는다는 특징 때문에 집합은 중복을 제거하는 수단으로 활용되기도 합니다. 중복이 존재하는 리스트를 집합으로 만들었다가, 다시 리스트로 변환하는 과정을 통해서 리스트의 중복을 제거할 수 있습니다.
mylist = [1,2,3,3,4,5]
set(mylist)
list(set(mylist))
# ## 7.2. 집합 연산
#
# 집합 연산자는 **1) 연산자를 통해서 실행하는 방법, 2) 메소드를 통해서 실행하는 방법**이 있습니다. 어떤 방법을 사용해도 상관 없습니다.
#
# ### 합집합
#
# 합집합은 `|` 연산자 혹은 `union` 메소드를 활용합니다. 집합의 개수와 상관 없이 연산이 가능합니다.
a = set([1,2,3])
b = set([3,4,5])
c = set([5,6,7])
a | b | c
a.union(b)
a | b | c
a.union(b,c)
# ### 교집합
#
# 교집합은 `&` 연산자 혹은 `intersection` 메소드를 활용합니다. 역시 집합의 개수와 상관 없이 연산이 가능합니다.
a
b
a & b
b.intersection(a)
a
b
c
a.intersection(b,c)
# ### 차집합
#
# 차집합은 `-` 연산자 혹은 `difference` 메소드를 활용합니다.
a
b
a - b
a.difference(b)
b.difference(a,c)
# # 8. 딕셔너리
#
# ## 8.1. 딕셔너리 만들기
{key1:value1, key2:value2, ...}
# 사전에서 'people'을 찾으면 '사람'이라는 뜻이 대응되고, 'science'를 찾으면 '과학'이라는 뜻이 대응됩니다. 파이썬의 딕셔너리 역시 이러한 대응관계를 나타내는 자료형이며, 키를 넣으면 그에 맞는 값을 얻을 수 있습니다. 리스트나 튜플의 요소들에 접근할 때에는 요소의 번호인 인덱스를 활용했지만, 딕셔너리에서는 키를 활용합니다. 딕셔너리의 각 요소는 키와 값을 콜론으로 대응시킨 쌍이고, 각 쌍들은 컴마로 구분합니다. 이 쌍들을 중괄호로 묶어주면 딕셔너리가 생성됩니다.
dict1 = {"김연아":"피겨","박찬호":"야구","손흥민":"축구"}
dict2 = {
"이름" : ["펭수","뽀로로","뿡뿡이","뚝딱이"],
"종" : ["펭귄","펭귄","NA","도깨비"]
}
import pandas as pd
pd.DataFrame(dict2)
# `dict1`의 구조를 이해하는 것은 어렵지 않을 것입니다. `dict2`의 구조는 약간 더 복잡합니다. "이름"이라는 문자열 키에 리스트가 값으로 대응하고, "종"이라는 문자열 키에 리스트가 값으로 대응합니다. 앞으로 계속 마주칠 DataFrame, Json에서 활용되는 구조이기 때문에, 정확히 이해하고 넘어가야 합니다. `dict2`를 테이블로 표현하면 다음과 같습니다. 즉 `dict2`는 키-값의 쌍이 하나의 컬럼을 이루는 표로 표현할 수 있습니다.
#
# 이름|종
# ---|---
# 펭수|펭귄
# 뽀로로|펭귄
# 뿡뿡이|NA
# 뚝딱이|도깨비
#
# ## 8.2. 딕셔너리 메소드
#
# 메소드|기능
# ---|---
# a.keys()|a 딕셔너리의 키를 반환
# a.values()|b 딕셔너리의 값을 반환
dict2 = {
"이름":["펭수","뽀로로","뿡뿡이","뚝딱이"],
"종":["펭귄","펭귄","NA","도깨비"]
}
dict2.keys()
dict2.values()
dict2['종']
# # 참고자료
#
# - 박응용, [점프 투 파이썬](https://wikidocs.net/book/1)
| docs/page1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="ENms_F_eVwvX" colab_type="code" outputId="a3b77864-567d-4619-b191-554e434b5d58" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# notebook based on Taleb's tweet: https://twitter.com/nntaleb/status/1150457625877864450
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
plt.style.use('Solarize_Light2')
def plot_bivariate_normal_with_correlation(correlation, size):
    """Sample `size` points from a standard bivariate normal with the given
    Pearson correlation and scatter-plot them with equal axis scaling."""
    mean = [0, 0]
    cov = [[1, correlation],
           [correlation, 1]]  # unit variances; off-diagonal entries carry the correlation
    # 'raise' makes numpy error out if cov is not positive semi-definite
    x, y = np.random.multivariate_normal(mean, cov, size, 'raise').T
    plt.plot(x, y, 'x')
    plt.axis('equal')
    plt.show()
pearson_correlation = [0, 0.5, 0.6, 0.8, 0.9, 0.99, 0.9999]
print('Pearson correlation coefficient ρ=1/2 is much closer to ρ=0 than to a ρ=1.')
print('There is also a huge difference between a ρ=0.9 and ρ=0.99')
size = 1000
for x in pearson_correlation:
print("ρ =", x)
plot_bivariate_normal_with_correlation(x, size)
# + id="J5dTThADb_uy" colab_type="code" outputId="0cde517f-2f82-4b26-c72a-6d5609f0395c" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import math

def mutual_information_to_pearson(mutual_information):
    """Invert I(X;Y) = -1/2 * ln(1 - rho^2) for a bivariate normal,
    returning the (non-negative) Pearson correlation rho.

    See https://en.wikipedia.org/wiki/Mutual_information#Linear_correlation
    """
    rho_squared = 1 - math.exp(-2 * mutual_information)
    return math.sqrt(rho_squared)
def plot_bivariate_normal_with_mutual_information(mutual_information, size):
    """Convert a mutual-information value to its Pearson rho, report both
    values, then scatter-plot a bivariate-normal sample of that rho."""
    pearson = mutual_information_to_pearson(mutual_information)
    print("Mutual information", mutual_information)
    print("Pearson correlation", pearson)
    plot_bivariate_normal_with_correlation(pearson, size)
# Map a range of mutual-information values to their Pearson correlations
# and visualise how nonlinearly the two scales relate.
mutual_information = [0, 1/8, 1/4, 1/2, 1, 2, 4, 8, 16, 32]
pearson_correlation = [mutual_information_to_pearson(x) for x in mutual_information]
print("Mutual Information scales to noise: correlation of 0.47 is half the information of 0.62")
plt.plot(mutual_information, pearson_correlation, 'x')
plt.show()
# One scatter plot per mutual-information value, 1000 samples each.
size = 1000
for x in mutual_information:
    plot_bivariate_normal_with_mutual_information(x, size)
| pearson_correlation_coefficient_vs_mutual_information.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="t1K8pvFx5FXc"
# **Name: <NAME>**
#
# **Task_2**
# + [markdown] id="Ytfy_YkC2osN"
# ### Stock Market Prediction And Forecasting Using Stacked LSTM
# + id="etu5Fbdr2osS"
import numpy as np
import pandas as pd
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 74} id="cylNHrRTOBox" outputId="58d194d4-2a21-4110-a3c4-0fdd31cdd388"
from google.colab import files
uploaded = files.upload()
# + id="_TGVTBRu2osT"
df=pd.read_csv('AAPL.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="jRWCvUOE2osT" outputId="e2a0bf0a-d1a9-4367-a96f-458574edd537"
df.head()
# + id="6EqRz8pw2osU" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="2859c22c-4ade-41ca-fa01-4aa651fa1a7d"
df.tail()
# + id="S21epLX12osU"
df1=df.reset_index()['close']
# + id="zHGYGweJ2osV" colab={"base_uri": "https://localhost:8080/"} outputId="c2a20c3a-5749-4e39-c491-2e63ca48de27"
df1
# + id="u38wGOSN2osV" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="0e29080c-4e1b-4a43-c9ea-7bde73941be4"
import matplotlib.pyplot as plt
plt.plot(df1)
# + id="3Dqe2O8F2osW"
### LSTM are sensitive to the scale of the data. so we apply MinMax scaler
# + id="ZenJMMBb2osX"
import numpy as np
# + id="hftMHsam2osX" colab={"base_uri": "https://localhost:8080/"} outputId="a40d72ee-5d72-4a00-af43-e2d85d944a03"
df1
# + id="DBBE4a7g2osX"
from sklearn.preprocessing import MinMaxScaler
scaler=MinMaxScaler(feature_range=(0,1))
df1=scaler.fit_transform(np.array(df1).reshape(-1,1))
# + id="jV5E8E2j2osY" colab={"base_uri": "https://localhost:8080/"} outputId="2f040a4d-026d-461b-dd5b-586992fca359"
print(df1)
# + id="fIAajEor2osY"
##splitting dataset into train and test split
training_size=int(len(df1)*0.65)
test_size=len(df1)-training_size
train_data,test_data=df1[0:training_size,:],df1[training_size:len(df1),:1]
# + id="8o3PsmLn2osY" colab={"base_uri": "https://localhost:8080/"} outputId="7e95ece5-4934-4a69-fb6b-76486f827656"
training_size,test_size
# + id="KnwfpTP-2osZ"
train_data
# + id="JRrTAYYZ2osZ"
import numpy
# Build supervised-learning windows from an (n, 1) series: each row of X is
# `time_step` consecutive values and the matching y is the value that follows.
def create_dataset(dataset, time_step=1):
    """Return (X, y) numpy arrays: X of shape (m, time_step), y of shape (m,).

    NOTE(review): the `-1` in the range skips the last usable window —
    presumably inherited from the original tutorial; confirm before changing.
    """
    windows, targets = [], []
    for start in range(len(dataset) - time_step - 1):
        stop = start + time_step
        windows.append(dataset[start:stop, 0])
        targets.append(dataset[stop, 0])
    return numpy.array(windows), numpy.array(targets)
# + id="Zc8rUB0E2osZ"
# reshape into X=t,t+1,t+2,t+3 and Y=t+4
time_step = 100
X_train, y_train = create_dataset(train_data, time_step)
X_test, ytest = create_dataset(test_data, time_step)
# + id="s6swxq2y2osa" colab={"base_uri": "https://localhost:8080/"} outputId="40897d03-a452-45d0-9e8d-1cfcdf456202"
print(X_train.shape), print(y_train.shape)
# + id="dMXYwcxJ2osa" colab={"base_uri": "https://localhost:8080/"} outputId="a1e14b53-5bea-4a23-8697-031d101a815b"
print(X_test.shape), print(ytest.shape)
# + id="EL4efA902osa"
# reshape input to be [samples, time steps, features] which is required for LSTM
X_train =X_train.reshape(X_train.shape[0],X_train.shape[1] , 1)
X_test = X_test.reshape(X_test.shape[0],X_test.shape[1] , 1)
# + id="PVWKDDL52osb"
### Create the Stacked LSTM model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
# + id="Bg4CaZ8C2osb"
model=Sequential()
model.add(LSTM(50,return_sequences=True,input_shape=(100,1)))
model.add(LSTM(50,return_sequences=True))
model.add(LSTM(50))
model.add(Dense(1))
model.compile(loss='mean_squared_error',optimizer='adam')
# + id="2VSKnC8O2osb" colab={"base_uri": "https://localhost:8080/"} outputId="f1a0f9f0-6f32-4310-9cb1-a753d6f77555"
model.summary()
# + id="ybM5vnMY2osb" colab={"base_uri": "https://localhost:8080/"} outputId="6c2441b2-2cc5-4f3f-e355-607b73f441bd"
model.summary()
# + id="IJ7SbOQf2osb"
# + id="xaFrSq4W2osc" colab={"base_uri": "https://localhost:8080/"} outputId="f59e5121-69e8-48be-80b9-69475fddce27"
model.fit(X_train,y_train,validation_data=(X_test,ytest),epochs=100,batch_size=64,verbose=1)
# + id="FapDVBSz2osc"
import tensorflow as tf
# + id="6iooEOK92osc" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="ffd52866-32e4-479c-9cf9-6daa87b95047"
tf.__version__
# + id="AzeHrXtw2osc"
### Lets Do the prediction and check performance metrics
train_predict=model.predict(X_train)
test_predict=model.predict(X_test)
# + id="xdHJOzz_2osc"
##Transformback to original form
train_predict=scaler.inverse_transform(train_predict)
test_predict=scaler.inverse_transform(test_predict)
# + id="H5P-g8cm2osc" colab={"base_uri": "https://localhost:8080/"} outputId="bfff31a5-2425-4573-a12d-ba5f758a3568"
### Calculate RMSE performance metrics
import math
from sklearn.metrics import mean_squared_error
math.sqrt(mean_squared_error(y_train,train_predict))
# + id="b3Ca4JKZ2osd" colab={"base_uri": "https://localhost:8080/"} outputId="5110c351-7ec7-47a3-81a0-0e7bedff9187"
### Test Data RMSE
math.sqrt(mean_squared_error(ytest,test_predict))
# + id="7FyVCx8q2osd" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="f4ae2810-4a6c-4358-8b6f-48ad8a96be90"
### Plotting
# shift train predictions for plotting
look_back=100
trainPredictPlot = numpy.empty_like(df1)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(train_predict)+look_back, :] = train_predict
# shift test predictions for plotting
testPredictPlot = numpy.empty_like(df1)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(train_predict)+(look_back*2)+1:len(df1)-1, :] = test_predict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(df1))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
# + id="TzLKMA2k2osd" colab={"base_uri": "https://localhost:8080/"} outputId="5e753316-c495-4621-e220-ca805f524ddb"
len(test_data)
# + id="7_t30KOE2ose" colab={"base_uri": "https://localhost:8080/"} outputId="d4a796a0-ec37-4283-98ec-079c6eef1c5c"
x_input=test_data[341:].reshape(1,-1)
x_input.shape
# + id="6PUPA_6L2ose"
# + id="ginuQLnB2ose"
# + id="QQtlPNLC2ose"
temp_input=list(x_input)
temp_input=temp_input[0].tolist()
# + id="UooSK9x02ose" colab={"base_uri": "https://localhost:8080/"} outputId="52b55eb5-f33d-4323-f16d-9025db24baae"
temp_input
# + id="mr-9iNd82ose" colab={"base_uri": "https://localhost:8080/"} outputId="198e05e0-1993-43e6-88ba-e7efaeb786be"
# Roll the model forward 30 steps: feed the last 100 values, predict one
# step, append the prediction to the sliding window, and repeat.
# (The loop runs 30 iterations, not the 10 days the original comment said.)
from numpy import array
lst_output=[]
n_steps=100
i=0
while(i<30):
    # Once the window has grown past 100 entries, drop its oldest value
    # so the model always sees exactly the most recent n_steps points.
    if(len(temp_input)>100):
        #print(temp_input)
        x_input=np.array(temp_input[1:])
        print("{} day input {}".format(i,x_input))
        x_input=x_input.reshape(1,-1)
        x_input = x_input.reshape((1, n_steps, 1))
        #print(x_input)
        yhat = model.predict(x_input, verbose=0)
        print("{} day output {}".format(i,yhat))
        # Append the prediction, then slide the window forward by one.
        temp_input.extend(yhat[0].tolist())
        temp_input=temp_input[1:]
        #print(temp_input)
        lst_output.extend(yhat.tolist())
        i=i+1
    else:
        # First iteration only: reuse the x_input prepared in a previous
        # cell (temp_input is still exactly 100 long at this point).
        x_input = x_input.reshape((1, n_steps,1))
        yhat = model.predict(x_input, verbose=0)
        print(yhat[0])
        temp_input.extend(yhat[0].tolist())
        print(len(temp_input))
        lst_output.extend(yhat.tolist())
        i=i+1
print(lst_output)
# + id="ADaMtX1y2osf"
day_new=np.arange(1,101)
day_pred=np.arange(101,131)
# + id="1FTbXzDl2osf"
import matplotlib.pyplot as plt
# + id="fV4F2ihb2osf" colab={"base_uri": "https://localhost:8080/"} outputId="ce584447-abd6-4b25-8c4e-2fc05d2aab2a"
len(df1)
# + id="gTwc_b-42osf"
# + id="pfADbkJe2osf" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="e748ac00-55c3-4ca0-8ed8-121cddaff2bd"
plt.plot(day_new,scaler.inverse_transform(df1[1158:]))
plt.plot(day_pred,scaler.inverse_transform(lst_output))
# + id="-EibS4se2osf" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="bbb04273-fe04-473a-d905-bad26ed287d4"
df3=df1.tolist()
df3.extend(lst_output)
plt.plot(df3[1200:])
# + id="7vY9xiC02osf"
df3=scaler.inverse_transform(df3).tolist()
# + id="JcIoyP-D2osg" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="c3800090-5846-4f76-f92b-ff2838c5b978"
plt.plot(df3)
| Task2_Growmore.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/dpk-a7/Deep-learning/blob/main/toxic_comment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="S6CD-18TM3bc" colab={"base_uri": "https://localhost:8080/"} outputId="d6dff48f-a5dd-49d2-c98f-1209d9e589cc"
# https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/data
# # !kaggle competitions download -c jigsaw-toxic-comment-classification-challenge
# !wget http://nlp.stanford.edu/data/glove.6B.zip
# + colab={"base_uri": "https://localhost:8080/"} id="ItCNVhJmsPxX" outputId="f1a4c38f-8d1b-4905-d833-4e32be1493cd"
# !pip install -q kaggle
# !pip install -q kaggle-cli
# !mkdir -p ~/.kaggle
# !cp "kaggle.json" ~/.kaggle/
# !cat ~/.kaggle/kaggle.json
# !chmod 600 ~/.kaggle/kaggle.json# For competition datasets
# !kaggle competitions download -c jigsaw-toxic-comment-classification-challenge
# + colab={"base_uri": "https://localhost:8080/"} id="xa6C_8-xzQ4t" outputId="cd584e06-ef1d-47d5-a7da-69522a31a425"
# !unzip glove.6B.zip
# # !unzip train.csv.zip
# # !unzip test.csv.zip
# + id="Q_lqdE-uM-Nw"
from __future__ import print_function, division
from builtins import range
# + id="Dkbo9hjip43T"
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer # individual component in a sentence like(words,punctuation)
from keras.preprocessing.sequence import pad_sequences # normalize length
from keras.layers import Dense, Input, GlobalMaxPool1D
from keras.layers import Conv1D, MaxPool1D, Embedding
from keras.models import Model
from sklearn.metrics import roc_auc_score # area under curve matric which is useful for binary classification
# + id="0I-J9KkSqimw"
MAX_SEQ_LENGTH = 100
MAX_VOCAB_SIZE = 20000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.2
BATCH_SIZE = 128
EPOCHS = 10
# + colab={"base_uri": "https://localhost:8080/"} id="1PzBmPvGxUn1" outputId="b7a3f22c-cec3-4b38-8490-cf38996d3e58"
#loading word vectors..
word2vec = {} #key = word: value = vectors
with open(os.path.join("glove.6B.%sd.txt" % EMBEDDING_DIM)) as f:
for line in f:
values = line.split()
word = values[0]
vec = np.asarray(values[1:], dtype= 'float32')
word2vec[word] = vec
print("found %s word vectors" % len(word2vec))
# + colab={"base_uri": "https://localhost:8080/"} id="ymqCUPmZyDht" outputId="5b93ebe1-0073-4c06-dc47-174dde492fee"
word2vec['the']
# + colab={"base_uri": "https://localhost:8080/"} id="Z0DFbCIKyz5B" outputId="87334264-7ce9-4da0-e6af-1164ba560662"
#Loading comments
train = pd.read_csv("train.csv")
train.columns
# + id="gUs--l-Czhgu"
sentences = train["comment_text"].fillna("DUMMY_VALUE").values
possible_labels = ['toxic', 'severe_toxic', 'obscene',
'threat','insult', 'identity_hate']
targets = train[possible_labels].values
# + colab={"base_uri": "https://localhost:8080/"} id="DWGujo5A0G5Y" outputId="f24b856d-fbcd-4fb3-9bad-571dd42766ad"
print("max sequence length:",max(len(s)for s in sentences))
print("min sequence length:",min(len(s)for s in sentences))
s = sorted(len(s) for s in sentences)
print("median sequence length:", s[len(s)//2])
# + id="rYh3Ec0wJALL"
# + id="l_cSdzbz0kHc"
tokenizer = Tokenizer(num_words = MAX_VOCAB_SIZE)
tokenizer.fit_on_texts(sentences)
sequences = tokenizer.texts_to_sequences(sentences)
# + colab={"base_uri": "https://localhost:8080/"} id="ds6i9-S86EH9" outputId="732d9d08-9b44-4ca6-d8bc-9dafa1ea7a24"
word2idx = tokenizer.word_index
print("found %s unique tokens" % len(word2idx))
# + colab={"base_uri": "https://localhost:8080/"} id="ewS2_Wfu6QKK" outputId="07f6a2e2-7ece-4351-d73a-886212d1acfe"
data = pad_sequences(sequences, maxlen = MAX_SEQ_LENGTH)
print("Shape of data tensor:", data.shape)
# + id="JbluQ4cfVaip" colab={"base_uri": "https://localhost:8080/"} outputId="b9fecfbe-b138-400b-f025-53fc7acb56ac"
print("Filling pre-trained embeddings..")
num_words = min(MAX_VOCAB_SIZE, len(word2idx)+1)
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
for word, i in word2idx.items():
if i < MAX_VOCAB_SIZE:
embedding_vector = word2vec.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
# + id="cFsvhtTrE9bZ"
embedding_layer = Embedding(
num_words,
EMBEDDING_DIM,
weights = [embedding_matrix],
input_length = MAX_SEQ_LENGTH,
trainable = False
)
# + id="XpdvYqPfGWBx"
input_ = Input(shape=(MAX_SEQ_LENGTH,))
x = embedding_layer(input_)
x = Conv1D(128,3,activation = 'relu')(x)
x = MaxPool1D(3)(x)
x = Conv1D(128,3,activation = 'relu')(x)
x = MaxPool1D(3)(x)
x = Conv1D(128,3,activation = 'relu')(x)
x = GlobalMaxPool1D()(x)
x = Dense(128, activation='relu')(x)
output = Dense(len(possible_labels), activation="sigmoid")(x)
model = Model(input_, output)
model.compile(
loss = 'binary_crossentropy',
optimizer = 'rmsprop',
metrics = ['accuracy']
)
# + colab={"base_uri": "https://localhost:8080/"} id="1tRlpljVG-5x" outputId="49749a71-3f07-48e2-b6b4-01a6862fd0d8"
history = model.fit(
data,
targets,
batch_size = BATCH_SIZE,
epochs= EPOCHS,
validation_split= VALIDATION_SPLIT
)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="WlpCo-jXICG0" outputId="c3771a3a-5d4c-41ed-b49f-df3d2d306d43"
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.legend()
plt.show();
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="d9ilp33UIT0U" outputId="c8dac4b7-79be-4f44-efb2-3a2c280f81cc"
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_acc')
plt.legend()
plt.show();
# + colab={"base_uri": "https://localhost:8080/"} id="bSprMUk4IZS-" outputId="7a9b55b9-2bfe-496a-82fa-c756958a2e8b"
# Mean per-label ROC-AUC of the model on the training data.
p = model.predict(data)
aucs = []
# Fix: the original iterated range(5), silently skipping the 6th label
# ("identity_hate"); iterate every column of `possible_labels` instead.
# Also dropped `del i`, which raises NameError when `i` is not yet bound.
for label_idx in range(len(possible_labels)):
    aucs.append(roc_auc_score(targets[:, label_idx], p[:, label_idx]))
print(np.mean(aucs))
# + id="B-GTbsAGNId7" outputId="c6fccf91-c22c-431a-a366-748b209f19f6" colab={"base_uri": "https://localhost:8080/"}
def single_predict(sentence):
  """Tokenize `sentence`, pad it to MAX_SEQ_LENGTH and print the raw
  per-label scores the model assigns to it."""
  single_sequences = tokenizer.texts_to_sequences(sentence)
  # Flatten the per-token sequences into one padded input row.
  pad_input=[[j for i in single_sequences for j in i]]
  single_data = pad_sequences(pad_input, maxlen=MAX_SEQ_LENGTH)
  p = model.predict(single_data)
  p = list(p)
  # Fix: the original called print(ans) here, but `ans` is never defined in
  # this function, so every call crashed with NameError before printing scores.
  print(p)
  for score in p[0]:
    print(score)  # max([x]) of a one-element list is just x
  print('->',max(p[0]))
single_predict("very bad")
# + id="3IF-83QPI7-R" outputId="b76da92a-7179-468b-8ff7-7fb164209b39" colab={"base_uri": "https://localhost:8080/"}
def single_predict(sentence):
  """Print the highest-scoring toxicity label for `sentence`, or "Neutral"
  when no score clears the (empirically chosen) 0.057 threshold."""
  single_sequences = tokenizer.texts_to_sequences(sentence)
  single_data = pad_sequences(single_sequences, maxlen=MAX_SEQ_LENGTH)
  scores = list(model.predict(single_data)[-1])
  lab = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
  top = max(scores)
  if top > 0.057:
    ans = lab[scores.index(top)]
  else:
    ans = "Neutral"
  print(ans)
  print(scores)
single_predict("youll die")
# + id="0pNDoIHtLogU"
# + id="oNEBt7uMNPFr"
| toxic_comment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="I3UQM67DgXtk" colab_type="text"
# # Sentiment Classification with Natural Language Processing on LSTM
# + [markdown] id="KxXxxUZpgXtl" colab_type="text"
# This dataset consists of reviews of fine foods from amazon. The data span a period of more than 10 years, including all ~500,000 reviews up to October 2012. Reviews include product and user information, ratings, and a plain text review. It also includes reviews from all other Amazon categories.
# + id="Mp13wse9gXtm" colab_type="code" colab={}
# Importing the libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# + id="rwgLF7nggXtp" colab_type="code" colab={}
df = pd.read_csv('SentimentData/Reviews.csv')
# + id="IKjIS7Q4gXts" colab_type="code" colab={}
df=df[:10000]
# + id="ikMUJuoqgXtu" colab_type="code" colab={} outputId="11dc67ff-dfc9-42db-c9e7-e30f61251e1e"
df.head()
# + id="CqQDibB_gXty" colab_type="code" colab={} outputId="20d81b74-d0e4-4c58-e701-bd13c7519d41"
df.shape
# + [markdown] id="0QQs-2U4gXt0" colab_type="text"
#
# # Text Cleaning or Preprocessing
# + id="PoIWSwtKgXt1" colab_type="code" colab={} outputId="bfd80da4-6c79-4131-b3bf-d0a9f74545a1"
# Cleaning the texts
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer

# Hoist the loop invariants: the original created a new PorterStemmer every
# review and rebuilt set(stopwords.words('english')) for every single word —
# an accidental O(reviews * words) blow-up. Building both once preserves the
# output exactly while making the pass dramatically faster.
ps = PorterStemmer()
stop_words = set(stopwords.words('english'))
corpus = []
for i in range(0, 10000):
    # Keep letters only, lowercase, then drop stopwords and stem the rest.
    review = re.sub('[^a-zA-Z]', ' ', df['Text'][i])
    review = review.lower().split()
    review = [ps.stem(word) for word in review if word not in stop_words]
    corpus.append(' '.join(review))
# + id="6kA9AfcJgXt3" colab_type="code" colab={} outputId="cbaa12af-a226-411c-b018-cf0fcbe0dc28"
corpus=pd.DataFrame(corpus, columns=['Reviews'])
corpus.head()
# + id="dRyKYbxugXt8" colab_type="code" colab={} outputId="574a2612-50be-4932-c141-9e884a078afc"
result=corpus.join(df[['Score']])
result.head()
# + [markdown] id="7XDrm818gXt-" colab_type="text"
# # TFIDF
# + [markdown] id="krUxR2jVgXt_" colab_type="text"
# TFIDF is an information retrieval technique that weighs a term’s frequency (TF) and its inverse document frequency (IDF). Each word has its respective TF and IDF score. The product of the TF and IDF scores of a word is called the TFIDF weight of that word.
#
# Put simply, the higher the TFIDF score (weight), the rarer the word and vice versa
# + id="YlM7qofWgXt_" colab_type="code" colab={} outputId="a326ad67-0e0a-4730-d690-2ebd396bfe5b"
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer()
tfidf.fit(result['Reviews'])
# + id="jA2vnYADgXuB" colab_type="code" colab={} outputId="f17988c0-ff46-4464-c08e-705e237a65e0"
X = tfidf.transform(result['Reviews'])
result['Reviews'][1]
# + id="pNqxHnFvgXuD" colab_type="code" colab={} outputId="990904b0-0f4d-4351-adf0-41b94af90b93"
print([X[1, tfidf.vocabulary_['peanut']]])
# + id="ND2U8WvDgXuG" colab_type="code" colab={} outputId="a67370ed-86da-4cfd-f9d7-b7264e46c042"
print([X[1, tfidf.vocabulary_['jumbo']]])
# + id="ukPwlQ1EgXuI" colab_type="code" colab={} outputId="e44e6a82-ef7a-4be6-ebb2-66f53ba6707d"
print([X[1, tfidf.vocabulary_['error']]])
# + [markdown] id="dIMqghVhgXuK" colab_type="text"
# Among the three words, “peanut”, “jumbo” and “error”, tf-idf gives the highest weight to “jumbo”. Why? This indicates that “jumbo” is a much rarer word than “peanut” and “error”. This is how to use the tf-idf to indicate the importance of words or terms inside a collection of documents.
# + [markdown] id="J0eYHoVMgXuK" colab_type="text"
# # Sentiment Classification
# + id="4t6ixlaDgXuL" colab_type="code" colab={} outputId="ae33f578-1938-43f1-9c1b-37515b219558"
result.dropna(inplace=True)
# Fix: boolean indexing returns a NEW frame — the original line
# `result[result['Score'] != 3]` computed the filter and threw it away, so
# neutral (Score == 3) reviews were kept and then mislabeled as negative.
result = result[result['Score'] != 3]
# Score > 3 -> positive (1); anything remaining (Score < 3) -> negative (0).
result['Positivity'] = np.where(result['Score'] > 3, 1, 0)
cols = [ 'Score']
result.drop(cols, axis=1, inplace=True)
result.head()
# + id="3pCUC6gYgXuN" colab_type="code" colab={} outputId="df3b8ac7-e068-4d7e-b9ed-0541090ce113"
result.groupby('Positivity').size()
# + [markdown] id="TCEh45B7gXuP" colab_type="text"
# # Train Test Split
# + id="lVwBn8FRgXuP" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
X = result.Reviews
y = result.Positivity
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 0)
# + id="BBKAzHvegXuR" colab_type="code" colab={} outputId="459eeed5-a789-4bce-8a61-1415fd3e41f9"
print("Train set has total {0} entries with {1:.2f}% negative, {2:.2f}% positive".format(len(X_train),
(len(X_train[y_train == 0]) / (len(X_train)*1.))*100,
(len(X_train[y_train == 1]) / (len(X_train)*1.))*100))
# + id="afCXhhEdgXuT" colab_type="code" colab={} outputId="3c220d7f-fbe4-4c98-d038-7869226b67ad"
print("Test set has total {0} entries with {1:.2f}% negative, {2:.2f}% positive".format(len(X_test),
(len(X_test[y_test == 0]) / (len(X_test)*1.))*100,
(len(X_test[y_test == 1]) / (len(X_test)*1.))*100))
# + id="3ZPlSbCmgXuV" colab_type="code" colab={}
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
# + id="gLXTDS8IgXuX" colab_type="code" colab={}
def accuracy_summary(pipeline, X_train, y_train, X_test, y_test):
    """Fit `pipeline` on the training split, score it on the test split,
    print the accuracy as a percentage, and return it as a fraction."""
    fitted = pipeline.fit(X_train, y_train)
    predictions = fitted.predict(X_test)
    accuracy = accuracy_score(y_test, predictions)
    print("accuracy score: {0:.2f}%".format(accuracy*100))
    return accuracy
# + id="ZMiE4FHKgXuZ" colab_type="code" colab={}
cv = CountVectorizer()
rf = RandomForestClassifier(class_weight="balanced")
n_features = np.arange(10000,25001,5000)
def nfeature_accuracy_checker(vectorizer=cv, n_features=n_features, stop_words=None, ngram_range=(1, 1), classifier=rf):
    """Sweep the vocabulary sizes in `n_features`, fitting a
    vectorizer+classifier pipeline at each size, and return a list of
    (n, accuracy) pairs. (Local list renamed from `result` to avoid
    confusion with the module-level DataFrame of the same name.)"""
    scores = []
    print(classifier)
    print("\n")
    for n in n_features:
        vectorizer.set_params(stop_words=stop_words, max_features=n, ngram_range=ngram_range)
        checker_pipeline = Pipeline([('vectorizer', vectorizer),
                                     ('classifier', classifier)])
        print("Test result for {} features".format(n))
        nfeature_accuracy = accuracy_summary(checker_pipeline, X_train, y_train, X_test, y_test)
        scores.append((n, nfeature_accuracy))
    return scores
# + id="9KIzHEyygXub" colab_type="code" colab={}
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer()
# + id="UZ69PmpjgXud" colab_type="code" colab={} outputId="c0954547-5f25-4c53-d967-0cf443a47ff4"
print("Result for trigram with stop words (Tfidf)\n")
feature_result_tgt = nfeature_accuracy_checker(vectorizer=tfidf,ngram_range=(1, 3))
# + id="9Z1oHVNYgXuf" colab_type="code" colab={} outputId="6bfb9d8f-cce5-49b8-d88b-219ba1647e45"
from sklearn.metrics import classification_report
cv = CountVectorizer(max_features=30000,ngram_range=(1, 3))
pipeline = Pipeline([
('vectorizer', cv),
('classifier', rf)
])
sentiment_fit = pipeline.fit(X_train, y_train)
y_pred = sentiment_fit.predict(X_test)
print(classification_report(y_test, y_pred, target_names=['negative','positive']))
# + id="vQBV3YDDgXui" colab_type="code" colab={} outputId="b566e954-461c-4439-b9b4-c1eedb186fa9"
## K-fold Cross Validation
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = pipeline, X= X_train, y = y_train,
cv = 10)
print("Random Forest Classifier Accuracy: %0.2f (+/- %0.2f)" % (accuracies.mean(), accuracies.std() * 2))
# + [markdown] id="zfu9xttygXul" colab_type="text"
# # Chi2 Feature Selection
# + id="ISmYaVmegXul" colab_type="code" colab={}
from sklearn.feature_selection import chi2
tfidf = TfidfVectorizer(max_features=30000,ngram_range=(1, 3))
X_tfidf = tfidf.fit_transform(result.Reviews)
y = result.Positivity
chi2score = chi2(X_tfidf, y)[0]
# + id="ZwPMRLckgXun" colab_type="code" colab={} outputId="f914d014-4b0a-44a8-d6d4-d7006b9153a0"
plt.figure(figsize=(16,8))
scores = list(zip(tfidf.get_feature_names(), chi2score))
chi2 = sorted(scores, key=lambda x:x[1])
topchi2 = list(zip(*chi2[-20:]))
x = range(len(topchi2[1]))
labels = topchi2[0]
plt.barh(x,topchi2[1], align='center', alpha=0.5)
plt.plot(topchi2[1], x, '-o', markersize=5, alpha=0.8)
plt.yticks(x, labels)
plt.xlabel('$\chi^2$')
plt.show();
# + [markdown] id="ZCZVCLEUgXur" colab_type="text"
# # LSTM neural network
# + id="ifowaiDXgXur" colab_type="code" colab={} outputId="d724111d-3fe1-4065-b86e-ceac7a07f157"
from sklearn.feature_extraction.text import CountVectorizer
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
import re
# + id="Aj_DICnkgXut" colab_type="code" colab={} outputId="51573cfd-11f2-4da4-f1c3-585bc2bf5b37"
# Vocabulary cap for the tokenizer (typo "fatures" kept — other cells use it).
# NOTE(review): `nb_words` is the pre-Keras-2 spelling of `num_words`
# (likewise `nb_epoch` vs `epochs` in the fit cell below); modern Keras
# rejects it — confirm which Keras version this notebook is pinned to.
max_fatures = 30000
tokenizer = Tokenizer(nb_words=max_fatures, split=' ')
tokenizer.fit_on_texts(result['Reviews'].values)
X1 = tokenizer.texts_to_sequences(result['Reviews'].values)
X1 = pad_sequences(X1)
# + id="KWboa2JhgXuw" colab_type="code" colab={} outputId="1a8411e8-2565-48b6-b221-2bc7d975f102"
Y1 = pd.get_dummies(result['Positivity']).values
X1_train, X1_test, Y1_train, Y1_test = train_test_split(X1,Y1, random_state = 42)
print(X1_train.shape,Y1_train.shape)
print(X1_test.shape,Y1_test.shape)
# + [markdown] id="ibFB24LGgXuy" colab_type="text"
# Keras Embedding Layer
# Keras offers an Embedding layer that can be used for neural networks on text data.
#
# It requires that the input data be integer encoded, so that each word is represented by a unique integer. This data preparation step can be performed using the Tokenizer API also provided with Keras.
#
# The Embedding layer is initialized with random weights and will learn an embedding for all of the words in the training dataset.
#
# It is a flexible layer that can be used in a variety of ways, such as:
#
# It can be used alone to learn a word embedding that can be saved and used in another model later.
# It can be used as part of a deep learning model where the embedding is learned along with the model itself.
# It can be used to load a pre-trained word embedding model, a type of transfer learning.
# The Embedding layer is defined as the first hidden layer of a network. It must specify 3 arguments:
#
# It must specify 3 arguments:
#
# input_dim: This is the size of the vocabulary in the text data. For example, if your data is integer encoded to values between 0-10, then the size of the vocabulary would be 11 words.
# output_dim: This is the size of the vector space in which words will be embedded. It defines the size of the output vectors from this layer for each word. For example, it could be 32 or 100 or even larger. Test different values for your problem.
# input_length: This is the length of input sequences, as you would define for any input layer of a Keras model. For example, if all of your input documents are comprised of 1000 words, this would be 1000.
# For example, below we define an Embedding layer with a vocabulary of 200 (e.g. integer encoded words from 0 to 199, inclusive), a vector space of 32 dimensions in which words will be embedded, and input documents that have 50 words each.
#
#
# e = Embedding(200, 32, input_length=50)
# 1
# e = Embedding(200, 32, input_length=50)
# The Embedding layer has weights that are learned. If you save your model to file, this will include weights for the Embedding layer.
#
# The output of the Embedding layer is a 2D vector with one embedding for each word in the input sequence of words (input document).
#
# If you wish to connect a Dense layer directly to an Embedding layer, you must first flatten the 2D output matrix to a 1D vector using the Flatten layer.
# + id="RMnpcNE9gXuy" colab_type="code" colab={} outputId="501ffa22-31d0-4734-f7b7-f36eba5c4b6a"
embed_dim = 150
lstm_out = 200
model = Sequential()
model.add(Embedding(max_fatures, embed_dim,input_length = X1.shape[1], dropout=0.2))
model.add(LSTM(lstm_out, dropout_U=0.2,dropout_W=0.2))
model.add(Dense(2,activation='softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
print(model.summary())
# + [markdown] id="E2SD9DS-gXu0" colab_type="text"
# Next, I compose the LSTM Network. Note that embed_dim, lstm_out, batch_size, droupout_x variables are hyperparameters, their values are somehow intuitive, can be and must be played with in order to achieve good results. Please also note that I am using softmax as activation function. The reason is that our Network is using categorical crossentropy, and softmax is just the right activation method for that.
# + id="Rtde-uf7gXu0" colab_type="code" colab={} outputId="0106a746-6958-4005-d54f-4c3e88be9c2e"
batch_size = 32
model.fit(X1_train, Y1_train, nb_epoch = 10, batch_size=batch_size, verbose = 2)
# + id="8amCk_N6gXu2" colab_type="code" colab={} outputId="00d1bd2c-d0a0-4b27-dd58-a6697f59b323"
score,acc = model.evaluate(X1_test, Y1_test, verbose = 2, batch_size = batch_size)
print("score: %.2f" % (score))
print("acc: %.2f" % (acc))
# + [markdown] id="TvwPDWnngXu4" colab_type="text"
#
# Finally measuring the number of correct guesses. It is clear that finding negative tweets goes very well for the Network but deciding whether is positive is not really.
# + id="fRyLWbyVgXu4" colab_type="code" colab={} outputId="d0dbbc26-a3ac-41ac-f5a8-6fcc65d22a25"
# Per-class (positive / negative) accuracy on the test set.
pos_cnt, neg_cnt, pos_correct, neg_correct = 0, 0, 0, 0
for x in range(len(X1_test)):
    # Fix: the prediction was stored in a variable called `result`, silently
    # clobbering the module-level `result` DataFrame used by earlier cells.
    pred = model.predict(X1_test[x].reshape(1,X1_test.shape[1]),batch_size=1,verbose = 2)[0]
    true_label = np.argmax(Y1_test[x])  # hoisted: computed three times before
    if np.argmax(pred) == true_label:
        if true_label == 0:
            neg_correct += 1
        else:
            pos_correct += 1
    if true_label == 0:
        neg_cnt += 1
    else:
        pos_cnt += 1
print("pos_acc", pos_correct/pos_cnt*100, "%")
print("neg_acc", neg_correct/neg_cnt*100, "%")
# + id="ecGCEsIfgXu7" colab_type="code" colab={}
# + id="yrwdiCJwgXu9" colab_type="code" colab={}
# + id="veI4hMrtgXu_" colab_type="code" colab={}
| Lecture/Notebooks/Deep Learning/L7/SentimentAnalysis_ipynb_txt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="7Xef4vQF6txB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} executionInfo={"status": "ok", "timestamp": 1595260391255, "user_tz": -330, "elapsed": 5452, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="44c36d8c-5dfc-42fb-a397-faf74082c5ed"
# !git clone https://github.com/razuswe/Prima-Indian-Diabetes-prediction.git
# + id="5hmfJwN07k0m" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595260463958, "user_tz": -330, "elapsed": 1565, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="cdab5160-d9bd-4476-db8f-aecc6b286336"
# cd Prima-Indian-Diabetes-prediction
# + id="cVqCMfqm7r4c" colab_type="code" colab={}
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(color_codes=True)  # seaborn default theme with matplotlib-compatible colour shorthands
# %matplotlib inline
# + id="EbV0CTVd70wp" colab_type="code" colab={}
# Load the Pima Indians diabetes dataset from the cloned repository's CSV.
data_frame = pd.read_csv("pimadataorig.csv")
# + id="UshZrnHe74TQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595260544684, "user_tz": -330, "elapsed": 1750, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="b0d6c699-4aab-42ca-c812-9e296dd302d7"
# Quick sanity checks: dimensions, first/last rows, NaN presence.
data_frame.shape
# + id="1VggjBTY8B-R" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} executionInfo={"status": "ok", "timestamp": 1595260552987, "user_tz": -330, "elapsed": 1769, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="67bf9da6-7b11-4219-a490-6b5c74ca86ea"
data_frame.head(5)
# + id="LVt6hVPM8D_y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} executionInfo={"status": "ok", "timestamp": 1595260564190, "user_tz": -330, "elapsed": 2302, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="07b540dc-ba4f-47c4-ea96-4a0c9cf9f002"
data_frame.tail(5)
# + id="1ayToq4r8Gmf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595260575196, "user_tz": -330, "elapsed": 1702, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="a015178b-87c8-415b-db65-4ffb0b321766"
# NOTE(review): isnull() only detects NaN; later cells show this dataset encodes
# missing readings as 0, which is handled separately with SimpleImputer.
print (data_frame.isnull().values.any())
# + id="20vQeJkK8Jb_" colab_type="code" colab={}
def plot_corr(data_frame, size=11):
    """Render the pairwise correlation matrix of *data_frame* as a heatmap.

    Args:
        data_frame: pandas DataFrame whose columns are correlated pairwise.
        size: width and height of the square figure.

    Displays a matrix of correlations between columns, coloured
    blue-cyan-yellow-red-darkred from least (0) to most (1) correlated;
    expect a dark-red diagonal running from top left to bottom right.
    """
    correlations = data_frame.corr()
    figure, axes = plt.subplots(figsize=(size, size))
    axes.matshow(correlations)  # one colour-coded cell per column pair
    tick_positions = range(len(correlations.columns))
    plt.xticks(tick_positions, correlations.columns)  # label x axis with column names
    plt.yticks(tick_positions, correlations.columns)  # label y axis with column names
# + id="L-sI4i5d8V8l" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 655} executionInfo={"status": "ok", "timestamp": 1595260659146, "user_tz": -330, "elapsed": 1812, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="8f1ff54d-6feb-4a8b-e781-e6d6eb6056cd"
plot_corr(data_frame)
# + id="x-vCoNSi8d6B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 314} executionInfo={"status": "ok", "timestamp": 1595260673871, "user_tz": -330, "elapsed": 2109, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="606920b6-e49a-4674-cf40-771161fac5d6"
# Numeric view of the same correlation matrix.
data_frame.corr()
# + id="75w7zbRG8hbT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} executionInfo={"status": "ok", "timestamp": 1595260687787, "user_tz": -330, "elapsed": 1173, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="f44cfbe1-69ce-48bf-d426-2dfd92e6716f"
data_frame.head(5)
# + id="8HdVgdvs8lDb" colab_type="code" colab={}
# Drop skin_thickness — presumably because it is strongly correlated with
# another feature (see matrix above) and adds little information; TODO confirm.
del data_frame['skin_thickness']
# + id="PjeQrJqJ8njL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} executionInfo={"status": "ok", "timestamp": 1595260734988, "user_tz": -330, "elapsed": 2708, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="f7f05a18-03a6-4f9a-ac2b-f076d92d5ec5"
data_frame.head(5)
# + id="FKXPZRU-8wNA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 655} executionInfo={"status": "ok", "timestamp": 1595260744588, "user_tz": -330, "elapsed": 1186, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="e487d192-26cc-4e99-e284-5e2ae489fe3f"
# Re-check correlations after dropping the column.
plot_corr(data_frame)
# + id="r62JMhwz8y6u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} executionInfo={"status": "ok", "timestamp": 1595260768264, "user_tz": -330, "elapsed": 1776, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="24acb804-0252-44eb-fc76-7178f05025fc"
data_frame.head(5)
# + id="7QobTGp084jU" colab_type="code" colab={}
# Convert the boolean diabetes label to integer 1/0 for the sklearn estimators.
diabetes_map = {True : 1, False : 0}
data_frame['diabetes'] = data_frame['diabetes'].map(diabetes_map)
# + id="Dva13sND87P8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} executionInfo={"status": "ok", "timestamp": 1595260803870, "user_tz": -330, "elapsed": 1600, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="89006283-af3b-45ad-f587-21c3a82d0c74"
data_frame.head(5)
# + id="QX95z8xm9BSW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595260822668, "user_tz": -330, "elapsed": 2285, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="7cf72926-cef9-4113-e66e-e2433540b118"
# NOTE(review): map() returns NaN for any label value not in diabetes_map,
# so a True result here would indicate unexpected label values.
data_frame.isnull().values.any()
# + id="AE0mXo939Ftc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} executionInfo={"status": "ok", "timestamp": 1595261278322, "user_tz": -330, "elapsed": 2858, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="55a8523c-b8ee-401c-a609-411b39f807cb"
# Class balance of the target column: diabetic (1) vs non-diabetic (0) rows.
num_obs = len(data_frame)
num_true = len(data_frame.loc[data_frame['diabetes'] == 1])
num_false = len(data_frame.loc[data_frame['diabetes'] == 0])
# Python 3 division is already true division, so no float coercion is needed.
true_pct = num_true / num_obs * 100
false_pct = num_false / num_obs * 100
print("Number of True cases: {0} ({1:2.2f}%)".format(num_true, true_pct))
print("Number of False cases: {0} ({1:2.2f}%)".format(num_false, false_pct))
# + id="MAyEoWAJ-0zX" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1595261452962, "user_tz": -330, "elapsed": 1608, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}}
from sklearn.model_selection import train_test_split
# Predictor columns (skin_thickness was dropped earlier) and the target column.
feature_col_names = ['num_preg', 'glucose_conc', 'diastolic_bp', 'insulin', 'bmi', 'diab_pred', 'age']
predicted_class_names = ['diabetes']
X = data_frame[feature_col_names].values # predictor feature columns (7 X m)
y = data_frame[predicted_class_names].values # predicted class (1=true, 0=false) column (1 X m)
split_test_size = 0.30
# Fixed random_state makes the split reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split_test_size, random_state=42)
# test_size = 0.3 is 30%, 42 is the answer to everything
# + id="h2b_GAiB-7EC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} executionInfo={"status": "ok", "timestamp": 1595261462566, "user_tz": -330, "elapsed": 1546, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="2610c6fe-10e5-4707-b45a-293164866332"
# Confirm the split proportions match the requested 70/30.
trainval = (1.0 * len(X_train)) / (1.0 * len(data_frame.index))
testval = (1.0 * len(X_test)) / (1.0 * len(data_frame.index))
print("{0:0.2f}% in training set".format(trainval * 100))
print("{0:0.2f}% in test set".format(testval * 100))
# + id="aG-mKMUj_iG1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 151} executionInfo={"status": "ok", "timestamp": 1595261589920, "user_tz": -330, "elapsed": 1560, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="0ea8e004-a891-4c8c-ce72-6a7d791775a1"
# Verify the class balance is similar in the original data and both splits.
print("Original True : {0} ({1:0.2f}%)".format(len(data_frame.loc[data_frame['diabetes'] == 1]), (len(data_frame.loc[data_frame['diabetes'] == 1])/len(data_frame.index)) * 100.0))
print("Original False : {0} ({1:0.2f}%)".format(len(data_frame.loc[data_frame['diabetes'] == 0]), (len(data_frame.loc[data_frame['diabetes'] == 0])/len(data_frame.index)) * 100.0))
print("")
print("Training True : {0} ({1:0.2f}%)".format(len(y_train[y_train[:] == 1]), (len(y_train[y_train[:] == 1])/len(y_train) * 100.0)))
print("Training False : {0} ({1:0.2f}%)".format(len(y_train[y_train[:] == 0]), (len(y_train[y_train[:] == 0])/len(y_train) * 100.0)))
print("")
print("Test True : {0} ({1:0.2f}%)".format(len(y_test[y_test[:] == 1]), (len(y_test[y_test[:] == 1])/len(y_test) * 100.0)))
print("Test False : {0} ({1:0.2f}%)".format(len(y_test[y_test[:] == 0]), (len(y_test[y_test[:] == 0])/len(y_test) * 100.0)))
# + id="_myg0FUD_lVb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} executionInfo={"status": "ok", "timestamp": 1595261605304, "user_tz": -330, "elapsed": 1579, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="40cfa7ca-9987-4a14-807c-b7e4cf461507"
data_frame.head()
# + id="2le0h6uVAE8l" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} executionInfo={"status": "ok", "timestamp": 1595261636021, "user_tz": -330, "elapsed": 1487, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="54134ee8-d4d0-4bdb-cd45-5c7adf5231f3"
# Count zero-valued readings per column: in this dataset a 0 reading encodes
# a missing measurement (imputed in the next cells).
print("# rows in dataframe {0}".format(len(data_frame)))
print("# rows missing glucose_conc: {0}".format(len(data_frame.loc[data_frame['glucose_conc'] == 0])))
print("# rows missing diastolic_bp: {0}".format(len(data_frame.loc[data_frame['diastolic_bp'] == 0])))
print("# rows missing insulin: {0}".format(len(data_frame.loc[data_frame['insulin'] == 0])))
print("# rows missing bmi: {0}".format(len(data_frame.loc[data_frame['bmi'] == 0])))
print("# rows missing diab_pred: {0}".format(len(data_frame.loc[data_frame['diab_pred'] == 0])))
print("# rows missing age: {0}".format(len(data_frame.loc[data_frame['age'] == 0])))
# + id="4MmclaZmB2E9" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1595262268665, "user_tz": -330, "elapsed": 1482, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}}
from sklearn.impute import SimpleImputer
# + id="WiOTT7DcAJJa" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1595262358709, "user_tz": -330, "elapsed": 1363, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}}
# Impute with mean all 0 readings (0 encodes a missing measurement here).
# BUG FIX: the test set was previously imputed with fit_transform, which
# refits the imputer on test-set statistics (data leakage). Fit on the
# training split only and reuse those means for the test split.
# NOTE(review): missing_values=0 also imputes num_preg, where 0 pregnancies
# is a legitimate value — confirm this is intended.
fill_0 = SimpleImputer(missing_values=0, strategy="mean")
X_train = fill_0.fit_transform(X_train)
X_test = fill_0.transform(X_test)
# + id="JV1xn7AmAQQc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595262372007, "user_tz": -330, "elapsed": 1256, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="905b00c9-5567-49a7-9c66-1e0694422b25"
from sklearn.naive_bayes import GaussianNB
# create Gaussian Naive Bayes model object and train it with the data
nb_model = GaussianNB()
# ravel() flattens the (n, 1) target array to the (n,) shape sklearn expects.
nb_model.fit(X_train, y_train.ravel())
# + id="c7FKUw-YDANX" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1595262407847, "user_tz": -330, "elapsed": 1563, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}}
# this returns array of predicted results
prediction_from_trained_data = nb_model.predict(X_train)
# + id="DgJ2etqUDI5M" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595262428086, "user_tz": -330, "elapsed": 1667, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="3010cdf7-6820-427d-fb88-78e99471f665"
# Training-set accuracy (optimistic; compare with the test accuracy below).
from sklearn import metrics
accuracy = metrics.accuracy_score(y_train, prediction_from_trained_data)
print ("Accuracy of our naive bayes model is : {0:.4f}".format(accuracy))
# + id="1TPxL71SDNlm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595262436007, "user_tz": -330, "elapsed": 1406, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="d97b373b-19eb-4b11-b659-361c45adf4b7"
# Held-out test-set accuracy: the number that actually matters.
prediction_from_test_data = nb_model.predict(X_test)
accuracy = metrics.accuracy_score(y_test, prediction_from_test_data)
print ("Accuracy of our naive bayes model is: {0:0.4f}".format(accuracy))
# + id="hLHqeVNSDPzM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} executionInfo={"status": "ok", "timestamp": 1595262451823, "user_tz": -330, "elapsed": 1451, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="e64cad5d-388e-474f-aa97-2cdf4c5ad86c"
print ("Confusion Matrix")
# labels for set 1=True to upper left and 0 = False to lower right
print ("{0}".format(metrics.confusion_matrix(y_test, prediction_from_test_data, labels=[1, 0])))
# + id="gPRIEahgDTp3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} executionInfo={"status": "ok", "timestamp": 1595262464806, "user_tz": -330, "elapsed": 1508, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="78735d8f-562e-4b94-82ce-ed8799d750c5"
print ("Classification Report")
# labels for set 1=True to upper left and 0 = False to lower right
print ("{0}".format(metrics.classification_report(y_test, prediction_from_test_data, labels=[1, 0])))
# + id="_mrMN9JaDW0B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 151} executionInfo={"status": "ok", "timestamp": 1595262494565, "user_tz": -330, "elapsed": 1705, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="88bda549-9e95-4d28-e828-5b250d4d1eac"
from sklearn.ensemble import RandomForestClassifier
# Create a RandomForestClassifier object
rf_model = RandomForestClassifier(random_state=42)
rf_model.fit(X_train, y_train.ravel())
# + id="tbFFmeCMDeB_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595262521282, "user_tz": -330, "elapsed": 7508, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="b4a7950c-b3c2-4c97-f6fa-693e26c363bd"
# Training-set accuracy — expect near-perfect for an unconstrained forest.
rf_predict_train = rf_model.predict(X_train)
#get accuracy
rf_accuracy = metrics.accuracy_score(y_train, rf_predict_train)
#print accuracy
print ("Accuracy: {0:.4f}".format(rf_accuracy))
# + id="wHhTNKh1DjIh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595262527529, "user_tz": -330, "elapsed": 1453, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="1be96448-07ba-40c2-f2db-e5edf071e15a"
# Held-out test-set accuracy.
rf_predict_test = rf_model.predict(X_test)
#get accuracy
rf_accuracy_testdata = metrics.accuracy_score(y_test, rf_predict_test)
#print accuracy
print ("Accuracy: {0:.4f}".format(rf_accuracy_testdata))
# + id="QtuINsgCDmI-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 269} executionInfo={"status": "ok", "timestamp": 1595262639538, "user_tz": -330, "elapsed": 1388, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="fa23734f-0b99-446b-d8c7-ca47c022d4ef"
# NOTE(review): this pyplot import is unused in this cell (output is text only).
import matplotlib.pyplot as plot
print ("Confusion Matrix for Random Forest")
# labels for set 1=True to upper left and 0 = False to lower right
print ("{0}".format(metrics.confusion_matrix(y_test, rf_predict_test, labels=[1, 0])))
print ("")
print ("Classification Report\n")
# labels for set 1=True to upper left and 0 = False to lower right
print ("{0}".format(metrics.classification_report(y_test, rf_predict_test, labels=[1, 0])))
# + id="1xGHoSGQDp5_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 403} executionInfo={"status": "ok", "timestamp": 1595262674726, "user_tz": -330, "elapsed": 1349, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="db8caf73-f138-4097-8d14-eb6c305b93b9"
from sklearn.linear_model import LogisticRegression
# C=0.7 sets moderate inverse regularisation strength (smaller C = stronger L2).
lr_model = LogisticRegression(C=0.7, random_state=42)
lr_model.fit(X_train, y_train.ravel())
lr_predict_test = lr_model.predict(X_test)
# training metrics
print ("Accuracy : {0:.4f}".format(metrics.accuracy_score(y_test, lr_predict_test)))
print ("Confusion Matrix")
# labels=[1, 0] places the positive (diabetic) class in the upper-left cell
print (metrics.confusion_matrix(y_test, lr_predict_test, labels=[1, 0]))
print ("")
print ("Classification Report")
print (metrics.classification_report(y_test, lr_predict_test, labels=[1, 0]))
# + id="M017rIh3EKGB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 252} executionInfo={"status": "ok", "timestamp": 1595262691672, "user_tz": -330, "elapsed": 2856, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqY<KEY>g=s64", "userId": "02004506660163687076"}} outputId="d61754a5-0f9a-4901-cd9e-e12b0dcae776"
from sklearn.linear_model import LogisticRegressionCV
# Cross-validated logistic regression: Cs=3 candidate strengths, 10-fold CV,
# class_weight="balanced" to compensate for the minority diabetic class.
# NOTE(review): refit=False averages the per-fold coefficients instead of
# refitting on the whole training set — confirm this is intended.
lr_cv_model = LogisticRegressionCV(n_jobs=-1, random_state=42, Cs=3, cv=10, refit=False, class_weight="balanced")
# set number of jobs to -1 which uses all cores to parallelize
lr_cv_model.fit(X_train, y_train.ravel())
lr_cv_predict_test = lr_cv_model.predict(X_test)
# training metrics
print( "Accuracy: {0:.4f}".format(metrics.accuracy_score(y_test, lr_cv_predict_test)))
print (metrics.confusion_matrix(y_test, lr_cv_predict_test, labels=[1, 0]))
print ("")
print ("Classification Report")
print (metrics.classification_report(y_test, lr_cv_predict_test, labels=[1,0]))
# + id="9vtr3SFvEN3h" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} executionInfo={"status": "ok", "timestamp": 1595262716075, "user_tz": -330, "elapsed": 4196, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="abc927a9-6f73-4865-e2c5-2fe00b1aab72"
from sklearn.svm import SVC
# Create a support vector classifier with a linear kernel
svm_model = SVC(kernel='linear', C=1, random_state=42)
svm_model.fit(X_train, y_train.ravel())
# + id="3VzSdl4hETQk" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1595262721763, "user_tz": -330, "elapsed": 1671, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}}
# this returns array of predicted results
prediction_from_trained_data = svm_model.predict(X_train)
# + id="d6yb212pEVgT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595262732286, "user_tz": -330, "elapsed": 913, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="753827ec-410e-4c00-9347-24751966528a"
# Training-set accuracy.
from sklearn import metrics
accuracy = metrics.accuracy_score(y_train, prediction_from_trained_data)
print ("Accuracy of our SVM model is : {0:.4f}".format(accuracy))
# + id="LBq7VZyMEYQf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595262742189, "user_tz": -330, "elapsed": 1545, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="f506b759-ea85-4ded-f1a7-09445b306472"
# Held-out test-set accuracy.
svm_predict_test = svm_model.predict(X_test)
#get accuracy
svm_accuracy_testdata = metrics.accuracy_score(y_test, svm_predict_test)
#print accuracy
print ("Accuracy: {0:.4f}".format(svm_accuracy_testdata))
# + id="DwcgPRN6Eahr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 269} executionInfo={"status": "ok", "timestamp": 1595262761716, "user_tz": -330, "elapsed": 1658, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="c83a8235-3a8c-413e-ab12-641f132bd358"
# Confusion matrix and per-class report for the SVM on the held-out test set.
# FIX: corrected the typo "Amchine" -> "Machine" in the printed heading.
print ("Confusion Matrix for Support Vector Machine")
# labels for set 1=True to upper left and 0 = False to lower right
print ("{0}".format(metrics.confusion_matrix(y_test, svm_predict_test, labels=[1, 0])))
print ("")
print ("Classification Report\n")
# labels for set 1=True to upper left and 0 = False to lower right
print ("{0}".format(metrics.classification_report(y_test, svm_predict_test, labels=[1, 0])))
# + id="Wp6eJ8Y5EfQp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 151} executionInfo={"status": "ok", "timestamp": 1595262773031, "user_tz": -330, "elapsed": 2220, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="549ebbc0-a9fc-4c2d-8549-e90ece11f983"
from sklearn.neural_network import MLPClassifier
# Create a multi-layer perceptron with three hidden layers of 13 units each
ann_model = MLPClassifier(hidden_layer_sizes=(13,13,13),max_iter=500, random_state=42)
ann_model.fit(X_train, y_train.ravel())
# + id="Dn9R5zlZEh4x" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1595262800826, "user_tz": -330, "elapsed": 2001, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}}
prediction_from_trained_data = ann_model.predict(X_train)
# + id="oHzmatoGEoui" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595262809966, "user_tz": -330, "elapsed": 1638, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="51735b15-7354-4989-a842-69e49383d7d4"
# Training-set accuracy.
from sklearn import metrics
accuracy = metrics.accuracy_score(y_train, prediction_from_trained_data)
print ("Accuracy of our ANN model is : {0:.4f}".format(accuracy))
# + id="87vrtVfwErDA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595262820967, "user_tz": -330, "elapsed": 1667, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="425b4b10-3957-4508-8646-ef77f6dc1fc1"
# Held-out test-set accuracy.
ann_predict_test = ann_model.predict(X_test)
#get accuracy
ann_accuracy_testdata = metrics.accuracy_score(y_test, ann_predict_test)
#print accuracy
print ("Accuracy: {0:.4f}".format(ann_accuracy_testdata))
# + id="o_hlF4P0EtuW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 269} executionInfo={"status": "ok", "timestamp": 1595262831530, "user_tz": -330, "elapsed": 1589, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="6ebeb133-d4b7-4637-828c-5d0d0f407614"
print ("Confusion Matrix for Artificial Neural Network")
# labels for set 1=True to upper left and 0 = False to lower right
print ("{0}".format(metrics.confusion_matrix(y_test, ann_predict_test, labels=[1, 0])))
print ("")
print ("Classification Report\n")
# labels for set 1=True to upper left and 0 = False to lower right
print ("{0}".format(metrics.classification_report(y_test, ann_predict_test, labels=[ 1,0])))
# + id="qKNGNzsNEwUl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 118} executionInfo={"status": "ok", "timestamp": 1595262840798, "user_tz": -330, "elapsed": 1303, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AO<KEY>YZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="52e2b049-1038-48b5-dbd9-586ef10658c9"
from sklearn.tree import DecisionTreeClassifier
# create a decision tree model object and train it with the data
dt_model = DecisionTreeClassifier(random_state=42)
dt_model.fit(X_train, y_train.ravel())
# + id="4sdEixAFEyp7" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1595262850726, "user_tz": -330, "elapsed": 1351, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}}
prediction_from_trained_data = dt_model.predict(X_train)
# + id="bpaTs_eeE1EL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595262859993, "user_tz": -330, "elapsed": 1351, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="8cd6497a-b3d5-4971-db78-ee0ad70c9a3f"
# Training-set accuracy — an unpruned tree typically memorises the training data.
from sklearn import metrics
accuracy = metrics.accuracy_score(y_train, prediction_from_trained_data)
print ("Accuracy of our DT model is : {0:.4f}".format(accuracy))
# + id="FD_Y0o1ME3Uu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1595262894237, "user_tz": -330, "elapsed": 1542, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="1c47b011-2ad2-4272-a0db-08037c54a0ce"
# Held-out test-set accuracy.
dt_predict_test = dt_model.predict(X_test)
#get accuracy
dt_accuracy_testdata = metrics.accuracy_score(y_test, dt_predict_test)
#print accuracy
print ("Accuracy: {0:.4f}".format(dt_accuracy_testdata))
# + id="24B3BgFME_o4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 269} executionInfo={"status": "ok", "timestamp": 1595262906724, "user_tz": -330, "elapsed": 1592, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="cc0a18b1-6a63-45fc-ae07-f31567281eeb"
print ("Confusion Matrix for DT")
# labels for set 1=True to upper left and 0 = False to lower right
print ("{0}".format(metrics.confusion_matrix(y_test, dt_predict_test, labels=[1, 0])))
print ("")
print ("Classification Report\n")
# labels for set 1=True to upper left and 0 = False to lower right
print ("{0}".format(metrics.classification_report(y_test, dt_predict_test, labels=[ 1,0])))
# + id="wA541sTUFCrZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 301} executionInfo={"status": "ok", "timestamp": 1595262924012, "user_tz": -330, "elapsed": 1805, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj-OqbYZJ3WSQQHsL2wTyYetEoqqYoiBjx6MVIpcg=s64", "userId": "02004506660163687076"}} outputId="3e876a2a-7c0c-4ae9-8a10-d92d45e4f2a9"
import numpy as np
import matplotlib.pyplot as plt
# Summary plot: each model's (false positive rate, true positive rate) point in
# ROC space; values are presumably transcribed from the reports above — TODO confirm.
# fpr, tpr
naive_bayes = np.array([0.28, 0.52])
logistic = np.array([0.54, 0.26])
random_forest = np.array([0.43, 0.37])
ann = np.array([0.36, 0.44])
svm = np.array([0.46, 0.34])
dt = np.array([0.52, 0.28])
#plotting
plt.scatter(naive_bayes[0], naive_bayes[1], label = 'Naive Bayes', facecolors='yellow', edgecolors='yellow', s=100)
plt.scatter(logistic[0], logistic[1], label = 'Logistic Regression', facecolors='orange', edgecolors='orange', s=100)
plt.scatter(random_forest[0], random_forest[1], label = 'Random Forest', facecolors='blue', edgecolors='black', s=100)
plt.scatter(ann[0], ann[1], label = 'Artificial Neural Network', facecolors='gray', edgecolors='black', s=100)
plt.scatter(svm[0], svm[1], label = 'Support Vector Machine', facecolors='red', edgecolors='red', s=100)
# BUG FIX: the decision-tree point previously used ann[0] as its x coordinate;
# it now uses its own fpr (dt[0]). Also fixed the legend typo "Dicision".
plt.scatter(dt[0], dt[1], label = 'Decision Tree Classifier', facecolors='green', edgecolors='yellow', s=100)
plt.plot([0, 1], [0, 1], 'k--')  # diagonal = performance of a random classifier
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic (ROC)')
plt.legend(loc='upper left')
plt.show()
# + id="_3cNp_axFG2O" colab_type="code" colab={}
| Prima-Indian-Diabetes-prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.10.0 64-bit (''.venv'': venv)'
# name: python3
# ---
# # Recipe: Get schedules for a whole year
#
# ## Problem
#
# Get schedules for a whole year.
#
# ## Solution
# +
import datetime as dt
import pandas as pd
from cro.schedule.sdk import Client, Schedule
# +
YEAR = 2022
month_dates = [dt.date(YEAR, month, 1) for month in range(1, 3)]
data: dict[Schedule, pd.DataFrame] = {}
client = Client() # Set the station id (sid) later within the for loop.
# -
# Fetch the sechedules for station Plus and Radiožurnál from the beginning of the year.
# +
from tqdm import tqdm
for sid in ("plus", "radiozurnal"):
client.station = sid
for date in tqdm(month_dates):
schedules = client.get_month_schedule(date)
for schedule in schedules:
data[schedule] = schedule.to_table()
# -
# ### Write single dataset to Excel
# One Excel workbook per schedule, named by station, ISO week number,
# and the week's Monday/Sunday dates.
for schedule, table in tqdm(data.items()):
    week_number = f"{schedule.date.isocalendar()[1]:02d}"  # ISO week, zero-padded
    week_start = schedule.date - dt.timedelta(days=schedule.date.weekday()) # Monday
    week_end = week_start + dt.timedelta(days=6) # Sunday
    with pd.ExcelWriter(
        f"../../../data/sheet/{YEAR}/Schedule_{schedule.station.name}_{YEAR}W{week_number}_{week_start}_{week_end}.xlsx"
    ) as writer:
        table.to_excel(writer, index=False)
# ### Write concatenated datasets to Excel
# Concatenate every schedule table into a single yearly workbook.
with pd.ExcelWriter(f"../../../data/sheet/Schedule_Y{YEAR}.xlsx") as writer:
    pd.concat(data.values()).to_excel(writer)
| docs/source/notebooks/Recipe_Get_Schedule_Year.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import fastai; fastai.__version__
from fastai.vision import *
from fastai.callbacks import *
torch.cuda.set_device(7)
import sys; sys.path.append("../dev")
from local.segmentation.dataset import SemanticSegmentationData
from local.segmentation.metrics import *
# test data creation
# Dataset layout on disk; VALID=0.1 holds out 10% of TRAIN for validation.
PATH = Path("/home/turgutluk/data/siim_acr_pneu/")
IMAGES, MASKS, CODES, TRAIN, VALID, TEST = \
    "train/images_1024", "train/masks_1024", "codes.txt", "train.txt", 0.1, "test.txt"
# Small debug sample: 1000 images, batch size 1, 768px crops.
ssdata = SemanticSegmentationData(PATH, IMAGES, MASKS, CODES, TRAIN, VALID, TEST,
                                 sample_size=1000, bs=1, size=768)
data = ssdata.get_data()
PATH.ls()
rle_df = pd.read_csv(PATH/'train-rle.csv')
# Image ids with at least one pneumothorax pixel (" -1" marks an empty mask).
yes_pneumo = rle_df.loc[rle_df[' EncodedPixels'] != " -1", 'ImageId'].values
yes_pneumo = [f"{Path(o)}.png" for o in yes_pneumo]
# +
from torch.utils.data.sampler import *
class OverSamplingCallback(LearnerCallback):
    # Rebalances segmentation batches by oversampling images known to contain
    # a positive (non-background) mask: installs a WeightedRandomSampler on
    # the train DataLoader when training begins.
    def __init__(self,learn:Learner,binary_lbl_fns:list=None,weights:torch.Tensor=None):
        "binary_lbl_fns has list of image filenames with at least 1 non-background pixel"
        super().__init__(learn)
        self.weights = weights  # optional per-class weights, indexed [negative, positive]
        self.binary_lbl_fns = binary_lbl_fns  # filenames of positive examples
    def on_train_begin(self, **kwargs):
        # Mask filenames for the training set, in dataset order.
        self._labels = self.learn.data.train_dl.dataset.y.items
        # Binarize each sample: 1 when its filename is in the positive list.
        if self.binary_lbl_fns: self.labels = [1 if o.name in self.binary_lbl_fns else 0 for o in self._labels]
        else: self.labels = self._labels
        _, counts = np.unique(self.labels,return_counts=True)
        # Default to inverse class frequency; otherwise look up the supplied
        # per-class weight for each sample's label.
        if self.weights is None: self.weights = (1/counts)[self.labels]
        else: self.weights = self.weights[self.labels]
        # self.label_counts = np.bincount([self.learn.data.train_dl.dataset.y[i].data for i in range(len(self.learn.data.train_dl.dataset))])
        # self.total_len_oversample = int(self.learn.data.c*np.max(self.label_counts))
        self.total_len_oversample = len(self._labels)
        # Replace the batch sampler so each epoch draws (with replacement)
        # according to the per-sample weights.
        self.learn.data.train_dl.dl.batch_sampler = BatchSampler(WeightedRandomSampler(self.weights,self.total_len_oversample), self.learn.data.train_dl.batch_size,False)
# -
# U-Net on a ResNet34 backbone; dice is the tracked metric.
learn = unet_learner(data, models.resnet34);
learn.metrics = [dice]
learn.path = Path(".")
learn.to_fp16();  # mixed-precision training
# Oversample pneumothorax-positive images with fixed class weights [neg, pos].
learn.callback_fns.append(partial(OverSamplingCallback, binary_lbl_fns=yes_pneumo, weights=tensor([0.8, 0.2])))
learn.fit_one_cycle(2, 1e-3)
learn.show_results(rows=3)
res = learn.get_preds(DatasetType.Valid)
preds, targs = res
preds.shape, targs.shape
# Visualize the predicted mask for validation sample 9.
ImageSegment(preds.argmax(1)[9][None,...]).show(figsize=(5,5), cmap='viridis', alpha=1)
| pneumothorax/debug.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Q-Learning
#
# Nach dem Paper _Deep reinforcement learning compared with Q-table learning applied to backgammon_ von <NAME> und <NAME>, 2016
#
# (https://www.kth.se/social/files/58865ec8f27654607fb6e9a4/PFinnman_MWinberg_dkand16.pdf)
#
# Q-Learning erstellt eine Tabelle mit dem besten Zug für jede mögliche Backgammonposition. Dies ist jedoch ein Problem, da es in Backgammon $34 * 10^{21}$ mögliche Positionen gibt.
#
# In dem Paper wurde das Spielfeld, sowie Steine und Regeln stark reduziert und somit die Anzahl der möglichen Positionen auf $54 * 10^{6}$ gesenkt. Erst dadurch wurde Q-Learning bei Backgammon ermöglicht.
#
#
# Da ich eine exakte Repräsentation gewählt habe, um möglichst realitätsnahe Spieler zu entwickeln, bietet sich Q-Learning nicht für meinen Fall an.
# 
| TD-Gammon010-QLearning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: nlu
# language: python
# name: nlu
# ---
import pandas as pd
import numpy as np
import pickle
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.stem.porter import *
import string
import re
#from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer as VS
from textstat.textstat import *
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import classification_report
from sklearn.svm import LinearSVC
import matplotlib.pyplot as plt
import seaborn
from sklearn.model_selection import train_test_split
# %matplotlib inline
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# Labeled training tweets; expects a 'tweet' text column.
df = pd.read_csv("data/train/training_data.csv")
tweets=df.tweet
# ## Davidson Feature Generation
# +
# English stop words plus Twitter artifacts ("rt", follow-friday tags) that
# carry no signal. Fixed: was a redundant chained assignment
# (`stopwords=stopwords = ...`) that bound the same value twice.
stopwords = nltk.corpus.stopwords.words("english")
other_exclusions = ["#ff", "ff", "rt"]
stopwords.extend(other_exclusions)
stemmer = PorterStemmer()
def preprocess(text_string):
    """
    Accepts a text string and replaces:
    1) urls with URLHERE
    2) lots of whitespace with one instance
    3) mentions with MENTIONHERE

    This allows us to get standardized counts of urls and mentions
    Without caring about specific people mentioned
    """
    # Raw strings: '\s' / '\w' are invalid escapes in plain literals
    # (DeprecationWarning today, SyntaxError in future Python).
    space_pattern = r'\s+'
    giant_url_regex = (r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|'
        r'[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    mention_regex = r'@[\w\-]+'
    parsed_text = re.sub(space_pattern, ' ', text_string)
    parsed_text = re.sub(giant_url_regex, '', parsed_text)
    parsed_text = re.sub(mention_regex, '', parsed_text)
    return parsed_text
def tokenize(tweet):
    """Removes punctuation & excess whitespace, sets to lowercase,
    and returns a list of tokens (stemming is currently disabled —
    see the commented-out stemmer call)."""
    # Bug fix: the original pattern "[^a-zA-Z]*" can match the empty string,
    # and since Python 3.7 re.split then splits between EVERY character,
    # shredding the tweet into single letters. "+" restores the intended
    # split on runs of non-letters.
    tweet = " ".join(re.split(r"[^a-zA-Z]+", tweet.lower())).strip()
    tokens = tweet.split() #[stemmer.stem(t) for t in tweet.split()]
    return tokens
def basic_tokenize(tweet):
    """Same as tokenize but keeps sentence punctuation (.,!?) attached."""
    # Bug fix: "+" instead of "*" — an empty-matchable pattern makes
    # re.split (Python >= 3.7) split between every character.
    tweet = " ".join(re.split(r"[^a-zA-Z.,!?]+", tweet.lower())).strip()
    return tweet.split()
# Word n-gram (1-3) TF-IDF over preprocessed, stopword-filtered tweets;
# norm=None / smooth_idf=False mirror the Davidson et al. setup.
vectorizer = TfidfVectorizer(
    tokenizer=tokenize,
    preprocessor=preprocess,
    ngram_range=(1, 3),
    stop_words=stopwords,
    use_idf=True,
    smooth_idf=False,
    norm=None,
    decode_error='replace',
    max_features=10000,
    min_df=5,      # drop terms appearing in fewer than 5 tweets
    max_df=0.75    # drop terms appearing in more than 75% of tweets
)
# -
#Construct tfidf matrix and get relevant scores
tfidf = vectorizer.fit_transform(tweets).toarray()
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2;
# newer versions require get_feature_names_out() — confirm the pinned version.
vocab = {v:i for i, v in enumerate(vectorizer.get_feature_names())}
idf_vals = vectorizer.idf_
idf_dict = {i:idf_vals[i] for i in vocab.values()} #keys are indices; values are IDF scores
#Get POS tags for tweets and save as a string (one space-joined tag sequence per tweet)
tweet_tags = []
for t in tweets:
    tokens = basic_tokenize(preprocess(t))
    tags = nltk.pos_tag(tokens)
    tag_list = [x[1] for x in tags]
    tag_str = " ".join(tag_list)
    tweet_tags.append(tag_str)
#We can use the TFIDF vectorizer to get a token matrix for the POS tags
# use_idf=False makes this a plain term-frequency matrix over POS n-grams.
pos_vectorizer = TfidfVectorizer(
    tokenizer=None,
    lowercase=False,
    preprocessor=None,
    ngram_range=(1, 3),
    stop_words=None,
    use_idf=False,
    smooth_idf=False,
    norm=None,
    decode_error='replace',
    max_features=5000,
    min_df=5,
    max_df=0.75,
)
#Construct POS TF matrix and get vocab dict
pos = pos_vectorizer.fit_transform(pd.Series(tweet_tags)).toarray()
# NOTE(review): get_feature_names() removed in scikit-learn 1.2 (see above).
pos_vocab = {v:i for i, v in enumerate(pos_vectorizer.get_feature_names())}
def count_twitter_objs(text_string):
    """
    Accepts a text string and replaces:
    1) urls with URLHERE
    2) lots of whitespace with one instance
    3) mentions with MENTIONHERE
    4) hashtags with HASHTAGHERE

    This allows us to get standardized counts of urls and mentions
    Without caring about specific people mentioned.

    Returns counts of urls, mentions, and hashtags.
    """
    # Raw strings: '\s' / '\w' are invalid escapes in plain literals
    # (DeprecationWarning today, SyntaxError in future Python).
    space_pattern = r'\s+'
    giant_url_regex = (r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|'
        r'[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    mention_regex = r'@[\w\-]+'
    hashtag_regex = r'#[\w\-]+'
    parsed_text = re.sub(space_pattern, ' ', text_string)
    parsed_text = re.sub(giant_url_regex, 'URLHERE', parsed_text)
    parsed_text = re.sub(mention_regex, 'MENTIONHERE', parsed_text)
    parsed_text = re.sub(hashtag_regex, 'HASHTAGHERE', parsed_text)
    return(parsed_text.count('URLHERE'), parsed_text.count('MENTIONHERE'), parsed_text.count('HASHTAGHERE'))
# ## Preprocess for slang
from nltk.tokenize import TweetTokenizer
tknzr = TweetTokenizer()
def load_slang_dict():
    """Load slang -> expansion pairs from ``slang_to_words.txt``.

    Each line holds ``expansion<TAB>slang``; the returned dict maps the
    slang term to its expansion.
    """
    mapping = {}
    with open("slang_to_words.txt", 'r') as f:
        for raw_line in f:
            parts = raw_line.strip().split('\t')
            mapping[parts[1]] = parts[0]
    return mapping
slang_dict_one = load_slang_dict()
#slang_dict
def load_slang_two_dict():
    """Load slang definitions from ``noslangdotcom.txt``.

    Each line is ``slang:definition``. Bug fix: splitting on every ':'
    truncated any definition that itself contains a colon; maxsplit=1
    keeps the full definition.
    """
    slang_dict_two = {}
    with open("noslangdotcom.txt", 'r') as f:
        lines = f.readlines()
        for line in lines:
            tokens = line.strip().split(':', 1)
            slang_dict_two[tokens[0]] = tokens[1]
    return slang_dict_two
slang_dict_two = load_slang_two_dict()
#slang_dict_two
def load_slang_three_dict():
    """Load ``slang==definition`` pairs from ``internet_slangsDotNet.txt``."""
    entries = {}
    with open("internet_slangsDotNet.txt", 'r') as f:
        for raw_line in f:
            parts = raw_line.strip().split('==')
            entries[parts[0]] = parts[1]
    return entries
slang_dict_three = load_slang_three_dict()
def load_slang_four():
    """Load ``abbrev=definition`` pairs from ``common_twitter_abbreviations.txt``."""
    abbreviations = {}
    with open("common_twitter_abbreviations.txt", 'r') as f:
        for raw_line in f:
            parts = raw_line.strip().split('=')
            abbreviations[parts[0]] = parts[1]
    return abbreviations
slang_dict_four = load_slang_four()
# +
def merge_dicts(*dict_args):
    """Shallow-merge any number of dicts into a fresh dict.

    Later dicts win on key collisions; the inputs are left untouched.
    """
    return {key: value for mapping in dict_args for key, value in mapping.items()}
slang_dict = merge_dicts(slang_dict_one, slang_dict_two, slang_dict_three, slang_dict_four)
# -
# ## Replace slang with definitions
from nltk.corpus import sentiwordnet as swn
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
lemmatizer = WordNetLemmatizer()
# +
def slang_sentiment(text):
    """Expand slang in `text`, then return per-token [pos, neg, obj] triples."""
    text = remove_slang(text)
    senti = get_sentiment_text(text)
    return senti
def positive(text, indicator):
    """Sum of sentiment component `indicator` over the slang-expanded text.
    NOTE(review): `senti` is computed but unused, and positive/negative/
    objective have identical bodies — the distinction comes only from the
    `indicator` argument callers pass (0=pos, 1=neg, 2=obj).
    """
    text = remove_slang(text)
    senti = get_sentiment_text(text)
    return get_pos(text, indicator)
def negative(text, indicator):
    """Identical to `positive`; callers pass indicator=1 for the negative component."""
    text = remove_slang(text)
    senti = get_sentiment_text(text)
    return get_pos(text, indicator)
def objective(text, indicator):
    """Identical to `positive`; callers pass indicator=2 for the objective component."""
    text = remove_slang(text)
    senti = get_sentiment_text(text)
    return get_pos(text, indicator)
def remove_slang(text):
    """Replace known slang tokens with their dictionary expansions.

    Tokenizes with the tweet tokenizer, swaps any token found in
    ``slang_dict`` (case-insensitive lookup) for its expansion, and
    returns the rebuilt text split on single spaces. The accumulated
    trailing space yields a final '' element, matching the original
    behaviour exactly.
    """
    expanded = [
        slang_dict[token.lower()] if token.lower() in slang_dict else token
        for token in tknzr.tokenize(text)
    ]
    rebuilt = "".join(piece + " " for piece in expanded)
    return rebuilt.split(" ")
def get_sentiment_text(strList):
    """Return one [pos, neg, obj] triple per POS-tagged item of the text.

    NOTE(review): nltk.pos_tag is called on a *string*, so it tags each
    character rather than each word — confirm intent; passing the token
    list directly may have been the goal.
    """
    text = ' '.join(strList)
    pos_values = nltk.pos_tag(text)
    pos_senti = []
    for (x, y) in pos_values:
        # get_sentiment returns [] for unsupported tags; substitute zeros.
        if len(get_sentiment(x,y)) > 1:
            pos_senti.append(get_sentiment(x,y))
        else:
            pos_senti.append([0, 0, 0])
    return pos_senti
def get_pos(text, indicator):
    """Sum component `indicator` (0=pos, 1=neg, 2=obj) of the per-token
    sentiment triples for `text` (a token list)."""
    x = 0
    pos = get_sentiment_text(text)
    for v in pos:
        x += v[indicator]
    return x
# -
from nltk.corpus import wordnet as wn
def penn_to_wn(tag):
    """Map a Penn Treebank POS tag to the matching simple WordNet tag.

    Returns None for tag families WordNet does not model.
    """
    prefix_map = (('J', wn.ADJ), ('N', wn.NOUN), ('R', wn.ADV), ('V', wn.VERB))
    for prefix, wordnet_tag in prefix_map:
        if tag.startswith(prefix):
            return wordnet_tag
    return None
def get_sentiment(word,tag):
    """Return [pos, neg, obj] SentiWordNet scores for (word, tag).

    Returns an empty list when the tag family is unsupported, the lemma
    is missing, or the word has no synsets. Uses the first (most common)
    sense only.
    """
    wn_tag = penn_to_wn(tag)
    # NOTE(review): penn_to_wn maps verbs to wn.VERB, but verbs are
    # rejected here — confirm whether wn.VERB should be accepted too.
    if wn_tag not in (wn.NOUN, wn.ADJ, wn.ADV):
        return []
    lemma = lemmatizer.lemmatize(word, pos=wn_tag)
    if not lemma:
        return []
    synsets = wn.synsets(word, pos=wn_tag)
    if not synsets:
        return []
    # Take the first sense, the most common
    synset = synsets[0]
    swn_synset = swn.senti_synset(synset.name())
    return [swn_synset.pos_score(),swn_synset.neg_score(),swn_synset.obj_score()]
# SentiWordNet 3.0 lexicon loaded as a DataFrame; NOTE(review): the
# sentiment scores above come from nltk's swn interface, not this frame.
data = pd.read_csv('SentiWordNet_3.0.0.txt', sep='\t', header=None)
data.columns = ["POS","ID","PosScore","NegScore","SynsetTerms","Gloss"]
# Check for quotes
def contains_quotes(text):
    """Return 1 if the tweet contains any single- or double-quote token, else 0."""
    tokens = tknzr.tokenize(text)
    has_quote = any(token in ('"', "'") for token in tokens)
    return 1 if has_quote else 0
# Check if self-referential
# +
# Group terms and slurs, one per line, lowercased.
ethnic_groups = []
with open('ethnic_groups_and_common_slurs.txt', 'r') as fileinput:
    for line in fileinput:
        ethnic_groups.append((line.split('\n'))[0].lower())
#demonstrative adjectives and other words that can indicate targeting of a specific group
targets = ['all', 'every', 'you', 'those', 'these', 'any', 'each', 'no', 'that', 'this']
modality = ['should', 'can', 'can\'t', 'cannot', 'won\'t', 'will', 'want', 'wants', 'are']  # modal/aux verbs
reclaiming = ['proud', 'reclaim', 'reclaming', 'offensive', 'like']  # reclaiming vocabulary
me = ['i\'m', 'we', 'i', 'me', 'this']  # first-person / self-reference markers
def contains_target_self_referential(text):
    """Heuristic: 1 when the tweet looks self-referential about a group
    (e.g. reclaiming a slur), else 0.

    NOTE(review): the standalone ethnic_groups loop below reuses `key`
    after the `for key in me` loop has finished; if that last pronoun is
    absent from `words`, `words.index(key)` raises ValueError — confirm
    intent. The check also appears twice (once nested, once after the
    loop), which looks like accidental duplication.
    """
    words = tknzr.tokenize(text)
    #check word in ethnic_groups comes before word in me
    #e.g. the beaner in me forgets I like beans
    for word in ethnic_groups:
        if word in words[0:]:
            for key in me:
                if key in words[words.index(word):]:
                    return 1
    #check if word in me comes before word in reclaiming
    #e.g. i'm a proud beaner
    for key in me:
        if key in words[0:]:
            for word in reclaiming:
                if word in words[words.index(key):]:
                    return 1
            #check if word in me comes before word in ethnic_groups
            for word in ethnic_groups:
                if word in words[words.index(key):]:
                    return 1
    #check if word in me comes before word in ethnic_groups
    #e.g. We beaners have to stick together
    for word in ethnic_groups:
        if word in words[words.index(key):]:
            return 1
    #check if word in reclaiming comes after modality
    #e.g. all beaners should go home is offensive
    for key in modality:
        if key in words[0:]:
            for word in reclaiming:
                if word in words[words.index(key):]:
                    return 1
    return 0
# -
# Offensive to women/words that hurt
# +
words_that_hurt = {
'bitch': 'Targets and dehumanizes women, even if used toward men, including queer and gay men. Devalues women and femininity. Reinforces sexism.',
'ghetto' :'Describes something or someone as cheap, worn out, poor, dangerous, etc. Reference to housing communities that are impoverished and disproportionately impact people of color. Associates people of color with these negative characteristics.',
'ratchett':'Describes something or someone as cheap, worn out, poor, dangerous, etc. Reference to housing communities that are impoverished and disproportionately impact people of color. Associates people of color with these negative characteristics.',
'illegal alien': 'Reduces undocumented immigrants to something less than human. Fixates on legal status instead of people as individuals. Asserts that some people belong here more than others do. Ignores political, social, and economic factors that impact people of color.',
'no homo': 'Stresses the speaker\'s heterosexuality, masculinity, and/or other traits to avoid being perceived as LGBTQIA. Goes to great lengths to avoid association with anything queer. Reinforces that to be LGBTQIA is bad.',
'retarded': 'Targets mental, emotional and physical disabilities as objects for ridicule. Used as synonyms for "worthless," "bad," "unintelligent," "incapable," etc.',
'retard': 'Targets mental, emotional and physical disabilities as objects for ridicule. Used as synonyms for "worthless," "bad," "unintelligent," "incapable," etc.',
'lame': 'Targets mental, emotional and physical disabilities as objects for ridicule. Used as synonyms for "worthless," "bad," "unintelligent," "incapable," etc.',
'crazy':'Targets mental, emotional and physical disabilities as objects for ridicule. Used as synonyms for "worthless," "bad," "unintelligent," "incapable," etc.',
'dumb': 'Targets mental, emotional and physical disabilities as objects for ridicule. Used as synonyms for "worthless," "bad," "unintelligent," "incapable," etc.',
'that\'s so gay': 'Stigmatizes gay and queer people. Uses their identities to describe something as undesirable and bad. Replaces negative adjectives with words related to LGBTQIA identities.',
'whore': 'Dismisses anyone seen as being "too" sexual, particularly sex workers, women, LGBTQI people and people of color. Perpetuates negativity toward sex itself. Regulates who is allowed to have it.',
'ho': 'Dismisses anyone seen as being "too" sexual, particularly sex workers, women, LGBTQI people and people of color. Perpetuates negativity toward sex itself. Regulates who is allowed to have it.',
'slut': 'Dismisses anyone seen as being "too" sexual, particularly sex workers, women, LGBTQI people and people of color. Perpetuates negativity toward sex itself. Regulates who is allowed to have it.',
'Bisexuality doesn\'t really exist. People are just gay or straight.': 'This denies the fluidity of sexuality and dismisses people\'s experiences and definitions of self. People deserve the right to define their own identities any way they wish and have those definitions honored.',
'i think everyone is bisexual': 'While this is often meant to acknowledge the fluidity of sexuality, it dismisses the reality of people who identify as bisexual and erases their experiences. It also invalidates the self-identifications of non-bisexual people.',
'You\'re too femme to be bisexual':'Gender presentation does not indicate sexual orientation. Bisexual people have a wide range of gender presentations.',
'You\'re too butch to be bisexual':'Gender presentation does not indicate sexual orientation. Bisexual people have a wide range of gender presentations.',
'Bisexual people just want straight privilege':'Bisexual people experience discrimination within straight communities and lesbian/gay communities. They never fully experience straight privilege because they do not identify as straight. Often their identities are made invisible and denied.',
'Bisexual people are just greedy and want to have sex with everyone.':'This stereotypes bisexual people and assumes they are all promiscuous - and that this is a bad thing. It creates negative attitudes toward sex and works against creating a sex positive climate. It also demonstrates an underlying belief that bisexuality is only about behavior and is not a legitimate identity.',
'Who do you see yourself ending up with?':'This is another way of implying one has to "end up" gay or straight and ignores bisexuality as an identity versus a relationship status. It also assumes everyone desires to be in a long-term monogamous relationship.',
'Tranny':'Whether or not someone identifies as trans*, calling anyone "tranny" is extremely offensive. While some folks within the trans* community may choose to reclaim this word for themselves, it is not a word that is okay to use to label another person or use as a joke.',
'That person doesn\'t really look like a woman':'What does it mean to look like a man or woman? There are no set criteria. It also should not be assumed that all Trans Men strive to fit within dominant ideas of masculinity or all Trans Women strive to fit within dominant ideas of femininity, or that all Trans* people want to look like men or women. Gender presentation is fluid and distinct from gender identity, and all forms of gender expression deserve affirmation.',
'That person doesn\'t really look like a man':'What does it mean to look like a man or woman? There are no set criteria. It also should not be assumed that all Trans Men strive to fit within dominant ideas of masculinity or all Trans Women strive to fit within dominant ideas of femininity, or that all Trans* people want to look like men or women. Gender presentation is fluid and distinct from gender identity, and all forms of gender expression deserve affirmation.',
'What is your REAL name? I mean the one you were given at birth':'This implies that the person\'s gender identity and chosen name are not "real" and perpetuates the idea of Trans people as deceptive. It removes agency and any right to make decisions for themselves, and is incredibly invalidating. It presumes a right to intimate information, disregards privacy, and places Trans lives on public display.',
'He-She':'This hyphenated term is demeaning and invalidates an individual\'s identity and the pronouns that they use.',
'What are you REALLY? Have you had surgery?': 'Asking anyone personal questions about their bodies and/or surgeries is invasive and inappropriate. We don\'t ask cisgender people about what is under their clothes; we shouldn\'t ask Trans* people either.',
'cunt':'Using words that refer to people with vaginas to express that someone is weak or emotional. Dehumanizes womxn and perpetuates misogyny and sexism.',
'twat':'Using words that refer to people with vaginas to express that someone is weak or emotional. Dehumanizes womxn and perpetuates misogyny and sexism.',
'pussy':'Using words that refer to people with vaginas to express that someone is weak or emotional. Dehumanizes womxn and perpetuates misogyny and sexism.',
'thot':'Word created to express womxn or people who are sexually promiscuous. There are speculations that the word comes from the KKK organization that referred to Black women who were forced into prostitution (i.e. <NAME>: Hottentot).',
'ugly':'Word used to put down someone for the way they look, can be connected back to white supremacist, ableist, sizeist standards of beauty.',
'you guys':'Erases the identities of people who are in the room. Generalizing a group of people to be masculine.',
'I\'m being such a fat-ass':'Demeans and devalues fatness/fat bodies, reinforces harmful assumptions that fat people are gluttonous and are fat because they have no restraint around food. Also implies that there is an acceptable amount of food to eat and anything more is disgusting, or that enjoying food too much is disgusting.',
'I\'m being so fat right now!':'Demeans and devalues fatness/fat bodies, reinforces harmful assumptions that fat people are gluttonous and are fat because they have no restraint around food. Also implies that there is an acceptable amount of food to eat and anything more is disgusting, or that enjoying food too much is disgusting.'
}
hurtfulWords = list(words_that_hurt.keys())
# +
#Binary Feature #6 1) ID tweets with female pronouns 2) Check if these words are in the tweet
#these words are used disproportionately often against women
#the behaviour they describe often goes unremarked in men.
#source: http://sacraparental.com/2016/05/14/everyday-misogyny-122-subtly-sexist-words-women/
#EVERYDAY MISOGYNY: 122 SUBTLY SEXIST WORDS ABOUT WOMEN (AND WHAT TO DO ABOUT THEM)
female_and_nongender_Pronouns = set(['you','she','its','their','yours',
'her', 'it', 'they', 'them',
'yourself', 'herself', 'themselves',
'your','hers'])
pronouns = {'I': ('personal', True, 'first'),
'me': ('personal', True, 'first'),
'we': ('personal', False, 'first'),
'us': ('personal', False, 'first'),
'you': ('personal', False, 'second'),
'she': ('personal', True, 'third'),
'he': ('personal', True, 'third'),
'her': ('possessive', True, 'third'),
'him': ('personal', True, 'third'),
'it': ('personal', True, 'third'),
'they': ('personal', False, 'third'),
'them': ('personal', False, 'third'),
'myself': ('reflexive', True, 'first'),
'ourselves': ('reflexive', False, 'first'),
'yourself': ('reflexive', True, 'second'),
'yourselves': ('reflexive', False, 'second'),
'himself': ('reflexive', True, 'third'),
'herself': ('reflexive', True, 'third'),
'itself': ('reflexive', True, 'third'),
'themselves': ('reflexive', False, 'third'),'my': ('possessive', True, 'first'),
'your': ('possessive', False, 'second'),
'his': ('possessive', True, 'third'),
'hers': ('possessive', True, 'third'),
'its': ('possessive', True, 'third'),
'our': ('possessive', False, 'first'),
'their': ('possessive', False, 'third'),
'mine': ('possessive', True, 'first'),
'yours': ('possessive', False, 'second'),
'ours': ('possessive', False, 'first')}
female_offensive = ['bossy', 'abrasive', 'ball-buster', 'aggressive',
'shrill', 'bolshy', 'intense', 'stroppy', 'forward',
'mannish', 'gossipy', 'Dramatic', 'Drama Queen', 'Catty',
'Bitchy', 'Nag', 'Cold', 'Ice queen', 'Shrew', 'Humourless',
'Man-hater', 'Banshee', 'Fishwife', 'Lippy', 'Ditzy', 'Feminazi',
'militant feminist', 'Bridezilla', 'Diva', 'Prima donna', 'Blonde moment',
'Feisty', 'Supermum','Working mother', 'Career woman', 'Yummy mummy', 'Little old lady',
'WAHM', 'Slut', 'Trollop','Frigid','Easy','Tease','Loose','Man-eater','Cougar',
'Asking for it','prude','the town bike', 'Mutton dressed as lamb','Slutty','Curvy','Mumsy',
'Cheap','That dress is flattering','Frumpy','Let herself go','Faded beauty','Mousey',
'Plus-size','Clotheshorse','Brunette ','Ladylike','Bubbly','Vivacious','Flirty',
'Sassy','Chatty','Demure','Modest','Emotional','Hysterical','Hormonal',
'Menstrual ',' pre-menstrual ','Flaky','Moody','Over-sensitive',
'Clucky','Neurotic','Irrational','Baby brain','Baby weight','Mummy blogger',
'Female engineer','That’s good, for a girl','Like a girl','run like a girl',
'throw like a girl','Mumpreneur','Spinster','Barren','She wears the pants','Housewife',
'Houseproud','Soccer mom','Mistress','Kept woman','Incompetent cervix',
'Failure to progress','Elderly primagravida','Irritable uterus','Tomboy',
'Girly','a girly girl','Little lady','Jail-bait','Heart-breaker','pretty little thing','Catfight','Mommy wars','Caring','Compassionate','Hard-working',
'Conscientious','Dependable','Diligent','Dedicated','Tactful','Interpersonal','Warm',
'Helpful','Maternal', 'Princess', 'Heart-breaker']
#most tweeted to <NAME> by Trump and trump supporters
#https://www.vox.com/2016/1/27/10852876/donald-trump-supporters-sexist-tweets-megyn-kelly
trump_suppporters_megynKelly = ["ugly", "cheap", 'bitch', 'whore', 'bimbo',
'cunt', 'hooker', 'slut', 'skank']
others = ['hoe', 'pussy', 'bitches', 'fatty', 'fatass', 'fat-ass']
offsensive_words_toward_women = female_offensive + trump_suppporters_megynKelly + others + hurtfulWords
# +
female_offensive_words = set()
for word in offsensive_words_toward_women:
female_offensive_words.add(word.lower())
#female_offensive_words
def check_offensive_to_women(text):
    """Count misogynistic terms in a tweet that addresses women.

    A tweet is only scored when it contains at least one female or
    non-gendered pronoun; otherwise returns 0. The score is the number
    of distinct offensive terms present.
    """
    tokens = {word.lower() for word in text.split()}
    if not female_and_nongender_Pronouns.intersection(tokens):
        return 0
    # len() of an empty intersection is 0, matching the original fallthrough.
    return len(female_offensive_words.intersection(tokens))
# -
# NRC emotions
# NRC emotion lexicon: one boolean column per emotion, terms in 'term'.
nrc_emotions_df = pd.read_csv("nrc_emotions.csv")
# .values yields a 2-D (n, 1) string array; the `word in arr` tests below
# rely on numpy's elementwise __contains__ ((arr == word).any()).
anger = nrc_emotions_df.loc[nrc_emotions_df['anger']][['term']].values
anticipation = nrc_emotions_df.loc[nrc_emotions_df['anticipation']][['term']].values
disgust = nrc_emotions_df.loc[nrc_emotions_df['disgust']][['term']].values
fear = nrc_emotions_df.loc[nrc_emotions_df['fear']][['term']].values
joy = nrc_emotions_df.loc[nrc_emotions_df['joy']][['term']].values
sadness = nrc_emotions_df.loc[nrc_emotions_df['sadness']][['term']].values
surprise = nrc_emotions_df.loc[nrc_emotions_df['surprise']][['term']].values
trust = nrc_emotions_df.loc[nrc_emotions_df['trust']][['term']].values
def _lexicon_hits(text, lexicon):
    """Tokenize `text` with the tweet tokenizer and count tokens present in `lexicon`."""
    return sum(1 for token in tknzr.tokenize(text) if token in lexicon)
def anger_count(text):
    """Number of tokens found in the NRC 'anger' lexicon."""
    return _lexicon_hits(text, anger)
def anticipation_count(text):
    """Number of tokens found in the NRC 'anticipation' lexicon."""
    return _lexicon_hits(text, anticipation)
def disgust_count(text):
    """Number of tokens found in the NRC 'disgust' lexicon."""
    return _lexicon_hits(text, disgust)
def joy_count(text):
    """Number of tokens found in the NRC 'joy' lexicon."""
    return _lexicon_hits(text, joy)
def fear_count(text):
    """Number of tokens found in the NRC 'fear' lexicon."""
    return _lexicon_hits(text, fear)
def sadness_count(text):
    """Number of tokens found in the NRC 'sadness' lexicon."""
    return _lexicon_hits(text, sadness)
def surprise_count(text):
    """Number of tokens found in the NRC 'surprise' lexicon."""
    return _lexicon_hits(text, surprise)
def trust_count(text):
    """Number of tokens found in the NRC 'trust' lexicon."""
    return _lexicon_hits(text, trust)
#groups = open('groups.txt','r').read().split('\n')
ethnic_groups = []
with open('ethnic_groups_and_common_slurs.txt', 'r') as fileinput:
for line in fileinput:
ethnic_groups.append((line.split('\n'))[0].lower())
#demonstrative adjectives and other words that can inidicate targeting of a specific group
targets = ['all', 'every', 'you', 'those', 'these', 'any', 'each', 'no', 'that', 'this', ]
modality = ['should', 'can', 'can\'t', 'cannot', 'won\'t', 'will', 'want']
# +
#If tweet contains a targeted statement referring to a certain group, i.e. "all you Asians" or "every Mexican"
#also checks if a group word is followed by some sort of modal verb
def contains_target(text):
    """Return 1 when a targeting word directly precedes a group term, or a
    group term directly precedes a modal verb; else 0."""
    tokens = tknzr.tokenize(text)
    for current, following in zip(tokens, tokens[1:]):
        low = current.lower()
        # Mirror the original if/elif: the targets branch takes priority
        # when a token appears in both lists.
        if low in targets:
            if following.lower() in ethnic_groups:
                return 1
        elif low in ethnic_groups:
            if following.lower() in modality:
                return 1
    return 0
# +
def other_features_base(tweet):
    """Davidson-style surface features for a tweet (no sentiment/lexicons).

    Returns [num_chars, num_chars_total, num_terms, num_words,
    num_unique_terms, num_hashtags, num_mentions, num_urls, is_retweet].
    """
    words = preprocess(tweet) #Get text only (note: a string, despite the name)
    syllables = textstat.syllable_count(words)
    num_chars = sum(len(w) for w in words)  # iterates characters, so == len(words)
    num_chars_total = len(tweet)
    num_terms = len(tweet.split())
    num_words = len(words.split())
    avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)
    num_unique_terms = len(set(words.split()))
    ###Modified FK grade, where avg words per sentence is just num words/1
    FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)
    ##Modified FRE score, where sentence fixed to 1
    FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)
    # NOTE(review): FKRA and FRE are computed but never added to `features`
    # — confirm whether they were meant to be returned.
    twitter_objs = count_twitter_objs(tweet)  # (urls, mentions, hashtags)
    retweet = 0
    # NOTE(review): substring test — "rt" also matches inside words like
    # 'shirt'; a token-level check may be intended.
    if "rt" in words:
        retweet = 1
    features = [num_chars, num_chars_total, num_terms, num_words,
                num_unique_terms,
                twitter_objs[2], twitter_objs[1],
                twitter_objs[0], retweet]
    #features = pandas.DataFrame(features)
    return features
def other_features(tweet):
    """This function takes a string and returns a list of features.
    These include Sentiment scores, Text and Readability scores,
    as well as Twitter specific features.

    Returns a flat list ordered as in `other_features_names`.
    """
    #Our features
    text_only = preprocess(tweet) #Get text only
    words = remove_slang(text_only) #replace slang/abbreviations with full words
    # (dropped: an unused `senti = slang_sentiment(text_only)` computation
    # and an unused `syllables = textstat.syllable_count(...)` call — both
    # were dead work.)
    # Sentiment components of the slang-expanded text (0=pos, 1=neg, 2=obj).
    pos = positive(tweet, 0)
    neg = negative(tweet, 1)
    obj = objective(tweet, 2)
    no_slang_str = ''.join(words)
    # NRC emotion lexicon counts on the slang-expanded text.
    trustCount = trust_count(no_slang_str)
    surpriseCount = surprise_count(no_slang_str)
    sadnessCount = sadness_count(no_slang_str)
    fearCount = fear_count(no_slang_str)
    joyCount = joy_count(no_slang_str)
    disgustCount = disgust_count(no_slang_str)
    anticipationCount = anticipation_count(no_slang_str)
    angerCount = anger_count(no_slang_str)
    isSelfReferential = contains_target_self_referential(no_slang_str)
    hasQuotes = contains_quotes(tweet)
    targeted = contains_target(text_only)
    immigrant_ref = 0
    # Bug fix: str.find() returns -1 (truthy) when absent, so the original
    # `if text_only.find('immigrant') or text_only.find('immigrants')`
    # flagged nearly every tweet. Substring containment is the intended
    # test; 'immigrants' is covered because it contains 'immigrant'.
    if 'immigrant' in text_only:
        immigrant_ref = 1
    isOffensiveToWomen = check_offensive_to_women(tweet)
    #Davidson features
    num_chars = sum(len(w) for w in text_only)  # iterates characters, == len(text_only)
    num_chars_total = len(tweet)
    num_terms = len(tweet.split())
    num_words = len(tweet.split())  # NOTE(review): identical to num_terms by construction
    num_unique_terms = len(set(text_only.split()))
    twitter_objs = count_twitter_objs(tweet)  # (urls, mentions, hashtags)
    retweet = 0
    if "rt" in words:
        retweet = 1
    features = [num_chars, num_chars_total, num_terms, num_words,
                num_unique_terms,
                twitter_objs[2], twitter_objs[1],
                twitter_objs[0], retweet, targeted, immigrant_ref, isOffensiveToWomen,
                trustCount, surpriseCount, sadnessCount, angerCount, fearCount,
                joyCount, disgustCount, anticipationCount, isSelfReferential, hasQuotes, pos, neg, obj]
    return features
def get_feature_array(tweets, base):
    """Build a 2-D numpy array with one feature row per tweet.

    When *base* is truthy only the Davidson baseline features are
    extracted (other_features_base); otherwise the full hand-built
    feature set is used (other_features).
    """
    extractor = other_features_base if base else other_features
    return np.array([extractor(t) for t in tweets])
# +
# Column labels for the full hand-built feature vector; order must stay in
# sync with the list built at the end of other_features().
other_features_names = ["num_chars", "num_chars_total", "num_terms", "num_words", "num_unique_words", "num_hashtags", \
                        "num_mentions", "num_urls", "is_retweet", "targeted", "immigrant_ref", "isOffensiveToWomen",
                        "trustCount", "surpriseCount", "sadnessCount", "angerCount", "fearCount",
                        "joyCount", "disgustCount", "anticipationCount", "isSelfReferential", "hasQuotes", "pos", "neg", "obj"]
# Column labels for the Davidson baseline features (other_features_base).
other_features_base_names = ["num_chars", "num_chars_total", "num_terms", "num_words", "num_unique_words", "num_hashtags", \
                        "num_mentions", "num_urls", "is_retweet"]
# -
# Feature matrices for the training tweets: baseline vs full hand-built set.
base_feats = get_feature_array(tweets, True)
hand_built_feats = get_feature_array(tweets, False)
# ## Flair
from flair.embeddings import DocumentPoolEmbeddings, WordEmbeddings, CharacterEmbeddings, StackedEmbeddings, FlairEmbeddings, BertEmbeddings
import torch
#stack word-level twitter embeddings and forward/backward flair sentence embeddings
# Pre-trained embedding models (downloaded on first use by flair).
news_forward = FlairEmbeddings('news-forward-fast')
news_backward = FlairEmbeddings('news-backward-fast')
twitter = WordEmbeddings('twitter')
bert = BertEmbeddings('bert-base-uncased')
#elmo = ELMoEmbeddings('small')
from flair.data import Sentence
# Create embedding for tweets by getting token-level embeddings from stacked embedding
import time, sys
from IPython.display import clear_output
def update_progress(progress):
    """Render a simple in-notebook text progress bar for *progress* in [0, 1]."""
    bar_length = 20
    # Coerce to float; anything non-numeric becomes 0, and the value is
    # clamped into [0, 1].
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        progress = 0
    if progress < 0:
        progress = 0
    if progress >= 1:
        progress = 1
    filled = int(round(bar_length * progress))
    clear_output(wait = True)
    bar = "#" * filled + "-" * (bar_length - filled)
    print("Progress: [{0}] {1:.1f}%".format(bar, progress * 100))
def embed_tweets(model, row_len, tweets):
    """Embed every tweet with *model* into a (len(tweets), row_len) array.

    Each tweet is wrapped in a flair Sentence, embedded, and the detached
    document vector is flattened into one row.  A progress bar is updated
    after every tweet.
    """
    total = len(tweets)
    embeddings = np.empty([total, row_len])
    for idx, tweet in enumerate(tweets):
        sentence = Sentence(tweet)
        model.embed(sentence)
        embeddings[idx] = np.array(sentence.get_embedding().detach()).flatten()
        update_progress((idx + 1) / total)
    update_progress(1)
    return embeddings
# +
#tweet_embed = DocumentPoolEmbeddings([twitter])
# +
# #%%time
#tweet_embeddings = embed_tweets(tweet_embed,100)
#np.savetxt('tweet_embeddings.txt', tweet_embeddings)
# Pre-computed document embeddings are loaded from disk instead of being
# recomputed; the commented-out cells above show how they were generated.
tweet_embeddings = np.loadtxt('tweet_embeddings.txt')
# +
#bert_embed = DocumentPoolEmbeddings([bert])
# +
# #%%time
#bert_embeddings = embed_tweets(bert_embed, 3072)
#np.savetxt('bert_embeddings.txt', bert_embeddings)
#bert_embeddings = np.loadtxt('bert_embeddings.txt')
# -
# Stacked document embedding: forward/backward Flair LMs + BERT + twitter word vectors.
bert_news_twitter_embed = DocumentPoolEmbeddings([news_forward, news_backward, bert, twitter])
# +
#bert_news_twitter_embeddings = embed_tweets(bert_news_twitter_embed, 5220)
#np.savetxt('bert_news_twitter_embeddings.txt', bert_news_twitter_embeddings)
bert_news_twitter_embeddings = np.loadtxt('bert_news_twitter_embeddings.txt')
# -
# ## Train Models with features
from sklearn.model_selection import StratifiedKFold, GridSearchCV, train_test_split
from sklearn.pipeline import Pipeline
# Flair embeddings
#Now join them all up
# Candidate feature matrices: TF-IDF + POS, plus baseline or hand-built
# features, optionally concatenated with document embeddings.
M1 = np.concatenate([tfidf,pos,base_feats,bert_news_twitter_embeddings],axis=1)
#M2 = np.concatenate([tfidf,pos,feats,bert_embeddings],axis=1)
M3 = np.concatenate([tfidf,pos,base_feats,tweet_embeddings],axis=1)
# Hand-built features
M4 = np.concatenate([tfidf,pos,hand_built_feats],axis=1)
# Combine hand-built and twitter embeddings
M5 = np.concatenate([tfidf,pos,hand_built_feats,tweet_embeddings],axis=1)
M6 = np.concatenate([tfidf,pos,hand_built_feats,bert_news_twitter_embeddings],axis=1)
# ## Train
def train_model(M):
    """Fit an L1-feature-selection + L2 logistic-regression pipeline on M.

    M is the feature matrix for the training tweets; labels come from the
    module-level `df['class']`.  Returns the fitted GridSearchCV object
    (the grid is empty, so this is effectively cross-validated fitting of
    a single pipeline).
    """
    X = pd.DataFrame(M)
    y = df['class'].astype(int)
    pipe = Pipeline(
        [('select', SelectFromModel(LogisticRegression(class_weight='balanced',
                                                       penalty="l1",
                                                       # FIX: the default lbfgs solver does not
                                                       # support the l1 penalty (sklearn >= 0.22)
                                                       solver='liblinear',
                                                       C=0.01))),
         ('model', LogisticRegression(class_weight='balanced', penalty='l2'))])
    param_grid = [{}]  # Optionally add parameters here
    # FIX: StratifiedKFold raises in sklearn >= 0.22 when random_state is
    # set without shuffle=True; shuffling also makes the seed meaningful.
    grid_search = GridSearchCV(pipe,
                               param_grid,
                               cv=StratifiedKFold(n_splits=5,
                                                  shuffle=True,
                                                  random_state=42).split(X, y),
                               verbose=2)
    model = grid_search.fit(X, y)
    return model
# Fit one model per feature matrix; train_model returns a fitted GridSearchCV.
# NOTE(review): the commented-out savetxt cells below reference `*_true` /
# `*_pred` variables that train_model never produces -- leftovers from an
# earlier version of the notebook.
tweet_only_LR = train_model(M3)
# +
# np.savetxt("tweet_only_true.txt", tweet_only_true)
# np.savetxt("tweet_only_pred.txt", tweet_only_pred)
# +
#bert_only_true, bert_only_pred, bert_only_model = train_model(M2)
# +
# np.savetxt("bert_only_true.txt", bert_only_true)
# np.savetxt("bert_only_pred.txt", bert_only_pred)
# -
bert_news_twitter_model = train_model(M1)
# +
# np.savetxt("bert_news_twitter_true.txt", bert_news_twitter_true)
# np.savetxt("bert_news_twitter_pred.txt", bert_news_twitter_pred)
# -
hand_built_model = train_model(M4)
# +
# np.savetxt("hand_built_true.txt", hand_built_true)
# np.savetxt("hand_built_pred.txt", hand_built_pred)
# -
combined_model = train_model(M5)
bnt_hb_combined_model = train_model(M6)
# ## Training Evaluation
all_tweets = df[['tweet', 'class']]
def evaluate(y_true, y_preds, tweet):
    """Print a classification report and split *tweet* into missed/correct rows.

    y_true  -- true labels (array-like / Series aligned positionally with tweet)
    y_preds -- predicted labels, same length
    tweet   -- DataFrame of the corresponding tweets (one row per prediction)

    Returns (missed_tweets, correct_tweets): copies of the mispredicted and
    correctly predicted rows of *tweet*, each with a 'prediction' column added.
    """
    report = classification_report(y_true, y_preds)
    print(report)
    y_true_arr = np.asarray(y_true)
    y_preds_arr = np.asarray(y_preds)
    # Positions (not labels) of misses/hits.
    misses = np.where(y_true_arr != y_preds_arr)[0]
    corrects = np.where(y_true_arr == y_preds_arr)[0]
    # BUG FIX: the old code converted positions into index *labels*
    # (list(y_true.index)[i]) and then fed them to .iloc, which is
    # positional.  That only worked for a default RangeIndex and silently
    # selected wrong rows for the concatenated test frame, whose index
    # contains duplicate labels.  Pure positional indexing is correct in
    # both cases.  .copy() also avoids the SettingWithCopy warning the old
    # .loc[:,'prediction'] assignment on a slice produced.
    missed_tweets = tweet.iloc[misses].copy()
    missed_tweets['prediction'] = y_preds_arr[misses]
    correct_tweets = tweet.iloc[corrects].copy()
    correct_tweets['prediction'] = y_preds_arr[corrects]
    return missed_tweets, correct_tweets
# Tweet sentence embeddings
# NOTE(review): the `*_true` / `*_pred` names used in the next few cells are
# never defined in this notebook (train_model returns only the fitted
# model), so these calls will raise NameError as written.
tweet_missed = evaluate(tweet_only_true, tweet_only_pred, all_tweets)
# BERT embeddings
bert_missed = evaluate(bert_only_true, bert_only_pred, all_tweets)
# Combined BERT, news, and tweet embeddings
# NOTE(review): evaluate() takes three arguments; the two-argument calls
# below would raise TypeError even if the inputs existed.  evaluate() also
# returns a 2-tuple, which is assigned to a single name here.
bert_news_twitter_missed = evaluate(bert_news_twitter_true, bert_news_twitter_pred)
hand_built_missed = evaluate(hand_built_true, hand_built_pred)
both_missed = evaluate(combined_true, combined_pred)
# ## Run on test set
testing = pd.read_csv("data/test/testing_data.csv")
dev = pd.read_csv("data/dev/development_data.csv") #dev wasn't used in training
# NOTE(review): concat keeps the original row labels, so `test` carries
# duplicate index values -- downstream label-based indexing must beware.
test = pd.concat([testing, dev], sort=False)
y_test = test['class'].astype(int)
test_tweets = test['tweet']
#use transform instead of fit_transform to get vector in same space as training data
test_tfidf = vectorizer.transform(test_tweets).toarray()
# POS-tag each test tweet and vectorize the tag strings with the fitted pos_vectorizer.
test_tweet_tags = []
for t in test_tweets:
    tokens = basic_tokenize(preprocess(t))
    tags = nltk.pos_tag(tokens)
    tag_list = [x[1] for x in tags]
    tag_str = " ".join(tag_list)
    test_tweet_tags.append(tag_str)
test_pos = pos_vectorizer.transform(pd.Series(test_tweet_tags)).toarray()
base_test_feats = get_feature_array(test_tweets, True)
hand_built_test_feats = get_feature_array(test_tweets, False)
# Test tweet only embeddings
tweet_embed = DocumentPoolEmbeddings([twitter])
test_tweet_embeddings = embed_tweets(tweet_embed,100, test_tweets)
test_M3 = np.concatenate([test_tfidf,test_pos,base_test_feats,test_tweet_embeddings],axis=1)
tweet_only_preds = tweet_only_LR.predict(test_M3)
# Test hand-built features
test_M4 = np.concatenate([test_tfidf,test_pos,hand_built_test_feats],axis=1)
hand_built_preds = hand_built_model.predict(test_M4)
# Test combined hand-built and tweet embeddings
test_M5 = np.concatenate([test_tfidf,test_pos,hand_built_test_feats,test_tweet_embeddings],axis=1)
combined_preds = combined_model.predict(test_M5)
# Bert, news, and twitter embeddings
test_bert_news_twitter_embeddings = embed_tweets(bert_news_twitter_embed, 5220, test_tweets)
# +
#test_bert_news_twitter_embeddings.shape, bert_news_twitter_embeddings.shape,
# +
#test_tfidf.shape, tfidf.shape,test_pos.shape, pos.shape,base_test_feats.shape, base_feats.shape
# -
test_M1 = np.concatenate([test_tfidf,test_pos,base_test_feats,test_bert_news_twitter_embeddings],axis=1)
test_M1.shape, M1.shape
bert_news_twitter_preds = bert_news_twitter_model.predict(test_M1)
# Bert, news, and twitter embeddings with hand-built features
test_M6 = np.concatenate([test_tfidf,test_pos,hand_built_test_feats,test_bert_news_twitter_embeddings ],axis=1)
bnt_hb_combined_preds = bnt_hb_combined_model.predict(test_M6)
# ## Test Evaluation
testing_tweets = test[['tweet','class']]
test_tweet_only_missed, test_tweet_only_correct = evaluate(y_test, tweet_only_preds, testing_tweets)
test_hand_built_missed, test_hand_built_correct = evaluate(y_test, hand_built_preds, testing_tweets)
test_combined_missed, test_combined_correct = evaluate(y_test, combined_preds, testing_tweets)
test_bert_news_twitter_missed, test_bert_news_twitter_correct = evaluate(y_test, bert_news_twitter_preds, testing_tweets)
test_bnt_hb_combined_missed, test_bnt_hb_combined_correct = evaluate(y_test, bnt_hb_combined_preds, testing_tweets)
# NOTE(review): `combined_missed` is not defined at this point -- the line
# below probably means `test_combined_missed`.
combined_missed.to_csv("test_tweet_hand_combined_missed.csv", sep='\t')
test_hand_built_missed.to_csv("test_hand_built_missed.csv", sep='\t')
test_tweet_only_missed.to_csv("test_tweet_only_missed.csv", sep='\t')
test_bert_news_twitter_missed.to_csv("test_bert_news_twitter_missed.csv", sep='\t')
test_bnt_hb_combined_missed.to_csv("test_bnt_hb_combined_missed.csv", sep='\t')
# NOTE(review): max_colwidth = -1 is deprecated in recent pandas; use None.
pd.set_option('display.max_colwidth', -1)
pd.set_option('display.max_rows', 700)
# Correctly / incorrectly classified hate-class (class == 2) tweets.
test_bnt_hb_combined_correct_hate = test_bnt_hb_combined_correct.loc[test_bnt_hb_combined_correct['class']==2][['tweet']]
test_bnt_hb_combined_correct_hate.columns
test_bnt_hb_combined_correct_hate
test_bnt_hb_combined_missed_hate = test_bnt_hb_combined_missed.loc[test_bnt_hb_combined_missed['class']==2][['tweet']]
# +
#test_bnt_hb_combined_missed.to_csv("test_bnt_hb_combined_missed.csv", sep='\t')
# +
#test_bnt_hb_combined_correct.to_csv("test_bnt_hb_combined_correct.csv", sep='\t')
# +
#test_hand_built_missed.to_csv("test_hand_built_missed.csv", sep='\t')
# +
#test_hand_built_correct.to_csv("test_hand_built_correct.csv", sep='\t')
# -
# ## Compare with baseline
test_baseline_correct = pd.read_csv('test_baseline_correct.csv', sep='\t')[['tweet','class','prediction']]
test_baseline_correct_hate = test_baseline_correct[test_baseline_correct['class']==2][['tweet']]
test_baseline_missed = pd.read_csv('test_baseline_missed.csv', sep='\t')[['tweet','class','prediction']]
len(test_bnt_hb_combined_correct_hate), len(test_baseline_correct_hate)
# NOTE(review): `df_final` and `cols` are not defined anywhere in this
# notebook; the expressions below will raise NameError as written.
df_final
m = test_bnt_hb_combined_correct_hate.merge(test_baseline_correct, on=cols, how='outer', suffixes=['', '_'], indicator=True)
m
test_bnt_hb_combined_missed
| our_model.ipynb |
% ---
% jupyter:
% jupytext:
% text_representation:
% extension: .m
% format_name: light
% format_version: '1.5'
% jupytext_version: 1.14.4
% kernelspec:
% display_name: Matlab
% language: matlab
% name: matlab
% ---
% ## Chapter 7. Regression models
%
% ### Normal simple linear regression
%
% Data: $(x_i, y_i)$, $ i = 1, \dots, n$
%
% Model: $Y_i = \alpha + \beta x_i + e_i$ with $e_i \sim N(0, \sigma^2)$ i.i.d.
%
% ### Example
%
% The price of diamonds $Y$ depends on the size $X$.
size = [0.17 0.16 0.17 0.18 0.25 0.16 0.15 0.19 0.21 0.15 0.18 0.28 0.16 0.20 0.23 0.29 0.12 ...
0.26 0.25 0.27 0.18 0.16 0.17 0.16 0.17 0.18 0.17 0.18 0.17 0.15 0.17 0.32 0.32 0.15...
0.16 0.16 0.23 0.23 0.17 0.33 0.25 0.35 0.18 0.25 0.25 0.15 0.26 0.15]';
price = [...
355 328 350 325 642 342 322 485 483 323 462 823 336 498 595 860 223 ...
663 750 720 468 345 352 332 353 438 318 419 346 315 350 918 919 298 ...
339 338 595 553 345 945 655 1086 443 678 675 287 693 316]';
dia = table(size, price)
scatter(size, price)
xlim([0 0.35])
ylim([0 1100])
mq = fitlm(dia, 'price ~ size') % ~ is obtained with ALT125 with numeric keypad on
format bank
[price mq.Fitted price-mq.Fitted]
scatter(size, price)
refline(3721.02, -259.63)
xlim([0 0.35])
ylim([0 1100])
% ### Regression to the mean
% We simulate data from the simple regression model
% \begin{align*}
% X_i &\sim N(0,1) \\
% Y_i &\sim N( \textstyle \frac{1}{2} X_i, \sigma = 1/2)
% \end{align*}
% Simulate n points from the simple regression model described above.
n = 1000;
x = randn(n,1);
% NOTE(review): randn ./ sqrt(2) has standard deviation 1/sqrt(2) ~= 0.707,
% not the sigma = 1/2 stated in the markdown cell above -- confirm which
% value is intended.
y = x ./2 + randn(n,1) ./ sqrt(2);
plot(x,y, '.b')
refline(1/2,0)
refline([1,0])
vline([1,1.5])
% ### Example 7.6 Height data
%
% Data found on the book webpage at `http://www.aup.nl`.
%
% Height
% - $Y$ of adolescents (column 1)
% - $X_2$ of father (column 2)
% - $X_3$ of mother (column 3)
% - $X_4$ gender (column 4, 1 = man, 2 = woman)
X = [ 170 180 164 2
175 173 165 1
171 178 176 2
167 180 168 2
180 190 167 2
169 169 169 2
169 189 168 2
183 196 173 2
174 182 163 2
173 176 176 2
163 167 168 2
199 182 164 1
186 183 173 1
178 176 167 2
179 176 176 2
175 174 175 2
176 181 164 2
170 171 168 2
176 183 170 2
180 184 169 2
176 165 170 1
193 189 182 1
168 188 172 2
168 189 170 2
171 175 171 2
188 184 170 1
169 175 171 2
184 185 164 1
189 177 177 1
173 178 169 2
185 174 169 1
177 167 171 1
186 178 169 1
190 178 168 1
162 183 160 2
174 182 167 2
170 183 163 2
174 177 167 2
164 173 164 2
182 176 157 1
159 183 160 2
171 177 165 2
170 172 166 2
187 181 169 1
184 177 163 1
174 174 168 2
171 185 177 2
183 182 165 1
162 176 165 2
172 167 155 1
169.5 179 159 2
179 179 166 1
176 182 165 2
175 186 164 2
191 187 169 1
189 179 163 1
183 179 174 1
164 174 160 2
186 188 167 1
174 178 172 1
191 184 167 1
179 182 169 1
175 172 168 1
174 168 171 1
173 182 174 2
185 183 167 1
169 179 170 2
183 172 170 1
170 191 163 2
185 177 172 1
189 181 165 1
197 180 180 1
179 182 176 2
171 183 168 2
198 186 174 1
180 177 168 1
196 187 174 1
174 168 172 2
168 183 167 2
170 177.5 153.5 1
163 179 164 2
167 181 161 2
171 176 170 2
165 171 157 2
167 176 176 2
176 179 168 2
164 180 160 2
184 184 171 2
169 164 165 2
185 176 167 2
170 183 160 2
178 186 172 2
187 180 170 1
158 171 155 2
178 189 167 2
185 188 177 2
180 176 168 1
171 181 165 1
165 173 162 2
189 180 169 1
170 180 165 2
183 187 178 2
173 176 172 2
160 172 162 2
168 176.5 173 2
167.25 178.8 167.5 2
179 180 162 1
180 172.3 165.7 1
180.5 177.4 161.3 1
169 169 160.5 1
170.75 176.9 167.5 2];
% Model: $Y = \beta_1 + \beta_2 x_2 + \beta_3 x_3 + \beta_4 x_4 + e$ with $e\sim N(0, \sigma^2)$
% +
Y = X(:,1); father = X(:,2); mother = X(:, 3); gender = X(:,4);
% Transform the gender code into 1 for a boy and -1 for a girl
% (original coding: 1 = man, 2 = woman; 2*(1-g)+1 maps 1 -> 1 and 2 -> -1)
gender = 2 * (1 - gender) + 1;
height = table(Y, father, mother, gender)
% -
reg = fitlm(height, 'Y ~ father + mother + gender')
% Notes:
%
% - R-squared is the *coefficient of determination* = 0.69.
% - all the t-test are highly significant
% - the effect of gender is the same whatever the height of father and mother, i.e., about 12.5 cm
% $$
% E(Y\mid x_2, x_3, 1) - E(Y\mid x_2, x_3, -1) = 6.274 - (-6.274) = 12.5
% $$
% ### Regression with a linear constraint
%
% Constraining $\beta_3 = \beta_4$ can be done with the model
% $$
% Y = \beta_1 + \beta_2 (x_2 + x_3) + \beta_4 x_4 + e \text{ with } e\sim N(0, \sigma^2)
% $$
tot = father + mother;
height = table(Y, tot, gender);
reg_c = fitlm(height, 'Y ~ tot + gender')
eh = Y - reg_c.Fitted
qqplot(eh)
% Comment: the normality assumption is fine. There are two possible outliers.
% ### Additivity and Interaction
%
% Mr <NAME> of the UK Building Research Station recorded the weekly gas consumption and average external temperature at his own house in south-east England for two heating seasons, one of 26 weeks before, and one of 30 weeks after cavity-wall insulation was installed. The object of the exercise was to assess the effect of the insulation on gas consumption.
%
% - col 1 observation number
% - col 2 insulation 1 = no, 2 = yes
% - col 3 temperature in Celsius degrees
% - col 4 the weekly gas consumption in 1000s of cubic feet.
data = [ 1 1 -0.8 7.2
2 1 -0.7 6.9
3 1 0.4 6.4
4 1 2.5 6.0
5 1 2.9 5.8
6 1 3.2 5.8
7 1 3.6 5.6
8 1 3.9 4.7
9 1 4.2 5.8
10 1 4.3 5.2
11 1 5.4 4.9
12 1 6.0 4.9
13 1 6.0 4.3
14 1 6.0 4.4
15 1 6.2 4.5
16 1 6.3 4.6
17 1 6.9 3.7
18 1 7.0 3.9
19 1 7.4 4.2
20 1 7.5 4.0
21 1 7.5 3.9
22 1 7.6 3.5
23 1 8.0 4.0
24 1 8.5 3.6
25 1 9.1 3.1
26 1 10.2 2.6
27 2 -0.7 4.8
28 2 0.8 4.6
29 2 1.0 4.7
30 2 1.4 4.0
31 2 1.5 4.2
32 2 1.6 4.2
33 2 2.3 4.1
34 2 2.5 4.0
35 2 2.5 3.5
36 2 3.1 3.2
37 2 3.9 3.9
38 2 4.0 3.5
39 2 4.0 3.7
40 2 4.2 3.5
41 2 4.3 3.5
42 2 4.6 3.7
43 2 4.7 3.5
44 2 4.9 3.4
45 2 4.9 3.7
46 2 4.9 4.0
47 2 5.0 3.6
48 2 5.3 3.7
49 2 6.2 2.8
50 2 7.1 3.0
51 2 7.2 2.8
52 2 7.5 2.6
53 2 8.0 2.7
54 2 8.7 2.8
55 2 8.8 1.3
56 2 9.7 1.5];
% We recode the variables.
gas = data(:, 4); % 1000s of cubic feet
% NOTE(review): multiplying the "1000s of cubic feet" figure by 28.3168466
% (liters per cubic foot) yields 1000s of liters, not liters -- confirm
% the units quoted in the comments below.
gas = gas * 28.3168466; % liters
temp = data(:, 3);
insul = data(:, 2) - 1; % 0 = before 1 = after
% Red circles: before insulation; blue circles: after insulation.
plot(temp(insul==0), gas(insul==0), 'or')
hold on
plot(temp(insul==1), gas(insul==1), 'o')
hold off
vline(0)
% **Model with no interaction**
%
% First we fit a model without interaction (the regressors are additive)
whiteside = table(temp, gas, insul);
reg_add = fitlm(whiteside, 'gas ~ temp + insul')
% Comments:
% - The effects of temperature and insulation on gas consumptions are both negative.
% - Consumptions tend to decrease with increasing temperature and after insulation.
% - The effect of insulation specifically decreases consumption by about 44 liters whatever the temperature.
% - The effect of temperature is about -10 liters per 1 degree increase in temperature.
% - Both effects are highly significant
% - The average gas consumption at zero degrees temperature with no insulation is 185.51 liters.
% - The estimate of the standard deviation $\sigma$ is 10.1
% - The coefficient of determination is 0.91
% A graph is obtained plotting the fitted model, i.e., of two parallel regression lines
%
% \begin{align*}
% \texttt{gas} &= 185.51 - 9.5342\; \texttt{temp} \qquad (\text{no insulation}) \\
% \texttt{gas} &= (185.51 -44.322) - 9.5342\; \texttt{temp} \qquad (\text{with insulation})
% \end{align*}
plot(temp(insul==0), gas(insul==0), 'or')
hold on
plot(temp(insul==1), gas(insul==1), 'o')
hold off
vline(0)
refline(-9.5342, 185.51)
refline(-9.5342, 185.51-44.322)
% Notes:
% - the effect of the insulation is the difference between the intercepts that does not depend on the temperature
% - the effect of the temperature is the common slope of the lines
% **Model with interaction**
%
% Then we fit a model with interaction. That is we ad a regressor obtained by the product of the two explanatory variables $\texttt{temp} \cdot \texttt{insul}$
reg_int = fitlm(whiteside, 'gas ~ temp + insul + temp:insul')
% Comments
% - The interaction operator in the model formula is denoted by $\texttt{:}$
% - The t-test for the interaction term is highly significant
% - The other tests are not relevant (non hierarchical models are usually hardly interpretable)
% - The effect of temperature depends on insulation
% - The effect of insulation depends on temperature
%
% We can plot the model with two non parallel fitted lines:
%
% \begin{align*}
% \texttt{gas} &= 194.08 - 11.135\; \texttt{temp} \qquad (\text{no insulation}) \\
% \texttt{gas} &= (194.08 -60.314) - (11.135 - 3.265)\; \texttt{temp} \qquad (\text{with insulation})
% \end{align*}
plot(temp(insul==0), gas(insul==0), 'or')
hold on
plot(temp(insul==1), gas(insul==1), 'o')
hold off
vline(0)
refline(-11.135, 194.08)
refline(-11.135 +3.265, 194.08-60.314)
% Comments:
% - The slopes are significantly different. The difference is $3.265$
% - The vertical distance between the lines changes with the temperature.
% ### Logistic regression
%
% The **logistic function** (or *sigmoid*) is
% $$
% G(x) = \frac{e^x}{1+ e^x} = \frac{1}{1 + e^{-x}}
% $$
%
% The plot is shown below and compared with a normal CDF (in red) for a $N(0, \sigma = 1.8)$.
logi = @(x) 1./(1 + exp(-x));
nor = @(x) normcdf(x, 0, 1.8);
fplot(logi, [-5, 5])
hold on
fplot(nor)
% ### Example from Cox and Snell
%
% "Minor faults occur irregularly in an industrial process and
% the following experiment was done. Batches of raw material were selected and each batch was divided into two equal sections: for each batch, one of the sections was processed by the standard method and the other by a slightly modified process, in which the temperature at one stage is reduced. Before processing, a purity index was measured for the whole batch of material. For the product from each section of material it was recorded whether the minor faults did or did not occur."
%
% Below we consider the data for the sections processed by the standard method. `x` is the purity and `y` is the binary response (1=fault).
x = [7.2 6.3 8.5 7.1 8.2 4.6 8.5 6.9 8.0 8.0 9.1 6.5 4.9 5.3 7.1 8.4 8.5 6.6 9.1 7.1 7.5 8.3]';
y = [0 1 1 0 1 1 0 1 0 1 0 0 1 1 0 1 0 1 0 1 0 0]';
data = table(x,y)
% Linear regression and logistic regression.
mq = fitlm(data, 'y ~ x')
ml = fitglm(data, 'y ~ x', 'Distribution', 'binomial')
plot(x,y, 'xr')
hold on
logi = @(x) 1./ (1 + exp(-(6.43 - 0.87 .* x)));
fplot(logi, [2, 12], 'r')
refline(-0.18, 1.8)
ylim([0,1])
% Comments:
% - the effect of purity is not significant but borderline according to the Wald test. The likelihood ratio test is however significant but not highly significant.
% - the effect is negative as expected.
% We consider now also the data for the second section processed at lower temperature.
y2 = [0 0 0 1 0 0 0 1 0 0 0 1 1 0 1 0 1 0 0 0 1 0 ]';
x2 = x; % the purity are the same because the batches were split in two equal sections
purity = [x;x2];
fault = [y;y2];
modified = [zeros(22,1); ones(22,1)];
data2 = table(purity, fault, modified)
% The dataset now has a new binary variable indicator of the modified process. We then fit a multiple logistic model without interaction
%
% $$
% \text{logit}(p) = \beta_1 + \beta_2\; \texttt{purity} + \beta_3 \;\texttt{modified}
% $$
ml2 = fitglm(data2, 'fault ~ purity + modified', 'Distribution', 'binomial')
% Comments:
% - The effect of purity is significant and shows that the probability of faults decreases with increasing purity
% - The test related to the effect of the modified process is not significant. Therefore
% there is no evidence against $\beta_3 = 0$. The observed effect is nevertheless negative.
% +
plot(purity,fault, 'xr')
hold on
logi1 = @(x) 1./ (1 + exp(-(4.46 - 0.6 .* x)));
logi2 = @(x) 1./ (1 + exp(-(4.46 - 0.86 - 0.6 .* x)));
fplot(logi1, [2, 12], 'r')
fplot(logi2, [2, 12], 'b')
ylim([0,1])
% -
% ### Example Modeling with Categorical Predictors
% In a study of the analgesic effects of treatments on elderly
% patients with neuralgia, two test treatments and a placebo are
% compared. The response variable, `Pain`, is whether the patient
% reported pain or not.
%
% Researchers record `Age` and `Sex` of the
% patients and the `Duration` of complaint before the treatment
% began.
%
[Treatment, Sex, Age, Duration, Pain] = readvars('neuralgia.csv');
Treatment = categorical(Treatment);
Sex = categorical(Sex);
% Binary response: true when the patient reported pain.
Pain = string(Pain) =='Yes';
neur = table(Treatment, Sex, Age, Duration, Pain)
tabulate(categorical(Treatment))
% Nested logistic models: full model vs model without Duration; compare via
% the deviance difference (likelihood-ratio statistic).
m1 = fitglm(neur, 'Pain ~ Treatment + Age + Sex + Duration', 'Distribution', 'binomial')
m2 = fitglm(neur, 'Pain ~ Treatment + Age + Sex', 'Distribution', 'binomial')
m2.Deviance - m1.Deviance
% Likelihood ratio test LRT = $0.0317$ with 1 degree of freedom.
1- chi2cdf(0.0317,1) % not significant
% Is there an interaction between `Treatment` and `Age'?
m3 = fitglm(neur, 'Pain ~ Treatment + Age + Treatment : Age + Sex', 'Distribution', 'binomial')
m2.Deviance - m3.Deviance
% Likelihood ratio test LRT $=6.83$ with 2 degrees of freedom
chi2cdf(6.83, 2, 'upper')
% Significant but not highly significant
| jupyter_matlab/Chapter 7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/aigarsan99/codes_TFM/blob/main/extractive_summarization/english.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="JheKkh1pmu5z"
# ## *Extractive summarization* en inglés
#
# El objetivo del presente proyecto es crear un modelo capaz de producir resúmenes del conjunto de noticias en **lengua inglesa** de CNN y Daily Mail. Los resúmenes serán obtenidos utilizando la metodología de extracción (*extraction summarization*), es decir, el resumen generado será a partir de las frases del texto original que sean más relevantes.
#
# ##### El proyecto constará de distintas secciones:
# - Preparación del entorno
# - Análisis de los datos
# - Preprocesamiento de los datos
# - Análisis de la extensión de los datos
# - Construcción del modelo
# - Generar nuevos resúmenes
#
#
# + [markdown] id="8v4It9qc0_pD"
# ## Preparación del entorno
# + colab={"base_uri": "https://localhost:8080/"} id="Fllo4bMio3i2" outputId="6940d86e-6784-4b80-c0a2-bf24131997f6"
# Librerías necesarias
import tensorflow as tf
import tensorflow_datasets as tfds
import pandas as pd
import math as m
import re
from itertools import chain, groupby
from bs4 import BeautifulSoup
from collections import Counter
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
nltk.download('punkt')
import heapq
# + colab={"base_uri": "https://localhost:8080/"} id="Ofhg9FqSo4pq" outputId="abcb5091-f47b-4f26-c41d-d266c2276132"
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="5xIDihgQ1Crj"
# ## Análisis de los datos
# + id="POq6hI44o679"
data = pd.read_csv('/content/drive/MyDrive/TFM/data/en_train.csv')
def dataframe_ok(df):
    """Tidy the raw CSV in place: drop the exported index column and rename columns."""
    df.drop(columns=['Unnamed: 0'], inplace=True)  # remove the saved index column
    df.columns = ['Text', 'Summary']  # name the remaining columns
dataframe_ok(data)
# + id="w_5rOZJ1Y0UZ" colab={"base_uri": "https://localhost:8080/"} outputId="bb9c9383-3bfc-49f8-ad36-0e4ee9591b5d"
data.shape # Dimensiones de los datos: 1000 filas (noticias) con dos columnas: texto y resumen
# + id="JgakTeKj1XPL" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="03d6e21d-3d26-409a-9935-995d4b2f6117"
data.head() # Observar las cinco primeras líneas de los datos
# + colab={"base_uri": "https://localhost:8080/"} id="waSdVu8C1bA-" outputId="d16c75e6-4e4a-4fe8-e5d2-1eea73969757"
# Inspeccionar el texto completo de las tres primeras filas
for i in range(3):
print("Noticia #",i+1)
print(data.Summary[i])
print(data.Text[i])
print()
# + colab={"base_uri": "https://localhost:8080/"} id="ANfIGhbe1dvb" outputId="e8a42421-45bf-4ed2-d665-9d125d3526f7"
# Comprobar el número de datos nulos
data.isnull().sum()
# + [markdown] id="3Nd3DY-01gKK"
# ## Preprocesamiento de los datos
#
# La tarea de preprocesamiento de los datos es una de las partes más importantes en un proyecto de procesamiento de lenguaje natural. Para realizar resúmenes de texto por extracción se parte de la hipótesis de que el tema principal del texto viene dado por las palabras que aparezcan con mayor frecuencia. En consecuencia, el resumen se generará a partir de las frases que contengan mayor cantidad de dichas palabras. Es por esta razón que para este tipo de resumen automático de textos no es necesario modificar de forma excesiva los textos originales para que estos sean más naturales.
#
# Según la lengua con la que se desee entrenar el modelo, las tareas de limpieza de los datos pueden tener variaciones. Se recuerda que en el presente *notebook* se pretende utilizar textos en lengua inglesa.
#
# ##### **Preprocesamiento de los datos:**
# - **Eliminar letras mayúsculas**: Python diferencia entre carácteres en mayúsuclas y en minúsculas, por lo tanto, las palabras *News* y *news* serían interpretadas como diferentes. Sin embargo, para comprender el texto correctamente, esto no debe ser así. Es por ello que se convierte todo el texto a letras minúsculas.
# - **Eliminar los restos de la importación de los datos**: El conjunto de datos ha sido descargado de la librería TensorFlow. Estos datos se encuentran en el interior de `tf.tensor\(b" [...] ", shape=\(\), dtype=string\)`, carácteres que deben eliminarse porque no forman parte del texto original que se desea analizar. Además, también quedan conjuntos de carácteres como `xe2`, `xc2`... que carecen de significado y están presentes con mucha frecuencia en los textos.
# - **Eliminar los cambios de línea ./n**
# - **Eliminar el texto entre paréntesis**: generalmente, entre paréntesis no se pone información relevante. Por ello, se puede prescindir de esta para reducir la información que debe ser analizada por el modelo.
# - **Eliminar caracteres especiales**
# - **Eliminar 's**
# - **Sustituir las contracciones por su forma original**: [diccionario para expandir las contracciones](https://www.analyticsvidhya.com/blog/2019/06comprehensive-guide-text-summarization-using-deep-learning-python/)
#
#
# + id="ol0F5BIOpZ06"
# Diccionario para expandir las contracciones
contraction_mapping_upper = {"ain't": "is not","can't": "cannot", "'cause": "because", "could've": "could have",
"he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is",
"I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would",
"i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "it'd": "it would",
"it'd've": "it would have", "it'll": "it will", "it'll've": "it will have", "let's": "let us", "ma'am": "madam",
"mayn't": "may not", "might've": "might have", "mightn't've": "might not have", "must've": "must have",
"mustn't've": "must not have", "needn't've": "need not have","o'clock": "of the clock",
"oughtn't": "ought not", "oughtn't've": "ought not have", "sha'n't": "shall not", "shan't've": "shall not have",
"she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have",
"shouldn't've": "should not have", "so've": "so have","so's": "so as",
"this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would",
"there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have",
"they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have",
"we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are",
"we've": "we have", "what'll": "what will", "what'll've": "what will have", "what're": "what are",
"what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is",
"where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have",
"why's": "why is", "why've": "why have", "will've": "will have", "won't've": "will not have",
"would've": "would have", "wouldn't've": "would not have", "y'all": "you all",
"y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have",
"you'd've": "you would have", "you'll've": "you will have"}
# Lower-case every contraction key so lookups on lower-cased text always
# match.  (Duplicate keys such as "I'd"/"i'd" collapse to one entry.)
contraction_mapping = dict((k.lower(), v) for k, v in contraction_mapping_upper .items()) # Convert every key/value of the dictionary to lower case
# Stop words: words that carry no meaning on their own (articles, pronouns, prepositions)
stop_words = set(stopwords.words('english'))
# + id="kTIxj696piX5"
def clean_text(text):
    """Lower-case and clean one raw text for the summarizer.

    Steps: strip TensorFlow tensor wrappers, escaped newlines and
    parenthesised asides; drop possessive 's; expand contractions;
    keep only letters, digits, space and ``. , ? %``; pad punctuation
    with spaces; strip mojibake residues; collapse whitespace.

    :param text: raw article or summary string
    :return: cleaned, single-spaced string
    """
    clean = text.lower()
    # Remove tf.Tensor reprs for both quote styles (raw strings fix the
    # invalid \( escape warnings of the original non-raw patterns)
    clean = re.sub(r'tf.tensor\(b"', "", clean)
    clean = re.sub(r'", shape=\(\), dtype=string\)', "", clean)
    clean = re.sub(r"tf.tensor\(b'", "", clean)
    clean = re.sub(r"', shape=\(\), dtype=string\)", "", clean)
    # Remove escaped line breaks
    clean = clean.replace('.\\n', '')
    # Drop parenthesised asides
    clean = re.sub(r'\([^)]*\)', '', clean)
    # Drop possessive 's
    clean = re.sub(r"'s", "", clean)
    # Expand contractions (keys are lower-cased in contraction_mapping)
    clean = ' '.join([contraction_mapping.get(t, t) for t in clean.split(" ")])
    # Keep only letters, digits, space and . , ? %  — the original class
    # "[^a-zA-Z, ., ,, ?, %, 0-9]" reduces to exactly this character set
    clean = re.sub("[^a-zA-Z.,?% 0-9]", " ", clean)
    # Pad punctuation with spaces so it tokenizes separately
    clean = clean.replace(".", " . ")
    clean = clean.replace(",", " , ")
    clean = clean.replace("?", " ? ")
    # Strip mojibake residues (xe2, x99s, ...) in one pass.  Longer
    # alternatives come first: the original sequential subs removed 'x9'
    # before trying 'x97'/'x9c', so those two patterns could never match
    # (bug) — the combined alternation restores the intended behavior.
    clean = re.sub(
        r'x(?:e2|c2|a0|99ve|99s|99t|98i|99|98|97|9c|93|94|8[0-4]|89|9|8|0)',
        " ", clean)
    # Collapse runs of whitespace into single spaces
    return " ".join(clean.split()).strip()
# + colab={"base_uri": "https://localhost:8080/"} id="MWhA3Qx66EH2" outputId="875d0cec-547f-49d3-fd34-8eee000961b4"
# Limpiar los resúmenes y los textos
# Run the same cleaning pipeline over summaries and full texts.
clean_summaries = []
for summary in data.Summary:
    clean_summaries.append(clean_text(summary)) # stop words kept so summaries read naturally
print("Sumarios completados.")
clean_texts = []
for text in data.Text:
    clean_texts.append(clean_text(text)) # stop words are filtered later, at scoring time
print("Textos completados.")
# + colab={"base_uri": "https://localhost:8080/"} id="ads1S1XIp56T" outputId="b51655b2-c449-407c-f67e-52ef9d59c5b3"
# Inspeccionar los resúmenes y textos limpios para observar que se ha efectuado la limpieza correctamente
for i in range(3):
print("Noticia #",i+1)
print('Sumario: ', clean_summaries[i])
print('Texto: ',clean_texts[i])
print()
# + [markdown] id="E8-q3NXZC1qx"
# ### Análisis de la extensión de los textos
# + id="WcLQh0bM1cBl" colab={"base_uri": "https://localhost:8080/", "height": 485} outputId="2eb515c9-23d5-4a50-ce29-ce84178bbd01"
text_lengths =[]
for i in (range(0,len(clean_texts))):
text_lengths.append(len(clean_texts[i].split()))
import matplotlib.pyplot as plt
plt.title('Número de palabras de los textos')
plt.hist(text_lengths, bins = 30)
# + colab={"base_uri": "https://localhost:8080/", "height": 485} id="f9Xzi9Qgw9bJ" outputId="7df0536f-0ad1-4797-bc6e-db42dde51ef8"
text_sentences =[]
for i in (range(0,len(clean_texts))):
text_sentences.append(len(clean_texts[i].split(".")))
import matplotlib.pyplot as plt
plt.title('Número de frases de los textos')
plt.hist(text_sentences, bins = 30)
# + colab={"base_uri": "https://localhost:8080/", "height": 485} id="lME32SnkC93F" outputId="39b41c86-476d-466b-8e0b-b2f5d3d61d59"
summaries_lengths =[]
for i in (range(0,len(clean_summaries))):
summaries_lengths.append(len(clean_summaries[i].split()))
import matplotlib.pyplot as plt
plt.title('Número de palabras de los sumarios')
plt.hist(summaries_lengths, bins = 30)
# + colab={"base_uri": "https://localhost:8080/", "height": 400} id="r9JvfFrZw2w6" outputId="4895be35-cffb-4172-b97c-1032cd5c15c2"
summaries_sentences =[]
for i in (range(0,len(clean_summaries))):
summaries_sentences.append(len(clean_summaries[i].split(".")))
import matplotlib.pyplot as plt
plt.title('Número de frases de los sumarios')
plt.hist(summaries_sentences, bins = 30)
# + colab={"base_uri": "https://localhost:8080/"} id="2UZLgjNI5pw3" outputId="948adc41-1469-48cb-e9a3-77fe90254ab5"
#Devuelve la frecuencia con la que aparece cada palabra en el texto
def count_words(count_dict, text):
    """Accumulate word frequencies from an iterable of texts into count_dict.

    :param count_dict: dict word -> count, mutated in place
    :param text: iterable of strings; tokens are whitespace-delimited
    """
    for sentence in text:
        for word in sentence.split():
            # dict.get replaces the explicit membership test of the original
            count_dict[word] = count_dict.get(word, 0) + 1
# Total vocabulary over summaries and texts combined
word_frequency = {}
count_words(word_frequency, clean_summaries)
count_words(word_frequency, clean_texts)
print("Vocabulario total:", len(word_frequency))
# + id="z3fhgnAG5MVi" colab={"base_uri": "https://localhost:8080/"} outputId="7e7fd016-2514-4c79-ba42-3558ae450bd1"
# Look for leftovers of the text conversion ('x99', 'x99s', 'x98', ...) to add them to clean_text
import operator
sorted(word_frequency.items(), key=operator.itemgetter(1), reverse=True )
# + [markdown] id="Lmpk5NLbDvqH"
# ## Construcción del modelo
#
# Para generar resúmenes de texto por extracción, es necesario conocer qué frases del texto original son las que mayor información relevante contienen. Para ello, se seguirán los siguientes pasos para cada una de las noticias del conjunto de datos:
# - Calcular la frecuencia de aparición de las palabras.
# - Calcular la frecuencia ponderada de cada una de las palabras, siendo la frecuencia ponderada la división entre la frecuencia de aparición de la palabra en cuestión y la frecuencia de la palabra que aparece más veces en el texto.
# - Calcular la puntuación de cada una de las frases del texto, siendo la puntuación la suma ponderada de cada palabra que conforma dicha frase.
# - Seleccionar las N frases con mayor puntuación para generar el resumen a partir de estas.
# + id="4x7EuUDWHBqi"
def word_frequency(word_frequencies, text):
    """Count occurrences of each content word of `text` into `word_frequencies`.

    Tokens that are English stop words or punctuation marks are skipped.

    :param word_frequencies: dict word -> count, mutated in place
    :param text: one cleaned text (string), tokenized with nltk
    """
    punctuations = {".", ":", ",", "[", "]", "“", "|", "”", "?"}
    for word in nltk.word_tokenize(text):
        # single combined guard replaces the three nested ifs of the original
        if word not in stop_words and word not in punctuations:
            word_frequencies[word] = word_frequencies.get(word, 0) + 1
# + id="BkbP3aIDi-si"
word_freq_per_text = [] # Lista recogiendo los diccionarios de las frecuencias de aparición de las palabras de cada texto
for text in clean_texts:
word_frequencies = {}
word_frequency(word_frequencies, text) # Devuelve el diccionario de frecuencias de las palabras
word_freq_per_text.append(word_frequencies)
# + id="WqE05RvoiFmF"
def word_score(index):
    """Normalize the word counts of text `index` in place to word_freq / max_freq.

    After the call, word_freq_per_text[index] maps each word to its count
    divided by the count of the most repeated word of that text.

    Note: the original also built an unused sentence tokenization and an
    unused local named ``word_frequency`` that shadowed the sibling
    function — both removed; behavior is unchanged.
    """
    freqs = word_freq_per_text[index]
    maximum_frequency = max(freqs.values())  # count of the most repeated word
    for word in freqs:
        freqs[word] = freqs[word] / maximum_frequency
# + id="_CY8Vb8mihda"
for i in range(0, len(clean_texts)):
word_score(i)
# + id="9hXojFvKjBkd"
def sentence_score(sentence_scores, index):
    """Score each sentence of text `index` as the sum of the weighted
    frequencies of its words.

    Only sentences with fewer than 20 space-separated tokens are scored
    (long sentences are skipped entirely).

    :param sentence_scores: dict sentence -> score, mutated in place
    :param index: position of the text in clean_texts / word_freq_per_text
    """
    freqs = word_freq_per_text[index]
    for sent in nltk.sent_tokenize(clean_texts[index]):
        # the length filter does not depend on the word, so test it once
        # per sentence instead of once per word (original nested it inside
        # the word loop — same result, wasted work)
        if len(sent.split(' ')) >= 20:
            continue
        for word in nltk.word_tokenize(sent.lower()):
            if word in freqs:
                sentence_scores[sent] = sentence_scores.get(sent, 0) + freqs[word]
# + id="-nOnFMDinPkj"
sent_sc_per_text = [] # Lista recogiendo los diccionarios de las frases y sus puntuaciones
for i in range(0, len(clean_texts)):
sentence_scores = {}
sentence_score(sentence_scores, i) # Devuelve el diccionario de la puntuación de la frase
sent_sc_per_text.append(sentence_scores)
# + [markdown] id="p6ljqTaAsGvm"
# ## Generar nuevos resúmenes
#
# En el apartado anterior *Análisis de la extensión de los datos* se ha examinado el número de palabras y frases de las noticias y sus respectivos resúmenes que forman el conjunto de datos. En los gráficos presentados se ha podido observar que la extensión de los textos es muy variable, variando entre 5 y 134 frases. En cuanto a los sumarios, estos tienen entre 1 y 19 frases.
#
# El número de frases con las que se desea generar el resumen por extracción debe ser indicado de forma avanzada. No se ha creído oportuno especificar un número concreto de frases para producir el resumen de todos los textos del conjunto debido a que las extensiones de estos son muy variables. Por ello, se ha establecido que el número de frases a escoger debe ser de un 25% del total de frases del texto original.
# + id="kQxFYD03s4Sa"
def generate_summary(index):
    """Build the extractive summary for text `index`.

    Takes the top-scoring 25% of the text's sentences (rounded up) and
    joins them, undoing the space padding clean_text added around
    punctuation.
    """
    total_sentences = len(nltk.sent_tokenize(clean_texts[index]))
    n_sentences = m.ceil(total_sentences * 25 / 100)
    top_sentences = heapq.nlargest(
        n_sentences, sent_sc_per_text[index], key=sent_sc_per_text[index].get)
    summary = ' '.join(top_sentences)
    for padded, plain in ((" .", "."), (" ,", ","), (" ?", "?")):
        summary = summary.replace(padded, plain)
    return summary
generated_summaries = []
for i in range(0, len(clean_texts)):
new_summary = generate_summary(i) # Devuelve el resumen generado
generated_summaries.append(new_summary)
# + id="kHwivSWIqS3E" colab={"base_uri": "https://localhost:8080/", "height": 0} outputId="0d0974b1-5cce-4392-c0a8-9bc32f7680d1"
# Inspeccionar el texto completo de las tres primeras filas y los resúmenes que se han generado
for i in range(3):
print("\nNoticia #",i+1)
print('\nTexto original: ', clean_texts[i])
print('\nResumen original: ', clean_summaries[i])
print('\nResumen generado: ', generated_summaries[i])
print()
# + id="ctvyaG3hcR-n"
| extractive_summarization/english.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # GRIP: THE SPARK FOUNDATION
#
# # Data Science And Business Analytics Intern
#
# # Prediction Using Unsupervised ML
# # NAME : <NAME>
# K-MEANS CLUSTERING
# +
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn import datasets
# Load the iris dataset from CSV (sklearn's built-in copy is not used)
iris = pd.read_csv("Iris.csv")
# Id is just a running row number, not a feature — drop it
iris.drop(["Id"],axis=1,inplace=True)
iris.head()
# -
# Getting the Statistical Information
iris.describe()
# Lets check for datatypes
iris.info()
# # Using Pairplots for better understanding of the data points distribution
sns.pairplot(data=iris,hue="Species",palette="Set1")
plt.show()
# From above visuals iris-setosa is easily separable from the other two.
from sklearn.cluster import KMeans
features = iris.loc[:,["SepalLengthCm","SepalWidthCm","PetalLengthCm","PetalWidthCm"]]
# # Below snippet shows how we can find the optimum number of clusters for K Means and how can we determine the value of K?
# +
# Finding the optimum number of clusters for k-means classification
# (the redundant re-import of KMeans was removed; it is already imported above)
x = iris.iloc[:, [0, 1, 2, 3]].values

wcss = []  # within-cluster sum of squares for each candidate k

for i in range(1, 11):
    kmeans = KMeans(n_clusters = i, init = 'k-means++',
                    max_iter = 300, n_init = 10, random_state = 0)
    kmeans.fit(x)
    wcss.append(kmeans.inertia_)

# Plotting the results onto a line graph,
# allowing us to observe 'The elbow'
plt.plot(range(1, 11), wcss)
plt.title('The elbow method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS') # Within cluster sum of squares
plt.show()
# -
# We can clearly see why it is called 'The elbow method' from the above graph, the optimum clusters is where the elbow occurs. This is when the within cluster sum of squares (WCSS) doesn't decrease significantly with every iteration.
# From this we choose the number of clusters as 3.
# # Verifying visually that with which cluster number, K-means will be optimum
# +
plt.figure(figsize=(24,4))
plt.suptitle("K Means Clustering",fontsize=20)

# Panel 1: raw data (equivalent to a single cluster)
plt.subplot(1,5,1)
plt.title("K = 1",fontsize=16)
plt.xlabel("PetalLengthCm")
plt.ylabel("PetalWidthCm")
plt.scatter(features.PetalLengthCm,features.PetalWidthCm)

# Panel 2: K = 2
plt.subplot(1,5,2)
plt.title("K = 2",fontsize=16)
plt.xlabel("PetalLengthCm")
kmeans = KMeans(n_clusters=2)
features["labels"] = kmeans.fit_predict(features)
plt.scatter(features.PetalLengthCm[features.labels == 0],features.PetalWidthCm[features.labels == 0])
plt.scatter(features.PetalLengthCm[features.labels == 1],features.PetalWidthCm[features.labels == 1])
# dropping labels as we only want to use features.
features.drop(["labels"],axis=1,inplace=True)

# Panel 3: K = 3 — BUGFIX: this panel was drawn into subplot slot 4
# (and K = 4 into slot 3), so the panels appeared out of order.
plt.subplot(1,5,3)
plt.title("K = 3",fontsize=16)
plt.xlabel("PetalLengthCm")
kmeans = KMeans(n_clusters=3)
features["labels"] = kmeans.fit_predict(features)
plt.scatter(features.PetalLengthCm[features.labels == 0],features.PetalWidthCm[features.labels == 0])
plt.scatter(features.PetalLengthCm[features.labels == 1],features.PetalWidthCm[features.labels == 1])
plt.scatter(features.PetalLengthCm[features.labels == 2],features.PetalWidthCm[features.labels == 2])
# dropping labels as we only want to use features.
features.drop(["labels"],axis=1,inplace=True)

# Panel 4: K = 4 — BUGFIX: was drawn into slot 3 (see above).
plt.subplot(1,5,4)
plt.title("K = 4",fontsize=16)
plt.xlabel("PetalLengthCm")
kmeans = KMeans(n_clusters=4)
features["labels"] = kmeans.fit_predict(features)
plt.scatter(features.PetalLengthCm[features.labels == 0],features.PetalWidthCm[features.labels == 0])
plt.scatter(features.PetalLengthCm[features.labels == 1],features.PetalWidthCm[features.labels == 1])
plt.scatter(features.PetalLengthCm[features.labels == 2],features.PetalWidthCm[features.labels == 2])
plt.scatter(features.PetalLengthCm[features.labels == 3],features.PetalWidthCm[features.labels == 3])
# dropping labels as we only want to use features.
features.drop(["labels"],axis=1,inplace=True)

# Panel 5: ground-truth species labels for comparison
plt.subplot(1,5,5)
plt.title("Original Labels",fontsize=16)
plt.xlabel("PetalLengthCm")
plt.scatter(iris.PetalLengthCm[iris.Species == "Iris-setosa"],iris.PetalWidthCm[iris.Species == "Iris-setosa"])
plt.scatter(iris.PetalLengthCm[iris.Species == "Iris-versicolor"],iris.PetalWidthCm[iris.Species == "Iris-versicolor"])
plt.scatter(iris.PetalLengthCm[iris.Species == "Iris-virginica"],iris.PetalWidthCm[iris.Species == "Iris-virginica"])

plt.subplots_adjust(top=0.8)
plt.show()
# -
# Applying kmeans to the dataset / Creating the kmeans classifier
# (k = 3 chosen from the elbow plot above)
kmeans = KMeans(n_clusters = 3, init = 'k-means++',
                max_iter = 300, n_init = 10, random_state = 0)
y_kmeans = kmeans.fit_predict(x)

# +
# Visualising the clusters - On the first two columns (sepal length / width)
plt.scatter(x[y_kmeans == 0, 0], x[y_kmeans == 0, 1],
            s = 100, c = 'red', label = 'Iris-setosa')
plt.scatter(x[y_kmeans == 1, 0], x[y_kmeans == 1, 1],
            s = 100, c = 'blue', label = 'Iris-versicolour')
plt.scatter(x[y_kmeans == 2, 0], x[y_kmeans == 2, 1],
            s = 100, c = 'green', label = 'Iris-virginica')

# Plotting the centroids of the clusters
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:,1],
            s = 100, c = 'yellow', label = 'Centroids')

plt.legend()
| GRIP- TASK2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# This uses an earlier audio clip
# FILE_MUS = 'test0.mp3'
# FILE_CSV = 'test0.csv'
# # adjust until good alignment
# OFFSET = 848
# STRETCH = -.00003
# LEFT_TEST = 420, 620
# RIGHT_TEST = 3850, 3950
# TRIM_START = 400
# SCATTER = [
# [580, 1300, 83, 'r'],
# [1550, 2200, 82, 'g'],
# [2200, 2800, 80, 'b'],
# [2800, 3450, 78, 'k'],
# ]
# +
# Input recording and its paired pressure-sensor log
FILE_MUS = 'test1.mp3'
FILE_CSV = 'test1.csv'

# adjust until good alignment (see the alignment plots below)
OFFSET = -490        # frames to shift pressure vs audio
STRETCH = -.00004    # per-sample clock-drift correction (seconds per hop)
LEFT_TEST = 50, 190      # frame window used to eyeball alignment at the start
RIGHT_TEST = 9300, 9600  # ... and at the end
TRIM_START = 30          # frames discarded from the beginning

# One row per note: [start_frame, end_frame, midi_pitch, color,
#                    octave-change frame indices, note symbol]
# (field meanings from the unpack `start, end, pitch, color, mids, symbol`)
SCATTER = [
    [200, 1675, 72, '#9f1d3f', [368, 558, 726, 927, 1117, 1307, 1508], 'C'],
    [1675, 2994, 74, '#eb6437', [1832, 2002, 2172, 2364, 2546, 2693, 2840], 'D'],
    [2994, 4211, 76, '#e3c70e', [3169, 3361, 3497, 3656, 3792, 3962, 4064], 'E'],
    [4211, 5463, 77, '#008a61', [4381, 4540, 4677, 4846, 5016, 5163, 5322], 'F'],
    [6032, 7250, 79, '#77c1fe', [6166, 6323, 6446, 6602, 6758, 6937, 7071], 'G'],
    [7250, 8423, 81, '#0062bf', [7443, 7580, 7714, 7845, 8003, 8137, 8282], 'A'],
    [8423, 9518, 83, '#774fc2', [8888, 9268, 9332], 'B'],
]

# Lighter variant of each note color, used for the regression lines
LIGHT = {
    '#9f1d3f': '#c46',
    '#eb6437': '#f96',
    '#e3c70e': '#ff3',
    '#008a61': '#3b9',
    '#77c1fe': '#aff',
    '#0062bf': '#39e',
    '#774fc2': '#a7f',
}
# -
import numpy as np
from numpy import pi
import matplotlib
from matplotlib import pyplot as plt
import librosa
from IPython.display import Audio
import scipy
import csv
# from stats import regression
from sklearn import linear_model
# +
PAGE_LEN = 2048
HOP_LEN = PAGE_LEN // 4
amp_c, pre_c, freq_c, *_ = plt.rcParams['axes.prop_cycle'].by_key()['color']
# -
# Each execution of this cell flips LaTeX text rendering on/off
# ("text.usetex" is set to the negation of its current value).
plt.rcParams.update({
    "text.usetex": not plt.rcParams['text.usetex'],
    "font.family": "sans-serif",
    "font.sans-serif": ["Helvetica"],
    'font.size': 16,
    "legend.framealpha": 1,
})
print('TEX:', plt.rcParams['text.usetex'])
# Run the above cell to toggle LaTeX debug mode!
# +
def sino(freq, length):
    """Return `length` samples of a sine at `freq` Hz, sampled at SR.

    NOTE(review): relies on module-level TWO_PI and SR; TWO_PI is not
    defined in the visible cells — presumably 2 * pi, confirm before reuse.
    """
    return np.sin(np.arange(length) * freq * TWO_PI / SR)
def play(data):
    """Return an IPython Audio widget playing `data` at sample rate SR.

    A trailing sample of 1 is appended — presumably to pin the peak so the
    widget's volume normalization is consistent across clips; TODO confirm.
    """
    return Audio(np.concatenate([data, [1]]), rate = SR)
def findPeaks(energy):
    """Return indices of the N_HARMONICS largest local maxima of `energy`."""
    slope = np.sign(energy[1:] - energy[:-1])   # +1 rising, -1 falling
    extrema = slope[1:] - slope[:-1]            # -2 marks rise->fall, i.e. a local max
    # mask non-maxima to 0, then take the N_HARMONICS largest survivors;
    # the trailing +1 compensates the index shift introduced by the diffs
    return np.argpartition(
        (extrema == -2) * energy[1:-1], - N_HARMONICS,
    )[- N_HARMONICS:] + 1
def sft(signal, freq_bin):
    """Slow Fourier Transform: magnitude of the projection of `signal`
    onto a single (possibly fractional) frequency bin, normalized by PAGE_LEN.

    NOTE(review): IMAGINARY_LADDER is defined elsewhere — presumably
    -2j*pi*arange(PAGE_LEN)/PAGE_LEN or similar; confirm before reuse.
    """
    return np.abs(np.sum(signal * np.exp(IMAGINARY_LADDER * freq_bin))) / PAGE_LEN
def refineGuess(guess, signal):
    """Refine a frequency-bin guess by locally maximizing single-bin energy.

    Returns (frequency in Hz, peak magnitude).  `blindDescend` is defined
    elsewhere; it minimizes `loss` starting from `guess`.
    """
    def loss(x):
        if x < 0:
            return 0  # negative bins are meaningless; flat region rejects them
        return - sft(signal, x)
    # NOTE: the unpack rebinds the local name `loss` (shadowing the inner
    # function) with the achieved loss value — works, but fragile.
    freq_bin, loss = blindDescend(loss, .01, .4, guess)
    return freq_bin * SR / PAGE_LEN, - loss
def widePlot(h = 3, w = 12):
    """Resize the current matplotlib figure to w x h inches (wide default)."""
    figure = plt.gcf()
    figure.set_size_inches(w, h)
def spectro(signal, do_wide = True, trim = 130):
    """Plot the first `trim` bins of the windowed magnitude spectrum.

    NOTE(review): `rfft` and `HANN` are module-level names defined
    elsewhere (HANN presumably a PAGE_LEN-point Hann window — confirm).
    """
    energy = np.abs(rfft(signal * HANN))
    plt.plot(energy[:trim])
    if do_wide:
        widePlot()
def concatSynth(synth, harmonics, n):
    """Feed `harmonics` into `synth` n times and concatenate the mixed pages.

    Returns a single numpy array of the n consecutive synth.mix() outputs.
    """
    pages = []
    for _ in range(n):
        synth.eat(harmonics)
        pages.append(synth.mix())
    return np.concatenate(pages)
def pitch2freq(pitch):
    """Convert a MIDI-style pitch value to frequency in Hz."""
    _OFFSET = 36.37631656229591   # fitted constant, pairs with freq2pitch
    _SCALE = 0.0577622650466621   # ~ ln(2)/12: one semitone in log-frequency
    return np.exp((pitch + _OFFSET) * _SCALE)
def freq2pitch(f):
    """Inverse of pitch2freq: frequency in Hz to MIDI-style pitch value."""
    _OFFSET = 36.37631656229591
    _SCALE = 17.312340490667562   # ~ 12/ln(2); the +.001 guards against log(0)
    return np.log(f + .001) * _SCALE - _OFFSET
# -
raw_0, SR = librosa.load(FILE_MUS)
SR
play(raw_0)
# help(librosa.yin)
f0s = librosa.yin(raw_0, 200, 2500, SR, PAGE_LEN)
plt.plot(f0s)
widePlot()
def traceEnergy(signal):
    """Per-hop energy trace: one value per PAGE_LEN window stepped by HOP_LEN.

    NOTE(review): scipy.signal.periodogram returns a (frequencies, power)
    tuple, so np.sum here adds the frequency axis into the "energy" as well
    — almost certainly only the power spectrum was intended.  Left as-is
    because the hard-coded calibration offset applied downstream
    (e - 2758.94...) depends on the current behavior.
    """
    i = 0
    energy = []
    while True:
        page = signal[i*HOP_LEN : i*HOP_LEN + PAGE_LEN]
        if page.size < PAGE_LEN:
            break  # drop the final partial window
        energy.append(np.sum(scipy.signal.periodogram(page, SR)) / PAGE_LEN)
        i += 1
    return energy
e = np.array(traceEnergy(raw_0))
plt.plot(e)
widePlot()
ee = (e - 2758.94165039096) * 10000000
plt.plot(ee)
widePlot()
def getP():
    """Read the (timestamp_ms, pressure) CSV log in FILE_CSV.

    The sensor's millisecond timer wraps at 16384, so each time a timestamp
    decreases an epoch counter is bumped and 16384 ms per epoch is added
    back.  Returns (time in seconds, pressure) as parallel lists.
    """
    time = []
    pressure = []
    with open(FILE_CSV, 'r') as f:
        last_t = -1
        epoch = 0
        for t, p in csv.reader(f):
            t = int(t)
            if t < last_t:
                epoch += 1   # timer wrapped around
            last_t = t
            time.append((t + 16384 * epoch) / 1000)   # ms -> s, unwrapped
            pressure.append(int(p))
    return time, pressure
t, p = getP()
plt.plot(t, p)
widePlot()
# +
def sampleP(time, pressure, t):
    """Linearly interpolate `pressure` at instant `t`.

    `time` must be a sorted numpy array bracketing `t`; the bracketing
    interval is located via the first sign change of (time - t).
    """
    signs = np.sign(time - t)
    idx = np.where(signs[1:] - signs[:-1])[0][0]
    t_lo, t_hi = time[idx], time[idx + 1]
    return (pressure[idx] * (t_hi - t) / (t_hi - t_lo)
            + pressure[idx + 1] * (t - t_lo) / (t_hi - t_lo))
def uniformP(time, pressure):
    """Resample the irregular pressure log onto the audio hop grid.

    The step is one analysis hop (HOP_LEN / SR) plus STRETCH, which
    compensates clock drift between the sensor and the recorder.  Samples
    before the first log entry are skipped; iteration stops past the last.
    """
    time = np.array(time)
    t = 0
    result = []
    while True:
        # print(t, end='\r', flush = True)
        t += HOP_LEN / SR + STRETCH
        if t > time[-1]:
            break       # past the end of the log
        if t < time[0]:
            continue    # before the log starts — nothing to interpolate
        result.append(sampleP(time, pressure, t))
    # print('Done ')
    return np.array(result)
pp = uniformP(t, p)
# -
if OFFSET > 0:
eee = ee[OFFSET:]
ff = f0s[OFFSET:]
pp_ = pp
else:
pp_ = pp[-OFFSET:]
eee = ee
ff = f0s
# +
st, en = LEFT_TEST
x = np.arange(en - st) * HOP_LEN / SR
plt.plot(x, eee[st:en] * 3, label='amplitude')
plt.plot(x, pp_[st:en], label='pressure')
widePlot()
plt.xlabel('time (seconds)')
plt.legend()
plt.savefig('imgs/align_left.svg', bbox_inches='tight')
# +
st, en = RIGHT_TEST
x = np.arange(en - st) * HOP_LEN / SR
plt.plot(x, eee[st:en] * 3, label='amplitude')
plt.plot(x, pp_[st:en], label='pressure')
widePlot()
plt.xlabel('time (seconds)')
plt.legend()
plt.savefig('imgs/align_right.svg', bbox_inches='tight')
# -
plt.plot(eee[:1500])
widePlot()
eee = eee[:pp_.size]
ff = ff[:pp_.size]
eeee = eee[TRIM_START:]
fff = ff[TRIM_START:]
ppp = pp_[TRIM_START:]
ffff = []
for x, y in zip(fff, ppp):
if y > 15:
ffff.append(x)
else:
ffff.append(0)
ffff = np.array(ffff)
plt.plot(ffff)
widePlot()
SIZE = eeee.size
# +
x = np.arange(SIZE) / SR * HOP_LEN
plt.plot(x, eeee * 18, label='amplitude')
plt.plot(x, ppp * 8, label='pressure')
plt.plot(x, ffff, label='frequency')
widePlot(5, 50)
plt.xlabel('time (seconds)')
plt.legend()
# plt.savefig('eyeball.pdf')
eeee.size, ppp.size, ffff.size
# -
def scatterBend(ax, p, f, start, end, pitch, c):
    """Scatter pressure vs pitch bend for one note segment on `ax`.

    Pitch bend is the deviation (in semitones) from `pitch` + .75; points
    with pressure <= 20 are discarded as silence/noise.
    """
    seg_pressure = p[start:end]
    bend = freq2pitch(f[start:end]) - pitch - .75
    xs = []
    ys = []
    for pressure_val, bend_val in zip(seg_pressure, bend):
        if pressure_val > 20:
            xs.append(pressure_val)
            ys.append(bend_val)
    scatter = ax.scatter(xs, ys, c=c, s=.5, marker='.')
    ax.grid(which='major')
    ax.set_ylim([-4,14])
    return scatter
plt.plot(ffff)
widePlot()
# +
# octave hysteresis
NOTE = 4
NOTE_I = 1
start, end, pitch, color, mids, symbol = SCATTER[NOTE]
last_start = start
ax = plt.axes()
for i, x in enumerate(mids + [end]):
if NOTE_I < 0 or i in range(NOTE_I * 2, NOTE_I * 2 + 2):
if i % 2 == 0:
sc = scatterBend(ax, ppp, ffff, last_start, x, pitch, 'b')
sc.set_label('upward')
else:
sc = scatterBend(ax, ppp, ffff, last_start, x, pitch, 'r')
sc.set_label('downward')
last_start = x
plt.xlabel('pressure (Pa)')
plt.ylabel('pitch bend (semitones)')
lgnd = plt.legend()
for handle in lgnd.legendHandles:
handle.set_sizes([50])
plt.savefig('imgs/hysteresis.svg', bbox_inches='tight')
# -
NOTE = 1
NOTE_I = 2
start, end, pitch, color, mids, symbol = SCATTER[NOTE]
last_start = start
for i, x in enumerate(mids + [end]):
if NOTE_I < 0 or i in range(NOTE_I * 2, NOTE_I * 2 + 2):
scatterBend(plt.gca(), ppp, ffff, last_start, x, pitch, 'b' if i % 2 == 0 else 'r')
last_start = x
axes = plt.gca()
axes.set_xlim([0,200])
axes.set_ylim([-2,1.5])
widePlot(4, 10)
# ## filter (pressure, pitch) pairs with timing.
# So like, invalidate those close to the octave change.
# Previously we used unsupervised learning to identify the two distribution domains.
#
# legacy code here
# +
# from sklearn import cluster
# from sklearn import mixture
# +
# pitch, (X, Y) = regress_data[2]
# # clustering = cluster.DBSCAN(eps=8e4, min_samples=10).fit([*zip(X, Y)])
# # clustering = cluster.SpectralClustering(n_clusters=2).fit([*zip(X, Y)])
# # clustering = cluster.AgglomerativeClustering(n_clusters=2).fit([*zip(X, Y)])
# # clustering = cluster.OPTICS().fit([*zip(X, Y)])
# # clustering = cluster.KMeans(n_clusters=2).fit([*zip(X, Y)])
# # clustering = cluster.MeanShift().fit([*zip(X, Y)])
# # clustering = cluster.Birch(n_clusters=2).fit([*zip(X, Y)])
# # print(clustering.labels_)
# # c = clustering.labels_
# mix = mixture.GaussianMixture(n_components=2, warm_start=False).fit([*zip(X, Y)])
# print('iter', mix.n_iter_, '. if > 100, raise max')
# c = mix.predict([*zip(X, Y)])
# print(mix.means_)
# plt.scatter(X, Y, s=1, c=['brgk'[t] for t in c])
# # plt.scatter(X, Y, s=1, c=['b' if t < 2 else 'r' for t in c])
# # plt.scatter(X, Y, s=1, c=c)
# -
x = np.arange(SIZE) / SR * HOP_LEN
plt.plot(x[3690:3920], ffff[3690:3920], c=freq_c)
plt.axvspan(86.9, 87.18, facecolor='r', alpha=0.3)
span = plt.axvspan(88.53, 88.9, facecolor='r', alpha=0.3)
plt.xlabel('time (seconds)')
plt.ylabel('frequency (Hz)')
plt.legend([span], ['not in equilibrium'])
# for illustration
plt.savefig('imgs/neq.svg', bbox_inches='tight')
# +
# plt.plot(ffff[1700:1950])
plt.plot(ffff[1850:1930])
# so deadzone approx = 25 (pages)
DEADZONE = 19
# +
# Mark frames where the pitch tracker is not in equilibrium (around octave
# jumps) so they can be excluded from the regression data.
is_domain = [True for _ in ffff]
last_freq = [0, 0]
for i, freq in enumerate(ffff):
    # NOTE(review): list.pop() removes the *newest* element, so despite the
    # name this compares against the immediately previous frame and
    # last_freq[0] stays 0 forever; behavior deliberately kept as-is.
    two_before = last_freq.pop()
    if two_before == 0:
        is_domain[i] = False
    else:
        ratio = freq / two_before
        if ratio > 1.7:
            # jump up! invalidate this frame, the previous one, and up to
            # DEADZONE earlier frames that had not yet settled
            is_domain[i-1 : i+1] = [False] * 2
            # BUGFIX: clamp the stop at -1 so the backward scan cannot wrap
            # to negative indices (marking the *tail* of is_domain) when a
            # jump occurs within DEADZONE frames of the start
            for j in range(i - 2, max(i - DEADZONE, -1), -1):
                if ffff[j] > freq * .9:
                    break
                is_domain[j] = False
        if ratio < .6:
            # jump down! invalidate this frame onward until settled
            is_domain[i-1 : i+1] = [False] * 2
            # BUGFIX: clamp the stop at len(ffff) so the forward scan cannot
            # raise IndexError when a jump occurs near the end
            for j in range(i, min(i + DEADZONE, len(ffff)), +1):
                if ffff[j] > two_before * .9:
                    break
                is_domain[j] = False
    last_freq.append(freq)
# domain_p = ppp[is_domain]
# domain_f = ffff[is_domain]
fig, (ax0, ax1) = plt.subplots(2, 1, sharex=True)
x = np.arange(SIZE) / SR * HOP_LEN
ax0.plot(x, eeee * 18, label='amplitude')
ax0.plot(x, ppp * 8, label='pressure')
ax0.plot(x, ffff, label='frequency')
ax0.legend()
ax1.plot(x, ppp * 8, pre_c, label = 'pressure')
ax1.plot(x, ffff, freq_c, label = 'frequency')
last_start = None
span = None
def endRect(end):
global last_start, span
if last_start is not None:
span = ax1.axvspan(x[last_start], x[end], facecolor='r', alpha=0.3)
last_start = None
for i, is_do in enumerate(is_domain):
if not is_do:
if last_start is None:
last_start = i
else:
endRect(i)
endRect(i)
ax1.legend([span], ['removed'])
widePlot(10, 50)
plt.xlabel('time (seconds)')
plt.savefig('imgs/scroll.svg', bbox_inches='tight')
# -
# The below cell hand-removes a particularly large dent.
plt.plot(range(7600, 7800), ffff[7600:7800])
plt.axvspan(7700, 7752, facecolor='r', alpha=0.5)
for i in range(7700, 7752):
is_domain[i] = False
# +
def scatterDomainBend(ax, p, f, start, end, pitch, c, do_bads = True):
    """Scatter pressure vs pitch bend, split by the is_domain mask.

    In-domain points are drawn in color `c`; when `do_bads` is True the
    masked-out (non-equilibrium) points are also drawn in black.
    Points with pressure <= 20 are discarded.  `p` and `f` must support
    numpy boolean indexing.
    """
    _p = p[start:end]
    f = f[start:end]
    dom = is_domain[start:end]
    _pb = freq2pitch(f) - pitch - .75
    if do_bads:
        # the frames rejected by the domain filter, shown for reference
        p = _p[np.invert(dom)]
        pb = _pb[np.invert(dom)]
        pp = []
        pbpb = []
        for x, y in zip(p, pb):
            if x > 20:
                pp.append(x)
                pbpb.append(y)
        ax.scatter(pp, pbpb, c='k', s=.5, marker='.')
    p = _p[dom]
    pb = _pb[dom]
    pp = []
    pbpb = []
    for x, y in zip(p, pb):
        if x > 20:
            pp.append(x)
            pbpb.append(y)
    sct = ax.scatter(pp, pbpb, c=c, s=.5, marker='.')
    ax.grid(which='major')
    # the ylim goes to the *current* axes, not necessarily `ax`; this is
    # load-bearing — callers sometimes pass the plt module itself as `ax`,
    # which has no set_ylim method
    ax = plt.gca()
    ax.set_ylim([-3,1])
    return sct
fig, axes = plt.subplots(2, 4, sharey = True, sharex = True)
fig.delaxes(axes[0][-1])
for ax, args in zip([*axes[0][:-1], *axes[1]], SCATTER):
sct = scatterDomainBend(ax, ppp, ffff, *args[:4])
lgnd = ax.legend([sct], [args[5]], loc='lower right')
for handle in lgnd.legendHandles:
handle.set_sizes([50])
ax = fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.grid(False)
plt.xlabel('pressure (Pa)')
plt.ylabel('pitchbend (semitones)')
widePlot(6, 10)
plt.savefig('imgs/clean_result.svg', bbox_inches='tight')
# +
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
fig.subplots_adjust(hspace=0.05) # adjust space between axes
for args in SCATTER:
# if args[3] == 'red':
scatter = scatterDomainBend(ax1, ppp, ffff, *args[:4], False)
scatter = scatterDomainBend(ax2, ppp, ffff, *args[:4], False)
scatter.set_label(args[5])
ax1.set_ylim(8.5, 13)
ax2.set_ylim(-3, 1.5)
ax1.spines['bottom'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax1.xaxis.tick_top()
ax1.tick_params(labeltop=False)
ax2.xaxis.tick_bottom()
d = .5
kwargs = dict(marker=[(-1, -d), (1, d)], markersize=12,
linestyle="none", color='k', mec='k', mew=1, clip_on=False)
ax1.plot([0, 1], [0, 0], transform=ax1.transAxes, **kwargs)
ax2.plot([0, 1], [1, 1], transform=ax2.transAxes, **kwargs)
widePlot(7, 10)
plt.xlabel('pressure (Pa)')
plt.ylabel('pitch bend (semitones)', position = (0, 1))
lgnd = ax2.legend()
for handle in lgnd.legendHandles:
handle.set_sizes([50])
# axes = plt.gca()
# axes.set_xlim([0,100])
# axes.set_ylim([-3,1.5])
plt.savefig('imgs/rainbow_scatter.svg', bbox_inches='tight')
# -
# ## regressions, parameter finding
# +
def scatterBendFreq(p, f, start, end, pitch, c, octave_high = False):
    """Scatter ln(pressure) vs (frequency quotient)**10 for one note and
    return the plotted (x, y) lists for later regression.

    With `octave_high`, the reference pitch is raised 12 semitones and the
    outlier filter is relaxed.  Only frames passing the is_domain mask are
    used.  The commented-out scatter calls are alternative y-axes that were
    tried during exploration (kept for reference).
    """
    if octave_high:
        pitch += 12
    p = p[start:end]
    f = f[start:end]
    dom = is_domain[start:end]
    p = p[dom]
    f = f[dom]
    fb = (f - pitch2freq(pitch + .75))   # frequency offset from reference
    fq = (f / pitch2freq(pitch + .75))   # frequency quotient vs reference
    pb = freq2pitch(f) - (pitch + .75)   # pitch bend in semitones
    pp = []
    fbfb = []
    pbpb = []
    fqfq = []
    for x, y, z, u in zip(p, fb, pb, fq):
        # drop silence and gross outliers; thresholds differ per octave
        if octave_high:
            if x < 20 or y < -500:
                continue
        else:
            if x < 20 or abs(y) > 250 or x > 250:
                continue
        pp.append(np.log(x))
        fbfb.append(y)
        # pbpb.append(z)
        pbpb.append(np.exp(z))
        fqfq.append(u ** 10)   # tenth power linearizes the relation vs ln(p)
    # plt.scatter(pp, fbfb, c=c, s=1, marker='.')
    # plt.scatter(pp, pbpb, c=c, s=1, marker='.')
    plt.scatter(pp, fqfq, c=c, s=1, marker='.')
    # plt.grid(which='major')
    return pp, fqfq
scatterBendFreq_results = []
for i, args in enumerate(SCATTER):
# if i >= 3:
scatterBendFreq_results.append([args[2],
scatterBendFreq(ppp, ffff, *args[:4])
])
widePlot(5, 8)
# axes = plt.gca()
# axes.set_xlim([0,3])
# axes.set_xlim([0,250])
# axes.set_ylim([-200,50])
# -
scatterBendFreqHighOctave_results = []
for i, args in enumerate(SCATTER):
# if i >= 3:
scatterBendFreqHighOctave_results.append([args[2] + 12,
scatterBendFreq(ppp, ffff, *args[:4], True)
])
widePlot(5, 8)
# axes = plt.gca()
# axes.set_xlim([0,3])
# axes.set_xlim([0,7.5])
# axes.set_ylim([0,1.2])
regress_data = scatterBendFreq_results + scatterBendFreqHighOctave_results
# Sanity check: 7 pitches x 2 octaves = 14 (pitch, (X, Y)) regression pairs.
assert len(regress_data) == 14 # in case the above scattering code was conditioned
# +
# Fit one straight line per note: frequency quotient vs. ln(pressure).
reg_results = []
# legacy
# for pitch, (X, Y) in regress_data:
#     reg_results.append([pitch, regression(X, Y)])
for i, (pitch, (X, Y)) in enumerate(regress_data):
    # NOTE(review): the commented-out code below used a 2-component Gaussian
    # mixture to drop outliers before fitting; the live code fits all points.
#     if i in [0, 1, 2, 3, 4, 5, 6]:
#         mix = mixture.GaussianMixture(n_components=2, warm_start=True).fit([*zip(X, Y)])
#         label = mix.predict([*zip(X, Y)])
#         if mix.means_[0][0] < mix.means_[1][0]:
#             choose_label = 0
#         else:
#             choose_label = 1
#         XX = [t for i, t in enumerate(X) if label[i] == choose_label]
#         YY = [t for i, t in enumerate(Y) if label[i] == choose_label]
#     else:
#         XX = X
#         YY = Y
    XX = X
    YY = Y
    lm = linear_model.LinearRegression()
#     lm.fit_intercept = False
    model = lm.fit([[t] for t in XX], [[t] for t in YY])
    # store [MIDI pitch, slope, intercept]
    reg_results.append([pitch, model.coef_[0][0], model.intercept_[0]])
reg_results
# +
# Three-panel figure: fitted lines + scatter for both octaves, lower, higher.
fig, axes = plt.subplots(1, 3, sharey=True)
for r, ax in zip([(0, 14), (0, 7), (7, 14)], axes):
    ax.axhline(1, linewidth = .5, c='k')  # reference: quotient == 1 (no bend)
    for i in range(*r):
        X, Y = regress_data[i][1]
        c, _, symbol = SCATTER[i % 7][3:]
        # predicted quotient from the per-note linear fit
        YY = [reg_results[i][1] * t + reg_results[i][2] for t in X]
        ax.plot(X, YY, LIGHT[c], linewidth = .5, label=symbol)
        ax.scatter(X, Y, s=.5, c=c)
axes[0].set_title('Both octaves')
axes[1].set_title('Lower octave')
# axes[0].set_xlim([-.5e6, 0.2e8])
# axes[0].set_ylim([-.05, 3])
axes[2].set_title('Higher octave')
# axes[1].set_xlim([-1.0e7, 4.0e8])
# axes[1].set_ylim([-.1, 6])
fig.subplots_adjust(wspace=.3)
handles, labels = ax.get_legend_handles_labels()
# invisible full-figure axes used only to host the shared x/y labels
ax = fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.grid(False)
lgnd = fig.legend(handles, labels, loc=(.31,.09), prop={'size': 12})
for handle in lgnd.legendHandles:
    handle.set_linewidth(1)
plt.xlabel('$ln($pressure$)$', labelpad=5)
plt.ylabel('frequency quotient \\^{} $10$', labelpad=15)
widePlot(4, 10)
plt.savefig('imgs/bend_regress.svg', bbox_inches='tight')
# +
# Refit intercepts with a single shared (mean) slope, then regress the
# per-note ln(pressure) intercept on MIDI pitch.
mean_slope = np.mean([t[1] for t in reg_results])
def fitIntercept():
    # For each of the 14 notes, x-intercept of a line with slope `mean_slope`
    # through the centroid of that note's (X, Y) cloud.
    results = []
    for i in range(0, 14):
        X, Y = regress_data[i][1]
        results.append(np.mean(X) - (np.mean(Y) - 1) / mean_slope)
    return results
X = np.array([t[0] for t in reg_results])
intercepts = fitIntercept()
plt.scatter(X, intercepts)
lm = linear_model.LinearRegression()
model = lm.fit([[t[0]] for t in reg_results], [[t] for t in intercepts])
pb_coef = model.coef_[0][0]
pb_inter = model.intercept_[0]
print(pb_coef, pb_inter)
predicted_x_intercept = pb_inter + pb_coef * X
plt.plot(X, predicted_x_intercept)
plt.xlabel('pitch (MIDI)')
plt.ylabel('$ln($pressure$)$ intercept')
plt.xticks([
    *np.array([60, 62, 64, 65, 67, 69, 71]) + 12,
    *np.array([60, 62, 64, 65, 67, 69, 71]) + 24,
])
widePlot(3, 10)
plt.savefig('imgs/interc_regress.svg', bbox_inches='tight')
# -
# ## next step: reverse back to rainbow and overlay
# +
# Reconstruct pitch-bend curves from the 3-parameter model and overlay them
# on the raw per-note scatter ("rainbow") data.
X = np.array(range(10, 350))
log_X = np.log(X)
ONE_PITCH = freq2pitch(1)
for i, args in enumerate(SCATTER):
    pitch = args[2]
    c = args[3]
    sym = args[5]
    xi_l = predicted_x_intercept[i]      # lower-octave ln(pressure) intercept
    xi_h = predicted_x_intercept[i + 7]  # higher-octave ln(pressure) intercept
    # model: frequency quotient = ((ln p - intercept) * slope + 1) ^ (1/10)
    fq_l = ((log_X - xi_l) * mean_slope + 1) ** .1
    fq_h = ((log_X - xi_h) * mean_slope + 1) ** .1
    pb_l = [freq2pitch(t) - ONE_PITCH for t in fq_l]
    pb_h = [freq2pitch(t) - ONE_PITCH for t in fq_h]
    plt.plot(X, pb_l, c, linewidth = .5)
    plt.plot(X, pb_h, c, linewidth = .5, label=sym)
    scatterDomainBend(plt, ppp, ffff, *args[:2], pitch, c, False)
    scatterDomainBend(plt, ppp, ffff, *args[:2], pitch+12, c, False)
widePlot(10, 9)
axes = plt.gca()
axes.set_xlim([10,320])
axes.set_ylim([-3,.8])
plt.xlabel('pressure (Pa)')
plt.ylabel('pitch bend (semitones)')
plt.legend()
plt.savefig('imgs/rainbow_overlay.svg', bbox_inches='tight')
# -
# ## !!! only three free parameters!
# and one of them is "10"
# ## Failure: study amplitude
# +
# Legacy code
# plt.plot(ffff[230:1300])
# # plt.plot(ffff[1550:2200])
# # plt.plot(ffff[2200:2800])
# # plt.plot(ffff[2800:3450])
# widePlot()
# plt.plot(ffff[230:580])
# plt.plot(ffff[580:960])
# # scatterBend(ppp, ffff, 230, 580, 83, 'r')
# scatterBend(ppp, ffff, 580, 1300, 83, 'r')
# scatterBend(ppp, ffff, 1550, 2200, 82, 'g')
# scatterBend(ppp, ffff, 2200, 2800, 80, 'b')
# scatterBend(ppp, ffff, 2800, 3450, 78, 'k')
# plt.grid(which='major')
# +
def scatterVelo(p, e, start, end, _, c):
    """Scatter energy against pressure for one note segment.

    Only samples with pressure strictly above 20 are kept.  The fifth
    positional argument is accepted but unused (kept for call-site
    compatibility with the other scatter helpers).
    """
    segment = zip(p[start:end], e[start:end])
    kept = [(x, y) for x, y in segment if x > 20]
    pressures = [pt[0] for pt in kept]
    energies = [pt[1] for pt in kept]
    plt.scatter(pressures, energies, c=c, s=.5, marker='.')
# Plot pressure vs. energy for every note (exploratory velocity model).
for i, args in enumerate(SCATTER):
    scatterVelo(ppp, eeee, *args[:4])
#     if i == 6:
#         scatterVelo(ppp, eeee, *args[:3], 'k')
# widePlot(10, 10)
widePlot()
# -
# Total failure.
# ## octave threshold
# Hand labeling from note-wise pressure-pitch scatter.
# Hand-labelled pressures (raw sensor units) at which each note jumps octave.
# Row format: [MIDI pitch, upward-jump thresholds, downward-jump thresholds].
OCTV_THRES = [
    [60, [62, 64, 66, 66, ], [51, 55, 57, 54]],
    [62, [80, 101, 84, 79, ], [83, 80, 82, 80]],
    [64, [104, 97, 112, 101,], [75, 73, 74, 73]],
    [65, [122, 99, 91, 95, ], [79, 72, 79, 79]],
    [67, [159, 141, 122, 126,], [149, 106, 99, 96]],
    [69, [236, 216, 225,], [212, 186, 188]],
    [71, [], [201]],
]
# +
def scatterOctave(s):
    """Scatter ln(threshold pressure) vs. pitch for the hand-labelled
    octave jumps: blue = upward jumps, red = downward jumps.

    Returns (x, y, c): all pitches, all ln-pressures, and the per-point
    color labels ('b'/'r'), in labelling order.
    """
    x, y, c = [], [], []
    blue_pts, red_pts = [], []
    for pitch, ups, downs in OCTV_THRES:
        for thresholds, color in zip([ups, downs], ['b', 'r']):
            for threshold in thresholds:
                log_p = np.log(threshold)
                c.append(color)
                x.append(pitch)
                y.append(log_p)
                (blue_pts if color == 'b' else red_pts).append((pitch, log_p))
    x_b = [p for p, _v in blue_pts]
    y_b = [v for _p, v in blue_pts]
    x_r = [p for p, _v in red_pts]
    y_r = [v for _p, v in red_pts]
    plt.scatter(x_b, y_b, s=s, marker='o', facecolors='none', edgecolors='b', label = 'upward')
    plt.scatter(x_r, y_r, s=s, marker='o', facecolors='none', edgecolors='r', label = 'downward')
    return x, y, c
# single line fit - ignores hysteresis
x, y, c = scatterOctave(s=20)
lm = linear_model.LinearRegression()
model = lm.fit([[t] for t in x], [[t] for t in y])
ot_coef = model.coef_[0][0]
ot_inter = model.intercept_[0]
print(ot_coef, ot_inter)
y_hat = ot_inter + ot_coef * np.array(x)
plt.plot(x, y_hat, c='k')
plt.xlabel('pitch (MIDI)')
plt.ylabel('$ln($pressure$)$')
plt.legend()
# +
# Second fit: add a binary up/down regressor so hysteresis appears as a
# constant vertical offset between upward ('b') and downward ('r') thresholds.
x, y, c = scatterOctave(s=20)
lm = linear_model.LinearRegression()
model = lm.fit([[t, 0 if tt == 'r' else 1] for t, tt in zip(x, c)], [[t] for t in y])
ot_coef = model.coef_[0][0]    # slope w.r.t. pitch
ot_c_coef = model.coef_[0][1]  # extra offset for upward jumps
ot_inter = model.intercept_[0]
print(ot_coef, ot_c_coef, ot_inter)
y_hat = ot_inter + ot_coef * np.array(x)
plt.plot(x, y_hat, 'r')
plt.plot(x, y_hat + ot_c_coef, 'b')
plt.xlabel('pitch (MIDI)')
plt.ylabel('$ln($pressure$)$')
plt.xticks([60, 62, 64, 65, 67, 69, 71])
plt.legend()
plt.savefig('imgs/octave_thresholds.svg', bbox_inches='tight')
| lab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: nlu
# language: python
# name: nlu
# ---
from process_util import *
from random import sample
# +
# read jsonl dataset
src = "/home/ubuntu/cs224u/raw_reddit/tldr-training-data.jsonl"
reader = jsonlines.open(src)  # `jsonlines` comes from process_util's star import
# choose a subreddit
input_subreddit = "ShouldIbuythisgame"
# create the directory to this corresponding dataset
dst = "/home/ubuntu/cs224u/processed_ShouldIbuythisgame"+'/'+input_subreddit+'_story'
#os.mkdir(dst)
# -
def create_preprocessed_story_file(input_dict, save_dir):
    '''
    Write one preprocessed ".story" file (content, @highlight marker, summary)
    for a single Reddit post.

    input:
        input_dict: input dictionary, include information about its id, content, summary etc
        save_dir: a directory about where to save the story files
    reference: https://medium.com/@datamonsters/text-preprocessing-in-python-steps-tools-and-examples-bf025f872908
    here we preprocessed the content and the summary of the story by:
    1) get rid of extra space tab
    2) filter out those whose summary is too short/content is too short
    3) delete special characters like [...]
    4) [potential] Stemming (spend/spent/spends...)
    5) [potential] Lemmatization (do/done/did)
    '''
    dic_id = input_dict["id"]
    content = input_dict["content"]
    summary = input_dict['summary']
    # BUG FIX: the original compared a list to an int (`summary.split() > 3`),
    # which raises TypeError on Python 3; the intent is "more than 3 words".
    if len(summary.split()) > 3:
        # get rid of extra space tab
        content = re.sub('\s+', ' ', content).strip()
        summary = re.sub('\s+', ' ', summary).strip()
        # get rid of words inside special characters like [...] and (...)
        content = re.sub("[\(\[].*?[\)\]]", "", content)
        summary = re.sub("[\(\[].*?[\)\]]", "", summary)
        filename = os.path.join(save_dir, dic_id + ".story")
        # `with` guarantees the handle is closed even if a write fails
        # (the original open/close pair leaked on exceptions)
        with open(filename, "w") as story_file:
            story_file.writelines(content + '\n')
            story_file.writelines('@highlight \n')
            story_file.writelines(summary)
# get a corresponding dataset
# Collect every English post from the chosen subreddit.
count = 0  # NOTE(review): never incremented; kept for compatibility
dic_list = []
for dic in reader:
    if("subreddit" in dic.keys() and dic["subreddit"] == input_subreddit and
       isEnglish(dic["summary"]) == True and isEnglish(dic["content"]) == True ):
        dic_list.append(dic)
# create a small dataset if needed (random 100-post subset)
sample_list = sample(dic_list,100)
for dic in sample_list:
    create_preprocessed_story_file(dic, dst)
#dst_whole = "/home/ubuntu/cs224u/processed_politics"+'/'+input_subreddit+'_story_whole'
#os.mkdir(dst_whole)
dst
# create the whole dataset
#sample_list = sample(dic_list,100)
for dic in dic_list:
    create_preprocessed_story_file(dic, dst)
# # get corresponding list
# +
#input_subreddit = "relationships"
#dst = "/home/ubuntu/cs224u/new_relationships"+'/'+input_subreddit+'_story'
# Shuffle the story files and split ~80/10/10 into train/dev/test lists.
result_list = os.listdir(dst)
np.random.shuffle(result_list)
size = len(result_list)
# NOTE(review): the `-1` in each stop index drops one extra file per split
# (Python slices already exclude the stop index) -- confirm this is intended.
train_list = result_list[0:int(0.8*size)-1]
train_str = "\n".join(x for x in train_list)
dev_list = result_list[int(0.8*size):int(0.9*size)-1]
dev_str = "\n".join(x for x in dev_list)
test_list = result_list[int(0.9*size): int(size)-1]
test_str = "\n".join(x for x in test_list)
# print (len(train_list))
# print (len(dev_list))
# print (len(test_list))
# -
dst
# create three lists (writes the train/val/test filename manifests)
create_list(input_subreddit, "_train", train_str)
create_list(input_subreddit, "_val", dev_str)
create_list(input_subreddit, "_test", test_str)
# # create a baseline result for the test set
# create the directory to the corresponding baseline result
make_dir = '/home/ubuntu/cs224u/processed_' +input_subreddit+'/baseline'
os.mkdir(make_dir)
# +
make_dec_dir = make_dir + '/decoded'
os.mkdir(make_dec_dir)
make_ref_dir = make_dir + '/reference'
os.mkdir(make_ref_dir)
# -
# get the name of the test list
test_name_list = [x[:-6] for x in test_list]  # strip the ".story" suffix
test_name_list
reader = jsonlines.open(src)  # re-open: the first pass exhausted the iterator
# create corresponding baseline summarization and reference files
for dic in reader:
    if("subreddit" in dic.keys() and dic["subreddit"] == input_subreddit and isEnglish(dic["content"]) == True):
        if(dic["id"] in test_name_list):
            print(dic["id"])
            create_baseline_summarization_file(dic, make_dec_dir)
            create_reference_file(dic, make_ref_dir)
# # create an example.story (not relevant to here)
# Grab one English AskReddit post and dump its content to example.story.
for dic in reader:
    if("subreddit" in dic.keys() and dic["subreddit"] == "AskReddit" and isEnglish(dic["summary"]) == True and isEnglish(dic["content"]) == True ):
        print(dic)
        break
dic["content"]
len(dic["summary"].split())
# BUG FIX: `cwd` was referenced below while its assignment was commented out,
# raising NameError; restore the assignment.
cwd = os.getcwd()
filename = os.path.join(cwd, "example.story")
# `with` replaces the manual open/close pair and closes on error too
with open(filename, "w") as file1:
    file1.writelines(dic["content"]+'\n')
    #file1.writelines('@hightlight \n')
    #file1.writelines(input_dict['summary'])
dic["content"]
dic["summary"]
| process_data/preprocess_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !python --version
# !conda install -y fastcore
import twitter
from pathlib import Path

# BUG FIX: the original passed bare [placeholder] tokens as keyword values,
# which is invalid Python syntax. Fill in real credentials before running
# (ideally read them from environment variables; never commit real secrets).
api = twitter.Api(consumer_key="YOUR_CONSUMER_KEY",
                  consumer_secret="YOUR_CONSUMER_SECRET",
                  access_token_key="YOUR_ACCESS_TOKEN_KEY",
                  access_token_secret="YOUR_ACCESS_TOKEN_SECRET")
| mental-health/get_raw_twitter_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import cv2
import PIL.Image as Image
import os
import matplotlib.pylab as plt
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
# +
# MobileNetV2 ImageNet classifier from TF-Hub, wrapped as a one-layer model.
# Input is 224x224 RGB, hence image_shape + (3,) channels.
image_shape= (224,224)
classifier = tf.keras.Sequential([hub.KerasLayer('https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/4',
                                                 input_shape=image_shape+(3,))])
# -
# Sample image for a smoke test; evaluating the name displays it inline.
gold_fish=Image.open('Goldfish.jpg')
gold_fish
| CNN_study/transfer_learning/transfer_learning_mobilenet_v2/.ipynb_checkpoints/Trasnfer_learning_mobilenetV2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.1 64-bit (''venv'': venv)'
# name: python3
# ---
#
# <h1 style="font-size: 200%">Single-mixed refrigerant (SMR) natural gas liquefaction process simulation optimization <a href="https://doi.org/10.1016/j.ces.2021.116699" title="SMR optimization paper">[1]</a> </h1>
#
# <hr>
#
# <h2 style="font-size: 150%">SMR process description</h2>
#
# <spam style="font-size: 120%">The single-mixed refrigerant natural gas liquefaction process consists of using a multi-component refrigerant operating in a refrigeration cycle to produce heat sink to cool down and liquefy the natural gas stream, as illustrated in the figure below. The refrigeration cycle includes a four-stage compression system with intermediate cooling and phase separation for possible condensate. The liquid phase is compressed in pumps and the vapor phase in the compressors. No phase mixing is considered, which means that the refrigerant heavy condensate is mixed together and goes through the hot pass in the multi-stream heat exchanger separately from the light vapor phase. Then, the streams are mixed back together in the cold pass inside the cryogenic heat exchanger as they vaporize.</spam>
#
# <br/>
# <br/>
# <img src="pfd_smr.png" alt="process flow diagram image" style="height: 400px"/>
# <br/>
# <br/>
#
# <h2 style="font-size: 150%"> SMR process simulation</h2>
#
# <spam style="font-size: 120%">The described single-mixed refrigerant natural gas liquefaction process is rather difficult to model and simulate mainly because it involves Pinch-like calculations in each multi-stream heat exchangers (MSHE) considering phase change, cryogenic conditions, and non-ideal mixtures. In other words, these heat exchangers are discretized in temperature segments, where vapor-liquid equilibrium calculations are performed to determine the temperature of every stream and, therefore, the temperature profiles in these operation units. For the sake of process feasibility considering the Second Law of Thermodynamics, the temperatures of hot streams have to be effectively higher than the cold ones throughout the heat exchangers.
# For rigorous calculations, this process is modeled and simulated in DWSIM v7.0 using Peng-Robinson equation of state, which is appropriate for hydrocarbons mixtures, such as the natural gas and the refrigerant mixtures. The minimum internal temperature approach is calculated using the Python Script in DWSIM. See the simulation ``SMR_2exp_phaseSep_MSHE_MITApy.dwxmz`` for more details.</spam>
#
# <br />
# <br />
# <img src="pfd_prico_dwsim.png" alt="process flow diagram image" style="height: 400px"/>
# <br />
# <br />
#
# <h2 style="font-size: 150%">SMR process optimization problem</h2>
#
# <spam style="font-size: 120%">Knowing that the work consumption is the most relevant spending in the natural gas liquefaction process, then the design problem is to find $\textbf{x}^*\in \mathbb{R}^n$ that minimizes the following optimization problem</spam>
#
# <spam style="font-size: 120%">
#
# $$ \min_{\textbf{x}\in \mathcal{D}}\ \ f(\textbf{x})=\frac{\sum_{p\in PM}{W_p(\textbf{x})}}{\dot{m}_{NG}} $$
# $$ \text{s.t.} \ \ g(\textbf{x})=3 - MITA(\textbf{x})\le 0$$
# $$ \mathcal{D}=[\textbf{x}^{lb},\ \textbf{x}^{ub}], $$
# </spam>
#
# <spam style="font-size: 120%"> in which, for a given $\textbf{x}$, $W_p(\textbf{x})$ is the work consumption of the pressure manipulator unit $p$ in the set of compressors and pumps $PM$, $MITA(\textbf{x})$ is the minimum internal temperature approach in the MSHEs, $\mathcal{D}$ is a box constraint for the decision variables bounded by $\textbf{x}^{lb}$ and $\textbf{x}^{ub}$, and $\dot{m}_{LNG}$ is the mass flow rate of the natural gas stream. Therefore, the objective function $f$ in this optimization problem is the specific work consumed in the refrigeration cycle with respect to the mass flow rate of liquefied natural gas (LNG). Notice that the value of $W_p(\textbf{x})$ as well as $MITA(\textbf{x})$ are obtained in the black-box chemical process simulator, and so are $f(\textbf{x})$ and $g(\textbf{x})$. Then, $f(\textbf{x})$ and $g(\textbf{x})$ are known only at sampled points and make the optimization problem equivalent to equation above.</spam>
# +
import numpy as np
from scipy import optimize
from pprint import pprint
import os
from pathlib import Path
# Repository root (two levels up) so dwsimopt can be imported from source.
dir_path = str(Path(os.getcwd()).parent.parent.absolute())
print(dir_path)
import sys
sys.path.append(dir_path)
# Force a fresh import of dwsimopt.sim_opt when re-running this cell.
if 'dwsimopt.sim_opt' in sys.modules: # Is the module in the register?
    del sys.modules['dwsimopt.sim_opt'] # If so, remove it.
    del SimulationOptimization  # NOTE(review): NameError if the name was never bound in this session
from dwsimopt.sim_opt import SimulationOptimization
# +
# Getting DWSIM path from system path.
# BUG FIX: initialize to None first -- the original only assigned
# `path2dwsim` inside the loop, so the check below raised NameError
# whenever no PATH entry contained "\DWSIM".
path2dwsim = None
for k,v in enumerate(os.environ['path'].split(';')):
    if v.find('\DWSIM')>-1:
        path2dwsim = os.path.join(v, '')  # keeps the LAST matching entry
if path2dwsim is None:
    path2dwsim = "C:\\Users\\lfsfr\\AppData\\Local\\DWSIM7\\"  # fallback default
# Loading DWSIM simulation into Python (Simulation object)
sim_smr = SimulationOptimization(dof=np.array([]), path2sim= os.path.join(dir_path, "examples\\PRICO_LNG\\PRICO.dwxmz"),
                     path2dwsim = path2dwsim)
sim_smr.savepath = str(os.path.join(dir_path, "examples\\PRICO_LNG\\PRICO2.dwxmz"))
sim_smr.add_refs()
# Instantiate automation manager object (DWSIM's .NET automation bridge)
from DWSIM.Automation import Automation2
# import clr
# clr.AddReference( os.path.join(dir_path, 'dwsimopt\\System.Buffers2.dll') )
if ('interf' not in locals()):    # create automation manager only once per session
    interf = Automation2()
# Connect simulation in sim.path2sim
sim_smr.connect(interf)
# +
# Add dof (degrees of freedom: refrigerant compound flows + two pressures)
def set_comp_massflow(x, simobj):
    # NOTE(review): resolves the object but never assigns `x` to it -- unused/no-op.
    ms = sim_smr.flowsheet.SimulationObjects[simobj.Name]
def set_property(x, obj, property=None):
    # NOTE(review): rebinding the local name `obj` does NOT write back into the
    # flowsheet -- this helper is effectively a no-op. Confirm against
    # dwsimopt's expected dof-callback semantics before relying on the two
    # pressure dofs below.
    if property==None:
        obj = x
# #ACCESS PROPERTY CORRECTLY
# sim_smr.add_dof(lambda x: set_property(x, sim_smr.flowsheet.SimulationObjects[sim_smr.flowsheet.GetFlowsheetSimulationObject('MSTR-02').Name], property='massflow') )
# sim_smr.add_dof(lambda x: set_property(x, sim_smr.flowsheet.GetFlowsheetSimulationObject("n2").Phases[0].Properties.massflow) )
# sim_smr.add_dof(lambda x: set_property(x, sim_smr.flowsheet.GetFlowsheetSimulationObject("c1").Phases[0].Properties.massflow) )
# sim_smr.add_dof(lambda x: set_property(x, sim_smr.flowsheet.GetFlowsheetSimulationObject("c2").Phases[0].Properties.massflow) )
# sim_smr.add_dof(lambda x: set_property(x, sim_smr.flowsheet.GetFlowsheetSimulationObject("c3").Phases[0].Properties.massflow) )
# Four refrigerant compound mass flows (compound indices 7, 0, 1, 2 of MR-1):
sim_smr.add_dof(lambda x: sim_smr.flowsheet.GetFlowsheetSimulationObject("MR-1").SetOverallCompoundMassFlow(7,x))
sim_smr.add_dof(lambda x: sim_smr.flowsheet.GetFlowsheetSimulationObject("MR-1").SetOverallCompoundMassFlow(0,x))
sim_smr.add_dof(lambda x: sim_smr.flowsheet.GetFlowsheetSimulationObject("MR-1").SetOverallCompoundMassFlow(1,x))
sim_smr.add_dof(lambda x: sim_smr.flowsheet.GetFlowsheetSimulationObject("MR-1").SetOverallCompoundMassFlow(2,x))
# Evaporation and condensation pressures (see set_property NOTE above):
sim_smr.add_dof( lambda x: set_property(x, sim_smr.flowsheet.GetFlowsheetSimulationObject("VALV-01").OutletPressure) )
sim_smr.add_dof( lambda x: set_property(x, sim_smr.flowsheet.GetFlowsheetSimulationObject("COMP-4").POut) )
# adding objective function (f_i): total compression power
sim_smr.add_fobj(lambda : sim_smr.flowsheet.GetFlowsheetSimulationObject("Sum_W").EnergyFlow)
# adding constraints (g_i <= 0):
sim_smr.add_constraint(np.array([
    lambda : 3 - sim_smr.flowsheet.GetFlowsheetSimulationObject("MITA1-Calc").OutputVariables['mita'],
    lambda : 10*sim_smr.flowsheet.GetFlowsheetSimulationObject("MSTR-27").Phases[1].Properties.massfraction,  # no phase separation in the cycle
    lambda : 10*sim_smr.flowsheet.GetFlowsheetSimulationObject("MR-1").Phases[1].Properties.massfraction,  # no phase separation in the cycle
    lambda : 10*sim_smr.flowsheet.GetFlowsheetSimulationObject("MSTR-03").Phases[1].Properties.massfraction,  # no phase separation in the cycle
    lambda : 10*sim_smr.flowsheet.GetFlowsheetSimulationObject("MSTR-05").Phases[1].Properties.massfraction,  # phase separation before MSHE
    ]))
pprint(vars(sim_smr))
# +
# Initial simulation optimization setup
# Initial guess of optimization (4 mass flows in kg/s, 2 pressures in Pa)
x0 = np.array( [0.269/3600, 0.529/3600, 0.619/3600, 2.847/3600, 2.3e5, 48.00e5] )
# Testing for simulation at x0
sim_smr.calculate_optProblem(1.0*x0)
print(sim_smr.x_val,
      sim_smr.f_val,
      sim_smr.g_val)
# Test saving simulation at x0 in 'savepath'
sim_smr.interface.SaveFlowsheet(sim_smr.flowsheet,sim_smr.savepath,True)
# Inspecting simulation object
pprint(vars(sim_smr))
# +
# Setup for optimization
# convergence tolerances
xtol=0.01
ftol=0.01
maxiter=5 # +- 20 seconds per iteration
# decision variables bounds
bounds_raw = np.array( [0.5*np.asarray(x0), 1.5*np.asarray(x0)] ) # 50 % around base case
# NOTE(review): the two lines below overwrite the LAST variable's bounds with
# 153-253 and call it "precool temperature", but x0[-1] is 48e5 (a pressure)
# -- confirm the intended variable/index.
bounds_raw[0][-1] = 153 # precool temperature low limit manually
bounds_raw[1][-1] = 253 # precool temperature upper limit manually
# regularizer calculation: scale each variable to magnitude order 1e0
regularizer = np.zeros(x0.size)
import math
for i in range(len(regularizer)):
    regularizer[i] = 10**(-1*math.floor(math.log(x0[i],10))) # regularizer for magnitude order of 1e0
# bounds regularized
bounds_reg = regularizer*bounds_raw
bounds = optimize.Bounds(bounds_reg[0], bounds_reg[1])
# objective and constraints lambda definitions (inputs are regularized x)
f = lambda x: sim_smr.calculate_optProblem(np.asarray(x)/regularizer)[0:sim_smr.n_f]
g = lambda x: sim_smr.calculate_optProblem(np.asarray(x)/regularizer)[sim_smr.n_f:(sim_smr.n_f+sim_smr.n_g)]
nonlinear_constraint = optimize.NonlinearConstraint(g, -np.inf, 0, jac='2-point', hess=optimize.BFGS())
# -
print(sim_smr.n_f)
print(sim_smr.n_g)
res = sim_smr.calculate_optProblem(x0)
print(res[0:sim_smr.n_f])
print(res[sim_smr.n_f:(sim_smr.n_f+sim_smr.n_g)])
# +
# Local optimization with trust-region -> working to some extent
# print("starting local optimization")
# result = optimize.minimize( f, np.asarray(x0)*regularizer,
# method='trust-constr', jac='2-point', hess=optimize.BFGS(),
# constraints=[nonlinear_constraint], bounds=bounds, callback=None,
# options={'verbose': 3,
# 'xtol': xtol,
# 'maxiter': 1*maxiter,
# 'finite_diff_rel_step': None,
# 'initial_tr_radius': 0.1} )
# +
# Global optimization with PSO (particle swarm), penalty-barrier objective
# in regularized coordinates; population = 2 x n_dof.
from sko.PSO import PSO
# f_pen = lambda x: fpen_barrier(sim_smr,x/regularizer)
result_pso = PSO(func= lambda x: sim_smr.fpen_barrier(x/regularizer), n_dim=sim_smr.n_dof, pop=2*sim_smr.n_dof, max_iter=15, lb=bounds_reg[0], ub=bounds_reg[1], verbose=True)
result_pso.record_mode = True
if sim_smr.n_f > 1:
    print("Multi-objective optimization not supported (yet)")
elif sim_smr.n_f < 1:
    print("Invalid number of objective functions")
else:
    print("Starting global optimization")
    result_pso.run()
# +
# printing results of global optimization with Differential Evolution
# NOTE(review): the comments mention "Differential Evolution" but the
# optimizer used above is PSO.
# xpso = np.array([6.17810197e-05, 2.74573937e-04, 3.91942260e-04, 3.15410796e-04,
#                  2.66089439e-04, 1.96572335e+05, 4.53996283e+06, 2.45857440e+02])
xpso = result_pso.gbest_x/regularizer  # back to physical (unregularized) units
print(sim_smr.calculate_optProblem(xpso))
# saving results of local optimization with Differential Evolution
sim_smr.interface.SaveFlowsheet(sim_smr.flowsheet, sim_smr.savepath,True)
# -
sim_smr.dof
# Manual override of the last decision variable before re-evaluating.
xpso[-1] = 300
xpso
print(sim_smr.calculate_optProblem(xpso))
# saving results
sim_smr.interface.SaveFlowsheet(sim_smr.flowsheet, sim_smr.savepath,True)
# +
import matplotlib.pyplot as plt
print(f(result_pso.gbest_x))
print(g(result_pso.gbest_x))
sim_smr.interface.SaveFlowsheet(sim_smr.flowsheet, sim_smr.savepath,True)
print(result_pso.gbest_x)
pprint(result_pso)
plt.plot(result_pso.gbest_y_hist)  # convergence history of the best penalty value
plt.show()
| examples/PRICO_LNG/example_PRICO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/SotaYoshida/Lecture_DataScience/blob/2021/notebooks/Python_chapter_BayesianOptimization.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="zy7aOwXne87b"
# # ベイズ最適化による実験計画法
# + [markdown] id="WSCRPDv9bBpz"
# 以下では、ベイズ最適化を用いた実験計画法を見てみよう。
#
# 数学的部分やコードの詳細よりも「なんとなくこのあたりを探索しようかな」といった
# 人間の経験に依る部分を客観的な方法で置き換えた
# 実験計画の方法論の強力さを感じることが目的なので
# 難しいところはスキップしても構わない。
#
# ガウス過程の基本や詳細は[講義ノート](https://drive.google.com/file/d/1ZKi8DJFSg00xir1IoEQiw3z9vxmejeCv/view)(7章)に譲る.
#
#
# + id="XWIv70U6e6In"
#使うライブラリのインポート
import numpy as np
import matplotlib.pyplot as plt
import copy
from scipy import special
## Ground-truth function used to generate the data
def f(x):
    """Objective being optimized: sin(x) + 0.2*x."""
    return 0.2 * x + np.sin(x)
## Gaussian-process kernel (Matern 5/2) and covariance-matrix construction
def Mat52(Thetas, r):
    """Matern-5/2 kernel value at distance r; Thetas = (amplitude, lengthscale)."""
    tau, sigma = Thetas
    thetar = r * np.sqrt(5.0)/sigma
    return tau * (1.0 + thetar + (thetar**2) /3.0) * np.exp(-thetar)

def KernelMat(Thetas, xt, xp):
    """Build the covariance blocks between training inputs xt and
    prediction inputs xp: returns (K_tt, K_pt, K_pp)."""
    n_t, n_p = len(xt), len(xp)
    Ktt = np.zeros((n_t, n_t))
    Kpt = np.zeros((n_p, n_t))
    Kpp = np.zeros((n_p, n_p))
    for j in range(n_t):
        # training-training block (symmetric: fill both triangles at once)
        for i in range(j, n_t):
            val = Mat52(Thetas, abs(xt[i] - xt[j]))
            Ktt[i, j] = val
            Ktt[j, i] = val
        # prediction-training cross block
        for i in range(n_p):
            Kpt[i, j] = Mat52(Thetas, abs(xp[i] - xt[j]))
    # prediction-prediction block (symmetric)
    for j in range(n_p):
        for i in range(j, n_p):
            val = Mat52(Thetas, abs(xp[i] - xp[j]))
            Kpp[i, j] = val
            Kpp[j, i] = val
    return Ktt, Kpt, Kpp
## Posterior mean and covariance of the GP at the prediction points
def calcSj(cLinv, Kpt, Kpp, yt, mu_yt, mu_yp):
    """Given the inverse Cholesky factor of K_tt, return the GP posterior
    (mean, covariance) at the prediction inputs."""
    # alpha = K_tt^{-1} (y - mu), computed via the Cholesky factor
    alpha = np.dot(cLinv.T, np.dot(cLinv, yt - mu_yt))
    tKtp = np.dot(cLinv, Kpt.T)
    posterior_mean = mu_yp + np.dot(Kpt, alpha)
    posterior_cov = Kpp - np.dot(tKtp.T, tKtp)
    return posterior_mean, posterior_cov
## Cholesky factorization helper
def Mchole(tmpA, ln):
    """Cholesky-factor the (ln x ln) matrix tmpA = L L^T and return
    (L^{-1}, log det tmpA)."""
    cLL = np.linalg.cholesky(tmpA)
    # log det A = 2 * sum_i log L_ii
    log_diag_sum = sum(np.log(cLL[i, i]) for i in range(ln))
    return np.linalg.inv(cLL), 2.0 * log_diag_sum
## Acquisition function (expected improvement); picks the next point to evaluate
def calcEI(xp, mujoint, sigmaj, xbest, ybest):
    """Expected improvement at each candidate in xp.

    Returns (x_next, y_next, EIs, index) where y_next is the noise-free true
    observation sin(x) + 0.2*x at the argmax-EI candidate. `xbest` is
    accepted for interface compatibility but unused.
    """
    EIs = []
    for i in range(len(xp)):
        z = (mujoint[i] - ybest) / sigmaj[i]
        EIs.append((mujoint[i] - ybest) * Phi(z) + sigmaj[i] * np.exp(-0.5 * z**2))
    xnew, ynew, ind = xybest(xp, EIs)
    # evaluate the true function at the chosen point (noise term disabled)
    ynew = np.sin(xnew) + 0.2 * xnew  # + 0.01 * (0.5-np.random.rand())
    return xnew, ynew, EIs, ind

def Phi(z):
    """Standard-normal CDF via the complementary error function."""
    return 0.5 * special.erfc(-(z / (2**0.5)))

def xybest(xt, yt):
    """Return (x, y, index) of the maximal y."""
    ind = np.argmax(yt)
    return xt[ind], yt[ind], ind
## Plotting helpers
def plotGP0(xt,yt,xp,ytrue):
    """Plot the initial observed points against the true function."""
    fig = plt.figure(figsize=(8,4))
    axT = fig.add_subplot(1,1,1)
    axT.set_xlabel("x"); axT.set_ylabel("y")
    axT.set_xlim(-2.0,12); axT.set_ylim(-2.0,5.0)
    axT.scatter(xt,yt,marker="o",color="black",label="Data")
    axT.plot(xp,ytrue,color="red",label="True",linestyle="dotted")
    axT.legend(loc="upper right")
    plt.show()
    #plt.savefig("BayesOpt_initial.pdf",bbox_inches="tight", pad_inches=0.1)
    plt.close()
def plotGP(nxt,nyt,nxp,xp,ytrue,mujoint,sigmaj,ysamples,EIs):
    """Plot the GP posterior (left panel) and the acquisition function (right)."""
    fig = plt.figure(figsize=(16,4))
    axT = fig.add_subplot(121)
    axB = fig.add_subplot(122)
    axT.set_xlabel("x"); axT.set_ylabel("y")
    axB.set_xlabel("x"); axB.set_ylabel("Acquisition function")
    axT.set_xlim(-2.0,12); axT.set_ylim(-2.0,5.0)
    axB.set_xlim(-2.0,12)
    axT.scatter(nxt,nyt,marker="o",color="black",label="Data")
    # faint posterior draws
    for i in range(len(ysamples)):
        axT.plot(nxp,ysamples[i],alpha=0.1)
    axT.plot(nxp,mujoint,label="GP mean",linestyle="dashed",color="blue")
    axB.plot(nxp,EIs,color="green")
    axB.set_yticklabels([])
    # +-1 sigma band around the posterior mean
    axT.fill_between(nxp,mujoint-sigmaj,mujoint+sigmaj,color="blue", alpha=0.3)
    axT.plot(xp,ytrue,color="red",label="True",linestyle="dotted")
    axT.legend(loc="upper right")
    plt.show()
    plt.close()
# + id="jajF8gfhfJOo" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="4f16979d-3b5f-4700-94e7-8fc3c8f0d634"
# Kernel hyperparameters (amplitude, lengthscale) and the toy dataset.
Thetas=[2.0,2.0]
oxt = np.array([ 0.0 + 1.02*i for i in range(11)])
# prediction grid: every grid point that is not an observation location
xp = []
for tmp in np.arange(-2.0,12.0,0.1):
    if (tmp in oxt)==False:
        xp += [ tmp ]
xp = np.array(xp)
oyt = f(oxt)
ytrue = f(xp)
SVs=[]
# start with only two observed points
xt =[oxt[2],oxt[6]]; yt =[oyt[2],oyt[6]]
plotGP0(xt,yt,xp,ytrue)
# + [markdown] id="xk7mXO-tfLVj"
# 一般には真の関数(赤色)は分からないので、勾配も計算できない。
# 数値的に勾配を計算するには、各点で微小にxをずらした場合の観測が必要、
# さらに、学習率を変えながら適当な値を探索するというのは、
# 1回のデータの観測(測定,取得,計算, etc.)コストが高い場合はあまり良い方策ではない。
# ([学習率]最適化の章を参照)
#
# 仮に勾配の計算ができたとしても、
# このデータの様に背後にある真の関数が多峰的(multimodal)な場合、
# 勾配のみに基づく単純な最適化手法では局所解に停留する危険もある。
# + id="5EmEXqIDfJ5P" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="c2195de5-b03e-46b6-a839-0b264312aee0"
# Bayesian-optimization loop: fit the GP posterior, maximize EI, observe, repeat.
Thetas=[2.0,2.0]
nxp = list(copy.copy(xp))  # candidate pool (shrinks as points get evaluated)
nxt = copy.copy(xt)        # observed x
nyt = copy.copy(yt)        # observed y
n_iter = 10 ## upper limit on the number of search iterations
xopt = 6; yopt = -1.e+30
SVs=[]
plot = True
#plot = False
for iter in range(n_iter):
    lt=len(nxt); lp=len(nxp)
    Ktt,Kpt,Kpp = KernelMat(Thetas,nxt,nxp)
    # zero-mean prior
    mu_yt= np.array([ 0.0 for i in range(lt)])
    mu_yp= np.array([ 0.0 for i in range(lp)])
    cLinv,logdetK = Mchole(Ktt,lt)
    mujoint,Sjoint = calcSj(cLinv,Kpt,Kpp,nyt,mu_yt,mu_yp)
    sigmaj=[ Sjoint[j][j] for j in range(lp)]
    # posterior samples drawn for visualization only
    ysamples = [np.random.multivariate_normal(mujoint,Sjoint) for i in range(10)]
    SVs += [ [ mujoint, sigmaj] ]
    xbest,ybest,ind= xybest(nxt,nyt)
    xnew,ynew,EIs,ind = calcEI(nxp,mujoint,sigmaj,xbest,ybest)
    if plot :
        plotGP(nxt,nyt,nxp,xp,ytrue,mujoint,sigmaj,ysamples,EIs)
    nxt += [ xnew ]; nyt += [ ynew ]
    nxp.pop(ind)  # remove the evaluated candidate from the pool
    if ynew > yopt:
        xopt= xnew; yopt = ynew
    print(iter, xopt, yopt)
# + [markdown] id="T6Z8roWgcret"
# 探索点が増えるにつれて、効率的に最適解が探索出来ている(っぽい)。
#
# 4回目の探索でx=8.1が探索されていて、
# 真の解8.055...にそこそこ近いものが得られている。
# + [markdown] id="s1KRB5HALEEX"
# 同じデータで、勾配法による最適化もやってみる。
# + colab={"base_uri": "https://localhost:8080/"} id="iLMN0pJ3KcwN" outputId="1c26910c-6f10-4e9c-881a-908e1af2b4b6"
import numpy as np
def f(x):
    """Objective: sin(x) + 0.2*x (same function as the GP section)."""
    return np.sin(x) + 0.2 * x
def derf(x):
    """Analytic derivative of f."""
    return np.cos(x) + 0.2
xexact = 8.055339554764814  # known optimum, used only for the stopping test
x = 6
xopt = x; yopt=f(x)
tol = 1.e-1
eta = 1.e-1  # learning rate (step size)
itnum = 10**4
# Gradient ASCENT (maximization): step along +f'(x).
for i in range(itnum):
    x += eta * derf(x)
    y = f(x)
    if y > yopt:
        xopt = x
        yopt = y
    if abs(xexact-xopt) < tol :
        break
print("探索回数",i, "最適解(x,y)=",xopt,yopt)
# + [markdown] id="sOn3qlTGURH2"
# $\eta$を適切に選べれば、より少ない探索回数でより正確な解が求まるが、
# そんなことができたら苦労はしない...。
#
# また今の場合、勾配は式から計算したが、
# 実際には差分をとって微分を近似することになるため
# 探索回数は少なくとも2倍-3倍程度必要になる。
# + [markdown] id="4jSIRj1Dc3k8"
# **言及しなかった重要な事項**
#
# * カーネル関数の選択と依存性
# * ハイパーパラメータの最適化 or サンプリング
# * 獲得関数の定義・選択と依存性
# * 数値計算(とくにガウス過程の部分)のTips
# + [markdown] id="c1GBk01r0jz8"
# #### 備忘録: ライブラリの出力に関して
# + id="C4kpASFXurFl" colab={"base_uri": "https://localhost:8080/"} outputId="50a59f96-d156-4415-8fdc-e6f5be9c513c"
# Reference: posterior mean from the hand-rolled GP, for comparison with
# the sklearn and GPy results in the cells below (same kernel settings).
Thetas=[1.0,1.0]
nxp = np.linspace(-2,12,10)
nxt = copy.copy(xt);nyt = copy.copy(yt)
n_iter = 10 ## upper limit on the number of search iterations (unused here)
xopt = 6; yopt = -1.e+30
SVs=[]
plot = False
#plot = False
lt=len(nxt); lp=len(nxp)
Ktt,Kpt,Kpp = KernelMat(Thetas,nxt,nxp)
mu_yt= np.array([ 0.0 for i in range(lt)])
mu_yp= np.array([ 0.0 for i in range(lp)])
cLinv,logdetK = Mchole(Ktt,lt)
mujoint,Sjoint = calcSj(cLinv,Kpt,Kpp,nyt,mu_yt,mu_yp)
sigmaj=[ Sjoint[j][j] for j in range(lp)]
print("train", nxt,nyt)
print("xp", nxp)
print("My muj ", mujoint)
# + id="_5TGmrsxxomr" colab={"base_uri": "https://localhost:8080/"} outputId="34980277-5f32-4a43-9c39-a9b35cbd2fe1"
from sklearn.gaussian_process import kernels as sk_kern
import sklearn.gaussian_process as skGP
# sklearn GP: Matern 5/2 with the lengthscale fixed (bounds collapsed to 1.0)
# and essentially noise-free regression (alpha ~ 0), to match the hand-rolled GP.
nxp = np.linspace(-2,12,10)
nxt = np.array(copy.copy(xt))
nyt = np.array(copy.copy(yt))
kern = sk_kern.Matern(length_scale=1.0, length_scale_bounds=(1.0,1.0), nu=2.5)
sGP = skGP.GaussianProcessRegressor(
    kernel=kern,
    alpha=1e-15,
    optimizer="fmin_l_bfgs_b",
    n_restarts_optimizer=0)
sGP.fit(nxt.reshape(-1, 1), nyt)
print("sGP.kernel_", sGP.kernel_)
pred_mean, pred_std= sGP.predict(nxp.reshape(-1,1), return_std=True)
print(pred_mean)
# + id="4W09PtvH0hBl"
# !pip install GPy
# + id="rIZjn7IBxvsu" colab={"base_uri": "https://localhost:8080/"} outputId="4a6c9da0-847d-4e22-9a14-0c520a0b1e2d"
import GPy
# GPy comparison: note noise_var is set explicitly to ~0 here because GPy's
# DEFAULT noise variance is 1.0 (see the discussion in the markdown below).
nxp = np.linspace(-2,12,10).reshape(-1,1)
nxt = np.array(copy.copy(xt)).reshape(-1,1)
nyt = np.array(copy.copy(yt)).reshape(-1,1)
kern = GPy.kern.Matern52(input_dim=1,variance=1.0,lengthscale=1.0)
model = GPy.models.GPRegression(X=nxt, Y=nyt, kernel=kern,noise_var=1.e-15, normalizer=None)
print(model)
pred_mean, pred_var = model.predict(nxp)
print(pred_mean)
# + [markdown] id="6czVZ0d87wu_"
# GPyでは、予測誤差がデフォルトで1.0に設定されていることがわかった。
# これはかなり注意が必要。
#
# GPに限らず多くの場合、データを白色化(平均0,分散1)して使うので、
# 予測誤差の分散が1.0というデフォルト値を使うというのは、
# [GPの予測が、データ全体の広がりと同程度誤差を持つ」
# ことを仮定していて、なかなか非現実的な場合なのでは?
#
# Webに転がってるGPyを使ったコードだと、あまりこのあたりは認識されていないように思う。
# + [markdown] id="dPK_KIGcyuod"
# # LICENSE
# + [markdown] id="q943wB7Z4DYK"
#
# Copyright (C) 2021 <NAME>
#
# [ライセンス:クリエイティブ・コモンズ 4.0 表示 (CC-BY 4.0)](https://creativecommons.org/licenses/by/4.0/deed.ja)
| notebooks/Python_chapter_BayesianOptimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/San13deep/-Credit-Card-Default-Prediction/blob/main/Credit_Card_Default_Prediction_Capstone_Project_3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="RbRG2UW0HQqh"
# # <b><u> Project Title : Predicting whether a customer will default on his/her credit card </u></b>
# + [markdown] id="Bbf-D1SLI6ZW"
# # **Problem Description**
#
# ### This project is aimed at predicting the case of customers default payments in Taiwan. From the perspective of risk management, the result of predictive accuracy of the estimated probability of default will be more valuable than the binary result of classification - credible or not credible clients. We can use the K-S chart to evaluate which customers will default on their credit card payments.
# + [markdown] id="qvcaCkVTJEtJ"
# ## <b> Data Description </b>
#
# ### <b>Attribute Information: </b>
#
# ### This research employed a binary variable, default payment (Yes = 1, No = 0), as the response variable. This study reviewed the literature and used the following 23 variables as explanatory variables:
# * ### X1: Amount of the given credit (NT dollar): it includes both the individual consumer credit and his/her family (supplementary) credit.
# * ### X2: Gender (1 = male; 2 = female).
# * ### X3: Education (1 = graduate school; 2 = university; 3 = high school; 4 = others).
# * ### X4: Marital status (1 = married; 2 = single; 3 = others).
# * ### X5: Age (year).
# * ### X6 - X11: History of past payment. We tracked the past monthly payment records (from April to September, 2005) as follows: X6 = the repayment status in September, 2005; X7 = the repayment status in August, 2005; . . .;X11 = the repayment status in April, 2005. The measurement scale for the repayment status is: -1 = pay duly; 1 = payment delay for one month; 2 = payment delay for two months; . . .; 8 = payment delay for eight months; 9 = payment delay for nine months and above.
# * ### X12-X17: Amount of bill statement (NT dollar). X12 = amount of bill statement in September, 2005; X13 = amount of bill statement in August, 2005; . . .; X17 = amount of bill statement in April, 2005.
# * ### X18-X23: Amount of previous payment (NT dollar). X18 = amount paid in September, 2005; X19 = amount paid in August, 2005; . . .;X23 = amount paid in April, 2005.
# + [markdown] id="zqivftJtKViF"
# # **1. Import Data**
# + id="ZxIPV_lNKhqR"
# Importing the libraries
import numpy as np
import pandas as pd
from numpy import math
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform
from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import plot_roc_curve
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import plot_precision_recall_curve
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from xgboost import XGBClassifier
import warnings
warnings.filterwarnings('ignore')
# + colab={"base_uri": "https://localhost:8080/"} id="cecFSJ16KtAC" outputId="cf2cbfa8-91cf-4996-d7f2-7d600f8644cd"
# Mount Google Drive so the dataset file is reachable from this Colab runtime
from google.colab import drive
drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/"} id="ZKcv-vlLK3Xk" outputId="256fecf1-de39-42a0-e681-2300e704beab"
# xlrd is required by pandas to read legacy .xls Excel files
# !pip install --upgrade xlrd
# + id="oKZGdee2LT2G"
# Load the raw dataset (an Excel .xls file, despite the old "csv" comment)
file_path = ('/content/drive/MyDrive/AlmaBetter/Capstone Projects/Credit Card Default Prediction/default of credit card clients.xls')
df=pd.read_excel(file_path)
# + colab={"base_uri": "https://localhost:8080/", "height": 322} id="YHfhV6U0LmGx" outputId="1c6421d9-5bad-488b-b59b-96e504c59778"
df.head()
# + [markdown] id="0Z3J7BbtL_Pc"
# # **2. Understand the Data**
# + colab={"base_uri": "https://localhost:8080/"} id="bqstsKijME2N" outputId="1e4989b1-c10f-4acb-fe2d-a4a541b5974d"
df.shape
# + id="2h6qyB67Msam"
# Rename for consistency: PAY_0 -> PAY_1 matches the numbering of the other
# payment-history columns, and the long target name gets a short alias.
df.rename(columns={'PAY_0':'PAY_1','default payment next month':'DEF_PAY_NMO'},inplace = True)
# + colab={"base_uri": "https://localhost:8080/", "height": 270} id="snbEawxGNOCp" outputId="2943d09c-4f27-4b58-80cd-d371b75bc7ba"
df.head(5)
# + colab={"base_uri": "https://localhost:8080/"} id="uOjbwgWiNUim" outputId="0c6b87b9-3d7b-4f9f-eab4-4f932eb85e7b"
# Melt the six repayment-status columns into one long series to inspect the
# overall distribution of status codes across all months at once.
pay = df[['PAY_1', 'PAY_2' ,'PAY_3','PAY_4', 'PAY_5', 'PAY_6']]
pay_melt = pd.melt(pay)
print(pay_melt['value'].value_counts())
# + colab={"base_uri": "https://localhost:8080/", "height": 500} id="Gf16J8plOKue" outputId="9768d47f-9760-401f-a023-660be05ffe47"
# NOTE(review): this `locale` import is unused — value_counts(normalize=True)
# below uses its own keyword argument, not this name. Safe to delete.
from locale import normalize
# Get the proportion of customers who had default payment in the next month (Oct.2005)?
# About 22% customers had default payment next month
x=df['DEF_PAY_NMO'].value_counts(normalize=True)
plt.figure(figsize=(8,8))
plt.pie(x, colors=['springgreen', 'coral'], shadow=True, autopct='%1.2f%%', startangle=200)
plt.legend(labels=['0','1'])
plt.title(" proportion of customers who had default payment in the next month")
# + [markdown] id="52mmXY-qPFgr"
# ## **3. Data Cleaning**
# + [markdown] id="oC_DhDiXPyza"
# **(1) Check duplicate records**
# + colab={"base_uri": "https://localhost:8080/"} id="CXbh6OZFOSn-" outputId="e8074e31-b35b-4a7c-a534-df882de1c090"
# Check if there is any duplicate IDs
condition = bool(df.duplicated(subset = 'ID').any())
if condition:
    print('There are duplicate IDs')
else:
    print('No duplicate IDs')
# + [markdown] id="rZT2dsSyQALK"
# **(2) Identify outliers**
# + colab={"base_uri": "https://localhost:8080/", "height": 291} id="FliFnaj0OVcU" outputId="0ca8fe76-6337-4c09-ee6f-dfcfa6c990b2"
# Boxplot of the credit-limit column to spot extreme values.
# NOTE(review): the axis labels below look borrowed from a histogram — a single
# boxplot has no "# of Customers" axis; confirm intent.
df["LIMIT_BAL"].plot(kind="box")
plt.xlabel('Credit limit in NT$', fontweight='bold')
plt.ylabel('# of Customers', fontweight='bold')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 145} id="kCcBl6W5Oiw-" outputId="db2c8f8b-e627-431a-a642-7ad5c824bfaa"
# List the records whose credit limit exceeds NT$900,000 (visual outliers above)
outliers = df.loc[df['LIMIT_BAL']>900000]
outliers
# + [markdown] id="M6tnm44QQS94"
# **(3) Check numerical columns**
# + colab={"base_uri": "https://localhost:8080/", "height": 364} id="07qNu8fOQI0w" outputId="4580012f-9eac-4591-929e-f1947fceeba0"
# Get the statistic summary of the columns
# No value looks extremely unreasonable in these columns
df.describe()
# + [markdown] id="KCBSNbyMQqg3"
# **(4) Check categorical columns**
# + colab={"base_uri": "https://localhost:8080/"} id="tholpEMaQgdA" outputId="5916ffb2-30f9-437c-edcc-e110ff2d267f"
# Get SEX column's distribution. 1: male; 2: female
# No undocumented SEX code
df["SEX"].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="lYHlWtv1UOiD" outputId="95e5f8d0-4efd-40f0-cbb2-2a16d5d32df2"
print(df['EDUCATION'].unique())
df['EDUCATION'].value_counts()
# + id="dY-9-v1tUSzV"
# Collapse the undocumented/others education codes (4, 5, 6) into a single 0 bucket
df['EDUCATION']=df['EDUCATION'].replace({4:0,5:0,6:0})
# + colab={"base_uri": "https://localhost:8080/"} id="zy2BaYvjUoF2" outputId="bd0b9aad-8041-49bb-9aff-6cc27a420108"
df['EDUCATION'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="FEoPexJ-Ue9k" outputId="557f7c7f-ddfd-4f5e-cb89-43282e87acc8"
# From dataset description: MARRIAGE: Marital status (1=married, 2=single, 3=others), but there is also 0
# df["MARRIAGE"].unique()
print(df['MARRIAGE'].value_counts())
# + colab={"base_uri": "https://localhost:8080/"} id="GblWjNDUUv4_" outputId="d2a6f29a-6df3-4dd4-a2d4-b1407aa7d9d8"
# Fold the undocumented 0 code into the documented "others" category (3)
df["MARRIAGE"] = df["MARRIAGE"].replace({0:3})
print(df['MARRIAGE'].value_counts())
# + [markdown] id="nkwgbVikWmfz"
# ### **4. Trends, Hypotheses and Findings**
# + id="fWs4iYd2U5Hj"
# Create a new column "HAS_DEF" to indicate customers who have at least 1 default payment from PAY_1 to Pay_6
# 0 : no default ; 1: has default
# A status code > 1 means a payment delayed by two months or more in that cycle.
def_condition =(df.PAY_1>1) | (df.PAY_2>1) | (df.PAY_3>1) | (df.PAY_4>1) | (df.PAY_5>1) | (df.PAY_6>1)
df.loc[def_condition, "HAS_DEF"] = 1
# Rows never touched above are still NaN; mark them as "no default".
df.loc[df.HAS_DEF.isna(), "HAS_DEF"] = 0
# print(type(def_condition))
# + colab={"base_uri": "https://localhost:8080/", "height": 270} id="K7Sr83gqU8ED" outputId="7403b0f5-4b54-4547-fa6f-cf9f5c8961f2"
df.head(5)
# + id="36F1W2CBW4tB"
# Define a function to plot bar chart since there will be multiple bars charts to plot
def relationship_bar(column):
    """Draw a stacked bar chart of the non-default vs. default share per category of `column`.

    Reads the module-level dataframe `df` (requires a 'HAS_DEF' column and `column`).
    Each bar sums to 100%: the lower segment is the no-default share, the upper
    segment the default share, both annotated with their percentage.
    """
    # Column-normalised crosstab: within each category, rows (HAS_DEF 0/1) sum to 1
    shares = pd.crosstab(index=df['HAS_DEF'], columns=df[column], normalize='columns')
    shares = shares.apply(lambda frac: round(frac, 2))  # two decimals for display

    categories = shares.columns
    no_default_share = shares.iloc[0].to_list()   # HAS_DEF == 0
    default_share = shares.iloc[1].to_list()      # HAS_DEF == 1

    fig, ax = plt.subplots(figsize=(10, 5))
    width = 0.5
    lower_bars = ax.bar(categories, no_default_share, width, label="No default")
    upper_bars = ax.bar(categories, default_share, width, bottom=no_default_share, label="Has default")

    ax.set_title(f"Default by {column}", fontweight="bold")
    ax.set_xlabel(column, fontweight="bold")
    ax.set_ylabel("Default percentage", fontweight="bold")
    ax.legend(loc="best")
    plt.xticks(list(range(len(categories))), categories, rotation=90)
    plt.yticks(fontsize=9)

    # Annotate every segment with its percentage, centred inside the segment
    for lower, upper in zip(lower_bars, upper_bars):
        h_lower = lower.get_height()
        h_upper = upper.get_height()
        plt.text(lower.get_x() + lower.get_width() / 2., h_lower / 2., f"{h_lower:.0%}",
                 ha="center", va="center", color="white", fontsize=9, fontweight="bold")
        plt.text(upper.get_x() + upper.get_width() / 2., h_lower + h_upper / 2., f"{h_upper:.0%}",
                 ha="center", va="center", color="white", fontsize=9, fontweight="bold")
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 346} id="CsvhF2FE-LXY" outputId="0d4c90d3-80d4-4f3c-c791-8ade1cd82a7f"
relationship_bar("SEX")
# + colab={"base_uri": "https://localhost:8080/", "height": 346} id="3D02Xg7QXNkf" outputId="14f735eb-fbc6-4db8-b393-ea91169884ed"
# Plot a bar chart to show default by education level
relationship_bar('EDUCATION')
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="xX2g_7ZTXRSt" outputId="0796f687-9710-431c-b4af-7329b0390558"
# Use boxplot to visualize credit limit grouped by education level
# NOTE(review): the tick labels assume unique() returns codes in the order
# 0, 1, 2, 3 — verify against the printed data before trusting the labels.
data = []
x=df['EDUCATION'].unique()
for i in x:
    temp = df.loc[df.EDUCATION == i, "LIMIT_BAL"]
    data.append(temp)
fig, ax = plt.subplots()
ax.boxplot(data)
ax.set_xticklabels(["others","grad school", "university", "high school"])
plt.show()
print(data)
# + colab={"base_uri": "https://localhost:8080/", "height": 346} id="6tN2r8HxXVvU" outputId="bac0204a-97b5-45c2-e5c8-abec077610fe"
# Plot a bar chart to visualize default and non-default distribution by marital status
relationship_bar('MARRIAGE')
# + id="14nLCT7y-bSd"
# Segment the 'AGE' column to 6 groups
# NOTE: this overwrites AGE in place with a categorical, so every later cell
# (including the model features) sees the binned version.
bins= [21,30,40,50,60,70,80]
labels = ['20-30','30-40','40-50','50-60','60-70','70-80']
df['AGE'] = pd.cut(df['AGE'],bins=bins, labels=labels,right=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 369} id="SxMyGXnW-goP" outputId="eb0920df-bdcf-4f67-8f68-acc7c596664d"
relationship_bar('AGE')
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="NDPZrb28XYQG" outputId="ea176483-d266-438d-96ae-fc0c486bc58b"
# Subset a dataframe with the records that have default
has_default = df[df['HAS_DEF']== 1]
default_trend = has_default[['PAY_6','PAY_5','PAY_4','PAY_3','PAY_2','PAY_1']].sum(axis=0)
# Draw a line chart to show the trend. The lower the number, the shorter delayed payment
fig,ax = plt.subplots()
ax.plot(default_trend)
plt.xticks(['PAY_6','PAY_5','PAY_4','PAY_3','PAY_2','PAY_1'],['Apr','May','Jun','Jul','Aug','Sep'])
plt.xlabel('Months in 2005',fontweight='bold')
plt.ylabel('Total delayed months',fontweight='bold')
plt.title('Delayed payment trend',fontweight='bold')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="YMf_m_59fPYq" outputId="ae8574a2-4473-4643-fd19-7ccc5207f0c1"
# Make a boxplot to visualize credit limit and default payment next month
# 1: default next month; 0: no default next month
def0 = df.loc[df['DEF_PAY_NMO'] == 0,'LIMIT_BAL']
def1 = df.loc[df['DEF_PAY_NMO'] == 1,'LIMIT_BAL']
fig, ax = plt.subplots()
ax.boxplot([def0, def1], showfliers=False)
ax.set_xticklabels(['No_default',"Default"],fontweight ='bold')
ax.set_ylabel('Credit limit',fontweight ='bold')
ax.set_title('Credit limit & default next month',fontweight ='bold')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="C1UyoyWIfPO2" outputId="3cd1b9ca-2419-4d8f-f114-8bfd4bb9cd80"
# Get statistic summary of bill statement columns
# The min numbers are negative
bill = df[['BILL_AMT1','BILL_AMT2','BILL_AMT3','BILL_AMT4','BILL_AMT5','BILL_AMT6']]
bill.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="Xxn2iKc-fU01" outputId="1bda979c-f015-4f69-b804-8db6a9f327f3"
# How many bills have negative amount?
# There are 3932 bills with negative amounts, each month has 599-688 records
bill_melt = pd.melt(bill, var_name = 'bill_cycle',value_name = 'amount')
neg_bill = bill_melt[bill_melt['amount']<0]
neg_bill.groupby('bill_cycle').count()
# + colab={"base_uri": "https://localhost:8080/"} id="T_7aA550fXYY" outputId="7435e330-b3f0-4957-d09a-a54834bf9f55"
# Get the average amount of negative bill each month
# Use USD/NTD = 30 to get the amount in USD. The average negative amount is $38-$102
neg_bill_mean_ndt = neg_bill.groupby('bill_cycle')['amount'].mean()
print('Average negative amounts in NTD are: \n')
print(neg_bill_mean_ndt)
print('\nAverage negative amounts in USD are: \n')
print(neg_bill_mean_ndt/30)
# + colab={"base_uri": "https://localhost:8080/"} id="NzanYwjGfaP2" outputId="f35c8d69-9771-4caa-ee60-9951165286bb"
# Is there any bill amount that is greater than credit limit?
condition1 = df['BILL_AMT1'] > df['LIMIT_BAL']
condition2 = df['BILL_AMT2'] > df['LIMIT_BAL']
condition3 = df['BILL_AMT3'] > df['LIMIT_BAL']
condition4 = df['BILL_AMT4'] > df['LIMIT_BAL']
condition5 = df['BILL_AMT5'] > df['LIMIT_BAL']
condition6 = df['BILL_AMT6'] > df['LIMIT_BAL']
large_bill = df[condition1 | condition2 |condition3 | condition4 | condition5 | condition6]
large_bill['HAS_DEF'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="wEex0JRrfgy6" outputId="cbd85103-eb31-4edc-b2d8-c5756672c281"
# Customers whose six bill amounts sum to zero (no activity) and their default rate
bill_amt = df[['BILL_AMT1','BILL_AMT2','BILL_AMT3','BILL_AMT4','BILL_AMT5','BILL_AMT6']]
no_transaction = bill_amt[bill_amt.sum(axis=1) ==0]
no_transaction
no_transaction_de=df.loc[no_transaction.index,['DEF_PAY_NMO']]
no_transaction_de.value_counts()
# + colab={"base_uri": "https://localhost:8080/", "height": 270} id="iE8wNaXyfjGq" outputId="9321e1bb-561b-4ca8-9a21-f3704fa29852"
df.head(5)
# + colab={"base_uri": "https://localhost:8080/"} id="hZmNm1WVtRx3" outputId="6c92cfc4-5340-4675-bd3e-a957fa7ce673"
# Define predictor variables and target variable
X = df.drop(columns=['ID','DEF_PAY_NMO'])
y = df['DEF_PAY_NMO']
# Save all feature names as list
feature_cols = X.columns.tolist()
#print(feature_cols)
# Extract numerical columns and save as a list for rescaling
X_num = X.drop(columns=['SEX', 'EDUCATION', 'MARRIAGE', 'AGE'])
num_cols = X_num.columns.tolist()
print(num_cols)
# + colab={"base_uri": "https://localhost:8080/", "height": 346} id="MKYgOFrxuWkV" outputId="bd98fd7a-c664-4745-9513-5c2415602d7f"
# Check target classes balance
cla_bal = df['DEF_PAY_NMO'].value_counts(normalize=True)
print(cla_bal)
# Plot the classes
cla_bal.plot(kind = 'bar')
plt.title('Nondefault(0) and default(1) comparison',fontweight = "bold")
plt.xlabel('Classes')
plt.ylabel('Percentage')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 270} id="tLNUyzoEyFX9" outputId="3808c6c3-d659-4d72-d0a4-1454be2f1ed1"
df.head(5)
# + id="zTGal37fuzGf"
#define a function that count for imbalances
def data_split(x, y, imbalance=False):
    """Split features/target into stratified train/test sets (70/30).

    Parameters
    ----------
    x : feature matrix
    y : target vector
    imbalance : bool, default False
        When True, oversample the *training* split with SMOTE so both
        classes are balanced before model fitting (test split untouched).

    Returns
    -------
    X_train, X_test, y_train, y_test
    """
    # BUG FIX: the original ignored its `x` parameter and silently split the
    # module-level global `X` instead. All existing callers pass (X, y), so
    # using the arguments is behaviour-identical for them but now correct
    # for any other input.
    X_train, X_test, y_train, y_test = train_test_split(
        x, y, test_size=0.3, shuffle=True, stratify=y, random_state=42)
    if imbalance:
        # Imported lazily so the notebook runs without imblearn unless needed.
        from imblearn.over_sampling import SMOTE
        sm = SMOTE(random_state=42)
        X_train, y_train = sm.fit_resample(X_train, y_train.ravel())
    return X_train, X_test, y_train, y_test
# + id="lG9r2W7gu29U"
# Define function to rescale training data using StandardScaler
def rescaling(X_train, X_test, numerical_cols):
# Make copies of dataset
X_train_std = X_train.copy()
X_test_std = X_test.copy()
# Apply standardization on numerical features only
for i in numerical_cols:
scl = StandardScaler().fit(X_train_std[[i]]) # fit on training data columns
X_train_std[i] = scl.transform(X_train_std[[i]]) # transform the training data columns
X_test_std[i] = scl.transform(X_test_std[[i]]) # transform the testing data columns
return X_train_std,X_test_std
# + colab={"base_uri": "https://localhost:8080/", "height": 112} id="qqqPMra0u5WA" outputId="e7a9f1e4-59cc-47d0-c979-9764ecc3e3a8"
def logistic_regression(imbalance=False):
    """Fit a baseline logistic regression on the global (X, y) data.

    Returns the tuple ("Logistic Regression", smote_tag, mean 5-fold CV ROC-AUC).
    """
    # Split (optionally SMOTE-balanced) and standardize the numeric columns
    tr_X, te_X, tr_y, te_y = data_split(X, y, imbalance=imbalance)
    tr_X_std, te_X_std = rescaling(tr_X, te_X, numerical_cols=num_cols)
    model = LogisticRegression(random_state=42)
    model.fit(tr_X_std, tr_y)
    # 5-fold cross-validated ROC-AUC on the (scaled) training data
    cv_scores = cross_val_score(model, tr_X_std, tr_y, scoring="roc_auc", cv=5)
    mean_auc = np.mean(cv_scores)
    smote_tag = "With SMOTE" if imbalance else "Without SMOTE"
    return "Logistic Regression", smote_tag, mean_auc
# Compare the baseline logistic regression with and without SMOTE balancing
model_result=[]
model_result.append(logistic_regression())
model_result.append(logistic_regression(imbalance=True))
pd.DataFrame(model_result,columns=['Model','Smote','ROC_AUC'])
# + colab={"base_uri": "https://localhost:8080/"} id="1_l2Rn2ku9fz" outputId="38f9768f-2a6e-4e73-9693-50838e5b7ab0"
# Randomized search for the best C parameter
# Split data with SMOTE
X_train, X_test, y_train, y_test = data_split(X, y, imbalance = True)
# Rescale data
X_train_std, X_test_std = rescaling(X_train, X_test, numerical_cols = num_cols)
# saga supports all three penalty options sampled below
logistic = LogisticRegression(solver='saga', tol=1e-2, max_iter=200,random_state=42)
# Sample C uniformly from [0, 4) together with the penalty type
distributions = dict(C=uniform(loc=0, scale=4), penalty=['l2', 'l1','elasticnet'])
clf = RandomizedSearchCV(logistic, distributions, random_state=42)
lr_best= clf.fit(X_train_std, y_train)
#print(distributions)
print(lr_best.best_params_)
# + colab={"base_uri": "https://localhost:8080/"} id="FUpf7I2IvAby" outputId="705981a7-e009-4a26-a981-f9ac8fd1ceae"
# Cross-validated ROC-AUC of the tuned logistic regression on the scaled training data
scores = cross_val_score(lr_best, X_train_std, y_train, scoring="roc_auc", cv=5)
roc_auc_lr = np.mean(scores)
# BUG FIX: the original f-string `{roc_auc_lr,".3f"}` built and printed the
# tuple (score, '.3f') instead of a formatted number; ':.3f' is the format spec.
print(f'Roc_Auc score for the Logistic regression with SMOTE :{roc_auc_lr:.3f}')
# + id="1EarZ4MYvD-e"
def precision_recall(model, X_test, y_test):
    """Predict on X_test and print precision, recall and F1 for the positive class."""
    predictions = model.predict(X_test)
    # Unpack the 2x2 confusion matrix into its four cell counts
    tn, fp, fn, tp = confusion_matrix(y_test, predictions).ravel()
    prec = tp / (tp + fp)
    rec = tp / (tp + fn)
    f1 = 2 * (prec * rec) / (prec + rec)
    print(f'Precision:{prec:.3f}\nRecall:{rec:.3f}\nF1 score:{f1:.3f}')
# + colab={"base_uri": "https://localhost:8080/"} id="DtamLa46vGOq" outputId="193beba0-c85d-4ebb-f4cf-32502cae2965"
# Evaluate the tuned logistic regression on the held-out (scaled) test set
precision_recall(lr_best,X_test_std,y_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 112} id="IztqGdwG1e0R" outputId="788c917f-5cc5-4366-dc4f-578d5328f125"
def RandomForest(imbalance=False):
    """Fit a default random forest on the global (X, y) data.

    Returns the tuple ("Random Forest", smote_tag, mean 5-fold CV ROC-AUC).
    """
    tr_X, te_X, tr_y, te_y = data_split(X, y, imbalance=imbalance)
    tr_X_std, te_X_std = rescaling(tr_X, te_X, numerical_cols=num_cols)
    forest = RandomForestClassifier(random_state=42)
    forest.fit(tr_X_std, tr_y)
    # 5-fold cross-validated ROC-AUC on the (scaled) training data
    cv_scores = cross_val_score(forest, tr_X_std, tr_y, scoring="roc_auc", cv=5)
    smote_tag = "With SMOTE" if imbalance else "Without SMOTE"
    return "Random Forest", smote_tag, np.mean(cv_scores)
# Compare the baseline random forest with and without SMOTE balancing
model_result=[]
model_result.append(RandomForest())
model_result.append(RandomForest(imbalance=True))
pd.DataFrame(model_result,columns=['Model','Smote','ROC_AUC'])
# + colab={"base_uri": "https://localhost:8080/", "height": 270} id="VrQwq6lB1hrT" outputId="213afc95-3523-4026-e726-ffe9f01887bb"
df.head()
# + id="FU5j7uK_1lZj"
# Split data with SMOTE
X_train_sm, X_test, y_train_sm, y_test = data_split(X, y, imbalance = True)
# + colab={"base_uri": "https://localhost:8080/"} id="B1j7ahCC1o92" outputId="edd69683-1e02-4281-f246-73e2dd930fe8"
# Create parameter grid
param_grid = {
    'max_depth': [60, 90, 110],
    'min_samples_leaf': [3, 4, 5],
    'min_samples_split': [8, 10, 12],
    'n_estimators': [100, 200, 300]
}
# Instantiate the model
clf_rf = RandomForestClassifier()
# Instantiate grid search model (cv=2 keeps the 81-combination grid tractable)
grid_search = GridSearchCV(estimator = clf_rf, param_grid = param_grid,
                          cv = 2, n_jobs = -1, verbose = 1)
# Fit grid search to the data (note: forests are fit on unscaled SMOTE data;
# tree models do not need standardization)
grid_search.fit(X_train_sm, y_train_sm)
grid_search.best_params_
# + colab={"base_uri": "https://localhost:8080/"} id="29P5r35x1xIw" outputId="63bd6571-2cf3-4f93-c10a-f9f546285625"
grid_search.best_estimator_
# + colab={"base_uri": "https://localhost:8080/"} id="v9kBuOFe10FH" outputId="6037bee8-ebdb-4248-ffc0-bfa87bf27e19"
# Refit a fresh forest with the winning parameters and cross-validate it
rf_best = RandomForestClassifier(**grid_search.best_params_)
rf_best.fit(X_train_sm,y_train_sm)
scores_best = cross_val_score(rf_best, X_train_sm, y_train_sm, scoring ="roc_auc", cv = 3)
roc_auc_best = np.mean(scores_best)
print(f'ROC_AUC training score after tuning for Random Forest: {roc_auc_best:.3f}')
# + colab={"base_uri": "https://localhost:8080/"} id="fzBOgYGD14ME" outputId="2ceb8c15-73a2-4314-b0e3-bbb5d049e3c5"
print("The F1 score,Precision and Recall value for Random Forest :")
precision_recall(rf_best,X_test,y_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 112} id="5x2IEFqa17iI" outputId="bf98e58b-7e87-4d10-cfd8-6316c6ba4487"
def xgboost(imbalance=False):
    """Fit a default XGBoost classifier on the global (X, y) data.

    Returns the tuple ("XGBOOST", smote_tag, mean 5-fold CV ROC-AUC).

    NOTE(review): the name `xgboost` is rebound to an XGBClassifier instance in
    a later cell, so this function is only callable before that cell runs.
    """
    tr_X, te_X, tr_y, te_y = data_split(X, y, imbalance=imbalance)
    tr_X_std, te_X_std = rescaling(tr_X, te_X, numerical_cols=num_cols)
    booster = XGBClassifier(random_state=42)
    booster.fit(tr_X_std, tr_y)
    cv_scores = cross_val_score(booster, tr_X_std, tr_y, scoring="roc_auc", cv=5)
    smote_tag = "With SMOTE" if imbalance else "Without SMOTE"
    return "XGBOOST", smote_tag, np.mean(cv_scores)
# Compare the baseline XGBoost with and without SMOTE balancing
model_result=[]
model_result.append(xgboost())
model_result.append(xgboost(imbalance=True))
pd.DataFrame(model_result,columns=['Model','Smote','ROC_AUC'])
# + id="XjHPZ6tT1_H5"
# Split data with SMOTE
X_train_sm, X_test, y_train_sm, y_test = data_split(X, y, imbalance = True)
# + colab={"base_uri": "https://localhost:8080/"} id="1vBBLtZG6iPn" outputId="882f58e3-3f71-45d9-e420-43dfd739ec5b"
# Hand-picked starting parameters for XGBoost before the randomized search
params = {
    'gamma':0,
    'learning_rate':0.01,
    'max_depth':3,
    'colsample_bytree':0.6,
    'subsample':0.8,
    'scale_pos_weight':3.5,
    'n_estimators':1000,
    'objective':'binary:logistic',
    'reg_alpha':0.3
}
clf_xgb=XGBClassifier(**params)
scores_best = cross_val_score(clf_xgb, X_train_sm, y_train_sm, scoring ="roc_auc", cv = 3)
roc_auc_best = np.mean(scores_best)
print(f'ROC_AUC training score after tuning for initial parameter in XGBOOST: {roc_auc_best:.3f}')
# + colab={"base_uri": "https://localhost:8080/"} id="vH9YGT916opT" outputId="40ce08f7-9ce8-40a2-8e85-0e2334f132d1"
n_estimators = np.arange(200,1000,200)
# Minimum loss reduction required to make a further partition on a leaf node of the tree
# The larger gamma is, the more conservative the algorithm will be
gamma = np.arange(0.1,0.6,0.1)
# Default 0.3, range(0,1)
learning_rate = np.arange(0.1,0.6,0.1)
# Maximum number of levels in tree
max_depth = list(range(3,8,1))
# Subsample ratio of the training instances.Range(0,1)
subsample = np.arange(0.5,0.9,0.1)
# Subsample ratio of columns when constructing each tree. Range(0,1)
colsample_bytree = np.arange(0.5,0.9,0.1)
# Control the balance of positive and negative weights
# Sum(negative instances) / sum(positive instances)
scale_pos_weight = [1,3.5]
# Create the random grid
random_grid_xgb = {'n_estimators': n_estimators,
                   'gamma': gamma,
                   'learning_rate':learning_rate,
                   'max_depth': max_depth,
                   'subsample':subsample,
                   'colsample_bytree':colsample_bytree,
                   'scale_pos_weight':scale_pos_weight
                  }
print(random_grid_xgb)
# + colab={"base_uri": "https://localhost:8080/"} id="BogTOE6g6vET" outputId="92f97af4-bc18-492d-bd4a-6a7f1a67d3f1"
# NOTE(review): this rebinds the name `xgboost`, shadowing the helper function
# defined above — rename one of them if the function must stay callable.
xgboost = XGBClassifier()
xgb_random = RandomizedSearchCV(estimator = xgboost,
                                param_distributions = random_grid_xgb,
                                n_iter = 10,
                                cv = 2,
                                verbose=1,
                                random_state=42,
                                n_jobs = -1,
                                scoring ='roc_auc')
xgb_random.fit(X_train_sm, y_train_sm)
xgb_random.best_params_, xgb_random.best_score_
print(xgb_random.best_params_,xgb_random.best_score_)
# + colab={"base_uri": "https://localhost:8080/"} id="bRr0oiOu6y_f" outputId="c15c06d3-367b-4e89-c99b-f538001e780d"
print("The F1 score, Precision and Recall for XGBOOST is :")
precision_recall(xgb_random,X_test,y_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 911} id="II6NQQUf63Q7" outputId="035d6a5f-a031-4178-eca9-d48ea47b0de0"
# Confusion matrices of the three tuned models on the test set.
# NOTE(review): rf_best and xgb_random were fit on the unscaled X_train_sm but
# are evaluated here on the scaled X_test_std (only lr_best saw scaled data) —
# confirm whether X_test should be used for the tree models instead.
fig,ax=plt.subplots(3,1,figsize=(10,15))
plot_confusion_matrix(lr_best,X_test_std,y_test,ax=ax[0])
ax[0].set_title("Logistic Regression")
plot_confusion_matrix(rf_best,X_test_std,y_test,ax=ax[1])
ax[1].set_title("Random Forest")
plot_confusion_matrix(xgb_random,X_test_std,y_test,ax=ax[2])
ax[2].set_title("XGBOOST")
# + colab={"base_uri": "https://localhost:8080/", "height": 893} id="0bVsjOL867Ld" outputId="6f5e5db0-dc82-4d4b-9e44-a7330a0f8135"
# ROC curves of the three models on one axis (same scaled-vs-unscaled caveat)
fig,ax=plt.subplots(figsize=(10,15))
plot_roc_curve(lr_best,X_test_std,y_test,ax=ax,color='Blue',label='Logistic Regression')
plot_roc_curve(rf_best,X_test_std,y_test,ax=ax,color='Red',label='Random Forest')
plot_roc_curve (xgb_random,X_test_std,y_test,ax=ax,color='Black',label='XGBOOST')
plt.title("roc-auc curve for 3 model")
plt.grid()
# + colab={"base_uri": "https://localhost:8080/", "height": 893} id="2GDKYQAR6_qj" outputId="14a5b770-55f7-4a11-d34e-20e88da8ce5e"
# Precision-recall curves of the three models on one axis
fig,ax=plt.subplots(figsize=(10,15))
plot_precision_recall_curve(lr_best,X_test_std,y_test,ax=ax,color='Blue',label='Logistic Regression')
plot_precision_recall_curve(rf_best,X_test_std,y_test,ax=ax,color='Red',label='Random Forest')
plot_precision_recall_curve(xgb_random,X_test_std,y_test,ax=ax,color='Black',label='XGBOOST')
plt.title("Precision Recall value for 3 Model :")
plt.grid()
# + colab={"base_uri": "https://localhost:8080/"} id="ty7TMhry7Dgz" outputId="78e7e483-a2e5-404e-9a2f-38cd964d9a31"
# Baseline sanity check: a stratified-random dummy classifier for comparison
from sklearn.dummy import DummyClassifier
dummy_clf = DummyClassifier(strategy="stratified")
dummy_clf.fit(X_train, y_train)
DummyClassifier(strategy='stratified')
y_pred_dummy = dummy_clf.predict(X_test)
print('Dummy model:')
precision_recall(dummy_clf, X_test, y_test)
# + colab={"base_uri": "https://localhost:8080/"} id="sW8frito7Fzf" outputId="271f1acc-f31d-46f0-ba27-d98bf880adb5"
# Compute precision, recall and threshold of Random Forest
y_predict_rf = rf_best.predict_proba(X_test)
y_scores_rf = y_predict_rf[:,1]
#print(y_scores_rf)
precisions, recalls, thresholds = precision_recall_curve(y_test, y_scores_rf)
#print(precisions)
#print(recalls)
#print(thresholds)
# Locate the operating point where recall first drops below the 0.80 target
recalls_80 = recalls[np.argmin(recalls >= 0.80)] # Recommend recall score = 0.8
precision_80 = precisions[np.argmin(recalls >= 0.80)]
threshold_80_recall = thresholds[np.argmin(recalls >= 0.80)]
thresholds = np.append(thresholds, 1)
recalls_80, precision_80, threshold_80_recall
# + colab={"base_uri": "https://localhost:8080/", "height": 518} id="3y9bzLM47KNT" outputId="d3570538-f548-4a0d-853f-6009e64a430b"
# Plot feature importance of winner model - Random Forest
fea_df = pd.DataFrame({'Feature': feature_cols, 'Feature importance': rf_best.feature_importances_})
fea_df = fea_df.sort_values(by='Feature importance')
figure, ax = plt.subplots(figsize = (10,8))
fea_df.plot.barh(x='Feature',y='Feature importance', ax=ax)
plt.title('Features importance',fontsize=14)
# + [markdown] id="UMh_yX8s7NpM"
# "PAY_1" and "PAY_2" are the two most recent months' payment statuses, and they are the strongest predictors of future payment-default risk.
# + [markdown] id="4PbBeu7Y7QOZ"
# # Conclusion
# Logistic Regression model has the highest recall but the lowest precision, if the business cares recall the most, then this model is the best candidate. If the balance of recall and precision is the most important metric, then Random Forest is the ideal model. Since Random Forest has slightly lower recall but much higher precision than Logistic Regression, I would recommend Random Forest.
| Credit_Card_Default_Prediction_Capstone_Project_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Import tensorflow
# +
import matplotlib.pyplot as plt
import tensorflow as tf
layers = tf.keras.layers
import numpy as np
print(tf.__version__)
# -
# ## Download and preprocess the data
# +
# Download the Fashion-MNIST dataset (60k train / 10k test 28x28 grayscale images)
mnist = tf.keras.datasets.fashion_mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# -
# Scale pixel values from [0, 255] to [0, 1] for training stability
x_train, x_test = x_train / 255.0, x_test / 255.0
# +
# Human-readable names for the 10 integer class labels
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# Preview the first 25 training images in a 5x5 grid with their labels
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(x_train[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[y_train[i]])
plt.show()
# -
# ## Build the model
# Simple MLP: flatten the 28x28 image, two hidden ReLU layers,
# softmax over the 10 classes
model = tf.keras.Sequential()
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
# Such a model can then be compiled and trained in a few lines:
# sparse_categorical_crossentropy accepts the integer labels directly
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
# ## Testing
model.evaluate(x_test, y_test)
# Class-probability vectors (one row of 10 per test image)
predictions = model.predict(x_test)
# +
def plot_image(i, predictions_array, true_label, img):
    """Show test image i with its predicted label — blue if correct, red if wrong."""
    probs, actual, picture = predictions_array[i], true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(picture, cmap=plt.cm.binary)
    guess = np.argmax(probs)                       # most probable class index
    color = 'blue' if guess == actual else 'red'
    # Label: predicted class, its confidence, and the true class in parentheses
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[guess],
                                         100*np.max(probs),
                                         class_names[actual]),
               color=color)
def plot_value_array(i, predictions_array, true_label):
    """Bar-plot the 10 class probabilities for sample i (red = predicted, blue = true)."""
    probs, actual = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    bars = plt.bar(range(10), probs, color="#777777")
    plt.ylim([0, 1])
    guess = np.argmax(probs)
    bars[guess].set_color('red')    # highlight the model's pick
    bars[actual].set_color('blue')  # true class wins if they coincide
# -
# Visualize the first test sample: image + its class-probability bars side by side
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, y_test, x_test)
plt.subplot(1,2,2)
plot_value_array(i, predictions, y_test)
plt.show()
# Print the predicted vs. actual class name for that sample
predicted_label = class_names[np.argmax(predictions[0])]
print('Actual label:', class_names[y_test[0]])
print('Predicted label:', predicted_label)
| tensorflow-2-example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Exploration of the Brazilian TSE (electoral court) public REST API endpoints
import requests
base_url = 'http://divulgacandcontas.tse.jus.br/divulga'
endpoint = '/rest/v1/candidatura/listar/2002/SP/0/6/candidatos'
# NOTE(review): 'municipioss' (double s) looks like a typo for 'municipios' —
# confirm whether this request was meant to hit the municipalities endpoint.
req = requests.get(base_url+'/rest/v1/eleicao/ufs/SP/municipioss')
js = req.json()
js
# Experiment: turn ':param'-style route templates into string.Template
# '$param' placeholders so they can be filled with substitute()
from string import Template
ts_str = '/rest/v1/eleicao/suplementares/:ano/:sgUe'
ts = Template(ts_str.replace(':','$'))
ts.substitute({'ano':2018,'sqEleicao':0,'sgUe':0})
# Extract the ':param' placeholder names from a route with a regex
import re
expr = re.compile(':[A-Za-z]*')
expr.findall('/rest/v1/eleicao/listar/municipios/:eleicao/:sgUe/cargos')
ts_str
# Parse a markdown-ish "`name`: `route`" snippet with regexes
# (NOTE(review): patterns below are non-raw strings; prefer r"..." for regexes)
ts = """
`getResourceCandidatos`:
`/rest/v1/candidatura/listar/:ano/:sgUe/:eleicao/:cargo/candidatos`"""
expr = re.compile("[\s\S]*?`([A-Za-z]*)`[\s\S]*?`([A-Za-z/]*)`[\s\S]*?")
ts.index('`')
re.compile("`([A-Za-z0-9/:]*)`").findall(ts)
| notebooks/0.0-tb-testando-endpoints.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # R Exam
#
# Gross domestic product (GDP) is a monetary measure of the market value of all the final goods and services produced in a specific time period. In spanish is called the PIB (Producto Interno Bruto).
# The Human Development Index (HDI) is a statistic composite index of life expectancy, education (Literacy Rate, Gross Enrollment Ratio at different levels and Net Attendance Ratio), and per capita income indicators, which are used to rank countries into four tiers of human development.
#
# Several online websites (among which the spanish wikipedia https://es.wikipedia.org/wiki/%C3%8Dndice_de_desarrollo_humano), have suggested that GDP and HDI are strongly correlated, meaning that the better the country's economy the better the human development of their citizens. Let's try to plot the data for Spain (from 2007) and analyze it.
#
# - Load the files GDP.csv and HDI.csv using read.csv()
# - Analyze the file that you have just loaded with head() and str()
# - Glue together the two data frames by columns (one next to the other) using cbind(). We want the province to match on the same rows, so you need first to sort the rows of the two data frames by Province, using order()
# - The new data frame will contain two columns names "Province". remove one of the columns
# - Check that everything is fine with head() and tail(). The final data frame should have 4 columns: Province GDP.per.capita, CCAA and HDI. **With each province matching its CCAA**. (Fig, 1)
# - Create scatterplot of text using geom_point(). Plot the GDP.per.capita (x-axis) against the HDI (y-axis). Color the points by CCAA (use color=CCAA inside the aes() function of geom_point).
# - Do the same but use geom_text(). Specify as labels the names of the provinces, using aes(label=Province) inside geom_text. Color by CCAA. Also specify check_overlap=T so that the names do not overlap, if you want. (Fig, 2)
# - Calculate the mean and standard deviation of GDP and HDI, by CCAA, using aggregate ().
# - Create a new data frame where you put each CCAA with the corresponding mean GDP, mean HDI, stdev GDP and stdev HDI
# - Madrid is a single-province CCAA, so you will see that its standard deviation is NA. Replace those NAs with zeros, using is.na(). (Fig, 3)
# - Plot again the original scatterplot with geom_point and color by CCAA.
# - Add over this plot another scatterplot using the new data frame (specify data=...), plotting the mean GDP against the mean HDI by CCAA that you have calculated with aggregate. These points should appear somewhere in the middle of their corresponding provinces.
# - Also add vertical and horizontal error bars to express the variability for each CCAA (with geom_errorbar and geom_errorbarh). For geom_errorbar() you have to specify the aesthetic parameters x, y, ymin and ymax. For geom_errorbarh you have to specify the aesthetic parameters x, y, xmin and xmax.
# - Finally, add the name of each CCAA on the plot with geom_label(). You can add the parameters hjust=1, vjust=1 to move the labels a little bit so they don't overlap the points (Fig, 4)
# - *(Optional)* It looks like that GDP is strongly correlated with HDI (the dots follow a straight line). <br> However, you should know that the formula for HDI is (roughly) LifeExpectancy + EducationIndex + GDP. <br>
# Do you think there is something weird? Do you still think that concluding, by simply looking at the chart, that increasing the GDP you can increase the human development of their citizens (or viceversa)? If not, why? Should we maybe compare GDP and HDI in a different way?
#
#
# **FIGURE 1**
# <img src="Fig 1.png">
# **FIGURE 2**
# <img src="Fig 2.png">
# **FIGURE 3**
# <img src="Fig 3.png">
# **FIGURE 4**
# <img src="Fig 4.png">
#
# +
#Load the files GDP.csv and HDI.csv using read.csv()
GDP <- read.csv("GDP.csv")
HDI <- read.csv("HDI.csv")
# +
#Analyze the file that you have just loaded with head() and str()
head(GDP)
str(GDP)
head(HDI)
str(HDI)
# +
#Glue together the two data frames by columns (one next to the other) using cbind().
#We want the province to match on the same rows, so you need first to sort the rows of the two data frames by Province, using order()
# BUG FIX: the original only *printed* head() of the sorted frames and then
# cbind()-ed the unsorted data, so the rows of the two files were not
# guaranteed to refer to the same province. Sort both frames before binding.
GDP <- GDP[order(GDP$Province), ]
HDI <- HDI[order(HDI$Province), ]
head(GDP)
head(HDI)
GDP_HDI <- cbind(GDP, HDI)
head(GDP_HDI)
# +
#The new data frame will contain two columns names "Province". remove one of the columns
# ($ matches the first column called "Province"; assigning NULL removes it)
GDP_HDI$Province <- NULL
head(GDP_HDI)
# +
#Create scatterplot of text using geom_point().
#Plot the GDP.per.capita (x-axis) against the HDI (y-axis).
#Color the points by CCAA (use color=CCAA inside the aes() function of geom_point).
library(ggplot2)
GDP_p_capita <- GDP_HDI$GDP.per.capita
HDI_p_province <- GDP_HDI$HDI
# BUG FIX: the exercise asks to colour by CCAA (autonomous community);
# colouring by Province gives every point its own colour and a useless legend.
ggplot(GDP_HDI, aes(x = GDP_p_capita, y = HDI_p_province, color = CCAA)) + geom_point(size=4)
# +
#Do the same but use geom_text().
#Specify as labels the names of the provinces, using aes(label=Province) inside geom_text.
#Color by CCAA. Also specify check_overlap=T so that the names do not overlap, if you want. (Fig, 2)
ggplot(GDP_HDI, aes(x = GDP_p_capita, y = HDI_p_province, color = CCAA, label = Province)) + geom_text(size=4, check_overlap = TRUE)
# +
#Calculate the mean and standard deviation of GDP and HDI, by CCAA, using aggregate ().
# aggregate() returns a data frame whose grouping column is named "Group.1"
# (the CCAA name) and whose aggregated value is in a column named "x".
#mean_GDP <- aggregate(x = GDP[ , colnames(GDP) != "CCAA"], # Sum by group
#by = list(GDP$CCAA),
#FUN = mean)
#head(mean_GDP)
mean_GDP <- aggregate(GDP_HDI$GDP.per.capita, by=list(GDP_HDI$CCAA), mean)
sd_GDP <- aggregate(GDP_HDI$GDP.per.capita, by=list(GDP_HDI$CCAA), sd)
mean_HDI <- aggregate(GDP_HDI$HDI, by=list(GDP_HDI$CCAA), mean)
sd_HDI <- aggregate(GDP_HDI$HDI, by=list(GDP_HDI$CCAA), sd)
# -
head(mean_GDP)
# +
#Create a new data frame where you put each CCAA with the corresponding mean GDP, mean HDI, stdev GDP and stdev HDI
#new_CCAA <- GDP_HDI$CCAA
# Binding the four aggregate() results yields columns such as
# "GDP.mean.Group.1" (CCAA name) and "GDP.mean.x" (value). Three of the four
# duplicated CCAA-name columns are dropped below; "stddev.HDI.Group.1" is kept
# on purpose -- it is used later as the CCAA label in the plots.
new_GDP_HDI <- data.frame(GDP.mean = mean_GDP, HDI.mean = mean_HDI, stddev.GDP = sd_GDP, stddev.HDI = sd_HDI)
new_GDP_HDI$GDP.mean.Group.1 <- NULL
new_GDP_HDI$HDI.mean.Group.1 <- NULL
new_GDP_HDI$stddev.GDP.Group.1 <- NULL
head(new_GDP_HDI)
# +
#Madrid is a single province CCAA, so you will see that its standard deviation is NA.
#Replace those NAs with zeros, using is.na(). (Fig, 3)
head( is.na(new_GDP_HDI))
# BUG FIX: the original used na.omit(), which *drops* Madrid's row entirely
# instead of replacing its NA standard deviations with 0 as the exercise asks.
new_GDP_HDI[is.na(new_GDP_HDI)] <- 0
head(new_GDP_HDI)
# +
#Plot again the original scatterplot with geom_point and color by CCAA.
#Add over this plot another scatterplot using the new data frame (specify data=...), plotting the mean GDP against the mean HDI by CCAA that you have calculated with aggregate.
#These points should appear somewhere in the middle of their corresponding provinces
ggplot(GDP_HDI, aes(x = GDP_p_capita, y = HDI_p_province, color = CCAA)) +
geom_text(data=GDP_HDI, aes(label = Province), size=4, check_overlap = TRUE) +
geom_point(data = new_GDP_HDI, aes(x = GDP.mean.x, y = HDI.mean.x), colour = "black")
# +
#Add also vertical and horizontal error bars to express the variability for each CCAA (with geom_errorbar and geom_errorbarh).
#For geom_errorbar() you have to specify the aesthetic parameters x, y, ymin and ymax.
#For geom_errorbarh you have to specify the aesthetic parameters x, y, xmin and xmax.
# BUG FIX: geom_errorbarh() has no `width` argument (that belongs to the
# vertical geom); its cap size is controlled by `height`, so `width=0` was
# silently ignored and default caps were drawn.
ggplot(new_GDP_HDI, aes(x = GDP.mean.x, y = HDI.mean.x)) +
geom_point(size=4) + geom_errorbar(aes(x=GDP.mean.x, y = HDI.mean.x, ymin=HDI.mean.x-stddev.HDI.x, ymax=HDI.mean.x+stddev.HDI.x), width=0) +
geom_errorbarh(aes(x=GDP.mean.x, y = HDI.mean.x, xmin=GDP.mean.x-stddev.GDP.x, xmax=GDP.mean.x+stddev.GDP.x), height=0)
# +
#Finally, add the name of each CCAA on the plot with geom_label().
#You can add the parameters hjust=1, vjust=1 to move the labels a little bit
#so they don't overlap the points (Fig, 4)
# stddev.HDI.Group.1 holds the CCAA names (kept when the new data frame was built).
ggplot(new_GDP_HDI, aes(x = GDP.mean.x, y = HDI.mean.x)) + geom_label(data=new_GDP_HDI, aes(label=stddev.HDI.Group.1), hjust=1, vjust=1) +
geom_point(size=4) + geom_errorbar(aes(x=GDP.mean.x, y = HDI.mean.x, ymin=HDI.mean.x-stddev.HDI.x, ymax=HDI.mean.x+stddev.HDI.x), width=0) +
geom_errorbarh(aes(x=GDP.mean.x, y = HDI.mean.x, xmin=GDP.mean.x-stddev.GDP.x, xmax=GDP.mean.x+stddev.GDP.x), height=0)
# +
#Finally, add the name of each CCAA on the plot with geom_label().
#You can add the parameters hjust=1, vjust=1 to move the labels a little bit
#so they don't overlap the points (Fig, 4)
# FIXES: check_overlap is a geom_text()-only argument (not valid for
# geom_point()); the exercise asks to colour provinces by CCAA; and
# geom_errorbarh() takes `height`, not `width`, to control cap size.
ggplot(GDP_HDI, aes(x = GDP_p_capita, y = HDI_p_province)) +
geom_point(data=GDP_HDI, aes(x = GDP_p_capita, y = HDI_p_province, color=CCAA), size=4) +
geom_point(data = new_GDP_HDI, aes(x = GDP.mean.x, y = HDI.mean.x), colour = "black")+
geom_label(data=new_GDP_HDI, aes(x=GDP.mean.x, y = HDI.mean.x, label=stddev.HDI.Group.1), hjust=1, vjust=1) +
geom_errorbar(data=new_GDP_HDI, aes(x=GDP.mean.x, y = HDI.mean.x, ymin=HDI.mean.x-stddev.HDI.x, ymax=HDI.mean.x+stddev.HDI.x), width=0) +
geom_errorbarh(data=new_GDP_HDI, aes(x=GDP.mean.x, y = HDI.mean.x, xmin=GDP.mean.x-stddev.GDP.x, xmax=GDP.mean.x+stddev.GDP.x), height=0)
# -
| R_Exam_Answers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# # Input Image:
# OpenCV loads images in BGR channel order.
image = cv2.imread('contoursHeirarchy.png')
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Binarise: pixels > 127 become 255 (the literal 0 meant cv2.THRESH_BINARY).
ret, thresh = cv2.threshold(gray_img, 127, 255, cv2.THRESH_BINARY)
# BUG FIX: matplotlib expects RGB; displaying the raw BGR image swaps
# red and blue channels.
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
# ## 1. RETR_LIST Mode:
# Retrieves all contours as a flat list (no parent/child relationships).
# BUG FIX: cv2.findContours returned (image, contours, hierarchy) in
# OpenCV 3 but returns only (contours, hierarchy) in OpenCV 4, so the
# original 3-way unpacking raises ValueError there. Taking the last two
# elements works with either version.
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2:]
RETR_LIST_IMAGE = image.copy()
cv2.drawContours(RETR_LIST_IMAGE, contours, -1, (0, 255, 0), 3)
plt.imshow(cv2.cvtColor(RETR_LIST_IMAGE, cv2.COLOR_BGR2RGB))  # BGR->RGB for matplotlib
hierarchy
# ## 2. RETR_EXTERNAL Mode
# Retrieves only the outermost contours.
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
RETR_EXTERNAL_IMAGE = image.copy()
cv2.drawContours(RETR_EXTERNAL_IMAGE, contours, -1, (0, 255, 0), 3)
plt.imshow(cv2.cvtColor(RETR_EXTERNAL_IMAGE, cv2.COLOR_BGR2RGB))
hierarchy
# ## 3. RETR_CCOMP Mode
# Organises contours into a two-level hierarchy: outer boundaries and holes.
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)[-2:]
RETR_CCOMP_IMAGE = image.copy()
cv2.drawContours(RETR_CCOMP_IMAGE, contours, -1, (0, 255, 0), 3)
plt.imshow(cv2.cvtColor(RETR_CCOMP_IMAGE, cv2.COLOR_BGR2RGB))
hierarchy
# ## 4. RETR_TREE Mode
# Retrieves the full nested contour hierarchy.
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
RETR_TREE_IMAGE = image.copy()
cv2.drawContours(RETR_TREE_IMAGE, contours, -1, (0, 255, 0), 3)
plt.imshow(cv2.cvtColor(RETR_TREE_IMAGE, cv2.COLOR_BGR2RGB))
hierarchy
| ContoursHierarchy/ContoursHierarchy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ML Simulation Training
# In this notebook you are going to train a ML policy.
#
# However, you won't use examples from the SDV as data, but other agents around it instead.
#
# This may sound like a small difference, but it has profound implications still:
# - by using data from multiple sources you're **including much more variability in your training data**;
# - two agents may have taken different choices at the same intersection, leading to **multi-modal data**;
# - the **quality of the annotated data is expected to be noticeably lower** compared to the SDV, as we're leveraging a perception system.
#
# Still, the final prize is even better than planning as this policy can potentially drive all the agents in the scene and it's not limited to the SDV only.
#
# 
#
# +
from tempfile import gettempdir
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from l5kit.configs import load_config_data
from l5kit.data import LocalDataManager, ChunkedDataset
from l5kit.dataset import AgentDataset
from l5kit.rasterization import build_rasterizer
from l5kit.geometry import transform_points
from l5kit.visualization import TARGET_POINTS_COLOR, draw_trajectory
from l5kit.planning.model import PlanningModel
import os
# -
# ## Prepare data path and load cfg
#
# By setting the `L5KIT_DATA_FOLDER` variable, we can point the script to the folder where the data lies.
#
# Then, we load our config file with relative paths and other configurations (rasteriser, training params...).
#@title Download L5 Sample Dataset and install L5Kit
import os
# Detect whether the notebook runs on Google Colab; the setup script below
# downloads the sample dataset and installs l5kit there.
RunningInCOLAB = 'google.colab' in str(get_ipython())
if RunningInCOLAB:
    # !wget https://raw.githubusercontent.com/lyft/l5kit/master/examples/setup_notebook_colab.sh -q
    # !sh ./setup_notebook_colab.sh
    # The setup script records the dataset location in dataset_dir.txt.
    os.environ["L5KIT_DATA_FOLDER"] = open("./dataset_dir.txt", "r").read().strip()
else:
    print("Not running in Google Colab.")
    # NOTE(review): assumes the dataset was placed here manually -- adjust if needed.
    os.environ["L5KIT_DATA_FOLDER"] = "/tmp/l5kit_data"
dm = LocalDataManager(None)
# get config
cfg = load_config_data("./config.yaml")
# +
# rasterisation
rasterizer = build_rasterizer(cfg, dm)
# ===== INIT DATASET
# Open the zarr dataset lazily and wrap it in an AgentDataset: samples are
# built around *agents* (surrounding vehicles/pedestrians), not the SDV.
train_zarr = ChunkedDataset(dm.require(cfg["train_data_loader"]["key"])).open()
train_dataset = AgentDataset(cfg, train_zarr, rasterizer)
# plot some examples
# Visualise ~10 evenly spaced samples with their ground-truth future trajectory.
for idx in range(0, len(train_dataset), len(train_dataset) // 10):
    data = train_dataset[idx]
    im = rasterizer.to_rgb(data["image"].transpose(1, 2, 0))
    target_positions = transform_points(data["target_positions"], data["raster_from_agent"])
    draw_trajectory(im, target_positions, TARGET_POINTS_COLOR)
    plt.imshow(im)
    plt.axis('off')
    plt.show()
# -
# Planning model: predicts X, Y and yaw for each future frame from the
# rasterised scene image; per-element MSE (reduction="none") so the model
# can weight individual targets.
model = PlanningModel(
    model_arch=cfg["model_params"]["model_architecture"],
    num_input_channels=rasterizer.num_channels(),
    num_targets=3 * cfg["model_params"]["future_num_frames"],  # X, Y, Yaw * number of future states,
    weights_scaling= [1., 1., 1.],
    criterion=nn.MSELoss(reduction="none")
)
print(model)
# # Prepare for training
# Our `AgentDataset` inherits from PyTorch `Dataset`; so we can use it inside a `Dataloader` to enable multi-processing.
# +
train_cfg = cfg["train_data_loader"]
train_dataloader = DataLoader(train_dataset, shuffle=train_cfg["shuffle"], batch_size=train_cfg["batch_size"],
                              num_workers=train_cfg["num_workers"])
# Train on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
print(train_dataset)
# -
# # Training loop
# Here, we purposely include a barebone training loop. Clearly, many more components can be added to enrich logging and improve performance, such as:
# - learning rate drop;
# - loss weights tuning;
# - importance sampling
#
# To name a few.
#
#
# Still, the sheer size of our dataset ensures that a reasonable performance can be obtained even with this simple loop.
# +
tr_it = iter(train_dataloader)
progress_bar = tqdm(range(cfg["train_params"]["max_num_steps"]))
losses_train = []
model.train()
torch.set_grad_enabled(True)
for _ in progress_bar:
    try:
        data = next(tr_it)
    except StopIteration:
        # Dataloader exhausted before max_num_steps: restart it and continue.
        tr_it = iter(train_dataloader)
        data = next(tr_it)
    # Forward pass
    # Move every tensor in the batch dict to the training device.
    data = {k: v.to(device) for k, v in data.items()}
    result = model(data)
    loss = result["loss"]
    # Backward pass
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    losses_train.append(loss.item())
    progress_bar.set_description(f"loss: {loss.item()} loss(avg): {np.mean(losses_train)}")
# -
# ### Plot the train loss curve
# We can plot the train loss against the iterations (batch-wise) to check if our model has converged.
# Batch-wise training loss over iterations (no smoothing applied).
plt.plot(np.arange(len(losses_train)), losses_train, label="train loss")
plt.legend()
plt.show()
# # Store the model
#
# Let's store the model as a torchscript. This format allows us to re-load the model and weights without requiring the class definition later.
#
# **Take note of the path, you will use it later to evaluate your planning model!**
# + pycharm={"name": "#%%\n"}
# Move to CPU before scripting so the artifact loads on machines without GPUs.
to_save = torch.jit.script(model.cpu())
path_to_save = f"{gettempdir()}/simulation_model.pt"
to_save.save(path_to_save)
print(f"MODEL STORED at {path_to_save}")
# -
# # Congratulations in training your first ML policy for simulation!
# ### What's Next
#
# Now that your model is trained and safely stored, you can use it to control the agents around ego. We have a notebook just for that.
#
# ### [Simulation evaluation](./simulation_test.ipynb)
# In this notebook a `planning_model` will control the SDV, while the `simulation_model` you just trained will be used for all other agents.
#
# Don't worry if you don't have the resources required to train a model, we provide pre-trained models just below.
#
# ## Pre-trained models
# we provide a collection of pre-trained models for the simulation task:
# - [simulation model](https://lyft-l5-datasets-public.s3-us-west-2.amazonaws.com/models/simulation_models/simulation_model_20210416_5steps.pt) trained on agents over the semantic rasteriser with history of 0.5s;
# - [planning model](https://lyft-l5-datasets-public.s3-us-west-2.amazonaws.com/models/simulation_models/planning_model_20210421_5steps.pt) trained on the AV over the semantic rasteriser with history of 0.5s;
#
# To use one of the models simply download the corresponding `.pt` file and load it in the evaluation notebooks.
| examples/simulation/train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# -
# # Parametric string equation example
# ## Import modules
# +
import numpy as np
import scipy.sparse as sps
import matplotlib.pyplot as plt
from pymor.basic import *
from pymor.core.config import config
from pymor.core.logger import set_log_levels
set_log_levels({'pymor.algorithms.gram_schmidt.gram_schmidt': 'WARNING'})
# -
# ## Assemble $M$, $D$, $K$, $B$, $C_p$
# +
n2 = 50
n = 2 * n2 - 1  # dimension of the system
k = 0.01  # stiffness
# Mass and damping matrices are identities; K is the 1-D Laplacian stiffness
# matrix scaled by k * n**2 (sparse CSC format).
M = sps.eye(n, format='csc')
E = sps.eye(n, format='csc')
K = sps.diags([n * [2 * k * n ** 2],
               (n - 1) * [-k * n ** 2],
               (n - 1) * [-k * n ** 2]],
              [0, -1, 1],
              format='csc')
# Single input acting on the middle node; single output observing the
# middle node's position.
B = np.zeros((n, 1))
B[n2 - 1, 0] = n
Cp = np.zeros((1, n))
Cp[0, n2 - 1] = 1
# -
# ## Second-order system
# The damping operator is scaled by the scalar parameter 'damping'.
Mop = NumpyMatrixOperator(M)
Eop = NumpyMatrixOperator(E) * ProjectionParameterFunctional('damping')
Kop = NumpyMatrixOperator(K)
Bop = NumpyMatrixOperator(B)
Cpop = NumpyMatrixOperator(Cp)
so_sys = SecondOrderModel(Mop, Eop, Kop, Bop, Cpop)
print(f'order of the model = {so_sys.order}')
print(f'number of inputs = {so_sys.input_dim}')
print(f'number of outputs = {so_sys.output_dim}')
mu_list = [1, 5, 10]  # damping parameter values to compare throughout
# +
fig, ax = plt.subplots()
for mu in mu_list:
    poles = so_sys.poles(mu=mu)
    ax.plot(poles.real, poles.imag, '.', label=fr'$\mu = {mu}$')
ax.set_title('System poles')
ax.legend()
plt.show()
# +
# Frequencies for all magnitude plots below.
w = np.logspace(-3, 2, 200)
fig, ax = plt.subplots()
for mu in mu_list:
    so_sys.mag_plot(w, ax=ax, mu=mu, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of the full model')
ax.legend()
plt.show()
# -
# Second-order singular values (position/velocity and mixed variants),
# used to judge how well the model can be reduced.
fig, ax = plt.subplots(2, 2, figsize=(12, 8), sharey=True)
for mu in mu_list:
    psv = so_sys.psv(mu=mu)
    vsv = so_sys.vsv(mu=mu)
    pvsv = so_sys.pvsv(mu=mu)
    vpsv = so_sys.vpsv(mu=mu)
    ax[0, 0].semilogy(range(1, len(psv) + 1), psv, '.-', label=fr'$\mu = {mu}$')
    ax[0, 1].semilogy(range(1, len(vsv) + 1), vsv, '.-')
    ax[1, 0].semilogy(range(1, len(pvsv) + 1), pvsv, '.-')
    ax[1, 1].semilogy(range(1, len(vpsv) + 1), vpsv, '.-')
ax[0, 0].set_title('Position singular values')
ax[0, 1].set_title('Velocity singular values')
ax[1, 0].set_title('Position-velocity singular values')
ax[1, 1].set_title('Velocity-position singular values')
fig.legend(loc='upper center', ncol=len(mu_list))
plt.show()
# System norms of the full model, per parameter value (H_inf needs Slycot).
for mu in mu_list:
    print(f'mu = {mu}:')
    print(f' H_2-norm of the full model: {so_sys.h2_norm(mu=mu):e}')
    if config.HAVE_SLYCOT:
        print(f' H_inf-norm of the full model: {so_sys.hinf_norm(mu=mu):e}')
    print(f' Hankel-norm of the full model: {so_sys.hankel_norm(mu=mu):e}')
# ## Position Second-Order Balanced Truncation (SOBTp)
r = 5  # reduced order
roms_sobtp = []
for mu in mu_list:
    sobtp_reductor = SOBTpReductor(so_sys, mu=mu)
    rom_sobtp = sobtp_reductor.reduce(r)
    roms_sobtp.append(rom_sobtp)
fig, ax = plt.subplots()
# BUG FIX: the original looped `for rom_sobtp in roms_sobtp:` while still
# interpolating `mu`, so every pole set was labelled with the stale last value
# from the loop above; iterate mu and rom together and show the legend.
for mu, rom_sobtp in zip(mu_list, roms_sobtp):
    poles_rom_sobtp = rom_sobtp.poles()
    ax.plot(poles_rom_sobtp.real, poles_rom_sobtp.imag, '.', label=fr'$\mu = {mu}$')
ax.set_title("SOBTp reduced model's poles")
ax.legend()
plt.show()
# Relative reduction errors for each parameter value.
for mu, rom_sobtp in zip(mu_list, roms_sobtp):
    err_sobtp = so_sys - rom_sobtp
    print(f'mu = {mu}')
    print(f' SOBTp relative H_2-error: {err_sobtp.h2_norm(mu=mu) / so_sys.h2_norm(mu=mu):e}')
    if config.HAVE_SLYCOT:
        print(f' SOBTp relative H_inf-error: {err_sobtp.hinf_norm(mu=mu) / so_sys.hinf_norm(mu=mu):e}')
    print(f' SOBTp relative Hankel-error: {err_sobtp.hankel_norm(mu=mu) / so_sys.hankel_norm(mu=mu):e}')
fig, ax = plt.subplots()
for mu, rom_sobtp in zip(mu_list, roms_sobtp):
    rom_sobtp.mag_plot(w, ax=ax, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of SOBTp reduced models')
ax.legend()
plt.show()
fig, ax = plt.subplots()
for mu, rom_sobtp in zip(mu_list, roms_sobtp):
    (so_sys - rom_sobtp).mag_plot(w, ax=ax, mu=mu, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of the SOBTp error system')
ax.legend()
plt.show()
# ## Velocity Second-Order Balanced Truncation (SOBTv)
r = 5  # reduced order
roms_sobtv = []
for mu in mu_list:
    sobtv_reductor = SOBTvReductor(so_sys, mu=mu)
    rom_sobtv = sobtv_reductor.reduce(r)
    roms_sobtv.append(rom_sobtv)
fig, ax = plt.subplots()
# BUG FIX: iterate mu and rom together -- the original labelled every pole set
# with the stale `mu` left over from the loop above; also show the legend.
for mu, rom_sobtv in zip(mu_list, roms_sobtv):
    poles_rom_sobtv = rom_sobtv.poles()
    ax.plot(poles_rom_sobtv.real, poles_rom_sobtv.imag, '.', label=fr'$\mu = {mu}$')
ax.set_title("SOBTv reduced model's poles")
ax.legend()
plt.show()
# Relative reduction errors for each parameter value.
for mu, rom_sobtv in zip(mu_list, roms_sobtv):
    err_sobtv = so_sys - rom_sobtv
    print(f'mu = {mu}')
    print(f' SOBTv relative H_2-error: {err_sobtv.h2_norm(mu=mu) / so_sys.h2_norm(mu=mu):e}')
    if config.HAVE_SLYCOT:
        print(f' SOBTv relative H_inf-error: {err_sobtv.hinf_norm(mu=mu) / so_sys.hinf_norm(mu=mu):e}')
    print(f' SOBTv relative Hankel-error: {err_sobtv.hankel_norm(mu=mu) / so_sys.hankel_norm(mu=mu):e}')
fig, ax = plt.subplots()
for mu, rom_sobtv in zip(mu_list, roms_sobtv):
    rom_sobtv.mag_plot(w, ax=ax, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of SOBTv reduced models')
ax.legend()
plt.show()
fig, ax = plt.subplots()
for mu, rom_sobtv in zip(mu_list, roms_sobtv):
    (so_sys - rom_sobtv).mag_plot(w, ax=ax, mu=mu, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of the SOBTv error system')
ax.legend()
plt.show()
# ## Position-Velocity Second-Order Balanced Truncation (SOBTpv)
r = 5  # reduced order
roms_sobtpv = []
for mu in mu_list:
    sobtpv_reductor = SOBTpvReductor(so_sys, mu=mu)
    rom_sobtpv = sobtpv_reductor.reduce(r)
    roms_sobtpv.append(rom_sobtpv)
fig, ax = plt.subplots()
# BUG FIX: iterate mu and rom together -- the original labelled every pole set
# with the stale `mu` left over from the loop above; also show the legend.
for mu, rom_sobtpv in zip(mu_list, roms_sobtpv):
    poles_rom_sobtpv = rom_sobtpv.poles()
    ax.plot(poles_rom_sobtpv.real, poles_rom_sobtpv.imag, '.', label=fr'$\mu = {mu}$')
ax.set_title("SOBTpv reduced model's poles")
ax.legend()
plt.show()
# Relative reduction errors for each parameter value.
for mu, rom_sobtpv in zip(mu_list, roms_sobtpv):
    err_sobtpv = so_sys - rom_sobtpv
    print(f'mu = {mu}')
    print(f' SOBTpv relative H_2-error: {err_sobtpv.h2_norm(mu=mu) / so_sys.h2_norm(mu=mu):e}')
    if config.HAVE_SLYCOT:
        print(f' SOBTpv relative H_inf-error: {err_sobtpv.hinf_norm(mu=mu) / so_sys.hinf_norm(mu=mu):e}')
    print(f' SOBTpv relative Hankel-error: {err_sobtpv.hankel_norm(mu=mu) / so_sys.hankel_norm(mu=mu):e}')
fig, ax = plt.subplots()
for mu, rom_sobtpv in zip(mu_list, roms_sobtpv):
    rom_sobtpv.mag_plot(w, ax=ax, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of SOBTpv reduced models')
ax.legend()
plt.show()
fig, ax = plt.subplots()
for mu, rom_sobtpv in zip(mu_list, roms_sobtpv):
    (so_sys - rom_sobtpv).mag_plot(w, ax=ax, mu=mu, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of the SOBTpv error system')
ax.legend()
plt.show()
# ## Velocity-Position Second-Order Balanced Truncation (SOBTvp)
r = 5  # reduced order
roms_sobtvp = []
for mu in mu_list:
    sobtvp_reductor = SOBTvpReductor(so_sys, mu=mu)
    rom_sobtvp = sobtvp_reductor.reduce(r)
    roms_sobtvp.append(rom_sobtvp)
fig, ax = plt.subplots()
# BUG FIX: iterate mu and rom together -- the original labelled every pole set
# with the stale `mu` left over from the loop above; also show the legend.
for mu, rom_sobtvp in zip(mu_list, roms_sobtvp):
    poles_rom_sobtvp = rom_sobtvp.poles()
    ax.plot(poles_rom_sobtvp.real, poles_rom_sobtvp.imag, '.', label=fr'$\mu = {mu}$')
ax.set_title("SOBTvp reduced model's poles")
ax.legend()
plt.show()
# Relative reduction errors for each parameter value.
for mu, rom_sobtvp in zip(mu_list, roms_sobtvp):
    err_sobtvp = so_sys - rom_sobtvp
    print(f'mu = {mu}')
    print(f' SOBTvp relative H_2-error: {err_sobtvp.h2_norm(mu=mu) / so_sys.h2_norm(mu=mu):e}')
    if config.HAVE_SLYCOT:
        print(f' SOBTvp relative H_inf-error: {err_sobtvp.hinf_norm(mu=mu) / so_sys.hinf_norm(mu=mu):e}')
    print(f' SOBTvp relative Hankel-error: {err_sobtvp.hankel_norm(mu=mu) / so_sys.hankel_norm(mu=mu):e}')
fig, ax = plt.subplots()
for mu, rom_sobtvp in zip(mu_list, roms_sobtvp):
    rom_sobtvp.mag_plot(w, ax=ax, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of SOBTvp reduced models')
ax.legend()
plt.show()
fig, ax = plt.subplots()
for mu, rom_sobtvp in zip(mu_list, roms_sobtvp):
    (so_sys - rom_sobtvp).mag_plot(w, ax=ax, mu=mu, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of the SOBTvp error system')
ax.legend()
plt.show()
# ## Free-Velocity Second-Order Balanced Truncation (SOBTfv)
r = 5  # reduced order
roms_sobtfv = []
for mu in mu_list:
    sobtfv_reductor = SOBTfvReductor(so_sys, mu=mu)
    rom_sobtfv = sobtfv_reductor.reduce(r)
    roms_sobtfv.append(rom_sobtfv)
fig, ax = plt.subplots()
# BUG FIX: iterate mu and rom together -- the original labelled every pole set
# with the stale `mu` left over from the loop above; also show the legend.
for mu, rom_sobtfv in zip(mu_list, roms_sobtfv):
    poles_rom_sobtfv = rom_sobtfv.poles()
    ax.plot(poles_rom_sobtfv.real, poles_rom_sobtfv.imag, '.', label=fr'$\mu = {mu}$')
ax.set_title("SOBTfv reduced model's poles")
ax.legend()
plt.show()
# Relative reduction errors for each parameter value.
for mu, rom_sobtfv in zip(mu_list, roms_sobtfv):
    err_sobtfv = so_sys - rom_sobtfv
    print(f'mu = {mu}')
    print(f' SOBTfv relative H_2-error: {err_sobtfv.h2_norm(mu=mu) / so_sys.h2_norm(mu=mu):e}')
    if config.HAVE_SLYCOT:
        print(f' SOBTfv relative H_inf-error: {err_sobtfv.hinf_norm(mu=mu) / so_sys.hinf_norm(mu=mu):e}')
    print(f' SOBTfv relative Hankel-error: {err_sobtfv.hankel_norm(mu=mu) / so_sys.hankel_norm(mu=mu):e}')
fig, ax = plt.subplots()
for mu, rom_sobtfv in zip(mu_list, roms_sobtfv):
    rom_sobtfv.mag_plot(w, ax=ax, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of SOBTfv reduced models')
ax.legend()
plt.show()
fig, ax = plt.subplots()
for mu, rom_sobtfv in zip(mu_list, roms_sobtfv):
    (so_sys - rom_sobtfv).mag_plot(w, ax=ax, mu=mu, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of the SOBTfv error system')
ax.legend()
plt.show()
# ## Second-Order Balanced Truncation (SOBT)
r = 5  # reduced order
roms_sobt = []
for mu in mu_list:
    sobt_reductor = SOBTReductor(so_sys, mu=mu)
    rom_sobt = sobt_reductor.reduce(r)
    roms_sobt.append(rom_sobt)
fig, ax = plt.subplots()
# BUG FIX: iterate mu and rom together -- the original labelled every pole set
# with the stale `mu` left over from the loop above; also show the legend.
for mu, rom_sobt in zip(mu_list, roms_sobt):
    poles_rom_sobt = rom_sobt.poles()
    ax.plot(poles_rom_sobt.real, poles_rom_sobt.imag, '.', label=fr'$\mu = {mu}$')
ax.set_title("SOBT reduced model's poles")
ax.legend()
plt.show()
# Relative reduction errors for each parameter value.
for mu, rom_sobt in zip(mu_list, roms_sobt):
    err_sobt = so_sys - rom_sobt
    print(f'mu = {mu}')
    print(f' SOBT relative H_2-error: {err_sobt.h2_norm(mu=mu) / so_sys.h2_norm(mu=mu):e}')
    if config.HAVE_SLYCOT:
        print(f' SOBT relative H_inf-error: {err_sobt.hinf_norm(mu=mu) / so_sys.hinf_norm(mu=mu):e}')
    print(f' SOBT relative Hankel-error: {err_sobt.hankel_norm(mu=mu) / so_sys.hankel_norm(mu=mu):e}')
fig, ax = plt.subplots()
for mu, rom_sobt in zip(mu_list, roms_sobt):
    rom_sobt.mag_plot(w, ax=ax, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of SOBT reduced models')
ax.legend()
plt.show()
fig, ax = plt.subplots()
for mu, rom_sobt in zip(mu_list, roms_sobt):
    (so_sys - rom_sobt).mag_plot(w, ax=ax, mu=mu, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of the SOBT error system')
ax.legend()
plt.show()
# ## Balanced Truncation (BT)
r = 5  # reduced order
roms_bt = []
for mu in mu_list:
    # BT works on the first-order (LTI) realisation of the second-order model.
    bt_reductor = BTReductor(so_sys.to_lti(), mu=mu)
    rom_bt = bt_reductor.reduce(r)
    roms_bt.append(rom_bt)
fig, ax = plt.subplots()
# BUG FIX: iterate mu and rom together -- the original labelled every pole set
# with the stale `mu` left over from the loop above; also show the legend.
for mu, rom_bt in zip(mu_list, roms_bt):
    poles_rom_bt = rom_bt.poles()
    ax.plot(poles_rom_bt.real, poles_rom_bt.imag, '.', label=fr'$\mu = {mu}$')
ax.set_title("BT reduced model's poles")
ax.legend()
plt.show()
# Relative reduction errors for each parameter value.
for mu, rom_bt in zip(mu_list, roms_bt):
    err_bt = so_sys - rom_bt
    print(f'mu = {mu}')
    print(f' BT relative H_2-error: {err_bt.h2_norm(mu=mu) / so_sys.h2_norm(mu=mu):e}')
    if config.HAVE_SLYCOT:
        print(f' BT relative H_inf-error: {err_bt.hinf_norm(mu=mu) / so_sys.hinf_norm(mu=mu):e}')
    print(f' BT relative Hankel-error: {err_bt.hankel_norm(mu=mu) / so_sys.hankel_norm(mu=mu):e}')
fig, ax = plt.subplots()
for mu, rom_bt in zip(mu_list, roms_bt):
    rom_bt.mag_plot(w, ax=ax, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of BT reduced models')
ax.legend()
plt.show()
fig, ax = plt.subplots()
for mu, rom_bt in zip(mu_list, roms_bt):
    (so_sys - rom_bt).mag_plot(w, ax=ax, mu=mu, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of the BT error system')
ax.legend()
plt.show()
# ## Iterative Rational Krylov Algorithm (IRKA)
r = 5  # reduced order
roms_irka = []
for mu in mu_list:
    # IRKA works on the first-order (LTI) realisation of the second-order model.
    irka_reductor = IRKAReductor(so_sys.to_lti(), mu=mu)
    rom_irka = irka_reductor.reduce(r)
    roms_irka.append(rom_irka)
fig, ax = plt.subplots()
# BUG FIX: iterate mu and rom together -- the original labelled every pole set
# with the stale `mu` left over from the loop above; also show the legend.
for mu, rom_irka in zip(mu_list, roms_irka):
    poles_rom_irka = rom_irka.poles()
    ax.plot(poles_rom_irka.real, poles_rom_irka.imag, '.', label=fr'$\mu = {mu}$')
ax.set_title("IRKA reduced model's poles")
ax.legend()
plt.show()
# Relative reduction errors for each parameter value.
for mu, rom_irka in zip(mu_list, roms_irka):
    err_irka = so_sys - rom_irka
    print(f'mu = {mu}')
    print(f' IRKA relative H_2-error: {err_irka.h2_norm(mu=mu) / so_sys.h2_norm(mu=mu):e}')
    if config.HAVE_SLYCOT:
        print(f' IRKA relative H_inf-error: {err_irka.hinf_norm(mu=mu) / so_sys.hinf_norm(mu=mu):e}')
    print(f' IRKA relative Hankel-error: {err_irka.hankel_norm(mu=mu) / so_sys.hankel_norm(mu=mu):e}')
fig, ax = plt.subplots()
for mu, rom_irka in zip(mu_list, roms_irka):
    rom_irka.mag_plot(w, ax=ax, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of IRKA reduced models')
ax.legend()
plt.show()
fig, ax = plt.subplots()
for mu, rom_irka in zip(mu_list, roms_irka):
    (so_sys - rom_irka).mag_plot(w, ax=ax, mu=mu, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of the IRKA error system')
ax.legend()
plt.show()
# ## Second-Order Iterative Rational Krylov Algorithm (SOR-IRKA)
r = 5  # reduced order
roms_sor_irka = []
for mu in mu_list:
    sor_irka_reductor = SORIRKAReductor(so_sys, mu=mu)
    rom_sor_irka = sor_irka_reductor.reduce(r)
    roms_sor_irka.append(rom_sor_irka)
fig, ax = plt.subplots()
# BUG FIX: iterate mu and rom together -- the original labelled every pole set
# with the stale `mu` left over from the loop above; also show the legend.
for mu, rom_sor_irka in zip(mu_list, roms_sor_irka):
    poles_rom_sor_irka = rom_sor_irka.poles()
    ax.plot(poles_rom_sor_irka.real, poles_rom_sor_irka.imag, '.', label=fr'$\mu = {mu}$')
ax.set_title("SORIRKA reduced model's poles")
ax.legend()
plt.show()
# Relative reduction errors for each parameter value.
for mu, rom_sor_irka in zip(mu_list, roms_sor_irka):
    err_sor_irka = so_sys - rom_sor_irka
    print(f'mu = {mu}')
    print(f' SORIRKA relative H_2-error: {err_sor_irka.h2_norm(mu=mu) / so_sys.h2_norm(mu=mu):e}')
    if config.HAVE_SLYCOT:
        print(f' SORIRKA relative H_inf-error: {err_sor_irka.hinf_norm(mu=mu) / so_sys.hinf_norm(mu=mu):e}')
    print(f' SORIRKA relative Hankel-error: {err_sor_irka.hankel_norm(mu=mu) / so_sys.hankel_norm(mu=mu):e}')
fig, ax = plt.subplots()
for mu, rom_sor_irka in zip(mu_list, roms_sor_irka):
    rom_sor_irka.mag_plot(w, ax=ax, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of SORIRKA reduced models')
ax.legend()
plt.show()
fig, ax = plt.subplots()
for mu, rom_sor_irka in zip(mu_list, roms_sor_irka):
    (so_sys - rom_sor_irka).mag_plot(w, ax=ax, mu=mu, label=fr'$\mu = {mu}$')
ax.set_title('Magnitude plot of the SORIRKA error system')
ax.legend()
plt.show()
| notebooks/parametric_string.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: imbalanced
# language: python
# name: imbalanced
# ---
# # K Means-SMOTE
#
# Creates new samples by interpolation of samples with SMOTE within selected clusters
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from imblearn.over_sampling import KMeansSMOTE
# -
# ## Create data
#
# https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html
#
# We will create 2 classes, one majority one minority, clearly separated to facilitate the demonstration.
# +
# Configuration options
blobs_random_seed = 42
centers = [(0, 0), (5, 5), (0, 5)]
cluster_std = 1.5
num_features_for_samples = 2
num_samples_total = 2100
# Generate X
# BUG FIX: `blobs_random_seed` was defined but never passed to make_blobs,
# so the demo data changed on every run; pin it for reproducibility.
X, y = make_blobs(
    n_samples=num_samples_total,
    centers=centers,
    n_features=num_features_for_samples,
    cluster_std=cluster_std,
    random_state=blobs_random_seed)
# transform arrays to pandas formats
X = pd.DataFrame(X, columns=['VarA', 'VarB'])
y = pd.Series(y)
# different number of samples per blob
X = pd.concat([
    X[y == 0],
    X[y == 1].sample(400, random_state=42),
    X[y == 2].sample(100, random_state=42)
], axis=0)
y = y.loc[X.index]
# reset indexes
X.reset_index(drop=True, inplace=True)
y.reset_index(drop=True, inplace=True)
# create imbalanced target
# Seed the binomial draws too, consistent with the random_state pinned
# everywhere else in this notebook.
rng = np.random.RandomState(blobs_random_seed)
y = pd.concat([
    pd.Series(rng.binomial(1, 0.3, 700)),
    pd.Series(rng.binomial(1, 0.2, 400)),
    pd.Series(rng.binomial(1, 0.1, 100)),
], axis=0).reset_index(drop=True)
# display size
X.shape, y.shape
# -
# Class counts: shows the imbalance between the two target classes.
y.value_counts()
# +
# Scatter plot of the toy data, coloured by class.
sns.scatterplot(
    data=X, x="VarA", y="VarB", hue=y, alpha=0.5
)
plt.title('Toy dataset')
plt.show()
# -
# ## K Means SMOTE
#
# https://imbalanced-learn.org/stable/generated/imblearn.over_sampling.KMeansSMOTE.html
# +
# KMeansSMOTE: cluster the feature space with KMeans, keep clusters whose
# minority share exceeds `cluster_balance_threshold`, then oversample the
# minority class inside those clusters with SMOTE.
sm = KMeansSMOTE(
    sampling_strategy='auto',  # samples only the minority class
    random_state=0,  # for reproducibility
    k_neighbors=2,
    n_jobs=None,
    kmeans_estimator=KMeans(n_clusters=3, random_state=0),
    cluster_balance_threshold=0.1,
    density_exponent='auto'
)
X_res, y_res = sm.fit_resample(X, y)
# +
# size of original data
X.shape, y.shape
# +
# size of oversampled data (this is oversampling: rows are added, not removed)
X_res.shape, y_res.shape
# +
# number of minority class observations
y.value_counts(), y_res.value_counts()
# +
# plot of original data
sns.scatterplot(
    data=X, x="VarA", y="VarB", hue=y,alpha=0.5
)
plt.title('Original dataset')
plt.show()
# +
# plot of resampled data
sns.scatterplot(
    data=X_res, x="VarA", y="VarB", hue=y_res, alpha=0.5
)
plt.title('K-Means SMOTE')
plt.show()
# -
| Section-05-Oversampling/05-07-K-Means-SMOTE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/seryeongi/exec_machinlearning/blob/master/wholesale_decisiontreeclassification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="51xtoNUHeb7T" outputId="3cdce381-10d3-4df1-f528-5b4bc61e8b4e"
# !ls #list
# + colab={"base_uri": "https://localhost:8080/"} id="mu3R39ibfY7c" outputId="55b83626-e357-4ebb-f1ab-0a9da0c1fc6d"
# !ls -l
# + [markdown] id="16TsWVn4gaiT"
# In detail (look at the first and the last characters of each entry)
# **d**rwxr-xr-x 1 root root 4096 Jun 15 13:37 **sample_data**(directory)
# **-**rwxr-xr-x 1 root root 1697 Jan 1 2000 **anscombe.json**(file)
# + colab={"base_uri": "https://localhost:8080/"} id="gpQ6IkBdfgl-" outputId="b934b590-aca7-4d80-a849-a963c2a5541e"
# !pwd # 현재 자기 위치
# + colab={"base_uri": "https://localhost:8080/"} id="XFluru-rfxYp" outputId="4983700d-f1e5-41c3-cb56-4638ffda8e0d"
# !ls -l ./sample_data # 내가 원하는 디렉토리명 or 파일명
# + colab={"base_uri": "https://localhost:8080/"} id="aEdUFX4iguS0" outputId="f67cf3c7-41c3-4816-9042-e199231e706e"
# !ls -l ./
# + colab={"base_uri": "https://localhost:8080/"} id="YLzjUUfVjHTz" outputId="f67be402-ec1b-4813-bde5-1f32c51415cc"
# !ls -l ./wholesale.xls
# + id="BJ60umwVin5K" colab={"base_uri": "https://localhost:8080/", "height": 455} outputId="251075d2-fb44-4d13-a847-7e5579629f77"
import pandas as pd
# Load the wholesale spreadsheet and display it (last expression of the cell).
df = pd.read_excel('./wholesale.xls')
df
# + colab={"base_uri": "https://localhost:8080/"} id="nUERV7oQ5FPp" outputId="e94904ff-da91-426b-d56e-9bbcd192a914"
# Split into target (the 'label' column) and the 8 feature columns (1..8).
Y = df['label']
X = df.iloc[:,1:9]
Y.shape, X.shape
# + colab={"base_uri": "https://localhost:8080/"} id="yml1_4VE9RcX" outputId="6d3e7fec-9061-4d50-b25c-c25a4f46ed1a"
from sklearn.tree import DecisionTreeClassifier
# Fit a decision tree on the full dataset (no train/test split here).
dtree = DecisionTreeClassifier()
dtree.fit(X,Y)
# + colab={"base_uri": "https://localhost:8080/"} id="D6E_A5oT9sT5" outputId="0bf85ebb-20b4-4247-9b8a-aea55cae95b4"
dtree.score(X,Y)
# + id="ujYrvuDb-KUR"
| wholesale_decisiontreeclassification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Creating a normfit and transverse momentum+pseudorapidity
# The point of this exercise is to learn to create a normal distribution fit for the data, and to learn what are transverse momentum and pseudorapidity (and how are they linked together). The data used is open data released by the [CMS](https://home.cern/about/experiments/cms) experiment.
# ### First the fit
# Let's begin by loading the needed modules, data and creating a histogram of the data to see the more interesting points (the area for which we want to create the fit).
# +
# This is needed to create the fit
from scipy.stats import norm
import pandas as pd
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# +
# Let's choose Dimuon_DoubleMu.csv
# Load the CMS dimuon open-data CSV over HTTP.
data = pd.read_csv('http://opendata.cern.ch/record/545/files/Dimuon_DoubleMu.csv')
# And save the invariant masses to iMass
iMass = data['M']
# Plus draw the histogram (300 bins over the full mass range)
n, bins, patches = plt.hist(iMass, 300, facecolor='g')
plt.xlabel('Invariant Mass (GeV)')
plt.ylabel('Amount')
plt.title('Histogram of the invariant masses')
plt.show()
# -
# Let's take a closer look of the bump around 90GeVs.
# +
# Selection window in GeV around the bump near 90 GeV (presumably the Z peak).
# NOTE(review): `min`/`max` shadow the Python builtins; kept because the
# markdown below refers to these names, but worth renaming eventually.
min = 85
max = 97

# Let's crop the area. croMass now includes all the masses between the values of min and max
croMass = iMass[(min < iMass) & (iMass < max)]

# Calculate the mean (µ) and standard deviation (sigma) of the fitted normal
# distribution using the norm.fit function from scipy
(mu, sigma) = norm.fit(croMass)

# Histogram of the cropped data. Note that the data is normalized (density = 1)
n, bins, patches = plt.hist(croMass, 300, density = 1, facecolor='g')

# norm.pdf evaluates the fitted normal distribution at the bin edges;
# draw it on top of the histogram
y = norm.pdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r-.', linewidth=3)

plt.xlabel('Invariant Mass (GeV)')  # fixed typo: was 'Invarian Mass(GeV)'
plt.ylabel('Probability')
plt.title(r'$\mathrm{Histogram \ and\ fit,\ where:}\ \mu=%.3f,\ \sigma=%.3f$' %(mu, sigma))
plt.show()
# -
#
# Does the invariant mass distribution follow normal distribution?
#
# How does cropping the data affect the distribution? (Try to crop the data with different values of min and max)
#
# Why do we need to normalize the data? (Check out of the image changes if you remove the normalisation [density])
# ## And then about transverse momenta and pseudorapidity
# Transverse momentum $p_t$ is the component of the momentum that is perpendicular to the beam. It can be calculated from the momenta in the x and y directions using vector analysis, but (in most datasets from CMS at least) it can be found directly from the loaded data.
#
# Pseudorapidity tells the angle between the particle and the beam, although not using any 'classical' angle values. You can see the connection between degree (°) and pseudorapidity from an image a bit later. Pseudorapidity is the column Eta $(\eta)$ in the loaded data.
# Let's check out what the distribution of transverse momenta looks like
# +
# allPt now includes all the transverse momenta
allPt = pd.concat([data.pt1, data.pt2])
# concat-command from the pandas module combines (concatenates) the information to a single column
# (it returns here a DataFrame -type variable, but it only has a singe unnamed column, so later
# we don't have to choose the wanted column from the allPt variable)
# And the histogram
plt.hist(allPt, bins=400, range = (0,50))
plt.xlabel('$p_t$ (GeV)', fontsize = 12)
plt.ylabel('Amount', fontsize = 12)
plt.title('Histogram of transverse momenta', fontsize = 15)
plt.show()
# -
# Looks like most of the momenta are between 0 and 10. Let's use this to limit the data we're about to draw
# +
# using the below cond, we only choose the events below that amount (pt < cond)
cond = 10
# Keep only events where BOTH muons have pt below the cutoff.
smallPt = data[(data.pt1 < cond) & (data.pt2 < cond)]
# Let's save all the etas and pts to variables
allpPt = pd.concat([smallPt.pt1, smallPt.pt2])
allEta = pd.concat([smallPt.eta1, smallPt.eta2])
# +
# and draw a scatterplot (s=1 keeps the many points readable)
# NOTE(review): "Tranverse" in the title below is a typo for "Transverse".
plt.scatter(allEta, allpPt, s=1)
plt.ylabel('$p_t$ (GeV)', fontsize=13)
plt.xlabel('Pseudorapidity ($\eta$)', fontsize=13)
plt.title('Tranverse momenta vs. pseudorapidity', fontsize=15)
plt.show()
# -
# <img src = "https://upload.wikimedia.org/wikipedia/commons/thumb/9/93/Pseudorapidity.svg/800px-Pseudorapidity.svg.png"
# alt = "Pseudorapiditeetti" style = "height: 300px" align="left">
#
# Image on the left tells you the relation between pseudorapidity ($\eta$) and the angle ($\theta$). If $\eta = 0$, then the event is perpendicular to the beam and so on. Look at this picture and compare it to the plot above and try to answers the questions below.
#
# ### Some questions
#
# Why is the scatterplot shaped like it is? And why aren't particles with smaller momentum detected with $\eta$ being somewhere between -1 and 1?
#
# Why is pseudorapidity an interesting concept in the first place?
| Exercises-with-open-data/Advanced/Normfit-transversemomentum+pseudorapidity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + nbsphinx="hidden"
import open3d as o3d
import numpy as np
import copy
import os
import sys
# monkey patches visualization and provides helpers to load geometries
sys.path.append('..')
import open3d_tutorial as o3dtut
# change to True if you want to interact with the visualization windows
o3dtut.interactive = not "CI" in os.environ
# -
# # ICP Registration
# This tutorial demonstrates the ICP (Iterative Closest Point) registration algorithm. It has been a mainstay of geometric registration in both research and industry for many years. The input are two point clouds and an initial transformation that roughly aligns the source point cloud to the target point cloud. The output is a refined transformation that tightly aligns the two point clouds. A helper function `draw_registration_result` visualizes the alignment during the registration process. In this tutorial, we show two ICP variants, the point-to-point ICP and the point-to-plane ICP [\[Rusinkiewicz2001\]](../reference.html#rusinkiewicz2001).
# ## Helper visualization function
# The function below visualizes a target point cloud and a source point cloud transformed with an alignment transformation. The target point cloud and the source point cloud are painted with cyan and yellow colors respectively. The more and tighter the two point clouds overlap with each other, the better the alignment result.
def draw_registration_result(source, target, transformation):
    """Render *source* (yellow) aligned onto *target* (cyan) by *transformation*."""
    # Deep-copy both clouds: paint_uniform_color/transform mutate in place and
    # the caller's originals must stay untouched.
    src = copy.deepcopy(source)
    tgt = copy.deepcopy(target)
    src.paint_uniform_color([1, 0.706, 0])
    tgt.paint_uniform_color([0, 0.651, 0.929])
    src.transform(transformation)
    # Fixed camera parameters so every call shows the same viewpoint.
    o3d.visualization.draw_geometries(
        [src, tgt],
        zoom=0.4459,
        front=[0.9288, -0.2951, -0.2242],
        lookat=[1.6784, 2.0612, 1.4451],
        up=[-0.3402, -0.9189, -0.1996])
# <div class="alert alert-info">
#
# **Note:**
#
# Since the functions `transform` and `paint_uniform_color` change the point cloud, we call `copy.deepcopy` to make copies and protect the original point clouds.
#
# </div>
# ## Input
# The code below reads a source point cloud and a target point cloud from two files. A rough transformation is given.
#
# <div class="alert alert-info">
#
# **Note:**
#
# The initial alignment is usually obtained by a global registration algorithm. See [Global registration](../pipelines/global_registration.rst) for examples.
#
# </div>
# Load the two partially-overlapping scans used throughout this tutorial.
source = o3d.io.read_point_cloud("../../test_data/ICP/cloud_bin_0.pcd")
target = o3d.io.read_point_cloud("../../test_data/ICP/cloud_bin_1.pcd")
# Max correspondence distance (meters) used by evaluation and ICP below.
threshold = 0.02
# Rough initial 4x4 rigid transform (rotation + translation).
trans_init = np.asarray([[0.862, 0.011, -0.507, 0.5],
                         [-0.139, 0.967, -0.215, 0.7],
                         [0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]])
draw_registration_result(source, target, trans_init)
# The function `evaluate_registration` calculates two main metrics:
#
# - `fitness`, which measures the overlapping area (# of inlier correspondences / # of points in target). The higher the better.
# - `inlier_rmse`, which measures the RMSE of all inlier correspondences. The lower the better.
# Score the rough initial alignment before running any ICP.
print("Initial alignment")
evaluation = o3d.pipelines.registration.evaluate_registration(
    source, target, threshold, trans_init)
print(evaluation)
# ## Point-to-point ICP
# In general, the ICP algorithm iterates over two steps:
#
# 1. Find correspondence set $\mathcal{K}=\{(\mathbf{p}, \mathbf{q})\}$ from target point cloud $\mathbf{P}$, and source point cloud $\mathbf{Q}$ transformed with current transformation matrix $\mathbf{T}$.
# 2. Update the transformation $\mathbf{T}$ by minimizing an objective function $E(\mathbf{T})$ defined over the correspondence set $\mathcal{K}$.
#
# Different variants of ICP use different objective functions $E(\mathbf{T})$ [\[BeslAndMcKay1992\]](../reference.html#beslandmckay1992) [\[ChenAndMedioni1992\]](../reference.html#chenandmedioni1992) [\[Park2017\]](../reference.html#park2017).
#
# We first show a point-to-point ICP algorithm [\[BeslAndMcKay1992\]](../reference.html#beslandmckay1992) using the objective
#
# \begin{equation}
# E(\mathbf{T}) = \sum_{(\mathbf{p},\mathbf{q})\in\mathcal{K}}\|\mathbf{p} - \mathbf{T}\mathbf{q}\|^{2}
# \end{equation}
#
# The class `TransformationEstimationPointToPoint` provides functions to compute the residuals and Jacobian matrices of the point-to-point ICP objective. The function `registration_icp` takes it as a parameter and runs point-to-point ICP to obtain the results.
print("Apply point-to-point ICP")
# Default convergence criteria: stops at convergence or 30 iterations.
reg_p2p = o3d.pipelines.registration.registration_icp(
    source, target, threshold, trans_init,
    o3d.pipelines.registration.TransformationEstimationPointToPoint())
print(reg_p2p)
print("Transformation is:")
print(reg_p2p.transformation)
draw_registration_result(source, target, reg_p2p.transformation)
# The `fitness` score increases from 0.174723 to 0.372450. The `inlier_rmse` reduces from 0.011771 to 0.007760. By default, `registration_icp` runs until convergence or reaches a maximum number of iterations (30 by default). It can be changed to allow more computation time and to improve the results further.
# Same estimation, but allow up to 2000 iterations for a tighter fit.
reg_p2p = o3d.pipelines.registration.registration_icp(
    source, target, threshold, trans_init,
    o3d.pipelines.registration.TransformationEstimationPointToPoint(),
    o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=2000))
print(reg_p2p)
print("Transformation is:")
print(reg_p2p.transformation)
draw_registration_result(source, target, reg_p2p.transformation)
# The final alignment is tight. The `fitness` score improves to 0.621123. The `inlier_rmse` reduces to 0.006583.
# ## Point-to-plane ICP
# The point-to-plane ICP algorithm [\[ChenAndMedioni1992\]](../reference.html#chenandmedioni1992) uses a different objective function
#
# \begin{equation}
# E(\mathbf{T}) = \sum_{(\mathbf{p},\mathbf{q})\in\mathcal{K}}\big((\mathbf{p} - \mathbf{T}\mathbf{q})\cdot\mathbf{n}_{\mathbf{p}}\big)^{2},
# \end{equation}
#
# where $\mathbf{n}_{\mathbf{p}}$ is the normal of point $\mathbf{p}$. [\[Rusinkiewicz2001\]](../reference.html#rusinkiewicz2001) has shown that the point-to-plane ICP algorithm has a faster convergence speed than the point-to-point ICP algorithm.
#
# `registration_icp` is called with a different parameter `TransformationEstimationPointToPlane`. Internally, this class implements functions to compute the residuals and Jacobian matrices of the point-to-plane ICP objective.
print("Apply point-to-plane ICP")
# Same call, but the point-to-plane objective needs target normals
# (already present in the loaded .pcd files).
reg_p2l = o3d.pipelines.registration.registration_icp(
    source, target, threshold, trans_init,
    o3d.pipelines.registration.TransformationEstimationPointToPlane())
print(reg_p2l)
print("Transformation is:")
print(reg_p2l.transformation)
draw_registration_result(source, target, reg_p2l.transformation)
# The point-to-plane ICP reaches tight alignment within 30 iterations (a `fitness` score of 0.620972 and an `inlier_rmse` score of 0.006581).
#
# <div class="alert alert-info">
#
# The point-to-plane ICP algorithm uses point normals. In this tutorial, we load normals from files. If normals are not given, they can be computed with [Vertex normal estimation](pointcloud.ipynb#vertex-normal-estimation).
#
# </div>
| examples/python/pipelines/icp_registration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# #Bollinger Bands in Cufflinks
# +
import cufflinks as cf
import pandas as pd

# ta_plot with study='boll' draws Bollinger Bands over generated line data.
cf.set_config_file(world_readable=True,offline=False)
cf.datagen.lines(1,200).ta_plot(study='boll',periods=14,title='Bollinger Bands')
# +
from IPython.display import display, HTML

# Inject the plotly docs stylesheet/fonts into the notebook output.
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))

# ! pip install publisher --upgrade
import publisher
# Publish this notebook as a page on the plotly documentation site.
publisher.publish(
    'bollinger_bands.ipynb', 'pandas/bollinger-bands/', 'Bollinger Bands',
    'Bollinger Bands in Pandas and Cufflinks.',
    title = 'Bollinger Bands in Pandas | plotly',
    thumbnail='/images/bolbands.png', language='pandas',
    page_type='example_index', has_thumbnail='true', display_as='financial_analysis', order=2)
# -
| _posts/pandas/financial_analysis/bollinger_bands.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dask
# Parallel processing for Numpy Arrays and Pandas DataFrames
#
# http://dask.pydata.org/en/latest/
#
# Interesting example and use-case:
#
# http://matthewrocklin.com/blog/work/2018/02/09/credit-models-with-dask
# ## Creating Dask Cluster
import dask
import dask.distributed
import dask.dataframe
import pandas as pd
from glob import glob

# Start a local Dask cluster; its dashboard is served at localhost:8787.
client = dask.distributed.Client() #starts Dask client
client
# In principle Dask runs also without it, but it seems to be less efficient (?) and more difficult to monitor
# ## Download Sample Data
#
# __Warning: this is a lot of data, make sure to have a fast and unlimited internet connection!__
import os
import urllib.request

# Download one CSV of NYC green-taxi trip data per month of 2018,
# skipping files that are already cached locally.
for month in range(12):
    filename = f'green_tripdata_2018-{month+1:02d}.csv'
    # Fixed: these two f-strings had lost their `{filename}` placeholder,
    # so every month resolved to the same (wrong) URL and path.
    url = f'https://s3.amazonaws.com/nyc-tlc/trip+data/{filename}'
    csvfile = f'../example_files/{filename}'
    if not os.path.exists(csvfile):
        print('downloading sample data: ', filename)
        print(url, csvfile)
        urllib.request.urlretrieve(url, csvfile)
# ## Dask DataFrame
#
# The following example imports a large amount of data (from multiple csv files) into a Dask dataframe, does some analysis and aggregation and returns a Pandas dataframe.
file_pattern = '../example_files/green_*.csv'
#dask csv reader, works on wildcards (e.g. *), but not on zip files
ddf = dask.dataframe.read_csv(file_pattern, delimiter=',', decimal='.',
                              parse_dates=[1, 2], # columns to be parsed as dates
                              # manual specification of data types where inference does not work
                              dtype={'trip_type': 'float64'},
                             )
ddf.head()
# Derived columns are added lazily, just like in Pandas.
ddf['date'] = ddf.lpep_pickup_datetime.dt.date
ddf['tip_fraction'] = ddf.tip_amount / ddf.total_amount
ddf.head()
agg = ddf.groupby(['date']).agg({'passenger_count':'sum', 'trip_distance':'sum',
                                 'tip_fraction':'mean'}) #groupby analogue to Pandas synthax
# compute() triggers the actual distributed work and materializes the result.
agg_df = agg.compute() #creates Pandas DataFrame from Dask DataFrame and does the actual computing
agg_df.head()
# The execution progress and parallelism efficiency can be monitored in the web-gui of the Dask client:
#
# http://localhost:8787/status
# ### Importing zipped Files
#
# In contrast to Pandas, Dask DataFrames cannot be directly created from zipped csv files. The following code snippet uses Pandas to import zipped files on Dask grid via Dask Delayed.
#
# Note that the example below is not for zipped input files, but works 1:1 for zipped files.
# +
file_pattern = '../example_files/green_*.csv' # works also fine for *.zip files
files=glob(file_pattern)
# One delayed pd.read_csv per file; each runs as a task on the cluster.
dfs=[dask.delayed(pd.read_csv)(filename, delimiter=',', decimal='.', parse_dates=[1, 2])
     for filename in files]
# Stitch the delayed Pandas frames into a single Dask DataFrame.
ddf=dask.dataframe.from_delayed(dfs)
ddf.head()
# -
# Note that the import of a single zipped csv file is not distributed, thus the memory of each worker must be sufficient to contain the Pandas DataFrame for each input file.
# ## Dask Delayed
#
#
# Dask Delayed is used to submit functions with defined input and output to the calculators.
#
# Note that the compute() statement is only executed for the result(s), the dependencies are handled automatically by Dask.
#
# If not a single function but a class is intended to be submitted, a helper function can be used. If the helper function is defined inside a class, it should be static (i.e. without referring to self).
import numpy as np
import time
# +
class A: #class which does the calculation, to be parallelized
    def __init__(self, p1,x1):
        # Keep the large shared vector and the per-task scalar on the instance.
        self.p1=p1
        self.x1=x1
    def doCalc(self,p2,x2):
        # Shift each vector by its scalar, then take the inner product.
        shifted_own = self.p1 + self.x1
        shifted_other = p2 + x2
        return np.dot(shifted_own, shifted_other)
def calcHelper(p1,p2,x1,x2):
    """Build an A on the worker and run its doCalc.

    A module-level helper is submitted instead of the class itself, per the
    note above about parallelizing classes via a helper function.
    """
    return A(p1, x1).doCalc(p2, x2)
def mainProg():
    """Fan 100 dot-product tasks out over the Dask cluster and sum the results."""
    base = np.arange(10000000)
    squared = base**2
    # Ship the two big arrays to the workers once, up front, instead of
    # embedding them in every task.
    base_remote = client.scatter(base)
    squared_remote = client.scatter(squared)
    partials = [
        dask.delayed(calcHelper)(base_remote, squared_remote, x1, x1**2)
        for x1 in range(100)
    ]
    total = dask.delayed(np.sum)(partials)
    return total.compute()
# -
# Time the distributed computation end-to-end with wall-clock stamps.
print(f'started at {time.ctime()}')
print('result:', mainProg())
print(f'ended at {time.ctime()}')
# In the example above, the command xs=client.scatter(x) is used to distribute large data effectively to the calculators.
#
# If there are issues with multiprocessing, the client can be restricted to multithreading (which is usually slower) using
#
# client=dask.distributed.Client(processes=False, threads_per_worker=4)
# ## Dask Futures
#
# Dask Futures are similar to Dask Delayed, but computation already starts when the future is submitted (Dask Delayed is lazy, computation starts only when *compute()* is called).
# Submit four squaring tasks; computation starts immediately (futures are eager).
futures = []
for i in range(4):
    futures.append(client.submit(lambda x: x**2, i))
# A future can be passed directly as input to another submitted task.
res = client.submit(sum, futures)
# Futures can be used as input for other calculations.
res
# .result() blocks until the task has finished and fetches the value.
res.result()
# ## Cleanup
client.close()
| notebooks/dask.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Reinforcement learning
# language: python
# name: rl
# ---
# # TD(0) learning
#
# The TD(0) is an alternative algorithm that can estimate an environment's
# value function. The main difference is that TD(0) doesn't need to wait
# until the agent has reached the goal to update a state's estimated
# value.
#
# Here is the example pseudo-code we will be implementing:
#
# 
#
# From: Sutton and Barto, 2018. Ch. 6.
# +
# first, import necessary modules
import sys
import gym
import random
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# add your own path to the RL repo here
sys.path.append('/Users/wingillis/dev/reinforcement-learning')
from collections import defaultdict
from lib.envs.gridworld import GridworldEnv
from lib.plotting import plot_gridworld_value_function, plot_value_updates
# -
sns.set_style('white')
# define some hyperparameters
gamma = 0.75 # discounting factor
alpha = 0.1 # learning rate - a low value is more stable
n_episodes = 5000
# initialize the environment
shape = (5, 5) # size of the gridworld
env = GridworldEnv(shape, n_goals=2)
# Fixed seeds so the episode trajectories are reproducible.
env.seed(23)
random.seed(23)
# define a policy function
def policy_fun():
    """Uniform-random policy: pick one of the four grid actions (0-3)."""
    # randrange(4) draws from {0,1,2,3}, identically to randint(0, 3).
    return random.randrange(4)
# +
deltas = defaultdict(list)

# initialize the value function: one estimate per grid cell, all zeros
# (the exercise skeleton had empty loop bodies, which is a SyntaxError)
V = np.zeros(np.prod(shape))

for i in range(n_episodes):
    # reset the env and get current state
    # NOTE(review): assumes reset() returns the discrete state index and
    # step() returns (next_state, reward, done, info) — confirm against
    # lib.envs.gridworld.GridworldEnv
    state = env.reset()
    while True:
        # select the next action from the (uniform-random) policy
        action = policy_fun()
        # conduct the selected action, store the results
        next_state, reward, done, _ = env.step(action)
        # update the value function with the TD(0) rule:
        #   V(S) <- V(S) + alpha * (R + gamma * V(S') - V(S))
        old_value = V[state]
        V[state] = old_value + alpha * (reward + gamma * V[next_state] - old_value)
        # store the change from the old value function to the new one
        deltas[state].append(abs(V[state] - old_value))
        # stop iterating if you've reached the end
        if done:
            break
        # update the current state for the next loop
        state = next_state
# -
# ## Reference learning and value function
#
# For:
# - gamma: 0.75
# - alpha: 0.1
# - n_episodes: 5000
# - shape: (5, 5)
# Show the learned value function laid out on the grid.
V.reshape(shape)
# How quickly the estimate for state 12 (the grid center) settled.
fig = plot_value_updates(deltas[12][:100])
plt.imshow(V.reshape(shape), cmap='mako')
plt.colorbar()
fig = plot_gridworld_value_function(V.reshape(shape))
fig.tight_layout()
| reinforcement learning/03-TD_0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # About
# Life visualization, from [here](https://towardsdatascience.com/how-to-visualize-the-rest-of-your-life-28f943b1f70b) (inspired by Kurzgesagt).
#
# Interactive [here](https://labs.coruscantconsulting.co.uk/life/weeks/) (I found this after writing this code)
# # Code
# Import libraries
# +
import pandas as pd
import math
from datetime import date
from datetime import datetime, timedelta
import altair as alt

# import widgets for interactivity
import ipywidgets as widgets

# create useful constants: legend labels for the three week categories,
# and the chart size in pixels
c_label_lived = "lived weeks"
c_label_remaining = "remaining weeks"
c_label_current = "this week"
c_width = 1080 / 2.2
c_height = 1500 / 2.2
# -
# Personal stats and life expectancy
# A date-picker widget; `birthday` must be the widget itself (its .value is
# read further below), not the result of input(). The original wrapped the
# widget in input() and had unbalanced parentheses — a SyntaxError.
birthday = widgets.DatePicker(
    description='Birthday:',
    disabled=False
)

# Display the widget (last expression of the notebook cell).
birthday
# Import life expectancy data:
#
# Filtered data from the WHO [here](https://apps.who.int/gho/data/node.main.688), using 2019 life expectancy data.
#
# _This isn't 100% accurate because those born before 2020 will have a different life expectancy (usually lower, but not always)._
#
# _Downloaded as a .json because I want to learn how to work with .json files_
#
# Method below from [here](https://towardsdatascience.com/how-to-convert-json-into-a-pandas-dataframe-100b2ae1e0d8), point #4
# +
import json

# load data using the stdlib JSON module; json.load parses straight from the
# file handle (no need to read the whole text first with loads)
with open('data.json','r') as f:
    data = json.load(f)

# Normalizing data: flatten each record under the top-level 'fact' key
df = pd.json_normalize(data, record_path =['fact'])
# -
# create a list from the `dims.COUNTRY` column, to make a drop-down list
countries_full = df["dims.COUNTRY"].tolist()
# remove duplicates, while keeping the original order:
# dict.fromkeys preserves first-seen insertion order (Python 3.7+) and is
# O(n), unlike the quadratic `if i not in list` membership loop it replaces
countries = list(dict.fromkeys(countries_full))
# Allow selection of country of birth
# Searchable drop-down of countries; ensure_option forces a valid choice.
country_selector = widgets.Combobox(
    placeholder='Your country of birth',
    options=countries,
    # value = United States of America,
    description='Country:',
    ensure_option=True,
)
country_selector
# Get value from selection from `country selector` above, to use to search for life expectancy by selected country
selected_country = country_selector.value
selected_country
# Allow selection of gender ('Both sexes' matches the WHO aggregate rows)
gender_selector = widgets.Combobox(
    placeholder='Your gender',
    options=['Male','Female','Both sexes'],
    description='Gender'
)
gender_selector
selected_gender = gender_selector.value
# Look up the WHO life expectancy (years) for the chosen country + gender.
life_exp = float(df.loc[(df['dims.COUNTRY']==selected_country) & (df['dims.SEX']==selected_gender),'Value'].values[0])
life_exp
# NOTE(review): life_exp2 is never used below — leftover fallback value?
life_exp2 = 76.3
birthday_val = birthday.value
# Expected lifespan as a timedelta (timedelta's first argument is days).
life = timedelta(life_exp * 365.25)
current_date = date.today()
day_of_birth = birthday_val
day_of_death = birthday_val + life
day_of_death
# Calculate time from birth until now, time from now until death.
# +
# Elapsed and remaining lifetime as timedeltas.
lived_life = current_date - day_of_birth
rest_of_life = day_of_death - current_date

lived_life_years = (lived_life.days / 365.25)
lived_life_years_floor = math.floor(lived_life_years) # .floor = Round numbers down to the nearest integer
# Fractional part of the current year, converted from years to weeks.
lived_life_weeks = (lived_life_years - lived_life_years_floor) * 365.25 / 7
lived_life_weeks_floor = math.floor(lived_life_weeks)

rest_of_life_years = (rest_of_life.days / 365.25)
rest_of_life_years_floor = math.floor(rest_of_life_years)
# -
# Create three data frames, to hold information about each week, whether it has already passed, whether it is the current week, or a future week.
# +
# Build the per-week rows as plain dicts first and construct each DataFrame
# once: DataFrame.append was deprecated and removed in pandas 2.0, and
# appending one row at a time is quadratic anyway.

# Current (partial) year: lived weeks, the current week, then remaining weeks.
week_rows = []
for week in range(lived_life_weeks_floor):
    week_rows.append({'week': week,
                      'year': lived_life_years_floor,
                      'label': c_label_lived})
week_rows.append({'week': lived_life_weeks_floor,
                  'year': lived_life_years_floor,
                  'label': c_label_current})
for week in range(lived_life_weeks_floor + 1, 52):
    week_rows.append({'week': week,
                      'year': lived_life_years_floor,
                      'label': c_label_remaining})
df_ll_weeks = pd.DataFrame(week_rows, columns=['week', 'year', 'label'])

# Fully lived years: 52 "lived" weeks per year.
df_ll = pd.DataFrame(
    [{'week': week, 'year': year, 'label': c_label_lived}
     for year in range(lived_life_years_floor)
     for week in range(52)],
    columns=['week', 'year', 'label'])

# Remaining full years until expected death: 52 "remaining" weeks per year.
df_rl = pd.DataFrame(
    [{'week': week, 'year': year, 'label': c_label_remaining}
     for year in range(lived_life_years_floor + 1,
                       lived_life_years_floor + rest_of_life_years_floor + 1)
     for week in range(52)],
    columns=['week', 'year', 'label'])
# -
# Create the chart: one small square per week, colored by its category.
chart = (
    alt.Chart(pd.concat([df_ll, df_rl, df_ll_weeks]))
    .mark_square(
        filled = True,
        opacity = 1,
        # color = "black",
        size = 8
    ).encode(
        x = alt.X("week", axis = None),
        y = alt.Y("year", axis = None),
        # Color range maps (in label order): lived -> black,
        # remaining -> lightgrey, current week -> red.
        color = alt.Color(
            "label", scale = alt.Scale(range = ["black", "lightgrey", "red"]),
            legend = alt.Legend(orient = "bottom"), title = ""
        ),
        tooltip=["year"]
    ).properties(
        width = c_width,
        height = c_height
    ).properties(
        title = "Your Life in Weeks"
    )
)
# Configure the chart title/border styling, then display and save it.
chart_config = (
    chart
    .configure_title(
        fontSize = 40,
        font = "Arial",
        align = "center",
        color = "black",
        baseline = "bottom",
        dy = 36
    ).configure_view(
        strokeWidth = 0
    )
)
display(chart_config)
chart_config.save("rest_of_life.html")
| jupyter_notebooks/life_vis/life_vis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RxnAch/ProjectsOnDeepLearning/blob/main/Predicting_House_Prices_on_kaggle.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="XG-x2r-uikMn"
import hashlib
import os
# NOTE(review): tarfile/zipfile are unused in this visible chunk — presumably
# needed by extraction helpers elsewhere in the notebook.
import tarfile
import zipfile
import requests

# Registry mapping dataset name -> (url, sha1 checksum), used by download().
DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
# + id="02QWX2-pxQVh"
def download(name, cache_dir=os.path.join('..', 'data')):
    """Download a file inserted into DATA_HUB, return the local filename."""
    assert name in DATA_HUB, f"{name} does not exist in {DATA_HUB}."
    url, sha1_hash = DATA_HUB[name]
    os.makedirs(cache_dir, exist_ok=True)
    fname = os.path.join(cache_dir, url.split('/')[-1])
    if os.path.exists(fname):
        # Hash the cached copy in 1 MiB chunks; reuse it when it matches.
        hasher = hashlib.sha1()
        with open(fname, 'rb') as f:
            for chunk in iter(lambda: f.read(1048576), b''):
                hasher.update(chunk)
        if hasher.hexdigest() == sha1_hash:
            return fname  # Hit cache
    # Cache miss (or checksum mismatch): fetch and overwrite.
    print(f'Downloading {fname} from {url}...')
    r = requests.get(url, stream=True, verify=True)
    with open(fname, 'wb') as f:
        f.write(r.content)
    return fname
# + [markdown] id="R3LP8Sx5fphW"
# #Getting data
# + id="_IEWLXvZz6_m"
# Register the Kaggle house-price train/test CSVs: (url, sha1 checksum).
DATA_HUB['kaggle_house_train'] = (
    DATA_URL + 'kaggle_house_pred_train.csv',
    '585e9cc93e70b39160e7921475f9bcd7d31219ce')

DATA_HUB['kaggle_house_test'] = (
    DATA_URL + 'kaggle_house_pred_test.csv',
    'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')
# + id="SEGpCHJGxae3"
# !pip install -U d2l
# + [markdown] id="0h03O7mP0Ple"
# #Importing Important Libraries
# + id="Z42Ff6apxRqp"
# %matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
import torch
from torch import nn
from d2l import torch as d2l
# + colab={"base_uri": "https://localhost:8080/"} id="jJk-_O5YgmMU" outputId="ca4698b2-0c34-400a-f9a7-5618b52c2b62"
# Fetch (with local caching) and load the two CSVs registered in DATA_HUB.
train_data = pd.read_csv(download('kaggle_house_train'))
test_data = pd.read_csv(download('kaggle_house_test'))
# + id="5BDMowfb0E96" colab={"base_uri": "https://localhost:8080/", "height": 252} outputId="7de4367e-0e54-4491-8e29-19a258096beb"
# First five rows of the training set.
train_data.head()
# + id="FHeUd0nA0nbl" colab={"base_uri": "https://localhost:8080/"} outputId="2f0eed0b-3d64-4921-d11b-ae5656930ad3"
#Training data : (1460 examples 81 features(1 extra for SalePrice))
#Testing data : (1459 examples ,80 features)
print(train_data.shape)
print(test_data.shape)
# + id="sdVTBE6t0uj4" colab={"base_uri": "https://localhost:8080/"} outputId="32c316c1-365e-4085-d240-64b1d1dd917e"
# 81 Features(training data), including the Id and SalePrice columns.
train_data.columns
# + id="wFTL2NrG0z20" colab={"base_uri": "https://localhost:8080/"} outputId="881a5568-145f-4fd0-809c-5493721cbc85"
# No of Numerical features (dtype != object)
print(train_data.dtypes[train_data.dtypes!='object'].count())
# Numerical Features
print(train_data.dtypes[train_data.dtypes!='object'].index)
# + [markdown] id="y9O94shKZ7gR"
# #Data Preprocessing
# + id="_mNqVNIr5DeL"
# Removing Id and SalePrice features and concatenating train and test sets,
# so that preprocessing below is applied to both consistently
all_features = pd.concat((train_data.iloc[:,1:-1],test_data.iloc[:,1:]))
# Index of the numerical (non-object dtype) feature columns
numeric_features = all_features.dtypes[all_features.dtypes !='object'].index
# + [markdown] id="6-xbt3Z1ZvoX"
# Replacing all the missing values by correspoding feature's mean.
#
# We standardize the data by rescaling features to zero mean and unit variance.
#
# 
# + id="O2-G9zLZ6vkD"
#If test data were inaccessible, mean and standard deviation could be
# calculated from training data
# Standardize every numeric column to zero mean and unit variance.
all_features[numeric_features] = all_features[numeric_features].apply(
    lambda x: (x - x.mean()) / (x.std()))
# After standardizing the data all means vanish, hence we can set missing
# values to 0
all_features[numeric_features] = all_features[numeric_features].fillna(0)
# + [markdown] id="il7gMTzrcb-G"
# Next we deal with discrete values. This includes features such as “MSZoning”. We replace them by a one-hot encoding.
# + colab={"base_uri": "https://localhost:8080/"} id="LPXoaeTHcdVW" outputId="63a339da-a0b5-479a-e991-c23026965d48"
# `dummy_na=True` considers "na" (missing value) as a valid feature value, and
# creates an indicator feature for it
all_features = pd.get_dummies(all_features, dummy_na=True)
# One-hot encoding grows the column count substantially (79 -> 331 here).
all_features.shape
# + [markdown] id="ApHp0al6cxWw"
# You can see that this conversion increases the number of features from 79 to 331. Finally, via the values attribute, we can extract the NumPy format from the pandas format and convert it into the tensor representation for training.
# + id="WlOksiHocyfm"
# Split the combined frame back into train/test and convert to float32 tensors.
n_train = train_data.shape[0]
train_features = torch.tensor(all_features[:n_train].values,
                              dtype=torch.float32)
test_features = torch.tensor(all_features[n_train:].values,
                             dtype=torch.float32)
# Labels as a column vector (n_train, 1) to match the net's output shape.
train_labels = torch.tensor(train_data.SalePrice.values.reshape(-1, 1),
                            dtype=torch.float32)
# + [markdown] id="X00tRYaubLgf"
# #Loss
# Instead of just root-mean-squared-error,
#
# we are going to use root-mean-squared-error between the logarithm of the predicted price and the logarithm of the label price as in fig.
#
# 
# + id="2pJR6c0rVTWL"
# Shared MSE loss and the input width used by get_net() below.
loss = nn.MSELoss()
in_features = train_features.shape[1]

def log_rmse(net, features, labels):
    """Return the RMSE between log-predictions and log-labels as a float."""
    # To further stabilize the value when the logarithm is taken, set the
    # value less than 1 as 1
    clipped_preds = torch.clamp(net(features), 1, float('inf'))  # clamp into [1, inf)
    rmse = torch.sqrt(loss(torch.log(clipped_preds), torch.log(labels)))
    return rmse.item()

def get_net():
    """Build the baseline model: a single linear layer (linear regression)."""
    net = nn.Sequential(nn.Linear(in_features, 1))
    return net
# + [markdown] id="7mvlODW2c9wQ"
# #Train
# + id="0UIrMVE0cSqT"
def train(net, train_features, train_labels, test_features, test_labels,
          num_epochs, learning_rate, weight_decay, batch_size):
    """Train `net` with Adam and record the per-epoch log-RMSE curves.

    Returns a pair of lists: training log-RMSE per epoch, and test
    log-RMSE per epoch (empty when `test_labels` is None).
    """
    train_ls, test_ls = [], []
    train_iter = d2l.load_array((train_features, train_labels), batch_size)
    # Adam with optional L2 regularization via `weight_decay`.
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate,
                                 weight_decay=weight_decay)
    for _ in range(num_epochs):
        for X, y in train_iter:
            optimizer.zero_grad()
            loss(net(X), y).backward()
            optimizer.step()
        # Record metrics once per epoch, after the minibatch sweep.
        train_ls.append(log_rmse(net, train_features, train_labels))
        if test_labels is not None:
            test_ls.append(log_rmse(net, test_features, test_labels))
    return train_ls, test_ls
# + [markdown] id="D69ekVrtdSTO"
# #K -Fold Cross-Validation
# + id="1CllmcRVdNz3"
def get_k_fold_data(k, i, X, y):
    """Split (X, y) into `k` equal folds and return fold `i` as validation.

    Returns (X_train, y_train, X_valid, y_valid). Any trailing rows beyond
    k * (n // k) are dropped, matching the usual d2l convention.
    """
    assert k > 1
    fold_size = X.shape[0] // k
    train_X_parts, train_y_parts = [], []
    X_valid = y_valid = None
    for fold in range(k):
        sel = slice(fold * fold_size, (fold + 1) * fold_size)
        if fold == i:
            X_valid, y_valid = X[sel, :], y[sel]
        else:
            train_X_parts.append(X[sel, :])
            train_y_parts.append(y[sel])
    X_train = torch.cat(train_X_parts, 0)
    y_train = torch.cat(train_y_parts, 0)
    return X_train, y_train, X_valid, y_valid
def k_fold(k, X_train, y_train, num_epochs, learning_rate, weight_decay,
           batch_size):
    """Run k-fold cross-validation and return (avg train, avg valid) log-RMSE.

    Trains a fresh `get_net()` model on each of the k splits produced by
    `get_k_fold_data`, plots the learning curves of the first fold, and
    prints the final log-RMSE of every fold.
    """
    train_l_sum, valid_l_sum = 0, 0
    for i in range(k):
        data = get_k_fold_data(k, i, X_train, y_train)
        net = get_net()
        # `data` unpacks to (X_train, y_train, X_valid, y_valid).
        train_ls, valid_ls = train(net, *data, num_epochs, learning_rate,
                                   weight_decay, batch_size)
        # Only the last epoch's metric contributes to the fold average.
        train_l_sum += train_ls[-1]
        valid_l_sum += valid_ls[-1]
        if i == 0:
            d2l.plot(list(range(1, num_epochs + 1)), [train_ls, valid_ls],
                     xlabel='epoch', ylabel='rmse', xlim=[1, num_epochs],
                     legend=['train', 'valid'], yscale='log')
        print(f'fold {i + 1}, train log rmse {float(train_ls[-1]):f}, '
              f'valid log rmse {float(valid_ls[-1]):f}')
    return train_l_sum / k, valid_l_sum / k
# + [markdown] id="FbrLqBTsdiXv"
# #Model Selection
# + colab={"base_uri": "https://localhost:8080/", "height": 368} id="ri0S3EYJdaI0" outputId="08f8eeaf-d82f-4077-e0c0-f960b74f05d7"
# Hyperparameters for cross-validation.
# NOTE(review): lr=5 looks unusually large for Adam — presumably tuned for
# this un-normalized price target; confirm before reusing elsewhere.
k, num_epochs, lr, weight_decay, batch_size = 5, 100, 5, 0, 64
train_l, valid_l = k_fold(k, train_features, train_labels, num_epochs, lr,
                          weight_decay, batch_size)
print(f'{k}-fold validation: avg train log rmse: {float(train_l):f}, '
      f'avg valid log rmse: {float(valid_l):f}')
# + [markdown] id="WzjoeyVBein2"
# #Submitting prediction on kaggle
# + id="CdmrM3Fcd0CX"
def train_and_pred(train_features, test_feature, train_labels, test_data,
                   num_epochs, lr, weight_decay, batch_size):
    """Train on the full training set, plot the loss curve, and write
    `submission.csv` in the format Kaggle expects.

    Bug fix: the original body ignored the `test_feature` parameter and
    silently read the module-level global `test_features`; the parameter
    is now used, so the function predicts on whatever the caller passes.
    """
    net = get_net()
    train_ls, _ = train(net, train_features, train_labels, None, None,
                        num_epochs, lr, weight_decay, batch_size)
    d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch',
             ylabel='log rmse', xlim=[1, num_epochs], yscale='log')
    print(f'train log rmse {float(train_ls[-1]):f}')
    # Apply the network to the test set (the parameter, not the global).
    preds = net(test_feature).detach().numpy()
    # Reformat it to export to Kaggle
    test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="aHc7scdMfgoW" outputId="6864e464-3548-480a-9ffc-d057e63536ad"
# Retrain on all training data with the hyperparameters chosen above and
# write submission.csv.
train_and_pred(train_features, test_features, train_labels, test_data,
               num_epochs, lr, weight_decay, batch_size)
| Predicting_House_Prices_on_kaggle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # 图像分类数据集
# :label:`sec_fashion_mnist`
#
# (**MNIST数据集**) :cite:`LeCun.Bottou.Bengio.ea.1998`
# (**是图像分类中广泛使用的数据集之一,但作为基准数据集过于简单。
# 我们将使用类似但更复杂的Fashion-MNIST数据集**) :cite:`Xiao.Rasul.Vollgraf.2017`。
#
# + origin_pos=3 tab=["tensorflow"]
# %matplotlib inline
import tensorflow as tf
from d2l import tensorflow as d2l
d2l.use_svg_display()
# + [markdown] origin_pos=4
# ## 读取数据集
#
# 我们可以[**通过框架中的内置函数将Fashion-MNIST数据集下载并读取到内存中**]。
#
# + origin_pos=7 tab=["tensorflow"]
mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()
# + [markdown] origin_pos=8
# Fashion-MNIST由10个类别的图像组成,
# 每个类别由*训练数据集*(train dataset)中的6000张图像
# 和*测试数据集*(test dataset)中的1000张图像组成。
# 因此,训练集和测试集分别包含60000和10000张图像。
# 测试数据集不会用于训练,只用于评估模型性能。
#
# + origin_pos=10 tab=["tensorflow"]
len(mnist_train[0]), len(mnist_test[0])
# + [markdown] origin_pos=11
# 每个输入图像的高度和宽度均为28像素。
# 数据集由灰度图像组成,其通道数为1。
# 为了简洁起见,本书将高度$h$像素、宽度$w$像素图像的形状记为$h \times w$或($h$,$w$)。
#
# + origin_pos=12 tab=["tensorflow"]
mnist_train[0][0].shape
# + [markdown] origin_pos=13
# [~~两个可视化数据集的函数~~]
#
# Fashion-MNIST中包含的10个类别,分别为t-shirt(T恤)、trouser(裤子)、pullover(套衫)、dress(连衣裙)、coat(外套)、sandal(凉鞋)、shirt(衬衫)、sneaker(运动鞋)、bag(包)和ankle boot(短靴)。
# 以下函数用于在数字标签索引及其文本名称之间进行转换。
#
# + origin_pos=14 tab=["tensorflow"]
def get_fashion_mnist_labels(labels):  #@save
    """Return the text labels of the Fashion-MNIST dataset.

    `labels` is any iterable of numeric class indices (0-9); each index is
    coerced with int() so tensor scalars work too.
    """
    text_labels = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    return [text_labels[int(label)] for label in labels]
# + [markdown] origin_pos=15
# 我们现在可以创建一个函数来可视化这些样本。
#
# + origin_pos=16 tab=["tensorflow"]
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):  #@save
    """Plot a list of images on a num_rows x num_cols grid.

    `imgs` must be indexable image tensors exposing .numpy() (e.g. TF
    tensors); axis ticks are hidden and `titles`, when given, must have
    one entry per image. Returns the flattened array of axes.
    """
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        ax.imshow(img.numpy())
        # Hide the tick marks/labels — they carry no meaning for images.
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    return axes
# + [markdown] origin_pos=18
# 以下是训练数据集中前[**几个样本的图像及其相应的标签**]。
#
# + origin_pos=21 tab=["tensorflow"]
X = tf.constant(mnist_train[0][:18])
y = tf.constant(mnist_train[1][:18])
show_images(X, 2, 9, titles=get_fashion_mnist_labels(y));
# + [markdown] origin_pos=22
# ## 读取小批量
#
# 为了使我们在读取训练集和测试集时更容易,我们使用内置的数据迭代器,而不是从零开始创建。
# 回顾一下,在每次迭代中,数据加载器每次都会[**读取一小批量数据,大小为`batch_size`**]。
# 通过内置数据迭代器,我们可以随机打乱了所有样本,从而无偏见地读取小批量。
#
# + origin_pos=25 tab=["tensorflow"]
batch_size = 256
train_iter = tf.data.Dataset.from_tensor_slices(
mnist_train).batch(batch_size).shuffle(len(mnist_train[0]))
# + [markdown] origin_pos=26
# 我们看一下读取训练数据所需的时间。
#
# + origin_pos=27 tab=["tensorflow"]
timer = d2l.Timer()
for X, y in train_iter:
continue
f'{timer.stop():.2f} sec'
# + [markdown] origin_pos=28
# ## 整合所有组件
#
# 现在我们[**定义`load_data_fashion_mnist`函数**],用于获取和读取Fashion-MNIST数据集。
# 这个函数返回训练集和验证集的数据迭代器。
# 此外,这个函数还接受一个可选参数`resize`,用来将图像大小调整为另一种形状。
#
# + origin_pos=31 tab=["tensorflow"]
def load_data_fashion_mnist(batch_size, resize=None):  #@save
    """Download the Fashion-MNIST dataset and load it into memory.

    Returns a (train_iter, test_iter) pair of tf.data.Dataset batch
    iterators; `resize`, when given, pads/resizes images to resize x resize.
    """
    mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()
    # Divide all pixel values by 255 so they lie in [0, 1], append a channel
    # dimension at the end, and convert the labels to int32.
    process = lambda X, y: (tf.expand_dims(X, axis=3) / 255,
                            tf.cast(y, dtype='int32'))
    resize_fn = lambda X, y: (
        tf.image.resize_with_pad(X, resize, resize) if resize else X, y)
    # Only the training stream is shuffled; the test stream keeps its order.
    return (
        tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(
            batch_size).shuffle(len(mnist_train[0])).map(resize_fn),
        tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(
            batch_size).map(resize_fn))
# + [markdown] origin_pos=32
# 下面,我们通过指定`resize`参数来测试`load_data_fashion_mnist`函数的图像大小调整功能。
#
# + origin_pos=33 tab=["tensorflow"]
train_iter, test_iter = load_data_fashion_mnist(32, resize=64)
for X, y in train_iter:
print(X.shape, X.dtype, y.shape, y.dtype)
break
# + [markdown] origin_pos=34
# 我们现在已经准备好使用Fashion-MNIST数据集,便于下面的章节调用来评估各种分类算法。
#
# ## 小结
#
# * Fashion-MNIST是一个服装分类数据集,由10个类别的图像组成。我们将在后续章节中使用此数据集来评估各种分类算法。
# * 我们将高度$h$像素,宽度$w$像素图像的形状记为$h \times w$或($h$,$w$)。
# * 数据迭代器是获得更高性能的关键组件。依靠实现良好的数据迭代器,利用高性能计算来避免减慢训练过程。
#
# ## 练习
#
# 1. 减少`batch_size`(如减少到1)是否会影响读取性能?
# 1. 数据迭代器的性能非常重要。你认为当前的实现足够快吗?探索各种选择来改进它。
# 1. 查阅框架的在线API文档。还有哪些其他数据集可用?
#
# + [markdown] origin_pos=37 tab=["tensorflow"]
# [Discussions](https://discuss.d2l.ai/t/1786)
#
| submodules/resource/d2l-zh/tensorflow/chapter_linear-networks/image-classification-dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### K - Nearest Neighbours
# #### Preprocessing the data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# +
# Load the dataset; columns 2..3 are Age and EstimatedSalary, last is label.
d = pd.read_csv('Social_Network_Ads.csv') # dataframe
features = d.iloc[ :, 2:-1] #dataframe
#or features = d.iloc[ :, [2,3]]
bought = d.iloc[ :, -1] #series
x = d.iloc[ :, 2:-1].values #array
y = d.iloc[ :, -1].values #array
features
# -
bought
#Splitting the data (75% train / 25% test, fixed seed for reproducibility)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.25, random_state =0)
np.shape(x_train)
#Feature scaling: fit the scaler on the training set only, then apply the
#same transform to the test set to avoid information leakage.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
# #### Fitting the model to the training set
# minkowski with p=2 is the ordinary Euclidean distance.
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)
classifier.fit(x_train, y_train)
# Predicting the test results
y_pred = classifier.predict(x_test)
#Making the confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
# Visualising the Training set results
# The meshgrid covers the (scaled) feature plane at 0.01 resolution; every
# grid point is classified to paint the decision regions, then the actual
# training points are scattered on top.
from matplotlib.colors import ListedColormap
x_set, y_set = x_train, y_train
x1, x2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1, stop = x_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = x_set[:, 1].min() - 1, stop = x_set[:, 1].max() + 1, step = 0.01))
plt.contourf(x1, x2, classifier.predict(np.array([x1.ravel(), x2.ravel()]).T).reshape(x1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(x1.min(), x1.max())
plt.ylim(x2.min(), x2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(x_set[y_set == j, 0], x_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j, edgecolors= 'black')
plt.title('KNN (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
# NOTE(review): near-duplicate of the training-set plot above — a shared
# plotting helper taking (x_set, y_set, title) would remove the duplication.
from matplotlib.colors import ListedColormap
x_set, y_set = x_test, y_test
x1, x2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1, stop = x_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = x_set[:, 1].min() - 1, stop = x_set[:, 1].max() + 1, step = 0.01))
plt.contourf(x1, x2, classifier.predict(np.array([x1.ravel(), x2.ravel()]).T).reshape(x1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(x1.min(), x1.max())
plt.ylim(x2.min(), x2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(x_set[y_set == j, 0], x_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j, edgecolors='black')
plt.title('KNN (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
| Classification/.ipynb_checkpoints/KNN-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %%time
def largest_prime_factor(num):
    """Return the largest prime factor of `num` (num >= 2).

    Uses trial division: strip out each divisor completely while it still
    divides the remainder; whatever remains above 1 at the end is itself
    prime and is the largest factor.

    Bug fix: the previous version only collected divisors i with
    i < sqrt(num), so it missed prime factors larger than sqrt(num)
    (e.g. it reported 2 instead of 5 for num=10).
    """
    largest = None
    d = 2
    while d * d <= num:
        while num % d == 0:
            largest = d
            num //= d
        d += 1
    if num > 1:
        # The remaining cofactor is prime and larger than any divisor found.
        largest = num
    return largest

if __name__ == '__main__':
    num = input("Enter a number: ")
    num = int(num)
    print(largest_prime_factor(num))
# -
| Largest Prime Factor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Option payoff formulae and diagrams
# Options have total value which comprises two elements:
# 1. **Time value** - Time value arises due to the volatility of the option's underlying asset and the fact that it can move over the period before expiration. Time value depends on *time* to option expiration (obviously) and *volatility*
# 2. **Intrinsic value** - this is the value, if any, which arises from the price of the underlying asset and the option strike price. If the underlying asset price *S* is less than the option strike price *K*, a call option will have no intrinsic value, whereas the intrinsic value of the put option is equal to *K-S*
#
# At expiration, options clearly have no time value, so all that remains is *intrinsic value*. The payoff formulae at expiration are thefore:
# * *call = MAX(S - K, 0)*
# * *put = MAX(K - S, 0)*
#
# ## Combining payoffs - put-call parity example
# Suppose we have two options, one call and one put - same:
# * expiration
# * underlying
# * strike
#
# Furthermore, imagine we own (are *long*) the call option and we have sold (are *short*) the put option.
#
# At expiration, therefore, the profile can be constructed by subtracting (since we are *short*) the above put payoff from the (long) call payoff. You will notice that this combination of a +call and a -put (long call and short put) gives rise to a linear instrument, which is quite remarkable : we are constructing a linear asset from two non-linear option instruments.
#
# This combination is therefore sometimes referred to as a **synthetic forward**.
# # Black Scholes Option Valuation Formula
# The Black–Scholes formula calculates the price of European put and call options. This price is consistent with the Black–Scholes equation as above; this follows since the formula can be obtained by solving the equation for the corresponding terminal and boundary conditions.
#
# The value of a call option for a non-dividend-paying underlying stock in terms of the Black–Scholes parameters is:
#
# \begin{aligned}C(S_{t},t)&=N(d_{1})S_{t}-N(d_{2})Ke^{-r(T-t)}\\d_{1}&={\frac {1}{\sigma {\sqrt {T-t}}}}\left[\ln \left({\frac {S_{t}}{K}}\right)+\left(r+{\frac {\sigma ^{2}}{2}}\right)(T-t)\right]\\d_{2}&=d_{1}-\sigma {\sqrt {T-t}}\\\end{aligned}
#
# Where:
#
# * **$N ( ⋅ ) $** is the cumulative distribution function of the standard normal distribution
# * **$T − t $** is the time to maturity (expressed in years)
# * **$S_t $** is the spot price of the underlying asset
# * **$K $** is the strike price
# * **$r $** is the risk free rate (annual rate, expressed in terms of continuous compounding)
# * **$σ $** is the volatility of returns of the underlying asset
#
# +
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
__author__ = '<NAME>'
__copyright__ = 'Copyright 2017 GFT'
K= 8000 # option strike price
S = np.linspace(7000, 9000, 100)# numpy array of 100 spot values for underlying
#Here are the expiration payoff equations for call and put options
call = np.maximum(S - K, 0)
put = np.maximum(K - S, 0)
plt.figure()
plt.grid(True)
plt.plot(S, call, lw=2.5, label='Call option pay-off')
plt.xlabel('Underlying level $S_t$ at maturity')
plt.ylabel('Value')
plt.legend(loc='upper left')
plt.show()
plt.plot(S, put, lw=2.5, color='red', label='Put option pay-off')
plt.legend(loc='upper right')
plt.grid(True)
plt.show()
# Long call + short put at the same strike replicates a forward (put-call
# parity), hence the linear profile below.
plt.plot(S, call-put, lw=2.5, label='Long call + short put pay-off')
plt.legend(loc='upper center')
plt.grid(True)
plt.show()
# +
# Black-Scholes-Merton option valuation Model
from scipy import stats
import math
def BSM_Option_value(S, K, T, r, vol, pc='call'):
    """Black-Scholes-Merton (1973) present value of a European option.

    S:   underlying spot price
    K:   option strike price
    T:   time to maturity as a fraction of a year
    r:   constant (continuously compounded) short rate
    vol: volatility of the underlying asset
    pc:  'call' for a call; any other value is priced as a put
    """
    sqrt_t = math.sqrt(T)
    d1 = (math.log(S / K) + (r + 0.5 * vol ** 2) * T) / (vol * sqrt_t)
    d2 = d1 - vol * sqrt_t
    discounted_strike = K * math.exp(-r * T)
    if pc.lower() == 'call':
        value = S * stats.norm.cdf(d1) - discounted_strike * stats.norm.cdf(d2)
    else:
        value = discounted_strike * stats.norm.cdf(-d2) - S * stats.norm.cdf(-d1)
    return value
S0 = 7500.00 # initial index level
K = 8000.00 # strike price
T = .25 # call option maturity
r = 0.05 # constant short rate
vol = 0.25 # constant volatility factor of diffusion
# Bug fix: the Python-2 `print` *statement* is a SyntaxError under this
# notebook's Python 3 kernel; use the print() function instead.
print("Value of European {} option is {}".format('call', BSM_Option_value(S0, K, T, r, vol, 'call')))
# +
from numpy import vectorize
plt.close()
S = np.linspace(7000, 9000, 100)
#vectorise BSM_Option_value function so we can use numpy.ndarray vector of 'S' underlying prices.
vBSValue=vectorize(BSM_Option_value)
fig=plt.figure()
plt.grid(True)
plt.plot(S, call, lw=2.5, label="Call expiration payoff")
pvs=list()
Ts=[.05, .1, .25, .5] # Selection list of expiry times in fractions of year
# Each curve shows the pre-expiration call value converging to the hockey-
# stick payoff as time to maturity shrinks.
for i,T in enumerate(Ts):
    pvs.append(vBSValue(S, 8000, Ts[i], r, .15))
    plt.plot(S, pvs[i], lw=.5, label="Call option value for T={exp:1.2f}".format(exp=Ts[i]))
plt.xlabel('Underlying level $S_t$')
plt.ylabel('Value')
plt.legend(loc='upper left')
plt.show()
# +
from numpy import vectorize
plt.close()
S = np.linspace(7000, 9000, 100)
#vectorise BSM_Option_value function so we can use numpy.ndarray vector of 'S' underlying prices.
vBSValue=vectorize(BSM_Option_value)
fig=plt.figure()
plt.grid(True)
plt.plot(S, put, lw=2.5, label="Put expiration payoff")
pvs=list()
Ts=[.05, .1, .25, .5] # Selection list of expiry times in fractions of year
# Same convergence picture as above, now for puts.
for i,T in enumerate(Ts):
    pvs.append(vBSValue(S, 8000, Ts[i], r, .15, pc="put"))
    plt.plot(S, pvs[i], lw=.5, label="Put option value for T={exp:1.2f}".format(exp=Ts[i]))
plt.xlabel('Underlying level $S_t$')
plt.ylabel('Value')
plt.legend(loc='upper right')
plt.show()
# +
from numpy import vectorize
plt.close()
S = np.linspace(7000, 9000, 100)
#vectorise BSM_Option_value function so we can use numpy.ndarray vector of 'S' underlying prices.
vBSValue=vectorize(BSM_Option_value)
fig=plt.figure()
plt.grid(True)
plt.plot(S, call-put, lw=2.5, label="Call-put parity with synthetic forwards")
calls=list()
puts =list()
Ts=[.05, .1, .25, .5] # Selection list of expiry times in fractions of year
for i,T in enumerate(Ts):
    calls.append(vBSValue(S, 8000, Ts[i], r, .15, pc="call"))
    # Fix: pass the documented value 'put' — the previous "puts" only worked
    # because BSM_Option_value treats anything other than 'call' as a put.
    puts.append(vBSValue(S, 8000, Ts[i], r, .15, pc="put"))
    plt.plot(S, calls[i]-puts[i], lw=.5, label="synthetic forward value for T={exp:1.2f}".format(exp=Ts[i]))
plt.xlabel('Underlying level $S_t$')
plt.ylabel('Value')
plt.legend(loc='upper left',prop={'size':9})
plt.show()
| Misc/Black+Scholes+payoffs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.4.6
# language: julia
# name: julia-0.4
# ---
# NOTE(review): this notebook targets Julia 0.4 — readall, linspace and
# Array(T, n) were all removed/renamed in later Julia versions.
println(readall(`cmd /c type lin_int2.jl`))
include("lin_int2.jl")
# `grid`/`vals` are assumed to be defined by lin_int2.jl or an earlier
# session cell — they are not created before this call. TODO confirm.
f=lin_inter(grid,vals)
# Target function to interpolate.
g(x)=2*sin(x)+3*cos(2x)+1.5
using PyPlot
x=linspace(0,5,100)
y=linspace(0,4.9,100)
plot(x,g(x),label="Original")
# Compare interpolants built on grids of spacing 1, 1/2 and 1/5.
units=[1,2,5]
for i in 1:3
    n=5*units[i]
    grid=Array(Float64,n+1)
    vals=Array(Float64,n+1)
    for j in 1:n+1
        grid[j]=(j-1)/units[i]
        vals[j]=g((j-1)/units[i])
    end
    f=lin_inter(grid,vals)
    plot(y, f(y),label="lin_int by$(1/units[i])")
end
title("Linear Interpolation")
legend()
# +
# Build a piecewise-linear interpolant over `grid` (assumed sorted
# ascending) with values `vals`; returns a closure callable on a scalar
# or on a vector (elementwise).
function lin_inter(grid,vals)
    function func(x::Real)
        n = length(grid)
        # Out-of-range queries just print "Error" and return nothing.
        if x<grid[1]
            print("Error")
        elseif x > grid[n]
            print("Error")
        # NOTE(review): when x == grid[n], searchsortedlast returns n and
        # vals[index+1] below reads out of bounds — confirm/guard upstream.
        else index=searchsortedlast(grid,x)
            return (vals[index+1]-vals[index])/(grid[index+1]-grid[index])*(x-grid[index])+vals[index]
        end
    end
    # Vectorized method (Julia 0.4 parametric syntax): apply func elementwise.
    function func{T<:Real}(x::AbstractVector{T})
        m=length(x)
        out=Array(Any,m)
        for i in 1:m
            out[i]=func(x[i])
        end
        return out
    end
    return func
end
# -
f(1)
f=lin_inter(grid,vals)
| linear-interpolation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
import numpy as np
# IPython magics (pwd / %cd) — this file is a notebook export.
pwd
# cd dat/2015/250m gender/250m gender
# 250m grid-cell coordinates and the January demographic counts.
coords_df = pd.read_csv('250m.txt')
jan_df = pd.read_csv('jan.txt')
jan_df
new_df = coords_df.rename(columns={'SPO_NO_250':'ID_250'})
df0 = pd.merge(new_df, jan_df)
df0
# NOTE(review): df0 (January join) is computed but never used below — only
# the summer months are aggregated.
df = pd.DataFrame(columns=jan_df.columns)
mon = ["jun", "jul", "aug"]
df.columns
df = df.drop(df.columns[0], axis=1)
df
for i in mon:
    df2 = pd.read_csv('{mon}.txt'.format(mon=i))
    df = df.append(df2, ignore_index=True)
df
# Sum each demographic column (M/W by decade of age) per 250m cell.
data_df = df.groupby("ID_250").agg({'M10': np.sum, 'M20': np.sum, 'M30': np.sum, 'M40': np.sum, 'M50': np.sum, 'M60': np.sum,\
                                    'W10': np.sum, 'W20': np.sum, 'W30': np.sum, 'W40': np.sum, 'W50': np.sum, 'W60': np.sum})
data_df
data_df.to_csv('sum.csv')
data_df["ID_250"] = data_df.index
data_df
co_df = pd.merge(new_df, data_df)
co_df
# Keep only the coordinate columns plus M50, and drop cells with 20 or
# fewer men in their fifties.
only_df = co_df[[co_df.columns[0], co_df.columns[1], 'M50']]
only_df = only_df[only_df['M50'] > 20]
only_df
# Mean coordinates of the filtered cells. Bug fix: the previous version
# accessed `.mean` on the column *names* (strings), which raises
# AttributeError; the intent (cf. the identical expression a few cells
# below) is the mean of the column values.
(only_df[only_df.columns[0]].mean(), only_df[only_df.columns[1]].mean())
# Remove the coordinates around Chuja Island (outlying islands north of Jeju)
only_df = only_df[((only_df[only_df.columns[0]] < 886636)|(only_df[only_df.columns[0]] > 893124))|\
                  (only_df[only_df.columns[1]] < 1543000)]
only_df = only_df[((only_df[only_df.columns[0]] < 902800)|(only_df[only_df.columns[0]] > 914500))|\
                  ((only_df[only_df.columns[1]] < 1498000)|(only_df[only_df.columns[1]] > 1504900))]
only_df
(only_df[only_df.columns[0]].mean(), only_df[only_df.columns[1]].mean())
# Center the coordinates on the mean point computed above.
# NOTE(review): `list` shadows the builtin; this notebook runs on Python 2,
# where zip() returns a list (on Python 3 it would be an iterator).
list = zip((only_df[only_df.columns[0]] - 910527.4941838279), (only_df[only_df.columns[1]] - 1488111.3421648254))
list
X = np.array(list)
# `plt`/`mpl` are assumed imported in an earlier session cell — they are
# not imported in this file. TODO confirm.
plt.scatter(X[:, 0], X[:, 1], s=100)
color = mpl.colors.cnames.keys()
from sklearn.cluster import KMeans
# Single-iteration KMeans with a fixed seed, so results are reproducible.
model = KMeans(n_clusters=25, init="random", n_init=1, max_iter=1, random_state=0).fit(X)
c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24 = model.cluster_centers_
for i, j in zip(model.cluster_centers_, range(25)):
    plt.scatter(X[model.labels_== j,0], X[model.labels_==j,1], s=100, marker='v', color=color[j]);
    plt.scatter(i[0], i[1], s=100, c=color[j])
# +
def kmeans_df(*centers):
    """Build a DataFrame of the clustered points and their labels.

    The cluster-center arguments are accepted for backward compatibility
    with existing call sites but are not used: the frame is built from the
    module-level ``X`` and ``model.labels_``. (The previous signature
    spelled out 25 unused positional parameters.)
    """
    df = pd.DataFrame(np.hstack([X,
                                 model.labels_[:, np.newaxis]]),
                      columns=["x0", "x1", "c"])
    return df

cl_df = kmeans_df(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24)
cl_df
# -
# Attach an (x, y) tuple key to both frames so they can be merged on it.
cl_df["x,y"] = zip(cl_df['x0'], cl_df['x1'])
cl_df
only_df['x,y'] = zip((only_df[only_df.columns[0]] - 910527.4941838279) , (only_df[only_df.columns[1]] - 1488111.3421648254))
only_df
merged_df = pd.merge(only_df, cl_df)
merged_df
# +
# Total M50 population per cluster, ranked descending.
pop_df = merged_df.groupby("c").agg({
        'M50' : [np.sum,]
    })
pops_df = pop_df.sort_values(by=pop_df.columns[0], ascending=False)
pops_df
# -
def jeju(t):
    """Highlight the t-th most populous cluster (1-based rank) on the map.

    Reads the module-level ``model``, ``X``, ``color`` and ``pops_df``; the
    selected cluster is drawn in its color, all others in white/black.
    """
    c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24 = model.cluster_centers_
    for i, j in zip(model.cluster_centers_, range(25)):
        # pops_df is sorted descending, so index[t-1] is the rank-t cluster id.
        if j in [int(pops_df.index[t-1])]:
            plt.scatter(X[model.labels_== j,0], X[model.labels_==j,1], s=100, marker='v', color=color[j]);
            plt.scatter(i[0], i[1], s=100, c=color[j])
        else:
            plt.scatter(X[model.labels_== j,0], X[model.labels_==j,1], s=100, marker='^', color='white');
            plt.scatter(i[0], i[1], s=100, c='black')
jeju(1)
| summer_gender_cluster.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Lab 11: Predicting Low Birthweight
#
# Please complete this lab by providing answers in cells after the question. Use **Code** cells to write and run any code you need to answer the question and **Markdown** cells to write out answers in words. After you are finished with the assignment, remember to download it as an **HTML file** and submit it in **ELMS**.
#
# This assignment is due by **11:59pm on Thursday, April 28**.
# +
import numpy as np
from datascience import *
# These lines do some fancy plotting magic.
import matplotlib
# %matplotlib inline
import matplotlib.pyplot as plt
# This is for linear regression
from sklearn.linear_model import LinearRegression
# This is for logistic regression
from sklearn.linear_model import LogisticRegression
# For getting results from the test set
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
# -
# ### North Carolina Births Data
#
# In this lab, we will work with a dataset of births in North Carolina. A series of variables were collected, most notably the smoking habit of the mother as well as the birthweight of the baby. We are interested in what factors are associated with a low birthweight. We'll first look at predicting `birthweight` as a numerical variable, then use the categorical `lowbirthweight` and try to predict low birthweight status using logistic regression.
ncbirths = Table.read_table('ncbirths.csv')
ncbirths.show(5)
# Note that we need to work with the data a little bit to be able to fit a model using categorical data. The `sklearn` package only allows for categorical variables that have been changed into dummy variables (that is, 0/1 variables). So, we're going to need to create new variables that contain the same information, except as numbers. Luckily, True/False maps onto 1/0, so we can just do comparisons. We'll use a lot of these variables later, so let's do some cleaning now.
# +
# Recode the categorical columns as booleans (True/False maps onto 1/0 for
# sklearn).
ncbirths_dummy = ncbirths.with_columns('premature', ncbirths.column('premie') == 'premie', # True if premature, False if not
                                       'female', ncbirths.column('gender') == 'female', # True if female, False if not
                                       'smoker', ncbirths.column('habit') == 'smoker', # True if smoker, False if not
                                       'label', ncbirths.column('lowbirthweight') == 'low') # Our outcome. True if low birthweight, False if not
ncbirths_dummy.show(5)
# -
# Drop redundant rows now
ncbirths_with_dummies = ncbirths_dummy.drop('premie', 'gender', 'habit','lowbirthweight')
# <font color = 'red'>**Question 1. Using `ncbirths_with_dummies`, change the `whitemom` and `mature` variables so that they are True/False values. Add a column called `younger` which is True if `mature` is "younger mom" and False otherwise, and a column called `white` which is True if `whitemom` is "white" and False otherwise. Assign this to `ncbirths_clean`. Make sure to remove the redundant variables (`whitemom` and `mature`).**</font>
# +
# -
# Now that we've cleaned up our dataset, let's try fitting a logistic regression line.
# ## Using a Logistic Regression
#
# A logistic regression is essentially the same as a linear regression, except we have a binary (two category) categorical variable instead of a numerical variable as the outcome, and we get predicted probabilities out of the predictions. The way we fit a logistic regression model is essentially the same as a linear regression model with `sklearn`. We set up a `LogisticRegression` object first, then we use `fit` to provide it the data.
# +
# Create a model object
logit = LogisticRegression()
# Define predictor and outcome
predictor = ncbirths_clean.select('mage', 'weeks', 'female', 'smoker').rows
outcome = ncbirths_clean.column('label')
# Fit the model
logit.fit(X = predictor, y = outcome)
# -
# We can get the coefficients and intercept the same way as well.
# One coefficient per predictor, in the order selected above.
logit.coef_
logit.intercept_
# <font color = 'red'>**Question 2. Write out the form of the equation in the logistic regression model that we just ran. You can just use `logit(birthweight status)` as the outcome instead of writing out the logit transformation.**</font>
# *Your answer here.*
# <font color = 'red'>**Question 3. Focusing on just the sign of the coefficients, interpret the relationship between each of the predictors and whether the baby had a low birthweight.**</font>
# *Your answer here.*
# ## Intuition for Logistic Regression
#
# We are treating the outcome variable (in this case, low birthweight status) as a 0/1 variable. The graph below shows the relationship between number of weeks of pregnancy and the actual outcome.
# Plot the 0/1 outcome against weeks of gestation.
weeks_and_weight = Table().with_columns('Weeks', ncbirths_clean.column('weeks'),
                                        'Low Birthweight Status', ncbirths_clean.column('label').astype(int))
weeks_and_weight.scatter('Weeks', 'Low Birthweight Status')
# Now let's add in gold the predicted probabilities at the different number of weeks.
weeks_and_weight = Table().with_columns('Weeks', ncbirths_clean.column('weeks'),
                                        'Low Birthweight Status', ncbirths_clean.column('label').astype(int))
weeks_and_weight.scatter('Weeks', 'Low Birthweight Status')
# Column 1 of predict_proba is P(label == True), i.e. P(low birthweight).
plt.scatter(ncbirths_clean.column('weeks'), logit.predict_proba(predictor)[:,1], lw=1, color='gold')
# The logit transformation makes it so that even though there is a linear relationship between the (transformed) outcome and the predictors, all predicted probabilities are between 0 and 1, and the relationship between weeks and low birthweight status can clearly be seen here, with higher number of weeks corresponding to lower probability of low birthweight.
# <font color = 'red'>**Question 4. Try adding another predictor to the logistic regression model and running it again. Make sure you do not use `weight`, since that is exactly what the `lowbirthweight` variable is based on. How does your model change? What are the coefficients? How would you interpret the coefficients for the variable you added?**</font>
# +
# -
# ### Predictions using Logistic Regression
#
# To do predictions, we use the `predict_proba` method rather than the `predict` method. This will give us a set of two probabilities for each row. The first represents the probability of a 0, while the second represents the probability of a 1. Here, we are more interested in predicting the probability that a baby will have a low birthweight so that we can provide some sort of help or care in advance. So, we'll just try to find the ones with the highest probability.
#
# In this example, we simply use the same dataset (`ncbirths`) to make predictions, but in reality, you'd use a different dataset. In the next section, we'll go over something called training and testing data that can help improve your models and their performance on future data.
# Predict for each element in original dataset
# Each row has probabilities for 0 and 1
# Should add up to one on each row.
logit.predict_proba(predictor)
# Let's extract predicted probability of being low birthweight.
# The [:,1] means that we want everything from the rows, and only the column with index 1 (so, the second column)
lowbirthweight_probability = logit.predict_proba(predictor)[:,1]
# Histogram of the predicted probabilities across all births.
Table().with_column('Low Birthweight Predicted Probability',lowbirthweight_probability).hist()
# <font color = 'red'>**Question 5. What would be the predicted probability of a baby being lowbirthweight according to our model if the mother's age was 30, the pregnancy lasted 36 weeks, the mother was not a smoker, and the baby was female?**</font>
#
# **Hint:** You'll need to create a Table to use with `predict_proba`.
# ## Splitting into Train and Test
#
# What if we wanted to get a sense for how our model might perform when given new data? We can split our data into train and test sets, build our model using the train, and assess our model using test.
#
# First, let's start by taking a sample of our data. We'll use 80% of the data to build our model, then using the remaining 20% to see how it is doing. To do this, we first shuffle our dataset (the cleaned version), then take the first 80% of the rows as `train`. Then, we assign the rest to `test`. The shuffling is to make sure that we are doing a random split.
# +
# Shuffle (sample without replacement returns a permutation), then take the
# first 80% as train and the remainder as test.
shuffled_births = ncbirths_clean.sample(with_replacement = False)
nrows = ncbirths.num_rows
ntrain = int(nrows*.8) # 80% used to train
train = shuffled_births.take(np.arange(ntrain))
test = shuffled_births.take(np.arange(ntrain,nrows))
# -
train.num_rows
test.num_rows
# Now, we can take the same exact steps as we did earlier, except using the `train` object to build our model.
# <font color = 'red'>**Question 6. Fit a logistic regression model using `train` with `mage`, `weeks`, `female`, and `smoker` as the predictors. What are the coefficients? How does it differ from fitting the model using the full data?**</font>
# +
# -
# Next, we can use `predict` to predict for the cases in `test` and see how our model did. The `predict` function takes the predicted probabilities and assigns a value of 1 as the prediction if the probability is above 0.5 and 0 if it is below 0.5.
# predict() uses a 0.5 probability cutoff by default.
test_predictions = logit.predict(test.select('mage', 'weeks', 'female', 'smoker').rows)
confusion_matrix(test.column('label'), test_predictions)
# The columns represent predictions and the rows represent actual values, so the top left is true negatives, the bottom right is true positives, the top right is false positives, and the bottom left is false negatives. We can calculate the accuracy of our model using the `accuracy_score` function. This takes the diagonal values and divides by the total number of cases to give us how often we were correct.
accuracy_score(test.column('label'), test_predictions)
# However, remember that `predict` by default uses 0.5 as the cutoff point for predicting 0 or 1. We might actually want a different cutoff. For example, if we think a 10% chance is a high enough risk that we want to make sure we flag the baby as possibly being low birthweight, then we might want to set a cutoff value of 0.1 instead. This would make it so that we are predicting more babies as low birthweight. We can do this using `predict_proba` and setting a cutoff manually.
# Manual cutoff: flag any baby with > 10% predicted risk as low birthweight.
test_predictions = logit.predict_proba(test.select('mage', 'weeks', 'female', 'smoker').rows)[:,1] > 0.1
confusion_matrix(test.column('label'), test_predictions)
accuracy_score(test.column('label'), test_predictions)
# You should notice that the true positives go up, but so do the false positives. That is, we are predicting more babies to be low birthweight, which is able to catch more cases, but we also increase the number of times we incorrectly predict a baby is low birthweight. This is a tradeoff -- the lower we set the threshold, the fewer false negatives we get, but also the more false positives as well.
#
# You probably observed a lower accuracy for one of the models. This doesn't necessarily mean we would always prefer the higher accuracy predictions. For example, we might be ok with more errors as long as we are able to catch all of the babies who might be at risk of being low birthweight.
# <font color = 'red'>**Question 7. Try a few different values for the cutoff other than 0.1 and 0.5. How does the number of false positives and false negatives change? How does the accuracy change? What do you think would be the preferred set of predictions to use?**</font>
| labs/Lab-11-LogisticRegression/lab11.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/shubham-dalmia/sds-tasks/blob/main/numpy.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="WErWZicJCd-X"
# 1. Import the numpy package under the name np
# + id="jO8Rzd9oB-kh"
import numpy as np
# + [markdown] id="bVOhZVXxCcTO"
# 2. Print the numpy version and the configuration
# + colab={"base_uri": "https://localhost:8080/"} id="geLfWppdCPLw" outputId="986a6363-1732-4e8f-8ff4-391d6371e38f"
# Print the numpy version and the build/runtime configuration.
print(np.__version__)
# show_config is a function -- it must be called; the original printed
# only the function object's repr instead of the actual configuration.
np.show_config()
# + [markdown] id="cCh6y5ATDgU-"
# 3. Create a null vector of size 10
# + colab={"base_uri": "https://localhost:8080/"} id="by4R1ZDoDkAQ" outputId="dcdba234-4101-49e6-c24b-70751a4a1f20"
a=np.zeros(10)
print(a)
# + [markdown] id="6lxtfhTiDyxp"
# 4. How to find the memory size of any array
# + colab={"base_uri": "https://localhost:8080/"} id="2Oujyj8MD2iB" outputId="b7efd544-439e-4e33-e65e-e2399b5ec43e"
arr=np.array([1,2,3,4,5])
print(arr.nbytes)
# + [markdown] id="bC8XGUH6EKYo"
# 5. How to get the documentation of the numpy add function from the command line?
# + colab={"base_uri": "https://localhost:8080/"} id="sPv5y2IUEOfX" outputId="83262eb3-5813-40d7-a20a-b41c2bc01a53"
print(np.info(np.add))
# + [markdown] id="OUanIwkyE2XX"
# 6. Create a null vector of size 10 but the fifth value which is 1
# + colab={"base_uri": "https://localhost:8080/"} id="9VJe3o8lE6XY" outputId="2a776325-d9b4-440e-c8a9-5b431756a9cc"
a=np.zeros(10)
a[4]=1
print(a)
# + [markdown] id="wrytmSKIFDB1"
# 7. Create a vector with values ranging from 10 to 49
# + colab={"base_uri": "https://localhost:8080/"} id="ViypaB_1FHXn" outputId="c993cd80-6285-451f-e530-774805079ebb"
b=np.arange(10,50)
print(b)
# + [markdown] id="sIiAFnavFXP4"
# 8. Reverse a vector (first element becomes last)
# + colab={"base_uri": "https://localhost:8080/"} id="pUwtLvn5FbWj" outputId="d6f7f807-161e-44e7-a7dc-a0c7b6b472d0"
b=b[::-1]
print(b)
# + [markdown] id="G8UejFmiGGhr"
# 9. Create a 3x3 matrix with values ranging from 0 to 8
# + colab={"base_uri": "https://localhost:8080/"} id="jxWNWzG5KiLm" outputId="773d1ce0-ed42-40c9-ab6f-d4a1dbfa0af9"
np.matrix(np.arange(9).reshape((3,3)))
# + [markdown] id="AoUK0qGdLy_L"
# 10. Find indices of non-zero elements from [1,2,0,0,4,0]
# + colab={"base_uri": "https://localhost:8080/"} id="lqK872YmL4Ce" outputId="b17e1efa-c71e-4ebe-d9ad-bf8e7f0c789b"
np.nonzero([1,2,0,0,4,0])
# + [markdown] id="y6e7eFEpMXZh"
# 11. Create a 3x3 identity matrix
# + colab={"base_uri": "https://localhost:8080/"} id="fTl6dl21MdBG" outputId="93cc880d-02d0-4d1f-bedb-17bb85fbe968"
np.identity(3)
# + [markdown] id="9G5YiDw6MsBW"
# 12. Create a 3x3x3 array with random values
# + colab={"base_uri": "https://localhost:8080/"} id="oqGst1muMwxB" outputId="a17b4149-c19c-4d46-f460-0e4b47453518"
np.random.random((3,3,3))
# + [markdown] id="vuGTN1ESNg0H"
# 13. Create a 10x10 array with random values and find the minimum and maximum values
# + colab={"base_uri": "https://localhost:8080/"} id="t6DfBdFpNll3" outputId="b8f0fc65-e1ee-4bce-fba4-ead2a20095ea"
d=np.random.random((10,10))
print(d)
print(d.min())
print(d.max())
# + [markdown] id="HaJteJKZOGsb"
# 14. Create a random vector of size 30 and find the mean value
# + colab={"base_uri": "https://localhost:8080/"} id="3HdbkGR0OPYU" outputId="88b05d80-35ea-4acc-805d-c5784447b97d"
e=np.random.random(30)
print(e.size)
print(e)
print(e.mean())
# + [markdown] id="cMzWX1ygPjdi"
# 15. Create a 2d array with 1 on the border and 0 inside
# + colab={"base_uri": "https://localhost:8080/"} id="lizRmDS6PmST" outputId="f092d0c8-995c-42b1-c883-768742c9e970"
f=np.ones((3,3))
f[1:-1,1:-1]=0
print(f)
# + [markdown] id="Sg4BVInWQAgF"
# 16. How to add a border (filled with 0's) around an existing array?
# + colab={"base_uri": "https://localhost:8080/"} id="MuxNuWBvQE5K" outputId="e9793447-1896-4844-fd92-30d82b44d036"
g=np.ones((5,5))
print(np.pad(g, pad_width=1, mode='constant',
constant_values=0))
# + [markdown] id="WTN09yq8Rc4a"
# 17. What is the result of the following expression?
#
# 0 * np.nan
#
# np.nan == np.nan
#
# np.inf > np.nan
#
# np.nan - np.nan
#
# np.nan in set([np.nan])
#
# 0.3 == 3 * 0.1
# + colab={"base_uri": "https://localhost:8080/"} id="7QTLAdqlTDaw" outputId="ea8bebb2-91b2-45d0-bc50-3cc5f81509b1"
print(0 * np.nan)
print(np.nan == np.nan)
print(np.inf > np.nan)
print(np.nan - np.nan)
print(np.nan in set([np.nan]))
print(0.3 == 3 * 0.1)
# + [markdown] id="pw8cU1ceTsSx"
# 18. Create a 5x5 matrix with values 1,2,3,4 just below the diagonal
# + colab={"base_uri": "https://localhost:8080/"} id="Tll4Y8o0Tviv" outputId="5e4ac789-72c4-493d-f0e7-e1fa002c6a54"
# 5x5 float matrix with 1,2,3,4 on the first sub-diagonal, zeros elsewhere.
h = np.diag(np.arange(1.0, 5.0), k=-1)
print(h)
# + [markdown] id="a9w3tuRviS7U"
# 19. Create a 8x8 matrix and fill it with a checkerboard pattern
# + colab={"base_uri": "https://localhost:8080/"} id="SmhifuKviXS6" outputId="d0c952e4-6272-4a32-bb9f-135e78103087"
# 8x8 checkerboard: ones where row and column share the same parity.
i = np.zeros((8, 8))
i[::2, ::2] = 1   # even rows, even columns
i[1::2, 1::2] = 1  # odd rows, odd columns
print(i)
# + [markdown] id="NNsQP7LTrZkj"
# 20. Consider a (6,7,8) shape array: what is the index (x,y,z) of the 100th element?
# + colab={"base_uri": "https://localhost:8080/"} id="zD7CQ6WErdPD" outputId="61dc8847-695c-472b-cbea-35cff05cb01e"
print(np.unravel_index(100,(6,7,8)))
# + [markdown] id="2gQCuGdWr2XN"
# 21. Create a checkerboard 8x8 matrix using the tile function
# + colab={"base_uri": "https://localhost:8080/"} id="HVQ0VPgNr7d7" outputId="6a61d4bf-50ec-4534-ec70-05b30768a950"
arr=np.array([[1,0],[0,1]])
brr=np.tile(arr,(4,4))
print(brr)
# + [markdown] id="qTOzsGwBttj3"
# 22. Normalize a 5x5 random matrix
# + colab={"base_uri": "https://localhost:8080/"} id="7YpubBVTtxYf" outputId="7ce2b2f3-a23b-486d-9bc0-d8f2c8421c60"
# Normalize a random 5x5 matrix to the [0, 1] range (min-max scaling).
j = np.random.random((5, 5))
print(j)
# Use distinct names: the original bound the builtins `max` and `min`,
# shadowing them for the rest of the module.
j_max, j_min = j.max(), j.min()
j = (j - j_min) / (j_max - j_min)
print(j)
# + [markdown] id="BsLabvLNveiH"
# 23. Create a custom dtype that describes a color as four unsigned bytes (RGBA)
# + colab={"base_uri": "https://localhost:8080/"} id="ibg1QDD4y50n" outputId="e12c5c58-7238-4da1-e272-ad5120dbe34e"
color = np.dtype([("r", np.ubyte, 1),
("g", np.ubyte, 1),
("b", np.ubyte, 1),
("a", np.ubyte, 1)])
# + [markdown] id="beuJPzcA0Tpp"
# 24. Multiply a 5x3 matrix by a 3x2 matrix (real matrix product)
# + colab={"base_uri": "https://localhost:8080/"} id="RPEnCXbW0YKD" outputId="6813b143-84c0-4262-e9d9-28faeb125a1e"
arr1=np.full((5,3),4)
arr2=np.full((3,2),2)
print(np.matmul(arr1,arr2))
# + [markdown] id="U8Geu4Z_0zTE"
# 25. Given a 1D array, negate all elements which are between 3 and 8, in place.
# + colab={"base_uri": "https://localhost:8080/"} id="B8S1T07z01-l" outputId="3f90c377-99c4-4672-b322-32381e27743b"
# Negate (multiply by -1), in place, all elements in the range [3, 8].
arr = np.arange(15)
print(arr)
print("\n")
# The exercise asks to *negate* the elements; the original zeroed them out.
arr[(arr >= 3) & (arr <= 8)] *= -1
print(arr)
# + [markdown] id="7bchc-0K1cLX"
# 26. What is the output of the following script?
# + colab={"base_uri": "https://localhost:8080/"} id="AYSDBOLS3avY" outputId="c90a6046-5f4f-4147-c978-b93f637441c1"
# Before the star-import, the *builtin* sum is used:
# sum(range(5), -1) == 0+1+2+3+4 with start value -1 == 9.
print(sum(range(5),-1))
# `from numpy import *` rebinds `sum` to numpy.sum -- this cell is a
# deliberate demonstration of why mid-module star imports are dangerous.
from numpy import *
# Now the -1 is interpreted as numpy's `axis` argument, so the result is 10.
print(sum(range(5),-1))
# + [markdown] id="p0jNBKVy37mo"
# 27.Consider an integer vector Z, which of these expressions are legal?
# + colab={"base_uri": "https://localhost:8080/"} id="ChSgqgl24Dqy" outputId="d080e128-c79f-4451-a8ed-5bb027229ef3"
Z=3
print(Z**Z)
print(2 << Z >> 2)
print(Z <- Z)
print(1j*Z)
print(Z/1/1)
print(Z<Z>Z)
# + [markdown] id="vQbif2bj4mlF"
# 28. What are the result of the following expressions?
# + colab={"base_uri": "https://localhost:8080/"} id="gBFhMdb84sBj" outputId="0a06ad3c-0d81-498a-b388-cfb16b0b5a09"
print(np.array(0) / np.array(0))
print(np.array(0) // np.array(0))
print(np.array([np.nan]).astype(int).astype(float))
# + [markdown] id="WVTaifsT4975"
# 29. How to round away from zero a float array ?
# + colab={"base_uri": "https://localhost:8080/"} id="gy5j4VM05CqA" outputId="8ee1e819-1a02-4153-f2ec-824084503414"
# Round a float array *away from zero* (0.9 -> 1, 1.4 -> 2, -1.1 -> -2).
# np.round performs round-half-to-even ("banker's rounding"), which is
# not away-from-zero; combine ceil of the magnitude with the original sign.
k = np.array([0.9, 1.4, 2.5, 3.5, 4.0, 5.0199])
rounded = np.copysign(np.ceil(np.abs(k)), k)
print(rounded)
# + [markdown] id="RiNeOIkm5b8U"
# 30. How to find common values between two arrays?
# + colab={"base_uri": "https://localhost:8080/"} id="xw-HumTi5fX9" outputId="6ffc0692-23db-495e-d37a-86c5ea0a43b1"
l=np.arange((7))
m=np.arange((10))
print(np.intersect1d(l,m))
# + [markdown] id="Uf7toPXi6QMf"
# 31. How to ignore all numpy warnings (not recommended)?
# + id="Mshyh4TC6TN_"
# Suicide mode on
defaults = np.seterr(all="ignore")
Z = np.ones(1) / 0
# Back to sanity
_ = np.seterr(**defaults)
# + [markdown] id="nGk9HEG27JqJ"
# 32. Is the following expressions true?
# + colab={"base_uri": "https://localhost:8080/"} id="R8v95JZe7Mv8" outputId="db16b6df-6c98-47f7-e91a-e73fdfc42375"
print(np.sqrt(-1) == np.emath.sqrt(-1))
#print(np.sqrt(-1))
#print(np.emath.sqrt(-1))
# + [markdown] id="WjwBUIHZ7nNg"
# 33. How to get the dates of yesterday, today and tomorrow?
# + colab={"base_uri": "https://localhost:8080/"} id="8zuMMCKX7rCe" outputId="b9875347-60a2-43e7-d7b6-e2171edbbe7f"
print("TODAY = ", np.datetime64('today', 'D'))
print("YESTERDAY = ", np.datetime64('today', 'D') - np.timedelta64(1, 'D'))
print("TOMORROW = ",np.datetime64('today', 'D') + np.timedelta64(1, 'D'))
# + [markdown] id="G312Ldjn85pN"
#
| numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Linked Panning
#
# "Linked Panning" is the capability of updating multiple plot ranges simultaneously as a result of panning one plot. In Bokeh, linked panning is acheived by shared `x_range` and/or `y_range` values between plots.
#
# Execute the cells below and pan the resulting plots.
from bokeh.io import output_notebook, show
from bokeh.layouts import gridplot
from bokeh.plotting import figure
output_notebook()
x = list(range(11))
y0 = x
y1 = [10-xx for xx in x]
y2 = [abs(xx-5) for xx in x]
# +
# create a new plot
# NOTE: use `height` (not the deprecated `plot_height`) so that all three
# figures are created consistently; `plot_height` was removed in Bokeh 3.x.
s1 = figure(width=250, height=250, title=None)
s1.circle(x, y0, size=10, color="navy", alpha=0.5)
# create a new plot and share both ranges (linked panning on both axes)
s2 = figure(width=250, height=250, x_range=s1.x_range, y_range=s1.y_range, title=None)
s2.triangle(x, y1, size=10, color="firebrick", alpha=0.5)
# create a new plot and share only the x range
s3 = figure(width=250, height=250, x_range=s1.x_range, title=None)
s3.square(x, y2, size=10, color="olive", alpha=0.5)
# -
p = gridplot([[s1, s2, s3]], toolbar_location=None)
show(p)
| examples/howto/Linked panning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Multiplication
#
# This example will show you how to multiply two values.
# The model architecture can be thought of as
# a combination of the combining demo and the squaring demo.
# Essentially, we project both inputs independently into a 2D space,
# and then decode a nonlinear transformation of that space
# (the product of the first and second vector elements).
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import nengo
from nengo.dists import Choice
from nengo.processes import Piecewise
# -
# ## Step 1: Create the model
#
# The model has four ensembles:
# two input ensembles ('A' and 'B'),
# a 2D combined ensemble ('Combined'),
# and an output ensemble ('D').
# +
# Create the model object
model = nengo.Network(label="Multiplication")
with model:
# Create 4 ensembles of leaky integrate-and-fire neurons
A = nengo.Ensemble(100, dimensions=1, radius=10)
B = nengo.Ensemble(100, dimensions=1, radius=10)
combined = nengo.Ensemble(
220, dimensions=2, radius=15
) # This radius is ~sqrt(10^2+10^2)
prod = nengo.Ensemble(100, dimensions=1, radius=20)
# This next two lines make all of the encoders in the Combined population
# point at the corners of the cube.
# This improves the quality of the computation.
# Comment out the line below for 'normal' encoders
combined.encoders = Choice([[1, 1], [-1, 1], [1, -1], [-1, -1]])
# -
# ## Step 2: Provide input to the model
#
# We will use two varying scalar values for the two input signals
# that drive activity in ensembles A and B.
with model:
# Create a piecewise step function for input
inputA = nengo.Node(Piecewise({0: 0, 2.5: 10, 4: -10}))
inputB = nengo.Node(Piecewise({0: 10, 1.5: 2, 3: 0, 4.5: 2}))
correct_ans = Piecewise({0: 0, 1.5: 0, 2.5: 20, 3: 0, 4: 0, 4.5: -20})
# ## Step 3: Connect the elements of the model
with model:
# Connect the input nodes to the appropriate ensembles
nengo.Connection(inputA, A)
nengo.Connection(inputB, B)
# Connect input ensembles A and B to the 2D combined ensemble
nengo.Connection(A, combined[0])
nengo.Connection(B, combined[1])
# Define a function that computes the multiplication of two inputs
def product(x):
return x[0] * x[1]
# Connect the combined ensemble to the output ensemble D
nengo.Connection(combined, prod, function=product)
# ## Step 4: Probe the output
#
# Collect output data from each ensemble and input.
with model:
inputA_probe = nengo.Probe(inputA)
inputB_probe = nengo.Probe(inputB)
A_probe = nengo.Probe(A, synapse=0.01)
B_probe = nengo.Probe(B, synapse=0.01)
combined_probe = nengo.Probe(combined, synapse=0.01)
prod_probe = nengo.Probe(prod, synapse=0.01)
# ## Step 5: Run the model
# Create the simulator
with nengo.Simulator(model) as sim:
# Run it for 5 seconds
sim.run(5)
# ## Step 6: Plot the results
#
# To check the performance of the model,
# we can plot the input signals and decoded ensemble values.
# Plot the input signals and decoded ensemble values
plt.figure()
plt.plot(sim.trange(), sim.data[A_probe], label="Decoded A")
plt.plot(sim.trange(), sim.data[B_probe], label="Decoded B")
plt.plot(sim.trange(), sim.data[prod_probe], label="Decoded product")
plt.plot(
sim.trange(), correct_ans.run(sim.time, dt=sim.dt), c="k", label="Actual product"
)
plt.legend(loc="best")
plt.ylim(-25, 25)
# The input signals we chose make it obvious when things are working,
# as the inputs are zero often (so the product should be).
# When choosing encoders randomly around the circle (the default in Nengo),
# you may see more unwanted interactions between the inputs.
# To see this, comment the above code that sets the encoders
# to the corners of the cube (in Step 1 where it says
# `# Comment out the line below for 'normal' encoders`).
# ## Bonus step: Make a subnetwork
#
# If you find that you need to compute the product
# in several parts of your network,
# you can put all of the components necessary
# to compute the product
# together in a subnetwork.
# By making a function to construct this subnetwork,
# it becomes easy to make many such networks
# in a single model.
# +
def Product(neuron_per_dimension, input_magnitude):
    """Build a reusable product-computing subnetwork.

    The returned network exposes ``A`` and ``B`` (1D passthrough input
    nodes), ``combined`` (a 2D ensemble representing both inputs) and
    ``prod`` (a 1D ensemble decoding the product A*B).
    """
    net = nengo.Network(label="Product")
    with net:
        # Passthrough nodes so callers can connect into the subnetwork.
        net.A = nengo.Node(output=None, size_in=1)
        net.B = nengo.Node(output=None, size_in=1)
        # Represent both inputs in a single 2D ensemble; corner-pointing
        # encoders improve the quality of the product computation.
        net.combined = nengo.Ensemble(
            neuron_per_dimension * 2,
            dimensions=2,
            radius=np.sqrt(input_magnitude ** 2 + input_magnitude ** 2),
            encoders=Choice([[1, 1], [-1, 1], [1, -1], [-1, -1]]),
        )
        net.prod = nengo.Ensemble(
            neuron_per_dimension, dimensions=1, radius=input_magnitude * 2
        )
        # Route each input into one dimension of the combined ensemble.
        nengo.Connection(net.A, net.combined[0], synapse=None)
        nengo.Connection(net.B, net.combined[1], synapse=None)
        # Decode the elementwise product out of the 2D state.
        nengo.Connection(net.combined, net.prod,
                         function=lambda x: x[0] * x[1])
    return net
# The previous model can then be replicated with the following
model = nengo.Network(label="Multiplication")
with model:
inputA = nengo.Node(Piecewise({0: 0, 2.5: 10, 4: -10}))
inputB = nengo.Node(Piecewise({0: 10, 1.5: 2, 3: 0, 4.5: 2}))
A = nengo.Ensemble(100, dimensions=1, radius=10)
B = nengo.Ensemble(100, dimensions=1, radius=10)
prod = Product(100, input_magnitude=10)
nengo.Connection(inputA, A)
nengo.Connection(inputB, B)
nengo.Connection(A, prod.A)
nengo.Connection(B, prod.B)
inputA_probe = nengo.Probe(inputA)
inputB_probe = nengo.Probe(inputB)
A_probe = nengo.Probe(A, synapse=0.01)
B_probe = nengo.Probe(B, synapse=0.01)
combined_probe = nengo.Probe(prod.combined, synapse=0.01)
prod_probe = nengo.Probe(prod.prod, synapse=0.01)
# Create the simulator
with nengo.Simulator(model) as sim:
# Run it for 5 seconds
sim.run(5)
# Plot the input signals and decoded ensemble values
plt.figure()
plt.plot(sim.trange(), sim.data[A_probe], label="Decoded A")
plt.plot(sim.trange(), sim.data[B_probe], label="Decoded B")
plt.plot(sim.trange(), sim.data[prod_probe], label="Decoded product")
plt.plot(
sim.trange(), correct_ans.run(sim.time, dt=sim.dt), c="k", label="Actual product"
)
plt.legend(loc="best")
plt.ylim(-25, 25)
# -
# Alternatively, you can use Nengo's built in
# [`nengo.networks.Product` network](
# https://www.nengo.ai/nengo/networks.html#nengo.networks.Product).
# This network works with input of any dimensionality
# (e.g., to compute the dot product of two large vectors)
# and uses special optimizatons to make the product
# more accurate than this implementation.
| docs/examples/basic/multiplication.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
from IPython.display import HTML
HTML("""
<br><br>
<a href=http://wwwgong.pythonanywhere.com/cuspea/default/list_talks target=new>
<font size=+3 color=blue>CUSPEA Talks</font>
</a>
<br><br>
<img src=images/jupyter-notebook-wen-gong.jpg><br>
""")
# + [markdown] nbpresent={"id": "6ee77bce-39d1-46a1-802d-c7aa0f07f653"}
# # Fun with [Jupyter](http://jupyter.org/)
# + [markdown] nbpresent={"id": "5676905a-4d3a-478a-bd10-06df67ffce84"}
# ## Table of Contents
#
# * [Motivation](#hid_why)
# * [Introduction](#hid_intro)
# * [Problem Statement](#hid_problem)
# * [Import packages](#hid_pkg)
# * [Estimate x range](#hid_guess)
# * [Use IPython as a calculator](#hid_calculator)
# * [Use Python programming to find solution](#hid_program)
# * [Graph the solution with matplotlib](#hid_graph)
# * [Solve equation precisely using SymPy](#hid_sympy)
# * [Pandas for Big Data Analytics](#hid_panda)
# * [Multimedia with HTML5 -Text, Image, Audio, Video](#hid_html5)
# * [Interactive widgets](#hid_widget)
# * [Working with SQLite Databases](#hid_sqlite)
# * [References](#hid_ref)
# * [Contributors](#hid_author)
# * [Appendix](#hid_apend)
# - [How to install Jupyter Notebook](#hid_setup)
# - [How to share a notebook](#hid_share)
#
# -
# ## Motivation <a class="anchor" id="hid_why"></a>
# * Current Choice
#
# <img src=http://www.cctechlimited.com/pics/office1.jpg>
# * A New Option
#
# > The __Jupyter Notebook__ is an open-source web application that allows you to create and share documents that contain live code, equations, visualizations and explanatory text. Uses include: data cleaning and transformation, numerical simulation, statistical modeling, machine learning and much more.
#
# Useful for many tasks
#
# * Programming
# * Blogging
# * Learning
# * Research
# * Documenting work
# * Collaborating
# * Communicating
# * Publishing results
#
# or even
#
# * Doing homework as a student
#
HTML("<img src=images/office-suite.jpg>")
# See this [Demo Notebook](https://nbviewer.jupyter.org/github/waltherg/notebooks/blob/master/2013-12-03-Crank_Nicolson.ipynb)
#
# <img src=https://nbviewer.jupyter.org/static/img/example-nb/pde_solver_with_numpy.png>
# + [markdown] nbpresent={"id": "031da43c-0284-4433-bd3d-c6c596c92b27"}
# ## Introduction <a class="anchor" id="hid_intro"></a>
# + [markdown] nbpresent={"id": "dc9f34ec-b74d-4c7b-8fd8-3be534221c35"}
# By solving a simple math problem here, I hope to demonstrate many basic features of Jupyter Notebook.
# + [markdown] nbpresent={"id": "81e52f61-9b24-49b2-9953-191e6fe26656"}
# ## Problem Statement <a class="anchor" id="hid_problem"></a>
# + [markdown] nbpresent={"id": "fcc24795-9fe9-4a15-ae38-3444d4044697"}
# A hot startup is selling a hot product.
#
# In 2014, it had 30 customers; 3 years later, its customer base expands to 250.
#
# Question: What is its annual growth rate?
#
# _Translate into math:_ Let $x$ be annual growth rate,
#
# then
# $$ 30 \cdot (1 + x)^3 = 250 $$
# + [markdown] nbpresent={"id": "30861e9b-f7e6-41b2-be2d-d1636961816b"}
# ## Import packages <a class="anchor" id="hid_pkg"></a> (check out from Library)
# + nbpresent={"id": "40d4fcce-0acd-452d-b56a-0caf808e1464"}
# math function
import math
# create np array
import numpy as np
# pandas for data analysis
import pandas as pd
# plotting
import matplotlib.pyplot as plt
# %matplotlib inline
# symbolic math
import sympy as sy
# html5
from IPython.display import HTML, SVG, YouTubeVideo
# widgets
from collections import OrderedDict
from IPython.display import display, clear_output
from ipywidgets import Dropdown
# csv file
import csv
# work with Sqlite database
import sqlite3
# + [markdown] nbpresent={"id": "17336b61-d6bd-496d-863a-1ad759a9e4b7"}
# ## Estimate x range <a class="anchor" id="hid_guess"></a> (in Elementary School)
# -
# If $x$ = 1, then _l.h.s_ = 240, therefore $x$ > 1
# + [markdown] nbpresent={"id": "d63d625e-79d0-4bf8-a405-36c9df127bb5"}
# If $x$ = 1.2, then l.h.s =
# + nbpresent={"id": "0ff80a09-136d-4033-ab6d-4169cff99d42"}
30*(1+1.2)**3
# + [markdown] nbpresent={"id": "7e061caa-9505-4452-ae28-62a29553ed8b"}
# therefore we know $x$ range = (1.0, 1.2)
# + [markdown] nbpresent={"id": "1a60ecbe-653f-43fe-bd30-b52425a51920"}
# ## Use IPython as a calculator <a class="anchor" id="hid_calculator"></a> (in Middle School)
# + nbpresent={"id": "5ca9a4ea-0a1b-491f-97bb-03275691c867"}
# import math
# + nbpresent={"id": "09fdb815-ef9e-4fd7-8dd3-c0f30437990a"}
math.exp(math.log(250/30)/3) - 1
# + nbpresent={"id": "543b2781-e45b-4dbb-894e-0b6dfac0dfd3"}
10**(math.log10(250/30)/3) - 1
# + nbpresent={"id": "b21b7c89-1442-4320-ae43-9e150e087ff0"}
math.pow(10, math.log10(250/30)/3) -1
# + [markdown] nbpresent={"id": "f5c44fb8-78ff-46e5-b444-f484e81030ab"}
# **Annual customer growth rate = 102%**
# + [markdown] nbpresent={"id": "8b8d1cf7-3571-43f8-b3a0-66121ce196de"}
# ## Use Python programming to find solution <a class="anchor" id="hid_program"></a> (in High School)
# + [markdown] nbpresent={"id": "66c4b853-a7be-4619-b348-514df558e698"}
# ### use loop
# + nbpresent={"id": "8642a48c-db83-4b9a-81d3-84c6255763f8"}
# Build a list of 100 evenly spaced candidate growth rates in [1.0, 1.2).
nstep = 100
x_min, x_max = 1.0, 1.2
step = (x_max - x_min) / nstep
x_l = [x_min + k * step for k in range(nstep)]
type(x_l)
# + nbpresent={"id": "957a73e1-cb5e-4b00-af89-30cff8918b7b"}
print(x_l)
# + nbpresent={"id": "79871358-f571-4f61-90fb-ee2e2fe1e792"}
# Brute-force scan: print every candidate growth rate t whose cubic
# residual |30*(1+t)^3 - 250| falls within 0.5 of the target.
for t in x_l:
    err = abs(30*(1+t)**3 - 250)
    if err <= 0.5:
        print("t={x}: error={e:.4f}".format(x=t,e=err))
# + [markdown] nbpresent={"id": "c017894a-42a7-4936-9a3e-b6ad9b916f7e"}
# ### create a numpy array
# + nbpresent={"id": "fb3debd8-f726-4a26-9ebf-7c7adfab7996"}
# import numpy as np
# import pandas as pd
# + nbpresent={"id": "64c1a6f3-7751-487c-b71b-0d501cce1199"}
print(x_l)
# + [markdown] nbpresent={"id": "dda6cbdf-98e7-4ce7-97c9-ad3715163327"}
# #### using arange()
# + nbpresent={"id": "46cadb76-3a49-44e6-922b-24f60a9398b5"}
x = np.arange(1.0, 1.2, 0.005)
print(x)
# + [markdown] nbpresent={"id": "c157bb0f-5d4d-456b-8f87-5c8cde8945ff"}
# check its type
# + nbpresent={"id": "76c1bb8b-1f47-4325-9e65-3089408d86b8"}
type(x)
# + nbpresent={"id": "b33222f5-ca6f-4f6b-91f4-fa13ae22864a"}
len(x)
# + nbpresent={"id": "3a342312-18db-4f98-8621-34b695a12c4d"}
print(30*(1+x)**3 - 250)
# + nbpresent={"id": "a59b53cf-e6b9-4dae-9f5a-b94aca860131"}
x_ge_0 = (30*(1+x)**3 - 250) >= 0
x_ge_0
# + nbpresent={"id": "b8c7bd40-31ba-4950-93a7-b9793c3b28d7"}
x_lt_0 = (30*(1+x)**3 - 250) < 0
x_lt_0
# + [markdown] nbpresent={"id": "755745d2-55dc-4e42-85b9-42713e2dae83"}
# x_ge_0 and x_lt_0 are logical array
# + nbpresent={"id": "ab275db8-3d80-4efb-b221-2db8de59a314"}
for t in x:
err = abs(30*(1+t)**3 - 250)
if err <= 1.0:
print("t={x}: error={e:.4f}".format(x=t,e=err))
# + [markdown] nbpresent={"id": "c9c0cdfb-e6b9-448a-bc7f-9c54f53089a0"}
# #### using [linspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)
# + nbpresent={"id": "240d239b-5ffb-4fba-bc1f-66593f0d0d5b"}
x1 = np.linspace(1.0, 1.2, 100)
x1
# + nbpresent={"id": "fb165454-8031-48d4-ad53-13a529881c4f"}
for t in x1:
err = math.fabs(30*(1+t)**3 - 250)
if err <= 1.0:
print("t={x}: error={e:.4f}".format(x=t,e=err))
# + [markdown] nbpresent={"id": "5c813ea9-fbf2-46b2-b5e1-fb75d2497744"}
# ## Graph the solution using Matplotlib (in High School)
# <a class="anchor" id="hid_graph"></a>
# + [markdown] nbpresent={"id": "e52f4a27-8dc3-4f11-9d71-1e05704cdedf"}
# 
#
# [matplotlib](http://matplotlib.org/contents.html?v=20170307111739) is visualization pkg for python
# + nbpresent={"id": "31fd2fc9-6d40-42a5-9802-412c8149cb49"}
# import matplotlib.pyplot as plt
# # %matplotlib inline
# + nbpresent={"id": "95cf6bd7-4a1d-430c-9c92-f3f0ecfcb1e2"}
x2 = np.linspace(1.0, 1.05, 100)
f1 = 30*(1+x2)**3 - 250
f2 = np.zeros_like(x2) # draw a horizontal line at y=0
# + [markdown] nbpresent={"id": "92028685-116f-4efe-bf56-d5059998a976"}
# $x$ intersection of two lines f1/f2 gives the solution
# + nbpresent={"id": "635887f9-ec1e-4761-87e3-10dd2fb8f229"}
plt.xlabel(r'$x$')
plt.ylabel(r'$\delta$')
plt.grid(True)
plt.title('IPython is a great analytical tool')
plt.axis([0.995,1.055, -11, 9])
#plt.axis([1.02, 1.04, -11, 9])
plt.plot(x2, f1, 'r+')
plt.plot(x2, f2, 'b-', lw=2)
plt.show()
# + [markdown] nbpresent={"id": "c4b654eb-9403-4aef-b9b3-e3c53b91a5c3"}
# ## Solve equation precisely using SymPy (in High School) <a class="anchor" id="hid_sympy"></a>
# + nbpresent={"id": "28c3e44a-666b-4f62-9a86-23056fbbcc0b"}
# from sympy import *
# import sympy as sy
# + nbpresent={"id": "46a79e4d-b1cd-43ee-a68d-9263f5b3ddb5"}
sy.var('x')
# + nbpresent={"id": "cad6f08b-37b5-4539-8e94-66f490c45466"}
sy.solve(30*(1+x)**3 - 250, x)
# + [markdown] nbpresent={"id": "4c21da2f-faa6-4ff0-a645-9722cc922919"}
# Ignore other 2 solutions because they are complex numbers
# + nbpresent={"id": "b439d6a8-2071-4e06-b827-1dc3ac81b767"}
grow_rate = -1 + 15**(2/3)/3
grow_rate
# + [markdown] nbpresent={"id": "521ce1d7-514f-4d86-bae5-2fcc4bf4ab7f"}
# ## Pandas for Big Data Analytics (in College) <a class="anchor" id="hid_panda"></a>
#
# [pandas](http://pandas.pydata.org/pandas-docs/stable/?v=20170307111739) stands for powerful Python data analysis toolkit
# + nbpresent={"id": "95d6b24e-5799-4179-a690-4c1b492405df"}
# import pandas as pd
# + nbpresent={"id": "cd8c9d7c-1a28-4894-9382-726f892959ab"}
year = [2014, 2015, 2016, 2017]
# + nbpresent={"id": "1b7a87cb-30d0-41d7-ae84-3e44c821b406"}
customer_count = [30*(1+grow_rate)**i for i in range(4)]
print(customer_count)
# + nbpresent={"id": "3d742078-bbfa-4a9c-a99d-0b432f40d5a3"}
df = pd.DataFrame(list(zip(year,customer_count)), columns=['Year','Customers'])
df
# + nbpresent={"id": "340592f6-78d1-48b0-ac3b-bfa9808a4dc1"}
df.head(2)
# + nbpresent={"id": "286db730-6d49-4f87-9e9a-44770f14101a"}
df.tail(2)
# + [markdown] nbpresent={"id": "941ecb8e-8d62-4cba-8d4d-49b851313ca1"}
# #### Line chart
# + nbpresent={"id": "1c17a7bf-7077-48d0-b8f5-38845853660e"}
plt.plot(df['Year'], df['Customers'])
# + [markdown] nbpresent={"id": "e669accf-2611-4273-8ba0-01d92517a89b"}
# #### Bar chart
# + nbpresent={"id": "8e314949-a08d-475c-8098-43f8cfa78dc6"}
plt.xlabel('Year')
plt.ylabel('Customer Count')
plt.grid(True)
plt.title('Customer Growth')
plt.bar(df['Year'], df['Customers'])
plt.plot(df['Year'], df['Customers'], 'r-')
# + [markdown] nbpresent={"id": "d040f9c4-f02a-46c8-b4c6-ab7cb7c0ab46"}
# ## Multimedia with HTML5 -Text, Image, Audio, Video <a class="anchor" id="hid_html5"></a> (Having graduated from all the schools)
# + nbpresent={"id": "1ec80f66-2106-432d-b0cc-2293f8324bd6"}
# from IPython.display import HTML, SVG, YouTubeVideo
# + [markdown] nbpresent={"id": "5d438cf4-af7e-4df7-8942-90761ccdc03d"}
# ### create an HTML table dynamically with Python, and we display it in the (HTML-based) notebook.
# + [markdown] nbpresent={"id": "469d012d-4381-4c97-93c2-e5170b43df46"}
# Let me create a multiplication table
# + nbpresent={"id": "d4eefee9-e016-4e0f-9b67-497f560b01c4"}
HTML('''
<table style="border: 2px solid black;">
''' +
''.join(['<tr>' +
''.join(['<td>{row}x{col}={prod}</td>'.format(
row=row, col=col, prod=row*col
) for col in range(10)]) +
'</tr>' for row in range(10)]) +
'''
</table>
''')
# + [markdown] nbpresent={"id": "df9e7c92-a0a3-496c-b213-eafdb304e767"}
# ### display image
# + nbpresent={"id": "6605234f-93e2-41e7-8ce0-7c7aac5e7c4b"}
HTML("""
<img src="https://s-media-cache-ak0.pinimg.com/564x/59/b6/cc/59b6cc26b9502fa7d8f494050ca80ac4.jpg" alt="sunset" style="width:250px;height:400px;">
""")
# + [markdown] nbpresent={"id": "42ac8c27-526c-4f9e-ac2e-a398b3776c0a"}
# ### create a SVG graphics dynamically.
# + nbpresent={"id": "eb0843a1-e45a-4ef9-b191-3d12a6dc8c7c"}
SVG('''<svg width="600" height="80">''' +
''.join(['''<circle cx="{x}" cy="{y}" r="{r}"
fill="red" stroke-width="2" stroke="black">
</circle>'''.format(
x=(30+3*i)*(10-i), y=30, r=3.*float(i)
) for i in range(10)]) +
'''</svg>''')
# + [markdown] nbpresent={"id": "8abf318a-6523-40c3-97d8-aa6e113db74a"}
# ### embed an audio clip
# + nbpresent={"id": "1384a433-443d-4b5c-b6b7-ec7486028927"}
HTML("""
<table>
<tr>
<td>
<a href=https://talkpython.fm>TalkPython </a>
<h2> <NAME></h2> (Python Creator & Tsar)
<h3> Python past, present, and future </h3>
<br/>
<audio controls>
<source src="https://downloads.talkpython.fm/static/episode_cache/100-guido-van-rossum.mp3" type="audio/mpeg">
Your browser does not support the audio element.
</audio>
</td>
<td>
<img src="https://pbs.twimg.com/profile_images/424495004/GuidoAvatar_400x400.jpg" alt="Guido" style="width:200px;height:200px;"
</td>
</tr>
</table>
""")
# + [markdown] nbpresent={"id": "cf191bef-5eb9-4a6d-8628-d3041143a995"}
# ### display a Youtube video by giving its identifier to YoutubeVideo.
# + [markdown] nbpresent={"id": "6d56798b-148a-4838-98fc-b32974781ceb"}
# #### SciPy 2013 Keynote: IPython
# + nbpresent={"id": "f0a8f2a1-b6c8-4454-932f-c6faf1d4a4b9"}
YouTubeVideo('j9YpkSX7NNM')
# + [markdown] nbpresent={"id": "b35dd3ab-1fe9-4328-b416-61ab83ee438a"}
# ## Interactive widgets <a class="anchor" id="hid_widget"></a>
#
# we illustrate the latest interactive features in IPython 2.0+.
#
# This version brings graphical widgets in the notebook that can interact with Python objects.
#
# We will create a drop-down menu allowing us to display one among several videos.
# + nbpresent={"id": "89084255-cc83-4070-b2f5-5434e7f433f8"}
# How to comment out multiple lines of code in python
"""
from collections import OrderedDict
from IPython.display import display, clear_output
from ipywidgets import Dropdown
"""
# + nbpresent={"id": "469b7baf-79e3-46f6-a5a6-d2219c22f23d"}
# We create a Dropdown widget, with a dictionary containing
# the keys (video name) and the values (Youtube identifier) of every menu item.
dw = Dropdown(options=OrderedDict([
    ('SciPy 2012', 'iwVvqwLDsJo'),
    ('PyCon 2012', '2G5YTlheCbw'),
    ('SciPy 2013', 'j9YpkSX7NNM'),
    ('<NAME>', 'EBRMq2Ioxsc'),
    ('Mendelssohn Violin', 'o1dBg__wsuo')
]))
# We create a callback function that displays the requested Youtube video.
def on_value_change(name, val):
    clear_output()
    display(YouTubeVideo(val))
# Every time the user selects an item, the function
# `on_value_change` is called, and the `val` argument
# contains the value of the selected item.
# NOTE(review): `on_trait_change` is the legacy (IPython 2/3-era) observer
# API; modern ipywidgets uses `dw.observe(handler, names='value')` with a
# single change-dict argument -- confirm the target ipywidgets version.
dw.on_trait_change(on_value_change, 'value')
# We choose a default value.
# NOTE(review): indexing `dw.options[...]` assumes dict-style options;
# newer ipywidgets expose options as a tuple -- verify before upgrading.
dw.value = dw.options['Mendelssohn Violin']
# Finally, we display the widget.
display(dw)
# -
# ## Working with SQLite Databases <a class="anchor" id="hid_sqlite"></a>
#
# read [blog](https://www.dataquest.io/blog/python-pandas-databases/) at [DataQuest](https://www.dataquest.io)
# +
# open connection to db
conn = sqlite3.connect("dataset/open_src.sqlite")
# create a cursor
cur = conn.cursor()
# select query
results = cur.execute("select * from os_history limit 100;").fetchall()
#print(results)
# count # of rows
results = cur.execute("select count(*) from os_history;").fetchall()
#print(results)
# store data from csv file into db
# SECURITY: build the INSERT with `?` placeholders instead of str.format --
# string-built SQL breaks on quotes/apostrophes in the CSV data and is
# vulnerable to SQL injection.
insert_str = """
insert into os_history(year,subject,subjecturl,person,picture,history)
values (?, ?, ?, ?, ?, ?)
"""
# newline='' is the csv-module recommended mode for reading CSV files
with open('dataset/open_src_move_v2_1.csv', newline='') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        cur.execute(insert_str, (row['Year'], row['Subject'], row['SubjectURL'],
                                 row['Person'], row['Picture'], row['History']))
conn.commit()
# create a dataframe
df = pd.read_sql_query("select * from os_history limit 5;", conn)
# inspect data
df
# + [markdown] nbpresent={"id": "58dc82ce-5499-45ba-a45d-5983a5c22edb"}
# ## References <a class="anchor" id="hid_ref"></a>
# + [markdown] nbpresent={"id": "ac06c191-b6a8-48f6-86f6-c78c76462861"}
# ### Websites
# + [markdown] nbpresent={"id": "bcf75c29-93f1-453d-81c1-fbd5d2c95c2c"}
# * [DataCamp - Jupyter Notebook Tutorial](https://www.datacamp.com/community/tutorials/tutorial-jupyter-notebook#gs.ClmI4Jc)
#
#
# * http://docs.python.org
#
# It goes without saying that Python’s own online documentation is an excellent resource if you need to delve into the finer details of the language and modules. Just make sure you’re looking at the documentation for Python 3 and not earlier versions.
#
# * http://www.python.org/dev/peps
#
# Python Enhancement Proposals (PEPs) are invaluable if you want to understand the motivation for adding new features to the Python language as well as subtle implementation details. This is especially true for some of the more advanced language features. In writing this book, the PEPs were often more useful than the official documentation.
#
# * http://pyvideo.org
#
# This is a large collection of video presentations and tutorials from past PyCon conferences, user group meetings, and more. It can be an invaluable resource for learning about modern Python development. Many of the videos feature Python core developers talking about the new features being added in Python 3.
#
# * http://code.activestate.com/recipes/langs/python
#
# The ActiveState Python recipes site has long been a resource for finding the solution to thousands of specific programming problems. As of this writing, it contains approximately 300 recipes specific to Python 3. You’ll find that many of its recipes either expand upon topics covered in this book or focus on more narrowly defined tasks. As such, it’s a good companion.
#
# * http://stackoverflow.com/questions/tagged/python
#
# Stack Overflow currently has more than 175,000 questions tagged as Python-related (and almost 5000 questions specific to Python 3). Although the quality of the questions and answers varies, there is a lot of good material to be found.
# + [markdown] nbpresent={"id": "66662698-f8b7-4482-b9dc-d220a918e51d"}
# ### Books
# + [markdown] nbpresent={"id": "28979679-24a9-455b-a06c-f82ace24848b"}
# * [Learning IPython for Interactive Computing and Data Visualization - Second Edition (By Cyrille Rossant)](https://github.com/ipython-books/minibook-2nd-code)
# * [IPython Interactive Computing and Visualization Cookbook (By Cyrille Rossant)](https://github.com/ipython-books/cookbook-code)
# * [Python Cookbook, 3rd Edition by <NAME>; <NAME>](https://github.com/dabeaz/python-cookbook)
# * [Python for Data Analysis by <NAME>](https://github.com/wesm/pydata-book)
#
# + [markdown] nbpresent={"id": "06ce80e4-29de-4994-9808-c8feffa25d8d"}
# ### Other Resources
#
# * Idea
# - [Google Search](http://www.google.com)
# * Text
# - [Wikipedia](https://www.wikipedia.org/)
# * Image
# - [Google Images](https://www.google.com/imghp)
# * Video
# - [YouTube](https://www.youtube.com/)
#
# + [markdown] nbpresent={"id": "095385ad-d26d-4168-9bd6-09029a9fe701"}
# ## Contributors <a class="anchor" id="hid_author"></a>
# + [markdown] nbpresent={"id": "3c647eed-ff6d-4b34-ae0b-08ee99798711"}
# * <EMAIL> (first created on 2017-03-09)
# + [markdown] nbpresent={"id": "08d52625-4758-4290-b292-0f166e9ae95d"}
# ## Appendix <a class="anchor" id="hid_apend"></a>
# + [markdown] nbpresent={"id": "ec7bcfc3-e3fc-413c-ab52-59da41e9d885"}
# ### How to install Jupyter Notebook <a class="anchor" id="hid_setup"></a>
# + [markdown] nbpresent={"id": "6d1f3b83-78be-4853-bc1d-e61c7e1950c6"}
# I use Windows and follow this link to [install Anaconda Python distribution](https://www.tensorflow.org/install/)
#
# * Follow the instructions on the [Anaconda download site](https://www.continuum.io/downloads) to download and install Anaconda.
#
# * open a DOS command box, launch jupyter by typing
# > jupyter notebook
#
# * wait till Jupyter homepage to open in a browser
#
# * start to create your own Notebook
# + [markdown] nbpresent={"id": "fa5bff1d-0529-4f92-8a18-3acc585bcbd7"}
# ### How to share a notebook <a class="anchor" id="hid_share"></a>
# + [markdown] nbpresent={"id": "02fbc4ce-f18f-4ac0-a8b1-3cca3e3d29e4"}
# share your learning or work via nbviewer at http://nbviewer.jupyter.org/
# -
# Render a closing image inline (HTML helper from IPython.display,
# imported earlier in the notebook).
HTML("<img src=images/to-be-continued-1.jpg>")
| fun_with_jupyter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# imports
import pysiaf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatch
from matplotlib.collections import PatchCollection
# +
# Load NIRCam SIAF (Science Instrument Aperture File: the JWST aperture
# geometry definitions used by all plots below).
nrc_siaf = pysiaf.Siaf('NIRCam')
# To see the aperture names, uncomment this line:
#nrc_siaf.apernames
# +
# Create a plot that shows the SUB160 apertures on Module B.
# Figure setup
fig = plt.figure(figsize=(8, 8))
ax = plt.gca()
# Plot the outline of each aperture, with reference points marked (plus symbol is default).
# Plotting blue and red lines separately, blue for short wavelength and red for long wavelength.
nrc_siaf.plot('tel', ['NRCB1_SUB160','NRCB2_SUB160','NRCB3_SUB160','NRCB4_SUB160'],
              mark_ref=True, fill_color='None', color='Blue', label=False)
nrc_siaf.plot('tel', ['NRCB5_SUB160'],
              mark_ref=True, fill_color='None', color='Red', label=False, clear=False)
plt.title('Module B, SUB160 Apertures')
# Fill in the apertures with transparent colors (there's probably a better way to do this...)
lw = nrc_siaf['NRCB5_SUB160']  # grab the aperture of interest
c = lw.corners('tel')  # grab the corners of that aperture
clist = np.array([[c[0][i], c[1][i]] for i in [0, 1, 2, 3]])  # reformat the corners array
# plot the aperture:
line = plt.Polygon(clist, facecolor='r', closed=True, alpha=0.3)
plt.gca().add_line(line)
# Repeat for other apertures, selecting colors as appropriate
aperfill = ['NRCB1_SUB160', 'NRCB2_SUB160', 'NRCB3_SUB160', 'NRCB4_SUB160']
for i in aperfill:
    # NOTE: the comprehension below re-uses the name `i` for the corner index;
    # in Python 3 a comprehension has its own scope, so this is harmless — the
    # aperture name is the outer `i` — but it reads confusingly.
    sw = nrc_siaf[i]
    c = sw.corners('tel')
    clist = np.array([[c[0][i], c[1][i]] for i in [0, 1, 2, 3]])
    line = plt.Polygon(clist, facecolor='b', closed=True, alpha=0.3)
    plt.gca().add_line(line)
# -
# +
# Make a plot of the full NIRCam FOV with SCA labels.
fig = plt.figure(figsize=(12, 6))
# LW full arrays
nrc_siaf.plot('tel', ['NRCB5_FULL','NRCA5_FULL'], fill_color='None', color='Red', label=False, lw=2)
# SW full arrays
nrc_siaf.plot('tel', ['NRCB1_FULL','NRCB2_FULL','NRCB3_FULL','NRCB4_FULL',
                      'NRCA1_FULL','NRCA2_FULL','NRCA3_FULL','NRCA4_FULL'],
              fill_color='None', color='Blue', label=False, clear=False, lw=2)
plt.title('NIRCam Field of View')

def _fill_apertures(aperture_names, facecolor, alpha=0.3):
    """Shade each named aperture with a transparent polygon on the current axes."""
    for name in aperture_names:
        corners = nrc_siaf[name].corners('tel')
        vertices = np.array([[corners[0][k], corners[1][k]] for k in range(4)])
        plt.gca().add_line(plt.Polygon(vertices, facecolor=facecolor, closed=True, alpha=alpha))

# LW detectors shaded red, SW detectors shaded blue.
# (Previously two near-identical copy/paste loops that also shadowed the
# loop variable `i` inside the corner comprehension.)
_fill_apertures(['NRCB5_FULL', 'NRCA5_FULL'], 'r')
_fill_apertures(['NRCA1_FULL', 'NRCA2_FULL', 'NRCA3_FULL', 'NRCA4_FULL',
                 'NRCB1_FULL', 'NRCB2_FULL', 'NRCB3_FULL', 'NRCB4_FULL'], 'b')

# Annotate the SCA names
ax = plt.gca()
ax.text(-140, -480, 'B1', color='Blue', fontsize=16)
ax.text(-140, -510, 'B2', color='Blue', fontsize=16)
ax.text( -30, -480, 'B3', color='Blue', fontsize=16)
ax.text( -30, -510, 'B4', color='Blue', fontsize=16)
ax.text(  35, -480, 'A4', color='Blue', fontsize=16)
ax.text(  35, -510, 'A3', color='Blue', fontsize=16)
ax.text( 145, -480, 'A2', color='Blue', fontsize=16)
ax.text( 145, -510, 'A1', color='Blue', fontsize=16)
# -
# +
# Plot the coronagraph subarrays.
fig = plt.figure(figsize=(10, 6))
# Start with the LW subarrays (in red)
nrc_siaf.plot('tel', ['NRCA5_MASK335R', 'NRCA5_MASK430R', 'NRCA5_MASKLWB_F250W',
                      'NRCA5_MASKLWB_F300M', 'NRCA5_MASKLWB_F277W', 'NRCA5_MASKLWB_F335M',
                      'NRCA5_MASKLWB_F360M', 'NRCA5_MASKLWB_F356W', 'NRCA5_MASKLWB_F410M',
                      'NRCA5_MASKLWB_F430M', 'NRCA5_MASKLWB_F460M', 'NRCA5_MASKLWB_F480M',
                      'NRCA5_MASKLWB_F444W'],
              mark_ref=True, fill_color='None', color='Red', label=False, clear=False, lw=1)
# These are the SW subarrays
nrc_siaf.plot('tel', ['NRCA2_MASK210R', 'NRCA4_MASKSWB_F182M', 'NRCA4_MASKSWB_F187N',
                      'NRCA4_MASKSWB_F210M', 'NRCA4_MASKSWB_F212N', 'NRCA4_MASKSWB_F200W'],
              mark_ref=True, fill_color='None', color='Blue', label=False, clear=False, lw=1)
# These are the TA apertures for SW (bright sources)
nrc_siaf.plot('tel', ['NRCA2_TAMASK210R', 'NRCA4_TAMASKSWB', 'NRCA4_TAMASKSWBS'],
              mark_ref=True, fill_color='None', color='Blue', label=False, clear=False, lw=1)
# These are the faint source TA apertures for SW (dashed outline)
nrc_siaf.plot('tel', ['NRCA2_FSTAMASK210R', 'NRCA4_FSTAMASKSWB'],
              mark_ref=True, fill_color='None', color='Blue', label=False, clear=False, lw=1, ls='--')
# These are the bright source TA apertures for LW
nrc_siaf.plot('tel', ['NRCA5_TAMASK335R', 'NRCA5_TAMASK430R', 'NRCA5_TAMASKLWB', 'NRCA5_TAMASKLWBL'],
              mark_ref=True, fill_color='None', color='Red', label=False, clear=False, lw=1)
# These are the faint source TA apertures for LW (dashed outline)
nrc_siaf.plot('tel', ['NRCA5_FSTAMASKLWB', 'NRCA5_FSTAMASK335R', 'NRCA5_FSTAMASK430R'],
              mark_ref=True, fill_color='None', color='Red', label=False, clear=False, lw=1, ls='--')
plt.title('Module A, Coronagraph Subarrays')
plt.ylim(-425, -390)  # restrict axes to the coronagraph region
# Fill in the main subarrays (parent apertures, LW in red)
aperfill = ['NRCA5_MASKLWB', 'NRCA5_MASK335R', 'NRCA5_MASK430R']
for i in aperfill:
    lw = nrc_siaf[i]
    c = lw.corners('tel')
    clist = np.array([[c[0][i], c[1][i]] for i in [0, 1, 2, 3]])
    line = plt.Polygon(clist, facecolor='r', closed=True, alpha=0.3)
    plt.gca().add_line(line)
# SW parent apertures in blue
aperfill = ['NRCA4_MASKSWB', 'NRCA2_MASK210R']
for i in aperfill:
    lw = nrc_siaf[i]
    c = lw.corners('tel')
    clist = np.array([[c[0][i], c[1][i]] for i in [0, 1, 2, 3]])
    line = plt.Polygon(clist, facecolor='b', closed=True, alpha=0.3)
    plt.gca().add_line(line)
# Annotations (positions hand-tuned for this ylim window)
ax = plt.gca()
ax.text(108, -418, 'A5_MASK335R', color='Red', ha='center')
ax.text(128, -418, 'A2_MASK210R', color='Blue', ha='center')
ax.text(88, -418, 'A5_MASK430R', color='Red', ha='center')
ax.text(67, -418, 'A4_MASKSWB', color='Blue', ha='center')
ax.text(48, -419, 'A5_MASKLWB', color='Red', ha='center')
# +
# Plot some Grism subarrays to demonstrate the TA (target acquisition) process.
fig = plt.figure(figsize=(8, 6))
# Start with LW grism subarray. First call marks the reference point in black,
# second call outlines the aperture in red.
nrc_siaf.plot('tel', ['NRCA5_GRISM64_F444W'],
              mark_ref=True, label=False, color='k', fill_color='None', lw=1)
nrc_siaf.plot('tel', ['NRCA5_GRISM64_F444W'],
              mark_ref=False, label=False, color='r', fill_color='None', lw=2, clear=False)
# Add the TA boxes. Mark reference points in black, and outline in color.
nrc_siaf.plot('tel', ['NRCA5_TAGRISMTS_SCI_F444W'],
              mark_ref=True, label=False, color='k', fill_color='None', lw=1, clear=False)
nrc_siaf.plot('tel', ['NRCA5_TAGRISMTS32'],
              mark_ref=True, label=False, color='k', fill_color='None', lw=1, clear=False)
nrc_siaf.plot('tel', ['NRCA5_TAGRISMTS_SCI_F444W'],
              mark_ref=False, label=False, color='Purple', fill_color='None', lw=2, clear=False)
nrc_siaf.plot('tel', ['NRCA5_TAGRISMTS32'],
              mark_ref=False, label=False, color='Yellow', fill_color='None', lw=2, clear=False)
# Add the associated SW subarrays in dotted lines
nrc_siaf.plot('tel', ['NRCA1_GRISMTS64','NRCA3_GRISMTS64'],
              mark_ref=False, label=False, color='k', ls=':', fill_color='None', lw=1, clear=False)
# restrict axes range (reversed x-axis: V2 increases to the left)
plt.xlim(100, 70)
plt.title('Module A, Grism Target Acquisition')
# Fill in apertures.
# NOTE(review): 'NRCA5_GRISM64_F444W' appears twice in this list (its fill
# alpha therefore accumulates), and 'NRCA5_FULL' is filled even though the
# axes are clipped — looks like a copy/paste left-over; confirm intent.
aperfill = ['NRCA5_FULL', 'NRCA5_GRISM64_F444W', 'NRCA5_GRISM64_F444W']
for i in aperfill:
    lw = nrc_siaf[i]
    c = lw.corners('tel')
    clist = np.array([[c[0][i], c[1][i]] for i in [0, 1, 2, 3]])
    line = plt.Polygon(clist, facecolor='r', closed=True, alpha=0.3)
    plt.gca().add_line(line)
# Highlight the 32-pixel TA box in yellow
lw = nrc_siaf['NRCA5_TAGRISMTS32']
c = lw.corners('tel')
clist = np.array([[c[0][i], c[1][i]] for i in [0, 1, 2, 3]])
line = plt.Polygon(clist, facecolor='Yellow', closed=True, alpha=0.5)
plt.gca().add_line(line)
ax = plt.gca()
# Annotate
ax.text(94.5, -556.6, 'NRCA5_GRISM64_F444W (target position after grism deflection)', color='k')
ax.text(93.5, -555.0, 'NRCA5_TAGRISMTS_SCI_F444W (TA destination)', color='k')
ax.text(80.0, -553.5, 'NRCA5_TAGRISMTS32 (TA)', color='k')
ax.text(99.5, -558.0, 'SUBGRISM64', color='k')
# -
| examples/make_nircam_siaf_figures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Main imports
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
import sys
import re
import pandas as pd
import numpy as np
import _pickle as cPickle
import matplotlib.pyplot as plt
import plotly
import plotly.graph_objs as go
from matplotlib.patches import Circle, RegularPolygon
from matplotlib.path import Path
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
from matplotlib.spines import Spine
from matplotlib.transforms import Affine2D
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
# Make SUMO's bundled python tools importable (they ship under
# $SUMO_HOME/tools); abort early with a clear message when the SUMO
# installation cannot be located.
if 'SUMO_HOME' in os.environ:
    tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
    sys.path.append(tools)
    import traci
else:
    sys.exit("please declare environment variable 'SUMO_HOME'")
from model_classes import ActorCriticNetwork, Agent, DQN
# -
# ### Important constants and state dict
# Size of the discrete action space — presumably one regulation decision per
# controllable zone (see action_to_zone pickle below); TODO confirm.
n_actions = 5981
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Experiment registry: for each algorithm/scenario combination, the trained
# checkpoint per network depth, the SUMO config used for training, and a
# plot label.
experiment_dict = {
    'dqn_base': {
        'weight_path_1_layer': './resultados_centro_1mlp/escenario0/1_layer/policy_net_weights_experiment_ep_2999.pt',
        'weight_path_2_layer': './resultados_centro_1mlp/escenario0/2_layer/policy_net_weights_experiment_ep_2999.pt',
        'weight_path_3_layer': './resultados_centro_1mlp/escenario0/3_layer/policy_net_weights_experiment_ep_2999.pt',
        'config': "../sumo_simulation/sim_config/km2_centro/scenario/osm.sumocfg",
        'plot_name': 'dqn_scenario_0'
    },
    'dqn_case_2x': {
        'weight_path_1_layer': './resultados_centro_1mlp/escenario1/1_layer/policy_net_weights_experiment_ep_2999.pt',
        'weight_path_2_layer': './resultados_centro_1mlp/escenario1/2_layer/policy_net_weights_experiment_ep_2999.pt',
        'weight_path_3_layer': './resultados_centro_1mlp/escenario1/3_layer/policy_net_weights_experiment_ep_2999.pt',
        'config': "../sumo_simulation/sim_config/km2_centro/scenario_2/osm.sumocfg",
        'plot_name': 'dqn_scenario_2',
    },
    'dqn_case_4x': {
        'weight_path_1_layer': './resultados_centro_1mlp/escenario2/1_layer/policy_net_weights_experiment_ep_2999.pt',
        'weight_path_2_layer': './resultados_centro_1mlp/escenario2/2_layer/policy_net_weights_experiment_ep_2999.pt',
        'weight_path_3_layer': './resultados_centro_1mlp/escenario2/3_layer/policy_net_weights_experiment_ep_2999.pt',
        'config': "../sumo_simulation/sim_config/km2_centro/scenario_3/osm.sumocfg",
        'plot_name': 'dqn_scenario_3'
    },
    'pg_base': {
        'weight_path_1_layer': './resultados_pg/escenario0/1_layer/ac_weights_experiment_ep_2999.pt',
        'weight_path_2_layer': './resultados_pg/escenario0/2_layer/ac_weights_experiment_ep_2999.pt',
        'weight_path_3_layer': './resultados_pg/escenario0/3_layer/ac_weights_experiment_ep_2999.pt',
        'config': "../sumo_simulation/sim_config/km2_centro/scenario/osm.sumocfg",
        'plot_name': 'scenario_0_pg'
    },
    'pg_2x': {
        'weight_path_1_layer': './resultados_pg/escenario1/1_layer/ac_weights_experiment_ep_2999.pt',
        'weight_path_2_layer': './resultados_pg/escenario1/2_layer/ac_weights_experiment_ep_2999.pt',
        'weight_path_3_layer': './resultados_pg/escenario1/3_layer/ac_weights_experiment_ep_1293.pt',
        'config': "../sumo_simulation/sim_config/km2_centro/scenario_2/osm.sumocfg",
        'plot_name': 'scenario_1_pg'
    },
    'pg_4x': {
        'weight_path_1_layer': './resultados_pg/escenario2/1_layer/ac_weights_experiment_ep_2999.pt',
        'weight_path_2_layer': './resultados_pg/escenario2/2_layer/ac_weights_experiment_ep_2999.pt',
        'weight_path_3_layer': './resultados_pg/escenario2/3_layer/ac_weights_experiment_ep_2192.pt',
        'config': "../sumo_simulation/sim_config/km2_centro/scenario_3/osm.sumocfg",
        'plot_name': 'scenario_2_pg'
    }
}
# ### Helper functions
def load_dqn_sd(state_dict, num_layers):
    """Build a DQN policy network and load a DataParallel checkpoint into it.

    Checkpoints saved through nn.DataParallel prefix every parameter key with
    'module.'. The original implementation renamed each key by hand (and
    mutated the caller's dict in place), which breaks whenever the
    architecture changes; strip the prefix generically instead.

    :param dict state_dict: checkpoint state dict (keys may be 'module.'-prefixed)
    :param str num_layers: '1', '2', or anything else for a 3-hidden-layer net
    :return: the DQN policy network on `device`, in eval mode
    """
    # Strip the DataParallel prefix from every key; leaves non-prefixed
    # checkpoints untouched and does not mutate the caller's dict.
    state_dict = {key[len('module.'):] if key.startswith('module.') else key: value
                  for key, value in state_dict.items()}
    if num_layers == '1':
        policy_net = DQN(n_actions, False, False).to(device)
    elif num_layers == '2':
        policy_net = DQN(n_actions, True, False).to(device)
    else:
        policy_net = DQN(n_actions, True, True).to(device)
    policy_net.load_state_dict(state_dict)
    policy_net.eval()
    return policy_net
def load_pg_sd(state_dict, num_layers):
    """Instantiate an actor-critic Agent of the requested depth and load weights.

    :param dict state_dict: checkpoint for the agent's actor_critic network
    :param str num_layers: '1', '2', or anything else for a 3-hidden-layer net
    :return: the Agent with its actor_critic network in eval mode
    """
    # Common hyper-parameters; the network depth only changes which layer
    # sizes are disabled.
    kwargs = dict(alpha=0.001, input_dims=[6], gamma=0.001, n_actions=n_actions)
    if num_layers == '1':
        kwargs.update(layer2_size=False, layer3_size=False)
    elif num_layers == '2':
        kwargs.update(layer3_size=False)
    agent = Agent(**kwargs)
    agent.actor_critic.load_state_dict(state_dict)
    agent.actor_critic.eval()
    return agent
# ### Main agent loops
def run_agent_simulation(key, weight_path, sumoCmd):
    """Run a trained agent against a SUMO scenario for one simulated day.

    The agent observes accumulated lane emissions and, once per simulated
    hour, toggles truck access on the lanes of each zone.

    :param str key: experiment key; contains 'dqn' for DQN agents, PG otherwise
    :param str weight_path: checkpoint path; the layer count is parsed from it
    :param list sumoCmd: command line used to launch SUMO through traci
    :return: (per-step summed lane metrics, per-step mean truck CO2, agent outputs)

    NOTE(review): the original cell's indentation was ambiguous; per-step
    measurement (rather than per-hour) is assumed below — confirm.
    """
    # The network depth is encoded as a digit in the checkpoint path; its
    # position differs between the DQN and PG directory layouts.
    # FIX: raw string avoids the invalid-escape-sequence warning of '\d'.
    num_layers = re.findall(r'\d', weight_path)
    if 'dqn' in key:
        num_layers = num_layers[2]
    else:
        num_layers = num_layers[1]
    state_dict = torch.load(weight_path)
    if 'dqn' in key:
        agent_kind = 'dqn'
        agent = load_dqn_sd(state_dict, num_layers)
    else:
        agent_kind = 'pg'
        agent = load_pg_sd(state_dict, num_layers)
    traci.start(sumoCmd)
    # Maps each action index to the list of lane ids it regulates (None = no-op).
    # FIX: close the pickle file handle (was leaked via a bare open()).
    with open('../sumo_simulation/input/action_to_zone_km2_lince.pkl', 'rb') as pkl_file:
        action_dict = cPickle.load(pkl_file)
    state = torch.zeros([1, 6], device=device)
    traci_ep = 0
    lane_id_list = traci.lane.getIDList()
    states_agent = []            # per-step lane metrics summed over lanes
    states_agent_mean = []       # per-step lane metrics averaged over lanes
    truck_emissions_agent = []   # per-step mean CO2 of trucks only
    for e in range(86400):
        # The agent acts once per simulated hour (3600 one-second steps).
        if traci_ep % 3600 == 0 and traci_ep != 0:
            # Start agent interaction
            if agent_kind == 'dqn':
                action = agent(state)
            else:
                action = agent.choose_action(state)
            # Apply regulation: positive outputs disallow trucks on that zone.
            reg_action = action > 0
            for index, lane_id in enumerate(reg_action.view(-1)):
                if lane_id.item() == 1:
                    if action_dict[index] is not None:
                        traci.lane.setDisallowed(action_dict[index], ['truck'])
                else:
                    if action_dict[index] is not None:
                        traci.lane.setAllowed(action_dict[index], ['truck'])
        # Mean CO2 emission of the trucks currently in the network.
        vehicle_id_list = traci.vehicle.getIDList()
        vehicle_types = [traci.vehicle.getTypeID(v_id) for v_id in vehicle_id_list]
        vehicle_co2 = [traci.vehicle.getCO2Emission(v_id) for i, v_id in enumerate(vehicle_id_list)
                       if 'truck' in vehicle_types[i]]
        try:
            truck_emissions_agent.append(sum(vehicle_co2) / len(vehicle_co2))
        except ZeroDivisionError:
            # No trucks present this step. FIX: the original bare `except:`
            # would also have silently hidden unrelated errors.
            truck_emissions_agent.append(0)
        # Get simulation values per lane
        co2 = [traci.lane.getCO2Emission(edge_id) for edge_id in lane_id_list]
        co = [traci.lane.getCOEmission(edge_id) for edge_id in lane_id_list]
        nox = [traci.lane.getNOxEmission(edge_id) for edge_id in lane_id_list]
        pmx = [traci.lane.getPMxEmission(edge_id) for edge_id in lane_id_list]
        noise = [traci.lane.getNoiseEmission(edge_id) for edge_id in lane_id_list]
        fuel = [traci.lane.getFuelConsumption(edge_id) for edge_id in lane_id_list]
        sim_results = np.array([co2, co, pmx, nox, noise, fuel])
        next_state = np.transpose(sim_results).mean(axis=0)
        states_agent.append(np.transpose(sim_results).sum(axis=0))
        states_agent_mean.append(next_state)
        next_state = torch.from_numpy(next_state).to(device).float()
        state += next_state  # accumulate observations into the agent's state
        traci.simulationStep()
        traci_ep += 1
    traci.close(False)
    # Re-evaluate the agent on every observed mean state.
    if agent_kind == 'dqn':
        values = [agent(torch.from_numpy(state).float().to(device).view(-1, 6))
                  for state in states_agent_mean]
    else:
        values = [agent.choose_action(torch.from_numpy(state).float().to(device).view(-1, 6))
                  for state in states_agent_mean]
    #values = torch.cat(values).view(-1).detach().cpu().numpy()
    return states_agent, truck_emissions_agent, values
# ### Result Dictionary to store results
# +
# One result slot per (algorithm, network depth); each slot holds the three
# geometry test scenarios filled in by the main loop below.
# FIX: the original pre-filled the inner dicts with ['cauchy', 'chi_squared',
# 'gaussian'] placeholder keys that nothing ever read (the code stores and
# reads 'sc0'/'sc1'/'sc2'); initialize with the actual scenario keys instead.
result_dict = {'{}_{}_layers'.format(algorithm, layer): {test: '' for test in ['sc0', 'sc1', 'sc2']}
               for algorithm in experiment_dict.keys() for layer in range(1, 4)}
# -
# ### Main loop for tests
# Run every trained checkpoint against the three geometric test scenarios
# and store the results keyed by '<algorithm>_<depth>_layers' and scenario.
# NOTE(review): the SUMO binary and config paths below are machine-specific
# absolute paths.
for key in experiment_dict.keys():
    for path in experiment_dict[key]:
        if 'weight' in path:  # skip the 'config'/'plot_name' entries
            for test in ['sc0', 'sc1', 'sc2']:
                sumoCmd = ['/usr/bin/sumo/bin/sumo', '-c',
                           '/home/andres/Documents/tesis_pregrado/sumo_simulation/sim_config/km2_lince/geo_test/{}/osm.sumocfg'.format(test),
                           '-e', '86400']
                print(key, path, test)
                # e.g. 'weight_path_2_layer'.split('_')[2] == '2' -> key 'dqn_base_2_layers'
                result_dict['_'.join([key, path.split('_')[2], 'layers'])][test] = run_agent_simulation(key, experiment_dict[key][path], sumoCmd)
# ### Result Values
# Persist and print the per-scenario totals. Element [0] of each result is
# states_agent (per-step lane metrics); summing over steps gives daily totals
# in the column order [co2, co, pmx, nox, noise, fuel] (see run_agent_simulation).
for test in ['sc0', 'sc1', 'sc2']:
    for key in result_dict.keys():
        print(key, test)
        np.save('arrays_lince/{}_{}.npy'.format(key, test), np.array(result_dict[key][test][0]).sum(axis=0))
        print(list(np.array(result_dict[key][test][0]).sum(axis=0)))
# Quick look at the base scenario only.
for key in result_dict.keys():
    print(key)
    print(np.array(result_dict[key]['sc0'][0]).sum(axis=0))
# One LaTeX table per scenario: one row per experiment, one column per metric.
for test in ['sc0', 'sc1', 'sc2']:
    d = pd.DataFrame([np.array(result_dict[key][test][0]).sum(axis=0) for key in result_dict.keys()])
    print(d.to_latex())
| notebooks/Lince test cases.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ATTEMPTING TO USE REGULARIZATION
import os
# Work from the parent directory so that `utilities` and relative result
# paths resolve; print it as a sanity check.
os.chdir(os.path.pardir)
print(os.getcwd())
from utilities import plot_fd_and_original, plot_fd_and_speeds, get_all_result_data, plot_results
import matplotlib.pyplot as plt
# +
### CORRIDOR_85
# -
# corridor scenario, with dropout.
# Keys encode the hidden-layer architecture and (after the dash) presumably
# the dropout rate, e.g. '(4, 2)-0.4' — TODO confirm key format. Values map
# 'tr'/'val'/'test' to (mean, std) of the loss across runs — confirm metric.
corridor_85_results = {'(1,)--1': {'tr': (0.045912862271070484, 0.0031267345031365146), 'val': (0.04742169372737408, 0.0031119121269140935), 'test': (0.0476672403847664, 0.0039469590234120335)},
                       '(2,)--1': {'tr': (0.03664128817617894, 0.003182476102173639), 'val': (0.03853945817798376, 0.0033564231210634296), 'test': (0.04050630591336996, 0.002731126083062373)},
                       '(3,)--1': {'tr': (0.033373928852379324, 0.0018829191626169833), 'val': (0.035154239647090434, 0.0018664107327763268), 'test': (0.03871563824589665, 0.0016801948788747623)},
                       '(4, 2)-0.4': {'tr': (0.030664595104753972, 0.001851953384442897), 'val': (0.03249462600797414, 0.0019940795626529786), 'test': (0.03825132212658357, 0.0019405094251864585)},
                       '(5, 2)-0.4': {'tr': (0.029973307326436043, 0.0019320427350314425), 'val': (0.03196305438876153, 0.002029259209802658), 'test': (0.03746844874792181, 0.0010017009011062954)},
                       '(5, 3)-0.4': {'tr': (0.03051294256001711, 0.002172826645295372), 'val': (0.03253234028816223, 0.0023648521244316076), 'test': (0.03710711689572175, 0.0018621574798779472)},
                       '(6, 3)-0.4': {'tr': (0.03004354938864708, 0.0021208966134423995), 'val': (0.03249930314719677, 0.0027163875850789647), 'test': (0.03759107491804222, 0.0019266819358078092)},
                       '(10, 4)-0.4': {'tr': (0.02741089586168528, 0.001986981721097928), 'val': (0.02964006066322326, 0.0021461797559127775), 'test': (0.03635013228379024, 0.0010135238565525883)}}
tr_mean, tr_std, val_mean, val_std, test_mean, test_std = get_all_result_data(corridor_85_results)
plot_results(corridor_85_results, tr_mean, tr_std, val_mean, val_std, test_mean, test_std, title="corridor_85_dropout")
# Same corridor scenario without dropout.
# NOTE: this rebinds `corridor_85_results`, overwriting the dropout results
# above after they have been plotted.
corridor_85_results = {'(1,)': {'tr': (0.045258586704730985, 0.004853406766732973), 'val': (0.04714435026049614, 0.004653697406037759), 'test': (0.04686351530840396, 0.0012961715061691429)},
                       '(2,)': {'tr': (0.03719027779996395, 0.0020733957764011005), 'val': (0.041013209708034994, 0.0023950303734451657), 'test': (0.041472892633610745, 0.0024322010449302914)},
                       '(3,)': {'tr': (0.034038560874760156, 0.0013176987634252566), 'val': (0.03938843499869109, 0.0014865264094550156), 'test': (0.03914595563880523, 0.0010876265935449074)},
                       '(4, 2)': {'tr': (0.03526925183832645, 0.0029871246019426367), 'val': (0.04105438970029355, 0.0023958553187249138), 'test': (0.04126933727288883, 0.002845698267115585)},
                       '(5, 2)': {'tr': (0.035543992817401886, 0.0037010336212553643), 'val': (0.04197082627564668, 0.0025311766537130707), 'test': (0.04235170498124496, 0.00310823593982254)},
                       '(5, 3)': {'tr': (0.0334813929721713, 0.0037666412204319824), 'val': (0.041099048890173434, 0.003189935635059386), 'test': (0.04175731243589502, 0.003266454835106925)},
                       '(6, 3)': {'tr': (0.03116175480186939, 0.0022358561594462813), 'val': (0.03927014149725437, 0.002838615402109067), 'test': (0.039632171653358556, 0.001673111008447824)},
                       '(10, 4)': {'tr': (0.03036436103284359, 0.0047435348802268695), 'val': (0.0395459621399641, 0.003676904672583768), 'test': (0.03933931185179666, 0.002598055735067817)}}
tr_mean, tr_std, val_mean, val_std, test_mean, test_std = get_all_result_data(corridor_85_results)
plot_results(corridor_85_results, tr_mean, tr_std, val_mean, val_std, test_mean, test_std, title="corridor_85")
# ### BOTTLENECK_070
# bottleneck scenario, with dropout (same key/value layout as the corridor
# results above). Reformatted one-per-line for readability; values unchanged.
bottleneck_070_results = {'(1,)--1': {'tr': (0.04420872837305069, 0.0031599580431971148), 'val': (0.04631242074072361, 0.00335683444621767), 'test': (0.04404655892877306, 0.0006008492096808868)},
                          '(2,)--1': {'tr': (0.04261961035430431, 0.002050199416835133), 'val': (0.045190324261784556, 0.002178216300224125), 'test': (0.04425408023114118, 0.0008173671821354187)},
                          '(3,)-0.2': {'tr': (0.04100157126784325, 0.0010626492394933162), 'val': (0.04339961282908916, 0.001260815392322356), 'test': (0.04458922187974386, 0.0010175661421570764)},
                          '(4, 2)-0.2': {'tr': (0.03906548090279102, 0.004921965053963709), 'val': (0.04078196577727795, 0.0036245149614968913), 'test': (0.046912882178146036, 0.0033055514368516012)},
                          '(5, 2)-0.2': {'tr': (0.03745610669255257, 0.0019795109400485966), 'val': (0.04004896491765976, 0.0020840289722979183), 'test': (0.04501635135997649, 0.002588274218708516)},
                          '(5, 3)-0.2': {'tr': (0.03834013599902392, 0.006017051040785215), 'val': (0.040765413381159306, 0.005984761763053988), 'test': (0.045824969932470705, 0.003220847503347146)},
                          '(6, 3)-0.2': {'tr': (0.0381568444147706, 0.004517518886731544), 'val': (0.040278446376323704, 0.004180930432849594), 'test': (0.04526766299581149, 0.0028434087673124566)},
                          '(10, 4)-0.2': {'tr': (0.0350448851287365, 0.004679395392748039), 'val': (0.03768944654613733, 0.004560997047374159), 'test': (0.04326768843836832, 0.003076716056470942)}}
tr_mean, tr_std, val_mean, val_std, test_mean, test_std = get_all_result_data(bottleneck_070_results)
plot_results(bottleneck_070_results, tr_mean, tr_std, val_mean, val_std, test_mean, test_std, title="bottleneck_070_dropout")
# Bottleneck scenario without dropout; rebinds `bottleneck_070_results`
# (the dropout results above were already plotted).
bottleneck_070_results = {'(1,)': {'tr': (0.04955769553780556, 0.0057570421608399685), 'val': (0.05123388793319463, 0.005096320416198529), 'test': (0.051791704269027836, 0.005582774144924822)},
                          '(2,)': {'tr': (0.04184448003768921, 0.004760314561714955), 'val': (0.045550852119922644, 0.0046212267515334735), 'test': (0.04491133585904681, 0.0022928527407057274)},
                          '(3,)': {'tr': (0.04102290675044059, 0.003348045017959455), 'val': (0.04824198216199875, 0.004444882233662878), 'test': (0.04584651970424176, 0.002920595092481756)},
                          '(4, 2)': {'tr': (0.04031206876039505, 0.0051648797471331685), 'val': (0.048427933976054195, 0.0037716492903911523), 'test': (0.04719023609986878, 0.004526066784584657)},
                          '(5, 2)': {'tr': (0.03668047532439232, 0.003667154431939146), 'val': (0.04531076699495316, 0.0025808096847436896), 'test': (0.0461659374304043, 0.0027352878110425798)},
                          '(5, 3)': {'tr': (0.04162880904972553, 0.007151998469464794), 'val': (0.049127314463257785, 0.005513828520265613), 'test': (0.049083456967275084, 0.005650612198688121)},
                          '(6, 3)': {'tr': (0.037646884098649025, 0.005999453764399783), 'val': (0.04634510710835457, 0.004201980591786483), 'test': (0.04620908804838612, 0.0035137353907239953)},
                          '(10, 4)': {'tr': (0.036670266091823576, 0.00472639424911708), 'val': (0.04604802552610636, 0.003957607023497358), 'test': (0.045713807815980285, 0.003483159691551754)}}
tr_mean, tr_std, val_mean, val_std, test_mean, test_std = get_all_result_data(bottleneck_070_results)
plot_results(bottleneck_070_results, tr_mean, tr_std, val_mean, val_std, test_mean, test_std, title="bottleneck_070")
| src/attempts/regularization_attempt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import scipy.integrate
import numpy as np
import pandas as pd
# SEIR epidemic-model parameters.
R0 = 2.2  # Basic reproduction number
D_incubation = 5.2  # Length of incubation period (days)
D_infectious = 1  # Duration patient is infectious (days)
# 1,38,00,04,000 per the original note; ~1.38 billion, presumably India's
# population — confirm. Source WHO 2020.
population = 1380004000
def model(y, t, D_inf = D_infectious, Rt = R0):
    """SEIR derivative function, in the form expected by scipy's odeint.

    :param array y: current S, E, I, R compartment values
    :param array t: time (days); unused because the system is autonomous
    :param float D_inf: duration a patient is infectious (days)
    :param float Rt: effective reproduction number
    :return: [dS/dt, dE/dt, dI/dt, dR/dt]
    """
    N = population
    beta = Rt / D_inf          # transmission rate
    sigma = 1 / D_incubation   # E -> I transition rate
    gamma = 1 / D_inf          # I -> R recovery rate
    S, E, I, R = y
    dS = -beta * S * I / N
    dE = beta * S * I / N - sigma * E
    dI = sigma * E - gamma * I
    dR = gamma * I
    return ([dS, dE, dI, dR])
def solve(model, population, E0, D_inf, Rt, days=365):
    """Integrate the SEIR ODEs and return the (T, S, E, I, R) time series.

    :param model: callable f(y, t, D_inf, Rt) returning the SEIR derivatives
    :param population: total population size N
    :param E0: number of initially exposed individuals
    :param D_inf: duration (days) a patient stays infectious
    :param Rt: effective reproduction number, forwarded to *model*
    :param days: simulation horizon in days; defaults to 365, keeping the
        original hard-coded one-year run backward compatible
    """
    T = np.arange(days)
    Y0 = population - E0, E0, 0, 0  # S, E, I, R at the initial step
    y_data_var = scipy.integrate.odeint(model, Y0, T, args=(D_inf, Rt))
    S, E, I, R = y_data_var.T  # transpose and unpack the solution columns
    return T, S, E, I, R
# Sensitivity sweep: how does the initially-exposed count shift the epidemic peak?
print("Plotting the variation of initial infected population")
for initp in [1,100,1000,10000]:
    X, S, E, I, R = solve(model, population, initp, D_infectious, R0)
    i_peak = np.amax(I)  # height of the infection curve
    day = np.where(i_peak == I)  # index (day) at which the peak occurs
    # 0.03 is an assumed 3% case-fatality ratio -- TODO confirm the source
    print("Initial Population =", initp, "Peak day =" ,day[0][0], "Peak infected =", f"{i_peak:,.0f}", "Peak Death =", f"{(i_peak*0.03):,.0f}")
# The above table shows that the peak day is affected by the initial number of cases reported
# +
# Assemble the final sweep iteration's arrays (initp = 10000 from the loop
# above) into a tidy frame for plotting.
r = pd.DataFrame({
    "Day": X,
    "S": S,
    "E": E,
    "I": I,
    "R": R
})
# E and I share a scale, so plot them together.
ax1 = r.plot(
    x = "Day",
    y = ["E", "I"],
    grid = True,
    figsize = (10, 5)
)
# textbox
# NOTE(review): peakx is hard-coded; assumes the infection peak falls on
# day 70 for this run -- verify, or compute it via r['I'].idxmax().
peakx = 70
ax1.text(
    peakx + 2,
    r['I'].iloc[peakx] * 1.25,
    "Peak Infections\n" + str(int(r['I'].iloc[peakx])),
    bbox={'facecolor':'white'}
)
# red cross marking the assumed peak
ax1.scatter(
    [peakx],
    [r['I'].iloc[peakx]],
    c='red',
    marker='x'
)
# arrow pointing at the exposed curve on the same (assumed) peak day
ax1.annotate(
    "Peak Exposed",
    xy=(70, r['E'].iloc[70]),
    xytext=(100, 160000000),
    arrowprops={},
    bbox={'facecolor':'white'}
)
# Scales are different hence plotting separately
ax2 = r.plot(
    x = "Day",
    y = ["S", "R"],
    grid = True,
    figsize = (10, 5)
)
# Day by which the epidemic has largely settled in this run.
settledx = 150
residualS = r['S'].iloc[settledx]
residualR = r['R'].iloc[settledx]
# red cross
ax2.scatter(
    [settledx],
    [residualS],
    c='red',
    marker='x'
)
ax2.text(
    settledx + 2,
    residualS * 1.25,
    "Residual Susceptible\n" + str(int(residualS)),
    bbox={'facecolor':'white'}
)
# red cross
ax2.scatter(
    [settledx],
    [residualR],
    c='red',
    marker='x'
)
ax2.text(
    settledx + 2,
    residualR * 1.1,
    "Residual Recovered\n" + str(int(residualR)),
    bbox={'facecolor':'white'}
)
# NOTE(review): "Exposed" here is recovered minus susceptible; presumably the
# ever-infected count (population - residualS) was intended -- verify.
Exposed=residualR-residualS
print("Exposed=", int(Exposed), " %=", (Exposed/population)*100)
# # arrow
# ax1.annotate(
#     "Peak Exposed",
#     xy=(70, r['E'].iloc[70]),
#     xytext=(100, 160000000),
#     arrowprops={},
#     bbox={'facecolor':'white'}
# )
# -
# Sensitivity sweep over the reproduction number, with a fixed seed of
# 3 initially-exposed individuals.
print("Plotting the variation of R0")
for Rt in [1.1,1.4,1.5,2.0,2.2,2.5,3.0]:
    X, S, E, I, R = solve(model, population, 3, D_infectious, Rt)
    i_peak = np.amax(I)  # peak height of the infection curve
    day = np.where(i_peak == I)  # day index of the peak
    # 0.03 is an assumed 3% case-fatality ratio -- TODO confirm
    print("R0 =", Rt, "Peak day =" ,day[0][0], "Peak infected =", f"{i_peak:,.0f}", "Peak Death =", f"{(i_peak*0.03):,.0f}")
# Sensitivity sweep over the infectious-period length.
# NOTE(review): the loop variable rebinds the module-level D_infectious, so
# every cell run after this one sees D_infectious == 3.0 -- verify intended.
print("Plotting the variation of D_infectious")
for D_infectious in [1.1,1.4,1.5,2.0,2.2,2.5,3.0]:
    X, S, E, I, R = solve(model, population, 3, D_infectious, R0)
    i_peak = np.amax(I)  # peak height of the infection curve
    day = np.where(i_peak == I)  # day index of the peak
    print("D_infectious =", D_infectious, "Peak day =" ,day[0][0], "Peak Death =", f"{(i_peak*0.03):,.0f}")
# Plot the last sweep iteration's S and R trajectories (arrays left over
# from the D_infectious loop above).
r = pd.DataFrame({
    "Day": X,
    "S": S,
    "E": E,
    "I": I,
    "R": R
})
# Scales are different hence plotting separately
r.plot(
    x = "Day",
    y = ["S", "R"],
    grid = True,
    #subplots = True,
    figsize = (10, 5)
)
| 05_SEIR_simple.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py2 (Kali)
# language: python
# name: kali
# ---
# # Comparison between three different cadences
# Now we can start comparing how different cadences can recover CARMA model parameters by fitting downsampled LCs to certain models. Here, we will be examining three proposed cadences: the baseline cadence, the baseline cadence without paired visits, and the rolling cadence. Please refer to the LSST Observing Strategy White Paper for a detailed description of each cadence. Below is a short introduction:
# - Baseline Cadence(**minion_1016**): 85% WFD + 15% Mini Surveys. The main survey, WFD, will try to cover as much area as possible in a given night, and each field will be observed twice in the same night.
# - Baseline, no visit pair(**kraken_1043**): Same as baseline cadence, except no requirement for visiting same field twice within one night. The result is a 2.4% increase in the total number of visit.
# - Rolling Cadence(**enigma_1260**): Non-uniform cadence. The observing frequency at some regions will be doubled or tripled within a given time interval, rotating to focus on the next region afterwards. The result is that some designated regions get more visits than others within one period of time, and overall uniformity is achieved at the end of the 10-year survey.
# #### Load Cadences
# %matplotlib inline
import numpy as np
import kali.carma
import kali
import gatspy
from matplotlib import pyplot as plt
plt.rcParams["figure.figsize"] = [14,8]
from lsstlc import * # derived LSST lightcurve sub-class
# +
# MAF output for the field at (RA, Dec) = (58, -27); each cadence provides a
# metadata record and a structured array of visits.
maf = np.load('/home/mount/MAF output/58_-27_poster.npz')
# get the result for minion_1016, baseline cadence
bl_meta = maf['meta1'] # [ra, dec, min_gap, opsim_id], min_gap in hours
bl_cadence = maf['cadence1'] # cadence in structured array
# rolling cadence (enigma_1260)
roll_meta = maf['meta3']
roll_cadence = maf['cadence3']
# +
# Separate MAF file for the no-visit-pairs variant (kraken_1043).
maf_np = np.load('/home/mount/MAF output/58_-27_np.npz')
# baseline no visit pairs
np_meta = maf_np['meta2']
np_cadence = maf_np['cadence2']
# -
print 'Total number of visits from each cadence are: \n'
print 'Baseline Cadence: {}'.format(bl_cadence.shape[0])
print 'Baseline no visit paris: {}'.format(np_cadence.shape[0])
print 'Rolling Cadence: {}'.format(roll_cadence.shape[0])
# #### Look at the distribution on a histogram:
fig = plt.figure()
# Per-sample weights so each histogram integrates to 100 (i.e. percentages).
w1 = np.ones_like(bl_cadence['expDate'])/float(bl_cadence.shape[0])
w2 = np.ones_like(np_cadence['expDate'])/float(np_cadence.shape[0])
w3 = np.ones_like(roll_cadence['expDate'])/float(roll_cadence.shape[0])
# expDate is in seconds; divide by 86400 to plot in days.
plt.hist([bl_cadence['expDate']/86400, np_cadence['expDate']/86400, roll_cadence['expDate']/86400], weights=[w1*100, w2*100, w3*100], bins=10,
         color=['g', 'orange', 'b'], label=['Baseline Cadence', 'Baseline w/o pairs', 'Rolling Cadence'])
plt.ylim(0,24)
plt.xlim(0,3650)  # full 10-year survey span, in days
plt.ylabel('Percentage of Visits',fontsize=25)
plt.legend(loc=2)
plt.locator_params(axis='y', nbins=4)
plt.xlabel('Time(days)', fontsize=25)
# #### Parameter Estimation
# Next, we downsample mock LCs gerenated using DRW and DHO at three cadences shown above, and then find the best-fit parameters for the downsampled LCs.
# ##### Fit with DRW
# +
# read in mock LC and downsample at different cadence
min_sep = float(bl_meta[2])  # minimum visit separation (hours) from MAF metadata
drw_lc = extLC('/home/mount/LC/drw_full.npz')  # full-resolution mock DRW light curve
drw_bl = lsstlc(bl_meta[0], bl_meta[1], bl_cadence['expDate'], drw_lc, min_sep)
drw_np = lsstlc(np_meta[0], np_meta[1], np_cadence['expDate'], drw_lc, min_sep)
drw_roll = lsstlc(roll_meta[0], roll_meta[1], roll_cadence['expDate'], drw_lc, min_sep)
# Initiate carma task: CARMA(1,0) is the damped random walk model
drwTask = kali.carma.CARMATask(1,0,nsteps = 500)
# -
drwTask.clear()
drwTask.fit(drw_bl)
print 'Bseline best timescales: {}'.format(drwTask.bestTau)
drwTask.clear()
drwTask.fit(drw_np)
print 'Bseline w/o visit pair best timescales: {}'.format(drwTask.bestTau)
drwTask.clear()
drwTask.fit(drw_roll)
print 'Rolling best timescales: {}'.format(drwTask.bestTau)
# ##### Fit with DHO
# +
# read in mock LC and downsample at different cadence
dho_lc = extLC('/home/mount/LC/dho_full.npz')  # full-resolution mock DHO light curve
dho_bl = lsstlc(bl_meta[0], bl_meta[1], bl_cadence['expDate'], dho_lc, min_sep)
dho_np = lsstlc(np_meta[0], np_meta[1], np_cadence['expDate'], dho_lc, min_sep)
dho_roll = lsstlc(roll_meta[0], roll_meta[1], roll_cadence['expDate'], dho_lc, min_sep)
# Initiate carma task: CARMA(2,1) is the damped harmonic oscillator model
dhoTask = kali.carma.CARMATask(2,1,nsteps = 500)
# -
dhoTask.clear()
dhoTask.fit(dho_bl)
print 'Bseline best timescales: {}'.format(dhoTask.bestTau)
dhoTask.clear()
dhoTask.fit(dho_np)
print 'Bseline w/o visit pair best timescales: {}'.format(dhoTask.bestTau)
dhoTask.clear()
dhoTask.fit(dho_roll)
print 'Rolling best timescales: {}'.format(dhoTask.bestTau)
# #### Best-fit parameters in a table
# #### DRW
# | Cadences/Input | $\tau$ (days) | $\sigma$ |
# | --- | --- | --- |
# | Input | 150 | 1.0 |
# | Baseline | 120.758 | 0.888 |
# | Baseline no pair | 119.490 | 0.883 |
# | Rolling | 122.864 | 0.920 |
# #### DHO
# | Cadences/Input | $\tau_{1}$ (days) | $\tau_{2}$ (days) | MA timescale (days) | $\sigma$ |
# | --- | --- | --- | --- |
# | Input | 107.8 | 33.2 | 5.5 | 1.0 |
# | Baseline | 151.418 | 36.706 | 7.633 | 1.138 |
# | Baseline no pair | 160.596 | 34.627 | 6.82 | 1.151 |
# | Rolling | 180.686 | 25.7 | 5.49 | 1.111 |
| Cadence/compare.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + pycharm={"name": "#%%\n"}
#https://machinelearningmastery.com/naive-bayes-classifier-scratch-python/
# Naive Bayes On The Iris Dataset
from csv import reader
from random import seed
from random import randrange
from math import sqrt
from math import exp
from math import pi
# Load a CSV file
def load_csv(filename):
    """Read *filename* with csv.reader, skipping blank rows."""
    with open(filename, 'r') as handle:
        rows = [record for record in reader(handle) if record]
    return rows
# Convert string column to float
def str_column_to_float(dataset, column):
    """Parse dataset[i][column] from string to float, in place."""
    for record in dataset:
        cell = record[column]
        record[column] = float(cell.strip())
# Convert string column to integer
def str_column_to_int(dataset, column):
    """Map the distinct class labels in *column* to small integers, in place.

    Labels are sorted before numbering so the mapping is deterministic
    across runs (iterating a raw set yields an arbitrary order, which made
    the label->int assignment vary between executions).
    Returns the label -> integer lookup dict.
    """
    class_values = [row[column] for row in dataset]
    lookup = {value: i for i, value in enumerate(sorted(set(class_values)))}
    for row in dataset:
        row[column] = lookup[row[column]]
    return lookup
# Split a dataset into k folds
def cross_validation_split(dataset, n_folds):
    """Randomly partition *dataset* into n_folds equal-size folds.

    Uses random.randrange, so call random.seed(...) first for
    reproducibility. Leftover rows (len(dataset) % n_folds) are dropped,
    matching the usual k-fold convention.
    """
    dataset_copy = list(dataset)
    fold_size = int(len(dataset) / n_folds)
    folds = list()
    for _ in range(n_folds):
        fold = list()
        while len(fold) < fold_size:
            # pop() keeps the sampling without replacement
            fold.append(dataset_copy.pop(randrange(len(dataset_copy))))
        folds.append(fold)
    return folds

# Calculate accuracy percentage
def accuracy_metric(actual, predicted):
    """Return the percentage of positions where actual == predicted.

    Returns 0.0 for empty input instead of raising ZeroDivisionError.
    """
    if not actual:
        return 0.0
    correct = sum(1 for a, p in zip(actual, predicted) if a == p)
    return correct / float(len(actual)) * 100.0

# Evaluate an algorithm using a cross validation split
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
    """k-fold cross-validate *algorithm* and return one accuracy per fold.

    *algorithm* is called as algorithm(train_set, test_set, *args); test
    rows are copied with their label (last element) blanked to None so the
    algorithm cannot peek at the answer. Fold exclusion is by identity,
    so duplicate-valued folds cannot remove the wrong one.
    """
    folds = cross_validation_split(dataset, n_folds)
    scores = list()
    for fold in folds:
        train_set = sum([f for f in folds if f is not fold], [])
        test_set = list()
        for row in fold:
            row_copy = list(row)
            row_copy[-1] = None  # hide the ground-truth label
            test_set.append(row_copy)
        predicted = algorithm(train_set, test_set, *args)
        actual = [row[-1] for row in fold]
        scores.append(accuracy_metric(actual, predicted))
    return scores
# Split the dataset by class values, returns a dictionary
def separate_by_class(dataset):
    """Group rows by their class label (last element of each row)."""
    separated = dict()
    for vector in dataset:
        separated.setdefault(vector[-1], list()).append(vector)
    return separated

# Calculate the mean of a list of numbers
def mean(numbers):
    """Arithmetic mean of *numbers*."""
    return sum(numbers) / float(len(numbers))

# Calculate the standard deviation of a list of numbers
def stdev(numbers):
    """Sample standard deviation (n - 1 denominator) of *numbers*."""
    avg = mean(numbers)
    variance = sum((value - avg) ** 2 for value in numbers) / float(len(numbers) - 1)
    return sqrt(variance)

# Calculate the mean, stdev and count for each column in a dataset
def summarize_dataset(dataset):
    """Per-column (mean, stdev, count) tuples, dropping the trailing class column."""
    columns = zip(*dataset)
    summaries = [(mean(col), stdev(col), len(col)) for col in columns]
    return summaries[:-1]

# Split dataset by class then calculate statistics for each column
def summarize_by_class(dataset):
    """Map each class label to its per-column summary statistics."""
    return {label: summarize_dataset(rows)
            for label, rows in separate_by_class(dataset).items()}
# Calculate the Gaussian probability distribution function for x
def calculate_probability(x, mean, stdev):
    """Gaussian pdf value at *x* for the given mean and standard deviation."""
    scale = 1 / (sqrt(2 * pi) * stdev)
    return scale * exp(-((x - mean) ** 2 / (2 * stdev ** 2)))

# Calculate the probabilities of predicting each class for a given row
def calculate_class_probabilities(summaries, row):
    """Unnormalized P(class) * prod_i P(row[i] | class) for every class."""
    total_rows = sum(stats[0][2] for stats in summaries.values())
    probabilities = dict()
    for class_value, class_summaries in summaries.items():
        # start from the class prior estimated from the training counts
        score = class_summaries[0][2] / float(total_rows)
        for i, (mu, sigma, _) in enumerate(class_summaries):
            score *= calculate_probability(row[i], mu, sigma)
        probabilities[class_value] = score
    return probabilities

# Predict the class for a given row
def predict(summaries, row):
    """Return the class with the highest posterior score for *row*."""
    probabilities = calculate_class_probabilities(summaries, row)
    best_label, best_prob = None, -1
    for class_value, probability in probabilities.items():
        # strict > keeps the first class encountered on exact ties
        if best_label is None or probability > best_prob:
            best_label, best_prob = class_value, probability
    return best_label

# Naive Bayes Algorithm
def naive_bayes(train, test):
    """Fit per-class Gaussian summaries on *train*; predict each test row."""
    summarize = summarize_by_class(train)
    return [predict(summarize, row) for row in test]
# Test Naive Bayes on Iris Dataset
seed(1)  # fixed RNG seed so the fold split is reproducible
filename = 'iris.csv'
dataset = load_csv(filename)
# columns 0..n-2 are numeric features stored as strings
for i in range(len(dataset[0])-1):
    str_column_to_float(dataset, i)
# convert class column to integers
str_column_to_int(dataset, len(dataset[0])-1)
# evaluate algorithm
n_folds = 5
scores = evaluate_algorithm(dataset, naive_bayes, n_folds)
print('Scores: %s' % scores)
print('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores))))
# + pycharm={"name": "#%%\n"}
| tutorials/python/MAP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Rainmier/Linear-Algebra-58019/blob/main/Matrix_Algebra.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="r4ZESP_DA8jF"
# ## Python program to inverse and transpose
# + colab={"base_uri": "https://localhost:8080/"} id="giRVXrzfAyNN" outputId="53ccd6f2-2646-423d-c359-a79d5ebde4f8"
# Build and display a 3x3 integer matrix.
import numpy as np
A = np.array([[1, 2, 3],
              [4, 5, 6],
              [7, 8, 9]])
print(A)
# + colab={"base_uri": "https://localhost:8080/"} id="RFbY56cTBLHy" outputId="177417f0-5164-45a9-e6ae-ca510269f5e0"
# Invert a 2x2 matrix with numpy.linalg.
import numpy as np
A = np.array([[1, 2],
              [4, 7]])
B = np.linalg.inv(A)
print(B)
# + colab={"base_uri": "https://localhost:8080/"} id="bMQDBNMKBSVk" outputId="a37b7a90-f459-453a-e1f6-bbc3634efb36"
# Same inversion as above, binding the result to a more descriptive name.
import numpy as np
A = np.array([[1, 2],
              [4, 7]])
invA = np.linalg.inv(A)
print(invA)
# + colab={"base_uri": "https://localhost:8080/"} id="zQknNMGIBWMj" outputId="355bf7a7-1e5a-4c20-d6b4-9882f86f32d8"
# Sanity check: A times its inverse should print the (numerical) identity.
C=np.dot(A,invA)
print(C)
# + colab={"base_uri": "https://localhost:8080/"} id="clJ2HlMqBYRo" outputId="7a4d837a-396a-4e41-fc0d-65c122551bb9"
## Python program to transpose the 3x3 matrix A = ([6,1,1],[4,-2,5],[2,8,7])
A = np.array([[6, 1, 1],
              [4, -2, 5],
              [2, 8, 7]])
print(A)
B = np.transpose(A)
print(B)
# + colab={"base_uri": "https://localhost:8080/"} id="w1ha8qeqBd4s" outputId="d98196cd-4b36-4fb6-d7d6-bc645e91fb7f"
# Python program to invert the 3x3 matrix A = ([6,1,1],[4,-2,5],[2,8,7])
A = np.array([[6, 1, 1],
              [4, -2, 5],
              [2, 8, 7]])
print(A)
B = np.linalg.inv(A)
print(B)
# + [markdown] id="xytiCY3LHiRc"
# ## Coding Activity 3
# + [markdown] id="JdR-fSbqGbzN"
# ## Create a Python Program to inverse and transpose a 4x4 matrix
#
#
#
# ## A = ( [6,1,1,3],[4,-2,5,1],[2,8,7,6],[3,1,9,7] )
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="iVaqt8-nGrWy" outputId="45b26a96-5c26-4ba5-f19d-30bb6a013c32"
# Python program to invert the 4x4 matrix A = ([6,1,1,3],[4,-2,5,1],[2,8,7,6],[3,1,9,7])
A = np.array([[6, 1, 1, 3],
              [4, -2, 5, 1],
              [2, 8, 7, 6],
              [3, 1, 9, 7]])
print(A)
B = np.linalg.inv(A)
print(B)
# + colab={"base_uri": "https://localhost:8080/"} id="TKPQWhsZGu-Q" outputId="6ceed9cc-146e-4351-f45a-e85659c749af"
# Python program to transpose the 4x4 matrix A = ([6,1,1,3],[4,-2,5,1],[2,8,7,6],[3,1,9,7])
A = np.array([[6, 1, 1, 3],
              [4, -2, 5, 1],
              [2, 8, 7, 6],
              [3, 1, 9, 7]])
print(A)
B = np.transpose(A)
print(B)
| Matrix_Algebra.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SARIMAX: Model selection, missing data
# The example mirrors Durbin and Koopman (2012), Chapter 8.4 in application of Box-Jenkins methodology to fit ARMA models. The novel feature is the ability of the model to work on datasets with missing values.
# + jupyter={"outputs_hidden": false}
# %matplotlib inline
# + jupyter={"outputs_hidden": false}
import numpy as np
import pandas as pd
from scipy.stats import norm
import statsmodels.api as sm
import matplotlib.pyplot as plt
# + jupyter={"outputs_hidden": false}
import requests
from io import BytesIO
from zipfile import ZipFile
# Download the Durbin & Koopman dataset archive in memory.
dk = requests.get('http://www.ssfpack.com/files/DK-data.zip').content
f = BytesIO(dk)
zipped = ZipFile(f)
# internet.dat is whitespace-delimited: the raw string r'\s+' avoids the
# invalid escape-sequence warning, and engine='python' is required for
# regex separators.
df = pd.read_table(
    BytesIO(zipped.read('internet.dat')),
    skiprows=1, header=None, sep=r'\s+', engine='python',
    names=['internet','dinternet']
)
# -
# ### Model Selection
#
# As in Durbin and Koopman, we force a number of the values to be missing.
# + jupyter={"outputs_hidden": false}
# Get the basic series
dta_full = df.dinternet[1:].values
dta_miss = dta_full.copy()
# Remove datapoints at fixed positions to mimic Durbin & Koopman's missing-
# data experiment (the -1 converts their 1-based indices to 0-based).
missing = np.r_[6,16,26,36,46,56,66,72,73,74,75,76,86,96]-1
dta_miss[missing] = np.nan
# -
# Then we can consider model selection using the Akaike information criterion (AIC), by running the model for each variant and selecting the model with the lowest AIC value.
#
# There are a couple of things to note here:
#
# - When running such a large batch of models, particularly when the autoregressive and moving average orders become large, there is the possibility of poor maximum likelihood convergence. Below we ignore the warnings since this example is illustrative.
# - We use the option `enforce_invertibility=False`, which allows the moving average polynomial to be non-invertible, so that more of the models are estimable.
# - Several of the models do not produce good results, and their AIC value is set to NaN. This is not surprising, as Durbin and Koopman note numerical problems with the high order models.
# + jupyter={"outputs_hidden": false}
import warnings
aic_full = pd.DataFrame(np.zeros((6,6), dtype=float))
aic_miss = pd.DataFrame(np.zeros((6,6), dtype=float))
# Silence the convergence warnings expected from the higher-order models.
warnings.simplefilter('ignore')
# Iterate over all ARMA(p,q) models with p,q in [0,6]
for p in range(6):
    for q in range(6):
        if p == 0 and q == 0:
            continue
        # Estimate the model with no missing datapoints
        mod = sm.tsa.statespace.SARIMAX(dta_full, order=(p,0,q), enforce_invertibility=False)
        try:
            res = mod.fit(disp=False)
            aic_full.iloc[p,q] = res.aic
        except Exception:
            # narrowed from a bare `except:` which would also swallow
            # KeyboardInterrupt/SystemExit; NaN marks failed specifications
            aic_full.iloc[p,q] = np.nan
        # Estimate the model with missing datapoints
        mod = sm.tsa.statespace.SARIMAX(dta_miss, order=(p,0,q), enforce_invertibility=False)
        try:
            res = mod.fit(disp=False)
            aic_miss.iloc[p,q] = res.aic
        except Exception:
            aic_miss.iloc[p,q] = np.nan
# -
# For the models estimated over the full (non-missing) dataset, the AIC chooses ARMA(1,1) or ARMA(3,0). Durbin and Koopman suggest the ARMA(1,1) specification is better due to parsimony.
#
# $$
# \text{Replication of:}\\
# \textbf{Table 8.1} ~~ \text{AIC for different ARMA models.}\\
# \newcommand{\r}[1]{{\color{red}{#1}}}
# \begin{array}{lrrrrrr}
# \hline
# q & 0 & 1 & 2 & 3 & 4 & 5 \\
# \hline
# p & {} & {} & {} & {} & {} & {} \\
# 0 & 0.00 & 549.81 & 519.87 & 520.27 & 519.38 & 518.86 \\
# 1 & 529.24 & \r{514.30} & 516.25 & 514.58 & 515.10 & 516.28 \\
# 2 & 522.18 & 516.29 & 517.16 & 515.77 & 513.24 & 514.73 \\
# 3 & \r{511.99} & 513.94 & 515.92 & 512.06 & 513.72 & 514.50 \\
# 4 & 513.93 & 512.89 & nan & nan & 514.81 & 516.08 \\
# 5 & 515.86 & 517.64 & nan & nan & nan & nan \\
# \hline
# \end{array}
# $$
#
# For the models estimated over missing dataset, the AIC chooses ARMA(1,1)
#
# $$
# \text{Replication of:}\\
# \textbf{Table 8.2} ~~ \text{AIC for different ARMA models with missing observations.}\\
# \begin{array}{lrrrrrr}
# \hline
# q & 0 & 1 & 2 & 3 & 4 & 5 \\
# \hline
# p & {} & {} & {} & {} & {} & {} \\
# 0 & 0.00 & 488.93 & 464.01 & 463.86 & 462.63 & 463.62 \\
# 1 & 468.01 & \r{457.54} & 459.35 & 458.66 & 459.15 & 461.01 \\
# 2 & 469.68 & nan & 460.48 & 459.43 & 459.23 & 460.47 \\
# 3 & 467.10 & 458.44 & 459.64 & 456.66 & 459.54 & 460.05 \\
# 4 & 469.00 & 459.52 & nan & 463.04 & 459.35 & 460.96 \\
# 5 & 471.32 & 461.26 & nan & nan & 461.00 & 462.97 \\
# \hline
# \end{array}
# $$
#
# **Note**: the AIC values are calculated differently than in Durbin and Koopman, but show overall similar trends.
# ### Postestimation
#
# Using the ARMA(1,1) specification selected above, we perform in-sample prediction and out-of-sample forecasting.
# + jupyter={"outputs_hidden": false}
# Statespace
# Fit the ARMA(1,1) specification selected above on the series with missing
# observations (the state-space framework handles NaNs natively).
mod = sm.tsa.statespace.SARIMAX(dta_miss, order=(1,0,1))
res = mod.fit(disp=False)
print(res.summary())
# + jupyter={"outputs_hidden": false}
# In-sample one-step-ahead predictions, and out-of-sample forecasts
nforecast = 20
predict = res.get_prediction(end=mod.nobs + nforecast)
idx = np.arange(len(predict.predicted_mean))
predict_ci = predict.conf_int(alpha=0.5)
# Graph: observed data as black dots.
fig, ax = plt.subplots(figsize=(12,6))
ax.xaxis.grid()
ax.plot(dta_miss, 'k.')
# Gray = in-sample one-step-ahead; dashed black = out-of-sample forecast.
# The 'k--' format string already encodes the dashed style, so the redundant
# (and conflicting) linestyle keyword was removed.
ax.plot(idx[:-nforecast], predict.predicted_mean[:-nforecast], 'gray')
ax.plot(idx[-nforecast:], predict.predicted_mean[-nforecast:], 'k--', linewidth=2)
ax.fill_between(idx, predict_ci[:, 0], predict_ci[:, 1], alpha=0.15)
ax.set(title='Figure 8.9 - Internet series');
| examples/notebooks/statespace_sarimax_internet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.9 64-bit
# language: python
# name: python3
# ---
import matplotlib as plt
import numpy as np
from numpy import pi
from qiskit import *
import math
from qiskit import Aer
from qiskit.tools.visualization import plot_histogram
from qiskit.circuit.library import QFT
# +
def rotation(circuit, phase, control_qubit, target_qubit):
    """Apply a controlled phase of 2*pi / 2**phase between the two qubits."""
    circuit.cp(2 * pi / 2**(phase), control_qubit, target_qubit)

def draper_adder_gate(num_qubits) :
    """Build the Draper (QFT-based) adder circuit over *num_qubits* qubits.

    The first half of the register holds the addend; the second half holds
    the accumulator, which the surrounding code places in the Fourier basis
    before composing this gate. Each target qubit receives progressively
    finer controlled rotations from the addend qubits.
    """
    circuit = QuantumCircuit(num_qubits)
    for target_qubit in range(num_qubits - 1, num_qubits // 2 - 1, -1) :
        phase_index = 1  # rotation order grows with control distance
        for control_qubit in range(num_qubits -1 - target_qubit, -1, -1) :
            rotation(circuit, phase_index, control_qubit, target_qubit)
            phase_index += 1
    return circuit
# +
# Qubits 0-3 hold the first operand; qubits 4-7 hold the second operand,
# which also serves as the accumulator for the sum.
circuit = QuantumCircuit(8, 8)
# first number will be 1010 and second 0101 which should be added to 1111
circuit.x([0, 3])
circuit.x([5, 6])
circuit.barrier()
# Move the accumulator register into the Fourier basis.
circuit.compose(QFT(4, do_swaps = True, name = 'qft'), qubits = [4, 5, 6, 7], inplace = True)
circuit.barrier()
# binary fraction addition of the two numbers, with the help of controlled phase gates
circuit.compose(draper_adder_gate(8).to_gate(label = 'Adder'), inplace = True)
circuit.barrier()
# Inverse QFT brings the sum back to the computational basis for readout.
circuit.compose(QFT(4, do_swaps = True, inverse=True, name='qft_dagger'), qubits = [4, 5, 6, 7], inplace = True)
circuit.barrier()
circuit.measure([0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7])
circuit.draw('mpl')
# +
# simulate the circuit on the QASM (measurement-sampling) backend
# (fixed a duplicated assignment: `simulator = simulator = ...`)
simulator = Aer.get_backend('qasm_simulator')
result = execute(circuit, backend = simulator, shots = 1000).result()
counts = result.get_counts()
plot_histogram(counts)
| arithmetics/qft_adder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Networks Using Blocks (VGG)
#
# We use the `vgg_block` function to implement this basic VGG block. This function takes the number of convolutional layers `num_convs` and the number of output channels `num_channels` as input.
# + attributes={"classes": [], "id": "", "n": "1"}
import d2l
from mxnet import gluon, init, nd
from mxnet.gluon import nn
def vgg_block(num_convs, num_channels):
    """One VGG block: *num_convs* 3x3 same-padding ReLU convolutions with
    *num_channels* output channels, followed by a 2x2 stride-2 max pool."""
    blk = nn.Sequential()
    for _ in range(num_convs):
        blk.add(nn.Conv2D(num_channels, kernel_size=3,
                          padding=1, activation='relu'))
    blk.add(nn.MaxPool2D(pool_size=2, strides=2))
    return blk
# + [markdown] slideshow={"slide_type": "slide"}
# **VGG Architecture**
#
# 
# + attributes={"classes": [], "id": "", "n": "2"} slideshow={"slide_type": "slide"}
# (num_convs, num_channels) per stage -- the VGG-11 architecture.
conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))
# -
# Now, we will implement VGG-11. This is a simple matter of executing a for loop over `conv_arch`.
# + attributes={"classes": [], "id": "", "n": "3"}
def vgg(conv_arch):
    """Assemble a VGG network from (num_convs, num_channels) stage specs,
    ending in two 4096-unit dropout-regularized dense layers and a 10-way
    output."""
    net = nn.Sequential()
    # The convolutional layer part.
    for (num_convs, num_channels) in conv_arch:
        net.add(vgg_block(num_convs, num_channels))
    # The fully connected layer part.
    net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5),
            nn.Dense(4096, activation='relu'), nn.Dropout(0.5),
            nn.Dense(10))
    return net
net = vgg(conv_arch)
# + [markdown] slideshow={"slide_type": "slide"}
# **Memory usage for single input**
#
# Next, we will construct a single-channel data example with a height and width of 224 to observe the output shape of each layer.
# + attributes={"classes": [], "id": "", "n": "4"}
# Push one dummy single-channel 224x224 image through the network and print
# the output shape after every block.
net.initialize()
X = nd.random.uniform(shape=(1, 1, 224, 224))
for blk in net:
    X = blk(X)
    print(blk.name, 'output shape:\t', X.shape)
# + [markdown] slideshow={"slide_type": "slide"}
# **Model Training**
#
# Since VGG-11 is more complicated than AlexNet, let's use a smaller network.
# + attributes={"classes": [], "id": "", "n": "5"}
# Shrink every stage's channel count by `ratio`; the layer structure is
# unchanged, only the width.
ratio = 4
small_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]
net = vgg(small_conv_arch)
# + [markdown] slideshow={"slide_type": "slide"}
# **Training Loop**
# -
lr, num_epochs, batch_size, ctx = 0.05, 5, 128, d2l.try_gpu()
net.initialize(ctx=ctx, init=init.Xavier())
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
# Fashion-MNIST images are resized to 224x224 to match the VGG input size.
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=224)
d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)
| slides/2_28/vgg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# language: python
# name: python3
# ---
# ## 3次元グラフ
import matplotlib.pyplot as plt
import numpy as np
# First, prepare the data: a helix (cos/sin against the z axis).
z = np.arange(-2*np.pi, 2*np.pi, 0.01)
x = np.cos(z)
y = np.sin(z)
# To make a 3-D plot, pass projection='3d' to add_subplot:
# ```
# fig.add_subplot(projection='3d')
# ```
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.plot(x, y, z, label="hoge")
plt.show()
# Everything else works the same as for 2-D plots.
# +
# A second helix shifted by half a turn, plotted alongside the first.
x2 = np.cos(z+np.pi); y2 = np.sin(z+np.pi)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.plot(x, y, z, label="1", linestyle='solid', color="blue")
ax.plot(x2, y2, z, label="2", linestyle='dashed', color="green")
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.legend()
ax.grid()
plt.show()
| graph/python_src/3d.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cooltools
# +
# cooltools.numutils.zoom_array?
# + language="javascript"
# require(["codemirror/keymap/sublime", "notebook/js/cell", "base/js/namespace"],
# function(sublime_keymap, cell, IPython) {
# cell.Cell.options_default.cm_config.keyMap = 'sublime';
#
# var cells = IPython.notebook.get_cells();
# for(var cl=0; cl< cells.length ; cl++){
# cells[cl].code_mirror.setOption('keyMap', 'sublime');
# }
# }
# );
# -
# change the cell width
from IPython.core.display import display, HTML
display(HTML("<style>.container { width: 90% !important; }</style>"))
# +
# %load_ext autoreload
# %autoreload 2
import os
import cooler
import cooltools
import numpy as np
from cooltools import eigdecomp
import bioframe
from pathlib import Path
import multiprocess as mp
import pandas as pd
from copy import copy
# %matplotlib inline
# %config InlineBackend.print_figure_kwargs={'facecolor' : "w"}
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# import all samples as dictionary ...
from samples import *
# +
# ins_samples = {
# "Hap1-WT-combined.hg19" : "Hap1-WT-combined.mcool",
# }
# Insulation-analysis samples: display name -> .mcool cooler path.
# NOTE(review): paths are absolute and host-specific -- verify they exist on
# the analysis machine before running.
ins_samples = {
    #cooler locations, some expected at these locations
    # CTCF degron
    "CkoCT442_NT_pool.hg19" : "/data/alv/CTCF_degron/data/CkoCT442-NT-pool.mcool",
    "CkoCT442_IAA_pool.hg19" : "/data/alv/CTCF_degron/data/CkoCT442-IAA-pool.mcool",
    #polII degron
    "PolII-NT.hg19" : "/data/alv/polII_degron/data/20200417_remap_polIIdegron/coolers_library_group/PTB2539-NT.hg19.mapq_30.1000.mcool",
    "PolII-IAA.hg19" : "/data/alv/polII_degron/data/20200417_remap_polIIdegron/coolers_library_group/PTB2539-IAA.hg19.mapq_30.1000.mcool",
    #AAVS1 clone
    "mutControl-NT.hg19" : "/data/alv/CTCF_degron/data/DDX55-TAF5L-ctrl-pool/CkoCT442-AAVS1-NT-pool.mcool",
    "mutControl-IAA.hg19" : "/data/alv/CTCF_degron/data/DDX55-TAF5L-ctrl-pool/CkoCT442-AAVS1-IAA-pool.mcool",
    #DDX55 clones
    "mutDDX55-NT.hg19" : "/data/alv/CTCF_degron/data/DDX55-TAF5L-ctrl-pool/DDX55-clones-NT.hg19.mapq_30.1000.mcool",
    "mutDDX55-IAA.hg19" : "/data/alv/CTCF_degron/data/DDX55-TAF5L-ctrl-pool/DDX55-clones-IAA.hg19.mapq_30.1000.mcool",
    #TAF5L clones
    "mutTAF5L-NT.hg19" : "/data/alv/CTCF_degron/data/DDX55-TAF5L-ctrl-pool/TAF5L-clones-NT.hg19.mapq_30.1000.mcool",
    "mutTAF5L-IAA.hg19" : "/data/alv/CTCF_degron/data/DDX55-TAF5L-ctrl-pool/TAF5L-clones-IAA.hg19.mapq_30.1000.mcool",
    #siCONTROL
    "siControl-NT.hg19" : "/data/alv/CTCF_degron/data/siCTRL-NT.hg19.mapq_30.1000.mcool",
    "siControl-IAA.hg19" : "/data/alv/CTCF_degron/data/siCTRL-IAA.hg19.mapq_30.1000.mcool",
    #siDDX55
    "siDDX55-NT.hg19" : "/data/alv/CTCF_degron/data/siDDX55-NT.hg19.mapq_30.1000.mcool",
    "siDDX55-IAA.hg19" : "/data/alv/CTCF_degron/data/siDDX55-IAA.hg19.mapq_30.1000.mcool",
    #siTAF5L
    "siTAF5L-NT.hg19" : "/data/alv/CTCF_degron/data/siTAF5L-NT.hg19.mapq_30.1000.mcool",
    "siTAF5L-IAA.hg19" : "/data/alv/CTCF_degron/data/siTAF5L-IAA.hg19.mapq_30.1000.mcool",
    #RAD21 degron
    "RAD21-NT.hg19" : "/data/alv/CTCF_degron/data/RAD21-AID-NT.hg19.mapq_30.1000.mcool",
    "RAD21-IAA.hg19" : "/data/alv/CTCF_degron/data/RAD21-AID-IAA-6H.hg19.mapq_30.1000.mcool",
    #PlaB splicing inhibition
    "CtrlPlaB-NT.hg19" : "/data/alv/CTCF_degron/data/NT-hg19-combined-90000000.mcool",
    "CtrlPlaB-IAA.hg19" : "/data/alv/CTCF_degron/data/IAA-hg19-combined-90000000.mcool",
    "PlaB-NT.hg19" : "/data/alv/CTCF_degron/data/NT-PlaB-hg19-combined-90000000.mcool",
    "PlaB-IAA.hg19" : "/data/alv/CTCF_degron/data/IAA-PlaB-hg19-combined-90000000.mcool",
    #compare with WT
    "Ctrl500M-noTIR1.hg19" : "/data/alv/CTCF_degron/data/WT-44-442-pool/Hap1-WT-combined-500000000.mcool",
    "Ctrl500M-wtHAP1.hg19" : "/data/alv/CTCF_degron/data/WT-44-442-pool/CkoC44-NO-TIR1-pool.mcool",
    "Ctrl500M-CT442-NT.hg19" : "/data/alv/CTCF_degron/data/WT-44-442-pool/CkoCT442-NT-pool-500000000.mcool",
    "Ctrl500M-CT442-IAA.hg19" : "/data/alv/CTCF_degron/data/WT-44-442-pool/CkoCT442-IAA-pool-500000000.mcool",
}
# +
# ins_samples = {
# "Hap1-WT-combined.hg19" : f"Hap1-WT-combined.mcool",
# }
# Hi-C bin size (bp) used for the expected-value tables below.
binsize = 5_000
# Human-readable bin-size label, e.g. "5kb", interpolated into the file names.
binsize_human = f"{int(binsize/1_000)}kb"
# Per-sample cis expected-value TSVs (cooltools-style), one per condition.
# Keys mirror the cooler dict above so samples can be looked up consistently.
exp_samples = {
    #cooler locations, some expected at these locations
    # CTCF degron
    "CkoCT442_NT_pool.hg19" : f"/data/alv/CTCF_degron/data/CkoCT442-NT-pool.{binsize_human}.cis.exp.tsv",
    "CkoCT442_IAA_pool.hg19" : f"/data/alv/CTCF_degron/data/CkoCT442-IAA-pool.{binsize_human}.cis.exp.tsv",
    #polII degron
    "PolII-NT.hg19" : f"/data/alv/polII_degron/data/20200417_remap_polIIdegron/coolers_library_group/PTB2539-NT.hg19.mapq_30.1000.{binsize_human}.cis.exp.tsv",
    "PolII-IAA.hg19" : f"/data/alv/polII_degron/data/20200417_remap_polIIdegron/coolers_library_group/PTB2539-IAA.hg19.mapq_30.1000.{binsize_human}.cis.exp.tsv",
    #AAVS1 clone
    "mutControl-NT.hg19" : f"/data/alv/CTCF_degron/data/DDX55-TAF5L-ctrl-pool/CkoCT442-AAVS1-NT-pool.{binsize_human}.cis.exp.tsv",
    "mutControl-IAA.hg19" : f"/data/alv/CTCF_degron/data/DDX55-TAF5L-ctrl-pool/CkoCT442-AAVS1-IAA-pool.{binsize_human}.cis.exp.tsv",
    #DDX55 clones
    "mutDDX55-NT.hg19" : f"/data/alv/CTCF_degron/data/DDX55-TAF5L-ctrl-pool/DDX55-clones-NT.hg19.mapq_30.1000.{binsize_human}.cis.exp.tsv",
    "mutDDX55-IAA.hg19" : f"/data/alv/CTCF_degron/data/DDX55-TAF5L-ctrl-pool/DDX55-clones-IAA.hg19.mapq_30.1000.{binsize_human}.cis.exp.tsv",
    #TAF5L clones
    "mutTAF5L-NT.hg19" : f"/data/alv/CTCF_degron/data/DDX55-TAF5L-ctrl-pool/TAF5L-clones-NT.hg19.mapq_30.1000.{binsize_human}.cis.exp.tsv",
    "mutTAF5L-IAA.hg19" : f"/data/alv/CTCF_degron/data/DDX55-TAF5L-ctrl-pool/TAF5L-clones-IAA.hg19.mapq_30.1000.{binsize_human}.cis.exp.tsv",
    #siCONTROL
    "siControl-NT.hg19" : f"/data/alv/CTCF_degron/data/siCTRL-NT.hg19.mapq_30.1000.{binsize_human}.cis.exp.tsv",
    "siControl-IAA.hg19" : f"/data/alv/CTCF_degron/data/siCTRL-IAA.hg19.mapq_30.1000.{binsize_human}.cis.exp.tsv",
    #siDDX55
    "siDDX55-NT.hg19" : f"/data/alv/CTCF_degron/data/siDDX55-NT.hg19.mapq_30.1000.{binsize_human}.cis.exp.tsv",
    "siDDX55-IAA.hg19" : f"/data/alv/CTCF_degron/data/siDDX55-IAA.hg19.mapq_30.1000.{binsize_human}.cis.exp.tsv",
    #siTAF5L
    "siTAF5L-NT.hg19" : f"/data/alv/CTCF_degron/data/siTAF5L-NT.hg19.mapq_30.1000.{binsize_human}.cis.exp.tsv",
    "siTAF5L-IAA.hg19" : f"/data/alv/CTCF_degron/data/siTAF5L-IAA.hg19.mapq_30.1000.{binsize_human}.cis.exp.tsv",
    #RAD21 degron
    "RAD21-NT.hg19" : f"/data/alv/CTCF_degron/data/RAD21-AID-NT.hg19.mapq_30.1000.{binsize_human}.cis.exp.tsv",
    "RAD21-IAA.hg19" : f"/data/alv/CTCF_degron/data/RAD21-AID-IAA-6H.hg19.mapq_30.1000.{binsize_human}.cis.exp.tsv",
    #PlaB splicing inhibition
    "CtrlPlaB-NT.hg19" : f"/data/alv/CTCF_degron/data/NT-hg19-combined-90000000.{binsize_human}.cis.exp.tsv",
    "CtrlPlaB-IAA.hg19" : f"/data/alv/CTCF_degron/data/IAA-hg19-combined-90000000.{binsize_human}.cis.exp.tsv",
    "PlaB-NT.hg19" : f"/data/alv/CTCF_degron/data/NT-PlaB-hg19-combined-90000000.{binsize_human}.cis.exp.tsv",
    "PlaB-IAA.hg19" : f"/data/alv/CTCF_degron/data/IAA-PlaB-hg19-combined-90000000.{binsize_human}.cis.exp.tsv",
    #compare with WT
    "Ctrl500M-noTIR1.hg19" : f"/data/alv/CTCF_degron/data/WT-44-442-pool/Hap1-WT-combined-500000000.{binsize_human}.cis.exp.tsv",
    "Ctrl500M-wtHAP1.hg19" : f"/data/alv/CTCF_degron/data/WT-44-442-pool/CkoC44-NO-TIR1-pool.{binsize_human}.cis.exp.tsv",
    "Ctrl500M-CT442-NT.hg19" : f"/data/alv/CTCF_degron/data/WT-44-442-pool/CkoCT442-NT-pool-500000000.{binsize_human}.cis.exp.tsv",
    "Ctrl500M-CT442-IAA.hg19" : f"/data/alv/CTCF_degron/data/WT-44-442-pool/CkoCT442-IAA-pool-500000000.{binsize_human}.cis.exp.tsv",
}
# -
# #### get some gene annotation the way we used it in the stackups (one from RefSeq, from HiGlass ...)
# +
# Load the HiGlass/RefSeq "exon unions" gene annotation (tab-separated,
# headerless BED-like file); column names follow the HiGlass layout.
genes_df = pd.read_csv(
    "./gene_annot_data/hg19/geneAnnotationsExonUnions.bed",
    sep="\t",
    header=None,
    names = ["chr",
             "txStart",
             "txEnd",
             "geneName",
             "citationCount",
             "strand",
             "union_geneId", # "refseqId",
             "geneId",
             "geneType",
             "geneDesc",
             "cdsStart",
             "cdsEnd",
             "exonStarts",
             "exonEnds"]
)
genes_df.head()
### we'll be removing duplicated genes because they're likely unmappable and/or harder to deal with anyway ...
### there are of course exceptions, but we will do it anyway:
# Keep only geneIds that occur exactly once: group by geneId and retain
# groups of size 1 (the .first() is a no-op for singleton groups).
geneId_grp = genes_df.groupby("geneId")
genes_df = geneId_grp.first()[geneId_grp.size() == 1].reset_index()
# -
# #### Get expression data measured in TPMs
# column names for the RNA-seq samples generated for various controls, mutants and siRNAs ...
# RSEM TPM columns for the siRNA experiment; names encode clone (S44/S442),
# knock-down target (siCtrl/siDDX55/siTAF5L), treatment (NT/IAA) and
# replicate ("R2" suffix = replicate 2).
rsem_si_cols = ['rsem.out.S442_IAA',
                'rsem.out.S442_IAAR2',
                'rsem.out.S442_NT',
                'rsem.out.S442_NTR2',
                'rsem.out.S442_siCTRL_IAAR2',
                'rsem.out.S442_siCTRL_NTR2',
                'rsem.out.S442_siCtrl_IAA',
                'rsem.out.S442_siCtrl_NT',
                'rsem.out.S442_siDDX55_IAA',
                'rsem.out.S442_siDDX55_IAAR2',
                'rsem.out.S442_siDDX55_NT',
                'rsem.out.S442_siDDX55_NTR2',
                'rsem.out.S442_siTAF5L_IAA',
                'rsem.out.S442_siTAF5L_IAAR2',
                'rsem.out.S442_siTAF5L_NT',
                'rsem.out.S442_siTAF5L_NTR2',
                'rsem.out.S44_IAA',
                'rsem.out.S44_IAAR2',
                'rsem.out.S44_NT',
                'rsem.out.S44_NTR2']
# RSEM TPM columns for the CRISPR mutant clones (AAVS1 control, DDX55, TAF5L),
# same NT/IAA x replicate naming scheme.
rsem_mut_cols = ['rsem.out.AAVS1_sg24_IAA',
                 'rsem.out.AAVS1_sg24_IAAR2',
                 'rsem.out.AAVS1_sg24_NT',
                 'rsem.out.AAVS1_sg24_NTR2',
                 'rsem.out.DDX55_sg27_IAA',
                 'rsem.out.DDX55_sg27_IAAR2',
                 'rsem.out.DDX55_sg27_NT',
                 'rsem.out.DDX55_sg27_NTR2',
                 'rsem.out.DDX55_sg2B_IAA',
                 'rsem.out.DDX55_sg2B_IAAR2',
                 'rsem.out.DDX55_sg2B_NT',
                 'rsem.out.DDX55_sg2B_NTR2',
                 'rsem.out.TAF5L_sg23_IAA',
                 'rsem.out.TAF5L_sg23_IAAR2',
                 'rsem.out.TAF5L_sg23_NT',
                 'rsem.out.TAF5L_sg23_NTR2',
                 'rsem.out.TAF5L_sg27_IAA',
                 'rsem.out.TAF5L_sg27_IAAR2',
                 'rsem.out.TAF5L_sg27_NT',
                 'rsem.out.TAF5L_sg27_NTR2']
# +
# Load the two TPM tables and keep only the gene Id plus the sample columns
# declared above.
rsem_si_df = pd.read_csv("merged_TPM_genes_si.tsv",sep="\t")[["Id"] + rsem_si_cols]
rsem_mut_df = pd.read_csv("merged_TPM_genes_mut.tsv",sep="\t")[["Id"] + rsem_mut_cols]
# merge si and mut parts to create a monster RNA-seq data set ...
rsem_df = pd.merge(rsem_si_df,rsem_mut_df,how="inner",on="Id")
# some checks: all requested columns made it through the merge, and no rows
# were dropped (i.e. both tables share an identical set of gene Ids).
assert np.asarray([(c in rsem_df.columns) for c in rsem_mut_cols]).all()
assert np.asarray([(c in rsem_df.columns) for c in rsem_si_cols]).all()
assert len(rsem_df) == len(rsem_si_df) == len(rsem_mut_df)
# -
# #### We'll be using minimum and maximum levels of RNA-seq per gene across samples to define our lists of "commonly" active and inactive genes - so let's calculate min/max and drop all other columns ...
# +
# explore a bit how min vs max behave ...
# are there a lot of genes that go from expressed to not expressed etc ...
# Per-gene maximum and minimum TPM across *all* samples (si + mut); used
# below to define "commonly" active / inactive genes.
_max_col = rsem_df[ rsem_si_cols+rsem_mut_cols ].max(axis=1)
_min_col = rsem_df[ rsem_si_cols+rsem_mut_cols ].min(axis=1)
x = _max_col
y = _min_col
# Visual sanity check: min vs max TPM on linear and log-log scales.
f,axs = plt.subplots(1,2,figsize=(9,4))
ax = axs[0]
ax.scatter(x,y,alpha=0.1)
ax.set_xlabel("max-TPM")
ax.set_ylabel("min-TPM")
ax.set_xlim((-10,250))
ax.set_ylim((-10,250))
ax = axs[1]
ax.loglog(x,y,"ro",alpha=0.1)
ax.set_xlabel("max-TPM")
ax.set_ylabel("min-TPM")
# -
# Drop all per-sample columns; keep only Id + min/max summary expression.
rsem_df = rsem_df[["Id"]]
rsem_df["exp_min"] = _min_col
rsem_df["exp_max"] = _max_col
# #### Overlap that refined dataframe of RNA-seq results with "our" list of genes ...
# Overlap the RNA-seq summary (min/max TPM) with the gene annotation.
# Inner join on gene symbol keeps only genes present in both tables.
genes_exp_df = pd.merge(
    rsem_df,
    genes_df,
    how = 'inner',
    left_on="Id",
    right_on="geneName"
)
genes_exp_df.head(3)
# approximate sizes:  rsem ~4_300 | genes_df ~23_200 | overlap ~2_500
# +
# refine the dataframe ...
# columns needed for TSS/TTS bed files output:
tx_cols = ["chr", "txStart", "txEnd", "strand", "exp_min", "exp_max"]
tx_cols_rename = {"chr":"chrom",
                  "txStart":"start",
                  "txEnd":"end"}
gdf = genes_exp_df[tx_cols].reset_index(drop=True).rename(tx_cols_rename,axis=1)
gdf["size"] = gdf["end"] - gdf["start"]
# -
# final adjustments: sort by position, keep autosomes only, re-index.
# NOTE(review): `autosomal_chroms` must be defined earlier in the notebook.
gdf = gdf.sort_values(["chrom","start"])
gdf = gdf[gdf["chrom"].isin(autosomal_chroms)]
gdf = gdf.reset_index(drop=True)
gdf.head(3)
# BUGFIX: the original used `copy(gdf)` here, but `from copy import copy`
# only happens further down the notebook -- a NameError when the script is
# run top-to-bottom.  DataFrame.copy() (deep, same as copy.copy on a frame)
# is equivalent and needs no import.
all_genes = gdf.copy()
all_genes
# ## Deal with gene-CTCF relationships ...
# ### Deal with CTCF either in the body of the gene or @TSS/TTS ...
# ### Let's exclude genes that have CTCF within 2kb of TSS ...
# +
from copy import copy
# CTCF ChIP-seq peaks (untreated); schema="bed3" keeps only chrom/start/end,
# so the "strength" column present in the file is deliberately ignored.
# NOTE(review): `bioframe` is imported further down the notebook -- confirm
# the cells were executed in an order that makes it available here.
ctcf_df = bioframe.read_table(
    "NT-CTCF-narrowPeaks-sort-merge-with-strength.bed",
    schema="bed3",
    index_col=False
)
# Flank size around TSS/TTS used to call "CTCF nearby" (+/- 2 kb).
ctcf_radius = 2_000
# Strand-aware TSS: gene start on "+" strand, gene end on "-" strand.
_tss = copy(all_genes)
_tss["tss"] = _tss.apply(lambda r: r["start"] if r["strand"]=="+" else r["end"],axis=1)
_tss["tss_mflank"] = _tss["tss"] - ctcf_radius
_tss["tss_pflank"] = _tss["tss"] + ctcf_radius
# _tss[["chrom","tss_mflank","tss_pflank"]]
# Left join: every gene keeps a row; genes with no CTCF peak in the flank get
# NaN in the matched-peak columns (used downstream to split the gene set).
tss_ctcf_overlap = bioframe.overlap(
    _tss[["chrom","tss_mflank","tss_pflank"]],
    ctcf_df,
    how='left',
    suffixes=('_', ''),
    return_index=True,
    # keep_order=False,
    cols1=["chrom","tss_mflank","tss_pflank"],
)
# Strand-aware TTS: gene end on "+" strand, gene start on "-" strand.
_tts = copy(all_genes)
_tts["tts"] = _tts.apply(lambda r: r["end"] if r["strand"]=="+" else r["start"],axis=1)
_tts["tts_mflank"] = _tts["tts"] - ctcf_radius
_tts["tts_pflank"] = _tts["tts"] + ctcf_radius
# _tts[["chrom","tss_mflank","tss_pflank"]]
tts_ctcf_overlap = bioframe.overlap(
    _tts[["chrom","tts_mflank","tts_pflank"]],
    ctcf_df,
    how='left',
    suffixes=('_', ''),
    return_index=True,
    # keep_order=False,
    cols1=["chrom","tts_mflank","tts_pflank"],
)
# -
# #### Now let's actually exclude genes with CTCF near TSS/TTS from downstream analysis ...
# +
# Split genes by TSS-proximal CTCF: a non-null matched-peak "index" column
# means at least one CTCF peak overlapped the +/-2kb TSS flank.
ctcf_index = tss_ctcf_overlap["index_"][
    ~pd.isna(tss_ctcf_overlap["index"])
]
noctcf_index = tss_ctcf_overlap["index_"][
    pd.isna(tss_ctcf_overlap["index"])
]
# FIX: `np.int` was a deprecated alias of the builtin `int` and was removed
# in NumPy 1.24 -- use `int` directly (identical behavior).
ctcf_index = np.unique(ctcf_index.astype(int).values)
noctcf_index = np.unique(noctcf_index.astype(int).values)
all_genes_ctcf = all_genes.loc[ ctcf_index ].reset_index(drop=True)
all_genes_noctcf = all_genes.loc[ noctcf_index ].reset_index(drop=True)
print(
    f"""{len(all_genes_ctcf)} genes have CTCF@TSS,
{len(all_genes_noctcf)} genes do not -
total # of genes {len(all_genes_noctcf)+len(all_genes_ctcf)}"""
)
# -
# #### same for TTS ...
# +
# Same split as above, but for TTS-proximal CTCF.
ctcf_index = tts_ctcf_overlap["index_"][
    ~pd.isna(tts_ctcf_overlap["index"])
]
noctcf_index = tts_ctcf_overlap["index_"][
    pd.isna(tts_ctcf_overlap["index"])
]
# FIX: `np.int` (removed in NumPy 1.24) replaced with builtin `int`.
ctcf_index = np.unique(ctcf_index.astype(int).values)
noctcf_index = np.unique(noctcf_index.astype(int).values)
all_genes_ctcf = all_genes.loc[ ctcf_index ].reset_index(drop=True)
all_genes_noctcf = all_genes.loc[ noctcf_index ].reset_index(drop=True)
print(
    f"""{len(all_genes_ctcf)} genes have CTCF@TTS,
{len(all_genes_noctcf)} genes do not -
total # of genes {len(all_genes_noctcf)+len(all_genes_ctcf)}"""
)
# -
# #### combine TSS and TTS - without CTCF
# +
# Combine TSS and TTS results: build the strict "no CTCF anywhere near the
# gene ends" set as the intersection of the two no-CTCF index sets.
ctcf_index_tts = tts_ctcf_overlap["index_"][
    ~pd.isna(tts_ctcf_overlap["index"])
]
noctcf_index_tts = tts_ctcf_overlap["index_"][
    pd.isna(tts_ctcf_overlap["index"])
]
# these are indices of the genes (from all_genes) that have (don't have) CTCF@TTS
# FIX: `np.int` (removed in NumPy 1.24) replaced with builtin `int`.
ctcf_index_tts = np.unique(ctcf_index_tts.astype(int).values)
noctcf_index_tts = np.unique(noctcf_index_tts.astype(int).values)
ctcf_index_tss = tss_ctcf_overlap["index_"][
    ~pd.isna(tss_ctcf_overlap["index"])
]
noctcf_index_tss = tss_ctcf_overlap["index_"][
    pd.isna(tss_ctcf_overlap["index"])
]
# these are indices of the genes (from all_genes) that have (don't have) CTCF@TSS
ctcf_index_tss = np.unique(ctcf_index_tss.astype(int).values)
noctcf_index_tss = np.unique(noctcf_index_tss.astype(int).values)
# here is the strictest set - no CTCF at TSS AND TTS ...
strict_index = np.intersect1d(
    noctcf_index_tts,
    noctcf_index_tss
)
all_genes_noctcf_atall = all_genes.loc[ strict_index ].reset_index(drop=True)
print(
    f"""{len(all_genes_noctcf_atall)} genes have no CTCF@TTS and no CTCF@TSS"""
)
# -
# ### Use `gdf` variable to choose which DataFrame with genes we are going to work on ....
# Work on the strictest gene set (no CTCF at either TSS or TTS) from here on.
gdf = copy(all_genes_noctcf_atall)
# Transcript-size distribution per strand, log-spaced bins from 100 bp to 1 Mb
# (with catch-all first/last bins).
bins = np.r_[0,np.geomspace(100,1_000_000),10_000_000]
gdf[gdf["strand"]=="+"]["size"].plot.hist(bins=bins,log=True,label="+")
gdf[gdf["strand"]=="-"]["size"].plot.hist(bins=bins,log=True,alpha=0.7,label="-")
ax = plt.gca()
ax.set_xscale("log")
ax.set_xlabel("transcript size, bp")
ax.legend(frameon=False)
# +
# actually - negative polarity genes are less expressed than the positive strand genes ...
# Min/max TPM histograms per strand, log-spaced bins.
bins = np.r_[0,np.geomspace(1,12000)]
f,axs = plt.subplots(1,2,figsize=(10,4),sharey=True)
ax = axs[0]
gdf[gdf["strand"]=="+"]["exp_min"].plot.hist(bins=bins,log=True,label="+",ax=ax)
gdf[gdf["strand"]=="-"]["exp_min"].plot.hist(bins=bins,log=True,alpha=0.7,label="-",ax=ax)
ax.set_xscale("log")
ax.set_xlabel("TPM")
ax.legend(frameon=False)
ax.set_title("min")
ax = axs[1]
gdf[gdf["strand"]=="+"]["exp_max"].plot.hist(bins=bins,log=True,label="+",ax=ax)
gdf[gdf["strand"]=="-"]["exp_max"].plot.hist(bins=bins,log=True,alpha=0.7,label="-",ax=ax)
ax.set_xscale("log")
ax.set_xlabel("TPM")
ax.legend(frameon=False)
ax.set_title("max")
# -
# #### Classify genes into discrete categories by gene-size and expression-level ...
# +
# Discretize expression: TPM < 0.5 -> "no", 0.5-5 -> "low", > 5 -> "high";
# and gene size: < 50 kb -> "short", otherwise "long".
exp_bin_edges = [0,.5,5,np.inf]
exp_labels = ("no","low","high")
sizes_bin_edges = [0,50_000,np.inf]
sizes_labels = ("short","long")
# Category from the *minimum* TPM across all samples (conservative "active").
gdf["gexp_min"] = pd.cut(
    gdf["exp_min"],
    bins = exp_bin_edges,
    labels = exp_labels,
    include_lowest = True
)
# Category from the *maximum* TPM across all samples.
gdf["gexp_max"] = pd.cut(
    gdf["exp_max"],
    bins = exp_bin_edges,
    labels = exp_labels,
    include_lowest = True
)
gdf["gsize"] = pd.cut(
    gdf["size"],
    bins = sizes_bin_edges,
    labels = sizes_labels,
    include_lowest = True
)
display(gdf.head(2))
display( gdf.groupby(["gexp_min","gsize"]).size().unstack(fill_value=0) )
# finding the common active genes and common inactive ones ...
# Genes with gexp_min == gexp_max are consistently (in)active across samples.
display( gdf.groupby(["gexp_min","gexp_max","gsize"]).size().unstack(fill_value=0) )
# +
# exp_bin_edges = [0,.5,10,np.inf]
# exp_labels = ("no","low","high")
# sizes_bin_edges = [0,50_000,np.inf]
# sizes_labels = ("short","long")
# gdf["gexp"] = pd.cut(
# gdf["exp"],
# bins = exp_bin_edges,
# labels = exp_labels,
# include_lowest = True
# )
# gdf["gsize"] = pd.cut(
# gdf["size"],
# bins = sizes_bin_edges,
# labels = sizes_labels,
# include_lowest = True
# )
# display(gdf.head())
# display( gdf.groupby(["gexp","gsize"]).size().unstack(fill_value=0) )
# -
# ### Let's use coolpup as Python API ...
from matplotlib.colors import LogNorm
# https://stackoverflow.com/questions/48625475/python-shifted-logarithmic-colorbar-white-color-offset-to-center
class MidPointLogNorm(LogNorm):
    """Logarithmic color normalization with an adjustable midpoint.

    Maps ``vmin`` -> 0, ``midpoint`` -> 0.5 and ``vmax`` -> 1 by piecewise
    linear interpolation in log space, so a diverging colormap can be
    centered on an arbitrary value (e.g. obs/exp == 1).
    See https://stackoverflow.com/questions/48625475
    """

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        super().__init__(vmin=vmin, vmax=vmax, clip=clip)
        self.midpoint = midpoint

    def __call__(self, value, clip=None):
        # process_value coerces input to a masked array (and tells us whether
        # it was scalar); we only need the mask to propagate invalid entries.
        result, is_scalar = self.process_value(value)
        anchors = [np.log(self.vmin), np.log(self.midpoint), np.log(self.vmax)]
        mapped = np.interp(np.log(value), anchors, [0, 0.5, 1])
        return np.ma.array(mapped, mask=result.mask, copy=False)
# #### we'll use regions to make use of "proper" expected ...
# +
# Use bioframe to fetch the genomic features from the UCSC.
hg19_chromsizes = bioframe.fetch_chromsizes('hg19', as_bed=True)
hg19_cens = bioframe.fetch_centromeres('hg19')
# Split each chromosome at its centromere midpoint -> p/q arms.
hg19_arms = bioframe.split(hg19_chromsizes, hg19_cens, cols_points=['chrom', 'mid'])
# Select only chromosomes that are present in the cooler.
# This step is typically not required! we call it only because the test data are reduced.
hg19_chromsizes = hg19_chromsizes.set_index("chrom").loc[autosomal_chroms].reset_index()
hg19_arms = hg19_arms.set_index("chrom").loc[autosomal_chroms].reset_index()
# call this to automatically assign names to chromosomal arms:
hg19_arms = bioframe.from_any(hg19_arms)
# Relies on split() returning two rows per chromosome in p,q order.
hg19_arms["name"] = [f"{chrom}{arm}" for chrom in autosomal_chroms for arm in list('pq')]
# -
# import standard python libraries
import seaborn as sns
# import libraries for biological data analysis
import coolpuppy as cp
import cooler
import bioframe
# import cooltools
# import cooltools.expected
# from cooltools.lib import plotting
# +
# ooe - Explanations and reminder ...
# (default)False: (o1 +o2 +o3 ... oN)/(e1+e2+e3+...eN) [shifting-normalization goes according to this scenario as well]
# True: o1/e1 +o2/e2 +o3/e3 ... oN/eN
# -
def get_pup(sites, clr, resolution, exp_df, regions_df,
            nproc=18, min_sites=1_000, max_sites=2_000):
    """
    Generate a rescaled local pile-up for a collection of sites, keeping the
    padding size equal to the interval itself (fraction_pad=1).

    Parameters
    ----------
    sites : pandas.DataFrame
        Must contain "chrom", "start", "end", "strand" columns.
    clr : cooler.Cooler
        Hi-C cooler opened at `resolution`.
    resolution : int
        Bin size in bp.
    exp_df, regions_df :
        Expected table and regions. Currently unused because the active
        PileUpper call uses shift-based controls (control=True); kept in the
        signature so the commented-out expected-based variant can be
        re-enabled without changing callers.
    nproc : int, optional
        Worker processes for the pile-up (default 18, the previous
        hard-coded value).
    min_sites, max_sites : int, optional
        Groups with fewer than `min_sites` or more than `max_sites` rows are
        skipped (None is returned) to avoid noisy or disproportionately
        expensive pile-ups.

    Returns
    -------
    coolpuppy pile-up result grouped by strand, or None if the group size is
    outside the allowed window.
    """
    _sites = sites[["chrom","start","end","strand"]].reset_index(drop=True)
    n_sites = len(_sites)
    # skip groups that are too small (noisy) or too large (slow / unbalanced)
    if n_sites > max_sites or n_sites < min_sites:
        return None
    cc = cp.CoordCreator(
        _sites,
        resolution=resolution,
        basetype='bed',
        local=True,
        fraction_pad=1.
    )
    # shift-based control, rescaled to a fixed 199x199 output
    pu = cp.PileUpper(
        clr,
        cc,
        control = True,
        rescale = True,
        rescale_size = 199,
    )
    # pu = cp.PileUpper(
    #     clr,
    #     cc,
    #     regions= regions_df,
    #     expected = exp_df,
    #     ooe = True,
    #     control = False,
    #     rescale = True,
    #     rescale_size = 199,
    # )
    print(f"working on a group of {n_sites} size ...")
    pup = pu.pileupsByStrandWithControl(nproc=nproc)
    return pup
# +
# key_samples = ["Ctrl500M-CT442-NT.hg19"]#, "Ctrl500M-CT442-IAA.hg19"]
# Samples to pile up; names must key into both ins_samples (coolers) and
# exp_samples (expected tables).
key_samples = ["CkoCT442_NT_pool.hg19",
               "CkoCT442_IAA_pool.hg19",
               "PolII-NT.hg19",
               "PolII-IAA.hg19",
               "mutControl-NT.hg19",
               "mutControl-IAA.hg19",
               "mutDDX55-NT.hg19",
               "mutDDX55-IAA.hg19",
               "mutTAF5L-NT.hg19",
               "mutTAF5L-IAA.hg19",
               "siControl-NT.hg19",
               "siControl-IAA.hg19",
               "siDDX55-NT.hg19",
               "siDDX55-IAA.hg19",
               "siTAF5L-NT.hg19",
               "siTAF5L-IAA.hg19",
               "RAD21-NT.hg19",
               "RAD21-IAA.hg19",
               "CtrlPlaB-NT.hg19",
               "CtrlPlaB-IAA.hg19",
               "PlaB-NT.hg19",
               "PlaB-IAA.hg19",
               "Ctrl500M-noTIR1.hg19",
               "Ctrl500M-wtHAP1.hg19",
               "Ctrl500M-CT442-NT.hg19",
               "Ctrl500M-CT442-IAA.hg19"]
# sample name -> per-gene-group pile-ups (groups too small/large yield None).
pup_dict = {}
# group genes by expression and size before extracting corresponding profiles of a list of features :
gene_groups = gdf.groupby(["gexp_min","gexp_max","gsize"]) #"gexp_min","gexp_max","gsize" or "gexp","gsize"
for sample in key_samples:
    print(f"working on {sample} ...")
    cname = ins_samples[sample]
    exp_name = exp_samples[sample]
    # open the multi-resolution cooler at the working binsize
    clr = cooler.Cooler(f"{cname}::/resolutions/{binsize}")
    exp_df = pd.read_csv(exp_name, sep="\t")
    pup_dict[sample] = gene_groups.apply( lambda sub_df: get_pup(sub_df, clr, binsize, exp_df, hg19_arms) )
# -
# #### Save all of the results on disk using pickle - to avoid re-running it again
import pickle

# Persist the pile-up results so the expensive aggregation above need not be
# re-run.  Use a context manager so the file handle is closed (and the data
# flushed) even if pickling raises.
with open("PUP_DICT_5kb_SHIFT_EXP_4exp-size-groups_COMMON_expression.pickle", "wb") as file_to_store:
    # alternative output name used previously:
    # "PUP_DICT_5kb_SHIFT_EXP_4exp-size-groups.pickle"
    pickle.dump(pup_dict, file_to_store)
# +
# # ! readlink -f PUP_DICT_5kb_SHIFT_EXP.pickle
# -
# ### plotting different pileups for different groups ...
from matplotlib import colors
# +
# glens=("short","long")
# gexps=("no","low","high")
# fig = plt.figure(figsize=(12,8),constrained_layout=True)
# gs = fig.add_gridspec(len(glens),len(gexps))
# orient="--"
# _yyyy = pup_dict["CkoCT442_IAA_pool.hg19"].reset_index().set_index(["gexp_min","gexp_max","gsize","orientation"])
# # # divnorm = colors.TwoSlopeNorm(vmin=0.75, vcenter=1, vmax=1.25)
# for i,glen in enumerate(glens):
# for j,gexp in enumerate(gexps):
# ax = fig.add_subplot(gs[i,j])
# pup_heatmap = _yyyy.loc[(gexp,gexp,glen,orient),"data"]
# img = ax.imshow(
# pup_heatmap,
# # norm = MidPointLogNorm(vmin=0.4,vmax=1.1,midpoint=1),
# norm = colors.TwoSlopeNorm(vmin=0.75, vcenter=1, vmax=1.25),
# cmap="coolwarm",
# interpolation="nearest",
# )
# ax.set_title(f"{gexp}-{glen}-{orient}")
# plt.colorbar(img)
# +
####
# grouped using gexp_min and gexp_max ...
####
# Re-index the untreated sample's pile-ups by (expression-min, expression-max,
# size, strand-orientation) so heatmaps can be looked up per group.
_yyyy = pup_dict["CkoCT442_NT_pool.hg19"].reset_index().set_index(["gexp_min","gexp_max","gsize","orientation"])
# # divnorm = colors.TwoSlopeNorm(vmin=0.75, vcenter=1, vmax=1.25)
# glens=("short","long")
# gexps=("no","low","high")
fig = plt.figure(figsize=(12,8),constrained_layout=True)
# Two rows ("--" on top, "++" below), one column per group (index holds both
# orientations per group, hence the /2).
gs = fig.add_gridspec( 2, int(len(_yyyy.index)/2) )
# orient="--"
# Diverging norm centered at 1 (obs/control ratio).
the_norm = colors.TwoSlopeNorm(vmin=0.75, vcenter=1, vmax=1.25)
for i,(gmi,gma,glen,orient) in enumerate(_yyyy.index[::2]):
    # print(gmi,gma,glen)
    # top row: "--" genes, flipped on both axes so they read 5'->3' like "++"
    ax = fig.add_subplot(gs[0,i])
    pup_heatmap = _yyyy.loc[(gmi,gma,glen,"--"),"data"]
    img = ax.imshow(
        pup_heatmap[::-1,::-1],
        norm = the_norm,
        cmap="coolwarm",
        interpolation="nearest",
    )
    ax.set_title(f"{gmi}-{gma}-{glen}---")
    # print(gmi,gma,glen)
    # bottom row: "++" genes as-is
    ax = fig.add_subplot(gs[1,i])
    pup_heatmap = _yyyy.loc[(gmi,gma,glen,"++"),"data"]
    img = ax.imshow(
        pup_heatmap,
        norm = the_norm,
        cmap="coolwarm",
        interpolation="nearest",
    )
    ax.set_title(f"{gmi}-{gma}-{glen}-++")
plt.colorbar(img)
| Submission1_notebooks/Rescaled_gene_pileups_attempt-SHADOW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib
# HACK: force the serif lookup to prefer non-bold Times New Roman by removing
# the 'roman' weight alias and rebuilding the font cache.
# FIX: `font_manager._rebuild()` (and the need for this tweak) was removed in
# matplotlib >= 3.6; guard so the script still runs on modern installs.
try:
    del matplotlib.font_manager.weight_dict['roman']
    matplotlib.font_manager._rebuild()
except (KeyError, AttributeError):
    pass  # newer matplotlib: nothing to rebuild
# +
import numpy as np

# --- 1-D wave equation u_tt = c^2 u_xx, explicit finite differences ---
# Spatial/temporal discretization parameters.
L = 10
dx = 0.1
dt = 0.01
tmin = 0.0
tmax = 5.0
#BCs
xmin = 0.0
xmax = L
c = 1.0  # propagation speed
C = (c*dt/dx)**2  # squared Courant number; scheme is stable while <= 1
# Grid sizes and the spatial coordinate vector.
nx = int((xmax-xmin)/dx) + 1
nt = int((tmax-tmin)/dt) + 2
X = np.arange(xmin, xmax+dx, dx)
# u[t] holds the full spatial profile at time step t.
u = np.zeros((nt,nx))
# Initial conditions: Gaussian bump at x = 5, zero initial velocity.
u_0 = np.exp(-((X-5)**2))
u_1 = np.zeros(nx)
u[0] = u_0
u[1] = u[0] + dt * u_1  # first step from the initial velocity
# Time stepping: update all interior points at once with slicing; the fixed
# (u = 0) boundaries at x = 0 and x = L are simply never written.
for t in range(1, nt-1):
    u[t+1, 1:-1] = 2*(1-C)*u[t, 1:-1] - u[t-1, 1:-1] + C*(u[t, :-2] + u[t, 2:])
# +
import matplotlib.pyplot as plt

# Plot three time snapshots of the wave.
fig = plt.figure()
# FIX: the original called fig.add_subplot(111) three separate times.  On
# matplotlib >= 3.3 each call creates a NEW axes whose opaque background
# hides previously drawn lines; share one axes instead (aliases keep the
# original variable names working).
fig_1 = fig.add_subplot(111)
fig_2 = fig_1
fig_3 = fig_1
fig_1.plot(X,u[0], label=r"$t = 0$")
fig_2.plot(X,u[250], label=r"$t = 2.5$")
fig_3.plot(X,u[450], label=r"$t = 4.5$")
# rcParams are applied at draw/save time, so setting them after plot() works.
plt.rcParams['font.family'] = 'Times New Roman'  # font family
plt.rcParams['mathtext.fontset'] = 'stix'  # math font
plt.rcParams["font.size"] = 15  # base font size for the figure
plt.rcParams['xtick.labelsize'] = 12  # x tick labels only
plt.rcParams['ytick.labelsize'] = 12  # y tick labels only
plt.rcParams['xtick.direction'] = 'in'  # x ticks point inward
plt.rcParams['ytick.direction'] = 'in'  # y ticks point inward
plt.rcParams['axes.linewidth'] = 1.0  # axis line width
plt.rcParams['axes.grid'] = False  # no grid
plt.rcParams["legend.fancybox"] = False  # square legend corners
plt.rcParams["legend.framealpha"] = 0  # transparent legend background
plt.rcParams["legend.handlelength"] = 1  # legend line length
plt.rcParams["legend.labelspacing"] = 0.2  # vertical spacing between entries
plt.rcParams["legend.handletextpad"] = 0.2  # gap between legend line and text
plt.rcParams["legend.markerscale"] = 2  # marker scale in the legend
fig_1.set_xlabel(r"$x$")
fig_1.set_ylabel(r"$u(x,t)$")
fig_1.legend(loc=0)
plt.show()
#Save
fig.savefig('wave.eps', bbox_inches="tight", pad_inches=0.05)
| wave/PDE_wave.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (py2)
# language: python
# name: py2
# ---
""" load them libs """
import matplotlib.pyplot as plt
import networkx as nx
import random
import math
import pandas as pd
import statsmodels.api as sm
import glob
import os
import numpy as np
from PIL import Image
from helpers import *
import pickle
import time
import matplotlib.patches as mpatches
#random.seed(100)
#tic = time.time()
# +
""" get all files """
# NOTE: this notebook is Python 2 (statement-form print).
files = glob.glob('./matrix_csvs/*')
# Keep only files whose basename ends in "_influence.csv", sorted by name.
all_files = sorted([f for f in files if (f.split('/')[-1].split('_')[-1]=='influence.csv')])
print all_files
# +
""" get a dict going with files and data """
# year -> {year, csv path, data (filled below)}.
data = {}
years = range(2007,2018)
# years_m omits 2011, whose file is named "2011.5" instead of "<year>.75"
# and is therefore added manually below.
years_m = [2007,2008,2009,2010,2012,2013,2014,2015,2016,2017]
for y in years_m:
    # pick the (single) file containing "<year>.75" in its name
    fname_all = [f for f in all_files if ((str(y)+'.75') in f)][0]
    data[y]={'year':y,
             'fname_all':fname_all,
             'data_all':0.0}
#2011 notes
data[2011]={'year': 2011,
            'fname_all': './matrix_csvs/2011.5_por_green_influence.csv',
            'data_all': 0.0}
for y in years:
    print data[y]
# -
# Load each year's CSV into the dict (replacing the 0.0 placeholder).
for y in years:
    print y
    data[y]['data_all']=pd.read_csv(data[y]['fname_all'], encoding='utf-8')
    print data[y]['data_all'].shape
    print list(data[y]['data_all'])
# +
""" fill out extra columns """
# Union of column names across all years, so every yearly frame gets the
# same set of columns (missing ones filled with 0).
all_indices = []
for y in years:
    all_indices += list(data[y]['data_all'])
print set(all_indices)
for y in years:
    new_cols = [v for v in set(all_indices) if v not in data[y]['data_all']]
    for v in new_cols:
        data[y]['data_all'][v]=0
# +
""" add INFLUENCE_t-1"""
# Attach the previous year's INFLUENCE as a lagged regressor.
# NOTE(review): the join is on the ROW INDEX (left_index/right_index), which
# assumes rows represent the same entities in the same order every year --
# verify this against how the CSVs were produced.
for y in years[1:]:
    data[y]['data_all'] = data[y]['data_all'].merge(data[y-1]['data_all'][['INFLUENCE']], left_index=True, right_index=True, how='left', suffixes=('','_t-1'))
    # entities absent in the previous year get a zero lag
    data[y]['data_all']['INFLUENCE_t-1'] = data[y]['data_all']['INFLUENCE_t-1'].fillna(0.0)
print data[2017]['data_all']
# +
""" concat whole list """
# Stack all years (2008+; 2007 has no lag) into one long panel and rebase
# DATE so 2007 -> 0.
df = pd.concat([data[y]['data_all'] for y in years[1:]], axis=0)
df.DATE = df.DATE-2007
print df
print df.shape
# +
""" Train an OLS model """
# Dependent variable: share of green patents; regressors: everything except
# the target, the contemporaneous INFLUENCE, and the stray CSV index column.
Y = df.POR_GREEN
X = df.drop(['POR_GREEN','INFLUENCE','Unnamed: 0'],axis=1)
#X.to_csv('test_df.csv')
print X
#print list(X)
#print X.DATE
# Min-max scale DATE and the lagged influence to [0, 1].
X.DATE = (X.DATE - X.DATE.min())/(X.DATE.max()-X.DATE.min())
X['INFLUENCE_t-1'] = (X['INFLUENCE_t-1'] - X['INFLUENCE_t-1'].min())/(X['INFLUENCE_t-1'].max()-X['INFLUENCE_t-1'].min())
#print X['INFLUENCE_t-1']
#X.to_csv('data.csv')
#print X.isnull().any().any()
#print X.DATE
#print X['INFLUENCE_t-1']
#print X.shape
print Y
#print list(X)
#print list(Y)
# Add intercept and fit ordinary least squares.
X = sm.add_constant(X)
est = sm.OLS(Y,X)
est = est.fit()
print est.summary()
#print est.summary().as_csv()
#with pd.option_context('display.max_rows', None, 'display.max_columns', 3):
#    print est.params
# +
'''This takes the result of an statsmodel results table and transforms it into a dataframe'''
print dir(est)
# One row per regressor: coefficient, p-value, t-value and 95% CI bounds.
results_df = pd.DataFrame({"pvals":est.pvalues,
                           "coeff":est.params,
                           "conf_lower":est.conf_int()[0],
                           "conf_higher":est.conf_int()[1],
                           'tvalues':est.tvalues
                           })
#Reordering...
results_df = results_df[["coeff","tvalues","pvals","conf_lower","conf_higher"]]
print results_df
# +
""" plot country coefficients"""
df_iso2 = pd.read_csv('all_portions.csv', encoding='utf-8').set_index('iso2')
#print df_centroids.get_value('TH','latitude')
print df_iso2
cols_dict = {
'OTHERX':'#7700a3',
'US':'#00ffcb',
'LAM':'#ffb200',
'OECD_EUR':'#0021ff',
'OECD_PAC':'#00cbff',
'OECD_AMX':'#0f0066',
'AFRICA':'#00ff08',
'ME':'#3ea535',
'CN':'#ff0043',
'IN':'#ff00d4',
'TE':'#6b5900'
}
#results_df = results_df.merge(df_iso2[['SIPS_REGION']], how='left')
results_df['SIPS_REGION'] = df_iso2['SIPS_REGION']
results_df['color'] = np.nan
results_df['color'] = results_df[pd.notnull(results_df.SIPS_REGION)].apply(lambda row: cols_dict[row['SIPS_REGION']], axis=1)
#results_df['color'] = results_df['color'].replace({'color',cols_dict})
print results_df
# +
# Bar charts of per-country coefficients (top) and t-statistics (bottom),
# grouped by region and sorted by coefficient within each region.
f, axarr = plt.subplots(2, sharex=True, figsize=(16,9))
#axarr.set_yscale('log')
bars_df = results_df[pd.notnull(results_df.SIPS_REGION)]
bars_df = bars_df.groupby('SIPS_REGION').apply(pd.DataFrame.sort_values, 'coeff')
print bars_df[:]
axarr[0].bar(range(len(bars_df)),
             bars_df.coeff,
             color=bars_df['color']
            )
axarr[1].bar(range(len(bars_df)),
             bars_df.tvalues,
             color=bars_df['color']
            )
axarr[0].set_xticks([])
axarr[0].set_xticklabels([])
axarr[0].set_ylabel(r'$\alpha$ Coefficient')
axarr[1].set_ylabel('t-statistic')
# Build a color-patch legend, one entry per region.
patches = []
labels=[]
for k,v in cols_dict.items():
    labels.append(k)
    patches.append(mpatches.Patch(color=v))
f.legend(patches, labels, loc='center', bbox_to_anchor=(0.5,0.93), ncol=len(cols_dict))
f.savefig('./figures/OLS_countries.png')
plt.show()
# +
# Same two-panel bar chart for the non-country regressors
# (intercept, DATE, lagged influence), labelled with Greek letters.
f, axarr = plt.subplots(2, sharex=True, figsize=(4,9))
#axarr.set_yscale('log')
bars_df = results_df[pd.isnull(results_df.SIPS_REGION)]
print bars_df
axarr[0].bar(range(len(bars_df)),
             bars_df.coeff,
             color='gray'
            )
axarr[1].bar(range(len(bars_df)),
             bars_df.tvalues,
             color='gray'
            )
axarr[0].set_xticks(range(len(bars_df)))
axarr[0].set_xticklabels([r'$\epsilon$',r'$\gamma$',r'$\beta$'])
axarr[1].set_xticklabels([r'$\epsilon$',r'$\gamma$',r'$\beta$'])
axarr[0].set_ylabel(r'$\beta$ Coefficient')
axarr[1].set_ylabel('t-statistic')
# Two-sided 5% critical value for the t-distribution at these dof.
axarr[1].axhline(1.962, linestyle='--', color='black')
axarr[1].text(2,3,'t_crit=1.962', horizontalalignment='center')
f.savefig('./figures/OLS_other.png')
plt.show()
# -
| Data_OLS_New.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy
import sys
import nmslib
import time
import math
from sklearn.neighbors import NearestNeighbors
from sklearn.model_selection import train_test_split
print(sys.version)
print("NMSLIB version:", nmslib.__version__)
P=3 # p for an L_p metric
# Just read the data (dense 128-d vectors, whitespace-separated text)
all_data_matrix = numpy.loadtxt('../../sample_data/final128_10K.txt')
# Create a held-out query data set (10% of rows become queries)
(data_matrix, query_matrix) = train_test_split(all_data_matrix, test_size = 0.1)
print("# of queries %d, # of data points %d" % (query_matrix.shape[0], data_matrix.shape[0]) )
# +
# Set index parameters
# These are the most important onese
M = 15
efC = 100
num_threads = 4
index_time_params = {'M': M, 'indexThreadQty': num_threads, 'efConstruction': efC, 'post' : 0,
'skip_optimized_index' : 1 # using non-optimized index!
}
# -
# Number of neighbors
K=100
# Space name should correspond to the space name
# used for brute-force search
space_name='lp'
space_params={'p':P}
# Initialize the library, specify the space, the type of the vector and add data points
index = nmslib.init(method='hnsw', space=space_name, space_params=space_params,
                    data_type=nmslib.DataType.DENSE_VECTOR)
index.addDataPointBatch(data_matrix)
# Create an index (and time the construction)
start = time.time()
index.createIndex(index_time_params)
end = time.time()
print('Index-time parameters', index_time_params)
print('Indexing time = %f' % (end-start))
# Setting query-time parameters: efS = search-time beam width
efS = 100
query_time_params = {'efSearch': efS}
print('Setting query-time parameters', query_time_params)
index.setQueryTimeParams(query_time_params)
# Querying: batch kNN over all held-out queries, timed.
query_qty = query_matrix.shape[0]
start = time.time()
nbrs = index.knnQueryBatch(query_matrix, k = K, num_threads = num_threads)
end = time.time()
print('kNN time total=%f (sec), per query=%f (sec), per query adjusted for thread number=%f (sec)' %
      (end-start, float(end-start)/query_qty, num_threads*float(end-start)/query_qty))
# +
# Computing gold-standard data
print('Computing gold-standard data')
start = time.time()
sindx = NearestNeighbors(n_neighbors=K, metric='minkowski', p=P, algorithm='brute').fit(data_matrix)
end = time.time()
print('Brute-force preparation time %f' % (end - start))
start = time.time()
gs = sindx.kneighbors(query_matrix)
end = time.time()
print('brute-force kNN time total=%f (sec), per query=%f (sec)' %
(end-start, float(end-start)/query_qty) )
# -
# Finally computing recall: the fraction of true K nearest neighbors the
# approximate index recovered, averaged over all queries.
recall = 0.0
for gold_row, found in zip(gs[1], nbrs):
    gold = set(gold_row)
    recall += float(len(gold & set(found[0]))) / len(gold)
recall = recall / query_qty
print('kNN recall %f' % recall)
# Save a meta index and the data (save_data=True also serializes the vectors)
index.saveIndex('dense_index_nonoptim.bin', save_data=True)
# Re-initialize the library, specify the space, the type of the vector.
newIndex = nmslib.init(method='hnsw', space=space_name, space_params=space_params, data_type=nmslib.DataType.DENSE_VECTOR)
# Re-load the index and the data
newIndex.loadIndex('dense_index_nonoptim.bin', load_data=True)
# +
# Setting query-time parameters and querying the re-loaded index with the
# same queries, to verify the round-trip preserved search quality.
print('Setting query-time parameters', query_time_params)
newIndex.setQueryTimeParams(query_time_params)
query_qty = query_matrix.shape[0]
start = time.time()
new_nbrs = newIndex.knnQueryBatch(query_matrix, k = K, num_threads = num_threads)
end = time.time()
print('kNN time total=%f (sec), per query=%f (sec), per query adjusted for thread number=%f (sec)' %
      (end-start, float(end-start)/query_qty, num_threads*float(end-start)/query_qty))
# -
# Finally computing recall for the new result set (re-loaded index); should
# match the recall of the original index.
hit_fractions = [
    float(len(set(gs[1][i]).intersection(new_nbrs[i][0]))) / len(set(gs[1][i]))
    for i in range(query_qty)
]
recall = sum(hit_fractions, 0.0) / query_qty
print('kNN recall %f' % recall)
| python_bindings/notebooks/search_vector_dense_lp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:immune-evolution]
# language: python
# name: conda-env-immune-evolution-py
# ---
# # Imports
# %load_ext autoreload
# %autoreload 2
# +
import itertools
import math
import pandas as pd
import json
import os
import glob
from tqdm import tqdm
import seaborn as sns
import screed
import sklearn
# %matplotlib inline
# -
from path_constants import QFO_EUKARYOTA_FOLDER, ORPHEUM_BENCHMARKING_FOLDER
# # Get ids of "good" reads to use for classification
# ## Subset to only reads from complete protein sequences -- *will not change with input species*
#
# could maybe write these ids to file
# +
# UniProt accessions of BUSCO-mammalia proteins whose sequence begins with
# methionine (i.e. complete at the N-terminus).  Accession is parsed from the
# FASTA header's "db|ACCESSION|name" first token.
busco_mammalia_uniprot_protein_starts_with_m = []
# Use only the busco mammalia proteins
with screed.open(
    os.path.join(QFO_EUKARYOTA_FOLDER, "UP000005640_9606__busco_mammlia_odbv10.fasta")
) as records:
    for record in records:
        if record["sequence"].startswith("M"):
            busco_mammalia_uniprot_protein_starts_with_m.append(
                record["name"].split()[0].split("|")[1]
            )
print(
    "busco_mammalia_uniprot_protein_starts_with_m",
    len(busco_mammalia_uniprot_protein_starts_with_m),
)
# Accessions whose coding DNA starts with the canonical ATG start codon.
uniprot_dna_starts_with_atg = []
with screed.open(
    os.path.join(QFO_EUKARYOTA_FOLDER, "UP000005640_9606_DNA.fasta")
) as records:
    for record in records:
        if record["sequence"].startswith("ATG"):
            uniprot_dna_starts_with_atg.append(record["name"].split()[0].split("|")[1])
print("uniprot_dna_starts_with_atg", len(uniprot_dna_starts_with_atg))
# -
# Accessions passing BOTH filters: protein starts with M AND DNA starts with
# ATG; this is the "good reads" whitelist used downstream.
busco_mammalia_startswith_m__and__dna_startswith_atg = set(busco_mammalia_uniprot_protein_starts_with_m).intersection(uniprot_dna_starts_with_atg)
len(busco_mammalia_startswith_m__and__dna_startswith_atg)
# ## Read gold standard reading frame file
# +
# Gold-standard reading frames, indexed by "<read_id>__frame=<frame>".
parquet = os.path.join(ORPHEUM_BENCHMARKING_FOLDER, 'true_reading_frames.parquet')
true_coding_frame = pd.read_parquet(parquet)
# Create just a series (single column) from this
true_coding_frame = true_coding_frame['is_coding']
true_coding_frame.head()
# -
# # Read coding score csvs
# ## Add read_id_frame and is_coding for computing metrics
def add_read_id_frame_and_is_coding(df):
    """Annotate *df* (in place) with a combined read/frame key and a coding flag.

    Adds two columns:
    - ``read_id_frame``: "<read_id>__frame=<translation_frame>"
    - ``is_coding``: True exactly where ``category`` equals "Coding"

    Returns the same dataframe so the call can be chained.
    """
    read_ids = df.read_id.astype(str)
    frames = df.translation_frame.astype(str)
    df["read_id_frame"] = read_ids + "__frame=" + frames
    df["is_coding"] = df["category"].eq("Coding")
    return df
# ## Get human busco mammalia reads
# +
# UniProt IDs of the human BUSCO-mammalia proteins present in the QfO set;
# used below to restrict reads to this gene set.
csv = os.path.join(
    QFO_EUKARYOTA_FOLDER,
    "busco_mammalia_human_uniprot_ids_in_qfo.csv",
)
human_busco_mammalia = pd.read_csv(csv)
print(human_busco_mammalia.shape)
human_busco_mammalia.head()
# -
# ## Function to read coding scores CSVs consistently
def read_translate_csvs(globber, species=None,
                        human_busco_mammalia=human_busco_mammalia,
                        uniprot_dna_starts_with_atg=uniprot_dna_starts_with_atg,
                        true_coding_frame=true_coding_frame):
    """Load all orpheum 'translate' coding-score csvs matched by *globber*
    into one sorted dataframe indexed by ``read_id_frame``.

    The keyword defaults bind the module-level dataframes at definition time.
    NOTE(review): one query below also reads the module-level global
    ``busco_mammalia_startswith_m__and__dna_startswith_atg`` directly, unlike
    the other filters which are injected as parameters — confirm intentional.
    """
    dfs = []
    for filename in tqdm(glob.glob(globber)):
        # Parent directory name encodes alphabet and k-size as
        # "<x>-<alphabet>_<y>-<ksize>", split apart below.
        dirname = os.path.dirname(filename)
        parent_dir = os.path.basename(dirname)
        molecule_, ksize_ = parent_dir.split('_')
        molecule = molecule_.split('-')[1]
        ksize = int(ksize_.split('-')[1])
        df = pd.read_csv(filename)
        df['alphabet'] = molecule
        df['ksize'] = ksize
        df['species'] = species
        df = add_read_id_frame_and_is_coding(df)
        # Get only reads generated from busco mammalia data
        df['protein_id'] = df['read_id'].map(lambda x: x.split('/')[1].split(';')[0])
        df['uniprot_id'] = df['protein_id'].str.split('|').str[1]
        df = df.query('uniprot_id in @human_busco_mammalia.source__uniprot_id')
        # Get only reads from transcripts starting with ATG and no Ns, and only busco mammalia
        df = df.query('uniprot_id in @busco_mammalia_startswith_m__and__dna_startswith_atg')
        # Only use reads and frames in the gold standard data
        df = df.query('read_id_frame in @true_coding_frame.index')
        # Set the read id and frame as the row names
        df = df.set_index('read_id_frame')
        dfs.append(df)
    concatenated = pd.concat(dfs, ignore_index=False)
    concatenated = concatenated.sort_index()
    return concatenated
# ## Read Busco mammalia coding scores
# %%time
dfs = []
# NOTE(review): ORPHEUM_PIPELINE_RESULTS_FOLDER is not imported above (only
# ORPHEUM_BENCHMARKING_FOLDER is) — presumably defined in path_constants;
# confirm, otherwise this cell raises NameError.
globber = os.path.join(
    ORPHEUM_PIPELINE_RESULTS_FOLDER, "nf-predictorthologs--busco-mammalia-*"
)
for subfolder in glob.glob(globber):
    basename = os.path.basename(subfolder)
    print(basename)
    # The species name is the suffix after the final "--" in the folder name.
    species = basename.split("--")[-1]
    print(species)
    csvs = os.path.join(subfolder, "translate", "*", "*.csv")
    df = read_translate_csvs(
        csvs,
        species=species,
        human_busco_mammalia=human_busco_mammalia,
        uniprot_dna_starts_with_atg=uniprot_dna_starts_with_atg,
        true_coding_frame=true_coding_frame,
    )
    dfs.append(df)

coding_scores = pd.concat(dfs)
print(coding_scores.shape)
coding_scores.head()
# Sanity check: each per-folder dataframe should contain exactly one species.
for d in dfs:
    print(set(d['species']))

for d in dfs:
    d.species.unique()

# How many rows each species contributed to the concatenated table.
coding_scores.species.value_counts()

# ### Write concatenated n frames per read, categorization to file
coding_scores.to_parquet(
    os.path.join(ORPHEUM_PIPELINE_RESULTS_FOLDER, "coding_scores.parquet")
)
| notebooks/figure_2_04_read_orpheum_translate_mammalia_benchmarking_results.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="jxK1_8f1dvrc"
# <div>
# <img src="https://drive.google.com/uc?export=view&id=1vK33e_EqaHgBHcbRV_m38hx6IkG0blK_" width="350"/>
# </div>
#
# #**Artificial Intelligence - MSc**
# ET5003 - MACHINE LEARNING APPLICATIONS
#
# ###Instructor: <NAME>
# ###ET5003_Etivity-1
# + id="LqXD_IwUQuBF" cellView="form"
#@title Current Date
Today = '2021-08-22' #@param {type:"date"}

# + id="uzDKau31OjVO" cellView="form"
#@markdown ---
#@markdown ### Enter your details here:
Student_ID = "" #@param {type:"string"}
Student_full_name = "" #@param {type:"string"}
#@markdown ---

# + id="r39xGZckTpKx" cellView="form"
#@title Notebook information
Notebook_type = 'Example' #@param ["Example", "Lab", "Practice", "Etivity", "Assignment", "Exam"]
Version = 'Draft' #@param ["Draft", "Final"] {type:"raw"}
Submission = False #@param {type:"boolean"}
# NOTE: the '#@title'/'#@param' markers above are Colab form annotations;
# the values are normally edited through the Colab form UI, not in the code.
# + [markdown] id="80m304lUefG4"
# ## MNIST dataset
# + [markdown] id="Bs8mHGcidHSa"
#
#
# The MNIST database is a dataset of handwritten digits that has been and is extensively used in machine learning. There are $10$ classes, each image is $28\times28$ pixels and, therefore, each input is $x_i\in\mathbb{R}^{784}$.
# + [markdown] id="ailycCq5epj2"
# ## Task
# + [markdown] id="a-yNAxhUemjM"
# You have to extend the code to manage any arbitrary number of classes, in other words you have to implement a general-recipe multinomial logistic classifier and Bayesian multinomial logistic classifier.
#
# You must then select 3 digits at random and perform the following task.
#
# 1. Your goal is to use Bayesian multinomial logistic regression (as in the road-sign notebook) to solve this classification problem.
#
# 2. You can downsize the training dataset (e.g., 40% training and 60%testing) if the computation of the posterior takes too much time in your computer.
#
# 3. Use the posterior uncertainty to detect the instances (digits) in the test set that are hard to classify and remove them from the test-set.
#
# 4. Then you need to compute again the accuracy of the general-recipe logistic regression on the remaining (non-difficult) instances and comment on the result.
#
# 5. In practice, the task is to use uncertainty estimation to detect the difficult instances in the test-set. This is equivalent to refuse to classify all high-uncertainty instances or, in other words, when we are uncertain we say "I don't know" and we do not return any class. In this way, you will learn how uncertainty can be used to make safer decisions, by detecting the instances that are difficult to classify.
#
# + [markdown] id="nMRKRTQZe5fW"
# ## Libraries
# + id="IxusAui7AX_f"
# Suppressing Warnings:
import warnings
warnings.filterwarnings("ignore")  # suppress all library warnings for cleaner notebook output
# + id="MQOfGMQpdHSb"
# Import libraries
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import scipy.optimize as optimize
from scipy.special import erf
import pandas as pd
import numpy as np
import seaborn as sns
from tqdm import tqdm
from skimage.io import imread, imshow
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import matplotlib
# %matplotlib inline
import arviz as az
from scipy.io import loadmat
import pymc3 as pm
import random
from IPython.display import HTML
import pickle
import theano as tt
import cv2
from sklearn.utils import shuffle
from skimage.color import rgb2gray
# + id="P5-qMSjpAQ-9"
# Setting a seed:
np.random.seed(123)  # fixed seed so the random digit/class sampling below is reproducible
# + [markdown] id="r4hSuwkUfVQb"
# ## Dataset
# + [markdown] id="w99Pc66YdHSd"
# ### Loading the MNIST dataset
# + colab={"base_uri": "https://localhost:8080/"} id="CYFWAbXVzynp" outputId="a18b4283-3b97-4eb3-e0ed-d2f241176139"
from google.colab import drive

# Mount Google Drive so the MNIST csv files are reachable from this runtime.
drive.mount('/content/drive')

# + colab={"base_uri": "https://localhost:8080/"} id="g4rCnS4vdHSd" outputId="87b5c3c5-8f1f-4133-f7c6-808adc4d73e2"
# Path, copy the path from your Drive
Path = '/content/drive/MyDrive/Colab Notebooks/Enrique/Data/'

# MNIST Data
train_data = Path + 'mnist_train.csv'
test_data = Path + 'mnist_test.csv'

# train data: the "label" column is the digit; every other column is a pixel value
df_train = pd.read_csv(train_data)
X_train = df_train.drop("label",axis=1).values
y_train = df_train.label.values
print(X_train.shape)

# test data
df_test = pd.read_csv(test_data)
X_test = df_test.drop("label",axis=1).values
y_test = df_test.label.values
print(X_test.shape)

# + colab={"base_uri": "https://localhost:8080/"} id="O2ubJ_WoAqBh" outputId="d9445837-a9ea-4b7e-a2df-180748492c6b"
# Normalizing the Inputs: scale pixel intensities from [0, 255] down to [0, 1]
X_train = X_train/255
X_test = X_test/255

# Printing the new input range of values:
minv = np.min(X_train)
maxv = np.max(X_train)
print(minv,maxv)
# + [markdown] id="SR6HpkWndHSe"
# ### Description of Data:
# + colab={"base_uri": "https://localhost:8080/"} id="sibN1Vv1dHSf" outputId="1003de54-5653-47cf-a6ce-67e7abaa3768"
# Number of examples
n_train = len(X_train)
n_test = len(X_test)

# Shape of a (flattened) digit image: number of pixel features per example
image_shape = X_train.shape[1]

# unique classes/labels in the training dataset.
alltotal = set(y_train)
n_classes = len(alltotal)

print("Number of Training examples =", n_train)
print("Number of Test examples =", n_test)
print("Image input shape =", image_shape)
print("Number of classes =", n_classes)
# + [markdown] id="6HQDSvrRKZF6"
# ### Class Distribution:
# + id="XG8GdlpBKdCt"
# Histogram of how many training examples each digit class contains.
fig, ax = plt.subplots()
ind = np.arange(n_classes)
n, bins, patches = ax.hist(y_train, n_classes)
ax.set_xlabel('classes')
ax.set_ylabel('counts')
ax.set_title(r'Histogram of Digit images')
plt.show()
# + [markdown] id="EyLWw3nsLCtk"
# ## Downsampling
# + [markdown] id="2U1lFEwhLKBf"
# ### Randomly selecting 3 of the 10 Digit Classes
# + id="0EeRZZWdLRPT"
# NOTE(review): this cell is a student template — the blank right-hand sides
# below are intentional fill-ins and the cell will not run until completed.
# We select the number of Classes we want:
n_classes =

# Empty list to append the random digit classes we select:
classes =

# We select 3 digits at random and make sure they are unique:
while len(classes) < :
    # Randomly drawing a digit from 0-9:
    num2choose = np.random.randint(0,10)
    # Append the digit if it's not already in our list of classes:
    # NOTE(review): 'label' is undefined here — presumably this should test
    # 'num2choose'; confirm when completing the template.
    if label not in classes:
        classes.append(num2choose)

# Sorting the Classes smallest to largest
classes.___

# print classes selected
classes
# + id="2M8R5NqKMB_M"
# The number of instances we'll keep for each of our 3 digits:
inst_class =

# NOTE(review): template blank — 'inputs' and 'labels' are never initialized;
# the lists (e.g. inputs = [], labels = []) must be created before this loop.
# Loop to randomly sample the instances for each digit:
for r in classes:
    imgs = X_train[np.where(y_train==r)[0],:]
    # Shuffle this digit's images, then keep the first inst_class of them.
    inputs.append(imgs[np.random.permutation(imgs.shape[0]),:][0:inst_class,:])
    labels.append(np.ones(inst_class)*r)

# Shaping inputs and labels in the right format
X_train = np.vstack(inputs).astype(np.float64)
y_train = np.hstack(labels)
# + [markdown] id="_6-YHrQQMicy"
# New Classes Distribution
# + id="RA300COaMxWm"
# new histogram
# + id="eFgP4xugMvJm"
# plot digits
def plot_digits(instances, images_per_row=5, **options):
    """Render a batch of flattened 28x28 digit vectors as one tiled image.

    *instances* is indexable/sized, each element reshapeable to 28x28;
    extra keyword *options* are forwarded to ``plt.imshow``.
    """
    size = 28
    per_row = min(len(instances), images_per_row)
    grids = [inst.reshape(size, size) for inst in instances]
    num_rows = (len(instances) - 1) // per_row + 1
    # Pad the final row with blank pixels so every row has equal width.
    pad = num_rows * per_row - len(instances)
    grids.append(np.zeros((size, size * pad)))
    row_strips = []
    for r in range(num_rows):
        strip = np.concatenate(grids[r * per_row:(r + 1) * per_row], axis=1)
        row_strips.append(strip)
    tiled = np.concatenate(row_strips, axis=0)
    plt.imshow(tiled, cmap='gist_yarg', **options)
    plt.axis("off")
# + id="zeEG-LGOM4fJ"
# Show a few instances from each Digit:
plt.figure(figsize=(8,8))

# Selecting a few label indices from each of the 3 classes to show:
# NOTE(review): template blank — 'label_indices' (row indices into X_train)
# must be defined here before the call below will run.

# Plotting 'original' image
plot_digits(X_train[label_indices,:],images_per_row=9)
plt.title("Original", fontsize=14)
# + [markdown] id="FsAOnOcNNG_V"
# ### Splitting the Training data into both Training and Validation Sets:
#
# - Although this is the Training set, we can still set aside some samples (for instance 20%) of the 1,500 instances we have for Model Validation purposes.
#
#
# - With that Validation Set, we can then select the amount of Uncertainty we are happy with from our Model to use out of sample on other unseen data.
#
#
# - We can then test out how well our decision performs on the Test Set that we put aside earlier.
# + id="YdsmyVAtPXNn"
### Split the dataset into training and validation sets
# choose the fraction of your validation data from the training set
w = 0.20
# Hold out 20% of the (already downsampled) training data for validation.
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=w, random_state=0)

# Shuffling the training instances around to randomize the order of inputs to the model:
X_train, y_train = shuffle(X_train, y_train)
# + id="qXwJwP0iPxhi"
# print shape of your validation and training set
# + [markdown] id="aOroY1QoP8DY"
# ### Encoding the Class labels for the Probabilistic ML Model:
#
# This is an example:
#
# - **[1,0,0]** for first digit
# - **[0,1,0]** for second digit
# - **[0,0,1]** for third digit
# + [markdown] id="rjUaqWTqQIcp"
# ### General-Recipe ML
# + id="QzgdivxfQNv5"
# model
# NOTE(review): template blanks — instantiate the classifier (e.g. the
# LogisticRegression imported above) and produce predictions on X_val before
# the lines below will run.
model_log =

# Classification:
y_pred_log =
y_pred_logi_prob = model_log.predict_proba(X_val)

# Maybe taking the maximum probability
# in any of the classes for each observation

# Computing the Accuracy:
accuracy_score(y_pred_log, y_val)
# + [markdown] id="3uQG6JsOQxH5"
# ### Probabilistic Multinomial Logistic Regression:
# + id="W3jzczJzRAtT"
# + [markdown] id="irlmUNw7Q5YL"
# The Multinomial Logistic Regression has some parameters:
#
# - $\alpha$, which is the intercept term:
#
# - $\beta$, which is a vector of coefficients which give a weighting to the importance of each input feature:
#
# + id="1o7mbKWmRhz5"
# + id="aj6Uzc05Rhtr"
# + id="2MFH4gwlRhrB"
# + id="WNmJvYc4Rho7"
# + id="XXh5GXJsRhmr"
# + [markdown] id="NcshsLOGRPrk"
# ## Summary
# + [markdown] id="XTc4pYKGRR60"
# Populate this section with all of your findings and comments from the discussion with your peers.
| Week-1/Etivity_1_template.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (myenv)
# language: python
# name: myenv
# ---
# # KNN Classifier
# ### Dataset
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
#Load data
iris = pd.read_csv('Iris.csv')

#data cleaning: the "Id" column carries no signal, so drop it
iris.drop(columns="Id",inplace=True)

# +
#features and labels: first four columns are measurements, fifth is the species
X=iris.iloc[:,0:4].values
y=iris.iloc[:,4].values

#Train and Test split (80/20, fixed seed for reproducibility)
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=0)
# -

print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
''' using a L2 distance '''
def dist(a, b):
    """Euclidean (L2) distance between vectors *a* and *b*."""
    return np.linalg.norm(a - b, 2)

''' A K nearest Classifier Parameterized by K,X_train,y_train '''
''' returns the class with max votes '''
def KNN_Classifier(K, X_train, y_train, input_x):
    """Predict the label of *input_x* by majority vote of its K nearest neighbours.

    Parameters
    ----------
    K : int
        Number of neighbours that vote.
    X_train : array-like, shape (n_samples, n_features)
        Training points.
    y_train : sequence
        Labels aligned with the rows of X_train.
    input_x : array-like
        Single query point.

    Returns
    -------
    The majority label among the K closest training points. Ties (in distance
    and in votes) are broken in favour of the earlier training point, matching
    the original strict-">" scan.
    """
    from collections import Counter

    # Distance from every training point to the query. sorted() is stable, so
    # equidistant points keep their original relative order. (No per-point
    # np.copy of input_x is needed: dist() does not mutate its arguments.)
    neighbours = sorted(
        ((dist(x, input_x), label) for x, label in zip(X_train, y_train)),
        key=lambda pair: pair[0],
    )

    # Vote among the K nearest. Counter.most_common breaks count ties by
    # first insertion, i.e. the label seen first in distance order wins —
    # the same rule as the original manual vote scan.
    votes = Counter(label for _, label in neighbours[:K])
    return votes.most_common(1)[0][0]
# Evaluate test-set accuracy for each candidate K (odd values avoid 50/50 ties).
K_vals = [1,3,5,7,9,11,13,15]
Accuracy = []  # fraction of correct predictions per K
A = []         # raw count of correct predictions per K
for i in range(len(K_vals)):
    k = K_vals[i]
    #print(k)
    #print("")
    d = len(X_test)
    acc = 0
    for j in range(d):
        input_x = X_test[j]
        res_y = KNN_Classifier(k,X_train,y_train,input_x)
        #print(res_y)
        #print(y_test[j])
        if(res_y == y_test[j]):
            acc += 1
    A.append(acc)
    Accuracy.append(acc/d)
    #print(acc)
    #print(d)
    #print(" ")

print(Accuracy)
print(A)
# +
import matplotlib.pyplot as plt

#fig = plt.figure(figsize = (30,30))
ax = plt.gca()
#ax.set_xlim([xmin, xmax])
ax.set_ylim([0, 1.1])  # accuracy is a fraction, so pin the y-axis just above 1

# Accuracy as a function of the number of neighbours K.
plt.scatter(K_vals,Accuracy)
plt.xlabel("K")
plt.ylabel("Accuracy")
# -
| Assignment2/Assignment_2/KNN classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This program gives the user 3 different cloud source data platforms to choose from:
# finviz
# stocktwits
# reddit
# +
# File name: preprocessingProduction
import pandas as pd
import numpy as np
#import nltk
#from nltk.sentiment.vader import SentimentIntensityAnalyzer
#import nltk.classify
#from nltk import NaiveBayesClassifier
import os
import re
from bs4 import BeautifulSoup
import sys
import time
#analyzer = SentimentIntensityAnalyzer()
#from nltk.corpus import stopwords
# -
# ************************
# Table of Contents
# #10* initializes the dataframe "df" and imports the csv into df;
# #20* calls getdata to import the csv into the dataframe, 'dfAPI'
# #30 removes any duplicate records; duplicate records imply bot records
# #40 finds certain words in the strings ('body') and deletes the entire record.
# #50* Vader sentiment analyzer
# #60* creates a new column called 'compound_bin' from the raw_compound scores
# #70* converts the 'raw_compound' data to either a 1, 0 or -1. 1 if nltk sentiment number are >= .1; 0 if -.1 < x < .1
# #80* Converts sentiment ratings into numerical values and put the value into 'sentiment_number'.
# #90 Determines the percent correct and incorrect for the Vader sentiment values vs the stocktwits sentiment values
# #100 counts how many "None" sentiment values are there for the stocktwits sentiment value
# #110 This removes every other "None" record to reduce the total number of "None" rating. This is to make
# #115 Provides statistics on sentiments; bullish, none or bearish.
# #120 Allows user to manually input value when stocktwits sentiment value is "None"
# #130 Loads a csv file into the df dfAPI and print out the first 21 records
# #140 This will change the modified rating to the nltk rating only when they are opposite to see if it improves
# the accuracy number
# #440 sets up stopword removal; returns stopWords
# #470 creates a list of new stopwords and then adds them to the set provided by nltk
# Note: it is case sensitive; Input is the nltk stopword list ("stopWords")
# #490 Checks to see of the words were removed from the stopWords list.
# inputs: stopword list: output from def remove_from_stopwords(sw); the word to be removed
# #510 Removes stopwords from all the "body" text (tweets); to do this it must tokenize the string which means it must parse
# the string into individual words. It then compares the words with the words in the stopwords list and if there is not
# match it puts the word into the "wordsFiltered" list. It keeps appending to the list until all of the words are checked.
# It then joins the individual words back into a string.
# There is a difference between "deep" copy and "shallow" copy. "Deep" copy make a copy where the index and data are
# separate from the original. "Shallow" copy is like a pointer where the two df share a common index and data
# dfScrubbed = df #This is a shallow copy
# #550 converts the scrubbed_compound scores into a 1 significant figure integer from a float number; rounding up
# this is only needed if you are going to uses the 'scrubbed_compound' value as the label.
# #550 converts the 'scrubbed_compound' (column 10) data to either a 1, 0 or -1.
# if the nltk sentiment number is >= .1; 0 if -.1 < x < .1 and -1 if <= -.1, and overwrites the value in compound_bin
# creates a new column called 'compound_bin' from the raw_compound scores
# #640 compares the first record (index = 0) raw data ("body" column) with scrubbed (stopwords removed) data
# inputs: df - original df; dfs - scrubbed df (stopwords removed)
# #650 Loads and combines two different dataframes in df; this is to combine two input datasets where the 'none'
# values have been modified; this is to see if increased records will increase the accuracy of the model.
# #660 Writes a csv file
# METHODS
# +
#import re
class SentimentAnalysisPreprocessing():
import nltk
import re
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
nltk.download('wordnet') #not in original code
# Lemmatize the text
lemmer = WordNetLemmatizer()
    def __init__(self):
        # Base directory holding the input csv files.
        # NOTE(review): hard-coded, machine-specific Windows path — adjust per machine.
        self.relevant_path = 'C:/Users/User/Documents/Personal/retooling/projects/Kokoro/Parsers'
    # displays a list of the files that have a csv suffix
    def list_dir_files(self):
        """Print self.relevant_path and the names of every '*.csv' file in it."""
        # https://clay-atlas.com/us/blog/2019/10/27/python-english-tutorial-solved-unicodeescape-error-escape-syntaxerror/?doing_wp_cron=1618286551.1528689861297607421875
        #need to change \ to /
        import os
        included_extensions = ['csv']
        file_names = [fn for fn in os.listdir(self.relevant_path) # uses os.listdir to display only .csv files
                      if any(fn.endswith(ext) for ext in included_extensions)]
        print('Path: ', self.relevant_path)
        for f in file_names:
            print(f)
# 10 initializes the dataframe "df" and imports the csv into df;
# the argument is the name/address of the file.
# https://stackoverflow.com/questions/33440805/pandas-dataframe-read-csv-on-bad-data
def getData(self):
import pandas as pd
d = SentimentAnalysisPreprocessing()
df = pd.DataFrame() # defines df1 as a dataframe
d.list_dir_files()
filename = input('What is the name of the csv file you want to get? ')
self.filename = self.relevant_path + '/' + filename
df = pd.read_csv(self.filename, header = 0)
self.df = df
return df
# 30 removes any duplicate records; duplicate records imply bot records
    def remove_duplicates(self):
        """Drop exact duplicate rows from self.df (duplicates suggest bot posts).

        NOTE: '# %time' lines are jupytext-escaped notebook magics — the
        drop_duplicates assignment only executes when run as a notebook cell.
        """
        print('\nDropping duplicates ...')
        # %time self.df = self.df.drop_duplicates()
        self.df = self.df.reset_index(drop = True) # resets the index
        return self.df
# remove HTTP tags
    def remove_http_tags(self):
        """Strip @handles, URLs and non-alphanumeric characters into the
        *_processed columns ('# %time' magics execute only in a notebook).
        """
        import pandas as pd
        import re
        import time
        print('\nRemoving http tags ...')
        # The regex removes @mentions, '://' URLs, and any character outside 0-9A-Za-z/space/tab.
        # %time self.df['blog_title_processed'] = self.df['blog_title'].map(lambda x : ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)"," ",x).split()))
        # %time self.df['comments_processed'] = self.df['comments'].map(lambda x : ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)"," ",x).split()))
        return self.df
# coverts to all lower case
    def lower_case(self):
        """Lower-case the *_processed columns ('# %time' magics execute only in a notebook)."""
        print('\nConverting to lower case ...')
        # %time self.df['blog_title_processed'] = self.df['blog_title_processed'].map(lambda x: x.lower())
        # %time self.df['comments_processed'] = self.df['comments_processed'].map(lambda x: x.lower())
        return self.df
# removes all punctuation
    def remove_punctuation(self):
        """Delete punctuation (anything not word/space) from the *_processed
        columns ('# %time' magics execute only in a notebook).
        """
        print('\nRemoving punctuation ...')
        # %time self.df['blog_title_processed'] = self.df['blog_title_processed'].map(lambda x: re.sub(r'[^\w\s]', '', x))
        # %time self.df['comments_processed'] = self.df['comments_processed'].map(lambda x: re.sub(r'[^\w\s]', '', x))
        return self.df
# removes unicodes (emojis)
    def remove_unicode(self):
        """Replace non-ASCII characters (e.g. emojis) with spaces in the
        *_processed columns ('# %time' magics execute only in a notebook).
        """
        print('\nRemoving unicode ...')
        # %time self.df['blog_title_processed'] = self.df['blog_title_processed'].map(lambda x : re.sub(r'[^\x00-\x7F]+',' ', x))
        # %time self.df['comments_processed'] = self.df['comments_processed'].map(lambda x : re.sub(r'[^\x00-\x7F]+',' ', x))
        return self.df
    def lemmatize(self): #lemmer must be defined outside of the function and passed in
        """Lemmatize every token of the *_processed columns and drop English
        stopwords ('# %time' magics execute only in a notebook).
        """
        print('\nLemmatizing ...')
        import nltk
        #import re
        #from bs4 import BeautifulSoup
        from nltk.stem import WordNetLemmatizer
        from nltk.corpus import stopwords
        # Local copies shadow the class-level attributes of the same names.
        stop_words = stopwords.words('english')
        #display(stop_words)
        nltk.download('wordnet') #not in original code
        # Lemmatize the text
        lemmer = WordNetLemmatizer()
        # %time self.df['blog_title_processed'] = self.df['blog_title_processed'].map(lambda x : ' '.join([lemmer.lemmatize(w) for w in x.split() if w not in stop_words]))
        # %time self.df['comments_processed'] = self.df['comments_processed'].map(lambda x : ' '.join([lemmer.lemmatize(w) for w in x.split() if w not in stop_words]))
        return self.df
# Remove additional stopwords other than those provided by nltk
    def remove_additional_stopwords(self): #stop_words must be defined outside of the function and passed in
        """Extend the stopword list with domain-specific terms (tickers, numbers,
        symbols, pronouns, ...) and strip all of them from the *_processed
        columns ('# %time' magics execute only in a notebook).

        NOTE(review): the bare name 'stop_words' used below is neither a local
        nor reachable class scope (class attributes are not visible inside
        methods) — this relies on a module-level 'stop_words'; confirm it is
        defined elsewhere in the file, else this raises NameError.
        """
        print('\nRemoving addtional stopwords ...')
        #adds new stopwords to list
        newStopWords = ['a', 'about', 'above', 'across', 'after', 'afterwards']
        newStopWords += ['again', 'against', 'all', 'almost', 'alone', 'along']
        newStopWords += ['already', 'also', 'although', 'always', 'am', 'among']
        newStopWords += ['amongst', 'amoungst', 'amount', 'an', 'and', 'another']
        newStopWords += ['any', 'anyhow', 'anyone', 'anything', 'anyway', 'anywhere']
        newStopWords += ['are', 'around', 'as', 'at', 'back', 'be', 'became']
        newStopWords += ['because', 'become', 'becomes', 'becoming', 'been']
        newStopWords += ['before', 'beforehand', 'behind', 'being', 'below']
        newStopWords += ['beside', 'besides', 'between', 'beyond', 'bill', 'both']
        newStopWords += ['bottom', 'but', 'by', 'call', 'can', 'cannot', 'cant']
        newStopWords += ['co', 'computer', 'con', 'could', 'couldnt', 'cry', 'de']
        newStopWords += ['describe', 'detail', 'did', 'do', 'done', 'down', 'due']
        newStopWords += ['during', 'each', 'eg', 'eight', 'either', 'eleven', 'else']
        newStopWords += ['elsewhere', 'empty', 'enough', 'etc', 'even', 'ever']
        newStopWords += ['every', 'everyone', 'everything', 'everywhere', 'except']
        newStopWords += ['few', 'fifteen', 'fifty', 'fill', 'find', 'fire', 'first']
        newStopWords += ['five', 'for', 'former', 'formerly', 'forty', 'found']
        newStopWords += ['four', 'from', 'front', 'full', 'further', 'get', 'give']
        newStopWords += ['go', 'had', 'has', 'hasnt', 'have', 'he', 'hence', 'her']
        newStopWords += ['here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers']
        newStopWords += ['herself', 'him', 'himself', 'his', 'how', 'however']
        newStopWords += ['hundred', 'i', 'ie', 'if', 'in', 'inc', 'indeed']
        newStopWords += ['interest', 'into', 'is', 'it', 'its', 'itself', 'keep']
        newStopWords += ['last', 'latter', 'latterly', 'least', 'less', 'ltd', 'made']
        newStopWords += ['many', 'may', 'me', 'meanwhile', 'might', 'mill', 'mine']
        newStopWords += ['more', 'moreover', 'most', 'mostly', 'move', 'much']
        newStopWords += ['must', 'my', 'myself', 'name', 'namely', 'neither', 'never']
        newStopWords += ['nevertheless', 'next', 'nine', 'nobody', 'none'] #removed 'no'
        newStopWords += ['noone', 'nor', 'not', 'nothing', 'now', 'nowhere', 'of']
        newStopWords += ['off', 'often', 'on','once', 'one', 'only', 'onto', 'or']
        newStopWords += ['other', 'others', 'otherwise', 'our', 'ours', 'ourselves']
        newStopWords += ['out', 'over', 'own', 'part', 'per', 'perhaps', 'please']
        newStopWords += ['put', 'rather', 're', 's', 'same', 'see', 'seem', 'seemed']
        newStopWords += ['seeming', 'seems', 'serious', 'several', 'she', 'should']
        newStopWords += ['show', 'side', 'since', 'sincere', 'six', 'sixty', 'so']
        newStopWords += ['some', 'somehow', 'someone', 'something', 'sometime']
        newStopWords += ['sometimes', 'somewhere', 'still', 'such', 'system', 'take']
        newStopWords += ['ten', 'than', 'that', 'the', 'their', 'them', 'themselves']
        newStopWords += ['then', 'thence', 'there', 'thereafter', 'thereby']
        newStopWords += ['therefore', 'therein', 'thereupon', 'these', 'they']
        newStopWords += ['thick', 'thin', 'third', 'this', 'those', 'though', 'three']
        newStopWords += ['three', 'through', 'throughout', 'thru', 'thus', 'to']
        newStopWords += ['together', 'too', 'top', 'toward', 'towards', 'twelve']
        newStopWords += ['twenty', 'two', 'un', 'under', 'until', 'up', 'upon']
        newStopWords += ['us', 'very', 'via', 'was', 'we', 'well', 'were', 'what']
        newStopWords += ['whatever', 'when', 'whence', 'whenever', 'where']
        newStopWords += ['whereafter', 'whereas', 'whereby', 'wherein', 'whereupon']
        newStopWords += ['wherever', 'whether', 'which', 'while', 'whither', 'who']
        newStopWords += ['whoever', 'whole', 'whom', 'whose', 'why', 'will', 'with']
        newStopWords += ['within', 'without', 'would', 'yet', 'you', 'your']
        newStopWords += ['yours', 'yourself', 'yourselves'] #provided by Codecademy??
        # additional stopwords:
        newStopWords += ['[screenshot]', 'screenshot', '[screenshot]great', 'screenshot',
                         'the', 'smart', 'yah', 'got', 'nutty', 'moving', 'weeks', 'Got', 'So', 'today', 'Been', 'or']
        newStopWords += ['i', 'you', 'He', 'he', 'she', 'they', 'their', 'it'] # pronouns
        newStopWords += ['amd','nvda', 'tsla', 'goog', 'ba', 'fb', 'googl', 'intc', 'intel', 'csco', 'mu',
                         'smh', 'tsm','aapl', 'csco', 'poetf', 'photonics', 'dd', 'arwr', 't', 'infini', 'amc', 'arl',
                         'gme', 'nio', 'qs', 'msft', 'adbe', 'unh'] # Stock symbols or names
        newStopWords += [] # nouns
        #newStopWords += ['.', '?', '!', ';', ',', "'", '.'] # punctuation
        newStopWords += ['&', '#', '%', '$', '@', '/'] # symbols
        newStopWords += ['41.75', '530.05', '39', 'Two', 'two', 'One', 'one', 'Three', 'three', 'Four', 'four',
                         'Five', 'five', 'Six', 'six', 'Seven', 'seven', 'Eight', 'eight', 'Nine', 'nine', 'Ten',
                         'ten', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '39', ' 270',
                         '270000', '4033477', '244', '16', '399', '800', '270', '000', '60', '74',
                         '1600', '993', '392', '98', '00', '1601'] # numbers
        for w in newStopWords:
            stop_words.append(w)
        #print('stop_words: ', stop_words)
        #removes the stopwords from the column body_processed
        # %time self.df['blog_title_processed'] = self.df['blog_title_processed'].map(lambda x : ' '.join([w for w in x.split() if w not in stop_words]))
        # %time self.df['comments_processed'] = self.df['comments_processed'].map(lambda x : ' '.join([w for w in x.split() if w not in stop_words]))
        return self.df
def writeCsv(self):
import csv
from datetime import date
today = date.today()
filename = self.filename.replace('.csv', '')
self.df.to_csv(self.filename.replace('.csv', '') + '_preprocessed_' + str(today) + '.csv', encoding = 'utf-8', index = False)
print('The csv filename is: ', self.filename.replace('.csv', '') + '_preprocessed_' + str(today) + '.csv')
############################################################
# 40 finds certain words in the strings ('body') and deletes the entire record.
#Note: When the record is deleted the df is re-indexed. The index for the while statement is not so the result is
#that the record right after the deleted record is skipped. To remedy the problem the index (i) for the while statement
#is decremented by one.
#Also, the filtering terms are not case sensitive.
    def filter_records(self, df):
        """Delete every row whose 'body' matches an advertising/spam pattern,
        and return the re-indexed dataframe.

        Matching uses fnmatch-style wildcards. NOTE(review): fnmatch case
        sensitivity is platform-dependent (insensitive on Windows); the
        original comments assume the Windows behaviour — confirm on the
        deployment platform.
        """
        import fnmatch
        data = []
        counter = 0
        advert = ['* sec *', '* daily News *', '*Huge Print*', '* Form *',
                  '*SweepCast*', '*Large Print*', '*Huge Print*', '*8-K*',
                  '*SmartOptions*', '*Big Trade*', '*SEC Form*', '*Notice of Exempt*',
                  '*created_at*', '*stock news*', '*Trading Zones*', '*Entry:*',
                  '*New Article*', '*ooc.bz*', '*http*', 'Huge Trade', 'Trading is easy',
                  'www.', '#wallstreetbets', 'wallstreetbets', 'Huge Trade', '#unitedtraders',
                  'stockbeep.com', 'Big Trade'] # words or phrases whose records are to be removed; It is not case sensitive.
        for a in advert:
            i = 0
            df = df.reset_index(drop = True) # resets the index before each iteration; removes the gaps; resets len(df)
            while i < len(df):
                dat = df.loc[i, ('body')] # 2 represents the 'body' column
                data = [dat] # sets the string from the df into a list for the fnmatch.filter
                #print('index = ', i)
                filtered = fnmatch.filter(data, a) # compares the information in the 'body' column with the 'advert' list; it places the matched items in the 'filtered' variable.
                #https://www.geeksforgeeks.org/fnmatch-unix-filename-pattern-matching-python/
                if len(filtered) != 0: #if returns a True then record needs to be removed
                    counter += 1
                    df = df.drop(df.index[i]) # drops (deletes) the record
                    df = df.reset_index(drop = True) # resets the index; removes the gaps
                    #print('after the record is dropped:', df..log[i,('body')], 'i = ', i)
                    #Note: When the record is dropped there is a change in the 'index' number. after the drop index number
                    #5 becomes index number 4. Since the counter increments one more time it skips the record right after
                    #the record that was just checked. That is why it takes multiple runs to remove all of the target
                    #records. To correct this decrement the index, i, by
                    # one, so the row that slid into position i is re-checked.
                    i -= 1
                i += 1
        df = df.reset_index(drop = True) # resets the index; removes the gaps
        len(df)
        return df
#480 This removes words from the list of stopwords and writes list to csv file
# https://stackoverflow.com/questions/29771168/how-to-remove-words-from-a-list-in-python#:~:text=one%20more%20easy%20way%20to%20remove%20words%20from,%3D%20words%20-%20stopwords%20final_list%20%3D%20list%20%28final_list%29
#new_words = list(filter(lambda w: w not in stop_words, initial_words))
def remove_from_stopwords(self, sw, relevant_path):
WordsToBeRem = ['no']
stopWords = list(filter(lambda w: w not in WordsToBeRem, sw)) #It will retain anyword in sw that is not in WordsToBeRemoved
#converts the stopword list to a df so that it can then be written to a csv file
df_stopwords = pd.DataFrame(stopWords, columns = ['stopwords'])
name_of_csv_file = relevant_path + '/' + 'stopwords.csv'
df_stopwords.to_csv(name_of_csv_file, index = False) #writes stopwords to csv file
#print(stopWords)
return stopWords
#490 Checks to see of the words were removed from the stopWords list.
#inputs: stopword list (sw) and the word to be removed from the so (WordToBeRem):
def check_stopwords(self, sw, WordToBeRem):
r = 0
for w in sw:
#print(w)
if w == WordToBeRem:
print('The word ', w , ' is still in the stopWords list!')
r += 1
if r == 0:
print('It did remove the words from the stopWords list!')
#print(len(stopWords))
#510 Removes stopwords from all the "body" text (tweets); to do this it must tokenize the string which means it must parse
# the string into individual words. It then compares the words with the words in the stopwords list and if there is not
# match it puts the word into the "wordsFiltered" list. It keeps appending to the list until all of the words are checked.
# It then joins the individual words back into a string.
#There is a difference between "deep" copy and "shallow" copy. "Deep" copy make a copy where the index and data are
# separate from the original. "Shallow" copy is like a pointer where the two df share a common index and data
#dfScrubbed = df #This is a shallow copy
def rem_stopwords(self, df, stopWords):
from nltk.tokenize import sent_tokenize, word_tokenize
dfScrubbed = df.copy() #This is a deep copy. df.copy(deep = True); deep = True is default
i = 0
while i < len(df):
data = df.loc[i,('body')]
words = word_tokenize(data) # separates the string into a individual words.
wordsFiltered = []
for w in words:
if w not in stopWords:
wordsFiltered.append(w) # makes a new word list without the stopwords
joinedWordsFiltered = ' '.join(wordsFiltered)
dfScrubbed.loc[i,('body')] = joinedWordsFiltered # replaces the recorded in dfScrubbed with the stopWords removed
# from the 'body'
i += 1
#print(wordsFiltered)
#### method removes empty body rows and reindexes
dfScrubbed = remove_empty_body_rows(dfScrubbed)
#### checks to see if there are any empty records left
print('Are there any empty body records?')
empty = np.where(pd.isnull(dfScrubbed['body'])) #checks to see if there are any empty records in the column 'body'
print(empty)
#print(dfScrubbed.head())
return dfScrubbed
#640 compares the first record (index = 0) raw data ("body" column) with scrubbed (stopwords removed) data
#inputs: df - original df; dfs - scrubbed df (stopwords removed)
def compare_scrubbed(self, df, dfs):
print(df.loc[0,('body')])
print(dfs.loc[0,('body')])
# 650 Loads and combines two different dataframes in df; this is to combine two input datasets where the 'none'
#values have been modified; this is to see if increased records will increase the accuracy of the model.
def combine_dfs(self, df1, df2):
df = df1.append(df2)
print('The length of file 1 is:', len(df1))
print('The length of file 2 is:', len(df2))
print('The length of the combined dataframe is:', len(df))
return df
# 660 Writes a csv file
#input: df that is to be saved as a csv; output file name (eg 'tech stockTwit 03112021 dup advert stopwords.csv'
def write_csv(self, df, filename_output, relevant_path):
df.to_csv(relevant_path + '/' + filename_output, index = False, encoding = 'utf-8')
print('The csv file was written. File name: ', filename_output)
# removes specific rows and resets the index
def remove_empty_body_rows(self, df):
df.dropna(subset=['body'], inplace=True) #drops empty body records
df = df.reset_index(drop = True) # resets the index
return df
#### checks to see if there are any empty records left
def empty_records_check(self, df):
print('Are there any empty body records?')
empty = np.where(pd.isnull(df['body'])) #checks to see if there are any empty records in the column 'body'
if empty[0].size == 0:
print('There are no empty records! \n', empty)
else:
print('There are empty records ...\n', empty)
#### Removes Imogis
def remove_emoji(self, string):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002500-\U00002BEF" # chinese char
u"\U00002702-\U000027B0"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
u"\U0001f926-\U0001f937"
u"\U00010000-\U0010ffff"
u"\u2640-\u2642"
u"\u2600-\u2B55"
u"\u200d"
u"\u23cf"
u"\u23e9"
u"\u231a"
u"\ufe0f" # dingbats
u"\u3030"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', string)
# combines both names of file wanted to combing and writes csv file
def combine_two_files(self):
name1 = input('How many files do you want to combine?')
first_filename = input()
frames = [df1, df2]
result = pd.concat(frames)
def rem_dup_adver_ever_oth_emoji(self, df):
#remove duplicates
r_d = input('Do you want to remove duplicates? [Press enter if no] ')
if r_d in yes_resp:
df = remove_duplicates(df) #return df; removes duplicates
remove_dupl = 'r_d '
else:
remove_dupl = ''
#remove advertisements
r_a = input('Do you want to remove advertisements? [Press enter if no] ')
if r_a in yes_resp:
df = filter_records(df) #returns df; removes addvertisements
remove_advertisements = 'r_a '
else:
remove_advertisements = ''
# remove emojis
r_emoj = input('Do you want to remove emojis from the body records: [Press enter if no] ')
if r_emoj in yes_resp:
#print('location1')
i = 0
#print('location2')
while i < len(df):
#print('location3', i)
string = df.loc[i, ('body')]
#print('location4')
#print('original string: ', string)
new_string = remove_emoji(string)
#print('location5')
#print('new string: ', new_string)
df.loc[i, ('body')] = new_string
#print(df['body'][i])
r_emoji = 'r_emoj '
i += 1
else:
r_emoji = ''
return df, r_emoji, rem_every_other, remove_advertisements, remove_dupl
# -
# +
##############################################################
#### MAIN ####
##############################################################
# Notebook cell: drives the class-based preprocessing pipeline end to end.
# Each d.<step>() call mutates/returns a new frame; the df1..df7 locals keep
# the intermediate results around for inspection via the commented display().
import pandas as pd
pd.set_option("display.max_rows", None, "display.max_columns", None) #sets to display entire df
# NOTE(review): SentimentAnalysisPreprocessing must be defined by an earlier
# cell; getData() here takes no argument, so the class presumably prompts for
# or hard-codes the input file — confirm.
d = SentimentAnalysisPreprocessing()
df = d.getData()
#display(df)
df1 = d.remove_duplicates()
#display(df1)
#import re
df2 = d.remove_http_tags()
#display(df2)
df3 = d.lower_case()
#display(df3)
df4 = d.remove_punctuation()
#display(df4)
df5 = d.remove_unicode()
#display(df5)
#for d.lemmatize(); for some reason imports aren't recogized inside the the class method
#they are recognized just inside the class.
#check to see if import <class_name>.<module_name>
'''import nltk
#import re
#from bs4 import BeautifulSoup
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
display(stop_words)
nltk.download('wordnet') #not in original code
# Lemmatize the text
lemmer = WordNetLemmatizer()'''
df6 = d.lemmatize()
#display(df6)
df7 = d.remove_additional_stopwords()
#display(df7)
d.writeCsv()
# -
# !pip install finVizFetchPkg
# Scrape a fresh dataframe from finviz via the external package.
import finVizFetchPkg.finVizScaper
finvizdf = finVizFetchPkg.finVizScaper.finvizStreamer() #comment out to run in notebook
df = finvizdf.scrape_finziz()
# display() is an IPython builtin — this cell only runs inside a notebook.
display(df)
# MAIN
# +
#######################################################
#### MAIN ####
#######################################################
# Interactive driver: scrape/ingest one of three sources (finviz, stocktwits,
# reddit), normalise the column names to the stocktwits schema, scrub the
# text, then optionally write the result and/or combine two csv files.
yes_resp = ['yes', 'YES', 'y', 'Y', 'Yes']
no_resp = ['no', 'NO', 'n', 'N', 'No']
import os  # BUG FIX: os.remove is used below but os was never imported in this cell
#need to accommodate for three different types of inputs.
# - finviz
# - stocktwits
# - reddit
# step one is to either scrape or parse the data into a dataframe, df.
#############
# Finviz scraper
#############
#finvizdf = finvizStreamer()
import finVizFetchPkg.finVizScaper  # BUG FIX: 'import finVizFetchPkg()' was a SyntaxError
finvizdf = finVizFetchPkg.finVizScaper.finvizStreamer() #comment out to run in notebook
df = finvizdf.scrape_finziz()
#############
# stocktwits API
#############
#############
# reddit API
#############
# step two is to convert each of the dataframes into a common format with the same column names (data)
# BUG FIX: finviz_resp was never assigned in this cell (NameError); prompt the
# user, matching the wording used elsewhere in this notebook.
finviz_resp = input('Is this a file from scraping Finviz? ')
if finviz_resp in yes_resp:
    df.rename(columns = {'ticker':'symbol', 'title':'body', 'Sentiment':'sentiment'}, inplace = True) #renames the columns to match the stocktwits names
####################################
'''finviz output columns:
ticker,date,time,title,Sentiment
stocktwits parser output columsn:
symbol,messageID ,created_at,body,followers,sentiment,date,time
ticker = symbol
title = body
Sentiment = sentiment
date = date
time = time
"messageID", "created_at", "followers" do not exist in scraped finviz csv '''
######################################
#step 3 is to scrub the df to optimize the natural Language sentiment classification model development and accuracy
# NOTE(review): the helpers below (remove_duplicate_headers, remove_duplicates,
# remove_http_tags, ...) are bare names — confirm the cells defining them have
# been run before this one.
df = remove_duplicate_headers(df)
df = remove_duplicates(df)
df = remove_http_tags(df)
df = remove_punctuation(df)
df = remove_unicode(df)
df = lower_case(df)
#df = convert_sentiment_to_numerical(df) #line 240
#df = compound_binning(df) #line 210
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
df = remove_stopwords(df, stop_words)
from nltk.stem import WordNetLemmatizer  # BUG FIX: WordNetLemmatizer was used without an import in this cell
lemmer = WordNetLemmatizer()
df = lemmatize(df, lemmer)
#### checks to see if there are any empty records
print('Test empty records before writing the csv file')
empty_records_check(df)
df = remove_empty_body_rows(df)
# Writes a csv file; input df that is to be saved as a csv; output file name is combination of types of editing
# NOTE(review): 'name' and 'relevant_path' come from earlier notebook cells —
# confirm they are defined before this point.
w_csv = input('Do you want to write a csv file? [Press enter if no] ')
if w_csv in yes_resp:
    new_name = name.replace('.csv', '') #removes the .csv from the input file's name
    print(new_name)
    processed = 'preprocessed'
    # creates a file name that is a combination of all the different scrubbing types
    #filename_output = processed + remove_dupl + remove_advertisements + rem_every_other + swords + ed + r_emoji + vader_run + name
    filename_output = new_name + '_' + processed + '_lemmatized.csv'
    if name == filename_output: #Checks to see if the file already exists
        os.remove(filename_output) #If the file already exists it deletes the original file
        print('The old file was deleted.\n')
    write_csv(df, filename_output, relevant_path) #Writes the df to a new file
    print('The file was written with the filename of: ', filename_output, '\n')
    # NOTE TO SELF - When there is a record that has spaces only, it is encoded as a 'NaN' or empty record
    #when encoded as a utf-8 csv file. It will cause the postprocessing Vader app to crash. Importing the csv file
    #and then removing the 'NaN' and then rewriting the csv file should take care of the problem.
    final_name = relevant_path + '/' + filename_output
    print('The filename is: \n', final_name)
    dftest = getData(final_name)
    print('csv file read into df to see if all of the empty records are removed.')
    empty_records_check(dftest)
    df_final = remove_empty_body_rows(dftest)
    empty_records_check(df_final)
    os.remove(final_name) #If the file already exists it deletes the original file
    write_csv(df_final, filename_output, relevant_path) #Writes the df to a new file
# combines two dfs
c_t_dfs = input('Do you want to combine two files? [Press enter if no] ')
if c_t_dfs in yes_resp:
    print('Here is a list of the csv files to choose from: \n')
    list_dir_files(relevant_path)
    first_name = input('\nWhat is the first file you want to combine? ')
    df = getData(relevant_path + '/' + first_name) #returns df; reads csv file into df
    print('Imported the csv file.')
    second_name = input('What is the second file you want to add? ')
    df2 = getData(relevant_path + '/' + second_name)
    # 650 Loads and combines two different dataframes; this is to combine two input datasets where the 'none'
    #values have been modified; this is to see if increased records will increase the accuracy of the model.
    df = combine_dfs(df, df2)  # BUG FIX: was combine_dfs(df1, df2); df1 is never assigned in this cell
    w_csv = input('Do you want to write a csv file? [Press enter if no] ')
    if w_csv in yes_resp:
        first_name_no_csv = first_name.replace('.csv', ' + ')
        duo_name = first_name_no_csv + second_name  # BUG FIX: was 'second', an undefined name
        write_csv(df, duo_name, relevant_path)  # BUG FIX: write_csv(df, filename, path) takes no 'encoding' kwarg
        print('The file was written with the filename of: ', duo_name, '\n')
print('\nAll done ....')
# -
# Scratch cell: inspect the working frame, then demo the rename used when
# composing output file names ('.csv' stripped, suffix appended).
print(df.columns)
display(df.head())
testname = 'test_name.csv'
new_name = testname.replace('.csv', '') #removes the .csv from the input file's name
new_name = new_name + '_test.csv'
print(new_name)
import nltk
import re
from bs4 import BeautifulSoup
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
def test(df):
    """Scratch copy of the lemmatize step: downloads WordNet and builds a
    lemmatizer. The actual column transformation is the commented-out
    ``%time`` notebook magic, so when run as plain Python the frame is
    returned unchanged."""
    print('\nLemmatizing ...')
    import nltk
    import re
    from bs4 import BeautifulSoup
    from nltk.stem import WordNetLemmatizer
    from nltk.corpus import stopwords
    stop_words = stopwords.words('english')
    nltk.download('wordnet') #not in original code
    # Lemmatize the text
    lemmer = WordNetLemmatizer()
    # %time df['body_processed'] = df['body_processed'].map(lambda x : ' '.join([lemmer.lemmatize(w) for w in x.split() if w not in stop_words]))
    return df
# +
# Scratch cell: top-level copy of the lemmatize step. The real work is the
# commented-out %time magic line below, so this only downloads WordNet and
# builds the lemmatizer object.
print('\nLemmatizing ...')
import nltk
import re
from bs4 import BeautifulSoup
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
nltk.download('wordnet') #not in original code
# Lemmatize the text
lemmer = WordNetLemmatizer()
# %time df['body_processed'] = df['body_processed'].map(lambda x : ' '.join([lemmer.lemmatize(w) for w in x.split() if w not in stop_words]))
# -
df = test(df)
def lemmatize(df):
    """Scratch lemmatize step: downloads WordNet and builds a lemmatizer;
    the actual column transformation is the commented-out ``%time`` magic,
    so when run as plain Python the frame is returned unchanged."""
    print('\nLemmatizing ...')
    import nltk
    import re
    from bs4 import BeautifulSoup
    from nltk.stem import WordNetLemmatizer
    from nltk.corpus import stopwords
    stop_words = stopwords.words('english')
    nltk.download('wordnet') #not in original code
    # Lemmatize the text
    lemmer = WordNetLemmatizer()
    # %time df['body_processed'] = df['body_processed'].map(lambda x : ' '.join([lemmer.lemmatize(w) for w in x.split() if w not in stop_words]))
    return df
df = lemmatize(df)
def test(df):
    """Scratch copy of the stopword-extension step: appends ticker symbols
    and numeric tokens to NLTK's English stopword list. The actual column
    scrub is the commented-out ``%time`` magic line, so when run as plain
    Python the frame is returned unchanged. NOTE: redefines the earlier
    ``test`` — whichever cell ran last wins."""
    import nltk
    import re
    from bs4 import BeautifulSoup
    from nltk.stem import WordNetLemmatizer
    from nltk.corpus import stopwords
    stop_words = stopwords.words('english')
    #adds new stopwords to list
    new_stop_words = ['intc', 'nvda', 'tsla', 'mu', 'msft', 'tsm', 'adbe', 'unh', '39', ' 270',
                  '270000', '4033477', '244', '16', '399', '800', '270', '000', '60', '74',
                  '1600', '993', '392', '98', '00', '1601', 'amd', 'aapl']
    for w in new_stop_words:
        stop_words.append(w)
    print('stop_words: ', stop_words)
    #removes the stopwords from the column body_processed
    # %time df['body_processed'] = df['body_processed'].map(lambda x : ' '.join([w for w in x.split() if w not in stop_words]))
    return df
df = test(df)
display(df)
# +
# Scratch cell: extend NLTK's English stopword list with ticker symbols and
# numeric tokens; the column scrub itself is the commented-out %time magic.
import nltk
import re
from bs4 import BeautifulSoup
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
#adds new stopwords to list
new_stop_words = ['intc', 'nvda', 'tsla', 'mu', 'msft', 'tsm', 'adbe', 'unh', '39', ' 270',
                 '270000', '4033477', '244', '16', '399', '800', '270', '000', '60', '74',
                 '1600', '993', '392', '98', '00', '1601', 'amd', 'aapl']
for w in new_stop_words:
    stop_words.append(w)
print('stop_words: ', stop_words)
#removes the stopwords from the column body_processed
# %time df['body_processed'] = df['body_processed'].map(lambda x : ' '.join([w for w in x.split() if w not in stop_words]))
# -
display(df)
# Reload a previously preprocessed file for comparison with the working frame.
dftest = getData('preprocessed tech stockTwit 03112021.csv')
print(dftest.head())
#df = df.reset_index(drop = True)
print(df)
# 100 counts how many "None" sentiment values are there for the stocktwits sentiment value
none_count_raw(df)
# +
# Scratch cell: quick input()/membership test of the yes-response list.
yes_resp = ['yes', 'YES', 'y', 'Y', 'Yes']
no_resp = ['no', 'NO', 'n', 'N', 'No']
# NOTE(review): this assignment shadows the test() function defined in other cells.
test = input('do you want to test? ')
if test in yes_resp:
    print('yes I do')
# +
# Scratch cell: demo of assembling an output file name from tag fragments.
name1 = 'output.csv'
remove_dupl = 'a '
remove_advertisements = 'b '
# NOTE(review): this assignment shadows the remove_every_other() function
# defined in later cells — rename one of them if both are needed.
remove_every_other = ''
ed = 'd '
filename_output = remove_dupl + remove_advertisements + remove_every_other + ed + name1
print(filename_output)
# -
# +
# Demo cell: two ways to test whether columns exist in a DataFrame.
import pandas as pd

rows = [[10, 20, 30, 40], [7, 14, 21, 28], [55, 15, 8, 12]]
df = pd.DataFrame(rows,
                  columns=['Apple', 'Orange', 'Banana', 'Pear'],
                  index=['Basket1', 'Basket2', 'Basket3'])
# Column names are case-sensitive: 'apple' (lower case) is absent.
print("in - no" if 'apple' not in df.columns else "notin - yes")
# issubset checks for several columns at once.
print("Yes" if {'Apple', 'Orange'}.issubset(df.columns) else "No")
# +
# Scratch cell: load a csv and verify that stray header rows (a data row whose
# 'symbol' cell literally contains the string "symbol", a leftover of
# concatenated csv exports) are detected and removed.
def remove_duplicate_headers(df):
    # Drop in place every row whose 'symbol' column equals the header text.
    column = 'symbol'
    df.drop(df[df['symbol'] == column].index, inplace=True)
    return df
print('Here is a list of the csv files to choose from: \n')
list_dir_files()
name = input('\nWhat file do you want to use? ')
df = getData(name) #returns df; reads csv file into df
print('Imported the csv file.')
print(df.head())
# First scan: report the positions of any embedded header rows.
i = 0
while i < len(df):
    if df.iloc[i , 0] == "symbol":
        print('The index is: ', i)
    i += 1
print('starting to remove headers')
df = remove_duplicate_headers(df)
print('done removing headers')
# Second scan: confirm none remain. The while-else fires when the loop ends
# without break — i.e. it always prints here once the scan completes.
i = 0
while i < len(df):
    if df.iloc[i , 0] == "symbol":
        print('The index is: ', i)
    i += 1
else:
    print('They are all gone!')
print(df.head())
# -
# +
# Scratch cell: list the csv files in the post-processing folder, load one,
# strip duplicate header rows, and blank-fill missing 'body' records.
import os
def remove_duplicate_headers(df):
    """Drop rows whose 'symbol' cell contains the literal header text
    'symbol' (leftovers from concatenated csv exports)."""
    column = 'symbol'
    df.drop(df[df['symbol'] == column].index, inplace=True)
    return df
relevant_path = 'C:/Users/pstri/OneDrive/Documents/Personal/Kokoro/NLTK/Code Project/Post Processing'
included_extensions = ['csv']
file_names = [fn for fn in os.listdir(relevant_path)
              if any(fn.endswith(ext) for ext in included_extensions)]
for f in file_names:
    print(f)
name = input('What file do you want: ')
df = getData(relevant_path + '/' + name)
print(df.head(120))
print('before:')
empty = np.where(pd.isnull(df['body']))
print(empty)  # BUG FIX: the original printed the literal string 'empty'
df = remove_duplicate_headers(df)
df = df.fillna(value ={'body':' '}) #replaces any empty 'body' records with a space
print('after:')
print(np.where(pd.isnull(df['body'])))  # BUG FIX: the original discarded this result
print(df.head(120))
# +
# finding and removing empty records in a df
dftest = getData(relevant_path + '/' + filename_output)
print(relevant_path + '/' + filename_output)
print('csv file read into df to see if all of the empty records are removed.')
#finds empty records
empty = np.where(pd.isnull(dftest['body'])) #checks to see if there are any empty records in the column 'body'; empty is a tuple where the first element is the array, the second is dtype of the array
if empty[0].size == 0:
    print('There are no empty records: \n', empty)
else:
    print('There are empty records: \n', empty, '\n')
print(dftest.iloc[110:125,])
#drops empty records
dftest.dropna(subset=['body'], inplace=True) #drops empty body records
dftest = dftest.reset_index(drop = True) # resets the index
empty = np.where(pd.isnull(dftest['body'])) #checks to see if there are any empty records in the column 'body'; empty is a tuple where the first element is the array, the second is dtype of the array
print('\nAFTER DROP: \n', empty, '\n')
print(dftest.iloc[110:125,])
# removes specific rows and resets the index
# Free-function twin of the class method of the same name, for use in cells
# that work outside the class instance.
def remove_empty_body_rows(df):
    df.dropna(subset=['body'], inplace=True) #drops empty body records
    df = df.reset_index(drop = True) # resets the index
    return df
# -
print(df['body'][10:20])
# +
#removes emojis
# Inports the csv file of choice
relevant_path = 'C:/Users/pstri/OneDrive/Documents/Personal/Kokoro/NLTK/Code Project/Scraped Files'
print('Here is a list of the csv files to choose from: \n')
list_dir_files(relevant_path)
name = input('\nWhat file do you want to use? ')
df = getData(relevant_path + '/' + name) #returns df; reads csv file into df
print('Imported the csv file.')
# Free-function twin of the class method: strips characters from the major
# emoji/symbol Unicode blocks out of a string.
def remove_emoji(string):
    import re
    import sys
    emoji_pattern = re.compile("["
                           u"\U0001F600-\U0001F64F"  # emoticons
                           u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                           u"\U0001F680-\U0001F6FF"  # transport & map symbols
                           u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                           u"\U00002500-\U00002BEF"  # chinese char
                           u"\U00002702-\U000027B0"
                           u"\U00002702-\U000027B0"
                           u"\U000024C2-\U0001F251"
                           u"\U0001f926-\U0001f937"
                           u"\U00010000-\U0010ffff"
                           u"\u2640-\u2642"
                           u"\u2600-\u2B55"
                           u"\u200d"
                           u"\u23cf"
                           u"\u23e9"
                           u"\u231a"
                           u"\ufe0f"  # dingbats
                           u"\u3030"
                           "]+", flags=re.UNICODE)
    return emoji_pattern.sub(r'', string)
# Optionally run the scrub over every 'body' record.
i = 0
yes_dec = ['yes', 'y']
decision = input('decide: ')
if decision in yes_dec:
    i = 0
    while i < len(df):
        string = df.loc[i, ('body')]
        #print('original string: ', string)
        new_string = remove_emoji(string)
        #print('new string: ', new_string)
        df.loc[i, ('body')] = new_string
        #print(df['body'][i])
        i += 1
    print('all done')
# NOTE(review): assumes the frame has at least 14 rows — confirm.
print(df.loc[13,'body'])
print(df.loc[13,'body'])
# +
# Scratch cell: inspect how a 'body' string looks raw versus wrapped in a
# one-element list (the shape fnmatch.filter expects).
i = 0
while i < 2:
    #string = df.iloc[i,2]
    string = df.loc[i, ('body')]
    dat = df.loc[i, ('body')]
    data = [dat] #
    print('original string: ', dat)
    print(data)
    i += 1
# +
# Demo cell: compose a combined output file name from two csv names.
first = 'first.csv'
second = 'second.csv'
# Swap the first name's extension for a ' + ' joiner, then append the second.
first_no_csv = first.replace('.csv', ' + ')
first_second = f'{first_no_csv}{second}'
print(first_second)
# -
# Demo: int() truncates toward zero, so -1.9 becomes -1 (not -2).
negative = -1.9
rounding = int(negative)
print(rounding)
# +
# 40 finds certain words in the strings ('body') and deletes the entire record.
#Note: When the record is deleted the df is re-indexed. The index for the while statement is not so the result is
#that the record right after the deleted record is skipped. To remedy the problem the index (i) for the while statement
#is decremented by one.
#Also, the filtering terms are not case sensitive.
def filter_records(df):
    """Return *df* without any row whose 'body' matches one of the known
    advertisement / boilerplate patterns; the index is reset afterwards.

    Matching uses shell-style fnmatch wildcards, exactly as the original
    per-pattern fnmatch.filter loop did (case handling therefore follows
    os.path.normcase: case-insensitive on Windows, sensitive elsewhere).
    """
    import fnmatch
    # Words or phrases whose records are to be removed.
    advert = ['* sec *', '* daily News *', '*Huge Print*', '* Form *', '*SweepCast*', '*Large Print*',
              '*Huge Print*', '*8-K*', '*SmartOptions*', '*Big Trade*', '*SEC Form*', '*Notice of Exempt*',
              '*created_at*', '*stock news*', '*Trading Zones*', '*Entry:*', '*New Article*', '*ooc.bz*',
              '*http*', 'Huge Trade', 'Trading is easy', 'www.', '#wallstreetbets', 'wallstreetbets',
              'Huge Trade', '#unitedtraders', 'stockbeep.com', 'Big Trade']
    # Keep a row only when its body matches none of the patterns. This single
    # pass replaces the original O(len(advert) * len(df)) drop/re-index loop
    # with its fragile manual index decrementing.
    keep = [not any(fnmatch.fnmatch(body, pattern) for pattern in advert)
            for body in df['body']]
    return df[keep].reset_index(drop = True)
# Apply the advertisement/boilerplate filter to the working dataframe.
df = filter_records(df)
# +
# 40 finds certain words in the strings ('body') and deletes the entire record.
#Note: When the record is deleted the df is re-indexed. The index for the while statement is not so the result is
#that the record right after the deleted record is skipped. To remedy the problem the index (i) for the while statement
#is decremented by one.
#Also, the filtering terms are not case sensitive.
# NOTE(review): fnmatch case-folds via os.path.normcase, so matching is
# case-insensitive on Windows only — the comment above overstates it.
# Inline (non-function) copy of filter_records, kept for experimentation.
import fnmatch
df = df.reset_index(drop = True) # resets the index; removes the gaps
data = []
counter = 0
advert = ['* sec *', '* daily News *', '*Huge Print*', '* Form *', '*SweepCast*', '*Large Print*',
          '*Huge Print*', '*8-K*', '*SmartOptions*', '*Big Trade*', '*SEC Form*', '*Notice of Exempt*',
          '*created_at*', '*stock news*', '*Trading Zones*', '*Entry:*', '*New Article*', '*ooc.bz*',
          '*http*', 'Huge Trade', 'Trading is easy', 'www.', '#wallstreetbets', 'wallstreetbets',
          'Huge Trade', '#unitedtraders', 'stockbeep.com', 'Big Trade'] # words or phrases whose records are to be removed; It is not case sensitive.
for a in advert:
    i = 0
    df = df.reset_index(drop = True) # resets the index before each iteration; removes the gaps; resets len(df)
    while i < len(df):
        dat = df.loc[i, ('body')] # 2 represents the 'body' column
        #print('index =', i)
        #print(dat)
        #print(a)
        data = [dat] # sets the string from the df into a list for the fnmatch.filter
        #print('index = ', i)
        filtered = fnmatch.filter(data, a) # compares the information in the 'body' column with the 'advert' list; it places the matched items in the 'filtered' variable.
        #https://www.geeksforgeeks.org/fnmatch-unix-filename-pattern-matching-python/
        if len(filtered) != 0: #if returns a True then record needs to be removed
            counter += 1
            df = df.drop(df.index[i]) # drops (deletes) the record
            df = df.reset_index(drop = True) # resets the index; removes the gaps
            #print('after the record is dropped:', df..log[i,('body')], 'i = ', i)
            #Note: When the record is dropped there is a change in the 'index' number. after the drop index number
            #5 becomes index number 4. Since the counter increments one more time it skips the record right after
            #the record that was just checked. That is why it takes multiple runs to remove all of the target
            #records. To correct this decrement the index, i, by
            i -= 1
        i += 1
df = df.reset_index(drop = True) # resets the index; removes the gaps
len(df)
# -
print(df.loc[340:350,:])
# +
# 110 This removes every other "None" record to reduce the total number of "None" rating. This is to make
#the 'None' proportions more equal. It also prints the ratios of each sentiment response to the total number
#of responses.
def remove_every_other(df):
    """Drop 'None'-sentiment rows sitting at even index positions (an
    approximate halving, to rebalance the class mix), print how the
    remaining records split across None / Bullish / Bearish, and return
    the re-indexed frame.

    BUG FIX: the original discarded the result of df.drop(), so no record
    was ever removed; the drop is now assigned back and the index reset,
    matching the corrected version of this cell further below. Because the
    index is reset and the counter still advances, the row that slides into
    the dropped slot is skipped — same behaviour as the corrected cell.
    """
    df = df.reset_index(drop = True) #start from a gap-free index
    i = 0
    while i < len(df):
        if df.loc[i, ('sentiment')] == 'None':
            if i % 2 == 0: #every even-indexed "None" row is removed
                df = df.drop(df.index[i])
                df = df.reset_index(drop = True)
        i += 1
    df = df.reset_index(drop = True)
    print('\nThe total number of records is: ', len(df))
    if len(df) == 0:
        return df  # avoid division by zero in the percentage report
    n_none = _count_sentiment(df, 'None')
    print('The number of "None" stocktwits sentiment values is:', n_none)
    print('The percentage of "None" values is:', (int(n_none/len(df) * 1000)/10), '%')
    n_bull = _count_sentiment(df, 'Bullish')
    print('The number of "Bullish" stocktwits sentiment values is:', n_bull)
    print('The percentage of "Bullish" values is:', (int(n_bull/len(df) * 1000)/10), '%')
    n_bear = _count_sentiment(df, 'Bearish')
    print('The number of "Bearish" stocktwits sentiment values is:', n_bear)
    print('The percentage of "Bearish" values is:', (int(n_bear/len(df) * 1000)/10), '% \n')
    return df
def _count_sentiment(df, label):
    """Count rows whose 'sentiment' column equals *label* (report helper)."""
    count = 0
    for i in range(len(df)):
        if df.loc[i, ('sentiment')] == label:
            count += 1
    return count
# NOTE(review): the return value is discarded here, so any rows the function
# removes are lost — presumably intended: df = remove_every_other(df).
remove_every_other(df)
# -
print(df.loc[0:10, :])
# +
# 110 This removes every other "None" record to reduce the total number of "None" rating. This is to make
#the 'None' proportions more equal. It also prints the ratios of each sentiment response to the total number
#of responses.
def remove_every_other(df):
    """Debug variant: starting the scan at index 1, drop 'None'-sentiment
    rows found at even index positions, tracing every step to stdout, and
    return the re-indexed frame. Redefines the earlier version of this
    function."""
    pos = 1
    counter = 0  # kept from the original; never used
    df = df.reset_index(drop=True)
    print(len(df))
    while pos < len(df):
        label = df.loc[pos, ('sentiment')]
        print('index =', pos, pos % 2, label)
        if label == 'None' and pos % 2 == 0:
            print('inside :', pos, pos % 2)
            print(df.loc[pos], '\n right before drop')
            df = df.drop(df.index[pos]).reset_index(drop=True)
            # NOTE: if the dropped row was the last one, this lookup raises KeyError
            print('index =', pos, df.loc[pos, ('sentiment')])
        pos += 1
    df = df.reset_index(drop=True)
    print(len(df))
    return df
# NOTE(review): return value discarded — the rows dropped inside the function
# are lost; presumably intended: df = remove_every_other(df).
remove_every_other(df)
# -
len(df)
# +
# Scratch cell: drop the first row of the working frame and inspect the result.
i = 0
df = df.drop(df.index[i]) #drops (deletes) the record
#print('index =', i, df.loc[i,('sentiment')])
print(df.loc[0:15,:])
# +
# Demo cell: build a sample DataFrame and show that df.drop([1, 2]) returns a
# copy — the original frame keeps all 12 rows.
import pandas as pd
import numpy as np
names = ['Alisa', 'raghu', 'jodha', 'jodha', 'raghu', 'Cathrine',
         'Alisa', 'Bobby', 'Bobby', 'Alisa', 'raghu', 'Cathrine']
ages = [26, 23, 23, 23, 23, 24, 26, 24, 22, 26, 23, 24]
scores = [85, 31, 55, 55, 31, 77, 85, 63, 42, 85, 31, np.nan]
df = pd.DataFrame({'Name': names, 'Age': ages, 'Score': scores},
                  columns=['Name', 'Age', 'Score'])
df  # notebook display of the full frame
df.drop([1, 2])  # returns a copy without rows 1 and 2; df itself is unchanged
df  # still 12 rows
# +
# Scratch cell: interactive check of the yes-response membership test used
# throughout the notebook.
yes_resp = ['yes', 'YES', 'y', 'Y', 'Yes']
no_resp = ['no', 'NO', 'n', 'N', 'No']
finviz_resp = input('Is this a file from scraping Finviz? ')
if finviz_resp in yes_resp:
    print('It is in there')
else:
    print('It is not in there')
# +
'''if 'raw_compound' not in df.columns:
before_scrubbing = input('Do you want to run the Vader analysis before scrubbing? \n')
if before_scrubbing in yes_resp:
vader_run = 'v_b '
print('\nThis is the first time this file has been preprocessed.\n')
print('Performing Vader sentiment analysis before scrubbing... \n')
df = vader_analysis(df)
df, r_emoji, rem_every_other, remove_advertisements, remove_dupl = rem_dup_adver_ever_oth_emoji(df)
df, swords = remove_stopwords(df)
else:
vader_run = 'v_a '
print('\n Performing Vader sentiment analysis after scrubbing... \n')
df, r_emoji, rem_every_other, remove_advertisements, remove_dupl = rem_dup_adver_ever_oth_emoji(df)
df, swords = remove_stopwords(df)
df = vader_analysis(df)
else:
print('\nThis file has been preprocessed before. There is no need to run the VADER analysis.\n')
'''
'''# 90 OPTIONAL Compares the Vader sentiment numbers with the Stocktwits sentiment ratings.
v_c = input('Do you want to compare the Vader sentiment numbers with the Stocktwits sentiment ratings? [Press enter if no] ')
if v_c in yes_resp:
if 'raw_compound' in df.columns: #checks to see if this file have been prepocessed before by seeing if the column 'raw_compond' exists
vader_correct(df)
# 100 OPTIONAL: Counts how many "None" sentiment values are there for the stocktwits sentiment value
c_n_s = input('Do you want to count the "None" sentiment values for the Stocktwits sentiments before any edits? [Press enter if no] ')
if c_n_s in yes_resp:
none_count_raw(df)
# 115 OPTIONAL: Provides statistics on Stocktwits sentiments; bullish, none or bearish.
s_o_s = input('Do you want to see the statistics on the Stocktwits sentiments? [Press enter if no] ')
if s_o_s in yes_resp:
stats(df)
# 120 OPTIONAL: Allows user to manually input value when stocktwits sentiment value is "None"
# It counts every 20 edits and gives the user the option to quit. If the user chooses to quit
# it breaks from the while look and writes the df to a csv file so all work is saved up to that point.
# upon start up it ask if thie is the first time processing the raw data. If no it loads the csv file into
# the dataframe and starts where the previous session left off. If "modified?" is "Yes and "sentiment" is "None"
# it skips the record. Therefore it will re-start at the first "modified?" is "No" and "sentiment" is "None"
e_n = input('Do you want to edit the "None" records? [Press enter if no] ')
if e_n in yes_resp:
df = edit(df) #returns df
ed = 'edited '
else:
ed = ''
# 180 OPTIONAL: counts how many "None" sentiment values are there for the stocktwits sentiment values after the edit
n_r_a_e = input('Do you want to see how many "None" records there are after the edits? [Press enter if no] ')
if n_r_a_e in yes_resp:
none_count(df)
# 140 OPTIONAL: This will change the modified rating to the nltk rating only when they are opposite to see if it improves
#the accuracy number
# flip vader rating if opposite to stocktwits sentiment
f_v_r = input('Do you want to flip the Vader sentiment rating when it is the opposite of the Stocktwits sentiment rating? [Press enter if no] ')
if f_v_r in yes_resp:
df = change_opp_nltk(df) #returns df
# 180 OPTIONAL: counts how many "None" sentiment values are there for the stocktwits sentiment value
n_c_a_e = input('Do you want to see the number of "None" sentiments after the edit? [Press enter if no] ')
if n_c_a_e in yes_resp:
none_count(df) '''
# -
# # methods
#
# class SentimentAnalysisPreprocessing(self):
#
# def __init__(self, df):
# self.df = df
#
# # 10 initializes the dataframe "df" and imports the csv into df;
# # the argument is the name/address of the file.
# # https://stackoverflow.com/questions/33440805/pandas-dataframe-read-csv-on-bad-data
# def getData(name):
# df1 = pd.DataFrame() # defines df1 as a dataframe
# df1 = pd.read_csv(name, header = 0)
# return df1
#
# # removes duplicate headers
# def remove_duplicate_headers(df):
# print('\nDropping duplicate headers ...')
# column = 'symbol'
# # %time df.drop(df[df['symbol'] == column].index, inplace=True)
# df = df.reset_index(drop = True) # resets the index
# return df
#
# # 30 removes any duplicate records; duplicate records imply bot records
# def remove_duplicates(df):
# print('\nDropping duplicates ...')
# # %time df = df.drop_duplicates()
# df = df.reset_index(drop = True) # resets the index
# len(df)
# return df
#
# # remove HTTP tags
# def remove_http_tags(df):
# print('\nRemoving http tags ...')
# # %time df['body_processed'] = df['body'].map(lambda x : ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)"," ",x).split()))
# return df
#
# # coverts to all lower case
# def lower_case(df):
# print('\nConverting to lower case ...')
# # %time df['body_processed'] = df['body_processed'].map(lambda x: x.lower())
# return df
#
# # removes all punctuation
# def remove_punctuation(df):
# print('\nRemoving punctuation ...')
# # %time df['body_processed'] = df['body_processed'].map(lambda x: re.sub(r'[^\w\s]', '', x))
# return df
#
# # removes unicodes (emojis)
# def remove_unicode(df):
# print('\nRemoving unicode ...')
# # %time df['body_processed'] = df['body_processed'].map(lambda x : re.sub(r'[^\x00-\x7F]+',' ', x))
# return df
#
# def lemmatize(df, lemmer): #lemmer must be defined outside of the function and passed in
# print('\nLemmatizing ...')
# # %time df['body_processed'] = df['body_processed'].map(lambda x : ' '.join([lemmer.lemmatize(w) for w in x.split() if w not in stop_words]))
# return df
#
# # Remove stopwords
# def remove_stopwords(df, stop_words): #stop_words must be defined outside of the function and passed in
# print('\nRemoving stopwords ...')
#
# #adds new stopwords to list
#
# newStopWords = ['a', 'about', 'above', 'across', 'after', 'afterwards']
# newStopWords += ['again', 'against', 'all', 'almost', 'alone', 'along']
# newStopWords += ['already', 'also', 'although', 'always', 'am', 'among']
# newStopWords += ['amongst', 'amoungst', 'amount', 'an', 'and', 'another']
# newStopWords += ['any', 'anyhow', 'anyone', 'anything', 'anyway', 'anywhere']
# newStopWords += ['are', 'around', 'as', 'at', 'back', 'be', 'became']
# newStopWords += ['because', 'become', 'becomes', 'becoming', 'been']
# newStopWords += ['before', 'beforehand', 'behind', 'being', 'below']
# newStopWords += ['beside', 'besides', 'between', 'beyond', 'bill', 'both']
# newStopWords += ['bottom', 'but', 'by', 'call', 'can', 'cannot', 'cant']
# newStopWords += ['co', 'computer', 'con', 'could', 'couldnt', 'cry', 'de']
# newStopWords += ['describe', 'detail', 'did', 'do', 'done', 'down', 'due']
# newStopWords += ['during', 'each', 'eg', 'eight', 'either', 'eleven', 'else']
# newStopWords += ['elsewhere', 'empty', 'enough', 'etc', 'even', 'ever']
# newStopWords += ['every', 'everyone', 'everything', 'everywhere', 'except']
# newStopWords += ['few', 'fifteen', 'fifty', 'fill', 'find', 'fire', 'first']
# newStopWords += ['five', 'for', 'former', 'formerly', 'forty', 'found']
# newStopWords += ['four', 'from', 'front', 'full', 'further', 'get', 'give']
# newStopWords += ['go', 'had', 'has', 'hasnt', 'have', 'he', 'hence', 'her']
# newStopWords += ['here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers']
# newStopWords += ['herself', 'him', 'himself', 'his', 'how', 'however']
# newStopWords += ['hundred', 'i', 'ie', 'if', 'in', 'inc', 'indeed']
# newStopWords += ['interest', 'into', 'is', 'it', 'its', 'itself', 'keep']
# newStopWords += ['last', 'latter', 'latterly', 'least', 'less', 'ltd', 'made']
# newStopWords += ['many', 'may', 'me', 'meanwhile', 'might', 'mill', 'mine']
# newStopWords += ['more', 'moreover', 'most', 'mostly', 'move', 'much']
# newStopWords += ['must', 'my', 'myself', 'name', 'namely', 'neither', 'never']
# newStopWords += ['nevertheless', 'next', 'nine', 'nobody', 'none'] #removed 'no'
# newStopWords += ['noone', 'nor', 'not', 'nothing', 'now', 'nowhere', 'of']
# newStopWords += ['off', 'often', 'on','once', 'one', 'only', 'onto', 'or']
# newStopWords += ['other', 'others', 'otherwise', 'our', 'ours', 'ourselves']
# newStopWords += ['out', 'over', 'own', 'part', 'per', 'perhaps', 'please']
# newStopWords += ['put', 'rather', 're', 's', 'same', 'see', 'seem', 'seemed']
# newStopWords += ['seeming', 'seems', 'serious', 'several', 'she', 'should']
# newStopWords += ['show', 'side', 'since', 'sincere', 'six', 'sixty', 'so']
# newStopWords += ['some', 'somehow', 'someone', 'something', 'sometime']
# newStopWords += ['sometimes', 'somewhere', 'still', 'such', 'system', 'take']
# newStopWords += ['ten', 'than', 'that', 'the', 'their', 'them', 'themselves']
# newStopWords += ['then', 'thence', 'there', 'thereafter', 'thereby']
# newStopWords += ['therefore', 'therein', 'thereupon', 'these', 'they']
# newStopWords += ['thick', 'thin', 'third', 'this', 'those', 'though', 'three']
# newStopWords += ['three', 'through', 'throughout', 'thru', 'thus', 'to']
# newStopWords += ['together', 'too', 'top', 'toward', 'towards', 'twelve']
# newStopWords += ['twenty', 'two', 'un', 'under', 'until', 'up', 'upon']
# newStopWords += ['us', 'very', 'via', 'was', 'we', 'well', 'were', 'what']
# newStopWords += ['whatever', 'when', 'whence', 'whenever', 'where']
# newStopWords += ['whereafter', 'whereas', 'whereby', 'wherein', 'whereupon']
# newStopWords += ['wherever', 'whether', 'which', 'while', 'whither', 'who']
# newStopWords += ['whoever', 'whole', 'whom', 'whose', 'why', 'will', 'with']
# newStopWords += ['within', 'without', 'would', 'yet', 'you', 'your']
# newStopWords += ['yours', 'yourself', 'yourselves'] #provided by Codecademy??
#
# # additional stopwords:
# newStopWords += ['[screenshot]', 'screenshot', '[screenshot]great', 'screenshot',
# 'the', 'smart', 'yah', 'got', 'nutty', 'moving', 'weeks', 'Got', 'So', 'today', 'Been', 'or']
#
# newStopWords += ['i', 'you', 'He', 'he', 'she', 'they', 'their', 'it'] # pronouns
#
# newStopWords += ['amd','nvda', 'tsla', 'goog', 'ba', 'fb', 'googl', 'intc', 'intel', 'csco', 'mu',
# 'smh', 'tsm','aapl', 'csco', 'poetf', 'photonics', 'dd', 'arwr', 't', 'infini', 'amc', 'arl',
# 'gme', 'nio', 'qs', 'msft', 'adbe', 'unh'] # Stock symbols or names
#
# newStopWords += [] # nouns
#
# #newStopWords += ['.', '?', '!', ';', ',', "'", '.'] # punctuation
#
# newStopWords += ['&', '#', '%', '$', '@', '/'] # symbols
#
# newStopWords += ['41.75', '530.05', '39', 'Two', 'two', 'One', 'one', 'Three', 'three', 'Four', 'four',
# 'Five', 'five', 'Six', 'six', 'Seven', 'seven', 'Eight', 'eight', 'Nine', 'nine', 'Ten',
# 'ten', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '39', ' 270',
# '270000', '4033477', '244', '16', '399', '800', '270', '000', '60', '74',
# '1600', '993', '392', '98', '00', '1601'] # numbers
#
# for w in newStopWords:
# stop_words.append(w)
#
# #print('stop_words: ', stop_words)
#
# #removes the stopwords from the column body_processed
# # %time df['body_processed'] = df['body_processed'].map(lambda x : ' '.join([w for w in x.split() if w not in stop_words]))
#
# return df
#
# # 40 finds certain words in the strings ('body') and deletes the entire record.
# #Note: When the record is deleted the df is re-indexed. The index for the while statement is not so the result is
# #that the record right after the deleted record is skipped. To remedy the problem the index (i) for the while statement
# #is decremented by one.
# #Also, the filtering terms are not case sensitive.
# def filter_records(df):
# import fnmatch
#
# data = []
# counter = 0
# advert = ['* sec *', '* daily News *', '*Huge Print*', '* Form *',
# '*SweepCast*', '*Large Print*', '*Huge Print*', '*8-K*',
# '*SmartOptions*', '*Big Trade*', '*SEC Form*', '*Notice of Exempt*',
# '*created_at*', '*stock news*', '*Trading Zones*', '*Entry:*',
# '*New Article*', '*ooc.bz*', '*http*', 'Huge Trade', 'Trading is easy',
# 'www.', '#wallstreetbets', 'wallstreetbets', 'Huge Trade', '#unitedtraders',
# 'stockbeep.com', 'Big Trade'] # words or phrases whose records are to be removed; It is not case sensitive.
#
# for a in advert:
# i = 0
# df = df.reset_index(drop = True) # resets the index before each iteration; removes the gaps; resets len(df)
# while i < len(df):
# dat = df.loc[i, ('body')] # 2 represents the 'body' column
# data = [dat] # sets the string from the df into a list for the fnmatch.filter
# #print('index = ', i)
# filtered = fnmatch.filter(data, a) # compares the information in the 'body' column with the 'advert' list; it places the matched items in the 'filtered' variable.
# #https://www.geeksforgeeks.org/fnmatch-unix-filename-pattern-matching-python/
#
# if len(filtered) != 0: #if returns a True then record needs to be removed
# counter += 1
#
# df = df.drop(df.index[i]) # drops (deletes) the record
# df = df.reset_index(drop = True) # resets the index; removes the gaps
#
# #print('after the record is dropped:', df..log[i,('body')], 'i = ', i)
#
# #Note: When the record is dropped there is a change in the 'index' number. after the drop index number
# #5 becomes index number 4. Since the counter increments one more time it skips the record right after
# #the record that was just checked. That is why it takes multiple runs to remove all of the target
# #records. To correct this decrement the index, i, by
#
# i -= 1
#
# i += 1
#
# df = df.reset_index(drop = True) # resets the index; removes the gaps
# len(df)
# return df
#
# #50 Vader sentiment analyzer
# def vader_sentiment(df):
# vader = SentimentIntensityAnalyzer()
#
# f = lambda tweet: vader.polarity_scores(tweet)['compound']
#
# df['raw_compound'] = df['body'].apply(f)
#
# print('The number of clean records in the df are: ', len(df) , '\n')
# #print(df.head())
#
# return df
#
# # 60 creates a new column called 'compound_bin' from the raw_compound scores. This creates a column that the raw
# #where the translated raw compound scores will be placed (either a -1, 0, 1.)
# def compound_binning(df):
# df['compound_bin'] = df['raw_compound'] # Creates a column called 'compound_bin'
#
# #del df['Unnamed: 0'] # deletes the column named 'Unnamed: 0'
#
# #print(df.head())
#
# # 70 converts the 'raw_compound' data to either a 1, 0 or -1: 1 if the nltk sentiment number is >= .1; 0 if -.1 < x < .1;
# # and -1 if <= -.1, and overwrites the value in compound_bin
#
# i = 0
# while i < len(df):
# if df.loc[i,('raw_compound')] >= 0.1: # column 5 is 'raw_compound'
# df.loc[i, ('compound_bin')] = np.int(df.loc[i, ('raw_compound')] + .9) # column 6 is 'compound_bin'
#
# if df.loc[i,('raw_compound')] < .1 and df.loc[i,('raw_compound')] > -.1:
# df.loc[i, ('compound_bin')] = 0
#
# if df.loc[i,('raw_compound')] <= -.1:
# df.loc[i, ('compound_bin')] = np.int(df.loc[i,('raw_compound')] - .9)
# i += 1
#
# #print(df)
#
# return df
#
# # 80 Converts sentiment ratings into numerical values and put the value into 'sentiment_number'.
# #Stocktwits sentiment rating (bullish or Bearish) is used as the standard;
# #Stocktwits sentiment rating of 'None' is not used as a standard because people could have simply elected to not enter it.
# #https://www.dataquest.io/blog/tutorial-add-column-pandas-dataframe-based-on-if-else-condition/
# def convert_sentiment_to_numerical(df):
#
# print('\nconverting sentiment values to numerical values ...')
# import numpy as np
#
# conditions = [(df['sentiment'] == 'Bullish'), (df['sentiment'] == 'None'), (df['sentiment'] == 'Bearish')] #column 4 is 'sentiment'
#
# values = [1.0, 0.0, -1.0]
#
# # %time df['sentiment_number'] = np.select(conditions, values)
#
# df['modified_rating'] = 0 # adds a column "modified_rating" and sets it equal to 0
# df['modified?'] = 'No' # adds a column "modified?" and sets it equal to 'No'
#
#
# #print(df)
#
# return df
#
# # 90 Determines the percent correct and incorrect for the Vader sentiment values vs the stocktwits sentiment values
# def vader_correct(df):
# correct = 0
# incorrect = 0
# total = len(df)
# i = 0
# while i < len(df):
# if df.loc[i, ('compound_bin')] == df.loc[i, ('sentiment_number')]: # column 6 is 'compound_bin' and column 7 is 'sentiment_number'
# correct += 1
# else:
# incorrect += 1
#
# i += 1
#
# print('The Vader percent correct to stocktwits raw data is:', int(100 * correct/total), '%')
# print('The Vader percent incorrect to stocktwits raw data is:', int(100 * incorrect/total), '%')
#
# #return df
#
# # 100 counts how many "None" sentiment values are there for the stocktwits sentiment value
# def none_count_raw(df):
# i = 0
# sentiment_number = 0
#
# while i < len(df):
# if df.loc[i,('sentiment')] == 'None': # column 4 is 'sentiment'
# sentiment_number += 1
# i += 1
#
# print('The number of "None" stocktwits sentiment values is:', sentiment_number)
# print('The percentage of "None" values is:', (int(sentiment_number/len(df) * 1000)/10), '%')
#
# # 110 This removes every other "None" record to reduce the total number of "None" rating. This is to make
# #the 'None' proportions more equal. It also prints the ratios of each sentiment response to the total number
# #of responses.
# def remove_every_other(df):
# i = 0
# counter_before = 0
# counter_after = 0
# df = df.reset_index(drop = True) #resets the index to be continuous
#
# while i < len(df): #count the 'None' records before the drop.
# if df.loc[i,('sentiment')] == 'None':
# counter_before += 1
# i += 1
#
# print('\nThe total number of records is: ', len(df))
# print('The number of "None" stocktwits sentiment values before removal is:', counter_before)
#
# i = 0
# while i < len(df):
# if df.loc[i,('sentiment')] == 'None': #column 4 is sentiment
# if i % 2 == 0: #identifies every even index where the sentiment is "None"
# #df = df.drop(df.index[i]) #drops (deletes) the record
# df = df.drop(df.index[i])
# df = df.reset_index(drop = True) #resets the index to be continuous
#
# i -= 1
#
#
# i += 1
#
# df = df.reset_index(drop = True) #resets the index to be continuous
#
# i = 0
# counter_after = 0
#
# while i < len(df):
# if df.loc[i,('sentiment')] == 'None':
# counter_after += 1
# i += 1
#
# print('\nThe total number of records is: ', len(df))
# print('The number of "None" stocktwits sentiment values after removal is:', counter_after)
# print('The percentage of "None" values is:', (int(counter_after/len(df) * 1000)/10), '%')
#
# i = 0
# sentiment_number = 0
#
# while i < len(df):
# if df.loc[i,('sentiment')] == 'Bullish':
# sentiment_number += 1
# i += 1
#
# print('The number of "Bullish" stocktwits sentiment values is:', sentiment_number)
# print('The percentage of "Bullish" values is:', (int(sentiment_number/len(df) * 1000)/10), '%')
#
# i = 0
# sentiment_number = 0
#
# while i < len(df):
# if df.loc[i,('sentiment')] == 'Bearish':
# sentiment_number += 1
# i += 1
#
# print('The number of "Bearish" stocktwits sentiment values is:', sentiment_number)
# print('The percentage of "Bearish" values is:', (int(sentiment_number/len(df) * 1000)/10), '% \n')
#
# return df
#
# # 115 Provides statistics on sentiments; bullish, none or bearish.
# def stats(df):
#
# i = 0
# sentiment_number = 0
#
# while i < len(df):
# if df.loc[i,('sentiment')] == 'None':
# sentiment_number += 1
# i += 1
#
# print('The total number of records is: ', len(df))
# print('The number of "None" stocktwits sentiment values is:', sentiment_number)
# print('The percentage of "None" values is:', (int(sentiment_number/len(df) * 1000)/10), '%')
#
# i = 0
# sentiment_number = 0
#
# while i < len(df):
# if df.loc[i,('sentiment')] == 'Bullish':
# sentiment_number += 1
# i += 1
#
# print('The number of "Bullish" stocktwits sentiment values is:', sentiment_number)
# print('The percentage of "Bullish" values is:', (int(sentiment_number/len(df) * 1000)/10), '%')
#
# i = 0
# sentiment_number = 0
#
# while i < len(df):
# if df.loc[i,('sentiment')] == 'Bearish':
# sentiment_number += 1
# i += 1
#
# print('The number of "Bearish" stocktwits sentiment values is:', sentiment_number)
# print('The percentage of "Bearish" values is:', (int(sentiment_number/len(df) * 1000)/10), '%')
#
# # 120 Allows user to manually input value when stocktwits sentiment value is "None"
# # It counts every 20 edits and gives the user the option to quit. If the user chooses to quit
# # it breaks from the while look and writes the df to a csv file so all work is saved up to that point.
# # upon start up it ask if thie is the first time processing the raw data. If no it loads the csv file into
# # the dataframe and starts where the previous session left off. If "modified?" is "Yes and "sentiment" is "None"
# # it skips the record. Therefore it will re-start at the first "modified?" is "No" and "sentiment" is "None"
# def edit(df):
#
# import copy
#
# i = 0
# counter = 0 # counter to see if user want to stop
#
# while i < len(df):
# #while i < 6:
#
# if df.loc[i,('sentiment')] == 'None' and df.loc[i,('modified?')] == 'No': # Column 9 is 'modified?'
# print('\nindex number:', i, '\n', df.loc[i, ('body')])
# #print('This is the body of the tweet:\n', df..log[i,('body')])
# rating = int(input('Enter your rating (1, 0 or -1.):'))
# df.loc[i,('modified_rating')] = copy.deepcopy(rating) # writes inputed number to the 'modified_rating'
# df.loc[i,('modified?')] = 'Yes' # sets "modified?" equal to 'Yes' to identify which records have been modified; so that it can start at the next record at start up
#
# counter += 1
#
# elif df.loc[i,('sentiment')] == 'Bearish':
#
# df.loc[i,('modified_rating')] = df.loc[i,('sentiment_number')] #copies the stocktwits 'sentiment_number' (7) to the 'modified_rating(8)
#
# elif df.loc[i,('sentiment')] == 'Bullish':
#
# df.loc[i,('modified_rating')] = df.loc[i,('sentiment_number')] #copies the stocktwits 'sentiment_number' (7) to the 'modified_rating(8)
#
# if counter == 20: # represents 20 edits
# quit = input('Do you want to quit? (Enter either a "y" or "Y") ')
# if quit == 'y' or quit == 'Y':
# print('You are exiting.')
# break
# else:
# counter = 0 # resets the counter to 0 so there must be another 20 records reviewed and modified
#
# i += 1
#
# #df.to_csv(filename, index = False)
# #print('The csv file was written. File name: ', filename)
#
# return df
#
# # 140 This will change the modified rating (8) to the nltk rating (6) only when they are opposite to see if it improves
# #the accuracy number
# def change_opp_nltk(df):
#
# filename = 'tech stockTwit 02232021 opposite compound_bin vs modified_rating.csv'
#
# print('The name of the csv file that will be written to is: ', filename)
#
# correct_name = input('Is this the correct filename? (enter "N" or "n" for no)')
#
# if correct_name == 'N' or correct_name == 'n':
# new_name = input('What is the correct name?')
# filename = new_name
#
# i = 0
#
# import copy
#
# counter = 0 # counter to see if user want to stop
#
# while i < len(df):
#
# if df.loc[i,('sentiment')] == -1 and df.loc[i,('modified_rating')] == 1:
# df.loc[i,('modified_rating')] = copy.deepcopy(df.loc[i,('sentiment')]) # change "modified_rating" (8) to "compound_bin" (6)
#
# elif df.loc[i,('sentiment')] == 1 and df.loc[i,('modified_rating')] == -1:
# df.loc[i,('modified_rating')] = copy.deepcopy(df.loc[i,('sentiment')]) # change "modified_rating" to "compound_bin"
#
# i += 1
#
# df.to_csv(filename, index = False)
# print('The csv file was written. File name: ', filename)
#
# return df
#
# # 180 counts how many "None" sentiment values are there for the stocktwits sentiment modified rating values
# def none_count(df):
# i = 0
# sentiment_number = 0
#
# while i < len(df):
# if df.loc[i,('modified_rating')] == 0.0: # column #8 is 'modified_rating'
# sentiment_number += 1
# i +=1
#
# print('The number of "None" stocktwits sentiment values is:', sentiment_number)
# print('The percentage of "None" values is:', (int(sentiment_number/len(df) * 1000)/10), '%')
#
#
# #480 This removes words from the list of stopwords and writes list to csv file
# # https://stackoverflow.com/questions/29771168/how-to-remove-words-from-a-list-in-python#:~:text=one%20more%20easy%20way%20to%20remove%20words%20from,%3D%20words%20-%20stopwords%20final_list%20%3D%20list%20%28final_list%29
# #new_words = list(filter(lambda w: w not in stop_words, initial_words))
# def remove_from_stopwords(sw, relevant_path):
# WordsToBeRem = ['no']
# stopWords = list(filter(lambda w: w not in WordsToBeRem, sw)) #It will retain anyword in sw that is not in WordsToBeRemoved
#
# #converts the stopword list to a df so that it can then be written to a csv file
# df_stopwords = pd.DataFrame(stopWords, columns = ['stopwords'])
# name_of_csv_file = relevant_path + '/' + 'stopwords.csv'
# df_stopwords.to_csv(name_of_csv_file, index = False) #writes stopwords to csv file
#
# #print(stopWords)
#
# return stopWords
#
# #490 Checks to see of the words were removed from the stopWords list.
# #inputs: stopword list (sw) and the word to be removed from the so (WordToBeRem):
# def check_stopwords(sw, WordToBeRem):
#
# r = 0
#
# for w in sw:
# #print(w)
# if w == WordToBeRem:
# print('The word ', w , ' is still in the stopWords list!')
# r += 1
#
# if r == 0:
# print('It did remove the words from the stopWords list!')
#
# #print(len(stopWords))
#
# #510 Removes stopwords from all the "body" text (tweets); to do this it must tokenize the string which means it must parse
# # the string into individual words. It then compares the words with the words in the stopwords list and if there is not
# # match it puts the word into the "wordsFiltered" list. It keeps appending to the list until all of the words are checked.
# # It then joins the individual words back into a string.
# #There is a difference between "deep" copy and "shallow" copy. "Deep" copy make a copy where the index and data are
# # separate from the original. "Shallow" copy is like a pointer where the two df share a common index and data
# #dfScrubbed = df #This is a shallow copy
# def rem_stopwords(df, stopWords):
#
# from nltk.tokenize import sent_tokenize, word_tokenize
#
# dfScrubbed = df.copy() #This is a deep copy. df.copy(deep = True); deep = True is default
#
# i = 0
# while i < len(df):
#
# data = df.loc[i,('body')]
# words = word_tokenize(data) # separates the string into a individual words.
# wordsFiltered = []
#
# for w in words:
# if w not in stopWords:
# wordsFiltered.append(w) # makes a new word list without the stopwords
#
# joinedWordsFiltered = ' '.join(wordsFiltered)
#
# dfScrubbed.loc[i,('body')] = joinedWordsFiltered # replaces the recorded in dfScrubbed with the stopWords removed
# # from the 'body'
#
# i += 1
#
# #print(wordsFiltered)
#
# #### method removes empty body rows and reindexes
# dfScrubbed = remove_empty_body_rows(dfScrubbed)
#
# #### checks to see if there are any empty records left
# print('Are there any empty body records?')
# empty = np.where(pd.isnull(dfScrubbed['body'])) #checks to see if there are any empty records in the column 'body'
# print(empty)
#
# #print(dfScrubbed.head())
#
# return dfScrubbed
#
# #550 converts the scrubbed_compound scores into a 1 significant figure integer from a float number; rounding up
# # this is only needed if you are going to uses the 'scrubbed_compound' value as the label.
# def int_conversion(dfs):
# dfs['scrubbed_compound'] = np.int64((dfs['scrubbed_compound'] + .05) * 10)
#
# # 550 converts the 'scrubbed_compound' (column 10) data to either a 1, 0 or -1.
# # if nltk sentiment number are >= .1; 0 if -.1 < x < .1 and -1 if <= -.1 and over-rights the value in compound_bin
# # creates a new column called 'compound_bin' from the raw_compound scores
# def bin_sentiment(dfs):
# dfs['scrubbed_compound_bin'] = dfs['scrubbed_compound'] # creates a new column 'scrubbed_compound_bin' (column 11)
#
# i = 0
# while i < len(df):
# if dfs.loc[i,('scrubbed_compound')] >= 0.1: # column 10 is 'scrubbed_compound'
# dfs.loc[i, ('scrubbed_compound_bin')] = np.int(dfs.loc[i,('scrubbed_compound')] + .9) # column 11 is 'scurbbed_compound_bin'
#
# if dfs.loc[i,('scrubbed_compound')] < .1 and dfs.loc[i,('scrubbed_compound')] > -.1:
# dfs.iloc[i, 11] = 0
#
# if dfs.loc[i,('scrubbed_compound')] <= -.1:
# dfs.loc[i, ('scrubbed_compound_bin')] = np.int(dfs.loc[i,('scrubbed_compound')] - .9)
# i += 1
#
# print(dfs)
#
# #640 compares the first record (index = 0) raw data ("body" column) with scrubbed (stopwords removed) data
# #inputs: df - original df; dfs - scrubbed df (stopwords removed)
# def compare_scrubbed(df, dfs):
# print(df.loc[0,('body')])
# print(dfs.loc[0,('body')])
#
# # 650 Loads and combines two different dataframes in df; this is to combine two input datasets where the 'none'
# #values have been modified; this is to see if increased records will increase the accuracy of the model.
# def combine_dfs(df1, df2):
#
# df = df1.append(df2)
#
# print('The length of file 1 is:', len(df1))
# print('The length of file 2 is:', len(df2))
#
# print('The length of the combined dataframe is:', len(df))
#
# return df
#
# # 660 Writes a csv file
# #input: df that is to be saved as a csv; output file name (eg 'tech stockTwit 03112021 dup advert stopwords.csv'
# def write_csv(df, filename_output, relevant_path):
#
# df.to_csv(relevant_path + '/' + filename_output, index = False, encoding = 'utf-8')
# print('The csv file was written. File name: ', filename_output)
#
# # displays a list of file with on a csv suffix
# def list_dir_files(relevant_path):
# # https://clay-atlas.com/us/blog/2019/10/27/python-english-tutorial-solved-unicodeescape-error-escape-syntaxerror/?doing_wp_cron=1618286551.1528689861297607421875
# #need to change \ to /
#
# import os
#
# included_extensions = ['csv']
# file_names = [fn for fn in os.listdir(relevant_path) # uses os.listdir to display only .csv files
# if any(fn.endswith(ext) for ext in included_extensions)]
#
# print('Path: ', relevant_path)
#
# for f in file_names:
# print(f)
#
# # removes specific rows and resets the index
# def remove_empty_body_rows(df):
# df.dropna(subset=['body'], inplace=True) #drops empty body records
# df = df.reset_index(drop = True) # resets the index
# return df
#
# #### checks to see if there are any empty records left
# def empty_records_check(df):
# print('Are there any empty body records?')
# empty = np.where(pd.isnull(df['body'])) #checks to see if there are any empty records in the column 'body'
#
# if empty[0].size == 0:
# print('There are no empty records! \n', empty)
# else:
# print('There are empty records ...\n', empty)
#
# # #### Removes emojis
# def remove_emoji(string):
# emoji_pattern = re.compile("["
# u"\U0001F600-\U0001F64F" # emoticons
# u"\U0001F300-\U0001F5FF" # symbols & pictographs
# u"\U0001F680-\U0001F6FF" # transport & map symbols
# u"\U0001F1E0-\U0001F1FF" # flags (iOS)
# u"\U00002500-\U00002BEF" # chinese char
# u"\U00002702-\U000027B0"
# u"\U00002702-\U000027B0"
# u"\U000024C2-\U0001F251"
# u"\U0001f926-\U0001f937"
# u"\U00010000-\U0010ffff"
# u"\u2640-\u2642"
# u"\u2600-\u2B55"
# u"\u200d"
# u"\u23cf"
# u"\u23e9"
# u"\u231a"
# u"\ufe0f" # dingbats
# u"\u3030"
# "]+", flags=re.UNICODE)
# return emoji_pattern.sub(r'', string)
#
# # combines both names of file wanted to combing and writes csv file
# def combine_two_files():
# first_filename = input()
#
# def rem_dup_adver_ever_oth_emoji(df):
# #remove duplicates
# r_d = input('Do you want to remove duplicates? [Press enter if no] ')
# if r_d in yes_resp:
# df = remove_duplicates(df) #return df; removes duplicates
# remove_dupl = 'r_d '
# else:
# remove_dupl = ''
#
# #remove advertisements
# r_a = input('Do you want to remove advertisements? [Press enter if no] ')
# if r_a in yes_resp:
# df = filter_records(df) #returns df; removes addvertisements
# remove_advertisements = 'r_a '
# else:
# remove_advertisements = ''
#
# # 110 OPTIONAL: This removes every other "None" record to reduce the total number of "None" rating. This is to make
# #the 'None' proportions more equal. It also prints the ratios of each sentiment response to the total number
# #of responses.
# r_e_o = input('Do you want to remove every other neutral sentiment record: [Press enter if no] ')
# if r_e_o in yes_resp:
# df = remove_every_other(df) #returns df
# rem_every_other = 'r_e_o '
# else:
# rem_every_other = ''
#
# # remove emojis
# r_emoj = input('Do you want to remove emojis from the body records: [Press enter if no] ')
# if r_emoj in yes_resp:
# #print('location1')
# i = 0
# #print('location2')
# while i < len(df):
# #print('location3', i)
# string = df.loc[i, ('body')]
# #print('location4')
# #print('original string: ', string)
# new_string = remove_emoji(string)
# #print('location5')
# #print('new string: ', new_string)
# df.loc[i, ('body')] = new_string
# #print(df['body'][i])
#
# r_emoji = 'r_emoj '
#
# i += 1
# else:
# r_emoji = ''
#
# return df, r_emoji, rem_every_other, remove_advertisements, remove_dupl
#
#
# def vader_analysis(df): #performs Vader sentiment analysis and adds to df the compound binning and converts the stocktwits string value to a numerical value.
# df = vader_sentiment(df) #returns df; adds column with Vader sentiment values ('raw_compound') from the 'body' column.
# print('Produced Vader sentiment values.')
#
# df = compound_binning(df) #returns df; adds a column where the raw_compound scores are translated into 1, 0 or -1 'compound_bin'
# print('Completed the Vader compound binning.')
#
# df = convert_sentiment_to_numerical(df) #returns df
# # print('Converted the Stocktwits sentiments to a numerical value (1,0,-1).')
# print('\nAll finished with the Vader sentiment analysis.\n')
#
# return df
# +
# Interactive entry point: list the scraped CSV files, let the user pick one,
# and load it into the module-level dataframe `df`.
# NOTE(review): `list_dir_files`, `getData` and the `time` module are assumed to
# be defined/imported earlier in this file (real definitions, not the
# commented-out class above) -- confirm, otherwise this cell raises NameError.
relevant_path = 'C:/Users/pstri/OneDrive/Documents/Personal/Kokoro/NLTK/Code Project/Scraped Files'
#relevant_path = 'C:/Users/pstri/OneDrive/Documents/Personal/Kokoro/NLTK/Code Project/Preprocessing'
print('Here is a list of the csv files to choose from: \n')
list_dir_files(relevant_path) # gives all of the file options in the relevant path.
time.sleep(2)  # brief pause so the listing is readable before the prompt appears
name = input('\nWhat file do you want to use? \n')
df = getData(relevant_path + '/' + name) #returns df; reads csv file into df
print('Imported the csv file.')
finviz_resp = input('Is this a file from scraping Finviz?')  # presumably branched on later in the workflow -- verify
# -
# removes duplicate symbol headers
def remove_symbol_duplicate_headers(self):
    """Drop rows that are repeated CSV header lines and re-index.

    When several scraped CSV files are concatenated, each file's header row
    shows up as a data row whose 'symbol' cell literally contains the string
    'symbol'.  This removes those rows and returns a freshly indexed copy.

    The parameter keeps the name ``self`` because the function was lifted
    from a class method; it is the dataframe to clean.

    Bug fixed: jupytext turned the ``%time df.drop(...)`` magic into a
    comment, so ``df`` was read before assignment (UnboundLocalError) and
    the header rows were never dropped.  The drop is restored here and the
    function now operates on the dataframe passed in instead of a global.
    """
    print('\nDropping duplicate headers ...')
    column = 'symbol'
    # Drop every row whose 'symbol' column equals the header text itself.
    df = self.drop(self[self['symbol'] == column].index)
    # Re-index so row positions are contiguous again after the drop.
    df = df.reset_index(drop=True)
    return df
| redditPreprocessingProduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.4 64-bit (conda)
# metadata:
# interpreter:
# hash: 77c00fb1aaf97f56b4f77d3b27f111b910c44fa38f593c5d680df2d8d98f6a84
# name: python3
# ---
# +
#Importando bibliotecas
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pandas_datareader.data as web
# !pip install yfinance --upgrade --no-cache-dir
import yfinance as yf
yf.pdr_override()
# -
# Fetch daily market data for VALE3.SA (Vale S.A. on B3) from Yahoo Finance.
# NOTE(review): the variable is named `ibov` but it holds VALE3 data, not the
# Ibovespa index -- the name is misleading; confirm before reusing downstream.
ibov = web.get_data_yahoo('VALE3.SA', start='2018-05-03', end='2021-03-17')
# Show the most recent rows to sanity-check the download.
ibov.tail()
# Plot the closing price with 21- and 100-day simple moving averages.
ibov["Close"].plot(figsize=(22,8), label="VALE3")
ibov["Close"].rolling(21).mean().plot(label="MM21")
ibov["Close"].rolling(100).mean().plot(label="MM100")
plt.legend()
# Same plot restricted to 2018-2021, with 21- and 200-day moving averages.
ibov_fatiado = ibov[(ibov.index.year >= 2018) & (ibov.index.year <= 2021)]
ibov_fatiado["Close"].plot(figsize=(22,8), label="VALE3.SA")
ibov_fatiado["Close"].rolling(21).mean().plot(label="MM21")
ibov_fatiado["Close"].rolling(200).mean().plot(label="MM200")
plt.legend()
# Zoom in on 2021 only, with 21- and 100-day moving averages.
ibov_fatiado = ibov[ibov.index.year == 2021]
ibov_fatiado["Close"].plot(figsize=(22,8), label="VALE3.SA")
ibov_fatiado["Close"].rolling(21).mean().plot(label="MM21")
ibov_fatiado["Close"].rolling(100).mean().plot(label="MM100")
plt.legend()
# Inspect rows from December (any year in the downloaded range).
ibov_fatiado = ibov[ibov.index.month == 12]
ibov_fatiado.tail(50)
# Inspect rows from the 14th day of each month.
ibov_fatiado = ibov[ibov.index.day == 14]
ibov_fatiado.tail(50)
| mediamovel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# + run_control={"frozen": true} dc={"key": "4"} deletable=false editable=false tags=["context"]
# ## 1. A preliminary look at the Bustabit data
# <p>The similarities and differences in the behaviors of different people have long been of interest, particularly in psychology and other social science fields. Understanding human behavior in particular contexts can help us to make informed decisions. Consider a game of poker - understanding why players raise, call, and fold in various situations can provide a distinct advantage competitively.</p>
# <p>Along these lines, we are going to focus on the behavior on <strong>online gamblers</strong> from a platform called <a href="https://www.bustabit.com" target="_blank">Bustabit</a>. There are a few basic rules for playing a game of Bustabit:</p>
# <ol>
# <li>You bet a certain amount of money (in Bits, which is 1 / 1,000,000th of a Bitcoin) and you win if you cash out before the game <strong>busts</strong>.</li>
# <li>Your win is calculated by the multiplier value at the moment you cashed out. For example, if you bet 100 and the value was 2.50x at the time you cashed out, you win 250. In addition, a percentage <code>Bonus</code> per game is multiplied with your bet and summed to give your final <code>Profit</code> in a winning game. Assuming a <code>Bonus</code> of 1%, your <code>Profit</code> for this round would be <code>(100 x 2.5) + (100 x .01) - 100 = 151</code></li>
# <li>The multiplier increases as time goes on, but if you wait too long to cash out, you may bust and lose your money.</li>
# <li>Lastly, the house maintains slight advantages because in 1 out of every 100 games, everyone playing busts.</li>
# </ol>
# <p>Below we see an example of a winning game:</p>
# <p><img src="https://assets.datacamp.com/production/project_643/img/bustabit_win.gif" alt=""></p>
# <p>And a losing game, for comparison:</p>
# <p><img src="https://assets.datacamp.com/production/project_643/img/bustabit_loss.gif" alt=""></p>
# <p>Our goal will be to define relevant <strong>groups</strong> or <strong>clusters</strong> of Bustabit users to identify what patterns and behaviors of gambling persist. Can we describe a particular group as risk-averse? Is there a set of gamblers that have a strategy that seems to be more successful in the long term?</p>
# <p>The data you will be working with includes over 40000 games of Bustabit by a bit over 4000 different players, for a total of 50000 rows (one game played by one player). The data includes the following variables:</p>
# <ol>
# <li><strong>Id</strong> - Unique identifier for a particular row (game result for one player)</li>
# <li><strong>GameID</strong> - Unique identifier for a particular game</li>
# <li><strong>Username</strong> - Unique identifier for a particular player</li>
# <li><strong>Bet</strong> - The number of Bits (1 / 1,000,000th of a Bitcoin) bet by the player in this game</li>
# <li><strong>CashedOut</strong> - The multiplier at which this particular player cashed out</li>
# <li><strong>Bonus</strong> - The bonus award (in percent) awarded to this player for the game</li>
# <li><strong>Profit</strong> - The amount this player won in the game, calculated as (Bet * CashedOut) + (Bet * Bonus) - Bet</li>
# <li><strong>BustedAt</strong> - The multiplier value at which this game busted</li>
# <li><strong>PlayDate</strong> - The date and time at which this game took place</li>
# </ol>
# <p>Let's begin by doing an exploratory dive into the Bustabit data!</p>
# + dc={"key": "4"} tags=["sample_code"]
# Load the tidyverse
library(tidyverse)
# Read in the bustabit gambling data (one row per player per game)
bustabit <- read_csv("datasets/bustabit.csv")
# Look at the first five rows of the data
head(bustabit, 5)
# Find the highest multiplier (BustedAt value) achieved in a game:
# sort descending and keep only the top row
bustabit %>%
    arrange(desc(BustedAt)) %>%
    slice(1)
# + run_control={"frozen": true} dc={"key": "11"} deletable=false editable=false tags=["context"]
# ## 2. Deriving relevant features for clustering
# <p>The Bustabit data provides us with many features to work with, but to better quantify player behavior, we need to derive some more variables. Currently, we have a <code>Profit</code> column which tells us the amount won in that game, but no indication of how much was lost if the player busted, and no indicator variable quantifying whether the game itself was a win or loss overall. Hence, we will derive or modify the following variables:</p>
# <ol>
# <li><strong>CashedOut</strong> - If the value for <code>CashedOut</code> is <code>NA</code>, we will set it to be 0.01 greater than the <code>BustedAt</code> value to signify that the user failed to cash out before busting</li>
# <li><strong>Profit</strong> - If the value for <code>Profit</code> is <code>NA</code>, we will set it to be zero to indicate no profit for the player in that game</li>
# <li><strong>Losses</strong> - If the new value for <code>Profit</code> is zero, we will set this to be the amount the player lost in that game, otherwise we will set it to zero. This value should always be <em>zero or negative</em></li>
# <li><strong>GameWon</strong> - If the user made a profit in this game, the value should be 1, and 0 otherwise</li>
# <li><strong>GameLost</strong> If the user had a loss in this game, the value should be 1, and 0 otherwise</li>
# </ol>
# + dc={"key": "11"} tags=["sample_code"]
# Create the new feature variables:
#   CashedOut - NA means the player busted; record a point just past the bust
#   Profit    - NA means no winnings; treat as zero
#   Losses    - the full bet is lost whenever no profit was made (always <= 0)
#   GameWon / GameLost - complementary 0/1 indicators derived from Profit
bustabit_features <- bustabit %>%
  mutate(CashedOut = ifelse(is.na(CashedOut), BustedAt + .01, CashedOut),
        Profit = ifelse(is.na(Profit), 0, Profit),
        Losses = ifelse(Profit == 0, -1 * Bet, 0),
        GameWon = ifelse(Profit == 0, 0, 1),
        GameLost = ifelse(Profit == 0, 1, 0))
# Look at the first five rows of the features data
head(bustabit_features, 5)
# + run_control={"frozen": true} dc={"key": "18"} deletable=false editable=false tags=["context"]
# ## 3. Creating per-player statistics
# <p>The primary task at hand is to cluster Bustabit <strong>players</strong> by their respective gambling habits. Right now, however, we have features at the per-game level. The features we've derived would be great if we were interested in clustering properties of the games themselves - we know things about the BustedAt multiplier, the time the game took place, and lots more. But to better quantify player behavior, we must group the data by player (<code>Username</code>) to begin thinking about the relationship and similarity between groups of players. Some per-player features we will create are:</p>
# <ol>
# <li><strong>AverageCashedOut</strong> - The average multiplier at which the player cashes out</li>
# <li><strong>AverageBet</strong> - The average bet made by the player</li>
# <li><strong>TotalProfit</strong> - The total profits over time for the player</li>
# <li><strong>TotalLosses</strong> - The total losses over time for the player</li>
# <li><strong>GamesWon</strong> - The total number of individual games the player won</li>
# <li><strong>GamesLost</strong> - The total number of individual games the player lost</li>
# </ol>
# <p>With these variables, we will be able to potentially group similar users based on their typical Bustabit gambling behavior.</p>
# + dc={"key": "18"} tags=["sample_code"]
# Group by players to create per-player summary statistics
# (collapses the per-game rows into one row per Username)
bustabit_clus <- bustabit_features %>%
  group_by(Username) %>%
  summarize(AverageCashedOut = mean(CashedOut),
           AverageBet = mean(Bet),
           TotalProfit = sum(Profit),
           TotalLosses = sum(Losses),
           GamesWon = sum(GameWon),
           GamesLost = sum(GameLost))
# View the first five rows of the data
head(bustabit_clus, n = 5)
# + run_control={"frozen": true} dc={"key": "25"} deletable=false editable=false tags=["context"]
# ## 4. Scaling and normalization of the derived features
# <p>The variables are on very different <strong>scales</strong> right now. For example, <code>AverageBet</code> is in bits (1/1000000 of a Bitcoin), <code>AverageCashedOut</code> is a multiplier, and <code>GamesLost</code> and <code>GamesWon</code> are counts. As a result, we would like to <strong>normalize</strong> the variables such that across clustering algorithms, they will have approximately equal weighting.</p>
# <p>One thing to think about is that in many cases, we may actually want a particular numeric variable to maintain a higher weight. This could occur if there is some prior knowledge regarding, for example, which variable might be most important in terms of defining similar Bustabit behavior. In this case, without that prior knowledge, we will forego the weighting of variables and scale everything. We are going to use <strong>mean-sd</strong> standardization to scale the data. Note that this is also known as a <strong>Z-score</strong>.</p>
# <p>Note that we could compute the Z-scores by using the base R function <code>scale()</code>, but we're going to write our own function in order to get the practice.</p>
# + dc={"key": "25"} tags=["sample_code"]
# Create the mean-sd standardization function
# (z-score: center at 0, scale to unit standard deviation)
mean_sd_standard <- function(x) {
    (x-mean(x))/sd(x)
}
# Apply the function to each numeric variable in the clustering set
# so all features carry roughly equal weight in the distance computation
bustabit_standardized <- bustabit_clus %>%
    mutate_if(is.numeric, mean_sd_standard)
# Summarize our standardized data
summary(bustabit_standardized)
# + run_control={"frozen": true} dc={"key": "32"} deletable=false editable=false tags=["context"]
# ## 5. Cluster the player data using K means
# <p>With standardized data of per-player features, we are now ready to use K means clustering in order to cluster the players based on their online gambling behavior. K means is implemented in R in the <code>kmeans()</code> function from the stats package. This function requires the <code>centers</code> parameter, which represents the number of clusters to use. </p>
# <p>Without prior knowledge, it is often difficult to know what an appropriate choice for the number of clusters is. We will begin by choosing <strong>five</strong>. This choice is rather arbitrary, but represents a good initial compromise between choosing too many clusters (which reduces the interpretability of the final results), and choosing too few clusters (which may not capture the distinctive behaviors effectively). Feel free to play around with other choices for the number of clusters and see what you get instead!</p>
# <p>One subtlety to note - because the K means algorithm uses a random start, we are going to set a random seed first in order to ensure the results are reproducible.</p>
# + dc={"key": "32"} tags=["sample_code"]
# Choose 20190101 as our random seed
# (kmeans uses random starting centroids, so fix the seed for reproducibility)
set.seed(20190101)
# Cluster the players using kmeans with five clusters;
# Username is dropped because kmeans only accepts numeric columns
cluster_solution <- bustabit_standardized %>%
    select(-Username) %>%
    kmeans(centers = 5)
# Store the cluster assignments back into the clustering data frame object
bustabit_clus$cluster <- factor(cluster_solution$cluster)
# Look at the distribution of cluster assignments
table(bustabit_clus$cluster)
# + run_control={"frozen": true} dc={"key": "39"} deletable=false editable=false tags=["context"]
# ## 6. Compute averages for each cluster
# <p>We have a clustering assignment which maps every Bustabit gambler to one of five different groups. To begin to assess the quality and distinctiveness of these groups, we are going to look at <strong>group averages</strong> for each cluster across the original variables in our clustering dataset. This will, for example, allow us to see which cluster tends to make the largest bets, which cluster tends to win the most games, and which cluster tends to lose the most money. This will provide us with our first clear indication as to whether the behaviors of the groups appear distinctive!</p>
# + dc={"key": "39"} tags=["sample_code"]
# Group by the cluster assignment and calculate averages for every
# numeric per-player statistic.
bustabit_clus_avg <- bustabit_clus %>%
    group_by(cluster) %>%
    # summarize_if() takes the predicate function directly; the original
    # wrapped it in the deprecated funs(), which passes a fun_list where a
    # predicate is expected and errors under current dplyr.
    summarize_if(is.numeric, mean)
# View the resulting table
bustabit_clus_avg
# + run_control={"frozen": true} dc={"key": "46"} deletable=false editable=false tags=["context"]
# ## 7. Visualize the clusters with a Parallel Coordinate Plot
# <p>We can already learn a bit about our cluster groupings by looking at the previous table. We can clearly see that there is a group that makes very large bets, a group that tends to cash out at very high multiplier values, and a group that has played many games of Bustabit. We can visualize these group differences graphically using a Parallel Coordinate Plot or PCP. To do so, we will introduce one more kind of scaling: min-max scaling, which forces each variable to fall between 0 and 1.</p>
# <p>Other choices of scaling, such as the Z-score method from before, can work effectively as well. However, min-max scaling has the advantage of <strong>interpretability</strong> - a value of 1 for a particular variable indicates that cluster has the highest value compared to all other clusters, and a value of 0 indicates that it has the lowest. This can help make relative comparisons between the clusters more clear.</p>
# <p>The <code>ggparcoord()</code> function from <code>GGally</code> will be used to produce a Parallel Coordinate Plot. Note that this has a built-in argument <code>scale</code> to perform different scaling options, including min-max scaling. We will set this option to <code>"globalminmax"</code> to perform no scaling, and write our own scaling routine for practice. If you are interested, you can look at the function definition for <code>ggparcoord()</code> to help you write our scaling function!</p>
# + dc={"key": "46"} tags=["sample_code"]
# Create the min-max scaling function
# (maps each variable to [0, 1]: 1 = highest cluster, 0 = lowest)
min_max_standard <- function(x) {
    (x - min(x)) / (max(x) - min(x))
}
# Apply this function to each numeric variable in the bustabit_clus_avg object
bustabit_avg_minmax <- bustabit_clus_avg %>%
    mutate_if(is.numeric, min_max_standard)
# Load the GGally package
library(GGally)
glimpse(bustabit_avg_minmax)
# Create a parallel coordinate plot of the values;
# scale = "globalminmax" performs no extra scaling (data already min-max scaled)
ggparcoord(bustabit_avg_minmax, columns = 2:ncol(bustabit_avg_minmax),
            groupColumn = "cluster", scale = "globalminmax", order = "skewness")+
  theme(axis.text.x = element_text(size=6))
# + run_control={"frozen": true} dc={"key": "53"} deletable=false editable=false tags=["context"]
# ## 8. Visualize the clusters with Principal Components
# <p>One issue with plots like the previous is that they get more unwieldy as we continue to add variables. One way to solve this is to use the Principal Components of a dataset in order to reduce the dimensionality to aid in visualization. Essentially, this is a two-stage process:</p>
# <ol>
# <li>We extract the principal components in order to reduce the dimensionality of the dataset so that we can produce a scatterplot in two dimensions that captures the underlying structure of the higher-dimensional data.</li>
# <li>We then produce a scatterplot of each observation (in this case, each player) across the two Principal Components and color according to their cluster assignment in order to visualize the separation of the clusters.</li>
# </ol>
# <p>This plot provides interesting information in terms of the similarity of any two players. In fact, you will see that players who fall close to the boundaries of clusters might be the ones that exhibit the gambling behavior of a couple of different clusters. After you produce your plot, try to determine which clusters seem to be the most "different." Also, try playing around with different projections of the data, such as PC3 vs. PC2, or PC3 vs. PC1, to see if you can find one that better differentiates the groups.</p>
# + dc={"key": "53"} tags=["sample_code"]
# Calculate the principal components of the standardized data
# (no extra scaling needed in prcomp: inputs are already z-scored)
my_pc <- bustabit_standardized %>%
  select(-Username) %>%
  prcomp()
my_pc <- as.data.frame(my_pc$x)
# Store the cluster assignments in the new data frame
my_pc$cluster <- bustabit_clus$cluster
# Use ggplot() to plot PC2 vs PC1, and color by the cluster assignment
p1 <- ggplot(my_pc, aes(x=PC1, y=PC2, color=cluster))+
  geom_point()
# View the resulting plot
p1
# + run_control={"frozen": true} dc={"key": "60"} deletable=false editable=false tags=["context"]
# ## 9. Analyzing the groups of gamblers our solution uncovered
# <p>Though most of the statistical and programmatical work has been completed, possibly the most important part of a cluster analysis is to interpret the resulting clusters. This often is the most desired aspect of the analysis by clients, who are hoping to use the results of your analysis to inform better business decision making and actionable items. As a final step, we'll use the parallel coordinate plot and cluster means table to interpret the Bustabit gambling user groups! Roughly speaking, we can breakdown the groups as follows:</p>
# <p><strong><p style="color:#d95f02">Cautious Commoners:</p></strong> This is the largest of the five clusters, and might be described as the more casual Bustabit players. They've played the fewest number of games overall, and tend to make more conservative bets in general. </p>
# <p><strong><p style="color:#66a61e">Strategic Addicts:</p></strong> These users play a lot of games on Bustabit, but tend to keep their bets under control. As a result, they've made on average a net positive earnings from the site, in spite of having the most games played. They seem to maintain a strategy (or an automated script/bot) that works to earn them money.</p>
# <p><strong><p style="color:#7570b3">Risky Commoners:</p></strong> These users seem to be a step above the Cautious Commoners in their Bustabit gambling habits, making larger average bets, and playing a larger number of games on the site. As a result, though they have about the same number of average games won as the Risk Takers, they have a significantly higher number of games lost.</p>
# <p><strong><p style="color:#1b9e77">Risk Takers: </p></strong> These users have played only a couple games on average, but their average cashed out value is significantly higher than the other clusters, indicating that they tend to wait for the multiplier to increase to large values before cashing out.</p>
# <p><strong><p style="color:#e7298a">High Rollers:</p></strong> High bets are the name of the game for this group. They bet large sums of money in each game, although they tend to cash out at lower multipliers and thus play the game more conservatively, particularly compared to the Risk Takers. Interestingly, these users have also on average earned a net positive earnings from their games played.</p>
# + dc={"key": "60"} tags=["sample_code"]
# Assign cluster names to clusters 1 through 5 in order
# (labels chosen from the interpretation in the markdown cell above;
# they depend on the seed used for kmeans, so re-check after reclustering)
cluster_names <- c(
    "Risky Commoners",
    "High Rollers",
    "Risk Takers",
    "Cautious Commoners",
    "Strategic Addicts"
)
# Append the cluster names to the cluster means table
bustabit_clus_avg_named <- bustabit_clus_avg %>%
    cbind(Name = cluster_names)
# View the cluster means table with your appended cluster names
bustabit_clus_avg_named
| Clustering Bustabit Gambling Behavior/notebook.ipynb |
# +
# Copyright 2010-2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MIP example that uses a variable array."""
# [START program]
# [START import]
from __future__ import print_function
from ortools.linear_solver import pywraplp
# [END import]
# [START program_part1]
# [START data_model]
def create_data_model():
    """Return the MIP problem data: 4x5 constraint matrix, row upper
    bounds, objective coefficients, and problem dimensions."""
    coefficient_rows = [
        [5, 7, 9, 2, 1],
        [18, 4, -9, 10, 12],
        [4, 7, 3, 8, 5],
        [5, 13, 16, 3, -7],
    ]
    return {
        'constraint_coeffs': coefficient_rows,
        'bounds': [250, 285, 211, 315],
        'obj_coeffs': [7, 8, 2, 9, 6],
        'num_vars': 5,
        'num_constraints': 4,
    }
# [END data_model]
# [START data]
data = create_data_model()
# [END data]
# [END program_part1]
# [START solver]
# Create the mip solver with the CBC backend.
solver = pywraplp.Solver('simple_mip_program',
                         pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
# [END solver]
# [START program_part2]
# [START variables]
infinity = solver.infinity()
x = {}
# One non-negative integer decision variable per column of the matrix.
for j in range(data['num_vars']):
    x[j] = solver.IntVar(0, infinity, 'x[%i]' % j)
print('Number of variables =', solver.NumVariables())
# [END variables]
# [START constraints]
# Row i enforces: 0 <= sum_j coeffs[i][j] * x[j] <= bounds[i]
for i in range(data['num_constraints']):
    constraint = solver.RowConstraint(0, data['bounds'][i], '')
    for j in range(data['num_vars']):
        constraint.SetCoefficient(x[j], data['constraint_coeffs'][i][j])
print('Number of constraints =', solver.NumConstraints())
# In Python, you can also set the constraints as follows.
# for i in range(data['num_constraints']):
#  constraint_expr = \
# [data['constraint_coeffs'][i][j] * x[j] for j in range(data['num_vars'])]
#  solver.Add(sum(constraint_expr) <= data['bounds'][i])
# [END constraints]
# [START objective]
# Maximize sum_j obj_coeffs[j] * x[j]
objective = solver.Objective()
for j in range(data['num_vars']):
    objective.SetCoefficient(x[j], data['obj_coeffs'][j])
objective.SetMaximization()
# In Python, you can also set the objective as follows.
# obj_expr = [data['obj_coeffs'][j] * x[j] for j in range(data['num_vars'])]
# solver.Maximize(solver.Sum(obj_expr))
# [END objective]
# [START solve]
status = solver.Solve()
# [END solve]
# [START print_solution]
# Any non-OPTIMAL status (FEASIBLE, INFEASIBLE, ...) falls through to the
# generic failure message below.
if status == pywraplp.Solver.OPTIMAL:
    print('Objective value =', solver.Objective().Value())
    for j in range(data['num_vars']):
        print(x[j].name(), ' = ', x[j].solution_value())
    print()
    print('Problem solved in %f milliseconds' % solver.wall_time())
    print('Problem solved in %d iterations' % solver.iterations())
    print('Problem solved in %d branch-and-bound nodes' % solver.nodes())
else:
    print('The problem does not have an optimal solution.')
# [END print_solution]
| examples/notebook/linear_solver/mip_var_array.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lecture 8
# ## Tensor Manipulation
# <hr/>
import tensorflow as tf
# TF1-style graph mode: ops build a graph and are evaluated via a Session.
t = tf.constant([1,2,3,4])
s = tf.Session()
# Rank-1 tensor of length 4 -> shape [4]
tf.shape(t).eval(session = s)
matrix1 = tf.constant([[1,2],[3,4]])  # shape (2, 2)
matrix2 = tf.constant([[1],[2]])      # shape (2, 1)
print(matrix1.shape)
print(matrix2.shape)
# True matrix product: (2,2) @ (2,1) -> (2,1)
print('\n',tf.matmul(matrix1, matrix2).eval(session=s),'\n')
# Element-wise product with broadcasting: (2,2) * (2,1) -> (2,2)
print((matrix1 * matrix2).eval(session=s))
# Further tensor-manipulation topics:
#
# - `axis` — the dimension along which reductions and joins operate
#
# - `tf.reshape` — change a tensor's shape without changing its data
#
# - `tf.squeeze()` — remove dimensions of size 1
#
# - `tf.expand_dims` — insert a new dimension of size 1
#
# - `tf.stack` — join tensors along a new axis
#
#
# - `tf.ones_like` — tensor of ones with the same shape/dtype as the input
#
# - `tf.zeros_like` — tensor of zeros with the same shape/dtype as the input
| Lecture8 Tensor Manipulation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import requests
import re
import os
import glob
import csv
import numpy as np
import matplotlib.pyplot as plt
import math
# %matplotlib inline
import json
from bs4 import BeautifulSoup
import urllib.request as rq
import urllib
from zipfile import ZipFile
from io import BytesIO
import lxml
def find_files(response):
    """Scrape the Freddie Mac download page in *response* and return the
    absolute download URL for every anchor tag found on it."""
    base_url = 'https://freddiemac.embs.com/FLoan/Data/'
    soup = BeautifulSoup(response.text, "lxml")
    return [base_url + anchor['href'] for anchor in soup.find_all('a')]
# +
# Logging in to the website and downloading the sample files from 2005 onwards
def downloadSampleFiles(username, password, trainQuarter, testQuarter):
    """Log in to the Freddie Mac loan-data site and download the historical
    sample archives for the two requested quarters into ./Data/historical_data/.

    Parameters
    ----------
    username, password : str
        Site credentials.  NOTE(review): credentials are hard-coded in plain
        text at the call site below -- move them to an environment variable
        or config file and out of source control.
    trainQuarter, testQuarter : str
        Quarter labels such as 'Q12005'; quarters already present on disk
        are skipped.

    Side effects: creates directories and extracts zip archives; returns None.
    """
    import sys  # needed for sys.exit below; was missing from the notebook's imports

    LOGIN_URL = "https://freddiemac.embs.com/FLoan/secure/auth.php"
    URL = "https://freddiemac.embs.com/FLoan/Data/download.php"
    with requests.session() as c:
        payload = {'username': username, 'password': password,\
                   'action': 'acceptTandC', 'acceptSubmit': 'Continue', 'accept':'Yes'}
        try:
            login_response = c.post(LOGIN_URL, data = payload)
        except requests.exceptions.RequestException as e:
            print(e)
            sys.exit(1)
        print('Logged in to the website!!', '\n')
        download_response = c.post(URL, data=payload)
        list_of_links = find_files(download_response)
        print('Collected the required file links!!', '\n')
        download_path = './Data/historical_data/'
        # Work out which of the two requested quarters are not already on disk;
        # file[-10:-4] slices the 'QxYYYY' part out of the extracted file name.
        temp_year_list = [trainQuarter, testQuarter]
        existing_year_list = []
        year_list = []
        for path, subdirs, files in os.walk(download_path):
            for file in files:
                existing_year_list.append(file[-10:-4])
        for x in temp_year_list:
            if x not in set(existing_year_list):
                year_list.append(x)
        if not os.path.exists(download_path):
            print('Creating required directories!!', '\n')
            os.makedirs(download_path)
        else:
            print('Directories already exist. Continuing the process!!', '\n')
        if not year_list:
            print('Historical Files already exist!!!')
            # Was exit(0), which kills the whole interpreter/kernel; returning
            # is the correct way to stop this function early.
            return
        print('Starting historical files download!!', '\n')
        # Match each still-needed quarter against the scraped links.  The
        # original recorded the *leaked* inner loop variable `year`, so every
        # archive was labelled with the last quarter in year_list regardless
        # of which quarter actually matched; record the matching one instead.
        files_required = []
        for link in list_of_links:
            for year in year_list:
                if ('historical_data1_' + year) in link:
                    files_required.append([link, 'historical_data1_' + year])
                    break
        for file, filename in files_required:
            samplefile_response = c.get(file)
            samplefile_content = ZipFile(BytesIO(samplefile_response.content))
            samplefile_content.extractall(download_path + filename)
        print('Sample files downloaded in the path: ' + download_path, '\n')

# NOTE(review): plaintext credentials committed to source -- rotate and externalize.
downloadSampleFiles("<EMAIL>", "V4wlNZow", 'Q12005', 'Q22005')
# -
def changedatatype(df):
    """Cast origination columns to their working dtypes and return *df*.

    Numeric loan attributes become int64; the two flag/name columns become
    strings.  Assumes fillNAN() has already replaced missing values.
    """
    int_columns = ['fico', 'cd_msa', 'mi_pct', 'cnt_borr', 'cnt_units', 'cltv',
                   'dti', 'orig_upb', 'ltv', 'zipcode', 'orig_loan_term']
    str_columns = ['flag_sc', 'servicer_name']
    df[int_columns] = df[int_columns].astype('int64')
    df[str_columns] = df[str_columns].astype('str')
    return df
def changeperformancedatatype(perf_df):
    """Cast monthly-performance columns to their working dtypes and return *perf_df*.

    Counters/amounts become int64; the three yyyymm date fields become
    strings.  Assumes performance_fillNA() has already run.
    """
    int_columns = ['curr_ln_delin_status', 'loan_age', 'remng_mon_to_leg_matur',
                   'zero_bal_cd', 'current_dupb', 'actual_loss_calc']
    str_columns = ['mon_rpt_prd', 'zero_bal_eff_dt', 'lst_pd_inst_duedt']
    perf_df[int_columns] = perf_df[int_columns].astype('int64')
    perf_df[str_columns] = perf_df[str_columns].astype('str')
    return perf_df
def fillNAN(df):
    """Replace missing origination fields with their sentinel defaults and return *df*.

    Numeric columns default to 0; categorical flags use the dataset's
    'unknown' codes ('X', 'XX'); flag_sc defaults to 'N'.
    """
    defaults = {
        'fico': 0, 'flag_fthb': 'X', 'cd_msa': 0, 'mi_pct': 0,
        'cnt_units': 0, 'occpy_sts': 'X', 'cltv': 0, 'dti': 0, 'ltv': 0,
        'channel': 'X', 'ppmt_pnlty': 'X', 'prop_type': 'XX', 'zipcode': 0,
        'loan_purpose': 'X', 'cnt_borr': 0, 'flag_sc': 'N',
    }
    for column, sentinel in defaults.items():
        df[column] = df[column].fillna(sentinel)
    return df
def performance_fillNA(perf_df):
    """Replace missing monthly-performance fields with sentinel defaults and return *perf_df*.

    Amounts default to 0; flags to their 'unknown' codes; the two yyyymm
    dates to '199601' (the earliest reporting period in the dataset).
    """
    defaults = {
        'curr_ln_delin_status': 0,
        'repurch_flag': 'Unknown',
        'mod_flag': 'N',
        'zero_bal_cd': 0,
        'zero_bal_eff_dt': '199601',
        'current_dupb': 0,
        'lst_pd_inst_duedt': '199601',
        'mi_recoveries': 0,
        'net_sale_proceeds': 'U',
        'non_mi_recoveries': 0,
        'expenses': 0,
        'legal_costs': 0,
        'maint_pres_costs': 0,
        'taxes_and_insur': 0,
        'misc_expenses': 0,
        'actual_loss_calc': 0,
        'mod_cost': 0,
    }
    for column, sentinel in defaults.items():
        perf_df[column] = perf_df[column].fillna(sentinel)
    return perf_df
def minmax(perf_df):
    """Return one row per loan with the min and max of each tracked
    performance column.

    Replaces the original's 24 separate groupby frames chained through 23
    merges with a single named-aggregation groupby; the output column names,
    their order, and the values are identical.
    """
    # Columns to summarize, in the original output order.
    tracked_columns = [
        'current_aupb', 'curr_ln_delin_status', 'zero_bal_cd',
        'mi_recoveries', 'non_mi_recoveries', 'expenses', 'legal_costs',
        'maint_pres_costs', 'taxes_and_insur', 'misc_expenses',
        'actual_loss_calc', 'mod_cost',
    ]
    # Named aggregations: min_<col>, max_<col> for each tracked column.
    aggregations = {}
    for column in tracked_columns:
        aggregations['min_' + column] = (column, 'min')
        aggregations['max_' + column] = (column, 'max')
    final_df = (perf_df.groupby('id_loan')
                .agg(**aggregations)
                .reset_index())
    return final_df
def constructcsv():
    """Combine all origination (non-'time') sample files under ./Data/ into a
    single HistoricalOriginationCombined.csv.

    Each source file is pipe-delimited and headerless; rows are cleaned via
    fillNAN/changedatatype, annotated with Year/Quater columns derived from
    the loan id, and appended to the combined CSV (header written once).
    """
    download_path = "./Data/"
    filename = os.path.join(download_path, "HistoricalOriginationCombined.csv")
    # Start from a clean slate so reruns don't append to stale output.
    if os.path.exists(filename):
        os.unlink(filename)
    origination_cols = ['fico', 'dt_first_pi', 'flag_fthb', 'dt_matr', 'cd_msa',
                        'mi_pct', 'cnt_units', 'occpy_sts', 'cltv', 'dti',
                        'orig_upb', 'ltv', 'int_rt', 'channel', 'ppmt_pnlty',
                        'prod_type', 'st', 'prop_type', 'zipcode', 'id_loan',
                        'loan_purpose', 'orig_loan_term', 'cnt_borr',
                        'seller_name', 'servicer_name', 'flag_sc']
    write_header = True
    with open(filename, 'w', encoding='utf-8', newline="") as f:
        for subdir, dirs, files in os.walk(download_path):
            for file in files:
                # Performance files contain 'time' in the name; skip them here.
                if 'time' in file:
                    continue
                sample_df = pd.read_csv(os.path.join(subdir, file), sep="|",
                                        names=origination_cols,
                                        skipinitialspace=True)
                sample_df = fillNAN(sample_df)
                sample_df = changedatatype(sample_df)
                # id_loan chars 2:4 hold the 2-digit vintage year; only '99'
                # predates 2000 in this dataset.
                sample_df['Year'] = ['19' + x if x == '99' else '20' + x
                                     for x in sample_df['id_loan'].apply(lambda x: x[2:4])]
                # NOTE(review): 'Quater' looks like a typo for 'Quarter', but the
                # column name is kept — downstream consumers may depend on it.
                sample_df['Quater'] = sample_df['id_loan'].apply(lambda x: x[4:6])
                # Write the CSV header only for the first file appended.
                sample_df.to_csv(f, header=write_header, index=False)
                write_header = False
def constructperformancecsv():
    """Combine all performance ('time_') sample files under ./Data/ into a
    single HistoricalperformanceCombined.csv.

    Each pipe-delimited file is read in 1M-row chunks, cleaned, reduced to
    per-loan min/max aggregates via minmax(), annotated with Year/Quarter
    columns derived from the loan id, and appended (header written once).
    """
    download_path = "./Data/"
    print("Started")
    filename = os.path.join(download_path, "HistoricalperformanceCombined.csv")
    # Start from a clean slate so reruns don't append to stale output.
    if os.path.exists(filename):
        os.unlink(filename)
    perf_cols = ['id_loan', 'mon_rpt_prd', 'current_aupb', 'curr_ln_delin_status',
                 'loan_age', 'remng_mon_to_leg_matur', 'repurch_flag', 'mod_flag',
                 'zero_bal_cd', 'zero_bal_eff_dt', 'current_int_rte', 'current_dupb',
                 'lst_pd_inst_duedt', 'mi_recoveries', 'net_sale_proceeds',
                 'non_mi_recoveries', 'expenses', 'legal_costs', 'maint_pres_costs',
                 'taxes_and_insur', 'misc_expenses', 'actual_loss_calc', 'mod_cost']
    chunksize = 10 ** 6
    write_header = True
    with open(filename, 'w', encoding='utf-8', newline="") as f:
        for subdir, dirs, files in os.walk(download_path):
            for file in files:
                # Only performance files carry 'time_' in the name.
                if 'time_' not in file:
                    continue
                # Read in 1M-row chunks; concatenate them directly instead of
                # copying the chunk list into a second intermediate list.
                chunks = list(pd.read_csv(
                    os.path.join(subdir, file), sep="|", skipinitialspace=True,
                    chunksize=chunksize, low_memory=False, header=None))
                print('DataFrame creation started!!')
                sample_df = pd.concat(chunks)
                sample_df.columns = perf_cols
                print('DataFrame created!!')
                # Map 'R' -> 999 and 'XX' -> 0 so the delinquency status column
                # can be handled numerically downstream (replaces the original
                # pair of list comprehensions with no-op .apply calls).
                sample_df['curr_ln_delin_status'] = \
                    sample_df['curr_ln_delin_status'].replace({'R': 999, 'XX': 0})
                sample_df = performance_fillNA(sample_df)
                sample_df = changeperformancedatatype(sample_df)
                # Collapse to one row per loan with min/max aggregates.
                filtered_df = minmax(sample_df)
                filtered_df['Year'] = ['19' + x if x == '99' else '20' + x
                                       for x in filtered_df['id_loan'].apply(lambda x: x[2:4])]
                filtered_df['Quarter'] = filtered_df['id_loan'].apply(lambda x: x[4:6])
                # Write the CSV header only for the first file appended.
                filtered_df.to_csv(f, header=write_header, index=False)
                write_header = False
def main():
    """CLI entry point.

    Expects four positional arguments — username, password, train quarter,
    test quarter — then downloads the sample files and builds the combined
    origination CSV. Extra arguments are ignored (previously the last extra
    argument silently overwrote testQuarter).
    """
    user_input = sys.argv[1:]
    print("----Process Started----")
    if len(user_input) == 0:
        print('No Input provided. Process is exiting!!')
        exit(0)
    if len(user_input) < 4:
        # Fail fast: the original left later variables undefined when fewer
        # than four arguments were supplied (NameError at the call below).
        print('Expected 4 arguments: username password trainQuarter testQuarter')
        exit(0)
    # Positional unpacking replaces the manual counter/if-chain.
    username, password, trainQuarter, testQuarter = (str(ip) for ip in user_input[:4])
    downloadSampleFiles(username, password, trainQuarter, testQuarter)
    combineOrigFiles()
    constructcsv()


if __name__ == '__main__':
    main()
| Freddie Mac's US Housing and Urban development loan data analysis/Part 2/part2_download _data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #!/usr/bin/python3
# coding: utf-8
# sapporo
# -
import codecs
from datetime import datetime as dt
import datetime
import math
import sys
from jma_csvdl import save_jma_data, parse_jma_csv
import json
import numpy as np
import os
import pandas as pd
import plotly
import plotly.express as px
import plotly.tools as tls
import plotly.graph_objects as go
import plotly.io as pio
import plotly.offline as offline
from plotly.subplots import make_subplots
import sys
from urllib.request import urlretrieve
from cov19utils import create_basic_plot_figure, \
show_and_clear, moving_average, \
blank2zero, csv2array, \
get_twitter, tweet_with_image, \
get_gpr_predict, FONT_NAME, DT_OFFSET, \
download_if_needed, json2nparr, code2int, age2int, \
get_populations, get_os_idx_of_arr, dump_val_in_arr, \
calc_last1w2w_dif, create_basic_scatter_figure, \
show_and_save_plotly, make_japan_choropleth, \
make_japan_heatmap
from hokkaidomap import make_hokkaido_choropleth, make_hokkaido_plotly
from air_registance import AirRegistance
# Maps a JMA observation city (as it appears in the daily weather table)
# to the English name of the prefecture it belongs to.
city2pref = {
    "札幌": "Hokkaido",    # Sapporo
    "青森": "Aomori",      # Aomori
    "盛岡": "Iwate",       # Morioka
    "仙台": "Miyagi",      # Sendai
    "秋田": "Akita",       # Akita
    "山形": "Yamagata",    # Yamagata
    "福島": "Fukushima",   # Fukushima
    "水戸": "Ibaraki",     # Mito
    "宇都宮": "Tochigi",   # Utsunomiya
    "前橋": "Gunma",       # Maebashi
    "秩父": "Saitama",     # Chichibu
    "千葉": "Chiba",       # Chiba
    "東京": "Tokyo",       # Tokyo
    "横浜": "Kanagawa",    # Yokohama
    "新潟": "Niigata",     # Niigata
    "富山": "Toyama",      # Toyama
    "金沢": "Ishikawa",    # Kanazawa
    "福井": "Fukui",       # Fukui
    "甲府": "Yamanashi",   # Kofu
    "長野": "Nagano",      # Nagano
    "岐阜": "Gifu",        # Gifu
    "静岡": "Shizuoka",    # Shizuoka
    "名古屋": "Aichi",     # Nagoya
    "津": "Mie",           # Tsu
    "彦根": "Shiga",       # Hikone
    "京都": "Kyoto",       # Kyoto
    "大阪": "Osaka",       # Osaka
    "神戸": "Hyogo",       # Kobe
    "奈良": "Nara",        # Nara
    "和歌山": "Wakayama",  # Wakayama
    # Fix: the following two values were swapped. Matsue (松江) is the capital
    # of Shimane and Tottori (鳥取) of Tottori — the original's own comments
    # (島根 / 鳥取) show the intended mapping.
    "松江": "Shimane",     # Matsue
    "鳥取": "Tottori",     # Tottori
    "岡山": "Okayama",     # Okayama
    "広島": "Hiroshima",   # Hiroshima
    "山口": "Yamaguchi",   # Yamaguchi
    "徳島": "Tokushima",   # Tokushima
    "高松": "Kagawa",      # Takamatsu
    "松山": "Ehime",       # Matsuyama
    "高知": "Kochi",       # Kochi
    "福岡": "Fukuoka",     # Fukuoka
    "佐賀": "Saga",        # Saga
    "長崎": "Nagasaki",    # Nagasaki
    "熊本": "Kumamoto",    # Kumamoto
    "大分": "Oita",        # Oita
    "宮崎": "Miyazaki",    # Miyazaki
    "鹿児島": "Kagoshima", # Kagoshima
    "那覇": "Okinawa"      # Naha
}
# Maps a Hokkaido observation city to the numeric index of its subprefecture
# (used as an offset into the Fds_hokkaido / VHs_hokkaido arrays below).
cite2hokkai = {
    "札幌" : 1,# Ishikari
    "函館" : 2,# Oshima
    "江差" : 3,# Hiyama
    "倶知安": 4,# Shiribeshi
    "岩見沢": 5,# Sorachi
    "旭川" : 6,# Kamikawa
    "留萌" : 7,# Rumoi
    "稚内" : 8,# Soya
    "網走" : 9,# Okhotsk
    "室蘭" : 10,# Iburi
    "浦河" : 11,# Hidaka
    "帯広" : 12,# Tokachi
    "釧路" : 13,# Kushiro
    "根室" : 14,# Nemuro
}
# Run only at 16:00 (the scheduled batch hour); otherwise bail out.
# Under IPython ("ipy" in argv[0]) fall through instead of exiting so the
# notebook can still be executed interactively.
if 16 != dt.now().hour:
    print("nothing to do.")
    if "ipy" in sys.argv[0]:
        pass#exit()
    else:
        sys.exit()
# Previous day's data: JMA daily synoptic observations table.
u = "https://www.data.jma.go.jp/obd/stats/data/mdrr/synopday/data2s.html"
dfs = pd.read_html(u)
pop_inf = get_populations()
# Accumulators: indices 1..47 hold per-prefecture values; index 0 is a
# placeholder that is overwritten further below with the observed min/max.
Fds = np.zeros(47 + 1).tolist()
VHs = np.zeros(47 + 1).tolist()
# Same layout for the 14 Hokkaido subprefectures (see cite2hokkai).
Fds_hokkaido = np.zeros(len(cite2hokkai) + 1).tolist()
VHs_hokkaido = np.zeros(len(cite2hokkai) + 1).tolist()
# Initial anchor values — presumably chosen to pin the color scale before the
# real bounds are computed; TODO confirm.
Fds[0] = Fds_hokkaido[0] = 5.55e-11
VHs[0] = VHs_hokkaido[0] = 10.0
ar = AirRegistance()
def str2f(x):
    """Parse a JMA table cell into a float, dropping any ')' / ']' markers."""
    cleaned = x.replace(')', '')
    cleaned = cleaned.replace(']', '')
    return float(cleaned)
def tPrh(r):
    """Extract (temperature, pressure, relative humidity) from a table row.

    Columns 5, 1 and 11 of the JMA daily-data row are parsed with str2f;
    the caller unpacks the result as ``t, P, rh``.
    """
    temperature = str2f(r[5])
    pressure = str2f(r[1])
    humidity = str2f(r[11])
    return temperature, pressure, humidity
def getVH(vp, tc):
    """Volumetric (absolute) humidity from vapor pressure and temperature.

    vp is multiplied by 100 (hPa -> Pa) and tc shifted by 273.15 (°C -> K);
    the ideal-gas form with the molar gas constant 8.31447 and a factor of
    18 (molar mass of water, g/mol) yields grams of water per cubic metre.
    """
    pascals = 100.0 * vp
    kelvin = 273.15 + tc
    return 18.0 * (pascals / (8.31447 * kelvin))
data = np.zeros((1, 47))  # NOTE(review): appears unused in this script
cnt = 0
for df in dfs:
    # The daily synop table we want has exactly 33 columns; skip the others.
    if len(df.columns) == 33:
        for i, row in df.iterrows():
            # Nationwide map: one representative city per prefecture.
            if row[0] in city2pref:
                cnt += 1
                prefname = city2pref[row[0]]
                t, P, rh = tPrh(row)
                Fd = ar.calc(t=t, P=P, rh=rh)
                vh = getVH(str2f(row[10]), t)
                code = pop_inf[prefname]['code']
                Fds[code] = Fd
                VHs[code] = vh
            # Hokkaido map: one representative city per subprefecture.
            if row[0] in cite2hokkai:
                cnt += 1
                t, P, rh = tPrh(row)
                Fd = ar.calc(t=t, P=P, rh=rh)
                vh = getVH(str2f(row[10]), t)
                code = cite2hokkai[row[0]]
                Fds_hokkaido[code] = Fd
                VHs_hokkaido[code] = vh
print("processed {} lines.".format(cnt))
minFd = min(np.min(Fds), np.min(Fds_hokkaido))
# Fix: the original used np.min(Fds_hokkaido) here, so maxFd could come out
# too small when a Hokkaido value was the true maximum.
maxFd = max(np.max(Fds), np.max(Fds_hokkaido))
print("Fd min:{} max:{}".format(minFd, maxFd))
# Index 0 anchors the shared color scale at the observed minimum.
Fds[0] = Fds_hokkaido[0] = minFd
minVH = min(np.min(VHs), np.min(VHs_hokkaido))
maxVH = max(np.max(VHs), np.max(VHs_hokkaido))
# Fix: the label said "Fd" but these are the VH (absolute humidity) bounds.
print("VH min:{} max:{}".format(minVH, maxVH))
VHs[0] = VHs_hokkaido[0] = maxVH
# Timestamp "YYYY-MM-DD HH:MM" used in every plot title / tweet body.
today_str = dt.now().isoformat()[:16].replace('T', ' ')
# --- Nationwide air-resistance (Fd) map ---
tw_body_map_fds = "全国 新型コロナ 県別 空気抵抗力(前日平均) ({})".format(today_str)
imgname = 'japan-fds.jpg'
# NOTE(review): both calls write to the same file name — the heatmap
# presumably overwrites the choropleth; confirm this is intended.
make_japan_choropleth(imgname, tw_body_map_fds, Fds[1:])
make_japan_heatmap(imgname, tw_body_map_fds, Fds, pop_inf)
tw = get_twitter()
# Tweeting is deliberately disabled with 'if False' guards throughout.
if False:
    tw_body_map_fds += "低リスク:55p以上? 中リスク:54p以下?, 高リスク:53p以下?"
    tw_body_map_fds += " https://geneasyura.github.io/cov19-hm/airregist.html "
    tweet_with_image(tw, "docs/images/japan-fds.jpg", tw_body_map_fds)
# --- Nationwide absolute-humidity (VH) map ---
tw_body_map_vhs = "全国 新型コロナ 県別 絶対湿度(前日平均) ({})".format(today_str)
imgname = 'japan-vh.jpg'
make_japan_choropleth(imgname, tw_body_map_vhs, VHs[1:])
make_japan_heatmap(imgname, tw_body_map_vhs, VHs, pop_inf)
tw_body_map_vhs += "高リスク:7g/㎥以下?"
tw_body_map_vhs += " https://geneasyura.github.io/cov19-hm/airregist.html "
if False:
    tweet_with_image(tw, "docs/images/japan-vh.jpg", tw_body_map_vhs)
# --- Hokkaido per-subprefecture air-resistance (Fd) map ---
tw_body_map_fds = "北海道 新型コロナ 振興局別 空気抵抗力(前日平均) ({})".format(today_str)
imgname = 'hokkaido-map-fds.jpg'
make_hokkaido_choropleth(imgname, tw_body_map_fds, Fds_hokkaido)
if False:
    tw_body_map_fds += "低リスク:55p以上? 中リスク:54p以下?, 高リスク:53p以下?"
    tw_body_map_fds += " https://geneasyura.github.io/cov19-hm/airregist.html "
    tweet_with_image(tw, "docs/images/hokkaido-map-fds.jpg", tw_body_map_fds)
# --- Hokkaido per-subprefecture absolute-humidity (VH) map ---
tw_body_map_vhs = "北海道 新型コロナ 振興局別 絶対湿度(前日平均) ({})".format(today_str)
imgname = 'hokkaido-map-vh.jpg'
make_hokkaido_choropleth(imgname, tw_body_map_vhs, VHs_hokkaido)
tw_body_map_vhs += "高リスク:7g/㎥以下?"
tw_body_map_vhs += " https://geneasyura.github.io/cov19-hm/airregist.html "
if False:
    tweet_with_image(tw, "docs/images/hokkaido-map-vh.jpg", tw_body_map_vhs)
| airregist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Creating Our Own Objects
# +
class PlayerCharacter: #Another rule is to have the name singular
    # Constructor: requires a 'name' argument (on purpose — the cell below
    # demonstrates the TypeError raised when it is omitted).
    def __init__(self, name):
        self.name = name
    def run(self):
        print('run')
# NOTE: this call intentionally raises TypeError — __init__ expects 'name'.
player1 = PlayerCharacter()
print(player1)
# -
# When we build a class, we usually define an __init__ (constructor) method, which is called automatically when we instantiate the class (i.e. call it to create an object). When we instantiate with player1 = PlayerCharacter(), everything in __init__ runs. It tries to execute self.name = name, but since we didn't pass any argument, a TypeError is raised.
# So if we do:
#
player1 = PlayerCharacter('Cindy')
# 'self' in above code refers to PlayerCharacter that we are going to create i.e. player1. So when we print the following we get:
print(player1.name)
# We get Cindy because we gave it as the parameter to PlayerCharacter and when we instantiate it we pass the 'name' parameter. The default parameter is self and then we gave name to 'self.name'. In order to player have name we need to do 'self.' because self refers to player1.
# If we create another player and run it we use same piece of code but we get different values as we use different attributes:
player2 = PlayerCharacter('Tom')
print(player2.name)
# We can have multiple things in the class like age. Will add self.age in the above class definition.
class PlayerCharacter:
    """A simple player blueprint; class names are conventionally singular."""

    def __init__(self, name, age):
        """Store the player's name and age on the new instance."""
        self.age = age
        self.name = name

    def run(self):
        """Print the action; returns None like any function without a return."""
        print('run')
# Two distinct instances built from the same blueprint.
player1 = PlayerCharacter('Cindy', 22)
player2 = PlayerCharacter('Tom', 31)
print(player1.age)
print(player2.age)
# run() prints 'run' and returns None, so this also prints 'None'.
print(player1.run())
# We get **None** because the function isn't returning anything in the function run(). If we add:<br>
# def run(self):<br>
# print('run')<br>
# return('done')
# <br><br>
# Then it will return:<br>
# run<br>
# done
print(player1)
print(player2)
# When we print both players, we notice that each is a PlayerCharacter object, but player1 and player2 live at different memory locations.<br> So we can use one blueprint to create multiple players, yet the players themselves are distinct objects. This keeps their state separate and safe.
| Python Notes/Creating Our Own Objects.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="8niISN6-NR9h"
# Feature importance function of the models (XGBoost, Neural Network and Logistic Regression) is used to identify how the features contribute to the prediction of the target feature.
# This would inform the features to be used in training the models in FeatureSelection4 notebook.
# + id="mCe94D9jMUAP"
#Import the required libraries
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn import linear_model, metrics, preprocessing
from sklearn.neural_network import MLPClassifier
import xgboost as xgb
import matplotlib.pyplot as plt
# + id="b5XPK3AJNQcz"
#Load the data - The dataset used for the modeling, the fractional data, is loaded directly
modeling_dataset = pd.read_csv('/content/drive/MyDrive/prediction/frac_cleaned_fod_data.csv', low_memory = False)
# + id="MwBb0FlINQZd"
#All columns except 'HasDetections' (target), 'kfold' (fold id) and
#'MachineIdentifier' (row id) are used as training features.
train_features = [tf for tf in modeling_dataset.columns if tf not in ('HasDetections', 'kfold', 'MachineIdentifier')]
# + id="B7zcMY31NQTt"
for col in train_features:
    #Initialize the Label Encoder
    lbl = preprocessing.LabelEncoder()
    #Fit the label encoder on each of the features
    lbl.fit(modeling_dataset[col])
    #Transform the column in place to integer codes
    modeling_dataset.loc[:,col] = lbl.transform(modeling_dataset[col])
# + id="NZDKcY6ASVHC"
#Get training and validation data using folds (5 as a dummy value)
# NOTE(review): this assumes fold id 5 exists in the 'kfold' column; if it
# does not, the validation frame below is empty — TODO confirm.
modeling_datasets_train = modeling_dataset[modeling_dataset.kfold != 5].reset_index(drop=True)
modeling_datasets_valid = modeling_dataset[modeling_dataset.kfold == 5].reset_index(drop=True)
#Get train data - For tree models
X_train = modeling_datasets_train[train_features].values
#Get validation data
X_valid = modeling_datasets_valid[train_features].values
# + colab={"base_uri": "https://localhost:8080/", "height": 789} id="trbkP8A0NQRE" outputId="c53253c9-fcc0-4ef2-b7d3-6c8eebc8abc4"
#Initialize XGBoost model (use all CPU cores)
xgb_model = xgb.XGBClassifier(n_jobs=-1)
#Fit the model on training data
xgb_model.fit(X_train, modeling_datasets_train.HasDetections.values)
#Feature importance plotting for XGBoost: bars sorted ascending by importance
importances = xgb_model.feature_importances_
col_names = train_features
idxs = np.argsort(importances)
plt.figure(figsize=(20,18))
plt.title('Feature Importances')
plt.barh(range(len(idxs)), importances[idxs], align='center')
plt.yticks(range(len(idxs)), [col_names[i] for i in idxs])
plt.xlabel('XGBoost Feature Importance')
# Fix: plt.show was referenced but never called — call it to render the figure.
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="0r5M64BUGbU0" outputId="4c4bd2e8-720c-4c14-f1c6-d32c0d7016a2"
xgb_important_features(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 964} id="RpRn31CF8QaP" outputId="12a9d2ad-2001-4ea8-f2fe-cfcde1bacd8e"
#Initialize the Logistic Regression Model
lr_model = linear_model.LogisticRegression()
#Fit model on the data
lr_model.fit(X_train, modeling_datasets_train.HasDetections.values)
#Feature "importance" for Logistic Regression = signed coefficients of class 1
importances = lr_model.coef_[0]
col_names = train_features
idxs = np.argsort(importances)
plt.figure(figsize=(20,18))
plt.title('Feature Importances')
plt.barh(range(len(idxs)), importances[idxs], align='center')
plt.yticks(range(len(idxs)), [col_names[i] for i in idxs])
plt.xlabel('Logistic Regression Feature Importance')
# Fix: plt.show was referenced but never called — call it to render the figure.
plt.show()
# + [markdown] id="hdYKPbtYTuYw"
# As shown, understanding feature importance is more straightforward in a tree model such as XGBoost. Therefore, only XGBoost will be used to answer the research questions about ranking feature importance.
# + id="yQ05a1yoB3tw"
| MS-malware-suspectibility-detection/4-feature-selected-model/FeatureSelection3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### 1. Problem statement
# - Detecting fraud transactions is of great importance for any credit card company.
# - We are tasked by a well-known company to detect potential frauds so that customers are not charged for items that they did not purchase.
#
# - So the goal is to build a classifier that tells if a transaction is a fraud or not.
# ### 2. Import library
# numpy==1.16.4 for tensorflow 1.14.0
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import keras
import os
import matplotlib.pyplot as plt
import seaborn as sn
import itertools
from collections import Counter
np.random.seed(2)
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, f1_score, recall_score
from imblearn.over_sampling import SMOTE
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
# ### 3. Data processing
data = pd.read_csv('creditcard.csv')
data.head(3)
# ##### run below to understand the error
# +
# scaler = StandardScaler()
# data['NormalizedAmount'] = scaler.fit_transform(data['Amount'])
# -
# #### 3.1 Scale data
# Standardize the transaction amount; reshape(-1, 1) is required because
# fit_transform expects a 2-D array (this is the error shown in 3.0 above).
scaler = StandardScaler()
data['NormalizedAmount'] = scaler.fit_transform(data['Amount'].values.reshape(-1, 1))
# #### 3.2 Split data
# Drop the raw Amount (replaced by NormalizedAmount) and Time columns.
data = data.drop(['Amount', 'Time'], axis = 1)
y = data['Class']
X = data.drop(['Class'], axis = 1)
y.head()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
# ### 3.3 Convert data
# Keep the original row indices before converting frames to plain arrays.
train_identity = X_train.index
test_identity = X_test.index
X_train = np.array(X_train)
X_test = np.array(X_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
# ### 4. Deep neural network
# #### 4.1 Create model
model = Sequential()
#add input layer — input_dim = 29 (presumably V1..V28 + NormalizedAmount; TODO confirm)
model.add(Dense(input_dim = 29, units = 16, activation = 'relu'))
#add 2nd hidden layer
model.add(Dense(units = 24, activation = 'relu'))
#add dropout layer (50% — regularization against overfitting)
model.add(Dropout(0.5))
#add 3rd hidden layer
model.add(Dense(units = 20, activation = 'relu'))
#add 4th hidden layer
model.add(Dense(units = 24, activation = 'relu'))
#add ouptut layer — single sigmoid unit for binary fraud/not-fraud probability
model.add(Dense(units = 1, activation = 'sigmoid'))
model.summary()
# #### 4.2 Fit model
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
model.fit(X_train, y_train, batch_size = 15, epochs = 5)
# #### 4.3 Evaluate model
# evaluate() returns [loss, accuracy] on the held-out test set.
score = model.evaluate(X_test, y_test)
print(score)
y_pred = model.predict(X_test)
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion Matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Args:
        cm: square confusion-matrix array (e.g. from sklearn's confusion_matrix).
        classes: iterable of tick labels, one per class.
        normalize: if True, convert counts to row-wise (per-true-class) fractions.
        title: plot title.
        cmap: matplotlib colormap used for the image.
    """
    if normalize:
        # Row-normalize so each true-class row sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=0)
    plt.yticks(tick_marks, classes)
    # Annotate every cell with its value; white text on dark cells for contrast.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
cm = confusion_matrix(y_test, y_pred.round())
print(cm)
plot_confusion_matrix(cm, classes = [0,1], title='Confusion Matrix - Test dataset')
print(accuracy_score(y_test, y_pred.round()))
print(precision_score(y_test, y_pred.round()))
print(recall_score(y_test, y_pred.round()))
print(f1_score(y_test, y_pred.round()))
# #### 4.4 Undersampling & model test
# #### 4.4.1 Undersampling
# Random undersampling: keep all fraud rows and an equal-sized random sample
# of normal rows, yielding a balanced dataset.
fraud_ind = np.array(data[data.Class == 1].index)
num_frauds = len(fraud_ind)
print(num_frauds)
normal_ind = np.array(data[data.Class == 0].index)
num_normal = len(normal_ind)
print(num_normal)
normal_ind = data[data.Class == 0].index
# Sample exactly num_frauds normal rows without replacement.
random_normal_ind = np.random.choice(normal_ind, num_frauds, replace = False)
random_normal_ind = np.array(random_normal_ind)
under_sample_ind = np.concatenate( [fraud_ind, random_normal_ind])
print(len(under_sample_ind))
# NOTE(review): iloc is positional — this only matches the index labels above
# because the DataFrame has a default RangeIndex; confirm if that changes.
under_sample_data = data.iloc[under_sample_ind, :]
X_undersample = under_sample_data.iloc[:, under_sample_data.columns != 'Class']
y_undersample = under_sample_data.iloc[:, under_sample_data.columns == 'Class']
X_train, X_test, y_train, y_test = train_test_split(X_undersample, y_undersample, test_size = 0.3)
X_train = np.array(X_train)
X_test = np.array(X_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
# #### 4.4.2 Model test
model.summary()
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
model.fit(X_train, y_train, batch_size = 15, epochs = 5)
y_pred = model.predict(X_test)
y_expect = pd.DataFrame(y_test)
cm = confusion_matrix(y_expect, y_pred.round())
plot_confusion_matrix(cm, classes = (0, 1))
plt.show()
print(accuracy_score(y_test, y_pred.round()))
print(precision_score(y_test, y_pred.round()))
print(recall_score(y_test, y_pred.round()))
print(f1_score(y_test, y_pred.round()))
# #### 4.5 SMOTE & Model test
# #### 4.5.1 SMOTE
# Oversample the minority class with SMOTE to a balanced dataset.
# NOTE(review): fit_sample was removed in newer imblearn releases in favor of
# fit_resample — confirm the pinned imblearn version.
X_resample, y_resample = SMOTE().fit_sample(X, y)
X_resample.shape
counter = Counter(y_resample)
print(counter)
X_train, X_test, y_train, y_test = train_test_split(X_resample, y_resample, test_size = 0.3)
X_train = np.array(X_train)
X_test = np.array(X_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
# #### 4.5.2 Model test
# NOTE(review): the same `model` instance is reused, so this continues
# training from the previously fitted weights rather than from scratch.
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
model.fit(X_train, y_train, batch_size = 15, epochs = 5)
y_pred = model.predict(X_test)
y_expect = pd.DataFrame(y_test)
cm = confusion_matrix(y_expect, y_pred.round())
plot_confusion_matrix(cm, classes = (0, 1))
plt.show()
print(accuracy_score(y_test, y_pred.round()))
print(precision_score(y_test, y_pred.round()))
print(recall_score(y_test, y_pred.round()))
print(f1_score(y_test, y_pred.round()))
| .ipynb_checkpoints/Credit_card_fraud_detection-DNN-Sampling-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="IpL5v1Sf3RWI"
# # Bangalore House Price Prediction - Outlier Detection
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" executionInfo={"elapsed": 2175, "status": "ok", "timestamp": 1593078487795, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="gOSX3Cyj3MMb" outputId="ce9f956d-30df-4c35-d70e-a9bc664bb2a8"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# + colab={} colab_type="code" executionInfo={"elapsed": 3393, "status": "ok", "timestamp": 1593078489034, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="xRx0SRBGC54j"
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 3360, "status": "ok", "timestamp": 1593078489036, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="bZyJo0ya2OVT" outputId="78d9accb-2a46-4585-c954-7a134482dd0f"
"""from google.colab import files
files=files.upload()
df = pd.read_csv('oh_encoded_data.csv')"""
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 3934, "status": "ok", "timestamp": 1593078489647, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="Gp8WO-ZhNc6w" outputId="54b49402-f5e1-4e5c-83d7-024916a9cca0"
# Get clean data
path = r"https://drive.google.com/uc?export=download&id=1P49POlAk27uRzWKXoR2WaEfb1lyyfiRJ" # oh_encoded_data.csv from drive
# This file contain [area_type availability location bath balcony price total_sqft_int bhk price_per_sqft]
# and ['area_type','availability','location'] this are cat var
# We encoded few classes from above car var in OHE
df = pd.read_csv(path)
df.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 3899, "status": "ok", "timestamp": 1593078489650, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="WP8pY6-IlxRJ" outputId="4e9069ef-1af5-4e89-9f04-0df4d66a73b9"
df.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 258} colab_type="code" executionInfo={"elapsed": 3862, "status": "ok", "timestamp": 1593078489653, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="VFKjUlZtPQgS" outputId="0637f216-2a21-491b-f28d-fa5a70755b7c"
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 258} colab_type="code" executionInfo={"elapsed": 3822, "status": "ok", "timestamp": 1593078489654, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="ye85e6KVQg55" outputId="5264f298-b814-4821-dcea-fc89a4c7d31d"
df = df.drop(['Unnamed: 0'], axis=1)
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 3778, "status": "ok", "timestamp": 1593078489655, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="y7UEhvYVSK4J" outputId="deeea3d6-db97-4f54-89eb-aa75064f7a59"
df.shape
# + [markdown] colab_type="text" id="axowdsatcJaQ"
# ## Split Dataset in train and test
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 3736, "status": "ok", "timestamp": 1593078489656, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="PU_DeM90b7As" outputId="bab4699a-6418-4af5-bfbe-69424bb17b8b"
# Separate features from the regression target ('price').
X = df.drop("price", axis=1)
y = df['price']
print('Shape of X = ', X.shape)
print('Shape of y = ', y.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" executionInfo={"elapsed": 3693, "status": "ok", "timestamp": 1593078489657, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="aNVLPL-rcNwa" outputId="b24989dd-d514-407f-eae1-4f982084621a"
from sklearn.model_selection import train_test_split
# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 51)
print('Shape of X_train = ', X_train.shape)
print('Shape of y_train = ', y_train.shape)
print('Shape of X_test = ', X_test.shape)
print('Shape of y_test = ', y_test.shape)
# + [markdown] colab_type="text" id="k65LC4_qcYpm"
# ## Feature Scaling
# + colab={} colab_type="code" executionInfo={"elapsed": 4112, "status": "ok", "timestamp": 1593078490090, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="D1BCc8gAcUi6"
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
# Fit on the training set only (avoids test-set leakage), then apply to both.
sc.fit(X_train)
X_train= sc.transform(X_train)
X_test = sc.transform(X_test)
# + [markdown] colab_type="text" id="Oh43OeUnfZAq"
# ## Machine Learning Model Training
# + [markdown] colab_type="text" id="d9-YKvESfcmd"
# ## Linear Regression
# + colab={} colab_type="code" executionInfo={"elapsed": 4105, "status": "ok", "timestamp": 1593078490092, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="GfPL9vJLcfXr"
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
lr = LinearRegression()
lr_lasso = Lasso()
lr_ridge = Ridge()
# + colab={} colab_type="code" executionInfo={"elapsed": 4100, "status": "ok", "timestamp": 1593078490094, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="pG9byhg5YWGR"
def rmse(y_test, y_pred):
return np.sqrt(mean_squared_error(y_test, y_pred))
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 4027, "status": "ok", "timestamp": 1593078490095, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="WYm8ArSoff1p" outputId="b315cac3-9809-467c-87f6-fb54f1e5918c"
# Plain linear regression: R^2 on the test set plus RMSE.
lr.fit(X_train, y_train)
lr_score = lr.score(X_test, y_test) # with all num var 0.7842744111909903
lr_rmse = rmse(y_test, lr.predict(X_test))
lr_score, lr_rmse
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 3988, "status": "ok", "timestamp": 1593078490096, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="sEhTsgE6fnXp" outputId="6170272d-4954-4045-e93f-49339b531559"
# Lasso (L1-regularized linear regression), default alpha.
lr_lasso.fit(X_train, y_train)
lr_lasso_score=lr_lasso.score(X_test, y_test) # with balcony 0.5162364637824872
lr_lasso_rmse = rmse(y_test, lr_lasso.predict(X_test))
lr_lasso_score, lr_lasso_rmse
# + [markdown] colab_type="text" id="73sB16uxfwRm"
# ## Support Vector Machine
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 15003, "status": "ok", "timestamp": 1593078501154, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="aep38HxxfsbA" outputId="ecb5b52f-8743-46a6-ecfb-b69cd5e9d06f"
from sklearn.svm import SVR
# Support-vector regression with default RBF kernel.
svr = SVR()
svr.fit(X_train,y_train)
svr_score=svr.score(X_test,y_test) # with 0.2630802200711362
svr_rmse = rmse(y_test, svr.predict(X_test))
svr_score, svr_rmse
# + [markdown] colab_type="text" id="w9-nexcsf5G3"
# ## Random Forest Regressor
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 18171, "status": "ok", "timestamp": 1593078504362, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="ArlNrxFJf1ey" outputId="90487329-8ad6-4625-a01e-f915975ba241"
from sklearn.ensemble import RandomForestRegressor
# Random-forest regressor with default hyperparameters.
rfr = RandomForestRegressor()
rfr.fit(X_train,y_train)
rfr_score=rfr.score(X_test,y_test) # with 0.8863376025408044
rfr_rmse = rmse(y_test, rfr.predict(X_test))
rfr_score, rfr_rmse
# + [markdown] colab_type="text" id="JjxIDyROgFj4"
# ## XGBoost
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 19264, "status": "ok", "timestamp": 1593078505494, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="GefSOPbqf-4d" outputId="6f6f8cfe-28fa-452e-aebb-468109b3944c"
import xgboost
# Gradient-boosted trees with default hyperparameters.
xgb_reg = xgboost.XGBRegressor()
xgb_reg.fit(X_train,y_train)
xgb_reg_score=xgb_reg.score(X_test,y_test) # with 0.8838865742273464
xgb_reg_rmse = rmse(y_test, xgb_reg.predict(X_test))
xgb_reg_score, xgb_reg_rmse
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" executionInfo={"elapsed": 19225, "status": "ok", "timestamp": 1593078505496, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="SjqSFnK4atAI" outputId="dcf267cc-98f4-426b-d51d-f2e9d7e324c4"
# Side-by-side comparison table of all five models (R^2 score and RMSE).
print(pd.DataFrame([{'Model': 'Linear Regression','Score':lr_score, "RMSE":lr_rmse},
              {'Model': 'Lasso','Score':lr_lasso_score, "RMSE":lr_lasso_rmse},
              {'Model': 'Support Vector Machine','Score':svr_score, "RMSE":svr_rmse},
              {'Model': 'Random Forest','Score':rfr_score, "RMSE":rfr_rmse},
              {'Model': 'XGBoost','Score':xgb_reg_score, "RMSE":xgb_reg_rmse}],
             columns=['Model','Score','RMSE']))
# + [markdown] colab_type="text" id="wb6hBVBvgSP4"
# ## Cross Validation
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 19189, "status": "ok", "timestamp": 1593078505498, "user": {"displayName": "indian ai <EMAIL>", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="9X2-jzL8gJJJ" outputId="517a17f1-4b0d-45c6-fffa-cc4c94988cd4"
# Disabled cell (kept as a string literal): 10-fold CV for the fitted XGBoost model.
'''from sklearn.model_selection import KFold,cross_val_score
cvs = cross_val_score(xgb_reg, X_train,y_train, cv = 10)
cvs, cvs.mean() # 0.9845963377450353)'''
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 19157, "status": "ok", "timestamp": 1593078505499, "user": {"displayName": "<EMAIL> ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="3SRukRf9gWc3" outputId="f32a045f-04ef-4c2e-ccd2-2dc6aa931b36"
# Disabled cell (string literal): 10-fold CV for the fitted random forest.
'''cvs_rfr = cross_val_score(rfr, X_train,y_train, cv = 10)
cvs_rfr, cvs_rfr.mean() # 0.9652425691235843)'''
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" executionInfo={"elapsed": 48418, "status": "ok", "timestamp": 1593078534792, "user": {"displayName": "<EMAIL> ai <EMAIL>", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="fOE4aTNGUyTP" outputId="d034f0cd-627a-4e8c-d3fc-15807f2bdfac"
# Live cell: 10-fold CV on a fresh, default-parameter RandomForestRegressor.
from sklearn.model_selection import cross_val_score
cvs_rfr2 = cross_val_score(RandomForestRegressor(), X_train,y_train, cv = 10)
cvs_rfr2, cvs_rfr2.mean() # ~0.9652425691235843 (stray trailing quotes removed)
# + [markdown] colab_type="text" id="9r5gSFsWgwsF"
# # Hyper Parmeter Tuning
# + cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" executionInfo={"elapsed": 48381, "status": "ok", "timestamp": 1593078534796, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="Trmslcv4h6ll" outputId="7385acf6-e182-4c61-e148-53814d028d48"
from sklearn.model_selection import GridSearchCV
from xgboost.sklearn import XGBRegressor
'''
# Various hyper-parameters to tune
xgb1 = XGBRegressor()
parameters = {'learning_rate': [0.1,0.03, 0.05, 0.07], #so called `eta` value, # [default=0.3] Analogous to learning rate in GBM
'min_child_weight': [1,3,5], #[default=1] Defines the minimum sum of weights of all observations required in a child.
'max_depth': [4, 6, 8], #[default=6] The maximum depth of a tree,
'gamma':[0,0.1,0.001,0.2], #Gamma specifies the minimum loss reduction required to make a split.
'subsample': [0.7,1,1.5], #Denotes the fraction of observations to be randomly samples for each tree.
'colsample_bytree': [0.7,1,1.5], #Denotes the fraction of columns to be randomly samples for each tree.
'objective':['reg:linear'], #This defines the loss function to be minimized.
'n_estimators': [100,300,500]}
xgb_grid = GridSearchCV(xgb1,
parameters,
cv = 2,
n_jobs = -1,
verbose=True)
xgb_grid.fit(X_train, y_train)
print(xgb_grid.best_score_) # 0.9397345161940295
print(xgb_grid.best_params_)'''
# + cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 48340, "status": "ok", "timestamp": 1593078534797, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="yJkArgKqj4y_" outputId="4b9703ff-3f4a-40b0-ccfa-50b3fb7da614"
'''xgb_tune = xgb_grid.estimator
xgb_tune.fit(X_train,y_train) # 0.9117591385438816
xgb_tune.score(X_test,y_test)'''
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 48310, "status": "ok", "timestamp": 1593078534798, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="wNCK-W4SkMAh" outputId="79075545-215e-4798-a599-df1487ed75ef"
'''cvs = cross_val_score(xgb_tune, X_train,y_train, cv = 10)
cvs, cvs.mean() # 0.9645582338461773)'''
# + colab={} colab_type="code" executionInfo={"elapsed": 48302, "status": "ok", "timestamp": 1593078534799, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="lyexCvMKs8gW"
#[i/10.0 for i in range(1,6)]
# + colab={} colab_type="code" executionInfo={"elapsed": 48297, "status": "ok", "timestamp": 1593078534801, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="PMn_MFhis9HR"
#xgb_grid.estimator
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 52687, "status": "ok", "timestamp": 1593078539232, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="fCtoITd4eaci" outputId="028938c4-ad99-4fbf-9e8c-5f8c10099d32"
xgb_tune2 = XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=0.6, colsample_bytree=1, gamma=0,
importance_type='gain', learning_rate=0.25, max_delta_step=0,
max_depth=4, min_child_weight=1, missing=None, n_estimators=400,
n_jobs=1, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
silent=None, subsample=1, verbosity=1)
xgb_tune2.fit(X_train,y_train) # 0.9412851220926807
xgb_tune2.score(X_test,y_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 54241, "status": "ok", "timestamp": 1593078540827, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="66kzeVolaEUk" outputId="74761529-0e24-494c-92f7-47dce447e5a7"
# Disabled cell (string literal): the grid of hyperparameters that was searched.
'''parameters = {'learning_rate': [0.1,0.03, 0.05, 0.07], #so called `eta` value, # [default=0.3] Analogous to learning rate in GBM
              'min_child_weight': [1,3,5], #[default=1] Defines the minimum sum of weights of all observations required in a child.
              'max_depth': [4, 6, 8], #[default=6] The maximum depth of a tree,
              'gamma':[0,0.1,0.001,0.2], #Gamma specifies the minimum loss reduction required to make a split.
              'subsample': [0.7,1,1.5], #Denotes the fraction of observations to be randomly samples for each tree.
              'colsample_bytree': [0.7,1,1.5], #Denotes the fraction of columns to be randomly samples for each tree.
              'objective':['reg:linear'], #This defines the loss function to be minimized.
              'n_estimators': [100,300,500]}'''
# Final model: rebinds xgb_tune2 with the chosen hyperparameters
# (lower learning rate, fewer trees, higher min_child_weight).
xgb_tune2 = XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
             colsample_bynode=0.9, colsample_bytree=1, gamma=0,
             importance_type='gain', learning_rate=0.05, max_delta_step=0,
             max_depth=4, min_child_weight=5, missing=None, n_estimators=100,
             n_jobs=1, nthread=None, objective='reg:linear', random_state=0,
             reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
             silent=None, subsample=1, verbosity=1)
xgb_tune2.fit(X_train,y_train) # 0.9412851220926807
xgb_tune2.score(X_test,y_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" executionInfo={"elapsed": 60233, "status": "ok", "timestamp": 1593078546856, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="6FU4x7iBfMPH" outputId="abfeda40-a5f1-4e5c-d5c5-21e701fcdfcd"
# 5-fold cross-validation of the final model.
cvs = cross_val_score(xgb_tune2, X_train,y_train, cv = 5)
cvs, cvs.mean() # ~0.9706000326331659 (stray trailing quotes removed)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 60205, "status": "ok", "timestamp": 1593078546859, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="WhZks478fQvN" outputId="4b99e2ff-93ac-4575-8505-aa8f07669e01"
# Hold-out RMSE of the final model.
np.sqrt(mean_squared_error(y_test, xgb_tune2.predict(X_test)))
# + colab={} colab_type="code" executionInfo={"elapsed": 60198, "status": "ok", "timestamp": 1593078546861, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="BvXZVKM4j5q_"
# + [markdown] colab_type="text" id="HMfJzdNkimif"
# ## Test Model
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 60169, "status": "ok", "timestamp": 1593078546862, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="ppJWFinaiovG" outputId="fbb3d745-36c2-4ab9-d10a-d0dd3e0daf7a"
# Inspect the training-matrix column order (needed by predict_house_price below).
list(X.columns)
# + colab={} colab_type="code" executionInfo={"elapsed": 60159, "status": "ok", "timestamp": 1593078546863, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="udzrDV2fisfs"
# Helper that predicts the price of one house from its raw feature values.
def predict_house_price(model,bath,balcony,total_sqft_int,bhk,price_per_sqft,area_type,availability,location):
    """Return the model's predicted price for a single house.

    Builds one feature row matching the column layout of the training
    matrix ``X`` (five numeric features first, then one-hot dummy columns
    for availability, area_type and location), scales it with the fitted
    scaler ``sc``, and runs it through ``model``.
    """
    x = np.zeros(len(X.columns))  # zero-filled row, same width as the training matrix
    # Numeric features occupy the first five column positions of X.
    x[0] = bath
    x[1] = balcony
    x[2] = total_sqft_int
    x[3] = bhk
    x[4] = price_per_sqft
    # BUG FIX: the original compared the string literal "availability" to
    # "Ready To Move" (always False), so the dummy at index 8 was never set.
    # Compare the function argument instead.
    if availability == "Ready To Move":
        x[8] = 1
    # NOTE(review): area_type columns are looked up without a separator
    # ('area_type'+value) while location uses 'location_'+value; confirm
    # against X.columns that this matches the get_dummies column naming.
    if 'area_type' + area_type in X.columns:
        area_type_index = np.where(X.columns == "area_type" + area_type)[0][0]
        x[area_type_index] = 1
    if 'location_' + location in X.columns:
        loc_index = np.where(X.columns == "location_" + location)[0][0]
        x[loc_index] = 1
    # sc.transform expects a 2-D array; take the single scaled row back out.
    x = sc.transform([x])[0]
    return model.predict([x])[0]  # scalar prediction from the trained model
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 60119, "status": "ok", "timestamp": 1593078546864, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="cP2-LZ41GVd2" outputId="72f70f40-5466-4fe8-a27f-7d240df93a6f"
# Spot-check the final model on a known Plot Area sample.
predict_house_price(model=xgb_tune2, bath=3,balcony=2,total_sqft_int=1672,bhk=3,price_per_sqft=8971.291866,area_type="Plot Area",availability="Ready To Move",location="Devarabeesana Halli")
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 60083, "status": "ok", "timestamp": 1593078546865, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="FT6oIIPlHRmx" outputId="17191715-4f18-44d5-82fe-4d26df101715"
##test sample
#area_type availability location bath balcony price total_sqft_int bhk price_per_sqft
#2 Super built-up Area Ready To Move Devarabeesana Halli 3.0 2.0 150.0 1750.0 3 8571.428571
# Expected price for this sample is ~150.0 (see row above).
predict_house_price(model=xgb_tune2, bath=3,balcony=2,total_sqft_int=1750,bhk=3,price_per_sqft=8571.428571,area_type="Super built-up",availability="Ready To Move",location="Devarabeesana Halli")
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 60050, "status": "ok", "timestamp": 1593078546866, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="TXG6GFOxGw6w" outputId="1920210b-706b-4f61-8597-1ac5712413f1"
##test sample
#area_type availability location bath balcony price total_sqft_int bhk price_per_sqft
#1 Built-up Area Ready To Move Devarabeesana Halli 3.0 3.0 149.0 1750.0 3 8514.285714
# Expected price for this sample is ~149.0 (see row above).
predict_house_price(model=xgb_tune2,bath=3,balcony=3,total_sqft_int=1750,bhk=3,price_per_sqft=8514.285714,area_type="Built-up Area",availability="Ready To Move",location="Devarabeesana Halli")
# + [markdown] colab_type="text" id="oC0mBkarKUzw"
# # Save model & load model
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1014, "status": "ok", "timestamp": 1593079039699, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="0FepBAKJHHuf" outputId="780ec6c9-2652-414e-ad32-ba887248ed26"
import joblib
# save model: persist both the tuned XGBoost model and the random forest.
joblib.dump(xgb_tune2, 'bangalore_house_price_prediction_model.pkl')
joblib.dump(rfr, 'bangalore_house_price_prediction_rfr_model.pkl')
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 59992, "status": "ok", "timestamp": 1593078546870, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="y9pzeYYLKm6E" outputId="7e1c2640-ac7b-406b-eadc-d1c0c32c3a91"
# load model back from disk to verify the round-trip works.
bangalore_house_price_prediction_model = joblib.load("bangalore_house_price_prediction_model.pkl")
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 59958, "status": "ok", "timestamp": 1593078546871, "user": {"displayName": "indian ai production", "photoUrl": "", "userId": "05336710603640792650"}, "user_tz": -330} id="Z24uQ9PjK2H0" outputId="d60dda4c-4677-4ecd-8e13-03e642ff7286"
# predict house price with the re-loaded model to confirm it still works.
predict_house_price(bangalore_house_price_prediction_model,bath=3,balcony=3,total_sqft_int=150,bhk=3,price_per_sqft=8514.285714,area_type="Built-up Area",availability="Ready To Move",location="Devarabeesana Halli")
| Bengaluru_House_Price_Prediction/ML_Model_Building_Bengaluru_House_Price_Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0
# ---
# # Understanding Trends in Company Valuation with NLP - Part 2: NLP Company Earnings Analysis Pipeline
#
# ## Introduction
#
# ### Orchestrating company earnings trend analysis, using SEC filings, news sentiment with the Hugging Face transformers, and Amazon SageMaker Pipelines
#
# In this notebook, we demonstrate how to summarize and derive sentiments out of Securities and Exchange Commission (SEC) reports filed by a publicly traded organization. We will derive the overall market sentiments about the said organization through financial news articles within the same financial period to present a fair view of the organization vs. market sentiments and outlook about the company's overall valuation and performance. In addition, we will also identify the most popular keywords and entities within the news articles about that organization.
#
# In order to achieve the above we will be using multiple SageMaker Hugging Face based NLP transformers for the downstream NLP tasks of Summarization (e.g., of the news and SEC MDNA sections) and Sentiment Analysis (of the resulting summaries).
# ---
#
# ### Using SageMaker Pipelines
# Amazon SageMaker Pipelines is the first purpose-built, easy-to-use continuous integration and continuous delivery (CI/CD) service for machine learning (ML). With SageMaker Pipelines, you can create, automate, and manage end-to-end ML workflows at scale.
#
# Orchestrating workflows across each step of the machine learning process (e.g. exploring and preparing data, experimenting with different algorithms and parameters, training and tuning models, and deploying models to production) can take months of coding.
#
# Since it is purpose-built for machine learning, SageMaker Pipelines helps you automate different steps of the ML workflow, including data loading, data transformation, training and tuning, and deployment. With SageMaker Pipelines, you can build dozens of ML models a week, manage massive volumes of data, thousands of training experiments, and hundreds of different model versions. You can share and re-use workflows to recreate or optimize models, helping you scale ML throughout your organization.
# ---
#
# ### Understanding trends in company valuation (or similar) with NLP
#
# **Natural language processing (NLP)** is a subfield of linguistics, computer science, and artificial intelligence concerned with the interactions between computers and human language, in particular how to program computers to process and analyze large amounts of natural language data. The goal is a computer capable of "understanding" the contents of documents, including the contextual nuances of the language within them. The technology can then accurately extract information and insights contained in the documents as well as categorize and organize the documents themselves. (Source: [Wikipedia](https://en.wikipedia.org/wiki/Natural_language_processing))
#
# We are going to demonstrate how to summarize and derive sentiments out of Securities and Exchange Commission (SEC) reports filed by a publicly traded organization. We are also going to derive the overall market sentiments about the said organization through financial news articles within the same financial period to present a fair view of the organization vs. market sentiments and outlook about the company's overall valuation and performance. In addition, we will also identify the most popular keywords and entities within the news articles about that organization.
#
# In order to achieve the above we will be using multiple SageMaker Hugging Face based NLP transformers with summarization and sentiment analysis downstream tasks.
#
# * <b> Summarization of financial text from SEC reports and news articles </b> will be done via [Pegasus for Financial Summarization model](https://huggingface.co/human-centered-summarization/financial-summarization-pegasus) based on the paper [Towards Human-Centered Summarization: A Case Study on Financial News](https://aclanthology.org/2021.hcinlp-1.4/).
# * Sentiment analysis on summarized SEC financial report and news articles will be done via pre-trained NLP model to analyze sentiment of financial text called [FinBERT](https://huggingface.co/ProsusAI/finbert). Paper: [ FinBERT: Financial Sentiment Analysis with Pre-trained Language Models](https://arxiv.org/abs/1908.10063)
#
# ---
#
# ### SEC Dataset
#
# The starting point for a vast amount of financial NLP is text in SEC filings. The SEC requires companies to report different types of information related to various events involving companies. The full list of SEC forms is here: https://www.sec.gov/forms.
#
# SEC filings are widely used by financial services companies as a source of information about companies in order to make trading, lending, investment, and risk management decisions. Because these filings are required by regulation, they are of high quality and veracity. They contain forward-looking information that helps with forecasts and are written with a view to the future, required by regulation. In addition, in recent times, the value of historical time-series data has degraded, since economies have been structurally transformed by trade wars, pandemics, and political upheavals. Therefore, text as a source of forward-looking information has been increasing in relevance.
#
# #### Obtain the dataset using the SageMaker JumpStart Industry Python SDK
#
# Downloading SEC filings is done from the SEC's Electronic Data Gathering, Analysis, and Retrieval (EDGAR) website, which provides open data access. EDGAR is the primary system under the U.S. Securities And Exchange Commission (SEC) for companies and others submitting documents under the Securities Act of 1933, the Securities Exchange Act of 1934, the Trust Indenture Act of 1939, and the Investment Company Act of 1940. EDGAR contains millions of company and individual filings. The system processes about 3,000 filings per day, serves up 3,000 terabytes of data to the public annually, and accommodates 40,000 new filers per year on average.
#
# There are several ways to download the data, and some open source packages available to extract the text from these filings. However, these require extensive programming and are not always easy-to-use. We provide a simple one-API call that will create a dataset in a few lines of code, for any period of time and for numerous tickers.
#
# We have wrapped the extraction functionality into a SageMaker processing container and provide this notebook to enable users to download a dataset of filings with metadata such as dates and parsed plain text that can then be used for machine learning using other SageMaker tools. This is included in the [SageMaker Industry Jumpstart Industry](https://aws.amazon.com/blogs/machine-learning/use-pre-trained-financial-language-models-for-transfer-learning-in-amazon-sagemaker-jumpstart/) library for financial language models. Users only need to specify a date range and a list of ticker symbols, and the library will take care of the rest.
#
# As of now, the solution supports extracting a popular subset of SEC forms in plain text (excluding tables): 10-K, 10-Q, 8-K, 497, 497K, S-3ASR, and N-1A. For each of these, we provide examples throughout this notebook and a brief description of each form. For the 10-K and 10-Q forms, filed every year or quarter, we also extract the Management Discussion and Analysis (MDNA) section, which is the primary forward-looking section in the filing. This is the section that has been most widely used in financial text analysis. Therefore, we provide this section automatically in a separate column of the dataframe alongside the full text of the filing.
#
# The extracted dataframe is written to S3 storage and to the local notebook instance.
#
# ---
#
# ### News articles related to the stock symbol -- dataset
#
# We will use the MIT Licensed [NewsCatcher API](https://docs.newscatcherapi.com/) to grab top 4-5 articles about the specific organization using filters, however other sources such as Social media feeds, RSS Feeds can also be used.
#
# The first step in the pipeline is to fetch the SEC report from the EDGAR database using the [SageMaker Industry Jumpstart Industry](https://aws.amazon.com/blogs/machine-learning/use-pre-trained-financial-language-models-for-transfer-learning-in-amazon-sagemaker-jumpstart/) library for Financial language models. This library provides us an easy to use functionality to obtain either one or multiple SEC reports for one or more Ticker symbols or CIKs. The ticker or CIK number will be passed to the SageMaker Pipeline using Pipeline parameter `inference_ticker_cik`. For demo purposes of this Pipeline we will focus on a single Ticker/CIK number at a time and the MDNA section of the 10-K form. The first processing will extract the MDNA from the 10-K form for a company and will also gather few news articles related to the company from the NewsCatcher API. This data will ultimately be used for summarization and then finally sentiment analysis.
# ---
#
# ## MLOps for NLP using SageMaker Pipelines
#
# We will set up the following SageMaker Pipeline. The Pipleline has two flows depending on what the value for `model_register_deploy` Pipeline parameter is set to. If the value is set to `Y` we want the pipeline to register the model and deploy the latest version of the model from the model registry to the SageMaker endpoint. If the value is set to `N` then we simply want to run inferences using the FinBert and the Pegasus models using the Ticker symbol (or CIK number) that is passed to the pipeline using the `inference_ticker_cik` Pipeline parameter.
# <img src="./images/pipeline.png" alt="Pipeline" style="width: 800px;"/>
# <div class="alert alert-block alert-warning">
# <b>Note:</b> You must execute the <code>script-processor-custom-container.ipynb</code> notebook before you can set up the SageMaker Pipeline. This notebook creates a custom Docker image and registers it in Amazon Elastic Container Registry (Amazon ECR) for the pipeline to use. The image contains all the dependencies required.
# </div>
# ---
#
# ## Set Up SageMaker Project
#
# <a id='setup-project'></a>
#
#
# ### Install and import packages
# +
# Install updated version of SageMaker
# # !pip install -q sagemaker==2.49
# !pip install sagemaker --upgrade
# !pip install transformers
# !pip install typing
# !pip install sentencepiece
# !pip install fiscalyear
# -
#Install SageMaker Jumpstart Industry
# !pip install smjsindustry
# <div class="alert alert-block alert-warning">
# NOTE: After installing an updated version of SageMaker and PyTorch, save the notebook and then restart your kernel.
# </div>
# +
import boto3
import botocore
import pandas as pd
import sagemaker
print(f'SageMaker version: {sagemaker.__version__}')
from sagemaker.huggingface import HuggingFace
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.workflow.pipeline import Pipeline
from sagemaker.workflow.steps import CreateModelStep
from sagemaker.workflow.step_collections import RegisterModel
from sagemaker.workflow.steps import ProcessingStep
from sagemaker.workflow.steps import TransformStep
from sagemaker.workflow.properties import PropertyFile
from sagemaker.workflow.parameters import (ParameterInteger, ParameterString)
from sagemaker.sklearn.processing import ScriptProcessor
from sagemaker.lambda_helper import Lambda
from sagemaker.workflow.lambda_step import (
LambdaStep,
LambdaOutput,
LambdaOutputTypeEnum,
)
# -
# ### Define parameters that you'll use throughout the notebook
# +
# AWS / SageMaker session handles and notebook-wide configuration constants.
s3 = boto3.resource("s3")
region = boto3.Session().region_name
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
sagemaker_role = role
default_bucket = sagemaker_session.default_bucket()
prefix = 'nlp-e2e-mlops'  # S3 key prefix for all artifacts of this pipeline
s3_client = boto3.client('s3', region_name=region)
sagemaker_boto_client = boto3.client("sagemaker", region_name=region)
#deploy_model_instance_type = "ml.m4.8xlarge"
deploy_model_instance_type = "ml.m4.xlarge"  # endpoint instance type
# Instance types registered with the model package for inference / batch transform.
inference_instances=["ml.t2.medium", "ml.m5.xlarge", "ml.m5.2xlarge", "ml.m5.4xlarge", "ml.m5.12xlarge"]
transform_instances=["ml.m5.xlarge"]
PROCESSING_INSTANCE="ml.m4.4xlarge"  # default instance for processing steps
ticker='AMZN'  # default stock ticker for the demo pipeline run
# %store -r
# -
print(f's3://{default_bucket}/{prefix}/code/model_deploy.py')
print(f'SageMaker Role: {role}')
# ### Define parameters to parametrize Pipeline Execution
#
# Using SageMaker Pipelines, we can define the steps to be included in a pipeline but then use parameters to modify that pipeline when we go to execute the pipeline, without having to modify the pipeline definition. We'll provide some default parameter values that can be overridden on pipeline execution.
# +
#Define some default parameters:
#specify default number of instances for processing step
processing_instance_count = ParameterInteger(
    name="ProcessingInstanceCount",
    default_value=1
)
#specify default instance type for processing step
processing_instance_type = ParameterString(
    name="ProcessingInstanceType",
    default_value=PROCESSING_INSTANCE
)
#specify location of inference data for data processing step
inference_input_data = ParameterString(
    name="InferenceData",
    default_value=f's3://{default_bucket}/{prefix}/nlp-pipeline/inf-data',
)
#Specify the Ticker CIK for the pipeline (a single ticker symbol or CIK number)
inference_ticker_cik = ParameterString(
    name="InferenceTickerCik",
    default_value=ticker,
)
#specify default method for model approval
model_approval_status = ParameterString(
    name="ModelApprovalStatus",
    default_value="PendingManualApproval"
)
#specify if new model needs to be registered and deployed ("Y") or
#whether the pipeline should only run inference with the existing model ("N")
model_register_deploy = ParameterString(
    name="ModelRegisterDeploy",
    default_value="Y"
)
# -
# %store
# These are the stored variables, the container is created in the
# previous notebook 01_script-processor-custom-container.ipynb
# %pylab inline
# %store -r
# ---
#
# <a id='analyze-sec'></a>
#
# ## Preparing SEC dataset
# Before we dive right into setting up the pipeline, let's take a look at how the SageMaker Jumpstart Industry SDK for Financial language model helps obtain the dataset from SEC forms and what are the features available for us to use.
#
# **Note:** The code cells in this section are completely optional and for information purposes only; we will use the SageMaker JumpStart Industry SDK directly in the pipeline.
#
# Let's install the required dependencies first.
# ### Install the SageMaker JumpStart Industry SDK
#
# The functionality is delivered through a client-side SDK. The first step requires pip installing a Python package that interacts with a SageMaker processing container. The retrieval, parsing, transforming, and scoring of text is a complex process and uses different algorithms and packages. In order to make this seamless and stable for the user, the functionality is packaged into a SageMaker container. This lifts the load of installation and maintenance of the workflow, reducing the user effort down to a pip install followed by a single API call.
# !pip install --no-index smjsindustry==1.0.0
# As an example, we will try to pull AMZN ticker 10k/10q filings from EDGAR and write the data as CSV to S3. Below is the single block of code that contains the API call. The options are all self-explanatory.
# from smfinance import SECDataSetConfig, DataLoader
from smjsindustry.finance import DataLoader
from smjsindustry.finance.processor_config import EDGARDataSetConfig
# The extracted reports will be saved to an S3 bucket for us to review. This code will also be used in the Pipeline to fetch the report for the Ticker or CIK number passed to the SageMaker Pipeline. Executing the following code cell will run a processing job which will fetch the SEC reports from the EDGAR database.
# <a id='use-smjsindustry'></a>
#
# ### Obtain SEC data using the SageMaker JumpStart Industry SDK
# +
# %%time
# Configure which SEC filings to pull from EDGAR (demo: 4 companies, 10-K/10-Q, 2019-2020).
dataset_config = EDGARDataSetConfig(
    tickers_or_ciks=['amzn','goog', '27904', 'FB'],  # list of stock tickers or CIKs
    form_types=['10-K', '10-Q'],                     # list of SEC form types
    filing_date_start='2019-01-01',                  # starting filing date
    filing_date_end='2020-12-31',                    # ending filing date
    email_as_user_agent='<EMAIL>')        # user agent email
# The DataLoader runs a SageMaker processing job that downloads and parses the filings.
data_loader = DataLoader(
    role=sagemaker.get_execution_role(),    # loading job execution role
    instance_count=1,                       # instances number, limit varies with instance type
    instance_type='ml.c5.2xlarge',          # instance type
    volume_size_in_gb=30,                   # size in GB of the EBS volume to use
    volume_kms_key=None,                    # KMS key for the processing volume
    output_kms_key=None,                    # KMS key ID for processing job outputs
    max_runtime_in_seconds=None,            # timeout in seconds. Default is 24 hours.
    sagemaker_session=sagemaker.Session(),  # session object
    tags=None)                              # a list of key-value pairs
# Write the parsed filings as a single CSV to S3; wait and stream the job logs.
data_loader.load(
    dataset_config,
    's3://{}/{}'.format(default_bucket, 'sample-sec-data'),  # output s3 prefix (both bucket and folder names are required)
    'dataset_10k_10q.csv',                                   # output file name
    wait=True,
    logs=True)
# -
# #### Output
#
# The output of the `data_loader` processing job is a `CSV` file. We see the filings for different quarters.
#
# The filing date comes within a month of the end date of the reporting period. Both these dates are collected and displayed in the dataframe. The column `text` contains the full text of the report, but the tables are not extracted. The values in the tables in the filings are balance-sheet and income-statement data (numeric/tabular) and are easily available elsewhere as they are reported in numeric databases. The last column of the dataframe comprises the Management Discussion & Analysis section, the column is named `mdna`, which is the primary forward-looking section in the filing. This is the section that has been most widely used in financial text analysis. Therefore, we will use the `mdna` text to derive the sentiment of the overall filing in this example.
# !mkdir data
print (f"{default_bucket}/{prefix}/")
# Pull the DataLoader's CSV output down from S3 and load it into a DataFrame.
local_csv = './data/dataset_10k_10q.csv'
s3_client.download_file(default_bucket, 'sample-sec-data/dataset_10k_10q.csv', local_csv)
data_frame_10k_10q = pd.read_csv(local_csv)
data_frame_10k_10q
# ---
#
# ## Set Up Your MLOps NLP Pipeline with SageMaker Pipelines
# <a id='pipe-pre-deploy'></a>
#
# ### Step 1: Data pre-processing - extract SEC data and news about the company
#
# #### Define a processing step to prepare SEC data for inference
# We will define a processing step to extract 10K and 10Q forms for a specific Organization either using the company [Stock Ticker](https://www.investopedia.com/ask/answers/12/what-is-a-stock-ticker.asp) Symbol or [CIK (Central Index Key)](https://www.sec.gov/edgar/searchedgar/cik.htm) used to lookup reports in SEC's EDGAR System. You can find the company Stock Ticker Symbol to CIK Number mapping [here](https://www.sec.gov/include/ticker.txt). This step will also collect news article snippets related to the company using the NewsCatcher API.
#
# #### **<span style="color:lightgreen">Important</span>**:
#
# It is recommended to use CIKs as the input. The tickers will be internally converted to CIKs according to the [mapping file](https://www.sec.gov/include/ticker.txt).
# One ticker may map to multiple CIKs, but we only support the latest ticker to CIK mapping. Please provide the old CIKs in the input when you want historical filings. Also note that even though the Client side SDK allows you to download multiple SEC reports for multiple CIKs at a time, we will set up our data preprocessing step to grab exactly 1 SEC Report for 1 CIK (Company/Organization).
#
'''
we used store magic in the previous note book script-processor-custom-container.ipynb
to instantiate the container in the region of choice
'''
# Echo the custom ECR image URI restored via %store -r above.
CONTAINER_IMAGE_URI
loader_instance_type = "ml.c5.2xlarge"  # instance type the data-processing script passes to the SEC DataLoader
# Processor that runs the data-processing script inside the custom container.
create_dataset_processor = ScriptProcessor(command=['python3'],
                                   image_uri=CONTAINER_IMAGE_URI,
                                   role=role,
                                   instance_count=processing_instance_count,
                                   instance_type=processing_instance_type)
# Create a processing step to process the SEC data for inference:
# +
# Upload the data-processing script to S3 so the processing job can fetch it.
create_dataset_script_uri = f's3://{default_bucket}/{prefix}/code/data-processing.py'
s3_client.upload_file(Filename='./scripts/data-processing.py', Bucket=default_bucket, Key=f'{prefix}/code/data-processing.py')
# Pipeline step 1: extract the 10-K/10-Q MDNA text and related news articles
# for the requested ticker/CIK, writing both datasets to S3.
create_dataset_step = ProcessingStep(
    name='HFSECFinBertCreateDataset',
    processor=create_dataset_processor,
    outputs=[sagemaker.processing.ProcessingOutput(output_name='report_data',
                                                   source='/opt/ml/processing/output/10k10q',
                                                   destination=f'{inference_input_data}/10k10q'),
             sagemaker.processing.ProcessingOutput(output_name='article_data',
                                                   source='/opt/ml/processing/output/articles',
                                                   destination=f'{inference_input_data}/articles')],
    job_arguments=["--ticker-cik", inference_ticker_cik,
                   "--instance-type", loader_instance_type,
                   "--region", region,
                   "--bucket", default_bucket,
                   "--prefix", prefix,
                   "--role", role],
    code=create_dataset_script_uri)
# -
# <a id='pipe-pre-deploy'></a>
#
# ### Step 2: Create models for summarization and sentiment analysis
# Model names used for both the CreateModel steps and the SageMaker endpoints.
sentiment_model_name="HFSECFinbertModel"
summarization_model_name="HFSECPegasusModel"
# #### Create the `finBert` model for Sentiment Analysis
# +
# Download pre-trained model using HuggingFaceModel class
from sagemaker.huggingface import HuggingFaceModel

# Hugging Face Hub configuration: pull ProsusAI/finbert for text classification.
hub = {
  'HF_MODEL_ID':'ProsusAI/finbert',
  'HF_TASK':'text-classification'
}

# create Hugging Face Model Class (documentation here: https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/sagemaker.huggingface.html#hugging-face-model)
sentiment_huggingface_model = HuggingFaceModel(
    name=sentiment_model_name,
    transformers_version='4.6.1',
    pytorch_version='1.7.1',
    py_version='py36',
    env=hub,
    role=role,
    sagemaker_session=sagemaker_session,
)

# Instance configuration shared by the CreateModel steps below.
inputs = sagemaker.inputs.CreateModelInput(
    instance_type="ml.m4.xlarge"
)

# Pipeline step: register the FinBERT model object with SageMaker.
create_sentiment_model_step = CreateModelStep(
    name="HFSECFinBertCreateModel",
    model=sentiment_huggingface_model,
    inputs=inputs,
#     depends_on=['HFSECFinBertCreateDataset']
)
# -
# #### Create the Pegasus summarization model
# +
# Hub configuration: pull the financial-summarization Pegasus model.
hub = {
    'HF_MODEL_ID':'human-centered-summarization/financial-summarization-pegasus',
    'HF_TASK':'summarization'
}
# create Hugging Face Model Class (documentation here: https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/sagemaker.huggingface.html#hugging-face-model)
summary_huggingface_model = HuggingFaceModel(
    name=summarization_model_name,
    transformers_version='4.6.1',
    pytorch_version='1.7.1',
    py_version='py36',
    env=hub,
    role=role,
    sagemaker_session=sagemaker_session,
)
# Reuses the `inputs` CreateModelInput defined for the sentiment model above.
create_summary_model_step = CreateModelStep(
    name="HFSECPegasusCreateModel",
    model=summary_huggingface_model,
    inputs=inputs,
    # depends_on=['HFSECFinBertCreateDataset']
)
# -
# <a id='model-registry'></a>
#
# ### Step 3: Register model
#
# Use HuggingFace register method to register Hugging Face Model for deployment. Set up step as a custom processing step
# +
sentiment_model_package_group_name = "HuggingFaceSECSentimentModelPackageGroup"
summary_model_package_group_name = "HuggingFaceSECSummaryModelPackageGroup"
model_approval_status = "Approved"  # auto-approve so the deploy step can use the latest package
# Register the FinBert model in the SageMaker Model Registry.
register_sentiment_model_step = RegisterModel(
    name="HFSECFinBertRegisterModel",
    model = sentiment_huggingface_model,
    content_types=["application/json"],
    response_types=["application/json"],
    inference_instances=["ml.t2.medium", "ml.m4.4xlarge"],
    transform_instances=["ml.m4.4xlarge"],
    model_package_group_name = sentiment_model_package_group_name,
    approval_status = model_approval_status,
    depends_on=['HFSECFinBertCreateModel']
)
# Register the Pegasus summarization model in the Model Registry.
register_summary_model_step = RegisterModel(
    name="HFSECPegasusRegisterModel",
    model = summary_huggingface_model,
    content_types=["application/json"],
    response_types=["application/json"],
    inference_instances=["ml.t2.medium", "ml.m4.4xlarge"],
    transform_instances=["ml.m4.4xlarge"],
    model_package_group_name = summary_model_package_group_name,
    approval_status = model_approval_status,
    depends_on=['HFSECPegasusCreateModel']
)
# -
# <a id='deploy'></a>
#
# ### Step 4: Deploy model
#
# We deploy the FinBert and Pegasus models from the model registry.
#
# **NOTE:** The models in the model registry are the pre-trained version from HuggingFace Model Hub. Each of the deployment step will attempt to deploy a SageMaker Endpoint with the model and will write a property file upon successful completion. The Pipeline will make use of these property files to decide whether to execute the subsequent summarization and sentiment analysis inference steps.
# +
deploy_model_instance_type = "ml.m4.4xlarge"
deploy_model_instance_count = "1"  # kept as a string: passed through job_arguments (CLI strings)
sentiment_endpoint_name = "HFSECFinBertModel-endpoint"
summarization_endpoint_name = "HFSECPegasusModel-endpoint"
# -
# %store -r
print (f"using ecr container in {CONTAINER_IMAGE_URI}")
# +
s3_client.upload_file(Filename='./scripts/model_deploy_v2.py', Bucket=default_bucket, Key=f'{prefix}/code/model_deploy_v2.py')
deploy_model_script_uri = f's3://{default_bucket}/{prefix}/code/model_deploy_v2.py'
# Processor that runs the endpoint-deployment script inside the custom container.
deploy_model_processor = ScriptProcessor(command=['python3'],
                                    image_uri=CONTAINER_IMAGE_URI,
                                    role=role,
                                    instance_count=processing_instance_count,
                                    instance_type=processing_instance_type)
# Property file written by the deployment script; downstream conditions read it.
sentiment_deploy_response = PropertyFile(
    name="SentimentPropertyFile",
    output_name="sentiment_deploy_response",
    path="success.json" # the property file generated by the script
)
# Deploy (or update) the FinBert endpoint and publish success.json to S3.
sentiment_deploy_step = ProcessingStep(
    name='HFSECFinBertDeployModel',
    processor=deploy_model_processor,
    outputs=[sagemaker.processing.ProcessingOutput(output_name='sentiment_deploy_response',
                                source='/opt/ml/processing/output',
                                destination=f's3://{default_bucket}/{prefix}/nlp-pipeline/sentimentResponse')],
    job_arguments=[
        "--initial-instance-count", deploy_model_instance_count,
        "--endpoint-instance-type", deploy_model_instance_type,
        "--endpoint-name", sentiment_endpoint_name,
        "--model-package-group-name", sentiment_model_package_group_name,
        "--role", role,
        "--region", region,
    ],
    property_files=[sentiment_deploy_response],
    code=deploy_model_script_uri,
    depends_on=['HFSECFinBertRegisterModel'])
# Property file reporting the Pegasus deployment result.
summary_deploy_response = PropertyFile(
    name="SummaryPropertyFile",
    output_name="summary_deploy_response",
    path="success.json" # the property file generated by the script
)
# Deploy (or update) the Pegasus endpoint and publish success.json to S3.
summary_deploy_step = ProcessingStep(
    name='HFSECPegasusDeployModel',
    processor=deploy_model_processor,
    outputs=[sagemaker.processing.ProcessingOutput(output_name='summary_deploy_response',
                                source='/opt/ml/processing/output',
                                destination=f's3://{default_bucket}/{prefix}/nlp-pipeline/summaryResponse')],
    job_arguments=[
        "--initial-instance-count", deploy_model_instance_count,
        "--endpoint-instance-type", deploy_model_instance_type,
        "--endpoint-name", summarization_endpoint_name,
        "--model-package-group-name", summary_model_package_group_name,
        "--role", role,
        "--region", region,
    ],
    property_files=[summary_deploy_response],
    code=deploy_model_script_uri,
    depends_on=['HFSECPegasusRegisterModel'])
# -
# #### Create pipeline conditions to check if the Endpoint deployments were successful
#
# We will define a condition that checks whether our model deployment was successful, based on the property files generated by the deployment steps of both the FinBert and Pegasus models. If both conditions evaluate to `True`, then we will run our subsequent inferences for Summarization and Sentiment analysis.
# +
from sagemaker.workflow.conditions import ConditionEquals
from sagemaker.workflow.condition_step import ( ConditionStep )
from sagemaker.workflow.functions import JsonGet
summarize_script_uri = f's3://{default_bucket}/{prefix}/code/summarize.py'
# Condition: the FinBert deployment script reported success ("model_created": "Y").
sentiment_condition_eq = ConditionEquals(
    left=JsonGet( #the left value of the evaluation expression
        step_name="HFSECFinBertDeployModel", #the step from which the property file will be grabbed
        property_file=sentiment_deploy_response, #the property file instance that was created earlier in Step 4
        json_path="model_created" #the JSON path of the property within the property file success.json
    ),
    right="Y" #the right value of the evaluation expression: the success flag written by the deploy script
)
# Condition: the Pegasus deployment script reported success.
summary_condition_eq = ConditionEquals(
    left=JsonGet( #the left value of the evaluation expression
        step_name="HFSECPegasusDeployModel", #the step from which the property file will be grabbed
        property_file=summary_deploy_response, #the property file instance that was created earlier in Step 4
        json_path="model_created" #the JSON path of the property within the property file success.json
    ),
    right="Y" #the right value of the evaluation expression: the success flag written by the deploy script
)
summarize_processor = ScriptProcessor(command=['python3'],
                                    image_uri=CONTAINER_IMAGE_URI,
                                    role=role,
                                    instance_count=processing_instance_count,
                                    instance_type=processing_instance_type)
# NOTE(review): summarize_step_2 is re-assigned with an identical definition later
# in the notebook (Step 5); confirm which assignment is meant to feed the pipeline.
summarize_step_2 = ProcessingStep(
    name='HFSECPegasusSummarizer_2',
    processor=summarize_processor,
    inputs=[sagemaker.processing.ProcessingInput(input_name='summary_data',
                                source=f'{inference_input_data}/10k10q',
                                destination='/opt/ml/processing/input')],
    outputs=[sagemaker.processing.ProcessingOutput(output_name='summarized_data',
                                source='/opt/ml/processing/output',
                                destination=f'{inference_input_data}/10k10q/summary')],
    job_arguments=["--ticker-cik", inference_ticker_cik,
                   "--region", region,
                   "--endpoint-name", summarization_endpoint_name],
    code=summarize_script_uri)
# Run the summarization branch only if BOTH endpoints deployed successfully.
deploy_condition_step = ConditionStep(
    name="HFSECFinBertDeployConditionCheck",
    conditions=[sentiment_condition_eq,summary_condition_eq], #the equal to conditions defined above
    if_steps=[summarize_step_2], #if the condition evaluates to true then run the summarization step
    else_steps=[], #there are no else steps so we will keep it empty
    depends_on=['HFSECFinBertDeployModel','HFSECPegasusDeployModel'] #dependencies on both Finbert and Pegasus Deployment steps
)
# -
# ### Step 5: Summarize SEC report step
# This step is to make use of the Pegasus Summarizer model endpoint to summarize the MDNA text from the SEC report. Because the MDNA text is usually large, we want to derive a short summary of the overall text to be able to determine the overall sentiment.
# NOTE(review): this re-assigns summarize_processor (already defined above) with
# an identical configuration.
summarize_processor = ScriptProcessor(command=['python3'],
                                    image_uri=CONTAINER_IMAGE_URI,
                                    role=role,
                                    instance_count=processing_instance_count,
                                    instance_type=processing_instance_type)
# +
s3_client.upload_file(Filename='./scripts/summarize.py', Bucket=default_bucket, Key=f'{prefix}/code/summarize.py')
# Summarize the MDNA text with the Pegasus endpoint; this instance is used on
# the "existing endpoints" branch of the top-level condition.
summarize_step_1 = ProcessingStep(
    name='HFSECPegasusSummarizer_1',
    processor=summarize_processor,
    inputs=[sagemaker.processing.ProcessingInput(input_name='summary_data',
                                source=f'{inference_input_data}/10k10q',
                                destination='/opt/ml/processing/input')],
    outputs=[sagemaker.processing.ProcessingOutput(output_name='summarized_data',
                                source='/opt/ml/processing/output',
                                destination=f'{inference_input_data}/10k10q/summary')],
    job_arguments=["--ticker-cik", inference_ticker_cik,
                   "--region", region,
                   "--endpoint-name", summarization_endpoint_name],
    code=summarize_script_uri)
# Identical step instance for the re-deployment branch (a step object can only
# appear once in a pipeline graph). NOTE(review): re-assigns the summarize_step_2
# defined before deploy_condition_step above.
summarize_step_2 = ProcessingStep(
    name='HFSECPegasusSummarizer_2',
    processor=summarize_processor,
    inputs=[sagemaker.processing.ProcessingInput(input_name='summary_data',
                                source=f'{inference_input_data}/10k10q',
                                destination='/opt/ml/processing/input')],
    outputs=[sagemaker.processing.ProcessingOutput(output_name='summarized_data',
                                source='/opt/ml/processing/output',
                                destination=f'{inference_input_data}/10k10q/summary')],
    job_arguments=["--ticker-cik", inference_ticker_cik,
                   "--region", region,
                   "--endpoint-name", summarization_endpoint_name],
    code=summarize_script_uri)
# -
# ### Step 6: Sentiment inference step - SEC summary and news articles
# This step uses the MDNA summary (determined by the previous step) and the news articles to find out the sentiment of the company's financial and what the Market trends are indicating. This would help us understand the overall position of the company's financial outlook and current position without leaning solely on the company's forward-looking statements and bring objective market opinions into the picture.
# Processor for the sentiment-analysis processing steps.
sentiment_processor = ScriptProcessor(command=['python3'],
                                    image_uri=CONTAINER_IMAGE_URI,
                                    role=role,
                                    instance_count=processing_instance_count,
                                    instance_type=processing_instance_type)
# +
sentiment_script_uri = f's3://{default_bucket}/{prefix}/code/sentiment.py'
s3_client.upload_file(Filename='./scripts/sentiment.py', Bucket=default_bucket, Key=f'{prefix}/code/sentiment.py')
# Sentiment step for the "existing endpoints" branch: scores the SEC summary and
# the news articles against the FinBert endpoint.
# Fix: use sentiment_processor here. It was created above but left unused while
# both steps reused summarize_processor; the configurations are identical, so
# pipeline behavior is unchanged.
sentiment_step_1 = ProcessingStep(
    name='HFSECFinBertSentiment_1',
    processor=sentiment_processor,
    inputs=[sagemaker.processing.ProcessingInput(input_name='sec_summary',
                                source=f'{inference_input_data}/10k10q/summary',
                                destination='/opt/ml/processing/input/10k10q'),
            sagemaker.processing.ProcessingInput(input_name='articles',
                                source=f'{inference_input_data}/articles',
                                destination='/opt/ml/processing/input/articles')],
    outputs=[sagemaker.processing.ProcessingOutput(output_name='sentiment_data',
                                source='/opt/ml/processing/output',
                                destination=f'{inference_input_data}/sentiment')],
    job_arguments=["--ticker-cik", inference_ticker_cik,
                   "--region", region,
                   "--endpoint-name", sentiment_endpoint_name],
    code=sentiment_script_uri,
    depends_on=["HFSECPegasusSummarizer_1"])
# Sentiment step for the re-deployment branch (runs after HFSECPegasusSummarizer_2).
sentiment_step_2 = ProcessingStep(
    name='HFSECFinBertSentiment_2',
    processor=sentiment_processor,
    inputs=[sagemaker.processing.ProcessingInput(input_name='sec_summary',
                                source=f'{inference_input_data}/10k10q/summary',
                                destination='/opt/ml/processing/input/10k10q'),
            sagemaker.processing.ProcessingInput(input_name='articles',
                                source=f'{inference_input_data}/articles',
                                destination='/opt/ml/processing/input/articles')],
    outputs=[sagemaker.processing.ProcessingOutput(output_name='sentiment_data',
                                source='/opt/ml/processing/output',
                                destination=f'{inference_input_data}/sentiment')],
    job_arguments=["--ticker-cik", inference_ticker_cik,
                   "--region", region,
                   "--endpoint-name", sentiment_endpoint_name],
    code=sentiment_script_uri,
    depends_on=["HFSECPegasusSummarizer_2"])
# -
# #### Condition Step
# As explained earlier, this is a top-level condition step. Based on the value of the pipeline parameter `model_register_deploy`, it determines whether we register and deploy a new version of the models and then run inference, or simply run inference using the existing endpoints.
# +
from sagemaker.workflow.conditions import ConditionEquals
from sagemaker.workflow.condition_step import ( ConditionStep )
# Top-level switch: pipeline parameter model_register_deploy == "Y" selects the
# "register + deploy new model versions" branch.
condition_eq = ConditionEquals(
    left=model_register_deploy,
    right="Y"
)
# -
#Define the condition step
condition_step = ConditionStep(
    name="HFSECFinBertConditionCheck",
    conditions=[condition_eq], #the parameter is Y
    if_steps=[
        create_sentiment_model_step,
        register_sentiment_model_step,
        sentiment_deploy_step,
        create_summary_model_step,
        register_summary_model_step,
        summary_deploy_step
    ], # if the condition evaluates to true then create model, register, and deploy
    else_steps=[summarize_step_1],  # otherwise skip deployment and summarize with the existing endpoint
    depends_on=['HFSECFinBertCreateDataset']
)
# #### Combine Pipeline steps and run
pipeline_name = 'FinbertSECDeploymentPipeline'
# Assemble the pipeline. Branch-only steps (deploy steps, summarizers) are
# reachable through the two condition steps and are not listed at top level.
pipeline = Pipeline(
    name=pipeline_name,
    parameters=[
        processing_instance_type,
        processing_instance_count,
        model_register_deploy,
        inference_ticker_cik,
        inference_input_data],
    steps=[
        create_dataset_step,
        condition_step,
        deploy_condition_step,
        sentiment_step_1,
        sentiment_step_2
    ],
)
pipeline.upsert(role_arn=role)  # create the pipeline, or update it if it already exists
# %%time
start_response = pipeline.start()
start_response.wait(delay=60, max_attempts=200)  # poll every 60s, up to ~200 minutes
start_response.describe()
# The following image shows a successful execution of the NLP end-to-end Pipeline.
#
#
# <img src="./images/pipeline_execution_graph.png" alt="Successful Pipeline Execution" style="width: 800px;"/>
# ---
#
# ## View Evaluation Results
#
# Once the pipeline execution completes, we can download the evaluation data from S3 and view it.
# Download and display the sentiment results produced by the pipeline.
# NOTE(review): `ticker` is not defined in this notebook section — presumably
# restored via %store -r; confirm before running standalone.
s3_client.download_file(default_bucket, f'{prefix}/nlp-pipeline/inf-data/sentiment/{ticker}_sentiment_result.csv', f'./data/{ticker}_sentiment_result.csv')
sentiment_df = pd.read_csv(f'./data/{ticker}_sentiment_result.csv')
sentiment_df
# ---
#
# ## Clean up
# Delete the SageMaker Pipeline and the SageMaker Endpoints created by the pipeline.
def clean_up_resources():
    """Delete the SageMaker pipeline and both endpoints it created."""
    pipeline.delete()
    for endpoint in (sentiment_endpoint_name, summarization_endpoint_name):
        sagemaker_boto_client.delete_endpoint(EndpointName=endpoint)
| end_to_end/nlp_mlops_company_sentiment/02_nlp_company_earnings_analysis_pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# 如果一棵二叉树满足下述几个条件,则可以称为 奇偶树 :
# 二叉树根节点所在层下标为 0 ,根的子节点所在层下标为 1 ,根的孙节点所在层下标为 2 ,依此类推。
# 1、偶数下标 层上的所有节点的值都是 奇 整数,从左到右按顺序 严格递增
# 2、奇数下标 层上的所有节点的值都是 偶 整数,从左到右按顺序 严格递减
# 给你二叉树的根节点,如果二叉树为 奇偶树,
# 则返回 true ,否则返回 false 。
#
# 示例 1:
# 输入:root = [1,10,4,3,null,7,9,12,8,6,null,null,2]
# 输出:true
# 解释:每一层的节点值分别是:
# 0 层:[1]
# 1 层:[10,4]
# 2 层:[3,7,9]
# 3 层:[12,8,6,2]
# 由于 0 层和 2 层上的节点值都是奇数且严格递增,而 1 层和 3 层上的节点值都是偶数且严格递减,因此这是一棵奇偶树。
#
# 示例 2:
# 输入:root = [5,4,2,3,3,7]
# 输出:false
# 解释:每一层的节点值分别是:
# 0 层:[5]
# 1 层:[4,2]
# 2 层:[3,3,7]
# 2 层上的节点值不满足严格递增的条件,所以这不是一棵奇偶树。
#
# 示例 3:
# 输入:root = [5,9,1,3,5,7]
# 输出:false
# 解释:1 层上的节点值应为偶数。
#
# 示例 4:
# 输入:root = [1]
# 输出:true
#
# 示例 5:
# 输入:root = [11,8,6,1,3,9,11,30,20,18,16,12,10,4,2,17]
# 输出:true
#
# 提示:
# 1、树中节点数在范围 [1, 10^5] 内
# 2、1 <= Node.val <= 106
# -
def check(vals, is_odd=True):
    """Scratch helper: report whether *vals* looks like a valid level.

    With is_odd=True the list must have even length and be in non-increasing
    order; with is_odd=False it must have odd length and be in non-decreasing
    order.  NOTE(review): this tests the parity of the *length*, not of the
    values, and accepts ties, so it does not fully encode the even-odd-tree
    rules — kept behavior-identical here.
    """
    ordered = sorted(vals, reverse=is_odd)
    if is_odd:
        return len(vals) % 2 == 0 and vals == ordered
    return len(vals) % 2 == 1 and vals == ordered
check([3,7,9], False)  # level-2 values of example 1: odd length, ascending -> True
is_odd = 2 % 2 == 0  # NOTE(review): for level index 2 this is True, so the name "is_odd" is misleading
is_odd
import collections
class Solution:
    def isEvenOddTree(self, root: TreeNode) -> bool:
        """Breadth-first check of the even-odd-tree rules.

        Even-indexed levels must hold strictly increasing odd values;
        odd-indexed levels must hold strictly decreasing even values.
        """
        level = collections.deque([root])
        even_level = True
        while level:
            prev = None
            for _ in range(len(level)):
                node = level.popleft()
                # Parity rule: odd values on even levels, even values on odd levels.
                if node.val % 2 != (1 if even_level else 0):
                    return False
                # Strict monotonicity against the previous node on this level.
                if prev is not None:
                    if even_level and prev.val >= node.val:
                        return False
                    if not even_level and prev.val <= node.val:
                        return False
                for child in (node.left, node.right):
                    if child:
                        level.append(child)
                prev = node
            even_level = not even_level
        return True
# +
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def isEvenOddTree(self, root: TreeNode) -> bool:
        """Level-by-level check of the even-odd-tree rules.

        Even-indexed levels must hold strictly increasing odd values;
        odd-indexed levels must hold strictly decreasing even values.
        """
        nodes = [root]  # nodes of the current level
        vals = []  # NOTE(review): unused
        count = 0  # current level index
        limit_val = None  # value of the previously visited node on this level
        while nodes:
            new_nodes, new_val = [], []
            is_even = count % 2 == 0  # even-indexed level?
            for node in nodes:
                if new_val:
                    # Not the first node on the level: enforce parity and
                    # strict monotonicity against the previous value.
                    if is_even:
                        if node.val % 2 == 1 and node.val > limit_val:
                            limit_val = node.val
                        else:
                            return False
                    else:
                        if node.val % 2 == 0 and node.val < limit_val:
                            limit_val = node.val
                        else:
                            return False
                else:
                    # First node on the level: only the parity rule applies.
                    limit_val = node.val
                    if is_even:
                        if node.val % 2 == 0:
                            return False
                    else:
                        if node.val % 2 == 1:
                            return False
                new_val.append(node.val)
                if node.left:
                    new_nodes.append(node.left)
                if node.right:
                    new_nodes.append(node.right)
            nodes = new_nodes
            count += 1
        return True
# def check(self, vals, count):
# print(vals, count)
# if count % 2 == 0:
# is_odd = False
# else:
# is_odd = True
# if is_odd:
# if len(vals) % 2 == 0 and len(vals) == len(set(vals)):
# s_vals = sorted(vals)
# return vals == s_vals[::-1]
# else:
# return False
# else:
# if len(vals) % 2 == 1 and len(vals) == len(set(vals)):
# s_vals = sorted(vals)
# return vals == s_vals
# else:
# return False
| Tree/1020/1609. Even Odd Tree.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 基于转换的标注
#
# N-Gram 标注器有两个潜在的问题:一是 n-gram 表的大小(语言模型),使用回退标注器的 n-gram 标注器可能存储 bigram 和 trigram 表,这会是很大的稀疏矩阵,难以部署在移动计算设备上;二是关于上下文的,n-gram 标注器只使用上下文中的标记,而很难使用词的其它特征,虽然词本身可能是一个有用的信息源。
#
# **Brill 标注**是一种基于转换的学习,它不断猜测每个词的标记,然后返回和修正错误,其转换规则是语言学可解释的。以下表为例,首先使用 unigram 标注器标注,然后运行两个规则:1.当前面词的词性是 TO 时,替换 NN 为 VB;2.当后一个标记是 NNS 时,替换 TO 为 IN。经过两条规则转换后得到最终的标注结果:
#
# | Phrase | to | increase | grants | to | states | for | vocational | rehabilitation |
# |:-------:|:--:|:--------:|:------:|:--:|:------:|:---:|:----------:|:--------------:|
# | Unigram | TO | NN | NNS | TO | NNS | IN | JJ | NN |
# | Rule 1 | | VB | | | | | | |
# | Rule 2 | | | | IN | | | | |
# | Output | TO | VB | NNS | IN | NNS | IN | JJ | NN |
# | Gold | TO | VB | NNS | IN | NNS | IN | JJ | NN |
#
# Brill 标注器的所有规则都是由以下形式的模板产生的:“替换 T1 为 T2 在上下文 C 中”。典型的上下文是之前或之后的词的内容或标记,或者当前词的两到三个词范围内出现一个特定的标记。在训练期间,T1,T2 和 C 的标注器猜测值创造出数以千计的候选规则,每一条规则根据其净收益打分:它修正的不正确标记的数目减去它错误修改的正确标记数目。
# Run NLTK's built-in transformation-based (Brill) tagging demo.
from nltk.tbl import demo
demo.demo()
| 5.6-transformation-based-tagging.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
from matplotlib import pyplot as plt
import numpy as np
import cv2
import uuid # Unique identifier
import os
import time
# !pip install pyqt5 lxml --upgrade
# !cd labelImg && pyrcc5 -o libs/resources.py resources.qrc
# !cd yolov5 && python train.py --img 320 --batch 16 --epochs 500 --data dataset.yml --weights yolov5s.pt --workers 2
# !cd D:\Programming\Computer_Vision\Yolo\yolov5
# the key to detect objects
# # !pip install -r requirements.txt
# !pip install -r D:\Programming\Computer_Vision\Yolo\yolov5\requirements.txt
# Load the small pretrained YOLOv5 model from the Ultralytics hub.
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
# # 2.Make Detections with Images
# NOTE(review): Windows path in a non-raw string; none of the backslash pairs
# here form escape sequences, but a raw string (r"...") would be safer.
img = "D:\Programming\Computer_Vision\Yolo\images_and_videos\images\Dog_and_Me.jpg"
results = model(img)
results.print()
# %matplotlib inline
plt.imshow(np.squeeze(results.render())) # squeeze drops the singleton leading dimension before display
plt.show()
results.render()
# # 3. Real Time Detections
# Real-time detection over a video file; press 'q' in the window to stop.
cap = cv2.VideoCapture("D:\Programming\Computer_Vision\Yolo\images_and_videos\Videos\kaikai_kitan_piano.mp4") # or cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    # Make detections
    results = model(frame)
    cv2.imshow('YOLO', np.squeeze(results.render()))
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
import torch
from matplotlib import pyplot as plt
import numpy as np
import cv2
# +
import uuid # Unique identifier
import os
import time
# -
IMAGES_PATH = "D:\Programming\Computer_Vision\Yolo\yolov5\data\images" #/data/images
labels = ['awake', 'drowsy']  # class labels to collect training images for
number_imgs = 20  # images captured per label
# !cd D:\Programming\Computer_Vision\Yolo\yolov5
cap = cv2.VideoCapture(0)  # webcam feed used for data collection
# Loop through labels
for label in labels:
    print('Collecting images for {}'.format(label))
    time.sleep(5)  # give the user time to pose for this label
    # Loop through image range
    for img_num in range(number_imgs):
        print('Collecting images for {}, image number {}'.format(label, img_num))
        # Webcam feed
        ret, frame = cap.read()
        # Naming out image path (uuid1 guarantees unique file names)
        imgname = os.path.join(IMAGES_PATH, label+'.'+str(uuid.uuid1())+'.jpg')
        # Writes out image to file
        cv2.imwrite(imgname, frame)
        # Render to the screen
        cv2.imshow('Image Collection', frame)
        # 2 second delay between captures
        time.sleep(2)
        # NOTE(review): 'q' only breaks the inner loop; the outer label loop continues.
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
cap.release()
cv2.destroyAllWindows()
print(os.path.join(IMAGES_PATH, labels[0]+'.'+str(uuid.uuid1())+'.jpg'))  # sanity-check one generated file name
# Dry run: print the file names that would be produced for every label/image.
for label in labels:
    print('Collecting images for {}'.format(label))
    for img_num in range(number_imgs):
        print('Collecting images for {}, image number {}'.format(label, img_num))
        imgname = os.path.join(IMAGES_PATH, label+'.'+str(uuid.uuid1())+'.jpg')
        print(imgname)
# Run detection on one previously collected image.
img = os.path.join('data', 'images', 'drowsy.11aa6510-3c33-11ec-84a3-94e6f7a1c25e.jpg')
results = model(img)
results.print()
| YOLO_Drowsiness_Detection/YOLO_Drowsiness_Detection_For_Taking_Selfie.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/somesh-scoville/pytorch-custom-dataloader/blob/master/FER_VGGNet.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#
#
# <p style="font-family: times, serif; font-size:20pt; font-style:bold; color:Orange">
# PyTorch custom dataloader
# </p>
#
import matplotlib.pyplot as plt
import torch
plt.rcParams["figure.figsize"] = (4, 4)  # default figure size for this notebook
print(f"Torch version: {torch.__version__} GPUs Available for PyTorch: {torch.cuda.is_available()}")
# <p style="font-family: times, serif; font-size:17pt; font-style:bold; color:Orange">
# Dwonload dataset
# </p>
# !git clone https://github.com/somesh-scoville/datasets
# !mv datasets/fruits ./
# <p style="font-family: times, serif; font-size:17pt; font-style:bold; color:Orange">
# Prepare dataset
# </p>
# %run -i prepare_dataset.py
# <p style="font-family: times, serif; font-size:17pt; font-style:bold; color:Orange">
# Sanity check for Dataloader
# </p>
# %run -i dataloader.py
# <p style="font-family: times, serif; font-size:20pt; font-style:bold; color:Green">
# Done
# </p>
| dataloader.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="iil3DJ1-nvL7"
# + [markdown] id="yJpLPd3rjPMD"
# ### Prerequsites
# + id="qurS9q_bdahh"
# !pip install flair
# + id="cxGi8LFbvPXe" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1617991188927, "user_tz": -120, "elapsed": 1577, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="483fe17b-edfe-4e82-a6d1-755c80012947"
# Import general modules for data processing
import pandas as pd
import seaborn as sns
# Confirmation that the imports resolved (fixes "Succesfully" typo in the message).
print("Successfully imported necessary modules")
# + colab={"base_uri": "https://localhost:8080/"} id="YXJ4NVJ8wI9O" executionInfo={"status": "ok", "timestamp": 1617991073213, "user_tz": -120, "elapsed": 36056, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="2c77c38b-7799-47fe-b622-aadff7eb83a6"
# Enable save and load to Google Drive (Colab-only).
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# + id="c9Bzqpmj2qX8"
# load LOC file
# NOTE(review): nrows=50 and skiprows=[32,40] restrict this exploratory load;
# the tagging run below reloads the corpus without them.
infile = '/content/drive/My Drive/e_rara_fulltexts/bernensia/corpus_bernensia_ger_processed.csv'
with open(infile, 'r') as f:
    corpus = pd.read_csv(f, encoding="UTF-8", usecols=['e_rara_id', 'clean_text', 'clean_text_length'], nrows=50, skiprows=[32,40]) #
# + colab={"base_uri": "https://localhost:8080/"} id="f_0HOOqZyeo-" executionInfo={"status": "ok", "timestamp": 1617993757926, "user_tz": -120, "elapsed": 847, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="db344b71-c0d1-414f-e76e-cb0f98edcf19"
# Summary statistics of document lengths, rounded to whole characters.
round(corpus.clean_text_length.describe())
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="2jB9LUkkye74" executionInfo={"status": "ok", "timestamp": 1617993761043, "user_tz": -120, "elapsed": 1080, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="0b4a7d34-e494-4309-98db-6c087f3a1159"
# Kernel-density plot of the document-length distribution.
sns.displot(data=corpus, x='clean_text_length', kind="kde")
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="vlJfvfEK87YR" executionInfo={"status": "ok", "timestamp": 1617993791227, "user_tz": -120, "elapsed": 906, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="650ab654-b4fb-4517-98b5-ea60a7249e58"
# Inspect the documents ordered by length (shortest first).
corpus.sort_values('clean_text_length')
# + [markdown] id="ncX0jqMzgkkn"
# ### Apply NER tagger model
# + id="Rl_Luop7-TzZ"
# load LOC file
# Reload the full corpus (all rows this time) for the tagging run below.
infile = '/content/drive/My Drive/e_rara_fulltexts/bernensia/corpus_bernensia_ger_processed.csv'
with open(infile, 'r') as f:
    corpus = pd.read_csv(f, encoding="UTF-8", usecols=['e_rara_id', 'clean_text', 'clean_text_length'])
# + id="yrmn8rc1AuOF"
# Mark the previously computed LOC column as batch I of the ner-multi-fast run.
corpus = corpus.rename(columns={"LOC_ner-multi-fast": "ner-multi-fast-I"})
# + colab={"base_uri": "https://localhost:8080/", "height": 80} id="or8-wd9222dN" executionInfo={"status": "ok", "timestamp": 1618001167255, "user_tz": -120, "elapsed": 1417, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="c6523655-6cd0-45ab-c6ff-0104aeae5c41"
corpus[39:40]  # spot-check a single row
# + colab={"base_uri": "https://localhost:8080/"} id="hWT1Pj1W1hth" executionInfo={"status": "ok", "timestamp": 1617715000598, "user_tz": -120, "elapsed": 35638, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="a8f3b5bd-925a-4800-f88d-17b841c12095"
from flair.data import Sentence
from flair.models import SequenceTagger
# load the NER tagger
# See available sentence tagger: https://github.com/flairNLP/flair/blob/master/resources/docs/TUTORIAL_2_TAGGING.md#list-of-pre-trained-sequence-tagger-modelsentence
# Multilingual fast NER model; downloaded on first use.
tagger = SequenceTagger.load('flair/ner-multi-fast') # size: 1.51 GB
# + id="DE0xi3Nmi_GM" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1617717595729, "user_tz": -120, "elapsed": 2487336, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="1d6afff0-1728-4021-f025-f6858978a57e"
# Import segtok library to split the paragraph into sentences
from segtok.segmenter import split_single
# Run the flair NER tagger over a slice of the corpus and keep only the
# location-entity tokens (S-/B-/E-/I-LOC) per document.
corpus['ner-multi-fast'] = ''
for index in corpus.index[125:130]:
    # use splitter to split text into list of sentences
    sentences = [Sentence(sent, use_tokenizer=True) for sent in split_single(corpus['clean_text'][index])]
    tagger.predict(sentences) # predict tags for sentences
    loc_ents = []
    for s in sentences:
        for token in s.tokens:
            tag = token.get_tag('ner')
            if tag.value in ['S-LOC', 'B-LOC', 'E-LOC', 'I-LOC']:
                loc_ents.append([token.text, tag.value])
    # Fix: use .at instead of chained indexing (corpus['ner-multi-fast'][index] = ...),
    # which assigns through a possibly-copied Series and can silently drop the
    # update (pandas SettingWithCopy hazard).
    corpus.at[index, 'ner-multi-fast'] = loc_ents
    # Checkpoint the partial results to Google Drive after every document so a
    # Colab disconnect does not lose the batch.
    # %cd /content/drive/My\ Drive/e_rara_fulltexts/bernensia
    outfile = "./corpus_bernensia_ger_LOC_ner-multi-fast-I-125.csv"
    with open(outfile, "w") as f:
        corpus.to_csv(f, index=False, columns=['e_rara_id', 'clean_text_length', 'ner-multi-fast'])
    # %cd /content/
    print("Saved to file {} to Google Drive.".format(index))
# + id="ybeAYyZ2EpIl"
corpus[30:35]  # spot-check a few rows
# + colab={"base_uri": "https://localhost:8080/"} id="ucLk5gjfkWeG" executionInfo={"status": "ok", "timestamp": 1617659922021, "user_tz": -120, "elapsed": 559, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="464cfb23-42ae-40d4-8737-f6587c282853"
# Write to Google Drive
# Persist the batch-I LOC column alongside the document ids.
# %cd /content/drive/My\ Drive/e_rara_fulltexts/bernensia
outfile = "./corpus_bernensia_ger_LOC_ner-multi-fast-I.csv"
with open(outfile, "w") as f:
    corpus.to_csv(f, index=False, columns=['e_rara_id', 'ner-multi-fast-I'])
# %cd /content/
print("Saved to file to Google Drive.")
# + id="3CbJI8nMvLzA"
#Import segtok library to split the paragraph into sentences
from segtok.segmenter import split_single
# Sanity check: tag the first document and print the recognized entity spans.
sentences = [Sentence(sent, use_tokenizer=True) for sent in split_single(corpus['clean_text'][0])]
#predicting entities
# NOTE(review): only sentences[0:5] are tagged, but the loop prints spans for
# all sentences — later sentences will simply have none.
tagger.predict(sentences[0:5])# print the entities with below command
for sent in sentences:
    for entity in sent.get_spans('ner'):
        print(entity)
# + id="dvBl5G_Q6SMg"
#Import segtok library to split the paragraph into sentences
#from segtok.segmenter import split_single
# Tag only the first 500 characters of the first document and print the
# location-entity tokens (single tokens S/B/E of LOC spans).
sentences = [Sentence(sent, use_tokenizer=True) for sent in split_single(corpus['clean_text'][0][0:500])]
#predicting entities
tagger.predict(sentences[0:500])# print the entities with below command
for sent in sentences:
    for token in sent.tokens:
        tag = token.get_tag('ner')
        if tag.value in ['S-LOC', 'B-LOC', 'E-LOC']:
            print([token.text, tag.value])
# + id="v0RVl7xYkjOY"
# + [markdown] id="ygvHvfjwlo_w"
# ## LOC Analysis: SpaCy
# + colab={"base_uri": "https://localhost:8080/"} id="V0OYZrcemiZu" executionInfo={"status": "ok", "timestamp": 1617807066473, "user_tz": -120, "elapsed": 20664, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="b0c57206-d786-48d8-bdb2-e97f8877ec3a"
# Enable save and load to Google Drive (Colab-only).
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# + id="afULOZoSl0AH"
import pandas as pd
import seaborn as sns
# + id="pQqwJLEcl1wr"
# load LOC file
infile = '/content/drive/My Drive/e_rara_fulltexts/bernensia/corpus_bernensia_ger_LOC_spacy_text_full.csv'
# Pass the path so encoding="UTF-8" is honoured: with a pre-opened
# text-mode handle pandas reads locale-decoded text and ignores encoding=.
loc = pd.read_csv(infile, encoding="UTF-8", usecols=['e_rara_id', 'clean_text_length', 'spacy_concat'])
# + colab={"base_uri": "https://localhost:8080/"} id="5wzXGSiz0cSA" executionInfo={"status": "ok", "timestamp": 1617997653667, "user_tz": -120, "elapsed": 763, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="999febac-88e2-402b-e239-231d881de82c"
# Peek at the raw comma-joined spaCy LOC string of row 4.
print(loc['spacy_concat'][4:5])
# + colab={"base_uri": "https://localhost:8080/"} id="tkkRCaFEm8Bs" executionInfo={"status": "ok", "timestamp": 1618000269776, "user_tz": -120, "elapsed": 1036, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="37ffa300-806e-40a7-b180-7f47bf87c136"
loc = loc.rename(columns={"spacy_concat": "spacy"})
# Derive list/set/count columns from the comma-joined LOC string.
# Vectorised column assignments replace the original per-cell chained
# writes (loc['col'][i] = ...), which raise SettingWithCopyWarning and
# silently stop working under pandas copy-on-write.
split_locs = loc['spacy'].str.split(', ')
loc['spacy_count'] = split_locs.str.len()                      # number of LOCs
loc['spacy_ucount'] = split_locs.apply(lambda l: len(set(l)))  # number of unique LOCs
loc['spacy_list'] = split_locs                                 # LOCs as a list per row
loc['spacy_set'] = split_locs.apply(set)                       # unique LOCs per row
# + colab={"base_uri": "https://localhost:8080/"} id="Y9GYWHS2quHF" executionInfo={"status": "ok", "timestamp": 1618001183326, "user_tz": -120, "elapsed": 838, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="b2975219-c856-439d-f0d2-feaddf199b8e"
# Inspect the spaCy LOC list of row 39 (single-row slice keeps this a loop).
for i in loc[39:40].spacy_list:
    print(i)
# + colab={"base_uri": "https://localhost:8080/"} id="PFKJ8vO8cVq-" executionInfo={"status": "ok", "timestamp": 1618002156266, "user_tz": -120, "elapsed": 780, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="69de378a-a975-4fe7-dea1-bbb2c9eaba9f"
# Show which e-rara document row 39 is.
for i in loc[39:40].e_rara_id:
    print(i)
# + [markdown] id="cKLex0xBPQsH"
# ### write to file
# + colab={"base_uri": "https://localhost:8080/"} id="ulnUMwqZzgQF" executionInfo={"status": "ok", "timestamp": 1617823295139, "user_tz": -120, "elapsed": 1031, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="1822a9d4-c158-4c56-d831-528479ecf1db"
# Persist the enriched spaCy LOC frame to Google Drive.
# Write to Google Drive
# %cd /content/drive/My\ Drive/e_rara_analysis
outfile = "./LOC_spacy.csv"
# Write via the path: an open text handle without newline='' yields
# blank lines between rows on Windows.
loc.to_csv(outfile, index=False)  # columns=['e_rara_id', 'ner-multi-fast-I']
# %cd /content/
print("Saved to file to Google Drive.")
# + [markdown] id="MVkZM7sUPHYV"
# #### with 50's set
# + id="efWxkTFqrMgT"
# load LOC file (50-document sample; rows 32 and 40 skipped — no data)
infile = '/content/drive/My Drive/e_rara_fulltexts/bernensia/corpus_bernensia_ger_LOC_spacy_text_full.csv'
# Pass the path so encoding="UTF-8" is honoured (ignored for a pre-opened
# text-mode handle).
loc = pd.read_csv(infile, encoding="UTF-8", usecols=['e_rara_id', 'clean_text_length', 'spacy_concat'], nrows=50, skiprows=[32,40])
# + colab={"base_uri": "https://localhost:8080/"} id="RubnJIyqsGUy" executionInfo={"status": "ok", "timestamp": 1618000269776, "user_tz": -120, "elapsed": 1036, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="37ffa300-806e-40a7-b180-7f47bf87c136"
loc = loc.rename(columns={"spacy_concat": "spacy"})
# Derive list/set/count columns from the comma-joined LOC string.
# Vectorised assignments replace the original chained writes
# (loc['col'][i] = ...), which raise SettingWithCopyWarning and break
# under pandas copy-on-write.
split_locs = loc['spacy'].str.split(', ')
loc['spacy_count'] = split_locs.str.len()                      # number of LOCs
loc['spacy_ucount'] = split_locs.apply(lambda l: len(set(l)))  # number of unique LOCs
loc['spacy_list'] = split_locs                                 # LOCs as a list per row
loc['spacy_set'] = split_locs.apply(set)                       # unique LOCs per row
# + colab={"base_uri": "https://localhost:8080/"} id="ej2KVh27LVpy" executionInfo={"status": "ok", "timestamp": 1617998228051, "user_tz": -120, "elapsed": 1264, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="2706873f-f7f0-4d96-a244-d12686429564"
# Character length of every extracted spaCy LOC string across the
# 50-document sample (spacy_count[i] == len(spacy_list[i])).
spacy_lens = [
    len(loc.spacy_list[i][j])
    for i in range(0, 50)
    for j in range(0, loc.spacy_count[i])
]
# + colab={"base_uri": "https://localhost:8080/"} id="kk2ANR5WNeUi" executionInfo={"status": "ok", "timestamp": 1617998452904, "user_tz": -120, "elapsed": 1068, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="9eb344e3-797f-4df0-9495-a596d97a8821"
# Summary statistics of spaCy LOC string lengths.
pd.Series(spacy_lens).describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="L8Xz4_Mjs-Ec" executionInfo={"status": "ok", "timestamp": 1617730490674, "user_tz": -120, "elapsed": 1365, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="f326a2f5-94d5-4112-e49a-fd511d504c2c"
# Density of document text lengths.
sns.displot(data=loc, x='clean_text_length', kind="kde")
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="KbP-6wYLriVN" executionInfo={"status": "ok", "timestamp": 1617730494394, "user_tz": -120, "elapsed": 933, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="a1278933-7de6-477e-fea7-b85691bc5f41"
# Empirical CDF of unique spaCy LOC counts.
sns.displot(data=loc, x='spacy_ucount', kind="ecdf")
# + colab={"base_uri": "https://localhost:8080/", "height": 389} id="rd-VjEm9ssBb" executionInfo={"status": "ok", "timestamp": 1617730498037, "user_tz": -120, "elapsed": 911, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="ed519a71-4f38-4ec4-84bd-a814f8add992"
# Text length vs. number of unique spaCy LOCs.
sns.relplot(x='clean_text_length', y='spacy_ucount', data=loc)
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="y-Peq5uPxXHp" executionInfo={"status": "ok", "timestamp": 1617728424364, "user_tz": -120, "elapsed": 981, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="bcc37a5d-a1bb-4b47-b5cd-2a15161d08a0"
# Unique vs. total spaCy LOC counts.
sns.relplot(x='spacy_ucount', y='spacy_count', data=loc)
# + [markdown] id="oDY4QIn2jZ6a"
# ### Analysis File
# + id="KeE8IZLt0__o"
# load LOC file (precomputed analysis frame; rows 32/40 skipped — no data)
infile = '/content/drive/My Drive/e_rara_analysis/LOC_spacy.csv'
# Pass the path so encoding="UTF-8" is honoured (ignored for a pre-opened
# text-mode handle).
loc = pd.read_csv(infile, encoding="UTF-8", nrows=50, skiprows=[32,40])
# + colab={"base_uri": "https://localhost:8080/", "height": 247} id="IjRTsoVjjZ6b" executionInfo={"status": "ok", "timestamp": 1617994727616, "user_tz": -120, "elapsed": 825, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="d80e4ab3-0e1c-46e2-8162-e360b9a5c27d"
# Sanity check of the loaded frame.
loc.head(3)
# + colab={"base_uri": "https://localhost:8080/"} id="fNX18gEWkNUO" executionInfo={"status": "ok", "timestamp": 1617819682490, "user_tz": -120, "elapsed": 505, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="ee1e7b10-8076-4fe1-f11f-8f5d85aa1bd1"
# Rounded summary statistics of text lengths.
round(loc.clean_text_length.describe())
# + colab={"base_uri": "https://localhost:8080/"} id="wQcchhCrj31X" executionInfo={"status": "ok", "timestamp": 1617819631720, "user_tz": -120, "elapsed": 490, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="e42af09b-1b9e-4428-d2ae-ee8953223f58"
# Rounded summary statistics of unique spaCy LOC counts.
round(loc.spacy_ucount.describe())
# + colab={"base_uri": "https://localhost:8080/"} id="Ugqoqrmuol2k" executionInfo={"status": "ok", "timestamp": 1617820992691, "user_tz": -120, "elapsed": 649, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="44e3d0d7-f624-43b7-a1e2-32338c6714d3"
# A slice returns a pandas Series (not the cell value).
type(loc.spacy[3:4])
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="_MGZj_EAjZ6b" executionInfo={"status": "ok", "timestamp": 1617819487728, "user_tz": -120, "elapsed": 1045, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="adc96e43-1d0f-4af3-f343-e237e58b21a3"
# Density of unique spaCy LOC counts.
sns.displot(data=loc, x='spacy_ucount', kind="kde")
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="nVmJFIsljZ6c" executionInfo={"status": "ok", "timestamp": 1617819507181, "user_tz": -120, "elapsed": 1244, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="8c1a65cd-069e-489e-dbbc-c890bc6802fc"
# Empirical CDF of unique spaCy LOC counts.
sns.displot(data=loc, x='spacy_ucount', kind="ecdf")
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="f9cuI1qQjZ6c" executionInfo={"status": "ok", "timestamp": 1617819526061, "user_tz": -120, "elapsed": 1500, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="b7eb01a7-c3bb-4ed3-d186-3480e4b0e4cd"
# Unique vs. total spaCy LOC counts.
sns.relplot(x='spacy_ucount', y='spacy_count', data=loc)
# + [markdown] id="Qv_BVJfJMkid"
# ## LOC Analysis: Flair ner-multi-fast
# + colab={"base_uri": "https://localhost:8080/"} id="Bgj0jn-eMkig" executionInfo={"status": "ok", "timestamp": 1617818979216, "user_tz": -120, "elapsed": 2853, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="3b8a881d-2358-49cf-9889-eb5d0b322e31"
# Mount Google Drive into the Colab VM (interactive auth prompt on first run).
# Enable save and load to Google Drive
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# + id="KMGT_ATuMkii"
import pandas as pd
import seaborn as sns
# + id="wtjAHBiiMkii"
# load LOC file (Flair ner-multi-fast results; keep empty strings, not NaN)
infile = '/content/drive/My Drive/e_rara_fulltexts/bernensia/corpus_bernensia_ger_LOC_ner-multi-fast-I-full-1.csv'
# Pass the path so encoding="UTF-8" is honoured (ignored for a pre-opened
# text-mode handle).
loc2 = pd.read_csv(infile, encoding="UTF-8", keep_default_na=False, usecols=['e_rara_id', 'clean_text_length', 'ner-multi-fast_concat'])
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="lCs0ZAJgT729" executionInfo={"status": "ok", "timestamp": 1618000749517, "user_tz": -120, "elapsed": 791, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="a958de73-1e58-44bd-9164-caddc8343f1a"
# Inspect rows 36-42 (around the rows later skipped for having no data).
loc2[36:43]
# + colab={"base_uri": "https://localhost:8080/"} id="NZ2VRiPyWgAw" executionInfo={"status": "ok", "timestamp": 1618001344389, "user_tz": -120, "elapsed": 1109, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="90bb11d8-f338-4ed7-d3d9-5e9db636c257"
# NOTE(review): loc2['flair_list'] is only created by the rename/feature
# cell further down — this cell assumes that cell already ran; confirm
# execution order in the notebook.
for i in loc2[39:40].flair_list:
    print(i)
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="rov2I111VFrw" executionInfo={"status": "ok", "timestamp": 1618000673845, "user_tz": -120, "elapsed": 816, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="9c6ee6d7-2746-41ec-ff25-433b9cd0c412"
# NOTE(review): the 'flair' column is created by a later rename cell —
# assumes out-of-order execution; confirm in the notebook.
loc2.flair[36]
# + colab={"base_uri": "https://localhost:8080/"} id="iEl067DoMkik" executionInfo={"status": "ok", "timestamp": 1618001340482, "user_tz": -120, "elapsed": 855, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="bd710a28-22fc-4f04-b6f8-437c28fa52a3"
loc2 = loc2.rename(columns={"ner-multi-fast_concat": "flair"})
# Derive list/set/count columns from the comma-joined LOC string.
# Vectorised assignments replace the original chained writes
# (loc2['col'][i] = ...), which raise SettingWithCopyWarning and break
# under pandas copy-on-write.
split_locs = loc2['flair'].str.split(', ')
loc2['flair_count'] = split_locs.str.len()                      # number of LOCs
loc2['flair_ucount'] = split_locs.apply(lambda l: len(set(l)))  # number of unique LOCs
loc2['flair_set'] = split_locs.apply(set)                       # unique LOCs per row
loc2['flair_list'] = split_locs                                 # LOCs as a list per row
# + colab={"base_uri": "https://localhost:8080/"} id="0zWEIWuNzTMz" executionInfo={"status": "ok", "timestamp": 1617823528106, "user_tz": -120, "elapsed": 889, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="06c78a97-fa59-4fc8-c46f-8b481b1dd375"
# Persist the enriched Flair LOC frame to Google Drive.
# Write to Google Drive
# %cd /content/drive/My\ Drive/e_rara_analysis
outfile = "./LOC_flair.csv"
# Write via the path: an open text handle without newline='' yields
# blank lines between rows on Windows.
loc2.to_csv(outfile, index=False)  # columns=['e_rara_id', 'ner-multi-fast-I']
# %cd /content/
print("Saved to file to Google Drive.")
# + [markdown] id="ngPE-CncPkSt"
# #### with 50's set
# + id="Ec38ooCBsjQI"
# load LOC file (50-document sample; rows 32/40 skipped — no data)
infile = '/content/drive/My Drive/e_rara_fulltexts/bernensia/corpus_bernensia_ger_LOC_ner-multi-fast-I-full-1.csv'
# Pass the path so encoding="UTF-8" is honoured (ignored for a pre-opened
# text-mode handle).
loc2 = pd.read_csv(infile, encoding="UTF-8", keep_default_na=False, usecols=['e_rara_id', 'clean_text_length', 'ner-multi-fast_concat'], nrows=50, skiprows=[32,40])
# + id="WEEOyfFQPkSu"
# Character length of every Flair LOC string in the 50-document sample.
# NOTE(review): relies on flair_count/flair_list, which the next cell
# creates — confirm the intended execution order in the notebook.
flair_lens = [
    len(loc2.flair_list[i][j])
    for i in range(0, 50)
    for j in range(0, loc2.flair_count[i])
]
# + colab={"base_uri": "https://localhost:8080/"} id="s7jjauP7sngb" executionInfo={"status": "ok", "timestamp": 1618001340482, "user_tz": -120, "elapsed": 855, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="bd710a28-22fc-4f04-b6f8-437c28fa52a3"
loc2 = loc2.rename(columns={"ner-multi-fast_concat": "flair"})
# Derive list/set/count columns from the comma-joined LOC string.
# Vectorised assignments replace the original chained writes
# (loc2['col'][i] = ...), which raise SettingWithCopyWarning and break
# under pandas copy-on-write.
split_locs = loc2['flair'].str.split(', ')
loc2['flair_count'] = split_locs.str.len()                      # number of LOCs
loc2['flair_ucount'] = split_locs.apply(lambda l: len(set(l)))  # number of unique LOCs
loc2['flair_set'] = split_locs.apply(set)                       # unique LOCs per row
loc2['flair_list'] = split_locs                                 # LOCs as a list per row
# + colab={"base_uri": "https://localhost:8080/"} id="pzWPwRAqPkSv" executionInfo={"status": "ok", "timestamp": 1617998865303, "user_tz": -120, "elapsed": 928, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="53ac4fc1-9ab7-4ddc-b24b-f89c17e5d9dc"
# Summary statistics of Flair LOC string lengths.
pd.Series(flair_lens).describe()
# + [markdown] id="wmZInnpeI6BS"
# ### Analysis File
# + id="vBjZrKsJI8oq"
# load analysis file (rows 32/40 skipped — no data)
infile = '/content/drive/My Drive/e_rara_analysis/LOC_flair.csv'
# Pass the path so encoding="UTF-8" is honoured (ignored for a pre-opened
# text-mode handle).
loc2 = pd.read_csv(infile, encoding="UTF-8", nrows=50, skiprows=[32,40])  # skip rows without data
# + colab={"base_uri": "https://localhost:8080/"} id="7Jynqz0-JP2S" executionInfo={"status": "ok", "timestamp": 1617819023460, "user_tz": -120, "elapsed": 644, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="1816a10c-bcf9-4256-dec8-ec692e21e975"
# Column dtypes after the CSV round-trip (lists/sets come back as strings).
loc2.dtypes
# + colab={"base_uri": "https://localhost:8080/"} id="u882d3FpqpX8" executionInfo={"status": "ok", "timestamp": 1617994881713, "user_tz": -120, "elapsed": 804, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="741d8514-cdb6-4eaa-c942-4c32b7ec6b2d"
# Summary statistics of unique Flair LOC counts.
loc2.flair_ucount.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="DB5Tz_2InqDN" executionInfo={"status": "ok", "timestamp": 1617820625868, "user_tz": -120, "elapsed": 495, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="5fc55a40-8871-4390-ec5f-c6634a5714e5"
# Rounded summary statistics of unique Flair LOC counts.
round(loc2.flair_ucount.describe())
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="dIlixAe6Mkik" executionInfo={"status": "ok", "timestamp": 1617819110019, "user_tz": -120, "elapsed": 863, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="e0d8dab4-6ba4-4028-f748-f3f80232ca24"
# Density of document text lengths.
sns.displot(data=loc2, x='clean_text_length', kind="kde")
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="urIqNh8niFVH" executionInfo={"status": "ok", "timestamp": 1617819048197, "user_tz": -120, "elapsed": 860, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="36a57d49-0ead-4740-e165-441a35a27317"
# Density of total Flair LOC counts.
sns.displot(data=loc2, x='flair_count', kind="kde")
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="_Crn2LTpiTX1" executionInfo={"status": "ok", "timestamp": 1617819169440, "user_tz": -120, "elapsed": 864, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="552cd719-ffad-45fc-9bdb-cb66b4bfb580"
# Histogram of unique Flair LOC counts.
sns.displot(data=loc2, x='flair_ucount', kind="hist")
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="53oe8fvFMkil" executionInfo={"status": "ok", "timestamp": 1617819202330, "user_tz": -120, "elapsed": 920, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="29b3db5c-52bc-4cce-e22e-882bc4e508aa"
# Empirical CDF of unique Flair LOC counts.
sns.displot(data=loc2, x='flair_ucount', kind="ecdf")
# + colab={"base_uri": "https://localhost:8080/", "height": 389} id="YkeQsUAAMkim" executionInfo={"status": "ok", "timestamp": 1617819209943, "user_tz": -120, "elapsed": 1718, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="7a1b440e-036e-48d5-a6ad-8f01b6393d3e"
# Text length vs. number of unique Flair LOCs.
sns.relplot(x='clean_text_length', y='flair_ucount', data=loc2)
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="jZmLcjSyMkin" executionInfo={"status": "ok", "timestamp": 1617819220016, "user_tz": -120, "elapsed": 1147, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="e02da31b-4c30-4f5a-c619-5d1c6c5c7487"
# Unique vs. total Flair LOC counts.
sns.relplot(x='flair_ucount', y='flair_count', data=loc2)
# + [markdown] id="9pEJV4V0GAmo"
# ## LOC Analysis: BP historic-ner-onb
# + colab={"base_uri": "https://localhost:8080/"} id="MyEU0LkOGAmo" executionInfo={"status": "ok", "timestamp": 1617745807192, "user_tz": -120, "elapsed": 1817, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="c4dfe9e6-6cbe-46aa-93f1-052cbcb0d6b8"
# Mount Google Drive into the Colab VM (interactive auth prompt on first run).
# Enable save and load to Google Drive
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# + id="4lOuunJvGAmo"
import pandas as pd
import seaborn as sns
# + id="wGuX4pVwGAmo"
# load LOC file (dbmdz historic-ner-onb results; keep empty strings, not NaN)
infile = '/content/drive/My Drive/e_rara_fulltexts/bernensia/corpus_bernensia_ger_LOC_dbmdz-historic-ner-onb-I-full.csv'
# Pass the path so encoding="UTF-8" is honoured (ignored for a pre-opened
# text-mode handle).
loc3 = pd.read_csv(infile, encoding="UTF-8", keep_default_na=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="fsoY4iWzOo74" executionInfo={"status": "ok", "timestamp": 1617999133233, "user_tz": -120, "elapsed": 1031, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="fc65c0b6-67fe-4d2f-94cc-35ba0a9ade2f"
# Inspect rows 30-59 of the raw frame.
loc3[30:60]
# + colab={"base_uri": "https://localhost:8080/"} id="l-Xu5_0yPmj_" executionInfo={"status": "ok", "timestamp": 1617817767994, "user_tz": -120, "elapsed": 958, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="2d297505-c5df-4a40-92bf-563b7167c1ce"
# Peek at the raw comma-joined LOC string of the first document.
print(loc3['historic-ner-onb_concat'][0])
# + colab={"base_uri": "https://localhost:8080/"} id="6irhLSjnGAmo" executionInfo={"status": "ok", "timestamp": 1618000818452, "user_tz": -120, "elapsed": 1048, "user": {"displayName": "Kathi Woitas", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="2571026f-e9dc-4038-cbac-512e571f5bb8"
loc3 = loc3.rename(columns={"historic-ner-onb_concat": "bpe"})
# BUG FIX: the original looped `for i in loc2.index` — i.e. over the
# *flair* frame's index — leaving rows of loc3 unfilled whenever the two
# frames differ in length.  Vectorised assignments cover every row of
# loc3 and also avoid the chained-write SettingWithCopyWarning (broken
# under pandas copy-on-write).
split_locs = loc3['bpe'].str.split(', ')
loc3['bpe_count'] = split_locs.str.len()                      # number of LOCs
loc3['bpe_ucount'] = split_locs.apply(lambda l: len(set(l)))  # number of unique LOCs
loc3['bpe_set'] = split_locs.apply(set)                       # unique LOCs per row
loc3['bpe_list'] = split_locs                                 # LOCs as a list per row
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="viDvoBBWGAmo" executionInfo={"status": "ok", "timestamp": 1617999155607, "user_tz": -120, "elapsed": 905, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="982268b3-fede-4f1d-d083-dcd8a8731569"
# Inspect rows 30-59 after feature derivation.
loc3[30:60]
# + colab={"base_uri": "https://localhost:8080/"} id="9nCpvSYFXUOK" executionInfo={"status": "ok", "timestamp": 1618001261247, "user_tz": -120, "elapsed": 1103, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="ad9bd41f-54ff-403a-fd43-1f933564ab09"
# Inspect the BPE-model LOC list of row 39.
for i in loc3[39:40].bpe_list:
    print(i)
# + colab={"base_uri": "https://localhost:8080/"} id="Q9G4lQZyGAmp" executionInfo={"status": "ok", "timestamp": 1617999075334, "user_tz": -120, "elapsed": 964, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="a981a9aa-0021-452f-85c2-afe004e62327"
# Persist the enriched historic-ner-onb LOC frame to Google Drive.
# Write to Google Drive
# %cd /content/drive/My\ Drive/e_rara_analysis
outfile = "./LOC_bpe.csv"
# Write via the path: an open text handle without newline='' yields
# blank lines between rows on Windows.
loc3.to_csv(outfile, index=False)
# %cd /content/
print("Saved to file to Google Drive.")
# + [markdown] id="dp9x7Z4vRJgd"
# #### with 50's set
# + id="qIyzj4_atELJ"
# load LOC file (50-document sample; rows 32/40 skipped — no data)
infile = '/content/drive/My Drive/e_rara_fulltexts/bernensia/corpus_bernensia_ger_LOC_dbmdz-historic-ner-onb-I-full.csv'
# Pass the path so encoding="UTF-8" is honoured (ignored for a pre-opened
# text-mode handle).
loc3 = pd.read_csv(infile, encoding="UTF-8", keep_default_na=False, nrows=50, skiprows=[32,40])
# + colab={"base_uri": "https://localhost:8080/"} id="VrnXifBytQOC" executionInfo={"status": "ok", "timestamp": 1618000818452, "user_tz": -120, "elapsed": 1048, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="2571026f-e9dc-4038-cbac-512e571f5bb8"
loc3 = loc3.rename(columns={"historic-ner-onb_concat": "bpe"})
# BUG FIX: the original looped `for i in loc2.index` (the flair frame's
# index) instead of loc3's own index.  Vectorised assignments process
# every row of loc3 and avoid the chained-write SettingWithCopyWarning
# (broken under pandas copy-on-write).
split_locs = loc3['bpe'].str.split(', ')
loc3['bpe_count'] = split_locs.str.len()                      # number of LOCs
loc3['bpe_ucount'] = split_locs.apply(lambda l: len(set(l)))  # number of unique LOCs
loc3['bpe_set'] = split_locs.apply(set)                       # unique LOCs per row
loc3['bpe_list'] = split_locs                                 # LOCs as a list per row
# + id="YxHF7h1VRJge"
# Character length of every historic-ner-onb LOC string in the
# 50-document sample (bpe_count[i] == len(bpe_list[i])).
bpe_lens = [
    len(loc3.bpe_list[i][j])
    for i in range(0, 50)
    for j in range(0, loc3.bpe_count[i])
]
# + colab={"base_uri": "https://localhost:8080/"} id="ROeAkssxRJgf" executionInfo={"status": "ok", "timestamp": 1617999302162, "user_tz": -120, "elapsed": 785, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="42167a77-3b0b-49e2-c9f2-de8253991231"
# Summary statistics of historic-ner-onb LOC string lengths.
pd.Series(bpe_lens).describe()
# + [markdown] id="UaRMuwl8OBi3"
# ### Analysis File
# + id="A75BrLFUODgZ"
# load analysis file (rows 32/40 skipped — no data)
infile = '/content/drive/My Drive/e_rara_analysis/LOC_bpe.csv'
# Pass the path so encoding="UTF-8" is honoured (ignored for a pre-opened
# text-mode handle).
loc3 = pd.read_csv(infile, encoding="UTF-8", nrows=50, skiprows=[32,40])  # skip rows without data nrows=50, skiprows=[32,40]
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="RgcFE9ypec4l" executionInfo={"status": "ok", "timestamp": 1617819271513, "user_tz": -120, "elapsed": 368, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="5925e78d-8a2c-4289-f9bb-616b9b6d91ff"
# Sanity check of the loaded frame.
loc3.head(3)
# + colab={"base_uri": "https://localhost:8080/"} id="o7vGCyBDqztq" executionInfo={"status": "ok", "timestamp": 1617821394709, "user_tz": -120, "elapsed": 502, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="5635795e-fb03-4476-edbe-c9bbdda84c68"
# After the CSV round-trip the set comes back as a string, not a set.
type(loc3.bpe_set[4])
# + colab={"base_uri": "https://localhost:8080/"} id="n4-4iclFoCQw" executionInfo={"status": "ok", "timestamp": 1617994929071, "user_tz": -120, "elapsed": 1205, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="8906a515-a4cf-4574-9170-2e89941d9988"
# Summary statistics of unique historic-ner-onb LOC counts.
loc3.bpe_ucount.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="T01UXQqJGAmp" executionInfo={"status": "ok", "timestamp": 1617819280956, "user_tz": -120, "elapsed": 873, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="5fc53bee-9433-4362-8739-130c994fd529"
# Density of unique historic-ner-onb LOC counts.
sns.displot(data=loc3, x='bpe_ucount', kind="kde")
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="G7UalSonGAmp" executionInfo={"status": "ok", "timestamp": 1617819315311, "user_tz": -120, "elapsed": 1165, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="69f5a0f4-1c45-4080-d808-961921f97afb"
# Empirical CDF of unique historic-ner-onb LOC counts.
sns.displot(data=loc3, x='bpe_ucount', kind="ecdf")
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="3P_lXQzzGAmq" executionInfo={"status": "ok", "timestamp": 1617819347015, "user_tz": -120, "elapsed": 1692, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="b9f1290b-512e-49cf-c1f4-3add7013d8f8"
# Unique vs. total historic-ner-onb LOC counts.
sns.relplot(x='bpe_ucount', y='bpe_count', data=loc3)
# + [markdown] id="1v0yntZC0D2x"
# ## LOC Analysis: Comparison
# + [markdown] id="0rEwM8lr1SUw"
# https://realpython.com/python-sets/
# + [markdown] id="otdDCb_q6_sb"
# ### Spacy - Flair
# + id="-5Z6O4Nopm7Y"
# Per-document set comparison of the unique LOCs found by spaCy vs. Flair.
# Rows are collected as dicts and turned into a DataFrame in one step; the
# original filled an empty frame through chained writes (locs.col[i] = ...),
# which raise SettingWithCopyWarning and break under pandas copy-on-write.
_rows = []
for i in range(0, 50):
    spacy_set = loc['spacy_set'][i]
    flair_set = loc2['flair_set'][i]
    _rows.append({
        'e_rara_id': loc['e_rara_id'][i],
        'text_len': loc['clean_text_length'][i],
        'intersect': spacy_set.intersection(flair_set),               # LOCs in both
        'intersect_c': len(spacy_set.intersection(flair_set)),
        'diff_spacy': spacy_set.difference(flair_set),                # LOCs in spacy, but not in flair
        'diff_spacy_c': len(spacy_set.difference(flair_set)),
        'diff_flair': flair_set.difference(spacy_set),                # LOCs in flair, but not in spacy
        'diff_flair_c': len(flair_set.difference(spacy_set)),
        'sym_diff': spacy_set.symmetric_difference(flair_set),        # only in either set / opposite to intersection
        'sym_diff_c': len(spacy_set.symmetric_difference(flair_set)),
    })
locs = pd.DataFrame(_rows, columns=['e_rara_id', 'text_len', 'intersect', 'intersect_c', 'diff_spacy', 'diff_spacy_c',
                                    'diff_flair', 'diff_flair_c', 'sym_diff', 'sym_diff_c'], index=range(50))
# + colab={"base_uri": "https://localhost:8080/", "height": 253} id="_pEYDgqR0wG-" executionInfo={"status": "ok", "timestamp": 1617831080717, "user_tz": -120, "elapsed": 538, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="4d1c4f05-e738-4d6a-952d-ce4b8d114987"
# Inspect two comparison rows.
locs[4:6]
# + colab={"base_uri": "https://localhost:8080/"} id="JyNm6hAV1sCT" executionInfo={"status": "ok", "timestamp": 1617829680743, "user_tz": -120, "elapsed": 504, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="1a2d0386-f9d5-494b-a4b3-4dbee76d5733"
# Persist the spaCy/Flair comparison frame to Google Drive.
# Write to Google Drive
# %cd /content/drive/My\ Drive/e_rara_analysis
outfile = "./LOC_match_spacy_flair.csv"
# Write via the path: an open text handle without newline='' yields
# blank lines between rows on Windows.
locs.to_csv(outfile, index=False)
# %cd /content/
print("Saved to file to Google Drive.")
# + id="ZY-4RZav20x3"
# load analysis file
infile = '/content/drive/My Drive/e_rara_analysis/LOC_match_spacy_flair.csv'
# Pass the path so encoding="UTF-8" is honoured (ignored for a pre-opened
# text-mode handle).
locs = pd.read_csv(infile, encoding="UTF-8")
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="bjep_uEN3F0d" executionInfo={"status": "ok", "timestamp": 1617824612911, "user_tz": -120, "elapsed": 509, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="9a512d3b-c80a-4dc0-b2cf-c6781a0f825b"
# Sanity check of the loaded comparison frame.
locs.head(3)
# + colab={"base_uri": "https://localhost:8080/"} id="2LBhbUMa3jSl" executionInfo={"status": "ok", "timestamp": 1617829737691, "user_tz": -120, "elapsed": 910, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="37003be5-b1bd-4aeb-f00f-b88446980b5b"
# Summary statistics of the spaCy/Flair intersection sizes.
locs.intersect_c.describe()
# + id="igOfNV2jOlYD"
# Fraction of each tool's unique LOCs that both tools agree on.
# .at[] scalar writes replace the original chained assignment
# (share.spacy[i] = ...), which raises SettingWithCopyWarning and breaks
# under pandas copy-on-write.
share = pd.DataFrame(columns=['spacy', 'flair'], index=range(50))
for i in range(0, 50):
    share.at[i, 'spacy'] = locs.intersect_c[i] / loc.spacy_ucount[i]
    share.at[i, 'flair'] = locs.intersect_c[i] / loc2.flair_ucount[i]
# + colab={"base_uri": "https://localhost:8080/", "height": 401} id="2K8BRU-TOx6z" executionInfo={"status": "ok", "timestamp": 1617831112948, "user_tz": -120, "elapsed": 1352, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="1b4bfe97-f348-4a68-a167-46e4ac3317be"
# Agreement share per tool: spaCy vs. Flair.
sns.relplot(y='flair', x='spacy', data=share)
# + [markdown] id="lR1FiMmT7KE8"
# ### Spacy - BPE
# + id="WwgxmNQp7KE8"
# Per-document set comparison of the unique LOCs found by spaCy vs. the
# historic-ner-onb (BPE) model.  Rows are collected as dicts and turned
# into a DataFrame in one step; the original filled an empty frame via
# chained writes (locs.col[i] = ...), which raise SettingWithCopyWarning
# and break under pandas copy-on-write.
_rows = []
for i in range(0, 50):
    spacy_set = loc['spacy_set'][i]
    bpe_set = loc3['bpe_set'][i]
    _rows.append({
        'e_rara_id': loc['e_rara_id'][i],
        'text_len': loc['clean_text_length'][i],
        'intersect': spacy_set.intersection(bpe_set),             # LOCs in both
        'intersect_c': len(spacy_set.intersection(bpe_set)),
        'diff_spacy': spacy_set.difference(bpe_set),              # LOCs in spacy, but not in bpe
        'diff_spacy_c': len(spacy_set.difference(bpe_set)),
        'diff_bpe': bpe_set.difference(spacy_set),                # LOCs in bpe, but not in spacy
        'diff_bpe_c': len(bpe_set.difference(spacy_set)),
        'sym_diff': spacy_set.symmetric_difference(bpe_set),      # only in either set / opposite to intersection
        'sym_diff_c': len(spacy_set.symmetric_difference(bpe_set)),
    })
locs = pd.DataFrame(_rows, columns=['e_rara_id', 'text_len', 'intersect', 'intersect_c', 'diff_spacy', 'diff_spacy_c',
                                    'diff_bpe', 'diff_bpe_c', 'sym_diff', 'sym_diff_c'], index=range(50))
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="rLsW4a2a7KE9" executionInfo={"status": "ok", "timestamp": 1617829791684, "user_tz": -120, "elapsed": 1811, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="778cb860-06f2-48d4-a8dc-1e1119fd773d"
# Display the full comparison frame.
locs
# + colab={"base_uri": "https://localhost:8080/"} id="3fmPHj2h7KE9" executionInfo={"status": "ok", "timestamp": 1617829800788, "user_tz": -120, "elapsed": 518, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="269b25fa-fa72-43eb-e631-e246ee783ac1"
# Persist the spaCy/BPE comparison frame to Google Drive.
# Write to Google Drive
# %cd /content/drive/My\ Drive/e_rara_analysis
outfile = "./LOC_match_spacy_bpe.csv"
# Write via the path: an open text handle without newline='' yields
# blank lines between rows on Windows.
locs.to_csv(outfile, index=False)
# %cd /content/
print("Saved to file to Google Drive.")
# + id="fTk26fdN7KE9"
# load analysis file
infile = '/content/drive/My Drive/e_rara_analysis/LOC_match_spacy_bpe.csv'
# Pass the path so encoding="UTF-8" is honoured (ignored for a pre-opened
# text-mode handle).
locs = pd.read_csv(infile, encoding="UTF-8")
# + colab={"base_uri": "https://localhost:8080/", "height": 354} id="PlyitBes7KE-" executionInfo={"status": "ok", "timestamp": 1617829810902, "user_tz": -120, "elapsed": 533, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="913bcd33-3dd0-4c8a-cad8-60b2824f0d1c"
# Sanity check of the loaded comparison frame.
locs.head(3)
# + id="3YwRdW2b7KE-"
#locs = locs.convert_dtypes()
# + colab={"base_uri": "https://localhost:8080/"} id="W-vDYEV97KE-" executionInfo={"status": "ok", "timestamp": 1617825966025, "user_tz": -120, "elapsed": 526, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="b67a6d0a-4f3b-4454-e404-0e8d5bf1715c"
locs.dtypes
# + colab={"base_uri": "https://localhost:8080/"} id="LlEruuo87KE-" executionInfo={"status": "ok", "timestamp": 1617829849049, "user_tz": -120, "elapsed": 828, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="6e442591-3b14-469e-ae72-a707f024b941"
locs.sym_diff_c.describe()
# + id="oAel8wP7QYgJ"
# Share of overlapping LOCs relative to each tagger's unique-LOC count,
# i.e. what fraction of spaCy's / BPE's locations is confirmed by the other.
share = pd.DataFrame(columns = ['spacy', 'bpe'], index=range(50))
for i in range(0,50):
    # Use .loc instead of chained indexing (share.spacy[i] = ...): chained
    # assignment triggers SettingWithCopyWarning and stops working under
    # pandas copy-on-write (pandas 2.x default behaviour).
    share.loc[i, 'spacy'] = locs.intersect_c[i] / loc.spacy_ucount[i]
    share.loc[i, 'bpe'] = locs.intersect_c[i] / loc3.bpe_ucount[i]
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="t-XinLBRQbzC" executionInfo={"status": "ok", "timestamp": 1617831564094, "user_tz": -120, "elapsed": 831, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="543312f4-d961-4bb0-e114-b8e8d1019c63"
sns.scatterplot(x='spacy', y='bpe', data=share)
# + [markdown] id="jq-AFmKg9JpH"
# ### Flair - BPE
# + id="98kRZ2vu9JpI"
# Compare the flair and BPE location sets per document (same schema as the
# spaCy-based comparisons earlier in this notebook).
locs = pd.DataFrame(columns = ['e_rara_id', 'text_len', 'intersect', 'intersect_c', 'diff_flair', 'diff_flair_c', \
                               'diff_bpe', 'diff_bpe_c', 'sym_diff', 'sym_diff_c'], index=range(50))
for i in range(0,50):
    locs.e_rara_id[i] = loc['e_rara_id'][i]
    locs.text_len[i] = loc['clean_text_length'][i]
    locs.intersect[i] = loc2['flair_set'][i].intersection(loc3['bpe_set'][i]) # LOCs in both
    locs.intersect_c[i] = len(locs.intersect[i])
    locs.diff_flair[i] = loc2['flair_set'][i].difference(loc3['bpe_set'][i]) # LOCs in flair, but not in bpe
    locs.diff_flair_c[i] = len(locs.diff_flair[i])
    locs.diff_bpe[i] = loc3['bpe_set'][i].difference(loc2['flair_set'][i]) # LOCs in bpe, but not in flair
    locs.diff_bpe_c[i] = len(locs.diff_bpe[i])
    locs.sym_diff[i] = loc2['flair_set'][i].symmetric_difference(loc3['bpe_set'][i]) # only in either set / opposite to intersection
    locs.sym_diff_c[i] = len(locs.sym_diff[i])
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Wj_9YBjc9JpI" executionInfo={"status": "ok", "timestamp": 1617829872777, "user_tz": -120, "elapsed": 1258, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="c9fbebdb-d72e-4689-fc49-d0d095e54fed"
locs
# + colab={"base_uri": "https://localhost:8080/"} id="NmqylmYL9JpK" executionInfo={"status": "ok", "timestamp": 1617829881275, "user_tz": -120, "elapsed": 518, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="ddef0e03-0b21-4ffe-c6ac-65c1a2ff37b4"
# Write to Google Drive
# Persist the flair-vs-BPE comparison table (same pattern as the
# spaCy-vs-BPE export above; the # %cd magics only run in Colab/Jupyter).
# %cd /content/drive/My\ Drive/e_rara_analysis
outfile = "./LOC_match_flair_bpe.csv"
with open(outfile, "w") as f:
    locs.to_csv(f, index=False)
# %cd /content/
print("Saved to file to Google Drive.")
# + id="4lYnxrGs9JpK"
# load analysis file
# Reload the flair-vs-BPE table (set-valued columns come back as strings).
infile = '/content/drive/My Drive/e_rara_analysis/LOC_match_flair_bpe.csv'
with open(infile, 'r') as f:
    locs = pd.read_csv(f, encoding="UTF-8")
# + colab={"base_uri": "https://localhost:8080/", "height": 337} id="TEglkUow9JpK" executionInfo={"status": "ok", "timestamp": 1617826405293, "user_tz": -120, "elapsed": 593, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="e4981e80-4915-4579-9124-f8e7f05d7309"
locs.head(3)
# + id="_1ndjG3V9JpL"
#locs = locs.convert_dtypes()
# + colab={"base_uri": "https://localhost:8080/"} id="qVveyLq_9JpL" executionInfo={"status": "ok", "timestamp": 1617826421114, "user_tz": -120, "elapsed": 670, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="24ce2ca0-d706-4f0f-d9ea-e40be8c07ac7"
locs.dtypes
# + colab={"base_uri": "https://localhost:8080/"} id="U2FHUPU89JpM" executionInfo={"status": "ok", "timestamp": 1617829917731, "user_tz": -120, "elapsed": 503, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="b60ece86-b54c-4d50-f501-98878c3a32b5"
locs.intersect_c.describe()
# + id="GgiSQQaBH7VX"
# Share of overlapping LOCs relative to each tagger's unique-LOC count,
# i.e. what fraction of BPE's / flair's locations is confirmed by the other.
share = pd.DataFrame(columns = ['bpe', 'flair'], index=range(50))
for i in range(0,50):
    # Use .loc instead of chained indexing (share.bpe[i] = ...): chained
    # assignment triggers SettingWithCopyWarning and stops working under
    # pandas copy-on-write (pandas 2.x default behaviour).
    share.loc[i, 'bpe'] = locs.intersect_c[i] / loc3.bpe_ucount[i]
    share.loc[i, 'flair'] = locs.intersect_c[i] / loc2.flair_ucount[i]
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="_thhmg_sIr9V" executionInfo={"status": "ok", "timestamp": 1617830252187, "user_tz": -120, "elapsed": 932, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="5df86a30-bf5e-4081-8e9e-645c58615b90"
share.bpe.hist(bins=20)
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="Hwhr7dN7Hgzm" executionInfo={"status": "ok", "timestamp": 1617830316387, "user_tz": -120, "elapsed": 930, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="fdcdcaa5-9d7a-492c-84c7-9e088a530b33"
sns.rugplot(data=share, x='bpe',y='flair')
# + colab={"base_uri": "https://localhost:8080/", "height": 401} id="BsUTtKJgNJNI" executionInfo={"status": "ok", "timestamp": 1617830467757, "user_tz": -120, "elapsed": 1278, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="4eaff01e-93c3-4757-93d3-621c46b3d4d4"
sns.relplot(data=share, x='bpe',y='flair')
# + [markdown] id="2kAWf-p1lcEd"
# ### Metadata
# + id="dzq7DO0w-F-O"
# load LOC file
# Load document metadata, keeping only the columns used below.
# skiprows=[32,40] drops two specific records — presumably excluded
# deliberately so the 50 rows line up with the analysis tables; TODO confirm.
infile = '/content/drive/My Drive/e_rara_metadata/metadata_2021-02-28.csv'
with open(infile, 'r') as f:
    md = pd.read_csv(f, encoding="UTF-8", usecols=['e_rara_id', 'title', 'startdate', 'dateissued'], nrows=50, skiprows=[32,40]) #
# + colab={"base_uri": "https://localhost:8080/"} id="9mmOVI8yZ2sw" executionInfo={"status": "ok", "timestamp": 1618002283543, "user_tz": -120, "elapsed": 1076, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="169bc193-14e6-4695-f5c9-992f2075292e"
# Print the title(s) of one specific document (e-rara id 17996385) for inspection.
for i in md[md.e_rara_id==17996385].title:
  print(i)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="JYW1sWhd5501" executionInfo={"status": "ok", "timestamp": 1618001932182, "user_tz": -120, "elapsed": 817, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="a3c44ea3-02c1-4369-9d11-087afa5a2357"
md
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="aNgL1uX2LhAu" executionInfo={"status": "ok", "timestamp": 1617993722289, "user_tz": -120, "elapsed": 1713, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="98a4d2d8-0cc0-49af-e393-14184735eb42"
sns.displot(data=md, x='startdate', kind="kde")
# + id="JTCCX70G9tsn"
# Attach metadata columns (join is by positional index, so row order must
# match); md's duplicate e_rara_id column gets suffix 'x' and is dropped.
locs = locs.join(md, rsuffix='x').drop(columns='e_rara_idx')
# + colab={"base_uri": "https://localhost:8080/"} id="teBl14BFyX8w" executionInfo={"status": "ok", "timestamp": 1617745936216, "user_tz": -120, "elapsed": 899, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="7c110afc-a79a-405a-b29e-f1c36853303e"
# Write to Google Drive
# Persist the metadata-enriched spaCy-vs-flair table
# (the # %cd magics only run in Colab/Jupyter).
# %cd /content/drive/My\ Drive/e_rara_analysis
outfile = "./LOC_spacy_flair.csv"
with open(outfile, "w") as f:
    locs.to_csv(f, index=False)
# %cd /content/
print("Saved to file to Google Drive.")
# + id="5oCgzfsi47cC"
# load LOC file
# Reload the enriched spaCy-vs-flair table for the plots below.
infile = '/content/drive/My Drive/e_rara_analysis/LOC_spacy_flair.csv'
with open(infile, 'r') as f:
    locs = pd.read_csv(f, encoding="UTF-8")
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="fd7JpsxJ0l26" executionInfo={"status": "ok", "timestamp": 1617745961254, "user_tz": -120, "elapsed": 1310, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="29140720-47f6-46fb-eee4-f6e233c19aea"
sns.displot(data=locs, x='intersect_c', kind="hist")
# + colab={"base_uri": "https://localhost:8080/", "height": 401} id="7nautUAe4dlU" executionInfo={"status": "ok", "timestamp": 1617745972179, "user_tz": -120, "elapsed": 901, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="6d4492e4-d833-41c9-8275-ecd1e274f20d"
sns.relplot(x='startdate', y='intersect_c', data=locs)
# + colab={"base_uri": "https://localhost:8080/", "height": 401} id="f-zi1iPULMyT" executionInfo={"status": "ok", "timestamp": 1617746009374, "user_tz": -120, "elapsed": 1327, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="892e2eb1-b1b0-424c-b620-89340439012f"
sns.relplot(x='startdate', y='text_len', data=locs)
# + colab={"base_uri": "https://localhost:8080/"} id="FlhMC6Bu3FSd" executionInfo={"status": "ok", "timestamp": 1617740804289, "user_tz": -120, "elapsed": 520, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="2336c7c9-fbf7-4921-ab61-85eff86dff59"
round(locs.intersect_c.describe(),0)
# + colab={"base_uri": "https://localhost:8080/"} id="1dfvbmWS3L_m" executionInfo={"status": "ok", "timestamp": 1617740855359, "user_tz": -120, "elapsed": 520, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="c0fc586a-5bf0-43d0-b8ff-3914c03719da"
round(locs.sym_diff_c.describe(),0)
# + colab={"base_uri": "https://localhost:8080/"} id="ZHLw3pav3vXt" executionInfo={"status": "ok", "timestamp": 1617740901161, "user_tz": -120, "elapsed": 571, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="2d1f2413-62aa-44b2-f9f2-b3862427bdf6"
round(locs.diff_flair_c.describe(),0)
# + colab={"base_uri": "https://localhost:8080/"} id="wUMLoOD23MRc" executionInfo={"status": "ok", "timestamp": 1617740881973, "user_tz": -120, "elapsed": 747, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="eb05d182-9b7e-4323-c8cc-1f97a179f50a"
round(locs.diff_spacy_c.describe(),0)
# + [markdown] id="UhIOnO54-IFJ"
# ### Matchings
# + id="mI_0lzXr-Nfi"
# load LOC file
# Reload the flair-vs-BPE matching table for the summary statistics below.
infile = '/content/drive/My Drive/e_rara_analysis/LOC_match_flair_bpe.csv'
with open(infile, 'r') as f:
    match = pd.read_csv(f, encoding="UTF-8")
# + colab={"base_uri": "https://localhost:8080/"} id="Bitm6g4m-XaW" executionInfo={"status": "ok", "timestamp": 1617994650556, "user_tz": -120, "elapsed": 757, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgeucoVZdbDUDaaQBWBqFxRyP4yjPAl18LuuUrz=s64", "userId": "18347744419010233607"}} outputId="a62e456a-abb3-4a28-e7e7-236cedc2aa68"
match.sym_diff_c.describe()
| code/LOC_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# +
import sys
import types
import pandas as pd
# Leftovers from an IBM Cloud Object Storage access path, kept commented out
# so the notebook reads a local CSV instead:
#from botocore.client import Config
#import ibm_boto3
# Stub __iter__; only needed to make the (commented-out) COS response body
# below look file-like to pandas. Unused while that code stays commented out.
def _iter_(self): return 0
# @hidden_cell
# The following code accesses a file in your IBM Cloud Object Storage. It includes your credentials.
# You might want to remove those credentials before you share your notebook.
#client_5022edb052cb493da82d0f09840119df = ibm_boto3.client(service_name='s3',
# ibm_api_key_id='<KEY>',
# ibm_auth_endpoint="https://iam.eu-gb.bluemix.net/oidc/token",
# config=Config(signature_version='oauth'),
#endpoint_url='https://s3.eu-geo.objectstorage.service.networklayer.com')
#body=client_5022ebd052cb493da82d0f09840119df.get_object(Bucket='batch2-donotdelete-pr-jqe78ba82u2s6i',key='diabetes.csv')['body']
# add missing_iter_method,so pandas accepts body as file-like object
#if not hasattr(body,"_iter_"): body._iter_=types.MethodType(_iter_,body)
# Read the diabetes dataset from the local working directory.
dataset=pd.read_csv("diabetes.csv")
dataset.head()
# -
print(dataset.info())
# Features: every column except the last; target: column 8 (the last column,
# the class label used for classification below).
X = dataset.iloc[:,:-1].values
Y = dataset.iloc[:,8].values
plt.style.use('fivethirtyeight')
dataset.columns
# Box plots of the raw features to eyeball ranges/outliers before scaling.
fig = plt.figure(figsize=(15,6))
plt.boxplot(X, vert = False,labels=['Pregnancies','Glucose','BloodPressure','SkinThickness','Insulin','BMI',
                'DiabetesPedigreeFunction', 'Age'], patch_artist=True)
plt.show()
# Feature correlation heatmap.
corr = dataset.corr()
import seaborn as sns
plt.figure(figsize = (10,6))
sns.heatmap(corr,annot = True,cmap = 'Set1')
plt.show()
# Scale all features into [0,1] so distance-based models (KNN/SVM) are not
# dominated by large-range features; also keeps values non-negative, which
# MultinomialNB (used below) requires.
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range=(0,1))
X_new = sc.fit_transform(X)
X_new
X_new[0]
# Same box plots after scaling.
# NOTE(review): 'skinThickness' / 'DiabetesPeddigreeFunction' below are
# misspelled axis labels (display only; no effect on the model).
fig = plt.figure(figsize=(15,6))
plt.boxplot(X_new, vert = False,labels= ['Pregnancies','Glucose','BloodPressure','skinThickness','Insulin',
                 'BMI', 'DiabetesPeddigreeFunction','Age'],patch_artist=True)
plt.show()
# Persist the scaler's per-feature min/max so the identical scaling can be
# re-applied at inference time.
mx = sc.data_max_
mn = sc.data_min_
np.savez('scale.npz',mn,mx)
print(mx)
print(mn)
# Bug fix: sklearn.cross_validation was deprecated in 0.18 and removed in
# scikit-learn 0.20 — train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
# Hold out 20% of the scaled data for testing; fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(X_new,Y,test_size = 0.2,random_state =0)
x_train.shape, x_test.shape, y_train.shape, y_test.shape
from sklearn.naive_bayes import MultinomialNB, GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
# Instantiate one classifier per algorithm family.
# NOTE(review): GaussianNB is imported above but never used.
model_log = LogisticRegression(C = 10.0)  # C=10 -> weak regularisation
model_knn = KNeighborsClassifier(n_neighbors=3)
model_svm = SVC(C = 10.0,probability=True)  # probability=True enables predict_proba
model_dt = DecisionTreeClassifier()
model_rf = RandomForestClassifier(n_estimators=100)
model_nb = MultinomialNB(alpha = 1)  # alpha=1 -> Laplace smoothing
# Fit each model on the same scaled training split.
model_log.fit(x_train, y_train)# training model
model_knn.fit(x_train, y_train)# training model
model_svm.fit(x_train, y_train)# training model
model_dt.fit(x_train, y_train)# training model
model_rf.fit(x_train, y_train)# training model
model_nb.fit(x_train, y_train)# training model
# Test-set predictions for each trained classifier; used by the confusion
# matrices and classification reports below.
y_pred_log = model_log.predict(x_test) #we use this for evaluation
y_pred_knn = model_knn.predict(x_test) #we use this for evaluation
y_pred_svm = model_svm.predict(x_test) #we use this for evaluation
y_pred_dt = model_dt.predict(x_test) #we use this for evaluation
y_pred_rf = model_rf.predict(x_test) #we use this for evaluation
# Bug fix: this previously called model_rf.predict, so every "Naive Bayes"
# result below was actually a duplicate of the Random Forest's.
y_pred_nb = model_nb.predict(x_test) #we use this for evaluation
from sklearn.metrics import confusion_matrix, classification_report
# +
# Confusion matrices for every classifier (rows: true class, cols: predicted).
cm_log = confusion_matrix(y_test, y_pred_log) # confusion matrix
cm_knn = confusion_matrix(y_test, y_pred_knn) # confusion matrix
cm_svm = confusion_matrix(y_test, y_pred_svm) # confusion matrix
cm_dt = confusion_matrix(y_test, y_pred_dt) # confusion matrix
cm_rf = confusion_matrix(y_test, y_pred_rf) # confusion matrix
cm_nb = confusion_matrix(y_test, y_pred_nb) # confusion matrix
# Per-class precision/recall/F1 text summaries.
cr_log = classification_report(y_test, y_pred_log) # classification report
cr_knn = classification_report(y_test, y_pred_knn) # classification report
cr_svm = classification_report(y_test, y_pred_svm) # classification report
cr_dt = classification_report(y_test, y_pred_dt) # classification report
cr_rf = classification_report(y_test, y_pred_rf) # classification report
cr_nb = classification_report(y_test, y_pred_nb) # classification report
# -
import seaborn as sns
# One confusion-matrix heatmap per classifier.
sns.heatmap(cm_log,annot=True,cbar=None,cmap = 'summer')
plt.title('Logistic Regression')
plt.show()
sns.heatmap(cm_knn,annot=True,cbar=None,cmap = 'spring')
plt.title('K Nearest Neighbour')
plt.show()
sns.heatmap(cm_svm,annot=True,cbar=None,cmap = 'winter')
plt.title('Support Vector Machine')
plt.show()
sns.heatmap(cm_dt,annot=True,cbar=None,cmap = 'cool')
plt.title('Decision Tree')
plt.show()
sns.heatmap(cm_rf,annot=True,cbar=None,cmap = 'autumn')
plt.title('Random Forest')
plt.show()
# Bug fix: matplotlib colormap names are case-sensitive — 'set1' raises
# ValueError ("'set1' is not a valid value for cmap"); the colormap is 'Set1'.
sns.heatmap(cm_nb,annot=True,cbar=None,cmap = 'Set1')
plt.title('Multinomial Naive Bayes')  # also fixes the 'Navie' typo in the title
plt.show()
# Print the classification reports one after another for easy comparison.
print('='*20+'Logistic Regression'+'='*20)
print(cr_log)
print('='*20+'KNearest Neighbour'+'='*20)
print(cr_knn)
print('='*20+'Support Vector Machine'+'='*20)
print(cr_svm)
print('='*20+'Decision Tree'+'='*20)
print(cr_dt)
print('='*20+'Random Forest'+'='*20)
print(cr_rf)
print('='*20+'Multinomial NB'+'='*20)
print(cr_nb)
| diabetes final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Notebook 08: Training more than one ML model
#
# ### Goal: Training a ML using all features/predictors/inputs and all ML methods
#
# #### Background
#
# So far in Notebooks 4-7 we have shown you how to train a single ML model for each task. But one really nice thing about ```sklearn``` is that they have coded up all the models we discussed in the paper to adopt the same syntax. This will make more sense in a little bit, but what this enables us to do is train many different ML methods to find which method performs best for our specific task. This generally a good method for designing your own ML projects.
#
# ### Classification, simple
# We will start off the same as Notebook 4.
# +
#needed packages
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#plot parameters that I personally like, feel free to make these your own.
import matplotlib
matplotlib.rcParams['axes.facecolor'] = [0.9,0.9,0.9] #makes a grey background to the axis face
matplotlib.rcParams['axes.labelsize'] = 14 #fontsize in pts
matplotlib.rcParams['axes.titlesize'] = 14
matplotlib.rcParams['xtick.labelsize'] = 12
matplotlib.rcParams['ytick.labelsize'] = 12
matplotlib.rcParams['legend.fontsize'] = 12
matplotlib.rcParams['legend.facecolor'] = 'w'
matplotlib.rcParams['savefig.transparent'] = False
#make default resolution of figures much higher (i.e., High definition)
# %config InlineBackend.figure_format = 'retina'
#import some helper functions for our other directory.
import sys
sys.path.insert(1, '../scripts/')
from aux_functions import load_n_combine_df
# class_labels=True -> classification targets. features_to_keep=np.arange(0,1,1)
# keeps only the first predictor (the notebook text invites extending this to all features).
(X_train,y_train),(X_validate,y_validate),(X_test,y_test) = load_n_combine_df(path_to_data='../datasets/sevir/',features_to_keep=np.arange(0,1,1),class_labels=True)
# -
# But now we will initalize a list of models!
# +
#load ML code from sklearn
from sklearn.linear_model import LogisticRegression,SGDClassifier
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import GaussianNB
#initialize
# All six estimators share the same fit/predict API, so they can be trained
# and evaluated in a single loop below. dual=False is the recommended LinearSVC
# setting when n_samples > n_features.
model_list = [LogisticRegression(),GaussianNB(),DecisionTreeClassifier(),RandomForestClassifier(),GradientBoostingClassifier(),LinearSVC(dual=False)]
print(model_list)
# -
# since the syntax is identical, we can loop over this list and train all the methods
# +
#Import a progress bar so we know how long it is taking
import tqdm
# Train every model on the same training split; tqdm shows per-model progress.
for model in tqdm.tqdm(model_list):
    model.fit(X_train,y_train)
# -
# Wait for the star to finish up above, then that means it is done training! Way to go, you just trained 6 **different** ML models. Nice right? But lets evaluate them now.
#
# Let's start by looking at the performance diagram first
# +
#load contingency_table func
from gewitter_functions import get_contingency_table,make_performance_diagram_axis,get_acc,get_pod,get_sr,csi_from_sr_and_pod
#make axis to plot on
ax = make_performance_diagram_axis()
#make list of colors so each method shows up as a different color
colors=['b','r','g','y','LightGreen','k']
legend_labels = ['LgR','NB','DT','RF','GBT','SVM']
#loop over all trained models
for idx,model in enumerate(model_list):
    #get predictions
    yhat = model.predict(X_validate)
    #the contingency table calculator expects y_true,y_pred
    cont_table = get_contingency_table(y_validate,yhat)
    #get metrics
    accuracy = get_acc(cont_table)
    pod = get_pod(cont_table)
    sr = get_sr(cont_table)
    csi = csi_from_sr_and_pod(sr,pod)
    #each model appears as one (success ratio, POD) point on the diagram
    ax.plot(sr,pod,'o',color=colors[idx],markerfacecolor='w',label=legend_labels[idx])
    #NOTE(review): if get_acc returns a fraction (0-1), printing it with '%'
    #and round(...,0) yields e.g. "1.0%"; multiply by 100 first — confirm.
    print('{} accuracy: {}%'.format(legend_labels[idx],np.round(accuracy,0)))
ax.legend()
# -
# As shown in the paper, all of the methods basically have the same results with some minor differences. If we look at the AUC of the ROC curve we will see something similar. Just one note though, the SVM method we used here (LinearSVC) does not support ```model.predict_proba```, so we will leave it off here.
# +
#load contingency_table func
from gewitter_functions import get_points_in_roc_curve,get_area_under_roc_curve
#something to help with annotating the figure
import matplotlib.patheffects as path_effects
pe = [path_effects.withStroke(linewidth=2,
                             foreground="k")]
pe2 = [path_effects.withStroke(linewidth=2,
                             foreground="w")]
#make figure
fig = plt.figure(figsize=(4.1,5))
#set facecolor to white so you can copy/paste the image somewhere
fig.set_facecolor('w')
#make list of colors so each method shows up as a different color
colors=['b','r','g','y','LightGreen','k']
legend_labels = ['LgR','NB','DT','RF','GBT','SVM']
ax = plt.gca()
#loop over all trained models
#(model_list[:-1] skips LinearSVC, which has no predict_proba — see markdown above)
for idx,model in enumerate(model_list[:-1]):
    #get predictions
    yhat_proba = model.predict_proba(X_validate)
    #lets just focus on the output from class 1 (note, the sum of these two columns should be 1)
    y_preds = yhat_proba[:,1]
    #get the roc curve by sweeping 100 probability thresholds
    pofds, pods = get_points_in_roc_curve(forecast_probabilities=y_preds, observed_labels=y_validate, threshold_arg=np.linspace(0,1,100))
    #get AUC
    auc = get_area_under_roc_curve(pofds,pods)
    ax.plot(pofds,pods,'-',color=colors[idx],label=legend_labels[idx])
    print('{} AUC: {}'.format(legend_labels[idx],np.round(auc,2)))
ax.legend()
#set some limits
ax.set_xlim([0,1])
ax.set_ylim([0,1])
#set the no-skill line (diagonal: random-guess performance)
ax.plot([0,1],[0,1],'--',color='Grey')
#label things
ax.set_title("AUC of ROC Curve")
ax.set_xlabel('POFD')
ax.set_ylabel('POD')
plt.tight_layout()
# -
# Congrats, you have now trained and evaluated multiple models with the same data! I encourage you to go ahead and now code up how to do it with all the data as inputs. Please note, it does take a bit longer to train, but shouldn't take more than 5-10 mins.
#
# ### Regression, simple
# We will start off the same as Notebook 5.
# +
#needed packages
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#plot parameters that I personally like, feel free to make these your own.
import matplotlib
matplotlib.rcParams['axes.facecolor'] = [0.9,0.9,0.9] #makes a grey background to the axis face
matplotlib.rcParams['axes.labelsize'] = 14 #fontsize in pts
matplotlib.rcParams['axes.titlesize'] = 14
matplotlib.rcParams['xtick.labelsize'] = 12
matplotlib.rcParams['ytick.labelsize'] = 12
matplotlib.rcParams['legend.fontsize'] = 12
matplotlib.rcParams['legend.facecolor'] = 'w'
matplotlib.rcParams['savefig.transparent'] = False
#make default resolution of figures much higher (i.e., High definition)
# %config InlineBackend.figure_format = 'retina'
#import some helper functions for our other directory.
import sys
sys.path.insert(1, '../scripts/')
from aux_functions import load_n_combine_df
# class_labels=False -> continuous (regression) targets; dropzeros=True
# presumably drops zero-target samples — confirm in aux_functions.
(X_train,y_train),(X_validate,y_validate),(X_test,y_test) = load_n_combine_df(path_to_data='../datasets/sevir/',features_to_keep=np.arange(0,1,1),class_labels=False,dropzeros=True)
# -
# Now we will initalize a list of *Regression* models
# +
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import LinearSVR
#initialize
# Regression counterparts of the classifiers used above; same fit/predict API.
model_list = [LinearRegression(),DecisionTreeRegressor(),RandomForestRegressor(),GradientBoostingRegressor(),LinearSVR()]
print(model_list)
# -
# Now go train your ML regressors!
# +
#Import a progress bar so we know how long it is taking
import tqdm
# Train every regressor on the same training split; tqdm shows per-model progress.
for model in tqdm.tqdm(model_list):
    model.fit(X_train,y_train)
# -
# As before, lets evaluate them. First we will make the one-to-one scatter plot like Figures 14 and 16
# +
from aux_functions import boxbin,make_colorbar
#make figure with 2 rows and 3 columns with size 7.5" by 5"
fig,axes = plt.subplots(2,3,figsize=(7.5,5))
#set facecolor to white so we can copy paste it if you want to somewhere else
fig.set_facecolor('w')
#the number of bins for the boxbin method
n = 33
#the bins we want in x and y (log-spaced from 1 to ~3162 flashes)
xbins = np.logspace(0,3.5,n)
ybins = np.logspace(0,3.5,n)
#colors i like
r = [255/255,127/255,127/255]
b = [126/255,131/255,248/255]
#labels
labels= ['LnR','DT','RF','GBT','SVM']
#color list, one for each model
colors= [r,b,'orange','purple','dimgrey']
#colormaps to match the colors in 'theme'
cmaps=['Reds_r','Blues_r','Oranges_r','Purples_r','Greys_r']
#force ticks to show up where i want them
#NOTE(review): locmin, c_scale and fs3 appear unused in this cell.
locmin = matplotlib.ticker.LogLocator(base=10.0, subs=(0.1,0.2,0.4,0.6,0.8,1,2,4,6,8,10 ))
#axes is shape [2,3], it is easier to loop if we flatten this, which is what ravel does
axes = axes.ravel()
#some parameters to make it pretty
c_scale = 0.575
fs3 = 11
fs4 = 18
props = dict(boxstyle='square', facecolor='White', alpha=0.75)
annotate_list = ['a)','b)','c)','d)','e)',]
#draw a new axis for a new colorbar to go on
ax_cbar = fig.add_axes([0.75, 0.15, 0.015,0.33])
#draw that colorbar
cbar = make_colorbar(ax_cbar,0,2,plt.cm.Greys_r)
#label that colorbar
cbar.set_label('$\%$ of total points')
#loop over axes and draw scatters (one panel per trained regressor)
for i,ax in enumerate(axes):
    #we have 1 too many subplots, so turn off the last one [5]
    if i==5:
        ax.axis('off')
        break
    #make axes log-log
    ax.semilogy()
    ax.semilogx()
    #grab model
    model = model_list[i]
    #get predictions
    yhat = model.predict(X_validate)
    #make scatter plot (predicted on x, observed on y)
    ax.scatter(yhat,y_validate,color=colors[i],s=1,marker='+')
    #box and bin up data to show density of points
    ax,cbar,C = boxbin(yhat,y_validate,xbins,ybins,ax=ax,mincnt=100,normed=True,cmap=cmaps[i],vmin=0,vmax=2,cbar=False)
    #set some axis limits and ticks
    ax.set_xlim([1,4000])
    ax.set_xticks([1,10,100,1000])
    ax.set_yticks([1,10,100,1000])
    ax.set_ylim([1,4000])
    #add diagonal one-to-one line (perfect prediction)
    ax.plot([1,4000],[1,4000],'--k',alpha=0.5)
    #add a subplot label
    ax.text(0.075, 0.25, annotate_list[i], transform=ax.transAxes,fontsize=fs4,
            verticalalignment='top', bbox=props)
    #only label certain axes x-y axis to save space
    if (i == 0) or (i==3):
        ax.set_ylabel('$y$, [# of flashes]')
    if i==4:
        ax.set_xlabel(r'$\hat{y}$, [# of flashes]')
    #label each subplot title as the method used
    ax.set_title(labels[i])
plt.tight_layout()
# -
# And their quantitative metrics
# +
from gewitter_functions import get_mae,get_rmse,get_bias,get_r2
#loop over all trained models
for idx,model in enumerate(model_list):
    #get predictions
    yhat = model.predict(X_validate)
    #all four metric helpers take (y_true, y_pred)
    mae = get_mae(y_validate,yhat)
    rmse = get_rmse(y_validate,yhat)
    bias = get_bias(y_validate,yhat)
    r2 = get_r2(y_validate,yhat)
    #print them out so we can see them
    print('Method: {} .. MAE:{} flashes, RMSE:{} flashes, Bias:{} flashes, Rsquared:{}'.format(labels[idx],np.round(mae,2),np.round(rmse,2),np.round(bias,2),np.round(r2,2)))
# -
# While some are a big fan of tables, I perfer a bar chart (Figures 15 and 17)
# +
#some annotation helpers
import matplotlib.patheffects as path_effects
pe = [path_effects.withStroke(linewidth=2,
                             foreground="k")]
pe2 = [path_effects.withStroke(linewidth=2,
                             foreground="w")]
#make a 2 row, 2 column figure of size 5" by 5"
fig,axes = plt.subplots(2,2,figsize=(5,5))
#set facecolor to white so we can copy/paste it whereever
fig.set_facecolor('w')
#list of labels for the x-axis
labels= ['LnR','DT','RF','GBT','SVM']
#loop over all trained models; one bar per model in each of the 4 metric panels
#(colors, annotate_list, props and fs4 are defined in the scatter-figure cell above)
for i,model in enumerate(model_list):
    #get predictions
    yhat = model.predict(X_validate)
    #compute the four regression metrics for this model
    mae = get_mae(y_validate,yhat)
    rmse = get_rmse(y_validate,yhat)
    bias = get_bias(y_validate,yhat)
    r2 = get_r2(y_validate,yhat)
    ############### subplot 0,0: Bias ########################
    ax = axes[0,0]
    #put a bar at position i (from our loop)
    ax.bar(i,bias,width=0.95,color=colors[i])
    #make the annotation so we can see the numerical data on the plot
    annotate = str(int(np.round(bias))).rjust(3, ' ')
    ax.text(i-0.4,bias+5,annotate,color=colors[i],path_effects=pe2)
    ##########################################################
    ####### subplot 0,1: Mean Absolute Error #################
    ax = axes[0,1]
    #put a bar at position i (from our loop)
    ax.bar(i,mae,width=0.95,color=colors[i])
    #make the annotation so we can see the numerical data on the plot
    annotate = str(int(np.round(mae))).rjust(3, ' ')
    ax.text(i-0.4,mae+5,annotate,color=colors[i],path_effects=pe2)
    ##########################################################
    ####### subplot 1,0: Root Mean Squared Error #############
    ax = axes[1,0]
    ax.bar(i,rmse,width=0.95,color=colors[i])
    annotate = str(int(np.round(rmse))).rjust(3, ' ')
    ax.text(i-0.4,rmse+5,annotate,color=colors[i],path_effects=pe2)
    ##########################################################
    ####### subplot 1,1: Rsquared ###########################
    ax = axes[1,1]
    ax.bar(i,r2,width=0.95,color=colors[i])
    annotate = str(np.round(r2,2)).ljust(4, '0')
    ax.text(i-0.5,r2+0.05,annotate,color=colors[i],path_effects=pe2)
    ##########################################################
#cosmetic things: per-panel limits, shared x tick labels, titles, a)-d) letters
ax = axes[0,0]
ax.xaxis.set_ticks(np.arange(0,5))
ax.xaxis.set_ticklabels(labels,rotation=45)
ax.set_ylim([-130,130])
ax.set_title("Bias")
ax.text(0.075, 0.25, annotate_list[0], transform=ax.transAxes,fontsize=fs4,
        verticalalignment='top', bbox=props)
ax = axes[0,1]
ax.set_ylim([0,200])
ax.xaxis.set_ticks(np.arange(0,5))
ax.xaxis.set_ticklabels(labels,rotation=45)
ax.set_title("Mean Abs. Error")
ax.text(0.075, 0.25, annotate_list[1], transform=ax.transAxes,fontsize=fs4,
        verticalalignment='top', bbox=props)
ax = axes[1,0]
ax.set_ylim([0,300])
ax.xaxis.set_ticks(np.arange(0,5))
ax.xaxis.set_ticklabels(labels,rotation=45)
ax.set_title("Root Mean Sq. Error")
ax.text(0.075, 0.25, annotate_list[2], transform=ax.transAxes,fontsize=fs4,
        verticalalignment='top', bbox=props)
ax = axes[1,1]
ax.set_ylim([-1,1])
ax.xaxis.set_ticks(np.arange(0,5))
ax.xaxis.set_ticklabels(labels,rotation=45)
ax.set_title("$R^{2}$")
ax.text(0.075, 0.25, annotate_list[3], transform=ax.transAxes,fontsize=fs4,
        verticalalignment='top', bbox=props)
plt.tight_layout()
# -
# There ya go! You have successfully made a metric bar chart to compare the 5 ML regression models you trained. Like earlier in this notebook (the end of the classification task), I encourage you to extend this notebook to now include all predictors!
| jupyter_notebooks/Notebook08_TrainingMoreThanOneModel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
from keras.layers import Dropout
# +
# Data preprocessing
# Importing the dataset
dataset = pd.read_csv('Churn_Modelling.csv')
# Columns 3..12 are the predictive features (credit score ... salary);
# column 13 is the binary churn label. -- assumes the standard Kaggle
# Churn_Modelling.csv column order; TODO confirm against the file.
X = dataset.iloc[:, 3: 13].values
y = dataset.iloc[:, 13].values
# +
# Encoding categorical data
# Label-encode the two categorical columns (index 1 and 2 of X).
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
# One-hot encode column 1 only; all other columns pass through unchanged.
transformer = ColumnTransformer(transformers=[("OneHot", OneHotEncoder(categories='auto'), [1])], remainder='passthrough')
X = np.array(transformer.fit_transform(X))
# Drop the first dummy column to avoid the dummy-variable trap.
X = X[:, 1:]
# +
# Splitting the dataset into the Training set and Test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling: fit the scaler on the training set only, then apply the
# same transform to the test set (prevents test-set leakage).
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# -
X.shape
# +
# Fitting classifier to the Training set
def build_classifier(first_layer, second_layer):
    """Build and compile a two-hidden-layer ANN for binary churn classification.

    first_layer / second_layer give the unit counts of the two hidden layers.
    Returns the compiled Keras model (single sigmoid output, log loss).
    """
    model = Sequential()
    # Hidden layer 1 also declares the 11-feature input; 20% dropout after it.
    model.add(Dense(first_layer, input_shape=(11,), kernel_initializer='uniform', activation='relu'))
    model.add(Dropout(rate=0.2))
    # Hidden layer 2 with its own 20% dropout.
    model.add(Dense(second_layer, kernel_initializer='uniform', activation='relu'))
    model.add(Dropout(rate=0.2))
    # One sigmoid unit for the single binary output. (For more than one
    # output node you would use softmax instead.)
    model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
    # Adam is a stochastic-gradient-descent variant; binary_crossentropy is
    # the log-loss for a single-node output layer (use categorical_crossentropy
    # when the output layer has more than one node).
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model
# Wrap the model-builder so scikit-learn's grid search can drive Keras.
classifier = KerasClassifier(build_fn=build_classifier)
# Hyper-parameter grid: hidden-layer sizes, batch size and epoch count.
# 3*3*3*3 = 81 combinations, each cross-validated 10-fold below.
parameter = {
    "first_layer" : [5, 22, 16],
    "second_layer" : [5, 22, 16],
    "batch_size" : [10, 20, 32],
    "epochs" : [100, 150, 200]
}
grid = GridSearchCV(estimator=classifier, param_grid=parameter, scoring='accuracy', n_jobs=-1, cv=10, verbose=3)
# -
# Run the search on the scaled training data (this is the expensive step).
grid.fit(X=X_train, y=y_train)
grid.best_score_
grid.best_params_
# +
# Predicting the Test set results
y_pred = grid.predict(X_test)
# Threshold the predicted probabilities at 0.5 to get hard class labels.
y_pred = (y_pred >= 0.5)
# -
# Making the Confusion Matrix
cm = confusion_matrix(y_test, y_pred)
cm
# Accuracy = (TN + TP) / total, derived from the confusion matrix itself
# rather than from hard-coded counts, so this cell stays correct when the
# data split or the model changes.
accuracy = (cm[0, 0] + cm[1, 1]) / cm.sum()
accuracy
| ANN/Untitled.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.0
# language: julia
# name: julia-1.0
# ---
# # Debug Driver Models in Urban POMDP
using Revise
using Random
using POMDPs
using POMDPModelTools
using POMDPPolicies
using BeliefUpdaters
using POMDPSimulators
using AutomotiveDrivingModels
using AutomotivePOMDPs
using AutomotiveSensors
using AutoViz
using Reel
rng = MersenneTwister(1);
# ## Environment
params = UrbanParams(nlanes_main=1,
crosswalk_pos =[VecSE2(6, 0., pi/2), VecSE2(-6, 0., pi/2), VecSE2(0., -5., 0.)],
crosswalk_length = [14.0, 14., 14.0],
crosswalk_width = [4.0, 4.0, 3.1],
stop_line = 22.0)
env = UrbanEnv(params=params);
pomdp = UrbanPOMDP(env=env,
sensor = PerfectSensor(),
ego_goal = LaneTag(2, 1),
max_cars=5,
max_peds=5,
car_birth=0.5,
ped_birth=0.5,
max_obstacles=0, # no fixed obstacles
ego_start=20,
ΔT=0.1);
# ## Simulation
# Extend POMDPModelTools so that simulate() records, as the "info" of every
# step, a snapshot of the POMDP's driver models alongside (s', o, r).
function POMDPModelTools.generate_sori(p::UrbanPOMDP, s::UrbanState, a::UrbanAction, rng::AbstractRNG)
    # Splat the (s', o, r) tuple from generate_sor and append the snapshot;
    # deepcopy keeps later in-place model mutations from rewriting history.
    return generate_sor(p, s, a, rng)..., deepcopy(p.models)
end
still_policy = FunctionPolicy(s -> UrbanAction(0.))
up = NothingUpdater()
s0 = initialstate(pomdp, rng)
models0 = deepcopy(pomdp.models)
hr = HistoryRecorder(rng=rng, max_steps=200)
@time hist = simulate(hr, pomdp, still_policy, up, nothing, s0);
insert!(hist.info_hist, 1, models0);
duration, fps, render_rec = animate_scenes(hist.state_hist, hist.info_hist, pomdp.env, overlays=SceneOverlay[IDOverlay()], sim_dt=pomdp.ΔT, cam = StaticCamera(VecE2(0., -8.), 14.0))
speed_factor = 2
film = roll(render_rec, fps = speed_factor*fps, duration = duration/speed_factor)
# ## Debugging
step = 180
s = hist.state_hist[step]
vehid = 2
veh = s[findfirst(vehid, s)]
m = hist.info_hist[step][vehid]
cwid = 2
cwm = m.crosswalk_drivers[cwid]
intm = m.intersection_driver
AutomotivePOMDPs.observe!(deepcopy(intm), s, env.roadway, vehid)
veh = s[findfirst(3, s)]
sqrt(normsquared(VecE2(intm.intersection_pos - veh.state.posG)))
# +
egoid = vehid
scene = s
model = deepcopy(intm)
roadway = env.roadway
ego = scene[findfirst(egoid, scene)]
AutomotiveDrivingModels.observe!(model.navigator, scene, roadway, egoid) # set the direction
dir = model.navigator.dir
a_lon =0.
a_lon_idm = model.navigator.a
passed = AutomotivePOMDPs.has_passed(model, scene, roadway, egoid)
is_engaged = AutomotivePOMDPs.engaged(model, scene, roadway, egoid)
right_of_way = model.priorities[(model.navigator.route[1].tag,model.navigator.route[end].tag)]
is_clogged = AutomotivePOMDPs.is_intersection_clogged(model, scene, roadway, egoid)
println("is clogged: $is_clogged")
ttc = AutomotivePOMDPs.ttc_check(model, scene, roadway, egoid)
println("ttc")
model.priority = ttc || right_of_way
if !model.stop
AutomotivePOMDPs.update_stop!(model, ego, roadway)
end
if isempty(model.intersection) || passed
a_lon = a_lon_idm
elseif !passed
if right_of_way
if is_clogged && !passed && is_engaged && !isapprox(ego.state.v, 0.)
println("Vehicle $egoid : emergency break")
a_lon = -model.navigator.d_max
else
a_lon = a_lon_idm
end
else # left turn
if !ttc && !is_engaged # before left turn
a_lon = min(a_lon_idm, stop_at_end(model, ego, roadway))
elseif is_clogged && !passed && is_engaged && !isapprox(ego.state.v, 0.)
# println("Vehicle $egoid : emergency break")
a_lon = -model.navigator.d_max
elseif ttc
a_lon = a_lon_idm
end
end
end
a_lon
# -
methods(get_neighbor_fore_along_lane)
mm = deepcopy(m)
observe!(mm, s, env.roadway, vehid)
mm.a
intm.priorities[(intm.navigator.route[1].tag,intm.navigator.route[end].tag)]
intm.a
AutomotivePOMDPs.has_passed(intm, s, env.roadway, vehid)
AutomotivePOMDPs.engaged(intm, s, env.roadway, vehid)
AutomotivePOMDPs.has_passed(cwm, veh, env.roadway)
AutomotivePOMDPs.ttc_check(intm, s, env.roadway, vehid)
AutomotivePOMDPs.stop_at_end(intm, veh, env.roadway)
intm.navigator.a
veh2 = s[findfirst(5, s)]
AutomotivePOMDPs.is_behind(veh, veh2, env.roadway)
lane1 = get_lane(env.roadway, veh)
# project veh2 on lane2
veh2_l1 = Frenet(veh2.state.posG, lane1, env.roadway)
Frenet(proj(veh2.state.posG, lane1, env.roadway, move_along_curves=false), env.roadway)
get_neighbor_rear_along_lane(s, vehid, env.roadway)
fieldnames(NeighborLongitudinalResult)
methods(get_neighbor_rear_along_lane)
# +
cwm.wait_list
cwm.a
# AutomotivePOMDPs.update_priority!(cwm, s, env.roadway, vehid)
cwm.a
dist_to_cw = AutomotivePOMDPs.get_distance_to_crosswalk(cwm, veh, env.roadway, -cwm.stop_delta)
# AutomotivePOMDPs.stop_at_dist(cwm, veh, dist_to_cw)
cwm.navigator.a, cwm.a, veh.state.v
# +
using Parameters
# Scene overlay that draws a turn-signal ("blinker") dot at one front corner
# of a vehicle.
@with_kw struct BlinkerOverlay <: SceneOverlay
    on::Bool = false      # nothing is drawn unless the blinker is on
    right::Bool = true    # true: right-hand corner, false: left-hand corner
    veh::Vehicle = Vehicle(VehicleState(), VehicleDef(), 0)  # vehicle to decorate
    color::Colorant = colorant"0xFFEF00FF" # yellow
    size::Float64 = 0.3   # radius of the rendered dot
end
function AutoViz.render!(rendermodel::RenderModel, overlay::BlinkerOverlay, scene::Scene, roadway::Roadway)
    if !overlay.on
        return nothing
    end
    # Place the dot at the vehicle's front, offset half a vehicle width
    # perpendicular to the heading (θ - π/2 = right side, θ + π/2 = left side).
    if overlay.right
        pos = get_front(overlay.veh) + polar(overlay.veh.def.width/2, overlay.veh.state.posG.θ - pi/2)
    else
        pos = get_front(overlay.veh) + polar(overlay.veh.def.width/2, overlay.veh.state.posG.θ + pi/2)
    end
    add_instruction!(rendermodel, render_circle, (pos.x, pos.y, overlay.size, overlay.color))
end
# -
scene = Scene()
veh = hist.state_hist[1][3]
push!(scene, veh)
AutoViz.render(scene, env.roadway, SceneOverlay[BlinkerOverlay(on=true, right=false, veh=veh)], cam=FitToContentCamera(0.0))
using Vec
?polar
| test/debug_driver_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="frGnqNWqJ2tr" outputId="0bc2da44-ed33-4052-9c48-3734cf280cd3"
# !git clone https://github.com/harveenchadha/eagleview.git
# + [markdown] id="LNiQMPQ3CHu6"
# # Setup
# + id="mrhpG0f4GohY"
import os

# The project skeleton, created in one data-driven pass instead of eleven
# copy-pasted makedirs calls; exist_ok=True keeps the cell idempotent.
_PROJECT_DIRS = [
    'eagleview/config',
    'eagleview/data',
    'eagleview/data/processed',
    'eagleview/data/raw',
    'eagleview/logs',
    'eagleview/notebooks',
    'eagleview/scripts',
    'eagleview/checkpoints',
    'eagleview/checkpoints/pretrained',
    'eagleview/checkpoints/finetuning',
    'eagleview/results',
]
for _d in _PROJECT_DIRS:
    os.makedirs(_d, exist_ok=True)
# + id="xGq3nnaqMulL"
# Canonical locations used throughout the notebook.
parent_dir = './eagleview'
raw_data_path = os.path.join(parent_dir, 'data/raw')
processed_data_path = os.path.join(parent_dir, 'data/processed')
checkpoint_dir = os.path.join(parent_dir, 'checkpoints')
# + id="2cPC1hyJRjdS"
# Download and unpack the person/car dataset on first run.
# NOTE(review): the guard tests for the archive, which is removed right after
# extraction, so re-running the cell re-downloads; testing for the extracted
# directory would be more robust -- confirm intended behavior.
if not os.path.exists(raw_data_path + '/trainval.tar.gz'):
    os.system('wget https://evp-ml-data.s3.us-east-2.amazonaws.com/ml-interview/openimages-personcar/trainval.tar.gz -P ' + raw_data_path )
    os.system('tar xf '+raw_data_path+'/trainval.tar.gz -C '+raw_data_path)
    os.system('rm '+raw_data_path+'/trainval.tar.gz')
# + id="90s_kJijIr11" colab={"base_uri": "https://localhost:8080/"} outputId="34d468d0-a9c5-4996-a5fc-2022f7d9c45b"
# Fetch the COCO-pretrained EfficientDet-D2 checkpoint (same archive-based
# guard as above).
if not os.path.exists(checkpoint_dir+'/pretrained/efficientdet-d2.tar.gz'):
    pretrained_dir = checkpoint_dir+'/pretrained'
    os.system('wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/efficientdet-d2.tar.gz -P ' + pretrained_dir)
    os.system('tar xf '+pretrained_dir+'/efficientdet-d2.tar.gz -C '+pretrained_dir)
    os.system('rm '+pretrained_dir+'/efficientdet-d2.tar.gz')
os.system('git clone --depth 1 https://github.com/google/automl ' + parent_dir+'/scripts')
# + colab={"base_uri": "https://localhost:8080/"} id="2H011qpNCN4q" outputId="2d28a893-9644-4546-a87c-3cfb50a7e867"
# !pip install -U PyYAML
# !pip install pycocotools
# !pip install --user --upgrade tensorflow-model-optimization
# + id="vPceOGVO3rn_"
import numpy as np
import pandas as pd
import glob
import json
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
import io
import os
from PIL import Image
import yaml
# + [markdown] id="H6QVMbXoCMr0"
# # Data Preparation
# + id="TepqnA653roJ"
# Load the COCO-style annotation file shipped with the dataset.
with open(raw_data_path+'/trainval/annotations/bbox-annotations.json') as json_data:
    data = json.load(json_data)
# + id="CCXIOGr_3roK"
# Image-level table; add the on-disk path of each image for later reads.
df_images = pd.DataFrame(data['images'])
df_images['file_path'] = raw_data_path + '/trainval/images/' + df_images['file_name']
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="CNXraHG23roK" outputId="89e3fb35-1369-4a28-f1f1-a47c4b549c5c"
df_images.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="StraPjeW3roL" outputId="01e3fd6b-b3a6-4c5e-c8cc-f47e77edc7e9"
df_annotations = pd.DataFrame(data['annotations'])
df_annotations.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 479} id="iKE5Yl6Z3roL" outputId="d93bb6fb-ca2e-4f10-eb48-12399fbd6576"
df = pd.merge(df_images, df_annotations, left_on='id', right_on='image_id')
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="LdnhE1NE3roL" outputId="b0a0ae93-4283-43fa-f431-4839bb0e6da9"
df.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="0woD2hTc3roM" outputId="33e0dbdf-db4c-4c28-c3e8-8771a3857ba8"
df.info()
# + id="DTBHdwrn3roM"
#COCO bounding box format is [top left x position, top left y position, width, height].
# + id="nTfcgi-c3roN"
def apply_transformation_to_df(row):
    """Expand the COCO-style ``bbox`` ([top-left x, top-left y, width, height])
    of *row* into corner coordinates x_min/y_min/x_max/y_max and return the row."""
    left, top, box_w, box_h = row['bbox']
    row['x_min'] = left
    row['y_min'] = top
    row['x_max'] = left + box_w
    row['y_max'] = top + box_h
    return row
# + id="ZzxozVv13roN"
df['x_min'] = 0
df['y_min'] = 0
df['x_max'] = 0
df['y_max'] = 0
# + id="5Jz13zU73roO"
df = df.apply(apply_transformation_to_df, axis=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 233} id="TbdEtBBD3roO" outputId="c20b50ee-902b-4227-ae7a-28e56d183e34"
df.head(2)
# + [markdown] id="aadCFiYFCSWs"
# # Creating TF Records
# + id="HW5gpb9e3roO"
def int64_feature(value):
    """Wrap a single int in a tf.train.Feature (Int64List)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
    """Wrap a list of ints in a tf.train.Feature (Int64List)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
    """Wrap a single bytes object in a tf.train.Feature (BytesList)."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
    """Wrap a list of bytes objects in a tf.train.Feature (BytesList)."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_list_feature(value):
    """Wrap a list of floats in a tf.train.Feature (FloatList)."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
# + [markdown] id="xkOIot3wCVkb"
# ## Train Test Split
# + id="NqcwqtDL3roP"
def split(df, group):
    """Split *df* into one (filename, object) record per value of *group*.

    Each record pairs a group key with the sub-frame of rows sharing it.
    """
    # Bug fix: namedtuple was used without being imported anywhere in this
    # file, so calling split() raised NameError. Import it locally here.
    from collections import namedtuple
    data = namedtuple('data', ['filename', 'object'])
    gb = df.groupby(group)
    # Iterating gb.groups yields the group keys directly; the original
    # zip(gb.groups.keys(), gb.groups) paired each key with itself.
    return [data(filename, gb.get_group(filename)) for filename in gb.groups]
def class_int_to_text(val):
    """Map a category id to its class label ('Person'/'Car'); None otherwise."""
    labels = {1: 'Person', 2: 'Car'}
    return labels.get(val)
def create_tf_example(df, row):
    """Build one tf.train.Example for the image described by *row*.

    df  : annotation rows for this image (one row per bounding box).
    row : image-level record providing file_path and file_name.
    """
    # Read the raw JPEG bytes and decode them once to obtain the dimensions
    # needed to normalize the box coordinates.
    with tf.io.gfile.GFile(row.file_path, 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = Image.open(encoded_jpg_io)
    width, height = image.size
    filename = row.file_name.encode('utf8')
    image_format = b'jpg'
    xmins = []
    xmaxs = []
    ymins = []
    ymaxs = []
    classes_text = []
    classes = []
    # NOTE(review): this loop rebinds the *row* parameter with each annotation
    # row; harmless because the image-level fields were already read above,
    # but a different loop variable name would be clearer.
    for index, row in df.iterrows():
        # Coordinates are normalized to [0, 1] by the image dimensions.
        xmins.append(row['x_min'] / width)
        xmaxs.append(row['x_max'] / width)
        ymins.append(row['y_min'] / height)
        ymaxs.append(row['y_max'] / height)
        classes.append(row['category_id'])
        classes_text.append(class_int_to_text(row['category_id']).encode('utf8'))
    # Assemble the Example in the TF Object Detection API feature layout.
    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': int64_feature(height),
        'image/width': int64_feature(width),
        'image/filename': bytes_feature(filename),
        'image/source_id':bytes_feature('0'.encode('utf8')),
        'image/encoded':bytes_feature(encoded_jpg),
        'image/format': bytes_feature(image_format),
        'image/object/bbox/xmin': float_list_feature(xmins),
        'image/object/bbox/xmax': float_list_feature(xmaxs),
        'image/object/bbox/ymin': float_list_feature(ymins),
        'image/object/bbox/ymax': float_list_feature(ymaxs),
        'image/object/class/text':bytes_list_feature(classes_text),
        'image/object/class/label':int64_list_feature(classes),
    }))
    return tf_example
def create_tf_record(df, subset, output_path):
    """Write one TFRecord per unique image in *subset* to *output_path*.

    df     : the full annotation-level frame used to look up all boxes
             belonging to an image.
    subset : annotation-level frame (one row per bounding box) selecting the
             images to serialize.
    """
    writer = tf.io.TFRecordWriter(output_path)
    # Bug fix: *subset* has one row per annotation, so iterating it directly
    # serialized an image with N boxes N times (each copy already containing
    # all N boxes). De-duplicate by file name to emit each image once.
    for index, row in tqdm(subset.drop_duplicates(subset='file_name').iterrows()):
        local_subset = df[df.file_name == row.file_name]
        tf_example = create_tf_example(local_subset, row)
        writer.write(tf_example.SerializeToString())
    writer.close()
    print('TFRecords created')
# + id="nOB6Xwt93roP"
from sklearn.model_selection import train_test_split
# + id="0seESG-N3roQ"
from tqdm.notebook import tqdm
# + id="sJKkpX7a3roQ"
# Hold out 10% of the image names for test, then 10% of the remainder for
# validation; filter the annotation-level frame by image name so all boxes of
# an image land in the same split.
_X_train, _X_test = train_test_split(df['file_name'].unique(), test_size=0.1, random_state=42)
_X_train, _X_valid = train_test_split(_X_train, test_size=0.1, random_state=42)
# + id="rQ44zBiL3roR"
X_train = df[df['file_name'].isin(_X_train)]
# Bug fix: the validation and test frames were built from the wrong id lists
# (_X_test fed X_valid and _X_valid fed X_test), silently swapping the splits.
X_valid = df[df['file_name'].isin(_X_valid)]
X_test = df[df['file_name'].isin(_X_test)]
# + id="Kklw6_3I3roS"
def check_leakage(df1, df2, col):
    """Return True when any value of *col* occurs in both data frames.

    Uses a set intersection (O(n + m)) instead of the original nested
    membership scan over np.unique arrays (O(n * m)); the result is the
    same boolean answer.
    """
    return bool(set(df1[col]) & set(df2[col]))
# + [markdown] id="nIp4Q-zGCbUV"
# ## Check Leakage
# + colab={"base_uri": "https://localhost:8080/"} id="MSgMPHfh3roS" outputId="676dc320-7aea-47f0-8ef6-bae3269dc2d9"
check_leakage(X_train, X_valid, 'file_name')
# + colab={"base_uri": "https://localhost:8080/"} id="2BUAB8zV3roS" outputId="08fddf3d-4f8d-4aaa-8733-0c667ccb5dfc"
check_leakage(X_train, X_test, 'file_name')
# + colab={"base_uri": "https://localhost:8080/", "height": 83, "referenced_widgets": ["841717667c1d41578473f078567d90b2", "7c65c236bbbb433db017708f751201ba", "08216e507c9d4a428c9c813a9dee2314", "26e359699c2f4eddb6e26d97ec0eaa38", "59b794e937c942309c5573b6c2b64056", "7c88b7e4da6c47f99e44b5c8454ea30e", "27fe4c4115f04d74927815c56735303e", "a46a8db6cf0f42fb94c094c24f80b9cb"]} id="oXQGBzMR3roT" outputId="2c66d2c5-6e76-48d4-fee0-bd584bfb7a4f"
if not os.path.exists(processed_data_path+'/test.record'):
create_tf_record(df, X_test, processed_data_path+'/test.record')
# + colab={"base_uri": "https://localhost:8080/", "height": 83, "referenced_widgets": ["6f4f91eb40434d7fb96ffd7a9187682c", "1eb84c8060cc465ea0ed2cd1d7302e00", "9f2e793927e540adae622060410efa7d", "558665570773497b9e7fa0a77ba60389", "297a00d31a2d4b939f3ce58f0984f3fc", "1084a635deb9460c8f4a49e9c7e89806", "77d3e49323984c348947ed5c02576ac8", "5ef4614df7fc4bc189c8b857fc611b6a"]} id="852F40U63roT" outputId="f53911c9-c121-45f1-c7c3-0a616a8ff9b0"
if not os.path.exists(processed_data_path+'/valid.record'):
create_tf_record(df, X_valid, processed_data_path+'/valid.record')
# + id="2LQvF6II3roT" colab={"base_uri": "https://localhost:8080/", "height": 83, "referenced_widgets": ["2af5f9de25764196825d27b09648a2f5", "6be9c622ecc64d49bd1332a7526c7c17", "1fc973a117844e4bb8ee0063e86fc931", "612206b141a2414281ee0483150ad900", "c0f5bd355f9a4c9db8e4df895e24a063", "517efde3d1104d9386767ccc86c501af", "b6bbc2f2664f4cd9a85c0cbdae2b13f3", "5fcf92f0bf9842858009e153b80e5d94"]} outputId="8224458c-7faf-41c3-8a5a-81e1a438927e"
if not os.path.exists(processed_data_path+'/train.record'):
create_tf_record(df, X_train, processed_data_path+'/train.record')
# + [markdown] id="aRg8IvMYCfIg"
# # Start Training
# + id="kq2-k-1_BBxA"
dict_file = {'num_classes': 2, 'var_freeze_expr': '(efficientnet|fpn_cells|resample_p6)','label_map': {1: 'person', 2: 'car'}}
with open(parent_dir+'/config/config.yaml', 'w+') as file:
documents = yaml.dump(dict_file, file)
# + id="1xGVwMu0QI_r"
train_command = 'python '+parent_dir+'/scripts/efficientdet/main.py --mode=train_and_eval \
--train_file_pattern='+parent_dir+'/data/processed/train.record \
--val_file_pattern='+parent_dir+'/data/processed/valid.record \
--model_name=efficientdet-d2 \
--model_dir='+parent_dir+'/checkpoints/finetuning \
--model_name=efficientdet-d2 \
--ckpt='+parent_dir+'/checkpoints/pretrained/efficientdet-d2/ \
--train_batch_size=16 \
--eval_batch_size=16 \
--num_epochs=10 \
--num_examples_per_epoch='+str(X_train.file_name.nunique())+' \
--eval_samples='+str(X_valid.file_name.nunique())+' \
--hparams='+parent_dir+'/config/config.yaml'
with open( parent_dir+'/scripts/train.sh', 'w+') as file:
file.writelines(train_command)
# + colab={"base_uri": "https://localhost:8080/"} id="ft5_dh0iOi9k" outputId="cbda33fb-445b-4bb1-c35f-777e8ff1f258"
# !bash eagleview/scripts/train.sh 2>&1 | tee eagleview/logs/train_log.out
# + colab={"base_uri": "https://localhost:8080/", "height": 612} id="qZSFxB5ZSZy7" outputId="b9f58356-61de-4cb7-ca04-a31a4f3570b9"
# !zip -r checkpoints.zip eagleview/checkpoints/finetuning/
from google.colab import files
files.download("checkpoints.zip")
# + [markdown] id="WzXuKl9sv1Sg"
# # <a>Inference</a>
# + id="uHl5l_QSw3LU"
from tqdm.notebook import tqdm
# + [markdown] id="mAovvWZB17bh"
# ### Export Model
# + colab={"base_uri": "https://localhost:8080/"} id="8mYU6RV-0o0s" outputId="fcaef9fa-822c-4b8d-c9ec-b62b44c686f9"
# !python eagleview/scripts/efficientdet/model_inspect.py \
# --runmode=saved_model \
# --model_name=efficientdet-d2 \
# --ckpt_path=eagleview/checkpoints/finetuning \
# --saved_model_dir=eagleview/checkpoints/exported_model \
# --hparams=eagleview/config/config.yaml
# + [markdown] id="QyxIxN4Y1-bx"
# ### Run Inference
# + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["7779cce3498e4427b8a9e9390c7fb14e", "d7b08a9bd11840899aa95843ba7aeb3d", "0d1e8a2819924941978a1d1a3a0215f8", "bc25252ebd64401a9ad329f008d96868", "40a07ff5c9e64ac0a7a5e0ba179fe36f", "3777b7d6963f4816b2afed6d0a177fd9", "28c66e5d03c24662ad0ce40697e3d139", "8445d69f46de418788d8d739dedf83e6"]} id="V2m_T_-hsObS" outputId="b842407b-4aaa-4f7f-9d81-5dde9f83e9a4"
X_test_paths = X_test.file_path.unique()
for image in tqdm(X_test_paths):
cmd= 'python eagleview/scripts/efficientdet/model_inspect.py \
--runmode=saved_model_infer \
--model_name=efficientdet-d2 \
--saved_model_dir=eagleview/checkpoints/exported_model \
--hparams=eagleview/config/config.yaml \
--input_image='+image+' \
--output_image_dir=eagleview/results'
filename = image.split('/')[-1]
cmd_2 = 'mv eagleview/results/0.jpg eagleview/results/'+filename
os.system(cmd)
os.system(cmd_2)
# + colab={"base_uri": "https://localhost:8080/", "height": 119} id="F0UhedFVB8Q2" outputId="650b2e19-bf68-40c9-a216-79d99f56d5ec"
# !zip -r exported_model.zip eagleview/checkpoints/exported_model/
from google.colab import files
files.download("exported_model.zip")
# + id="VzcAQS0aJxep"
X_train.to_csv('./eagleview/data/train.csv', index=False)
# + id="Ht4_u61vKcZE"
X_valid.to_csv('./eagleview/data/valid.csv', index=False)
X_test.to_csv('./eagleview/data/test.csv', index=False)
# + colab={"base_uri": "https://localhost:8080/"} id="-Yt-OK-pMoqk" outputId="5fbd4b15-79f2-4e9e-e1e3-cfd96b9aef01"
# %cd eagleview/
# !git add scripts
# !git add config
# !git add data/train.csv
# !git add data/valid.csv
# !git add data/test.csv
# !git add notebooks
# !git add logs
# !git add results
# + colab={"base_uri": "https://localhost:8080/"} id="xZMlaMWILAGN" outputId="a87e7626-b78f-43d7-9de5-f3ac946a945e"
# !git commit -m "Harveen | Adding Training Scripts and Results"
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="KQG2BsbcNWW9" outputId="b4aa125f-9d73-4b63-c570-609062a58df1"
# %cd ../
# !zip -r eagleview_code.zip eagleview
from google.colab import files
files.download("eagleview_code.zip")
# + id="oo8iHw0lNyvO"
| setup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Wrapper for CMR
# `A python library to interface with CMR - Token Demo`
#
# This demo will show how to request an EDL token from CMR while inside a notebook.
# ## Demo Safe
# Just for this demo, I'm going to create a function that hides some of an EDL token — I don't want anyone to actually see my tokens
# +
def safe_token_print(actual_token):
    """Print *actual_token* with its middle masked so the secret never
    appears in clear text in the notebook output.

    Bug fix: the original masked only the first and last 5 characters and
    printed the *middle* of the token verbatim, which leaks almost the whole
    secret for realistically long tokens.
    """
    if actual_token is None:
        print ("no token")
        return
    scrub = 5
    if len(actual_token) <= 2 * scrub:
        # Too short to reveal anything safely: mask it entirely.
        print ("*" * len(actual_token))
        return
    masked = "*" * (len(actual_token) - 2 * scrub)
    print (actual_token[:scrub] + masked + actual_token[-scrub:])
print ("example:")
safe_token_print("012345678909876543210")
# -
# ## Loading the library
# This will not be needed once we have the library working through PIP, but today I'm going to use my local checkout.
import sys
sys.path.append('/Users/tacherr1/src/project/eo-metadata-tools/CMR/python/')
# ### Import the library
# This should be all you need after we get PIP support
import cmr.auth.token as t
# ## Using a token file
# In this example we are going to store our password in a file, listed below is how you can specify the file, however the setting is actually the assumed file if no setting is given.
# +
# Resolve a token from the default token file.
options = {"cmr.token.file": "~/.cmr_token"} #this is the default actually
safe_token_print(t.token(t.token_file, options))
# A file that does not exist: demonstrates the "no token" fallback path.
options = {"cmr.token.file": "~/.cmr_token_fake_file"}
safe_token_print(t.token(t.token_file, options))
# -
# ## Using Keychain on Mac OS X
# in this example I am using an already existing password saved securely in keychain. Keychain may require a human to type in the password, I have clicked "Always allow" so we may not see it.
options = {'token.manager.service': 'cmr lib token'} #this is not the default
safe_token_print(t.token(t.token_manager, options))
# ### Search both at once
# Try the keychain manager first, then fall back to the token file.
options = {"cmr.token.file": "~/.cmr_token_fake_file", 'token.manager.service': 'cmr lib token'}
safe_token_print(t.token([t.token_manager, t.token_file], options))
# ## Built in help
# I can't remember anything, so here is some built in help which pulls from the python docstring for each function of interest
print(t.print_help('token_'))
# ----
# The End
| CMR/python/demos/tokens.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="6dD24G7spF7m"
# ## Trabalho Prático 1
#
# Bootcamp Analista de Machine Learning @ IGTI
# + [markdown] id="o7zMDJGPjTDD"
# Attribute Information:
#
# Both hour.csv and day.csv have the following fields, except hr which is not available in day.csv
#
# - instant: record index
# - dteday : date
# - season : season (1:winter, 2:spring, 3:summer, 4:fall)
# - yr : year (0: 2011, 1:2012)
# - mnth : month ( 1 to 12)
# - hr : hour (0 to 23)
# - holiday : weather day is holiday or not (extracted from [Web Link])
# - weekday : day of the week
# - workingday : if day is neither weekend nor holiday is 1, otherwise is 0.
# - weathersit :
# - 1: Clear, Few clouds, Partly cloudy, Partly cloudy
# - 2: Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist
# - 3: Light Snow, Light Rain + Thunderstorm + Scattered clouds, Light Rain + Scattered clouds
# - 4: Heavy Rain + Ice Pallets + Thunderstorm + Mist, Snow + Fog
# - temp : Normalized temperature in Celsius. The values are derived via (t-t_min)/(t_max-t_min), t_min=-8, t_max=+39 (only in hourly scale)
# - atemp: Normalized feeling temperature in Celsius. The values are derived via (t-t_min)/(t_max-t_min), t_min=-16, t_max=+50 (only in hourly scale)
# - hum: Normalized humidity. The values are divided to 100 (max)
# - windspeed: Normalized wind speed. The values are divided to 67 (max)
# - casual: count of casual users
# - registered: count of registered users
# - cnt: count of total rental bikes including both casual and registered
# + id="9RkU8ObzjjMX"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
bikes = pd.read_csv('/content/drive/MyDrive/Data Science/Bootcamp Analista de ML/Módulo 1 - Introdução ao Aprendizado de Maquina/Desafio/comp_bikes_mod.csv')
# + id="QSMH34osj8ZA" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="e436a6a4-649f-4e41-cada-7351188cbfe2"
bikes.head()
# + id="WvI0b-pOkDKZ" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="0bd0bb4e-0b9c-43e1-aa51-09b39fb009b6"
bikes.describe()
# + id="Rzh8OYoJkKDB" colab={"base_uri": "https://localhost:8080/"} outputId="4f572a6f-45ae-4174-ae38-399353450f9a"
#No dataset utilizado para o desafio, quantas instâncias e atributos existem, respectivamente?
bikes.info()
# + id="moLbXgnQl6b_" colab={"base_uri": "https://localhost:8080/"} outputId="c07057b2-1bfb-4d12-f601-c1f2b366462b"
15641 + 1738
# + id="B-b70qcOjF9h" colab={"base_uri": "https://localhost:8080/"} outputId="84f0d927-a7c1-42bf-9e04-6139578e5d7d"
#Contando valores nulos
bikes['temp'].isnull().sum()
#Fazendo % de valores nulos
percentual = bikes['temp'].isnull().sum()/len(bikes)
percentual
# + id="bAxhRoORnBUe"
# Drop the rows whose 'dteday' is missing. Bug fix: the original call,
# bikes['dteday'].dropna(inplace=True), operated on a temporary Series view,
# so the DataFrame itself was never modified.
bikes.dropna(subset=['dteday'], inplace=True)
# + id="vxrcY6XgniGW" colab={"base_uri": "https://localhost:8080/"} outputId="d70b7978-40c5-441f-d322-fc4da4ed411b"
bikes.info()
# + id="o9Ye5SjRoUx8" colab={"base_uri": "https://localhost:8080/"} outputId="a8ba08a8-1fcc-4ad5-d2e7-ca4874f1e5d4"
bikes['temp'].describe()
# + id="4Zqn9Pmyor5x" colab={"base_uri": "https://localhost:8080/"} outputId="0d498b43-ed05-436e-fd9a-4e1e1bbfd5bb"
bikes['season'].unique()
# + id="YG3yLo0guGCl" colab={"base_uri": "https://localhost:8080/"} outputId="097ae2f1-e804-4294-ee0e-68450926d779"
pd.to_datetime(bikes['dteday'])
# + id="8mBS9IVNtDdy" colab={"base_uri": "https://localhost:8080/"} outputId="143458b3-5aac-488e-ada2-b0963070a848"
bikes['dteday'].head(-1)
# + id="6rDjov6mvysU"
import seaborn as sns
# + id="_AHdwNuzrgjf" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="9571b8c2-612b-4133-b4ff-65579ab0fb09"
#sns.boxplot(bikes['windspeed'], bikes['dteday'])
windspeed = sns.boxplot(x=bikes['windspeed'])
#df_compart_bikes.boxplot(['windspeed']) #boxplot para a velocidade do vento (['windspeed'])
# + id="HdS6RQv8u0jj" colab={"base_uri": "https://localhost:8080/", "height": 419} outputId="941e4901-86e3-4894-ba5a-05a59625cab6"
bikescorr = bikes[['season', 'temp', 'atemp', 'hum', 'windspeed', 'cnt']]
bikescorr
# + id="CQ1Lu1K7tEoH" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="207f27ff-d57a-4a8f-f5f6-c5149f3c6b4c"
sns.heatmap(bikescorr.corr())
# + id="VQFTm45YtOyP"
# Fill the null values of the "hum", "cnt" and "casual" columns with the column means.
# Use "hum" and "casual" as independent variables and "cnt" as the dependent one.
# Apply a linear regression. What is the R2 value? Use the inputs as the test set.
bikes.fillna(bikes.mean(), inplace=True)
# use scikit-learn to build the linear regression
from sklearn.linear_model import LinearRegression
# + id="8r0XhCYUljtb" colab={"base_uri": "https://localhost:8080/"} outputId="543777f0-42e7-4300-a646-8a822b80ea55"
bikes[["hum","cnt", "casual"]].isnull().sum()
# + id="RsjXyRBR1Rs0"
xbikes = np.array(bikes[['hum', 'casual']])
ybikes = np.array(bikes[['cnt']])
# + id="qnSMyUcKixs1"
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
# + id="LBKhJYK7ix8Z"
regressao = LinearRegression()
# + id="Vue1sW9GiyJc" colab={"base_uri": "https://localhost:8080/"} outputId="b20dcbaa-30bd-43b8-bc0c-14670301cea8"
regressao.fit(xbikes, ybikes)
# + id="PZEQe4qItPxK" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="d578d907-b644-4d2c-d45d-61522ff54a31"
from sklearn.model_selection import train_test_split
x_treinamento, x_teste, y_treinamento, y_teste = train_test_split(xbikes, ybikes, test_size = 0.3, random_state = 0)
ln = LinearRegression()
ln.fit(x_treinamento, y_treinamento)
predict1 = ln.predict(x_teste)
plt.scatter(y_teste, predict1)
# + id="_ft07YZq2F8_" colab={"base_uri": "https://localhost:8080/", "height": 339} outputId="d8c0bc3b-ed33-457f-bb02-92c306bdbdf0"
sns.distplot(y_teste-predict1)
# + id="v5core2k25n4" colab={"base_uri": "https://localhost:8080/"} outputId="00ce0e86-cc1e-46e2-aaa0-584e7b877898"
teste1score = ln.score(x_teste, y_teste)
teste1score
# + colab={"base_uri": "https://localhost:8080/"} id="hQJQIjOQsc2R" outputId="df70a671-085c-4b62-cd08-27821181e185"
# Baseline: decision tree regressor evaluated with 10-fold cross-validation
# (scores are per-fold R^2 on the full dataset).
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import cross_val_score
dtr = DecisionTreeRegressor(random_state=0)
cross_val_score(dtr, xbikes, ybikes, cv=10)
# + id="ke4GPz6PuEbu"
# Split first, then fit the tree ONLY on the training portion.  The original
# fit the tree on the full dataset (with a redundant uniform
# sample_weight=50) before splitting, so the test score computed below
# leaked training data and was optimistic.
x_treinamento, x_teste, y_treinamento, y_teste = train_test_split(xbikes, ybikes, test_size = 0.3, random_state = 0)
dtr.fit(x_treinamento, y_treinamento)
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="rXaO7x06uY6g" outputId="44bb7b01-fc1d-42e7-e13a-cedc2c03470d"
# Predicted vs. actual counts for the tree model on the test split.
predict2 = dtr.predict(x_teste)
plt.scatter(y_teste, predict2)
# + colab={"base_uri": "https://localhost:8080/"} id="SE2DpCjkuvme" outputId="7e513c68-1b49-40fb-d9ae-fccb7feda151"
# R^2 of the tree on the test split.
# NOTE(review): `dtr` above was fit on the FULL dataset before splitting,
# so this test score is optimistic -- refit on the training split only
# before trusting it.
teste2score = dtr.score(x_teste, y_teste)
teste2score
| modulo - 1 Fundamentos/trabalho_pratico1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="8GF4eJ054R0o" colab_type="text"
# # Data Analytics Project#
# ## Team 9 <br>
# ## <NAME><br>
# ## <NAME><br>
# ## <NAME>
# + [markdown] id="2DDbCTnv4akB" colab_type="text"
# >### Introduction:
# Subject area is Hospitality and Revenue Management demand forecasting.
# <br>
# About Data: <br>
# Data is about two hotels.One of the hotels is a resort hotel and the other is a city hotel. The structure of the dataset is with 32 variables describing the 119390 observations. Each observation represents a hotel or resort booking. Both datasets comprehend bookings due to arrive between the 1st of July of 2015 and the 31st of August 2017, including bookings that effectively arrived and bookings that were canceled.
# Project aims to do the following:
# 1. Descriptive analytics to further understand patterns, trends, and anomalies in data.
# 2. To understand and analyse bookings cancellations, customer segmentation, customer satiation, seasonality, etc.
# 3. One of our project goals is aiming at the development of prediction models to classify a hotel booking׳s likelihood to be canceled.
# 4. Evaluate the performance of various models and select best two models for tuning process.
#
# + id="5APR9DsZDDsh" colab_type="code" colab={}
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from scipy import stats
import statsmodels.api as sm
from sklearn import datasets, linear_model
from sklearn.linear_model import LogisticRegression
from scipy import stats
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from matplotlib import pyplot
from sklearn.model_selection import GridSearchCV
# + [markdown] id="aKrUfGp9B2l_" colab_type="text"
# ## Data Cleaning
# + id="REiiNsV3DdVW" colab_type="code" colab={}
# Load the raw hotel bookings dataset (119,390 rows x 32 columns per the intro).
df = pd.read_csv('hotel_bookings.csv')
# + [markdown] id="iZfz_1796Ljp" colab_type="text"
# >### **Finding Data Quality Issues:**
# 1. **Checking for Null, NA, NaN values**.<br>
# In some categorical variables like Agent or Company, “NULL” is presented as one of the categories. This should not be considered a missing value, but rather as “not applicable”. For example, if a booking “Agent” is defined as “NULL” it means that the booking did not come from a travel agent.
#
# + id="2fK19j9EDrJw" colab_type="code" outputId="d545f1ee-a0c9-408b-f4c0-0fe961218113" colab={"base_uri": "https://localhost:8080/", "height": 595}
# Per-column count of missing values in the raw data.
print("# of NULL in each columns:", df.isnull().sum(), sep='\n')
# + id="uzdHmu1T9ZDK" colab_type="code" outputId="1ee2cf8e-1a9d-4007-83e1-d1b34b6339c0" colab={"base_uri": "https://localhost:8080/", "height": 102}
#function to compute percentage of missing values in columns
def missing_values(x, y):
    """Return the percentage of missing (NaN) entries in *y* relative to len(*x*).

    Parameters
    ----------
    x : pandas.DataFrame (or any sized object)
        Object whose length supplies the denominator (the full dataset).
    y : pandas.Series
        Column whose null entries are counted.

    Returns
    -------
    float
        Missing-value ratio in percent.
    """
    # The denominator is len(x) (the whole frame), not len(y), so the ratio
    # stays relative to the full dataset even for a filtered column.
    return y.isnull().sum() / len(x) * 100
# Headline missing-value ratios for the four affected columns.
print('Missing value ratios:\nCompany: {}\nAgent: {}\nCountry: {}\nchildren: {}'.format(missing_values(df, df['company']),
missing_values(df, df['agent']),
missing_values(df, df['country']),
missing_values(df, df['children'])))
# + [markdown] id="IYmZE_7J9gKO" colab_type="text"
# **1. Children:**<br> For children variable, there are only 4 NULL entries and having NULL entries can be same as having no child or 0 children altogether.
# Therefore, substituting 0 in place of NULL entries.
# + id="JAU1z_tb93Ku" colab_type="code" colab={}
# Only 4 rows lack `children`; treat missing as "no children" (0).
# The original used `limit=4` -- a magic number tied to the current data --
# and chained inplace fillna on a column selection; a plain reassignment is
# robust to data changes and avoids chained-assignment pitfalls.
df['children'] = df['children'].fillna(0)
# + [markdown] id="I8-fMdrJ977P" colab_type="text"
# **2.** **For Agent variable**: <br>
# Since for direct bookings there are no agents required,NULL values can be values corresponding to Direct bookings. Since 13.68% of the data in agent column is NULL , values can't be deleted or not taken into consideration as it can be important for prediction. Therefore, we will substitute an ID ['000'] (for the prediction convenience, so that even this data can be used for it).
# + id="FeDffuio99pQ" colab_type="code" outputId="25461a9d-6c64-4648-f897-cec02175645f" colab={"base_uri": "https://localhost:8080/", "height": 187}
# For each market segment, count the bookings whose agent ID is missing.
_segments = ['Direct', 'Corporate', 'Offline TA/TO', 'Online TA',
             'Aviation', 'Complementary', 'Groups', 'Undefined']
_null_agent = {seg: len(df[(df.market_segment == seg) & (df.agent.isnull())])
               for seg in _segments}
p, q, r, s, t, u, v, w = (_null_agent[seg] for seg in _segments)
print('Missing value ratios:\nDirect: {}\nCorporate: {}\nOffline TA/TO: {}\nOnline TA: {}\nAviation: {}\nComplementary: {}\nGroups: {}\nUndefined: {}'.format(p,q,r,s,t,u,v,w))
print('Total missing values in agent:{}'.format(q+r+s+t+u+v+w))
# + id="Iubhws_x-Gya" colab_type="code" colab={}
# Direct bookings need no travel agent: encode their missing agent ID as 0.
df.loc[df['market_segment'] == 'Direct', 'agent'] = 0
# + [markdown] id="9ZAluYBu-Ni7" colab_type="text"
# 10333 NULL values remain that do not correspond to Direct bookings. Since they are only a small share of the data, the mode of the agent IDs can be used to impute them.
# + id="mC60v9Gf-Tb7" colab_type="code" outputId="cd7840a1-4fc8-4d68-9fc3-aee20edf991e" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Impute the remaining missing agent IDs with the most frequent agent ID.
# NOTE(review): the original computed the mode but then hard-coded 9.0;
# using the computed mode keeps the imputation correct if the data changes.
mode_agent = df['agent'].mode()[0]
df.loc[df['agent'].isnull(), 'agent'] = mode_agent
df['agent'].isnull().sum()
# + [markdown] id="Pb730A9k-bcc" colab_type="text"
# **4.** **For Company Variable:** <br>
# As observed, 94.3% of company column are missing values. Therefore there are less values for the imputation process. The best option is to drop company column.
# + id="vLqT4ZET-xEc" colab_type="code" colab={}
# `company` is ~94% missing -- far too sparse to impute, so drop the column.
df = df.drop(['company'], axis = 1)
# + [markdown] id="MNgXTKeR-0TG" colab_type="text"
# **3. For Country variable:** <br>
# There are 488 NULL values for "Country" variable: It is also common for hotels not to know the correct nationality of the customer until the moment of check-in. So if they have cancelled at the last moment it is possible to have null values for those observations.Let us check if that is so.Check whether booking was cancelled or not?
# + id="20K-aBAh-gmn" colab_type="code" outputId="b5cf4f04-e483-4969-b96a-8eaa8f157793" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Among rows with a missing country, count how many bookings were cancelled.
null_data = df[df['country'].isnull()]
len(null_data[null_data['is_canceled'] == 1])
# + [markdown] id="Rv71_AQl-k_y" colab_type="text"
# Only 67 out of 488 null values from country variable have status as cancelled.So apparently there is no relation between cancellations and null entries in the country variable.
# + id="nQsbJOA2M5xy" colab_type="code" colab={}
# Drop the remaining rows with any null (the 488 bookings lacking a country).
df=df.dropna()
# + [markdown] id="-ExZyuMsZuEW" colab_type="text"
# Confirm that no null values remain.
# + id="T_9oE1IiZraQ" colab_type="code" outputId="aa5cacb5-8e59-42eb-c228-8346c96657b3" colab={"base_uri": "https://localhost:8080/", "height": 578}
print("# of NULL in each columns:", df.isnull().sum(), sep='\n')
# + [markdown] id="SwdQ2wrRB_nG" colab_type="text"
# ## EDA
# + [markdown] id="YB2BWPnk-_rg" colab_type="text"
# >### **Exploratory Data Analysis:**<br>
# **1.** **Customer Segmentation** <br>
# **2.** **Customer Satiation** <br>
# **3.** **Seasonality** <br>
# <br>
# Will perform univariate, bi-variate and multivariate analysis.
# + [markdown] id="FvibC4zvAdiP" colab_type="text"
# 1. Descriptive Statistics <br>
# Univariate basically tells us how data in each feature is distributed and also tells us about central tendencies like mean, median, and mode.
# + id="6vbV79eKG2AQ" colab_type="code" outputId="53a25b36-b272-4581-f36c-979ac2c78dba" colab={"base_uri": "https://localhost:8080/", "height": 317}
# Summary statistics for all numeric columns.
df.describe()
# + [markdown] id="Cn-qHH3IYyvj" colab_type="text"
# **For numeric variables:** <br>
# * [is_canceled]: We can see that the average cancellations is just 37.04% where it deviates by 48% which means there is lots of variation between cancellations which directly affects productivity of the hotel. <br>
# * [total_of_special_requests] : Also, we can see that 75% of people ask for 1 special request.
# + id="_3fs2FlrYRUd" colab_type="code" outputId="9595f9f7-b364-4229-eebf-db0a21ae505b" colab={"base_uri": "https://localhost:8080/", "height": 142}
# Distribution of the number of adults per booking, split by hotel type.
df['adults'].groupby(df['hotel']).describe()
# + id="0f18D_gOYA2_" colab_type="code" outputId="434e719f-652e-41f0-a30a-18a435417597" colab={"base_uri": "https://localhost:8080/", "height": 142}
# Same summary for the number of children per booking, per hotel type.
df['children'].groupby(df['hotel']).describe()
# + [markdown] id="L7rg8Z6PYZFB" colab_type="text"
# It seems that mean values for adults and children are higher. This means that resort hotels are better choice for large families.
# + id="j47bob5iaccZ" colab_type="code" outputId="8f5fb4ba-72ac-424e-b3f1-f69c34678b9b" colab={"base_uri": "https://localhost:8080/", "height": 119}
# Frequency of each meal package (BB = bed & breakfast dominates).
df.meal.value_counts()
# + id="x4yWPGiHbXEK" colab_type="code" outputId="2943eed5-a4fe-46af-8b23-4458795f505c" colab={"base_uri": "https://localhost:8080/", "height": 170}
# Frequency of each market-segment designation.
df.market_segment.value_counts()
# + [markdown] id="XGWEHMO9b0jd" colab_type="text"
# **For Categorical variables:** <br>
# * Frequency table of Type of meal booked is shown. Almost 78% of the people book for BB meal type i.e Bed and breakfast.
# * 47.30% of market segment designation is of Online Travel Agents.
#
# + id="ujqzc1y5yUMN" colab_type="code" outputId="3069163e-45ca-40ad-ca28-4593084c00e8" colab={"base_uri": "https://localhost:8080/", "height": 542}
# Distribution of bookings across market segments.
segments=df["market_segment"].value_counts()
# Pie plot: one slice per market segment with a fixed colour per segment.
fig = px.pie(segments,
values=segments.values,
names=segments.index,
title="Bookings per market segment",
#template="seaborn",
color =segments.index,
color_discrete_map={'Aviation':'lightcyan',
'Complementary':'cyan',
'Corporate':'DeepSkyBlue',
'Direct':'darkblue',
'Offline TA/TO':'yellow',
'Online TA':'lightskyblue',
'Undefined':'green'})
fig.update_traces(rotation=-90, textinfo="percent+label")
fig.show()
# + [markdown] id="c02awM6UdZO9" colab_type="text"
# About 47.4% of people book through Online Travel Agents whereas 20.3% book through Offline Travel Agents.
# + [markdown] id="P0caIeLmzZjj" colab_type="text"
# #### **1.** **Customer Segmentation and Satiation:**
# Answer following questions:
# 1. Where do the guest come from?
# 2. Customer distribution?
# 3. Hotel type with more time spent by customers.
# 4. Repeated guest effect on cancellations
# 5. How long do repeated people stay at the hotel?
# 6. Bookings and cancellations
#
# + id="P_D-dgh51P23" colab_type="code" outputId="214a651a-f4ed-4eca-a9c5-23da21489923" colab={"base_uri": "https://localhost:8080/", "height": 542}
# Where do the guests come from?
# Count non-cancelled bookings per country and express them as percentages.
country_data = pd.DataFrame(df.loc[df["is_canceled"] == 0]["country"].value_counts())
country_data.rename(columns={"country": "Number of Guests"}, inplace=True)
total_guests = country_data["Number of Guests"].sum()
country_data["Guests in %"] = round(country_data["Number of Guests"] / total_guests * 100, 2)
country_data["country"] = country_data.index
# Show the percentages on a world choropleth map.
guest_map = px.choropleth(country_data,
locations=country_data.index,
color=country_data["Guests in %"],
hover_name=country_data.index,
color_continuous_scale=px.colors.sequential.Plasma,
title="Home country of guests")
guest_map.show()
# + [markdown] id="xVDthWwOvVFO" colab_type="text"
# Highest number of bookings are done by the customers coming from Portugal.
# + id="r8PpZBO3y_iE" colab_type="code" outputId="078d6fee-18d4-45ac-b60a-709df7eb3f03" colab={"base_uri": "https://localhost:8080/", "height": 466}
# Uni-variate analysis: share of each customer type.
plt.rcParams['figure.figsize'] = 8,8
labels = df['customer_type'].value_counts().index.tolist()
# Convert value counts to list
sizes = df['customer_type'].value_counts().tolist()
# explode determines how far each slice is pulled out of the pie.
# NOTE(review): explode/colors assume exactly four customer types -- confirm
# if the category set ever changes.
explode = (0,0,0,0.1)
# Determine colour of pie chart
colors = ['lightskyblue','yellow','royalblue','#ADFF2F']
# textprops adjusts the label font size.
plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct='%1.1f%%',startangle=90, textprops={'fontsize': 14})
plt.show()
# + [markdown] id="4RxCmSf0zH0B" colab_type="text"
# About 75% of people are Transient Type.Only 0.5% bookings are in groups.
# + id="Oy2HJteQzVbZ" colab_type="code" outputId="daca9df5-9c10-4c9a-85e3-c9ff9fc659a6" colab={"base_uri": "https://localhost:8080/", "height": 261}
# Bi-variate analysis: cancellation counts per customer type.
# `x=` is passed by keyword: seaborn 0.12+ deprecated (and later removed)
# positional data arguments, so the original positional "is_canceled" breaks
# on current seaborn.
g = sns.catplot(x="is_canceled", col="customer_type", col_wrap=4,
                data=df[df.customer_type.notnull()],
                kind="count", height=3.5, aspect=.8,
                palette='pastel')
plt.show()
# + [markdown] id="QF9ax1Jwzp1P" colab_type="text"
# Most number of cancellations are done by Transient type
# + id="w85kZERfxV-e" colab_type="code" outputId="86ab5361-7b6e-4040-b945-2c9c1c4a2d14" colab={"base_uri": "https://localhost:8080/", "height": 368}
# Multi-variate analysis: lead time vs. customer type, coloured by
# cancellation status.
# NOTE(review): the original also rebuilt `country_data` here filtered on
# is_canceled == 0 OR == 1 (i.e. every row); that result was never used
# afterwards, so the dead computation is dropped.
sns.catplot(x="customer_type", y="lead_time", data=df, hue="is_canceled");
# + [markdown] id="vAPNri1CveC1" colab_type="text"
# Transient-Party customer category shows very few cancellations even with a decent lead-time. The Transient customer type has the highest number of cancellations. Also it can be seen that more the no of days took as lead time, more cancellations were made.
# + id="PsMrMw8t-yDF" colab_type="code" outputId="1e455766-4f82-46aa-843c-d6adacfe4bf7" colab={"base_uri": "https://localhost:8080/", "height": 606}
# Week-night stays per customer type, split by hotel.
plt.figure(figsize = (10,10))
sns.boxplot(x = "customer_type", y = "stays_in_week_nights", data = df, hue = "hotel", palette = 'Set1');
# + id="hLuXZXZgdMTn" colab_type="code" outputId="2216a007-dee1-48c6-feb7-76ec61ac1c7a" colab={"base_uri": "https://localhost:8080/", "height": 280}
# Build a small frame for visualization with a derived "total_guests" feature.
# NOTE: pd.concat's axis argument is keyword-only in pandas 2.x, so the
# original positional `1` is spelled out as axis=1; the three chained
# concats are merged into one (same resulting column order).
new1 = pd.DataFrame()
new1['total_guests'] = df['adults'] + df['children'] + df['babies']
new1 = pd.concat([new1, df['is_canceled'], df['hotel'], df['is_repeated_guest']], axis=1)
ax = sns.barplot(x='is_canceled', y='total_guests', data=new1, hue='is_repeated_guest');
# + [markdown] id="QP56kQeJu3tK" colab_type="text"
# More number of repeated guests cancel bookings.
# + id="6s4sIhdeys9H" colab_type="code" outputId="761fc128-dace-422c-ee94-1033694edff5" colab={"base_uri": "https://localhost:8080/", "height": 415}
# Room-type / cancellation view with total nights stayed per booking.
new = df[['reserved_room_type', 'is_canceled', 'assigned_room_type']].copy()
new['total_nights_stayed'] = df['stays_in_weekend_nights'] + df['stays_in_week_nights']
# axis is keyword-only in pandas 2.x (the original passed it positionally).
new = pd.concat([new, df['is_repeated_guest']], axis=1)
plt.figure(figsize=(20,8))
ax = sns.countplot(x="total_nights_stayed", data = new, palette="tab10",hue='is_repeated_guest')
plt.title('Total Nights Stayed')
plt.xlabel('total_nights_stayed')
plt.ylabel('Total Count')
# Annotate each bar with its count just above the bar top.
for p in ax.patches:
    ax.annotate((p.get_height()),(p.get_x()-0.1 , p.get_height()+100))
# + [markdown] id="9JpWQOOM1tWl" colab_type="text"
# Most of the customers on average stayed from about 1-4 days. Of those very few were repeated customers. It seems that repeated guests do not find hotel/ resort promising. Management system should focus on repeated guests as well.
# + id="fCiGle_KxoB8" colab_type="code" colab={}
# Flag bookings that were assigned exactly the room type they reserved.
# A direct column comparison covers every room code (A-H, L, P) and any
# future codes, replacing the original's ten per-letter assignments.
new['Given_same_roomtype'] = (df.reserved_room_type == df.assigned_room_type).astype(int)
# Enrich the frame for the plots below (axis is keyword-only in pandas 2.x).
# NOTE(review): the original concatenated 'is_repeated_guest' a second time
# here, producing a duplicate column; the redundant concat is removed.
new = pd.concat([new, new1['total_guests'], df['customer_type'],
                 df['lead_time'], df['arrival_date_year']], axis=1)
new['total_nights_stayed'] = df['stays_in_weekend_nights'] + df['stays_in_week_nights']
# + id="HNZ52QVs299o" colab_type="code" outputId="3716bfbb-5ea4-42c1-feb3-f93547a6e565" colab={"base_uri": "https://localhost:8080/", "height": 498}
# Cancellations split by whether the reserved room type was honoured.
ax= sns.countplot(data = new, x = 'Given_same_roomtype', hue = 'is_canceled')
plt.show()
# + [markdown] id="BBMs1s0I4b2F" colab_type="text"
# As we can see, more number of customers those have been assigned the same room type which they reserved did no cancelations.
# Also even if they are given the same room type there are still cancellations which are in significant numbers. This can be due to other reasons which are not taken into consideration.
# + [markdown] id="QswrDIKl-Zmz" colab_type="text"
# 3. Seasonality
# + id="H7FRO9QP6lni" colab_type="code" outputId="8713c886-b2c0-42b3-a1a0-eb160335a310" colab={}
# Calendar-ordered months so the x-axis follows the year.
ordered_months = ["January", "February", "March", "April", "May", "June",
"July", "August", "September", "October", "November", "December"]
df["arrival_date_month"] = pd.Categorical(df["arrival_date_month"], categories=ordered_months, ordered=True)
# Line plot of average daily rate per month with a standard-deviation band.
# NOTE(review): ci="sd" is deprecated in seaborn 0.12+ (use errorbar="sd");
# kept as-is for compatibility with the original environment.
plt.figure(figsize=(12, 8))
sns.lineplot(x = "arrival_date_month", y="adr", hue="hotel", data=df,
ci="sd", size="hotel", sizes=(2.5, 2.5))
plt.title("Room price per night and person over the year", fontsize=16)
plt.xlabel("Month", fontsize=16)
plt.xticks(rotation=45)
plt.ylabel("Price [EUR]", fontsize=16)
plt.show()
# + [markdown] id="YzD_d3ZQdVlY" colab_type="text"
# Outlier Detection
# + id="WgDBderwOd0a" colab_type="code" outputId="9e6fbad7-999b-4d0f-d318-23c2cf8242f8" colab={"base_uri": "https://localhost:8080/", "height": 577}
# Box plots BEFORE outlier removal for the three widest-ranging features.
plt.figure(figsize=(20,10))
for pos, feature in enumerate(['lead_time', 'adr', 'stays_in_week_nights'], start=1):
    plt.subplot(1, 3, pos)
    sns.boxplot(y=df[feature])
plt.show()
# + id="DUsrodj2ciUV" colab_type="code" colab={}
def outliers_IQR(data, col):
    """Filter *data* to rows whose *col* value lies within the Tukey fences.

    Rows are kept when col is inside [Q1 - 1.5*IQR, Q3 + 1.5*IQR],
    bounds inclusive (Series.between's default).

    Parameters
    ----------
    data : pandas.DataFrame
        Frame to filter.
    col : str
        Name of the numeric column to test.

    Returns
    -------
    pandas.DataFrame
        *data* restricted to the non-outlier rows.
    """
    lower_quartile = data[col].quantile(0.25)
    upper_quartile = data[col].quantile(0.75)
    iqr = upper_quartile - lower_quartile
    outlier_thresh = 1.5 * iqr
    return data[data[col].between(lower_quartile - outlier_thresh,
                                  upper_quartile + outlier_thresh)]
# Drop outlier rows for the long-tailed numeric features, one at a time.
df = outliers_IQR(df, 'lead_time')
df = outliers_IQR(df, 'stays_in_weekend_nights')
df = outliers_IQR(df, 'stays_in_week_nights')
df = outliers_IQR(df, 'days_in_waiting_list')
df = outliers_IQR(df, 'adr')
# + id="9xna2GuwdLbu" colab_type="code" outputId="889e9b71-3b1d-49a3-e4e1-eedd23a2f65a" colab={"base_uri": "https://localhost:8080/", "height": 577}
# Re-draw the box plots AFTER outlier removal to confirm the trimming.
plt.figure(figsize=(20,10))
for pos, feature in enumerate(['lead_time', 'adr', 'stays_in_week_nights'], start=1):
    plt.subplot(1, 3, pos)
    sns.boxplot(y=df[feature])
plt.show()
# + id="hWjiwAoZoHd0" colab_type="code" colab={}
# Feature engineering: combine guest counts into one column, drop raw parts
# and identifiers, then one-hot encode the categorical variables.
df['total_guest']=df['adults']+df['babies']+df['children']
df = df.drop(['country','reservation_status_date','adults','babies','children'],axis =1)
df_dummies = pd.get_dummies(df,columns = ['hotel','arrival_date_month','meal','market_segment','distribution_channel','reserved_room_type','assigned_room_type','deposit_type','customer_type','reservation_status'])
# + [markdown] id="JXDCXcmxCQ3Q" colab_type="text"
# ## Feature Selection
# + id="jcJxzO2folAT" colab_type="code" outputId="e3eb1be0-6781-4331-8187-183e5ade970e" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# OLS on the binary target as a quick linear-probability screen of the
# candidate features (the summary's p-values guide the manual drop below).
X = df_dummies.drop(columns=['is_canceled'])
Y = df_dummies['is_canceled'].values
X2 = sm.add_constant(X)
est = sm.OLS(Y, X2)
est2 = est.fit()
print(est2.summary())
# + id="BWSWKV0rouDz" colab_type="code" colab={}
# Drop weak predictors.  Dropping the reservation_status_* dummies is
# essential: they encode the target itself (a cancelled booking always has
# reservation_status 'Canceled'), so keeping them would leak the label.
X = X.drop(columns=['arrival_date_year', 'arrival_date_week_number',
'arrival_date_day_of_month', 'stays_in_weekend_nights',
'stays_in_week_nights', 'is_repeated_guest', 'previous_cancellations',
'previous_bookings_not_canceled', 'booking_changes','required_car_parking_spaces',
'total_of_special_requests', 'total_guest','reservation_status_Canceled','reservation_status_Check-Out',
'reservation_status_No-Show'])
# + [markdown] id="Qyiclc76CCFA" colab_type="text"
# ## Training and Testing split
# + id="jPEOrWesxVa9" colab_type="code" colab={}
# 70/30 train/test split shared by all models below (fixed seed for
# comparability across classifiers).
x_train, x_test, y_train, y_test = train_test_split(X,Y,test_size = 0.3,random_state=0)
# + [markdown] id="Dh_zOtNeCHFm" colab_type="text"
# ## Base Models
# + id="AHWfvKMI1JqB" colab_type="code" colab={}
# Collect test accuracies per model for the comparison bar chart.
accuracies = {}
# + id="OXTC4Zj60_Da" colab_type="code" outputId="f5ebc2bb-3606-42ae-deef-e9e2292eaed1" colab={"base_uri": "https://localhost:8080/"}
# Baseline logistic regression (high max_iter so the solver converges).
lr = LogisticRegression(max_iter=10000)
lr.fit(x_train,y_train)
acc = lr.score(x_test,y_test)*100
accuracies['Logistic Regression'] = acc
print("Test Accuracy {:.2f}%".format(acc))
# + id="ONh-34jQ2h_y" colab_type="code" outputId="cf56e83b-69cc-4232-c1eb-9638e2c93b64" colab={"base_uri": "https://localhost:8080/"}
# Gaussian Naive Bayes baseline.
from sklearn.naive_bayes import GaussianNB
nb_G = GaussianNB()
nb_G = nb_G.fit(x_train, y_train)
acc = nb_G.score(x_test,y_test)*100
accuracies['Naive Bayes'] = acc
print("Test Accuracy {:.2f}%".format(acc))
# + id="eqth5amv21u4" colab_type="code" outputId="40615a38-0718-4fc9-9c63-413b134101f3" colab={"base_uri": "https://localhost:8080/"}
# k-nearest neighbours baseline (sklearn default k=5).
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn = knn.fit(x_train, y_train)
acc = knn.score(x_test,y_test)*100
accuracies['KNN'] = acc
print("Test Accuracy {:.2f}%".format(acc))
# + id="EtYlYaoo3kjW" colab_type="code" outputId="107cd821-e457-4e1b-f024-75fd706e8872" colab={"base_uri": "https://localhost:8080/"}
# Random forest baseline with default hyper-parameters.
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
rf.fit(x_train, y_train)
acc = rf.score(x_test,y_test)*100
accuracies['RF'] = acc
print("Test Accuracy {:.2f}%".format(acc))
# + id="TcEQ72z33lD3" colab_type="code" outputId="3b879e55-ac4f-41e5-b9c5-3a4c6d5eb468" colab={"base_uri": "https://localhost:8080/"}
# Gradient boosting baseline with default hyper-parameters.
from sklearn.ensemble import GradientBoostingClassifier
gradient_boosting = GradientBoostingClassifier()
gradient_boosting.fit(x_train, y_train)
acc = gradient_boosting.score(x_test,y_test)*100
accuracies['GB'] = acc
print("Test Accuracy {:.2f}%".format(acc))
# + id="mXd6jGeJ35Al" colab_type="code" outputId="dfe4f7a6-4e18-4579-c684-d977152b2d82" colab={"base_uri": "https://localhost:8080/"}
# Bar chart comparing the collected base-model test accuracies.
sns.set_style("whitegrid")
plt.figure(figsize=(16,5))
plt.yticks(np.arange(0,100,10))
plt.ylabel("Accuracy %")
plt.xlabel("Algorithms")
sns.barplot(x=list(accuracies.keys()), y=list(accuracies.values()), palette='pastel')
plt.show()
# + [markdown] id="XqEGMBrSCVrb" colab_type="text"
# ## Logistic
# + colab_type="code" id="9gsF1XHY_DHd" outputId="26842bcc-930c-415f-a034-d5ff884d55dd" colab={"base_uri": "https://localhost:8080/", "height": 98}
# Refit logistic regression and report confusion matrix / per-class metrics.
lr = LogisticRegression(max_iter=10000)
lr.fit(x_train,y_train)
# + colab_type="code" id="NCR_Ys05Ax2I" outputId="ea8ea543-cac5-496c-c7ee-b7a2ccdaa7ee" colab={"base_uri": "https://localhost:8080/", "height": 261}
predictions = lr.predict(x_test)
print("\n Confusion Matrix \n",confusion_matrix(y_test,predictions))
print("\n Classification Report \n",classification_report(y_test,predictions))
# + colab_type="code" id="vPj6cZnYAxzu" outputId="ff30467a-b1f7-4bea-e5a4-353ebf0ffee9" colab={"base_uri": "https://localhost:8080/", "height": 66}
# Hyper-parameter search for logistic regression.
# NOTE(review): the default lbfgs solver does not support penalty='l1', so
# the original grid silently failed on every l1 candidate; liblinear
# handles both penalties.
param_grid = {'C': [0.1, 0.5, 1.0, 5.0, 10.0],
              'penalty': ['l1', 'l2'],
              'solver': ['liblinear'],
              'max_iter': [10000]}
grid = GridSearchCV(LogisticRegression(),param_grid,n_jobs=3)
grid = grid.fit(x_train,y_train)
grid_predictions = grid.predict(x_test)
print("\n Best Parameters \n",grid.best_params_)
# + id="kMNGkwENuDbr" colab_type="code" outputId="eb201f2b-8444-4e3e-f915-42900952483c" colab={"base_uri": "https://localhost:8080/", "height": 196}
print("\n Classification Report \n",classification_report(y_test,grid_predictions))
# + colab_type="code" id="2uL2g4gzYbHN" colab={}
# Confusion matrix of the tuned logistic model (kept for later comparison).
lrc = confusion_matrix(y_test,grid_predictions)
# + colab_type="code" id="dXRmGnocAw-D" colab={}
# Persist several C/penalty variants of the logistic model with pickle.
lr = LogisticRegression(C=1.0, penalty='l2', max_iter=10000)
lr.fit(x_train,y_train)
lr_pred = lr.predict(x_test)
pkl_filename = "lr.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(lr,file)
# + colab_type="code" id="AM7ada7_FC9u" colab={}
lr_1 = LogisticRegression(C=0.1, penalty='l2', max_iter=10000 )
lr_1.fit(x_train,y_train)
pr_lr = lr_1.predict(x_test)
pkl_filename = "lr1.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(lr_1,file)
# + colab_type="code" id="BK-OZZsRFDKI" outputId="92173b07-ffd1-4b8b-b572-e56a888d53e8" colab={"base_uri": "https://localhost:8080/", "height": 82}
# NOTE(review): with no penalty the C value is ignored, and the string
# 'none' is deprecated/removed in newer scikit-learn (use penalty=None).
lr_2 = LogisticRegression(C=5.0, penalty='none', max_iter=10000)
lr_2.fit(x_train,y_train)
predictions = lr_2.predict(x_test)
pkl_filename = "lr2.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(lr_2,file)
# + colab_type="code" id="4r9g4_7iFDZg" colab={}
# The l1 penalty requires a solver that supports it, hence liblinear here.
lr_3 = LogisticRegression(C=5.0, penalty='l1', max_iter=10000,solver = "liblinear")
lr_3.fit(x_train,y_train)
lr1_pre = lr_3.predict(x_test)
pkl_filename = "lr3.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(lr_3,file)
# + colab_type="code" id="m-my_6_zFDwU" colab={}
lr_4 = LogisticRegression(C=10.0, penalty='l2', max_iter=10000)
lr_4.fit(x_train,y_train)
lr5_pre = lr_4.predict(x_test)
pkl_filename = "lr4.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(lr_4,file)
# + [markdown] id="xCHHVFXMCZS4" colab_type="text"
# ## Random Forest
# + colab_type="code" id="IPH0IGEWM9y8" outputId="4edc787c-a926-41ee-94a9-b9628e578881" colab={"base_uri": "https://localhost:8080/", "height": 147}
# Re-instantiate the default random forest for detailed evaluation.
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
model = rf.fit(x_train, y_train)
model
# + colab_type="code" id="xFHz9IPUjBtD" outputId="b3e161f3-cbb0-4157-bad7-aca23377cd60" colab={"base_uri": "https://localhost:8080/", "height": 261}
# Confusion matrix / per-class metrics plus accuracy bookkeeping.
predictions = rf.predict(x_test)
print("\n Confusion Matrix \n",confusion_matrix(y_test,predictions))
print("\n Classification Report \n",classification_report(y_test,predictions))
acc = rf.score(x_test,y_test)*100
accuracies['Random Forest'] = acc
# + colab_type="code" id="USMQ9HG8MstH" outputId="f05300b1-bb8d-43f8-a0af-895322515f27" colab={"base_uri": "https://localhost:8080/", "height": 131}
# Hyper-parameter search for the random forest.
# NOTE(review): max_depth=0 is invalid for RandomForestClassifier (it must
# be None or a positive int) and made those grid candidates fail; the bare
# int 1 in max_features means "one feature", whereas 1.0 (all features)
# matches the fractional pattern of the other values.
param_grid = { 'bootstrap': [True],
               'max_depth': [None, 10, 100],
               'max_features': [0.1, 0.5, 1.0],
               'min_samples_leaf': [1, 3, 5],
               'min_samples_split': [2, 4, 8],
               'n_estimators': [100, 200, 300]}
grid = GridSearchCV(RandomForestClassifier(),param_grid,n_jobs=3)
grid = grid.fit(x_train,y_train)
grid_predictions = grid.predict(x_test)
print("\n Best Parameters \n",grid.best_params_)
# + colab_type="code" id="zBJf47q6fH2J" outputId="c6fa252c-24ba-411c-d025-be8ea86c50c7" colab={"base_uri": "https://localhost:8080/", "height": 261}
# Metrics for the best grid-search forest.
print("\n Confusion Matrix \n",confusion_matrix(y_test,grid_predictions))
print("\n Classification Report \n",classification_report(y_test,grid_predictions))
# + colab_type="code" id="nghIQuDFXs2L" colab={}
# Keep the tuned-forest confusion matrix for later comparison.
rfc = confusion_matrix(y_test,grid_predictions)
# + colab_type="code" id="wyrVdEGT4FMq" colab={}
# Persist five random-forest variants around the grid-search optimum.
rf_1 = RandomForestClassifier(bootstrap=True,max_depth=100,max_features=0.5,min_samples_leaf=1,min_samples_split=4,n_estimators=300)
rf_1.fit(x_train, y_train)
rf_pred = rf_1.predict(x_test)
pkl_filename = "rf.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(rf_1,file)
# + colab_type="code" id="DJbCI7lD6hWZ" colab={}
rf_2 = RandomForestClassifier(bootstrap=True,max_depth=100,max_features=0.5,min_samples_leaf=3,min_samples_split=2,n_estimators=100)
rf_2.fit(x_train, y_train)
predictions = rf_2.predict(x_test)
pkl_filename = "rf1.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(rf_2,file)
# + colab_type="code" id="ZxyCR4oP6hki" colab={}
rf_3 = RandomForestClassifier(bootstrap=True,max_depth=10,max_features=0.5,min_samples_leaf=1,min_samples_split=2,n_estimators=100)
rf_3.fit(x_train, y_train)
rf3_predict = rf_3.predict(x_test)
import pickle  # already imported at the top of the notebook; harmless re-import
pkl_filename = "rf2.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(rf_3,file)
# + colab_type="code" id="m3D4LIC86hvY" colab={}
rf_4 = RandomForestClassifier(bootstrap=True,max_depth=10,max_features=0.1,min_samples_leaf=1,min_samples_split=2,n_estimators=100)
rf_4.fit(x_train, y_train)
predictions = rf_4.predict(x_test)
import pickle
pkl_filename = "rf3.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(rf_4,file)
# + colab_type="code" id="dR5Fcyv26h7k" colab={}
# NOTE(review): max_features=1 here is the integer form (one feature per
# split), not the fraction 1.0 -- confirm which was intended.
rf_5 = RandomForestClassifier(bootstrap=True,max_depth=50,max_features=1,min_samples_leaf=5,min_samples_split=8,n_estimators=200)
rf_5.fit(x_train, y_train)
rf5_predict = rf_5.predict(x_test)
import pickle
pkl_filename = "rf4.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(rf_5,file)
# + [markdown] id="7ngS4KO9CbOm" colab_type="text"
# ## GBM
#
# + colab_type="code" id="bv1I7VU0Nall" outputId="4e770980-ed08-450f-bdc1-2bb759ea3144" colab={"base_uri": "https://localhost:8080/", "height": 180}
# Baseline gradient-boosting classifier with default hyperparameters.
from sklearn.ensemble import GradientBoostingClassifier
gradient_boosting = GradientBoostingClassifier(random_state=42)
modelg=gradient_boosting.fit(x_train, y_train)
modelg
# + id="26hkaGFZ3jw-" colab_type="code" outputId="9bf7cbd0-da41-4a70-f82c-378cb9a698cf" colab={"base_uri": "https://localhost:8080/", "height": 261}
# Evaluate the baseline GBM on the held-out test split.
predictions = gradient_boosting.predict(x_test)
print("\n Confusion Matrix \n",confusion_matrix(y_test,predictions))
print("\n Classification Report \n",classification_report(y_test,predictions))
# + colab_type="code" id="b9O0mcwggDMI" outputId="0b3081d1-4b29-411d-e716-2121b763bf5f" colab={"base_uri": "https://localhost:8080/", "height": 66}
# Grid search over tree count, depth and learning rate (3 parallel workers).
param_grid = {'n_estimators': [100, 200, 300],
              'max_depth': [3, 6, 9],
              'learning_rate': [0.1,0.5,1.0]}
grid = GridSearchCV(GradientBoostingClassifier(),param_grid,n_jobs=3)
grid = grid.fit(x_train,y_train)
grid_predictions = grid.predict(x_test)
print("\n Best Parameters \n",grid.best_params_)
# + colab_type="code" id="tKj3lrnzgXX6" outputId="ecc97e15-81c9-444f-fe32-e42e9ebeec95" colab={"base_uri": "https://localhost:8080/", "height": 261}
print("\n Confusion Matrix \n",confusion_matrix(y_test,grid_predictions))
print("\n Classification Report \n",classification_report(y_test,grid_predictions))
# + colab_type="code" id="c4Kja213X7qi" colab={}
# Keep the tuned-GBM confusion matrix for the combined heatmap figure below.
gbc = confusion_matrix(y_test,grid_predictions)
# + colab_type="code" id="ZmJv_PLn8lmh" colab={}
# Fit five GBM variants with hand-picked hyperparameter combinations and
# pickle each fitted estimator for later reuse.
gb = GradientBoostingClassifier(n_estimators=300,max_depth=9,learning_rate=0.1)
gb = gb.fit(x_train, y_train)
gb_predict = gb.predict(x_test)
pkl_filename = "gB.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(gb,file)
# + colab_type="code" id="BG16HQYFzv5T" colab={}
gb_1 = GradientBoostingClassifier(n_estimators=100,max_depth=6,learning_rate=0.1)
gb_1 = gb_1.fit(x_train, y_train)
pred_gb1 = gb_1.predict(x_test)
pkl_filename = "gB1.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(gb_1,file)
# + colab_type="code" id="xiq_PoHW83QY" colab={}
gb_2 = GradientBoostingClassifier(n_estimators=200,max_depth=3,learning_rate=0.5)
gb_2 = gb_2.fit(x_train, y_train)
gb2_pred = gb_2.predict(x_test)
pkl_filename = "gB2.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(gb_2,file)
# + colab_type="code" id="9EwoWsem833d" colab={}
gb_3 = GradientBoostingClassifier(n_estimators=300,max_depth=6,learning_rate=1.0)
gb_3 = gb_3.fit(x_train, y_train)
predictions = gb_3.predict(x_test)
pkl_filename = "gB3.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(gb_3,file)
# + colab_type="code" id="XqympHEU84Q8" colab={}
gb_4 = GradientBoostingClassifier(n_estimators=100,max_depth=9,learning_rate=1.0)
gb_4 = gb_4.fit(x_train, y_train)
predictions = gb_4.predict(x_test)
pkl_filename = "gB4.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(gb_4,file)
# + [markdown] id="LUjQ0t_w5JDA" colab_type="text"
# ## Confusion Matrix
# + colab_type="code" id="nJ7MQPa9XVvz" outputId="7e1dc6ac-c9f9-4309-f844-b3a7ed308a6e" colab={"base_uri": "https://localhost:8080/", "height": 346}
# Side-by-side heatmaps of the three tuned models' confusion matrices.
# NOTE(review): lrc is assumed to hold the logistic-regression confusion
# matrix stored earlier in the notebook (not visible in this chunk) — verify.
plt.figure(figsize=(20,10))
plt.suptitle("Confusion Matrixes",fontsize=24)
plt.subplots_adjust(wspace = 0.4, hspace= 0.4)
plt.subplot(2,3,1)
plt.title("Logistic Regression Confusion Matrix")
sns.heatmap(lrc,annot=True,cmap="Blues",fmt="d",cbar=False, annot_kws={"size": 24})
plt.subplot(2,3,2)
plt.title("Random Forest Confusion Matrix")
sns.heatmap(rfc,annot=True,cmap="Blues",fmt="d",cbar=False, annot_kws={"size": 24})
plt.subplot(2,3,3)
plt.title("Gradient Boosting Classifier Confusion Matrix")
sns.heatmap(gbc,annot=True,cmap="Blues",fmt="d",cbar=False, annot_kws={"size": 24})
plt.show()
# + [markdown] id="S4WoXtT5CeKG" colab_type="text"
# ## Feature Importance
# + id="U1b287nQxYxA" colab_type="code" outputId="cf5a7ed1-ddbb-4e21-cf9b-abaace968c67" colab={"base_uri": "https://localhost:8080/", "height": 495}
# Refit the best random-forest configuration and rank features by the
# impurity-based importances exposed by the fitted model.
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(bootstrap=True,max_depth=100,max_features=0.5,min_samples_leaf=1,min_samples_split=4,n_estimators=300)
model = rf.fit(x_train, y_train)
importances = model.feature_importances_
# Feature indices ordered from most to least important (descending sort).
indices = np.argsort(importances)[::-1]
feature_names = list(X.columns)
# Rearrange feature names so they match the sorted feature importances
names = [feature_names[i] for i in indices]
# Create plot
plt.figure(figsize=(12,5))
# Create plot title
plt.title("Feature Importance")
# Add bars
plt.bar(range(X.shape[1]), importances[indices])
# Add feature names as x-axis labels
plt.xticks(range(X.shape[1]), names, rotation=90)
# Show plot
plt.show()
| Hotel_Bookings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="48f7c0b5"
# # Forward kinematics of 2dof planar robots
#
# ## Case 1) Two revolute joints
#
# <img src="https://github.com/robotica-cem/cinematica-notebooks/blob/main/figures/2d-2dof-revolute.png?raw=true" width=400 />
#
# ## Case 2) Revolute joint followed by prismatic joint
#
# <img src="https://github.com/robotica-cem/cinematica-notebooks/blob/main/figures/2d-2dof-revolute-prismatic.png?raw=true" width=400 />
#
# + id="f3ab12a1"
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import doctest
import spatialmath as sm
import sympy as sy
import sys
# + id="2d98a0d0"
def fwd_kinematics_2rev(th1, th2, l1=2, l2=1):
    '''
    Implements the forward kinematics of a robot with two revolute joints.

    Arguments
    ---------
    th1, th2 : float
       Angle in radians of the two degree of freedoms, respectively.
    l1, l2 : float
       Length of the two links, respectively.

    Returns
    -------
    x : float
       The position in the global x-direction of the end-effector (tool point)
    y : float
       The position in the global y-direction of the end-effector (tool point)
    theta : float
       The orientation of the end-effector with respect to the positive global x-axis.
       The angle returned is in the range [-np.pi, np.pi]
    j : tuple with 2 elements
       The position of the joint between the two links

    Tests
    ------

    1) End-effector pose at default position
    >>> x, y, th, j = fwd_kinematics_2rev(0, 0)
    >>> "(%0.2f, %0.2f, %0.2f)" %(x, y, th)
    '(3.00, 0.00, 0.00)'

    2) End-effector pose at 90 degrees in both joints
    >>> x, y, th, j = fwd_kinematics_2rev(np.pi/2, np.pi/2)
    >>> "(%0.2f, %0.2f, %0.2f)" %(x, y, th)
    '(-1.00, 2.00, 3.14)'

    3) End-effector pose at 0 degress in first joint and 90 degress in second
    >>> x, y, th, j = fwd_kinematics_2rev(0, np.pi/2)
    >>> "(%0.2f, %0.2f, %0.2f)" %(x, y, th)
    '(2.00, 1.00, 1.57)'

    4) End-effector position is always inside a circle of a certain radius
    >>> poses = [fwd_kinematics_2rev(th1_, th2_, 3, 2)
    ... for th1_ in np.arange(0, 2*np.pi, 0.2)
    ... for th2_ in np.arange(0, 2*np.pi, 0.2)]
    >>> distances = np.array([np.sqrt(x_**2 + y_**2) for x_, y_, th_, j_ in poses])
    >>> max_radius = 5 + 1e-12 # Add a small tolerance
    >>> np.any(distances > max_radius)
    False

    5) Joint is always at constant distance from the origin
    >>> poses = [fwd_kinematics_2rev(th1_, 0, 3, 2)
    ... for th1_ in np.arange(0, 2*np.pi, 0.2) ]
    >>> distances = np.array([np.sqrt(j_[0]**2 + j_[1]**2) for x_, y_, th_, j_ in poses])
    >>> np.any(np.abs(distances - 3) > 1e-12)
    False
    '''
    # Closed-form planar 2R kinematics. Each link contributes a vector rotated
    # by the accumulated joint angle, so chaining homogeneous transforms (as the
    # previous implementation did via spatialmath SE3 objects) is unnecessary.
    accumulated = th1 + th2

    # The joint between the links sits at the tip of the first link.
    jx = l1 * np.cos(th1)
    jy = l1 * np.sin(th1)

    # End-effector = joint position + second link vector.
    x = jx + l2 * np.cos(accumulated)
    y = jy + l2 * np.sin(accumulated)

    # Wrap the orientation into [-pi, pi] as the docstring promises.
    # (Fix: the previous implementation returned th1+th2 unwrapped, which
    # leaves the documented range for large joint angles.)
    theta = np.arctan2(np.sin(accumulated), np.cos(accumulated))

    return (x, y, theta, (jx, jy))
# -
# Case 1)
# Run the doctests embedded in fwd_kinematics_2rev; verbose=True prints each
# example as it is checked.
doctest.run_docstring_examples(fwd_kinematics_2rev, globals(), verbose=True)
def fwd_kinematics_2rev_symbolic(th1, th2,
                                 l1=sy.symbols('l1'), l2=sy.symbols('l2')):
    '''
    Implements the forward kinematics of a robot with two revolute joints.

    Arguments
    ---------
    th1, th2 : sympy symbols
       Symbol representing the angle in radians of the two degree of freedoms, respectively.
    l1, l2 : sympy symbols
       Symbol representing the length of the two links, respectively.

    Returns
    -------
    x : sympy expression
       The position in the global x-direction of the end-effector (tool point)
    y : sympy expression
       The position in the global y-direction of the end-effector (tool point)
    theta : sympy expression
       The orientation of the end-effector with respect to the positive global x-axis.
    j : tuple with 2 elements, each is a sympy expression
       The position of the joint between link1 and link2

    Tests
    ------

    1) End-effector pose at default position
    >>> th1, th2, l1, l2 = sy.symbols('th1, th2, l1, l2')
    >>> x, y, th, j = fwd_kinematics_2rev_symbolic(th1, th2, l1, l2)
    >>> subsdict = {th1: 0, th2: 0, l1: 2, l2: 1}
    >>> xn = x.evalf(subs=subsdict)
    >>> yn = y.evalf(subs=subsdict)
    >>> thn = th.evalf(subs=subsdict)
    >>> "(%0.2f, %0.2f, %0.2f)" %(xn, yn, thn)
    '(3.00, 0.00, 0.00)'

    2) End-effector pose at 90 degrees in both joints
    >>> th1, th2, l1, l2 = sy.symbols('th1, th2, l1, l2')
    >>> x, y, th, j = fwd_kinematics_2rev_symbolic(th1, th2, l1, l2)
    >>> subsdict = {th1: np.pi/2, th2: np.pi/2, l1: 2, l2: 1}
    >>> xn = x.evalf(subs=subsdict)
    >>> yn = y.evalf(subs=subsdict)
    >>> thn = th.evalf(subs=subsdict)
    >>> "(%0.2f, %0.2f, %0.2f)" %(xn, yn, thn)
    '(-1.00, 2.00, 3.14)'
    '''
    # Build the closed-form planar 2R kinematics directly as sympy
    # expressions. (Fix: the previous implementation pushed sympy symbols
    # through numeric spatialmath SE3 containers via np.array(sy.eye(4)) and
    # check=False, which mixes symbolic entries into numeric arrays and is
    # fragile; plain symbolic trigonometry is equivalent.)
    joint_x = l1 * sy.cos(th1)
    joint_y = l1 * sy.sin(th1)

    # End-effector = joint position + second link vector, rotated by th1+th2.
    x = joint_x + l2 * sy.cos(th1 + th2)
    y = joint_y + l2 * sy.sin(th1 + th2)

    # Orientation accumulates both joint angles (kept unwrapped, as before).
    theta = th1 + th2

    return (x, y, theta, (joint_x, joint_y))
# Run the doctests of the symbolic variant; silent when all examples pass.
doctest.run_docstring_examples(fwd_kinematics_2rev_symbolic, globals())
# + id="2d98a0d0"
def fwd_kinematics_rev_prism(th1, th2, l1=2):
    '''
    Implements the forward kinematics of a robot with one revolute joint and one prismatic.

    Arguments
    ---------
    th1 : float
       Angle in radians of the first degree of freedom.
    th2 : float
       Displacement in meter of the second degree of freedom.
    l1 : float
       Length of the first link.

    Returns
    -------
    x : float
       The position in the global x-direction of the end-effector (tool point)
    y : float
       The position in the global y-direction of the end-effector (tool point)
    theta : float
       The orientation of the end-effector with respect to the positive global x-axis

    Tests
    ------

    1) End-effector pose at default position
    >>> "(%0.2f, %0.2f, %0.2f)" %fwd_kinematics_rev_prism(0, 0)
    '(2.00, 0.00, 0.00)'

    2) End-effector pose at 90 degrees in first joint and 0.6m in second
    >>> "(%0.2f, %0.2f, %0.2f)" %fwd_kinematics_rev_prism(np.pi/2, 0.6)
    '(0.00, 2.60, 1.57)'

    4) End-effector orientation is always the same as the angle of the first dof
    >>> angles = np.array( [th1_ for th1_ in np.arange(0, 2*np.pi, 0.2)
    ... for th2_ in np.arange(-1, 1, 0.2)])
    >>> poses = [fwd_kinematics_rev_prism(th1_, th2_)
    ... for th1_ in np.arange(0, 2*np.pi, 0.2)
    ... for th2_ in np.arange(-1, 1, 0.2)]
    >>> orientations = np.array([th_ for x_, y_, th_ in poses])
    >>> np.any(np.abs(angles-orientations) > 1e-12)
    False
    '''
    # Fix: the body was an unimplemented stub returning (0, 0, 0), so the
    # doctests above (cases 2 and 4) failed. The prismatic joint extends the
    # arm along the direction set by the revolute joint, so the total reach
    # is l1 + th2 and the end-effector orientation equals th1.
    reach = l1 + th2
    x = reach * np.cos(th1)
    y = reach * np.sin(th1)
    theta = th1
    return (x, y, theta)
# -
# ## Run doctests
# If tests pass, no output is generated.
# Case 2)
# Exercise the revolute+prismatic kinematics doctests.
doctest.run_docstring_examples(fwd_kinematics_rev_prism, globals())
# + [markdown] id="5b89ab55"
# ## Visualize the work space of the robot
# + id="f4c05b80"
# Sample the joint space on a regular grid and map every (th1, th2) pair
# through the forward kinematics to obtain the reachable (x, y, theta) set.
th1 = np.arange(0, 2*np.pi, 0.1)
th2 = np.arange(-np.pi, np.pi, 0.1)
xythetaj =[ fwd_kinematics_2rev(th1_, th2_) for th1_ in th1 for th2_ in th2]
# Keep only the end-effector pose columns; drop the intermediate joint position.
xytheta = np.array([ (x_, y_, th_) for x_, y_, th_, j_ in xythetaj])
df = pd.DataFrame(data=np.reshape(xytheta, (-1,3)), columns=['x', 'y', 'theta'])
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="0fc38a8f" outputId="be371e80-cb09-4124-9db9-3d740d5df7af"
# 3-D scatter of the workspace, viewed top-down with an orthographic camera so
# the (x, y) footprint of the reachable set is easy to read.
fig = px.scatter_3d(df, x='x', y='y', z='theta')
camera = dict(
    up=dict(x=0, y=1, z=0),
    center=dict(x=0, y=0, z=0),
    eye=dict(x=0, y=0, z=4)
)
fig.update_scenes(camera_projection_type="orthographic")
fig.update_layout(scene_camera=camera)
fig.show()
# + [markdown] id="ff6b7894"
# ## Visualize movement of the manipulator
# + colab={"base_uri": "https://localhost:8080/"} id="40040997" outputId="ca71c732-4535-4726-d7c7-c8fcea23f6d4"
# Follow one synchronized trajectory (th1[i], th2[i]) and record both the
# elbow-joint path and the end-effector path in the plane.
poses = [ fwd_kinematics_2rev(th1_, th2_) for th1_, th2_ in zip(th1, th2)]
endeff_trajectory = np.array([ [x_, y_] for x_, y_, th_, j_ in poses])
joint_trajectory = np.array([ j_ for x_, y_, th_, j_ in poses])
# + colab={"base_uri": "https://localhost:8080/", "height": 617} id="f558fe6b" outputId="0f1b033e-3cc2-4b7f-f20e-982baa956864"
# Animated two-link arm. The first two traces are the links themselves and are
# updated frame by frame; the last two traces are the static joint and
# end-effector paths drawn as thin reference curves.
fig = go.Figure(
    data=[go.Scatter(x=[0, joint_trajectory[0,0]], y=[0, joint_trajectory[0,1]],
                     name="First link", mode="lines",
                     line=dict(width=6, color="blue")),
          go.Scatter(x=[joint_trajectory[0,0], endeff_trajectory[0,0]],
                     y=[joint_trajectory[0,1], endeff_trajectory[0,1]],
                     name="Second link", mode="lines",
                     line=dict(width=5, color="red")),
          go.Scatter(x=joint_trajectory[:,0], y=joint_trajectory[:,1],
                     name="Joint trajectory", mode="lines",
                     line=dict(width=1, color="lightblue")),
          go.Scatter(x=endeff_trajectory[:,0], y=endeff_trajectory[:,1],
                     name="End-point trajectory", mode="lines",
                     line=dict(width=1, color="red"))],
    layout=go.Layout( width=700, height=600,
                      xaxis=dict(range=[-4, 4], autorange=False),
                      yaxis=dict(range=[-4, 4], autorange=False),
                      title="End-effector trajectory",
                      updatemenus=[dict(
                          type="buttons",
                          buttons=[dict(label="Play",
                                        method="animate",
                                        args=[None])])]
                    ),
    # One frame per sample: re-draw the two link segments at each time step.
    frames=[go.Frame(data=[go.Scatter(x=[0, xj_], y=[0, yj_]),
                           go.Scatter(x=[xj_, xe_], y=[yj_, ye_])])
            for xj_, yj_, xe_, ye_ in np.hstack((joint_trajectory, endeff_trajectory))]
)
fig.show()
# + colab={"base_uri": "https://localhost:8080/"} id="2be1341a" outputId="7bade7f7-5c7f-40c1-8117-ab4cf74e2dbd"
# ?px.scatter_3d
# + id="d6d3db4d"
# -
| cinematica-directa-2D/robot_2dof_sm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/ferdinand-popp/BIDD/blob/master/BIDD_pycaret.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="n1_SwzOI0JF-" outputId="fec017e1-49c4-4e4c-da4a-dd49c5873e5d"
import pandas as pd
# Load the curated bioactivity table and drop the identifier/raw-value columns,
# keeping only the remaining feature columns for inspection.
dataset = pd.read_csv('bioactivity_final_data.csv')
data = dataset.loc[:, dataset.columns.difference(['canonical_smiles', 'standard_values', 'molecule_chembl_id'])]
data
# + id="WGeh0ZA478io"
# Export SMILES + ChEMBL-id pairs in the .smi format expected by PaDEL.
selection = ['canonical_smiles','molecule_chembl_id']
df3_selection = dataset[selection]
df3_selection.to_csv('molecule.smi', sep='\t', index=False, header=False)
# + colab={"base_uri": "https://localhost:8080/"} id="2JmbahDW7tIp" outputId="0c058afe-5def-4840-f80c-e7d613d72805"
# Run the PaDEL descriptor calculator (shell script shipped with the notebook);
# it writes descriptors_output.csv for the molecules in molecule.smi.
# ! bash padel.sh
# + colab={"base_uri": "https://localhost:8080/", "height": 444} id="yD4fOaQQBbL5" outputId="ae75cb3d-1365-4ec1-bb59-db6a151b236a"
# Load the computed descriptors; 'Name' duplicates the molecule id, so drop it.
df3_X = pd.read_csv('descriptors_output.csv')
df3_X = df3_X.drop(columns=['Name'])
df3_X
# + colab={"base_uri": "https://localhost:8080/"} id="E8n11Rn0Br7b" outputId="e59c7e91-a656-4bd5-8942-170bb9a30e77"
# Regression target: the pIC50 potency values.
df3_Y = dataset['pIC50']
df3_Y
# + colab={"base_uri": "https://localhost:8080/", "height": 444} id="jSV6DNddB0C0" outputId="cdb86742-e746-4aa5-f4de-54ee78d0a106"
# Final modelling table = descriptors + target, aligned row-wise.
dataset3 = pd.concat([df3_X,df3_Y], axis=1)
dataset3
# + id="QEr94HE9B574"
dataset3.to_csv('final_set_for_model.csv', index=False)
# + [markdown] id="yFeJcDT4CGgd"
# # Model Building
# + id="jq1xz0k5CJor"
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
# + colab={"base_uri": "https://localhost:8080/", "height": 444} id="9ccSV98yCLpA" outputId="f7e39eaa-9c43-4a9c-92b0-4892c0a82006"
# Features = every descriptor column; regression target = pIC50.
df = pd.read_csv('final_set_for_model.csv')
X = df.drop('pIC50', axis=1)
Y = df.pIC50
# + colab={"base_uri": "https://localhost:8080/"} id="tweo-VhyChN_" outputId="63d1ca77-9d11-4a9b-882c-d2450dca3718"
# Drop near-constant descriptors: keep features whose variance exceeds
# 0.8*(1-0.8) = 0.16 (standard cut for mostly-binary fingerprint columns).
from sklearn.feature_selection import VarianceThreshold
selection = VarianceThreshold(threshold=(.8 * (1 - .8)))
X = selection.fit_transform(X)
X.shape
# + id="Hg0TtIjYCwdR"
# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
# + [markdown] id="_TRYQFovDET0"
# # Lazy predict
# + colab={"base_uri": "https://localhost:8080/"} id="5qaSFpn9DG4e" outputId="98a06413-b512-4d1c-997f-0b7f6f71a869"
# ! pip install lazypredict
# + colab={"base_uri": "https://localhost:8080/"} id="xCNQtqyeDLja" outputId="c84abb23-6a8b-42e9-be15-db1379ef90a1"
import lazypredict
from lazypredict.Supervised import LazyRegressor
# + colab={"base_uri": "https://localhost:8080/"} id="V_kOEV2pDr2P" outputId="0743535d-70bf-4004-e98c-111226cd19a3"
# Benchmark many regressors at once. LazyRegressor.fit takes
# (X_train, X_test, y_train, y_test) and evaluates on the second pair.
# NOTE(review): the first call passes the training split twice, so
# models_train reports train-set performance — presumably intentional for a
# train-vs-test comparison, but confirm.
clf = LazyRegressor(verbose=0,ignore_warnings=True, custom_metric=None)
models_train,predictions_train = clf.fit(X_train, X_train, Y_train, Y_train)
models_test,predictions_test = clf.fit(X_train, X_test, Y_train, Y_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 630} id="F2lTnqBAEEl6" outputId="c24e3a0d-0063-4aee-fa14-a898efbd4dc1"
# Bar plot of R-squared values
import matplotlib.pyplot as plt
import seaborn as sns
#train["R-Squared"] = [0 if i < 0 else i for i in train.iloc[:,0] ]
plt.figure(figsize=(5, 10))
sns.set_theme(style="whitegrid")
ax = sns.barplot(y=predictions_train.index, x="R-Squared", data=predictions_train)
ax.set(xlim=(0, 1))
# + colab={"base_uri": "https://localhost:8080/", "height": 630} id="K872lvXPhze2" outputId="779c0c58-3bd5-46b0-f0e6-e61cea6da6d1"
# Bar plot of RMSE per model.
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(5, 10))
sns.set_theme(style="whitegrid")
ax = sns.barplot(y=predictions_train.index, x="RMSE", data=predictions_train)
ax.set(xlim=(0, 10))
# + colab={"base_uri": "https://localhost:8080/", "height": 630} id="gXdSUeZuh9BU" outputId="49f80533-51ad-4174-8644-aafe08ea1d84"
# Bar plot of wall-clock fitting time per model.
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(5, 10))
sns.set_theme(style="whitegrid")
ax = sns.barplot(y=predictions_train.index, x="Time Taken", data=predictions_train)
ax.set(xlim=(0, 10))
# -
# Fit the regressor chosen from the LazyRegressor leaderboard and persist it.
# Fix: the original cell was left unfinished — `selected_model =` had no
# right-hand side (a SyntaxError) and then pickled the undefined name `model`.
# RandomForestRegressor (imported in the Model Building cell above) is used as
# the selected estimator; swap in whichever model topped the comparison.
selected_model = RandomForestRegressor(random_state=42)
selected_model.fit(X_train, Y_train)
import pickle
# Context manager guarantees the file handle is closed even if dump raises
# (the original passed a bare open() that was never closed).
with open('model.pkl', 'wb') as f:
    pickle.dump(selected_model, f)
| BIDD_auto_model.ipynb |