code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Test using websockets from Jupyter notebook
# +
import asyncio
import websockets

# Server bind address and port.
address = 'localhost'
port = 5000

# Echo handler: print each incoming message, then send it straight back
# to the client.
# NOTE(review): the (websocket, path) handler signature matches websockets
# < 10; newer releases pass only the connection object — confirm the
# installed version before upgrading.
async def echo(websocket, path):
    async for message in websocket:
        print(f'Response: {message}')
        await websocket.send(message)

# Start the server, announce the address, and block forever serving clients.
# NOTE(review): asyncio.get_event_loop() is deprecated for this use since
# Python 3.10 (asyncio.run is the modern entry point) — kept as-is because
# notebooks already run inside an event loop in some environments.
asyncio.get_event_loop().run_until_complete(
    websockets.serve(echo, address, port))
print(f'Serving at {address}:{port}')
asyncio.get_event_loop().run_forever()
# -
| jupyter-notebook-test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JinyuanSun/my_bio_script/blob/main/genomic/genomic_bioinfo_plot_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="ymUzqLWTuXjm"
# ## CAZy annotation
# + [markdown] id="-EF7_aLT3gtg"
# ## Cazy
# + cellView="form" id="jQa5OBHzeSay"
#@title
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def make_autopct(values):
    """Build an ``autopct`` callback for matplotlib pie charts that shows
    absolute counts instead of percentages.

    Parameters
    ----------
    values : sequence of numbers
        The raw values passed to ``ax.pie``; used to convert each wedge's
        percentage back into its absolute count.

    Returns
    -------
    callable
        A function mapping a wedge percentage to its count, as a string.
    """
    # Hoisted out of the callback: the total is invariant across wedges
    # (the original also passed an unused `p=pct` kwarg to str.format).
    total = sum(values)

    def my_autopct(pct):
        # Convert the percentage matplotlib hands us back into a count.
        return '{:d}'.format(int(round(pct * total / 100.0)))

    return my_autopct
def cazy_plot(cazy_values, short_cazy_names = False, size = 0.3, dark = False):
    """Draw a donut chart of CAZy class counts and save it as cazy_pie.png.

    Parameters
    ----------
    cazy_values : sequence of int
        Counts for the six CAZy classes, in the order GH, GT, PL, CE, AA, CBM.
    short_cazy_names : bool
        Use the abbreviated class names when True.
    size : float
        Width of the donut ring.
    dark : bool
        Use the darker "tab10" palette instead of the pastel "Set3".
    """
    if short_cazy_names:
        cazy_names = ["GHs", "GTs", "PLs", "CEs", "AAs", "CBMs"]
    else:
        cazy_names = ["Glycoside Hydrolases",
                      "Glycosyltransferases",
                      "Polysaccharide Lyases",
                      "Carbohydrate Esterases",
                      "Auxiliary Activities",
                      "Carbohydrate Binding Modules"]
    # Single if/else instead of the original twin `== True` / `== False`
    # tests, which left `colors` unbound for any non-bool truthy `dark`.
    cmap = plt.get_cmap("tab10" if dark else "Set3")
    colors = cmap(np.arange(6))
    # Uncomment the next line and change to any colors you like to use!
    # colors = []
    fig, ax = plt.subplots()
    matplotlib.style.use('ggplot')
    # list() guards against dict views or other lazy iterables.
    ax.pie(list(cazy_values),
           labels=cazy_names,
           radius=1,
           colors=colors,
           autopct=make_autopct(cazy_values),
           wedgeprops=dict(width=size, edgecolor='w'))
    ax.set(aspect="equal", title='CAZy annotation')
    plt.show()
    fig.savefig('cazy_pie.png', dpi=300)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="XWku6mKwea_J" outputId="2a14df64-6454-43c9-e426-1d5d3a3cec9c"
cazy_values = [174,126,11,55,76,23]
cazy_plot(cazy_values,short_cazy_names=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="L86p1_wVeuII" outputId="dad91d3f-553c-4aa1-de93-aac6fef022da"
cazy_plot(cazy_values,short_cazy_names=True, dark=True)
# + [markdown] id="dgYoFzY63q3e"
# ## Merops
# + id="_yZbaFCSw7Wt"
#@title
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def make_autopct(values):
    """Build an ``autopct`` callback for matplotlib pie charts that shows
    absolute counts instead of percentages.

    Parameters
    ----------
    values : sequence of numbers
        The raw values passed to ``ax.pie``; used to convert each wedge's
        percentage back into its absolute count.

    Returns
    -------
    callable
        A function mapping a wedge percentage to its count, as a string.
    """
    # Hoisted out of the callback: the total is invariant across wedges
    # (the original also passed an unused `p=pct` kwarg to str.format).
    total = sum(values)

    def my_autopct(pct):
        # Convert the percentage matplotlib hands us back into a count.
        return '{:d}'.format(int(round(pct * total / 100.0)))

    return my_autopct
def merops_plot(merops_dict, short_merops_names = True, size = 0.3, dark = False):
    """Donut chart of MEROPS peptidase-class counts; saves merops_pie.png.

    Parameters
    ----------
    merops_dict : dict
        Mapping of one-letter MEROPS class codes to gene counts:
        Aspartic (A), Cysteine (C), Glutamic (G), Metallo (M),
        Asparagine (N), Mixed (P), Serine (S), Threonine (T), Unknown (U),
        Inhibitor (I).
    short_merops_names : bool
        Keep the one-letter codes when True; expand to full names when False.
    size : float
        Width of the donut ring.
    dark : bool
        Use the "tab10" palette instead of "Set3".
    """
    # Materialize the dict views: np.asarray/ax.pie do not handle
    # dict_values reliably (0-d object array), and we need len() below.
    merops_values = list(merops_dict.values())
    merops_names = list(merops_dict.keys())
    # Uncomment the next line and change to any colors you like to use!
    # colors = []
    name_dict = {"A": "Aspartic",
                 "C": "Cysteine",
                 "G": "Glutamic",
                 "M": "Metallo",
                 "N": "Asparagine",
                 "P": "Mixed",
                 "S": "Serine",
                 "T": "Threonine",
                 "U": "Unknown",
                 "I": "Inhibitor"}
    if not short_merops_names:
        merops_names = [name_dict[name] for name in merops_names]
    fig, ax = plt.subplots()
    matplotlib.style.use('ggplot')
    # Palette choice collapsed to one if/else (was two sequential ifs).
    cmap = plt.get_cmap("tab10" if dark else "Set3")
    colors = cmap(np.arange(len(merops_values)))
    ax.pie(merops_values,
           labels=merops_names,
           radius=1,
           colors=colors,
           autopct=make_autopct(merops_values),
           wedgeprops=dict(width=size, edgecolor='w'))
    ax.set(aspect="equal", title='merops annotation')
    plt.show()
    fig.savefig('merops_pie.png', dpi=300)
# + colab={"base_uri": "https://localhost:8080/", "height": 429} id="6HISrZt7cwjP" outputId="06797a00-6cd2-4e13-9ec6-0e339e0f85f5"
merops_dict = {"A":1,"C":2,"G":2, "M":2,"N":2,"P":2,"S":2,"T":2,"U":2,"I":2}
merops_plot(merops_dict, short_merops_names = False)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="Q3IF_fcuk4Uy" outputId="cf3889a8-44d9-45cc-b394-5d4b2f2e6dee"
merpos_plot(merpos_dict, short_merpos_names = False,dark=True)
# + [markdown] id="ZGbH_G4mZVJO"
# ## PHI bar
# + cellView="form" id="QL9HvK20f4ig"
#@title
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def make_autopct(values):
    """Build an ``autopct`` callback for matplotlib pie charts that shows
    absolute counts instead of percentages.

    Parameters
    ----------
    values : sequence of numbers
        The raw values passed to ``ax.pie``; used to convert each wedge's
        percentage back into its absolute count.

    Returns
    -------
    callable
        A function mapping a wedge percentage to its count, as a string.
    """
    # Hoisted out of the callback: the total is invariant across wedges
    # (the original also passed an unused `p=pct` kwarg to str.format).
    total = sum(values)

    def my_autopct(pct):
        # Convert the percentage matplotlib hands us back into a count.
        return '{:d}'.format(int(round(pct * total / 100.0)))

    return my_autopct
def phi_plot(phi_dict, short_phi_names = True, dark = False):
    """Bar plot of PHI-base phenotype category counts; saves phi_bar.png.

    Parameters
    ----------
    phi_dict : dict
        Mapping of two-letter PHI category codes (RC, SC, EF, EA, LL, RV,
        IV, LP, UP) to gene counts.
    short_phi_names : bool
        Keep the two-letter codes on the x axis when True; expand them to
        the full phenotype names when False.
    dark : bool
        Use the "tab10" palette instead of the pastel "Set3".
    """
    # Materialize the dict views: ax.bar needs indexable sequences and
    # np.asarray on dict_values yields a 0-d object array.
    phi_values = list(phi_dict.values())
    phi_names = list(phi_dict.keys())
    # Code -> full phenotype description.
    name_dict = {"RC": "chemistry target: resistance to chemical",
                 "SC": "chemistry target: sensitivity to chemical",
                 "EF": "effector (plant avirulence determinant)",
                 "EA": "enhanced antagonism",
                 "LL": "lethal",
                 "RV": "reduced virulence",
                 "IV": "increased virulence",
                 "LP": "loss of pathogenicity",
                 "UP": "unaffected pathogenicity"}
    if not short_phi_names:
        phi_names = [name_dict[name] for name in phi_names]
    matplotlib.style.use('default')
    # Palette choice collapsed to one if/else (was two sequential ifs).
    cmap = plt.get_cmap("tab10" if dark else "Set3")
    colors = cmap(np.arange(len(phi_values)))
    # Uncomment the next line and change to any colors you like to use!
    # colors = []
    width = 0.7  # the width of the bars
    fig, ax = plt.subplots(figsize=(7.5, 4.7))
    rects1 = ax.bar(phi_names, phi_values, width, color=colors)
    # Labels, title and axes ticks. bar() with string categories already
    # labels the ticks, so the original redundant set_xticklabels call
    # (which triggers FixedFormatter warnings) was dropped.
    ax.set_ylabel('Counts')
    ax.set_title('PHI annotation')
    # Legend always spells out the full category names.
    labels = ["RC: chemistry target: resistance to chemical",
              "SC: chemistry target: sensitivity to chemical",
              "EF: effector (plant avirulence determinant)",
              "EA: enhanced antagonism",
              "LL: lethal",
              "RV: reduced virulence",
              "IV: increased virulence",
              "LP: loss of pathogenicity",
              "UP: unaffected pathogenicity"]
    handles = list(rects1)
    plt.legend(handles,
               labels,
               bbox_to_anchor=(1.02, -0.1),
               ncol=2)

    def autolabel(rects):
        """Attach a text label above each bar displaying its height."""
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x() + rect.get_width() / 2., height,
                    '%d' % int(height),
                    ha='center', va='bottom')

    autolabel(rects1)
    plt.show()
    fig.savefig('phi_bar.png', dpi=300)
# + colab={"base_uri": "https://localhost:8080/", "height": 573} id="Cteqm7DMbunL" outputId="cfab1428-3be8-4ba7-a6e0-4692875b759b"
phi_dict = {"RC":724,"SC":41,"EF":102,"EA":151,"LL":392,"RV":6,"IV":3,"LP":3,"UP":0}
phi_plot(phi_dict, short_phi_names = True, dark = False)
# + colab={"base_uri": "https://localhost:8080/", "height": 573} id="kyPz4QreYVoE" outputId="a737e6ea-0d6f-4e34-8c12-b0718057c13e"
phi_plot(phi_dict, short_phi_names = True, dark = True)
# + id="c7BATc6shuWU"
# + [markdown] id="cQ6YAPylZgxH"
# ## Venn (P450, Phi, CAZy, merpos)
# + id="TVI1ikjchvUD"
# %matplotlib inline
def venn_plot(dataset_dict):
    """Draw a Venn diagram of the gene sets in ``dataset_dict`` and save
    it to venn.png.

    Relies on the third-party ``venn`` package (imported lazily so the
    rest of the notebook works without it installed).
    """
    from venn import venn
    import matplotlib.pyplot as plt
    diagram = venn(dataset_dict, cmap=plt.get_cmap("RdYlBu"))
    plt.show()
    diagram.figure.savefig('venn.png', dpi=300)
# + colab={"base_uri": "https://localhost:8080/"} id="Gp75QKVgjgw7" outputId="247f5b1b-a1d0-44f8-fb15-db73e4e18a61" language="bash"
# pip install venn
# git clone https://github.com/JinyuanSun/my_bio_script.git
# + id="eWxNOBhz-Pj0"
def read_glist(path = './'):
    """Collect gene sets from all ``.glist`` files in a directory.

    Parameters
    ----------
    path : str
        Directory to scan for files whose name ends in ".glist".

    Returns
    -------
    dict
        Maps each file's base name (without the ".glist" suffix) to the
        set of stripped, de-duplicated gene identifiers it contains.
    """
    import os
    dataset_dict = {}
    for filename in os.listdir(path):
        # Skip anything that is not a gene-list file.
        if not filename.endswith(".glist"):
            continue
        filepath = os.path.join(path, filename)
        key = filename.replace(".glist", "")
        # `with` closes the file; the original also called close() inside
        # the with-block, which was redundant.
        with open(filepath) as glist:
            # One gene per line; strip whitespace and de-duplicate.
            dataset_dict[key] = {line.strip() for line in glist}
    return dataset_dict
# + id="GsA-127aAodX"
# Gene sets per annotation source (p450, cazy, merops, phi .glist files).
dataset_dict = read_glist(path = "my_bio_script/genomic/test")
# + colab={"base_uri": "https://localhost:8080/", "height": 466} id="LjyXvozpgv-x" outputId="82fd476b-c94a-41f8-a3a4-667882bb1b22"
venn_plot(dataset_dict)
# + id="s9mHD94mg8zR" language="bash"
#
# grep -v "#" protein.faa.p450.htb|awk -F " " '{print$3}'|sort|uniq > p450.glist
# grep -v "#" protein.faa.cazy.htb|awk -F " " '{print$3}'|sort|uniq > cazy.glist
# awk '{print$1}' protein.faa.merops.btb |sort|uniq > merops.glist
# awk '{print$1}' protein.faa.phi.btb |sort|uniq > phi.glist
#
# + id="eO53Rs3ziQZB"
def make_phi_dict(btbfile = 'protein.faa.phi.btb'):
    """Count PHI-base phenotype annotations per category from a blast-tab file.

    Each line of the btb file is tab-separated; column 0 is the query gene
    and column 1 encodes phenotype annotations after the last '#', joined
    by '__'. Consecutive lines for the same gene are merged, and each
    distinct annotation is counted at most once per gene.

    Parameters
    ----------
    btbfile : str
        Path to the tab-separated annotation file.

    Returns
    -------
    dict
        Maps the two-letter PHI category codes to gene counts.
    """
    phi_dict = {"RC": 0, "SC": 0, "EF": 0, "EA": 0, "LL": 0,
                "RV": 0, "IV": 0, "LP": 0, "UP": 0}
    # Phenotype text (with underscores replaced by spaces) -> category code.
    map_dict = {"chemistry target: resistance to chemical": "RC",
                "chemistry target: sensitivity to chemical": "SC",
                "effector (plant avirulence determinant)": "EF",
                "enhanced antagonism": "EA",
                "lethal": "LL",
                "reduced virulence": "RV",
                "increased virulence (hypervirulence)": "IV",
                "loss of pathogenicity": "LP",
                "unaffected pathogenicity": "UP"}
    annotation_dict = {}
    # Fix: the original did `with open(btbfile) as btbfile`, shadowing the
    # parameter, and then redundantly called close() inside the with-block.
    with open(btbfile) as handle:
        gene_name = ''
        for line in handle:
            line_list = line.split("\t")
            annotation = line_list[1].split("#")[-1].split("__")
            if gene_name == line_list[0]:
                annotation_dict[gene_name] += annotation
            else:
                gene_name = line_list[0]
                annotation_dict[gene_name] = annotation
    for gene_name in annotation_dict:
        # set() so a phenotype repeated across hits counts once per gene.
        for annotation in set(annotation_dict[gene_name]):
            phi_dict[map_dict[annotation.replace("_", " ")]] += 1
    return phi_dict
# + colab={"base_uri": "https://localhost:8080/", "height": 573} id="2Pt38F_Zx4f6" outputId="8dbd2086-a750-48a5-888e-25c046acc120"
# Build PHI category counts from the test annotation file, then plot them.
phi_dict = make_phi_dict(btbfile = 'my_bio_script/genomic/test/protein.faa.phi.btb')
phi_plot(phi_dict, short_phi_names = True, dark = False)
# + id="alOPWbSJyW9p"
def read_merops_map(merops_path = "./merops.txt"):
    """Parse the MEROPS reference file into {gene id -> one-letter class}.

    Each line is expected to carry a 10-character gene identifier right
    after a leading marker character (slice [1:11]) and the peptidase class
    as the first character after the first '#'.

    Parameters
    ----------
    merops_path : str
        Path to the MEROPS reference text file.

    Returns
    -------
    dict
        Maps MEROPS gene identifiers to one-letter class codes.
    """
    merops_map_dict = {}
    # `with` closes the file; the original's extra close() was redundant.
    with open(merops_path) as merops_txt:
        for line in merops_txt:
            merops_gene = line[1:11]              # fixed-width id after '>'
            merops_class = line.split("#")[1][0]  # first char after '#'
            merops_map_dict[merops_gene] = merops_class
    return merops_map_dict
def mk_merops_dict(merops_map_dict, btbfile = './protein.faa.merops.btb'):
    """Count query genes per MEROPS peptidase class from a blast-tab file.

    Parameters
    ----------
    merops_map_dict : dict
        {MEROPS subject id -> one-letter class code}, e.g. the output of
        read_merops_map.
    btbfile : str
        Tab-separated blast output; column 0 is the query gene and column 1
        the MEROPS subject id.

    Returns
    -------
    dict
        Only the classes that occur at least once, {class code -> count}.
        Each class is counted at most once per query gene; classes absent
        from the genome are reported with a printed message.
    """
    merops_dict = {"A": 0, "C": 0, "G": 0, "M": 0, "N": 0,
                   "P": 0, "S": 0, "T": 0, "U": 0, "I": 0}
    name_dict = {"A": "Aspartic",
                 "C": "Cysteine",
                 "G": "Glutamic",
                 "M": "Metallo",
                 "N": "Asparagine",
                 "P": "Mixed",
                 "S": "Serine",
                 "T": "Threonine",
                 "U": "Unknown",
                 "I": "Inhibitor"}
    annotation_dict = {}
    # Fix: the original shadowed the parameter with the file handle and
    # redundantly called close() inside the with-block.
    with open(btbfile) as handle:
        gene_name = ''
        for line in handle:
            line_list = line.split("\t")
            annotation = merops_map_dict[line_list[1]]
            if gene_name == line_list[0]:
                annotation_dict[gene_name].append(annotation)
            else:
                gene_name = line_list[0]
                annotation_dict[gene_name] = [annotation]
    for gene_name in annotation_dict:
        # set() so one gene contributes at most 1 to each class.
        for annotation in set(annotation_dict[gene_name]):
            merops_dict[annotation] += 1
    occured_merops = {}
    absent_list = []
    for key in merops_dict:
        if merops_dict[key] > 0:
            occured_merops[key] = merops_dict[key]
        else:
            absent_list.append(key + "(" + name_dict[key] + ")")
    print("Lack of " + ", ".join(absent_list) + " in the genome!")
    return occured_merops
# + colab={"base_uri": "https://localhost:8080/"} id="SvZv6iqa26PP" outputId="8f2d6b2b-057a-401d-a021-1ac9d144bf11"
# Load the gene->class reference, count classes from the blast table, plot.
merops_map_dict = read_merops_map(merops_path = "my_bio_script/genomic/test/merops.txt")
# NOTE(review): the file name below says "merpos" — confirm whether the test
# data really uses that spelling or whether "protein.faa.merops.btb" was meant.
merops_dict = mk_merops_dict(merops_map_dict, btbfile = 'my_bio_script/genomic/test/protein.faa.merpos.btb')
# + colab={"base_uri": "https://localhost:8080/", "height": 429} id="Lzsk-qi34J6A" outputId="8029fb43-6905-44ea-c957-d20fd2977a8b"
merops_plot(merops_dict, short_merops_names = True)
# + id="NgCdP954-Ruw"
| genomic/genomic_bioinfo_plot_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# +
# %run notebook_setup
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
import exoplanet as xo
# +
# Grid of eccentric anomaly E and eccentricity e (e stops just short of 1
# to stay away from the parabolic limit).
E_grid = np.linspace(0, np.pi, 201)
e_grid = np.linspace(0.0, 1.0-1e-10, 210)
E, e = np.meshgrid(E_grid, e_grid, indexing="ij")
# Kepler's equation: mean anomaly from E and e.
M = E - e * np.sin(E)
# Closed-form true anomaly from E — the analytic reference value.
f = 2 * np.arctan2(np.sqrt(1+e) * np.tan(0.5*E), np.sqrt(1-e))
# Numerical true anomaly from the exoplanet Kepler solver, for comparison.
f_val = xo.orbits.get_true_anomaly(M, e).eval()
# +
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
# log10 of the absolute solver error, clipped away from zero for the log.
delta = np.log10(np.clip(np.abs((f - f_val)) % (2*np.pi), 1e-19, 100))
# print(delta.max())
# delta = gaussian_filter(delta, 8.0)
C = ax.contour(e_grid, E_grid, delta,
               levels=[-19.0, -18.0, -17.0, -16.0, -15.0, -14.0],
               colors="k", linestyles="solid")
ax.clabel(C, C.levels, inline=True, fmt="{0:.0f}".format, fontsize=10)
ax.set_xlabel("eccentricity")
ax.set_ylabel("eccentric anomaly")
# NOTE(review): "\l" and "\m" are invalid string escapes (DeprecationWarning);
# a raw string r"..." would be safer for this mathtext title.
ax.set_title("$\log_{10} |f_\mathrm{calc} - f_\mathrm{true}| $", fontsize=14);
# -
# Scatter of the error against eccentricity, colored by mean anomaly.
plt.scatter(e, delta, c=M, s=2, edgecolor="none")
plt.colorbar(label="mean anomaly");
# +
# Repeat with a much smaller clip floor, smooth for contouring, and drop
# the entries pinned near the clip value.
delta = np.log10(np.clip(np.abs((f - f_val)) % (2*np.pi), 1e-30, 100.0))
delta_s = gaussian_filter(delta, 2.0)
inds = np.argsort(delta.flatten())
inds = inds[delta.flatten()[inds] > -25]
delta_f = delta.flatten()[inds]
e_f = e.flatten()[inds]
M_f = M.flatten()[inds]
fig, axes = plt.subplots(2, 2, figsize=(8, 8))
ax = axes[0, 0]
ax.scatter(e_f, delta_f, c=M_f, s=2, edgecolor="none")
ax = axes[1, 1]
ax.scatter(delta_f, M_f, c=e_f, s=2, edgecolor="none")
ax = axes[1, 0]
C = ax.contourf(e_grid, E_grid, delta_s, levels=np.arange(-19, -12).astype(float),
                extend="min")
# -
# Interactive inspection of the smoothed error grid and contour levels.
delta_s
np.arange(-19, -15)
| paper/notebooks/kepler-accuracy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **IBM** *Data Science* Professional Certification Capstone : *Coursera*
#
# ## Project - **US Accident** Severity
# #### Author : <NAME>
# #### Date : 14-Sep-2020
# ### ***Table of Contents***
# * [Introduction: Business Problem](#Introduction)
# * [Data](#Data)
# * [Methodology](#Methodology)
# * [Results](#Results)
# * [Discussion](#Discussion)
# * [Conclusion](#Conclusion)
# # Introduction
# ## Business Problem
#
# All around the world, roads are shared by many motorized vehicles that have made transportation faster and more comfortable while supporting the country's economic status and social development. However, these vehicles cause many problems globally. Car accidents are responsible for 1.35M deaths on roadways every year. Almost 3.7k people are killed globally every day in road traffic crashes, and more than half of those killed are pedestrians, motorcyclists, and cyclists.
#
# In this project we will try to construct an optimal model for predicting **The Severity of Road Accidents**. Specifically, this report is targeted at stakeholders interested in knowing the chances of encountering a road accident on a given day (with given factors) in and around New York. The severity falls under 5 categories, where **0 indicates less severity** and **4 indicates more severity**.
#
# Since the road ways is preferred by many as a mode of transportation, the vehicles are prone to accident especially when the weather is not favourable.
#
# So here, We will use our data science skills to predict whether the person has chances to encounter a Severe Collision based on few criteria. Upon analysing the data set and building a model to predict the severity, the people in New York who usually go by car might think of other alternatives.
# # Data
# ### *Description* :
# This is a countrywide car accident dataset, which covers 49 states of the USA. The accident data are collected from February 2016 to June 2020, using two APIs that provide streaming traffic incident (or event) data. These APIs broadcast traffic data captured by a variety of entities, such as the US and state departments of transportation, law enforcement agencies, traffic cameras, and traffic sensors within the road-networks. Currently, there are about 3.5 million accident records in this dataset.
#
# ### *Content* :
# This dataset has been collected in real-time, using multiple Traffic APIs. Currently, it contains accident data that are collected from February 2016 to June 2020 for the Contiguous United States. Check here to learn more about this dataset.
#
# ### *Inspiration* :
# US-Accidents can be used for numerous applications such as real-time car accident prediction, studying car accidents hotspot locations, casualty analysis and extracting cause and effect rules to predict car accidents, and studying the impact of precipitation or other environmental stimuli on accident occurrence. The most recent release of the dataset can also be useful to study the impact of COVID-19 on traffic behavior and accidents.
#
# ### *Source* :
#
# KAGGLE : [https://www.kaggle.com/sobhanmoosavi/us-accidents](https://www.kaggle.com/sobhanmoosavi/us-accidents)
# +
# Import numpy, pandas, matplotlib.pyplot, sklearn modules and seaborn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
# -
# Import the data (the full Kaggle US-Accidents CSV, ~3.5M rows) and show
# the column names/dtypes.
df = pd.read_csv('US_Accidents_June20.csv')
df.info()
# The given data-set has 3,513,617 records with 48 attributes. Here, attributes like *Zipcode, ID, Source* etc. are not necessary while training the model. Also, we are not going to consider all the records to fit our model because of **memory concerns**. So we will remove the unwanted attributes and NA records and work with a **filtered** data-set *(based on States)*.
# # Methodology
# * For implementing the solution, I have used **Github** as a repository and running **Jupyter Notebook** to preprocess data and build Machine Learning models. Regarding coding, I have used **Python** and its popular packages such as *Pandas, NumPy and Sklearn*.
#
# * Once I have load data into Pandas Dataframe, used *‘info’* attribute to check the feature names and their data types.
#
# * Then I have *cleaned* the data and re-built the Attributes in a **Standardized Formats** to ease the usage.
#
# * Then I have presented few **statistics** inferences coupled with **visuals** in the *Explanatory Data Analysis* Section.
#
# * Because of my PC's **less computation capacity**, I have used only the records corresponding to the *New York* to proceed further.
#
# * I have chosen the **Random Forest** machine learning model, I have also built and *evaluated* the model and shown the results with accuracy.
# ## Standardizing Time
# +
# Convert Start_Time and End_Time to datetypes (unparseable values become NaT).
df['Start_Time'] = pd.to_datetime(df['Start_Time'], errors='coerce')
df['End_Time'] = pd.to_datetime(df['End_Time'], errors='coerce')
# Extract year, month, day, hour and weekday
df['Year']=df['Start_Time'].dt.year
df['Month']=df['Start_Time'].dt.strftime('%b')
df['Day']=df['Start_Time'].dt.day
df['Hour']=df['Start_Time'].dt.hour
df['Weekday']=df['Start_Time'].dt.strftime('%a')
# Extract the amount of time in the unit of minutes for each accident, round to the nearest integer
td='Time_Duration(min)'
df[td]=round((df['End_Time']-df['Start_Time'])/np.timedelta64(1,'m'))
# -
# ## Handling Outliers
# ### Drop Rows with negative time frame
# +
# Check if there is any negative time_duration values
print(df[td][df[td]<=0])
# Drop the rows with td<0
neg_outliers=df[td]<=0
# Set outliers to NAN
# NOTE(review): this boolean-mask assignment blanks EVERY column of the
# matching rows; that is acceptable here only because those rows are
# dropped immediately below via their NaN td values.
df[neg_outliers] = np.nan
# Drop rows with negative td
df.dropna(subset=[td],axis=0,inplace=True)
# -
# ### Fill Outliers With Median Values
# +
# Remove outliers for Time_Duration(min): n * standard_deviation (n=3), backfill with median
n=3
median = df[td].median()
std = df[td].std()
outliers = (df[td] - median).abs() > std*n
# Blank ONLY the Time_Duration(min) column for outlier rows.
# BUG FIX: the original `df[outliers] = np.nan` wiped every column of the
# outlier rows, but only the td column is backfilled below — all other
# features of those rows were silently lost (and later removed by dropna).
df.loc[outliers, td] = np.nan
# Fill NAN with median
df[td].fillna(median, inplace=True)
# -
# ### Time Interval
# Report the max/min clean-up times after outlier handling.
print('Max time to clear an accident: {} minutes or {} hours or {} days; Min to clear an accident td: {} minutes.'.format(
    df[td].max(),round(df[td].max()/60), round(df[td].max()/60/24), df[td].min()))
# ## Feature Selection
# ### Select a list of features for machine learning algorithms
# Setting the list of features to include in Machine Learning
feature_lst=['Source','TMC','Severity','Start_Lng','Start_Lat','Distance(mi)','Side',
             'City','County','State','Timezone','Temperature(F)','Humidity(%)','Pressure(in)',
             'Visibility(mi)', 'Wind_Direction','Weather_Condition','Amenity','Bump','Crossing',
             'Give_Way','Junction','No_Exit','Railway','Roundabout','Station','Stop','Traffic_Calming',
             'Traffic_Signal','Turning_Loop','Sunrise_Sunset','Hour','Weekday', 'Time_Duration(min)']
# ### DataFrame with only Required Features
# Select the dataset to include only the selected features
df_sel=df[feature_lst].copy()
df_sel.info()
# ## Drop Rows With NA
# Check missing values
df_sel.isnull().mean()
# Drop every row holding a missing value in any column that contains NaNs.
df_sel.dropna(subset=df_sel.columns[df_sel.isnull().mean()!=0], how='any', axis=0, inplace=True)
df_sel.shape
# ## Explanatory Data Analysis
# +
from datetime import date
# Define lists of states, colors, linestyles, month order, day_order, and hour_order
# For this notebook, we will focus on the following three states: NJ, PA, & NY
state_lst=['NJ','PA','NY']
state_lst_full=['New Jersey','Pennsylvania','New York']
# We will focus on accident severities 2, 3 & 4
severity_lst=[2,3,4]
# Set a list of colors, markers and linestyles for plotting
color_lst=['tab:red','tab:blue','tab:green']
marker_lst=['D','o','*']
linestyle_lst=['dashed','dashdot','solid']
# Set a list of month, weekday, hour for reindex purpose and time_duraction to clear the accident
month_lst = [ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul','Aug','Sep','Oct','Nov','Dec']
weekday_lst = [ 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
weekday_lst_full = [ 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
hour_lst= np.arange(24)
td='Time_Duration(min)'
# NOTE(review): the raw CSV is re-loaded here, discarding the cleaning done
# in the cells above — the EDA below therefore runs on uncleaned data.
df = pd.read_csv('US_Accidents_June20.csv')
# Keep only the three states of interest.
df = df[df.State.isin(state_lst)]
df.head()
# +
# Extract year, month, day, hour, & weekday information
# Convert Start_Time and End_Time to datetypes
df['Start_Time'] = pd.to_datetime(df['Start_Time'], errors='coerce')
df['End_Time'] = pd.to_datetime(df['End_Time'], errors='coerce')
# Extract year, month, day, hour, weekday and time_duration information
df['Start_Year']=df['Start_Time'].dt.year
df['Start_Month']=df['Start_Time'].dt.strftime('%b')
df['Start_Day']=df['Start_Time'].dt.day
df['Start_Hour']=df['Start_Time'].dt.hour
df['Start_Weekday']=df['Start_Time'].dt.strftime('%a')
# Extract the amount of time in the unit of minutes for each accident, round to the nearest integer
td='Time_Duration(min)'
df[td]=round((df['End_Time']-df['Start_Time'])/np.timedelta64(1,'m'))
# -
# Visualization: map of accidents by state (longitude/latitude scatter).
sns.scatterplot(x='Start_Lng', y='Start_Lat', data=df, hue='State')
plt.xlabel('Longitude')
# Typo fix: the label previously contained a stray parenthesis ('Latitude)').
plt.ylabel('Latitude')
plt.show()
# Check the beginning and end date of this dataset
# NOTE(review): "beween" typo lives inside the printed string (runtime text).
print('This dataset contains data beween {} and {}.'.format(df.Start_Time.min(),df.Start_Time.max()))
# +
# Find out how many days (Monday-Sunday) between the beginning and end of this dataset.
calendar_weekday_num=[]
d1=df.Start_Time.min()
d2=df.Start_Time.max()
# Count calendar occurrences of each weekday (0=Mon .. 6=Sun) in the span.
for i in range(7):
    count = 0
    for d_ord in range(d1.toordinal(), d2.toordinal()+1):
        d = date.fromordinal(d_ord)
        if (d.weekday() == i):
            count += 1
    calendar_weekday_num.append(count)
print('Number of days for Monday-Sunday: {}.'.format(calendar_weekday_num))
print('Total number of days between {} and {}: {} days.'.format(d1,d2,sum(calendar_weekday_num)))
# +
# Time series analysis, resample by month
# Set the start_time as the index for resampling purpose
df.set_index('Start_Time',drop=True,inplace=True)
# Set the size of the figure
fig= plt.figure(figsize=(15,6))
# Run a for loop for each state
for i,state in enumerate(state_lst):
    # Set a sub plot
    plt.subplot(1, 3, 1+i)
    # Slice the dataframe for the specific state and resample monthly counts
    df[df['State']==state].resample('M').count()['ID'].plot(linestyle=linestyle_lst[i], color=color_lst[i])
    # Set the limits and labels
    plt.xlim('2016','2019-Mar')
    plt.ylim(0,4200)
    plt.xlabel('Year')
    plt.title('{}'.format(state))
plt.show()
# Reset the index back for further data analysis
df.reset_index(inplace=True)
# +
# The severity of accidents for each state
feature='Severity'
# Set the size of the figure
fig= plt.figure(figsize=(15,6))
# Cutoff percentage for display
pct_cutoff=0
# Define autopct: only display the value if the percentage is greater than the predefined cutoff value
def my_autopct(pct):
    return ('%1.0f%%' % pct) if pct > pct_cutoff else ''
# Run a for loop for each state
for i,state in enumerate(state_lst):
    # Set a sub plot
    plt.subplot(1, 3, 1+i)
    # Slice the dataframe for the specific state and feature
    df_temp=df[df['State']==state][feature].value_counts(normalize=True).round(2)
    # Define labels to go with the pie plot
    labels = [n if v > pct_cutoff/100 else ''
              for n, v in zip(df_temp.index, df_temp)]
    # Generate the pie plot
    plt.pie(df_temp, labels=labels, autopct=my_autopct, shadow=True)
    # Set axis,label and title
    plt.axis('equal')
    plt.xlabel(feature)
    plt.title(state)
    plt.tight_layout()
plt.show()
# +
# The day or night when accidents occurred for each state
feature='Sunrise_Sunset'
# Set the size of the figure
fig= plt.figure(figsize=(15,6))
# Cutoff percentage for display
pct_cutoff=2
# Define autopct: only display the value if the percentage is greater than the predefined cutoff value
def my_autopct(pct):
    return ('%1.0f%%' % pct) if pct > pct_cutoff else ''
# Run a for loop for each state
for i,state in enumerate(state_lst):
    # Set a sub plot
    plt.subplot(1, 3, 1+i)
    # Slice the dataframe for the specific state and feature
    df_temp=df[df['State']==state][feature].value_counts(normalize=True).round(2)
    # Define labels to go with the pie plot
    labels = [n if v > pct_cutoff/100 else ''
              for n, v in zip(df_temp.index, df_temp)]
    # Generate the pie plot
    plt.pie(df_temp, labels=labels, autopct=my_autopct, shadow=True)
    # Set axis,label and title
    plt.axis('equal')
    plt.xlabel(feature)
    plt.title(state)
    plt.tight_layout()
plt.show()
# +
# Hourly distribution of accidents on weekdays in NJ, PA & NY (for the whole dataset)
# NOTE(review): despite the "hourly" comment, this cell groups by
# Start_Weekday, i.e. it is a per-weekday bar chart.
# Set the size of the figure
fig= plt.figure(figsize=(15,6))
for i,state in enumerate(state_lst):
    plt.subplot(1, 3, 1+i)
    df[df['State']==state].groupby('Start_Weekday').count()['ID'].reindex(weekday_lst).plot(kind='bar',color=color_lst[i]).set_title(state)
    plt.xlabel('')
    plt.ylim(0, 20000)
    # Only display ylabel on the leftmost plot
    if i==0:
        plt.ylabel('Number of accidents')
# +
# For each state, find out how many unique days for each weekday/weekend
# Initialize an empty list to hold the number of days for each weekday/weekend for the three states
weekday_num_state=[]
# Run a for loop for the list of states: NJ, PA, & NY
for state in state_lst:
    # Initialize an empty list to hold the number of days for each weekday
    weekday_num=[]
    # Run a for loop for the whole week
    for weekday in weekday_lst:
        # Slice the dataframe for specific state & weekday
        df_weekday=df[(df['State']==state) & (df.Start_Weekday==weekday)]
        # For each weekday, extract the day information from the Start_Time column, by separating the datetime into day and hour
        day_lst1=df_weekday.Start_Time.astype(str).str.split(' ')
        # Extract the first item which is the day information
        day_lst2=[item[0] for item in day_lst1]
        # Append the count of unique days into the list weekday_num
        weekday_num.append(len(set(day_lst2)))
    # Append the day with state information encoded into the list weekday_num_state
    weekday_num_state.append(weekday_num)
# Accidents per day on weekdays/weekends in NJ, PA & NY
# Set the size of the figure
feature='Start_Weekday'
fig_x=len(state_lst)
# Divide the total number of accidents by the number of unique days
# Set the size of the figure
fig= plt.figure(figsize=(5*fig_x,6))
# Cutoff percentage for display
pct_cutoff=2
# Define autopct: only display the value if the percentage is greater than the predefined cutoff value
def my_autopct(pct):
    return ('%1.0f%%' % pct) if pct > pct_cutoff else ''
# Run a for loop for each state
for i,state in enumerate(state_lst):
    # Set a sub plot
    plt.subplot(1, 3, 1+i)
    # Slice the dataframe for the specific state and weekday
    df_temp=df[df['State']==state].groupby('Start_Weekday').count()['ID'].reindex(weekday_lst)
    # checkpoint
    # print('df_temp:', df_temp)
    # print('weekday_num_state[i]:',weekday_num_state[i])
    # Divide the number of accidents by the number of unique days, using list comprehension
    df_temp2=[round(int(item1)/int(item2)) for item1,item2 in zip(df_temp,weekday_num_state[i])]
    # checkpoint
    # print('df_temp2:', df_temp2)
    # Convert the list to a pandas Series for plotting
    df_temp2=pd.Series(df_temp2)
    # Make a bar plot, using different color and set the title as the state
    # df_temp2.plot(kind='bar',color=color_lst[i]).set_title(state)
    # Define labels to go with the pie plot
    # NOTE(review): v here is a raw count (thousands), not a fraction, so the
    # pct_cutoff/100 threshold never hides a label — confirm intended.
    labels = [n if v > pct_cutoff/100 else '' for n, v in zip(df_temp.index, df_temp)]
    # Generate the pie plot
    plt.pie(df_temp2, labels=labels, autopct=my_autopct, shadow=True)
    # Set axis,label and title
    plt.axis('equal')
    plt.xlabel('Weekday/Weekend')
    plt.title(state)
    plt.tight_layout()
plt.show()
# +
# The location of accidents for each state
# Where are the accidents?
feature='Accident location'
# Set the state as the index
df.set_index('State',drop=True,inplace=True)
# State is the index when selecting bool type data as df_bool
df_bool=df.select_dtypes(include=['bool'])
# Reset the index of the original data for other calculations
df.reset_index(inplace=True)
# Set the size of the figure
fig= plt.figure(figsize=(15,6))
# Cutoff percentage for display
pct_cutoff=2.5
# Define autopct: only display the value if the percentage is greater than the predefined cutoff value
def my_autopct(pct):
    return ('%1.0f%%' % pct) if pct > pct_cutoff else ''
# Run a for loop for each state
for i,state in enumerate(state_lst):
    # Set a sub plot
    plt.subplot(1, 3, 1+i)
    # Slice the dataframe for the specific state and feature
    df_temp=df_bool[df_bool.index==state]
    # Fraction of accidents flagged at each boolean location feature.
    df_temp=(df_temp.sum(axis=0)/df_temp.sum(axis=0).sum()).sort_values()
    # Define labels to go with the pie plot
    labels = [n if v > pct_cutoff/100 else ''
              for n, v in zip(df_temp.index, df_temp)]
    # Generate the pie plot
    plt.pie(df_temp, labels=labels, autopct=my_autopct, shadow=True)
    # Set axis,label and title
    plt.axis('equal')
    plt.xlabel(feature)
    plt.title(state)
    # NOTE(review): duplicate xlabel call — harmless but redundant.
    plt.xlabel(feature)
plt.show()
# +
# The weather condition for each state
feature='Weather_Condition'
# Set the size of the figure
fig= plt.figure(figsize=(15,6))
# Cutoff percentage for display
pct_cutoff=2
# Define autopct: only display the value if the percentage is greater than the predefined cutoff value
def my_autopct(pct):
    return ('%1.0f%%' % pct) if pct > pct_cutoff else ''
# Run a for loop for each state
for i,state in enumerate(state_lst):
    # Set a sub plot
    plt.subplot(1, 3, 1+i)
    # Slice the dataframe for the specific state and feature
    df_temp=df[df['State']==state][feature].value_counts(normalize=True).round(2)
    # Define labels to go with the pie plot
    labels = [n if v > pct_cutoff/100 else ''
              for n, v in zip(df_temp.index, df_temp)]
    # Generate the pie plot
    plt.pie(df_temp, labels=labels, autopct=my_autopct, shadow=True)
    # Set axis,label and title
    plt.axis('equal')
    plt.xlabel(feature)
    plt.title(state)
    plt.tight_layout()
plt.show()
# -
# !!!**Since the data-set is too large and since I don't have a GPU, I am proceeding to analyze and model for a particular city i.e) New York**!!!
# ## Considering Only New York
# +
# Set state
state='NY'
# Select the state of New York and drop the State column in one chained
# expression.  (Fix: the original called .drop(..., inplace=True) on a .loc
# slice, which can trigger pandas' SettingWithCopyWarning and may not modify
# the slice; chaining a non-inplace drop yields the same frame safely.)
df_state=df_sel.loc[df_sel.State==state].drop('State',axis=1)
df_state.info()
# -
# Map of accidents in New York
sns.scatterplot(x='Start_Lng', y='Start_Lat', data=df_state, hue='County', legend=False, s=20)
plt.show()
# ## Handling Categorical Attributes
# Generate dummies for categorical data
# (drop_first=True avoids the dummy-variable trap by dropping one level per column)
df_state_dummy = pd.get_dummies(df_state,drop_first=True)
# ## Train Test Split
# +
# Assign the data
# NOTE(review): this rebinds `df`, shadowing the full raw dataframe used in
# earlier cells — from here on `df` means the NY dummy-encoded data.
df=df_state_dummy
# Set the target for the prediction
target='Severity'
# Create arrays for the features and the response variable
# set X and y
y = df[target]
X = df.drop(target, axis=1)
# Split the data set into training and testing data sets
# (stratify=y keeps the severity-class proportions equal in both splits)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=21, stratify=y)
# -
# ## Modeling : **Random Forest**
# +
# Random Forest algorithm
# Create a random forest classifier (the original comment said "Gaussian
# Classifier", a leftover from a Naive Bayes template)
clf=RandomForestClassifier(n_estimators=750)
# Train the model on the training set, then predict on the test set
clf.fit(X_train,y_train)
y_pred= clf.predict(X_test)
# Get the accuracy score
accuracy_rf = accuracy_score(y_test, y_pred)
# -
# Model Accuracy, how often is the classifier correct?
# (Fix: "Randon" -> "Random" in the printed message)
print("[Random forest algorithm] accuracy_score: {:.3f}.".format(accuracy_rf))
# ### Visualize Key Features
# +
# Rank features by the fitted forest's importance scores (descending)
feature_imp = pd.Series(clf.feature_importances_,index=X.columns).sort_values(ascending=False)
# Creating a bar plot, displaying only the top k features
k=15
# (Fix: the x slice was hard-coded as [:15] while y used [:k]; use k for
# both so changing k keeps the two axes consistent)
sns.barplot(x=feature_imp[:k], y=feature_imp.index[:k])
# Add labels to your graph
plt.xlabel('Feature Importance Score')
plt.ylabel('Features')
plt.title("Visualizing Important Features")
plt.show()
# -
# ## Modeling with only Top Features : RF
# +
# Create a selector object that will use the random forest classifier to identify
# features that have an importance of more than 0.021
sfm = SelectFromModel(clf, threshold=0.021)
# Train the selector
sfm.fit(X_train, y_train)
feat_labels=X.columns
# Print the names of the most important features
# (get_support(indices=True) returns the column indices that passed the threshold)
for feature_list_index in sfm.get_support(indices=True):
    print(feat_labels[feature_list_index])
# +
# Transform the data to create a new dataset containing only the most important features
# Note: We have to apply the transform to both the training X and test X data.
X_important_train = sfm.transform(X_train)
X_important_test = sfm.transform(X_test)
# Create a new random forest classifier for the most important features
clf_important = RandomForestClassifier(n_estimators=750, random_state=0, n_jobs=-1)
# Train the new classifier on the new dataset containing the most important features
clf_important.fit(X_important_train, y_train)
# -
# # Results
# +
# Predict with the reduced-feature model and report its accuracy.
y_important_pred = clf_important.predict(X_important_test)
# View The Accuracy Of Our Limited Feature Model
# (Fix: "Randon" -> "Random" in the printed message)
print('[Random forest algorithm -- Limited feature] accuracy_score: {:.3f}.'.format(accuracy_score(y_test, y_important_pred)))
# -
# #### So, with the **Random Forest Classifier** above, we were able to derive **88.4%** of accuracy with the *test set*.
# # Discussion
# * The model achieves a solid test accuracy of 88.4%.
# * As future work, similar models like *Decision Trees, K-Nearest Neighbour, Multiclass Logistic Regression*, etc. can be built on the same dataset.
# * Upon constructing those models, a final model using **Ensemble Learning** can be built to boost up the *accuracy* of the model.
# * Also, with a **GPU** its efficient to build the model using *whole of the data-set*.
# # Conclusion
# Based on the dataset chosen for this capstone, with Time, Weather, and Temperature conditions pointing to certain classes, we can conclude that particular conditions have an impact on whether travel results in a less serious (class 0) or more severe accident (class 4) in and around New York.
| IBM_Data_Science_Capstone_US_Accident.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from celluloid import Camera
from itertools import cycle
import tikzplotlib
# %run './../split_step_fourier.ipynb'
# NOTE(review): the %run above presumably defines get_rc_ir, generate_signal,
# add_zeros and splitstepfourier used below — confirm in that notebook.
DEBUG = False
# showing figures inline
# %matplotlib inline
# plotting options
figure_size = (10, 10*np.sqrt(2))
plt.rcParams.update({
    'font.family': 'serif',
    'text.usetex': True,
    'pgf.rcfonts': False,
})
# +
# parameters
f_symbol = 32e9 # symbol rate (Baud) (Symbols per second)
n_up = 10 # samples per symbol (>1 => oversampling)
r_rc = .33
syms_per_filt = 4 # symbols per filter (plus minus in both directions)
t_sample_rc, rc = get_rc_ir(syms_per_filt, r_rc, f_symbol, n_up)
power = 5
# modulation scheme and constellation points (binary antipodal / BPSK-like)
M = 2
modulation = {'0': -1, '1': 1}
n_symbol = 30 # number of symbols
# Generate the random bit sequence
send_bits = np.random.choice([symbol for symbol in modulation.keys()], size=n_symbol)
# Generate the transmit signal
send_rc = generate_signal(modulation, t_sample_rc, 1/f_symbol, send_bits, rc, syms_per_filt, n_symbol, power)
# add zeros before and after signal (use samples per symbol as factor)
send_new = add_zeros(send_rc, 5 * int(1/f_symbol/t_sample_rc))
# +
## Transmission
z_length = 70 # [km]
nz = 10 # steps
dz = z_length / nz # [km]
alpha = 0.2 # attenuation [dB/km]
D = 17 # [ps/nm/km]
beta2 = - (D * np.square(1550e-9)) / (2 * np.pi * 3e8) * 1e-3 # [s^2/km] propagation constant, lambda=1550nm is standard single-mode wavelength
gamma = 1.3 # [1/W/km]
output = splitstepfourier(send_new, t_sample_rc, dz, nz, alpha, beta2, gamma, True)
# +
# Figure 1: pulse power after each split-step stage (run with attenuation).
fig1, ax1 = plt.subplots(len(output)+1, figsize=(15,30), sharex=True)
colors = cycle(list(mcolors.TABLEAU_COLORS))
counter = 0
x_vals = np.arange(send_new.size)*t_sample_rc
xmin = np.amin(x_vals)
xmax = np.amax(x_vals)
# First subplot: the untransmitted input signal.
ax1[counter].plot(x_vals, np.square(np.abs(send_new)), label='0', color=next(colors))
ax1[counter].set_xlim(xmin, xmax)
ax1[counter].set_ylim(bottom=0)
ax1[counter].set_title("Input (0 km)")
ax1[counter].set_ylabel("$|s|^2$")
counter += 1
# One subplot per stored intermediate step.
for key, val in output.items():
    ax1[counter].plot(x_vals, np.square(np.abs(val)), label=key, color=next(colors))
    ax1[counter].set_ylim(bottom=0)
    ax1[counter].set_title(f"after {key} steps ({float(key)*dz} km)")
    ax1[counter].set_ylabel("$|s|^2$ [W]")
    counter += 1
# Relabel the last subplot as the fiber output.
ax1[counter-1].set_title(f"Output ({z_length} km)")
ax1[counter-1].set_xlabel("$t$ [s]")
# +
## Transmission (same link, but lossless: alpha = 0)
z_length = 70 # [km]
nz = 10 # steps
dz = z_length / nz # [km]
alpha = 0 # attenuation [dB/km]
D = 17 # [ps/nm/km]
beta2 = - (D * np.square(1550e-9)) / (2 * np.pi * 3e8) * 1e-3 # [s^2/km] propagation constant, lambda=1550nm is standard single-mode wavelength
gamma = 1.3 # [1/W/km]
output2 = splitstepfourier(send_new, t_sample_rc, dz, nz, alpha, beta2, gamma, True)
# +
# Figure 2: lossless run, one subplot per stored step with a shared y-scale.
# (Fix: the subplot count was sized from the earlier `output` instead of
# `output2`; both happen to have 10 entries here, but using the matching
# variable removes a latent bug if nz changes.)
fig2, ax2 = plt.subplots(len(output2)+1, figsize=(15,30), sharex=True)
colors = cycle(list(mcolors.TABLEAU_COLORS))
counter = 0
# Common y-limits across all subplots so the pulse evolution is comparable.
all_vals = np.square(np.abs(np.asarray([val for val in output2.values()]).flatten()))
ymin = np.amin(all_vals)
ymax = np.amax(all_vals)*1.1
print(ymax)
x_vals = np.arange(send_new.size)*t_sample_rc
xmin = np.amin(x_vals)
xmax = np.amax(x_vals)
ax2[counter].plot(x_vals, np.square(np.abs(send_new)), label='0', color=next(colors))
ax2[counter].set_xlim(xmin, xmax)
ax2[counter].set_ylim(ymin, ymax)
ax2[counter].set_title(f"Input")
ax2[counter].set_ylabel("$|s|^2$")
counter += 1
for key, val in output2.items():
    ax2[counter].plot(x_vals, np.square(np.abs(val)), label=key, color=next(colors))
    ax2[counter].set_xlim(xmin, xmax)
    ax2[counter].set_ylim(ymin, ymax)
    ax2[counter].set_title(f"after {key} steps ({float(key)*dz} km)")
    ax2[counter].set_ylabel("$|s|^2$ [W]")
    counter += 1
ax2[counter-1].set_title(f"Output ({z_length} km)")
ax2[counter-1].set_xlabel("$t$ [s]")
# +
# parameters (second run: shorter bit sequence, finer steps, for the animation)
f_symbol2 = 32e9 # symbol rate (Baud) (Symbols per second)
n_up2 = 10 # samples per symbol (>1 => oversampling)
r_rc2 = .33
syms_per_filt2 = 4 # symbols per filter (plus minus in both directions)
t_sample_rc2, rc2 = get_rc_ir(syms_per_filt2, r_rc2, f_symbol2, n_up2)
power2 = 5
# modulation scheme and constellation points
M2 = 2
modulation2 = {'0': -1, '1': 1}
n_symbol2 = 10 # number of symbols
# Generate the random bit sequence
send_bits2 = np.random.choice([symbol for symbol in modulation2.keys()], size=n_symbol2)
# Generate the transmit signal
send_rc2 = generate_signal(modulation2, t_sample_rc2, 1/f_symbol2, send_bits2, rc2, syms_per_filt2, n_symbol2, power2)
# add zeros before and after signal (use samples per symbol as factor)
send_new2 = add_zeros(send_rc2, 5 * int(1/f_symbol2/t_sample_rc2))
# +
## Transmission (lossless, 100 split-step stages)
z_length2 = 70 # [km]
nz2 = 100 # steps
dz2 = z_length2 / nz2 # [km]
alpha2 = 0 # attenuation [dB/km]
D2 = 17 # [ps/nm/km]
beta22 = - (D2 * np.square(1550e-9)) / (2 * np.pi * 3e8) * 1e-3 # [s^2/km] propagation constant, lambda=1550nm is standard single-mode wavelength
gamma2 = 1.3 # [1/W/km]
output3 = splitstepfourier(send_new2, t_sample_rc2, dz2, nz2, alpha2, beta22, gamma2, True)
# +
## Animated Plot
plt.rcParams.update({'font.size': 26})
plt.rcParams['axes.labelweight'] = 'bold'
# Global axis limits over all propagation steps (power in mJ, time in ns).
all_vals2 = np.square(np.abs(np.asarray([val for val in output3.values()]).flatten()))*1e3
ymin2 = np.amin(all_vals2)
ymax2 = np.amax(all_vals2)*1.1
# Fix: use t_sample_rc2 (the sampling interval of send_new2) instead of the
# first run's t_sample_rc — the two values happen to coincide here, but the
# time axis must belong to the second signal.
x_vals2 = np.arange(send_new2.size)*t_sample_rc2*1e9
xmin2 = np.amin(x_vals2)
xmax2 = np.amax(x_vals2)
fig = plt.figure(figsize=(16,9))
camera = Camera(fig)
signal = np.square(np.abs(send_new2))*1e3
start_signal = signal
p = plt.plot(x_vals2, signal, label='Schritt 0 (0 km)', color='tab:blue')
plt.xlim(xmin2,xmax2)
plt.ylim(ymin2,ymax2)
plt.title("Signalverlauf auf LWL")
plt.ylabel("$|u|^2/$mJ")
plt.xlabel("$t/$ns")
plt.legend(p, ['Schritt 0.0'])
plt.savefig('../../../bachelorarbeit-folien/abschlussvortrag/images/fiber_propagation_wallpaper_noalpha.pdf')
camera.snap()
# One animation frame per stored split-step output.
for key,val in output3.items():
    signal = np.square(np.abs(val))*1e3
    p = plt.plot(x_vals2, signal, color='tab:blue')
    plt.xlim(xmin2,xmax2)
    plt.ylim(ymin2,ymax2)
    plt.title("Signalverlauf auf LWL")
    plt.ylabel("$|u|^2/$mJ")
    plt.xlabel("$t/$ns")
    plt.legend(p, [f'Schritt {key}'])
    camera.snap()
animation = camera.animate(interval=200)
# Static comparison figure: input pulse vs. final step.
fig_temp = plt.figure(figsize=(16,9))
plt.plot(x_vals2, np.square(np.abs(output3['100']))*1e3, label='Schritt 100.0')
plt.plot(x_vals2, start_signal, label='Schritt 0.0')
plt.xlim(xmin2,xmax2)
plt.ylim(ymin2,ymax2)
plt.title("Signalverlauf auf LWL")
plt.ylabel("$|u|^2/$mJ")
plt.xlabel("$t/$ns")
plt.legend()
plt.savefig('../../../bachelorarbeit-folien/abschlussvortrag/images/fiber_propagation_wallpaper_final_noalpha.pdf')
# +
# Export the static figures as TikZ + PDF for the thesis text, and the
# animation as an mp4 for the final presentation.
output_fname = "fiber_propagation"
output_path = "../../../bachelorarbeit-ausarbeitung/figures/plots/"
tikzplotlib.save(f'{output_path}{output_fname}.tex', figure=fig1, wrap=False, add_axis_environment=False, externalize_tables=True, override_externals=True)
tikzplotlib.save(f'{output_path}{output_fname}_noalpha.tex', figure=fig2, wrap=False, add_axis_environment=False, externalize_tables=True, override_externals=True)
fig1.savefig(f"{output_path}{output_fname}.pdf", bbox_inches='tight')
fig2.savefig(f"{output_path}{output_fname}_noalpha.pdf", bbox_inches='tight')
animation.save(f'../../../bachelorarbeit-folien/abschlussvortrag/video/{output_fname}_animated_noalpha.mp4', dpi=96)
| jupyter-notebooks/eval/signal_propagation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: DS 5110
# language: python
# name: ds5110
# ---
#
# ### K-Means Cluster Analysis of Fidelity Fund Returns
#
# In this project, I conduct a k-means cluster analysis on a set of Fidelity mutual funds. This helps group similar funds based on their performance.
# ##### Preprocessing
import pandas as pd
import numpy as np
# Load daily fund returns: one 'Index' date column plus one column per ticker.
df0 = pd.read_csv('fido_returns.csv')
df0.head(3)
# store the tickers in a list called tickers
tickers = list(df0.columns[1:])
tickers[:5]
# store the dates
dates = df0.Index.values
dates[:5]
# drop the index column
del df0['Index']
# store the dataframe values
vals = df0.values
# transpose the data, putting funds on rows and timepoints on columns
vals_t = vals.transpose()
vals_t
vals_t.shape
# build the dataframe with transposed data, calling it dft.
dft = pd.DataFrame(data=vals_t, index=df0.columns, columns=dates)
dft.head()
# ##### Read Data into Spark DataFrame
# +
from pyspark.ml.clustering import KMeans
from pyspark.ml.evaluation import ClusteringEvaluator
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
# NOTE(review): this CSV is expected to be the transposed data (funds on
# rows) written out elsewhere — confirm it matches `dft` above.
filename = 'fido_returns_funds_on_rows.csv'
df = spark.read.csv(filename, inferSchema=True, header = True)
# -
# Assemble the Features
# +
# Collect every column name and pack them into a single 'features' vector
# column, as required by Spark ML estimators.
feats = []
for i in df.schema.names:
    feats.append(i)
assembler = VectorAssembler(inputCols=feats, outputCol="features")
dataset=assembler.transform(df)
# dataset.select("*").show(truncate=False)
# -
dataset.select(dataset.features).show(5)
pandasDF = dataset.toPandas()
print(pandasDF[2:5])
# ##### Set up.
# +
# Fit a fixed K-means (k=3) with a deterministic seed, then cluster the data.
kmeans = KMeans().setK(3).setSeed(314).setMaxIter(10)
model = kmeans.fit(dataset)
predictions = model.transform(dataset)
# +
# Score the clustering with the silhouette metric (squared euclidean).
evaluator = ClusteringEvaluator()
silhouette = evaluator.evaluate(predictions)
print("Silhouette with squared euclidean distance = " + str(silhouette))
# +
'''
Here I define a function that:
- takes integers for lower and upper bounds, and a df
- fits K-means with k within this range
- compute the silhouette score for each k
- returns a pandas df with cols containing k and scores
'''
def kmeans_range(lower_bound, upper_bound, spark_df):
    """Fit K-means for each k in [lower_bound, upper_bound) and score it.

    Parameters
    ----------
    lower_bound, upper_bound : int
        Half-open range of cluster counts to try.
    spark_df : pyspark.sql.DataFrame
        Numeric data; every column is used as a feature.

    Returns
    -------
    pandas.DataFrame
        Columns 'k' and 'silhouette score', one row per k.  The result is
        also bound to the module-level name ``kmeans_range_df`` because a
        later plotting cell reads that global (kept for compatibility).
    """
    # Pack all columns into the single 'features' vector column Spark ML needs.
    feats = list(spark_df.schema.names)
    assembler = VectorAssembler(inputCols=feats, outputCol="features")
    dataset = assembler.transform(spark_df)
    kvalues = []
    sil_scores = []
    for k in range(lower_bound, upper_bound):
        kvalues.append(k)
        # Fixed seed / iteration budget so runs are reproducible and quick.
        kmeans = KMeans().setK(k).setSeed(314).setMaxIter(10)
        model = kmeans.fit(dataset)
        # Silhouette Score
        predictions = model.transform(dataset)
        evaluator = ClusteringEvaluator()
        silhouette = evaluator.evaluate(predictions)
        print("Silhouette with squared euclidean distance = " + str(silhouette))
        sil_scores.append(silhouette)
    # create df (deliberately module-level; see docstring)
    global kmeans_range_df
    data = {'k':kvalues,
            'silhouette score':sil_scores}
    kmeans_range_df = pd.DataFrame(data=data)
    return kmeans_range_df
# -
# Sweep k = 2..10 and tabulate silhouette scores.
kmeans_210 = kmeans_range(2,11,df)
kmeans_210
# %matplotlib inline
import matplotlib.pyplot as plt
# NOTE(review): this reads the global `kmeans_range_df` set inside
# kmeans_range; `kmeans_210` holds the same frame and could be used instead.
plt.plot(kmeans_range_df['k'], kmeans_range_df['silhouette score'])
# #!export PATH=/Library/TeX/texbin:$PATH
# !jupyter nbconvert --to pdf `pwd`/*.ipynb
| databricks_pyspark_examples/fidelity_fund_clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Creating data for n_features experiment
# +
import os
import numpy as np
# -
# Path to data folder.
# +
PATH_TO_EXP = '/cobrain/groups/ml_group/experiments/dustpelt/imc_exp/'
PATH_DATA = os.path.join(PATH_TO_EXP, 'data/n_instances')
if not os.path.exists(PATH_DATA):
    os.makedirs(PATH_DATA)
# -
# Random state.
random_state = np.random.RandomState(0x0BADCAFE)
# Data configuration.
# NOTE(review): `elements` is not used anywhere below in this notebook —
# confirm whether it belongs to a removed experiment sweep.
elements = np.arange(0.001, 0.02, 0.0015)
# +
n_samples, n_objects = 800, 1600
n_rank = 25
n_features = 100
scale = 0.05
noise = 0.10
# -
# Making artificial data:
from sgimc.utils import make_imc_data
# +
# Synthesize the inductive-matrix-completion problem: side features X/Y,
# ideal factors, and both the clean (R) and noisy (R_noisy) target matrices.
X, W_ideal, Y, H_ideal, R_noisy, R = make_imc_data(
    n_samples, n_features, n_objects, n_features,
    n_rank, scale=(scale, scale), noise=scale*noise,
    binarize=False,
    random_state=random_state,
    return_noisy_only=False)
data = (X, Y, R, R_noisy)
# -
# Saving data:
import gzip
import pickle
filename = os.path.join(PATH_DATA, 'data.gz')
# Compression level 4 trades file size for write speed.
with gzip.open(filename, "wb+", 4) as fout:
    pickle.dump(data, fout)
| experiments/n_instances/data_generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploring Weather Data
# -------------------------------------------------------------------------------------------------------------------
# # About This Project
#
# The purpose of the exploring weather data project is to explore the two data sets, 'city_data' and 'global_data', and determine the relationship between a chosen city's average temperature in the 'city_data' database to the global average temperature. Key concepts to demonstrate include data extraction, data manipulation, data wrangling, and data visualization.
# # SQL Database Extraction
#
# ### Determine the Closest City
#
# SELECT *
# FROM city_list
# WHERE country LIKE 'United States'
#
# Since I am located in Indiana, the closest city from the data set is Chicago.
#
# ### Retrieve Chicago Weather Data
#
# SELECT year, avg_temp AS Chicago
# FROM city_data
# WHERE country = 'United States' AND city = 'Chicago'
#
# The data ranges from year 1743 to 2013 where year 1746 to 1749 is missing data.
#
# ### Retrieve Global Weather Data
#
# SELECT year, avg_temp AS Global
# FROM global_data
#
# The data ranges from year 1750 to 2015, which contains fewer data points than Chicago's weather data. From these observations, it is safe to assume that the joined table will contain data from 1750 to 2013.
#
# ### Join Resulting Tables into One
#
# SELECT city_data.year,
# city_data.avg_temp as City,
# global_data.avg_temp as Global
# FROM city_data
# JOIN global_data
# ON city_data.year = global_data.year
# WHERE country LIKE 'United States'
# AND city LIKE 'Chicago'
#
# After evaluating the query, the output was downloaded as 'avg-yearly-weather.csv'.
#
# # Spreadsheet Data Manipulation
#
# ### Moving Average Calculation
#
# The downloaded csv file was opened in Microsoft Excel where the 'AVERAGE' function was used to calculate the 5-year moving averages (MA) for Chicago and Global weather data. Columns 'chicagoMA' and 'globalMA' were created for tabulation of the executed function. Now, I have the required information to plot a line chart.
#
# # Data Visualization
#
# ### Import CSV and Libraries
#
# 'avg-yearly-weather.csv' was imported using the pandas library in Python3. Matplotlib library was imported to utilize pyplot for visualization later.
# Import pandas to import csv, matplotlib to use pyplot
import pandas as pd
import matplotlib.pyplot as plt
# Reading csv "avg-yearly-weather.csv"
# NOTE(review): absolute, machine-specific path — a relative path would make
# the notebook portable.
raw_avg_temp = pd.read_csv("/Users/bnoog/Documents/DAND/avg-yearly-weather.csv")
# Check to see if correct csv imported
raw_avg_temp.head()
# ### Data Wrangling
# Once I confirmed that the correct dataset has been imported, the table was manipulated to become easier to use.
# Assign variables and clean the table: make the year the index so the
# rolling window and plots are keyed by year.
avg_temp = raw_avg_temp.set_index(['year']).copy()
def moving_avg(df, window_size):
    """Return the rolling mean of ``df`` over ``window_size`` rows.

    Rows with an incomplete window (the first ``window_size - 1``) are
    dropped, and an independent copy is returned so later mutations do
    not affect intermediate results.
    """
    smoothed = df.rolling(window_size).mean()
    return smoothed.dropna().copy()
# Compute 5-year moving averages of every column.
moving_avg_temp = moving_avg(avg_temp, 5)
moving_avg_temp.head()
# Defining columns to use
chicagoMA = moving_avg_temp.chicagoMA
globalMA = moving_avg_temp.globalMA
# ### Generate Line Chart
# Now that the table is usable, I can use matplotlib to plot the 5-year moving average temperature for Chicago and Global weather data from 1750 to 2013 as a line chart. The graph was manipulated in terms of labels, color, and size to visualize trends more effectively.
# +
# Plot the Chicago and Global 5-year moving averages, keeping the line
# handles for the legend.  (Fix: the original plotted each series twice —
# once bare and once to capture handles — drawing four overlapping lines;
# a single plot call per series is sufficient.)
plot1, = plt.plot(moving_avg_temp.chicagoMA, color = 'orange')
plot2, = plt.plot(moving_avg_temp.globalMA, 'b-')
plt.legend([plot1,plot2],["Chicago","Global"], loc = 2)
plt.title("5 Year Moving Average Temperature: Chicago & Global")
plt.xlabel("Year")
plt.ylabel("Temperature °C")
plt.ylim(7,12)
plt.xlim(1750,2020)
plt.grid(True, color = 'k', linestyle = ':')
plt.style.use('default')
# Resize
from matplotlib import rcParams
rcParams ['figure.figsize'] = 10,8
# -
# ### Correlation Determination
# A quick Pearson correlation table was created to demonstrate the strong positive correlation between the two data sets.
# Pearson Correlation Coefficient
# Drop the raw (unsmoothed) columns so only the two moving-average columns
# enter the correlation matrix.
moving_avg_temp.drop('city', inplace = True, axis = 1)
moving_avg_temp.drop('global', inplace = True, axis = 1)
correlation = moving_avg_temp.corr(method = 'pearson')
print(correlation)
# # Observations
#
# From the data visualization and Pearson's correlation coefficient, one can observe that both Chicago and Global weather data trendlines are positively correlated (r = 0.872). One can safely assume that the average temperature will continue to increase in the next several decades. Chicago is generally, at a minimum, 1°C warmer in any given year while also fluctuating more erratically than Global weather data from decade to decade. Around 1820, Global average temperature decreased more than 1°C.
| exploring-weather-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/LeeGitaek/2020_AI_Class/blob/master/kaggle_netflix.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="1mgIGFirA4i1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 391} outputId="b9dbca12-05d2-474c-f62b-67b2b3c19cba"
# !pip uninstall kaggle
# !pip install --upgrade pip
# !pip install kaggle==1.5.6
# + id="roXSGLRs5Kvo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ac7ff19c-aefc-4881-d483-282e5e8ca5c8"
# !mkdir -p ~/.kaggle
# !cp kaggle.json ~/.kaggle
# !ls -lha kaggle.json
# !chmod 600 ~/.kaggle/kaggle.json
# + id="CdA72b1Q5Nuv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f3d2137c-b90a-4bf5-edf9-4c2a0f5c4b0c"
# !kaggle competitions download -c netflix-stock-prediction
# + id="glpJr2wA5Xa9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="eb69708e-4c94-494e-d72c-706c337c3225"
# !unzip netflix-stock-prediction.zip
# + id="9k0S2kyd6IMV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="431a6296-f26b-4fda-d42c-d0aeb7ac0d83"
import numpy as np
import random
import pandas as pd
import matplotlib.pyplot as plt
# NOTE(review): `from pandas import datetime` was removed in modern pandas
# and is immediately shadowed by `import datetime` below — consider removing.
from pandas import datetime
import math, time
import itertools
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
import datetime
from operator import itemgetter
from sklearn.metrics import mean_squared_error
from math import sqrt
import torch
import torch.nn as nn
from torch.autograd import Variable
# Fixed seed for reproducible weight initialization.
torch.manual_seed(1)
# + id="vo-nCkM-oOsy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="b6d904da-2276-430f-c5c7-8872ed3b2456"
df_train = pd.read_csv('train.csv',parse_dates=True)
df_test = pd.read_csv('test.csv',parse_dates=True)
# NOTE(review): `sc` is never used below — a fresh scaler is created later.
sc = MinMaxScaler()
df_train.head()
# + id="U6oQfA5koUnG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 238} outputId="f2ba5372-0195-4103-eef6-7d9fc8f396ee"
df_train.info()
# + id="h5wfYC86o__W" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 454} outputId="7c6ab56c-bc21-4944-81c4-eb05a06e72fe"
import seaborn as sns
sns.set_style("darkgrid")
plt.figure(figsize = (10,6))
plt.plot(df_train[['Close']])
# Label the x-axis with every 967th date to keep tick labels readable.
plt.xticks(range(0,df_train.shape[0],967),df_train['Date'].loc[::967],rotation=45)
plt.title('Netflix Stock Price',fontsize=18, fontweight='bold')
plt.xlabel('Date',fontsize=18)
plt.ylabel('Close Price (USD)',fontsize=18)
plt.show()
# + id="EWU1GPwMqzCy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="225f4ce1-08ab-4779-a6bc-80ca5b369d7b"
price = df_train[['Close']]
price.info()
# + id="EeAB0Mstq3-2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="3edd9669-af90-40f3-c78a-e98281f27ccc"
from sklearn.preprocessing import MinMaxScaler
# Scale closes to [-1, 1] for the LSTM.
# NOTE(review): assigning into a column of a sliced frame can raise pandas'
# SettingWithCopyWarning — consider price = price.copy() first.
scaler = MinMaxScaler(feature_range=(-1, 1))
price['Close'] = scaler.fit_transform(price['Close'].values.reshape(-1,1))
# + id="wOmSXISRqkTI" colab_type="code" colab={}
def split_data(stock, lookback):
    """Slice a price series into overlapping windows and split train/test.

    Each window has ``lookback`` rows: the first ``lookback - 1`` rows are
    the model input, the last row is the target.  The final 20% of windows
    (rounded) form the test set.

    Returns [x_train, y_train, x_test, y_test] as numpy arrays.
    """
    raw = stock.to_numpy()
    # Every overlapping sequence of `lookback` consecutive rows.
    windows = np.array([raw[start:start + lookback]
                        for start in range(len(raw) - lookback)])
    n_test = int(np.round(0.2 * windows.shape[0]))
    n_train = windows.shape[0] - n_test
    x_train = windows[:n_train, :-1, :]
    y_train = windows[:n_train, -1, :]
    x_test = windows[n_train:, :-1]
    y_test = windows[n_train:, -1, :]
    return [x_train, y_train, x_test, y_test]
# + id="1Vh92WAtqsD_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="1aca4c21-41cc-4e40-e993-e5fe3207ca1a"
lookback = 20 # choose sequence length
x_train, y_train, x_test, y_test = split_data(price, lookback)
print('x_train.shape = ',x_train.shape)
print('y_train.shape = ',y_train.shape)
print('x_test.shape = ',x_test.shape)
print('y_test.shape = ',y_test.shape)
# + id="d-UBsOpUuatN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 293} outputId="03513087-11d6-4beb-861c-dd9815b7a093"
df_test['Volume'].plot()
# + id="F8HaMokwaOaE" colab_type="code" colab={}
# Convert the numpy splits into float tensors for training.
x_train = torch.from_numpy(x_train).type(torch.Tensor)
x_test = torch.from_numpy(x_test).type(torch.Tensor)
y_train_lstm = torch.from_numpy(y_train).type(torch.Tensor)
y_test_lstm = torch.from_numpy(y_test).type(torch.Tensor)
# NOTE(review): the *_gru tensors are never used in this chunk — possibly
# leftovers from a planned GRU comparison.
y_train_gru = torch.from_numpy(y_train).type(torch.Tensor)
y_test_gru = torch.from_numpy(y_test).type(torch.Tensor)
# + id="MqOzRo_JdlPG" colab_type="code" colab={}
# Model hyperparameters.
input_dim = 1
hidden_dim = 32
num_layers = 2
output_dim = 1
num_epochs = 100
# + id="AkYwhXCmr3gj" colab_type="code" colab={}
class LSTM(nn.Module):
    """Stacked LSTM mapping a (batch, seq, input_dim) sequence to a single
    (batch, output_dim) prediction taken from the last time step."""

    def __init__(self, input_dim, hidden_dim, num_layers, output_dim):
        super(LSTM, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        # batch_first=True -> tensors are laid out (batch, seq, feature).
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        batch = x.size(0)
        state_shape = (self.num_layers, batch, self.hidden_dim)
        # Fresh zero states each call; detached so no gradient flows into them.
        hidden = torch.zeros(state_shape).requires_grad_()
        cell = torch.zeros(state_shape).requires_grad_()
        seq_out, _ = self.lstm(x, (hidden.detach(), cell.detach()))
        # Project only the final time step down to the output dimension.
        return self.fc(seq_out[:, -1, :])
# + id="ni7B2lrIr7b2" colab_type="code" colab={}
model = LSTM(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, num_layers=num_layers)
criterion = torch.nn.MSELoss(reduction='mean')
optimiser = torch.optim.Adam(model.parameters(), lr=0.01)
# + id="Wf9kD2PEr_KL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="2c81203f-4e9d-4b33-da59-51468a52526f"
import time
hist = np.zeros(num_epochs)
start_time = time.time()
lstm = []
# Full-batch training: every epoch runs the whole training set through the
# model once (no mini-batching).
for t in range(num_epochs):
    y_train_pred = model(x_train)
    loss = criterion(y_train_pred, y_train_lstm)
    print("Epoch ", t, "MSE: ", loss.item())
    hist[t] = loss.item()
    optimiser.zero_grad()
    loss.backward()
    optimiser.step()
training_time = time.time()-start_time
print("Training time: {}".format(training_time))
# + id="Tw1YiyaNsF50" colab_type="code" colab={}
# Map predictions and targets back to dollar prices for plotting.
predict = pd.DataFrame(scaler.inverse_transform(y_train_pred.detach().numpy()))
original = pd.DataFrame(scaler.inverse_transform(y_train_lstm.detach().numpy()))
# + id="byd8r-m_sJOS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 409} outputId="10ddf729-1d2c-4323-824d-7598e590684e"
import seaborn as sns
sns.set_style("darkgrid")
fig = plt.figure()
fig.subplots_adjust(hspace=0.2, wspace=0.2)
# Left panel: actual vs. predicted training prices.
plt.subplot(1, 2, 1)
ax = sns.lineplot(x = original.index, y = original[0], label="Data", color='royalblue')
ax = sns.lineplot(x = predict.index, y = predict[0], label="Training Prediction (LSTM)", color='tomato')
ax.set_title('Stock price', size = 14, fontweight='bold')
ax.set_xlabel("Days", size = 14)
ax.set_ylabel("Cost (USD)", size = 14)
ax.set_xticklabels('', size=10)
# Right panel: per-epoch training loss curve.
plt.subplot(1, 2, 2)
ax = sns.lineplot(data=hist, color='royalblue')
ax.set_xlabel("Epoch", size = 14)
ax.set_ylabel("Loss", size = 14)
ax.set_title("Training Loss", size = 14, fontweight='bold')
fig.set_figheight(6)
fig.set_figwidth(16)
# + id="JrKyZ4MKsPtJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="9355b6a7-968c-460c-e1d1-48d870087c5d"
import math, time
from sklearn.metrics import mean_squared_error
# make predictions
y_test_pred = model(x_test)
# invert predictions back to price scale
# NOTE(review): this rebinds y_train_pred/y_train/y_test from tensors to
# numpy arrays — later cells depend on the numpy versions.
y_train_pred = scaler.inverse_transform(y_train_pred.detach().numpy())
y_train = scaler.inverse_transform(y_train_lstm.detach().numpy())
y_test_pred = scaler.inverse_transform(y_test_pred.detach().numpy())
y_test = scaler.inverse_transform(y_test_lstm.detach().numpy())
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(y_train[:,0], y_train_pred[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(y_test[:,0], y_test_pred[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
# Collect scores and timing for later comparison.
lstm.append(trainScore)
lstm.append(testScore)
lstm.append(training_time)
# + id="krh3-nL3sVaG" colab_type="code" colab={}
# Build full-length arrays (NaN outside their range) so train and test
# predictions align with the original price index when plotted together.
trainPredictPlot = np.empty_like(price)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[lookback:len(y_train_pred)+lookback, :] = y_train_pred
# shift test predictions for plotting
testPredictPlot = np.empty_like(price)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(y_train_pred)+lookback-1:len(price)-1, :] = y_test_pred
original = scaler.inverse_transform(price['Close'].values.reshape(-1,1))
# Columns: 0 = train prediction, 1 = test prediction, 2 = actual price.
predictions = np.append(trainPredictPlot, testPredictPlot, axis=1)
predictions = np.append(predictions, original, axis=1)
result = pd.DataFrame(predictions)
# + id="i_RvhC6lsbla" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="3f7d666e-2273-4548-e8b3-f259ffebc320"
import plotly.graph_objects as go
fig = go.Figure()
# One trace per column of `result`: train prediction, test prediction, actual.
# (Fix: the original wrapped two of the traces in a redundant nested
# go.Scatter(go.Scatter(...)) call; a single constructor per trace is
# equivalent and clearer.)
fig.add_trace(go.Scatter(x=result.index, y=result[0],
                    mode='lines',
                    name='Train prediction'))
fig.add_trace(go.Scatter(x=result.index, y=result[1],
                    mode='lines',
                    name='Test prediction'))
fig.add_trace(go.Scatter(x=result.index, y=result[2],
                    mode='lines',
                    name='Actual Value'))
# Dark-theme axis styling.
fig.update_layout(
    xaxis=dict(
        showline=True,
        showgrid=True,
        showticklabels=False,
        linecolor='white',
        linewidth=2
    ),
    yaxis=dict(
        title_text='Close (USD)',
        titlefont=dict(
            family='Rockwell',
            size=12,
            color='white',
        ),
        showline=True,
        showgrid=True,
        showticklabels=True,
        linecolor='white',
        linewidth=2,
        ticks='outside',
        tickfont=dict(
            family='Rockwell',
            size=12,
            color='white',
        ),
    ),
    showlegend=True,
    template = 'plotly_dark'
)
# Title annotation anchored to the top-left of the plotting area.
annotations = []
annotations.append(dict(xref='paper', yref='paper', x=0.0, y=1.05,
                              xanchor='left', yanchor='bottom',
                              text='Results (LSTM)',
                              font=dict(family='Rockwell',
                                        size=26,
                                        color='white'),
                              showarrow=False))
fig.update_layout(annotations=annotations)
fig.show()
# + id="7x8dWEmz0Xj7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d29e7f39-9728-49dc-adef-113ab5e9b3e2"
# Sanity check: how many non-NaN test predictions are available.
print(len(result[1].dropna(axis=0)))
# + id="RBJM4NmY2bjp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 391} outputId="f4aa49a1-61c6-4680-eadc-b683f9f8f02b"
# NOTE(review): assigning the dropped Series back into the column re-aligns it
# by index, so the NaN rows stay NaN — this line is effectively a no-op.
result[1] = result[1].dropna(axis=0)
# Take the last 21 test predictions as the values to submit.
predict = result[1].tail(21).dropna(axis=0)
print(len(predict))
print(predict.astype('int'))
# + id="Yq8Ix88i5DCJ" colab_type="code" colab={}
# Build the submission table: a 1-based Id column next to the integer predictions.
predict = np.array(predict).reshape(-1,1).astype('int')
# NOTE(review): `id` shadows the built-in id() for the rest of the notebook.
id = np.array([i+1 for i in range(len(predict))]).reshape(-1,1)
result = np.hstack([id,predict])
df = pd.DataFrame(result,columns=["Id","Expected"])
df.to_csv("submission_form.csv",index=False,header=True)
# + id="hBGMU38p5KAx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="87111121-5eb8-4585-9e62-cc8d3b02b185"
# !kaggle competitions submit -c netflix-stock-prediction -f submission_form.csv -m "baseline"
| kaggle_netflix.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import AFQ.data as afqd
import cloudknot as ck
import importlib
import s3fs
import json
import os.path as op
import numpy as np
import pandas as pd
# +
study = afqd.S3BIDSStudy(
"hbn_curated-0",
bucket="fcp-indi",
s3_prefix="data/Projects/HBN/BIDS_curated",
subjects=1,
)
qsiprep_study = afqd.S3BIDSStudy(
"hbn_curated_qsiprep-0",
bucket="fcp-indi",
s3_prefix="data/Projects/HBN/BIDS_curated/derivatives/qsiprep",
subjects=1,
)
# -
print(len(study._all_subjects))
print(len(qsiprep_study._all_subjects))
remaining_subs = list(set(study._all_subjects) - set(qsiprep_study._all_subjects))
print(len(remaining_subs))
mismatch_df = pd.read_csv("/Users/richford/Desktop/curation_dwi_mismatch.csv")
mismatch_df.head()
print("union", len(set(remaining_subs) & set(mismatch_df["participant_id"])))
print("remaining, not mismatched", len(set(remaining_subs) - set(mismatch_df["participant_id"])))
# ## Define the preprocessing function
def preprocess_hbn(subject_id):
    """Download one HBN subject from the FCP-INDI S3 bucket, run qsiprep dMRI
    preprocessing on it, and upload the derivatives back to S3.

    Intended to run inside a cloudknot/AWS Batch container, so every
    dependency is imported locally to keep the function self-contained.

    Parameters
    ----------
    subject_id : str
        BIDS subject label to preprocess.

    Returns
    -------
    dict
        {subject_id: True} if qsiprep ran and results were uploaded,
        {subject_id: False} if the subject had no dMRI files.
    """
    import AFQ.data as afqd
    import os
    import subprocess
    from s3fs import S3FileSystem
    # The following code snippet retrieves access credentials for the fcp-indi
    # bucket that are stored in an AWS secret.
    import boto3
    import json
    secret_name = "hbn/fcp-indi/access"
    region_name = "us-west-2"
    # Create a Secrets Manager client
    session = boto3.session.Session()
    client = session.client(
        service_name='secretsmanager',
        region_name=region_name
    )
    get_secret_value_response = client.get_secret_value(
        SecretId=secret_name
    )
    # Decrypts secret using the associated KMS CMK.
    secret = json.loads(get_secret_value_response['SecretString'])
    local_dir = "./hbn"
    local_output_dir = "./hbn-preproc"
    bucket = "fcp-indi"
    s3_prefix = "data/Projects/HBN/BIDS_curated"
    # Download only this subject's raw BIDS data (anonymous read access).
    study = afqd.S3BIDSStudy(
        "hbn_curated",
        bucket=bucket,
        s3_prefix=s3_prefix,
        subjects=[subject_id],
        anon=True,
    )
    study.download(local_dir)
    # Writing derivatives back to fcp-indi requires the credentialed filesystem.
    fs = S3FileSystem(
        key=secret["fcp_indi_aws_access_key_id"],
        secret=secret["fcp_indi_aws_secret_access_key"]
    )
    # HBN has other files that we don't need for dMRI preproc and whose presence
    # will confuse qsiprep. Get rid of them (func scans and fMRI fieldmaps).
    s0 = study.subjects[0]
    dwi_files = [file for key, file in s0.files["raw"].items() if "/dwi/" in key]
    fmri_files = [file for key, file in s0.files["raw"].items() if "/func/" in key]
    fmri_files += [file for key, file in s0.files["raw"].items() if "/fmap/" in key and "acq-fMRI" in key]
    for fname in fmri_files:
        os.remove(fname)
    # qsiprep invocation; check=True below makes a failed run raise
    # CalledProcessError so the Batch job is marked failed (and can retry).
    command = [
        "qsiprep",
        "--output-resolution",
        "1.8",
        "--participant-label",
        subject_id,
        "-w",
        "./hbn-wrk",
        "--nthreads",
        "8",
        "--omp-nthreads",
        "8",
        "--dwi-denoise-window",
        "5",
        "--unringing-method",
        "mrdegibbs",
        local_dir,
        local_output_dir,
        "participant",
    ]
    if dwi_files:
        response = subprocess.run(command, check=True)
        # Upload the subject's derivatives directory plus the HTML report.
        output_dir = "/".join([bucket, s3_prefix, "derivatives", "qsiprep"])
        fs.put(f"{local_output_dir}/qsiprep/{subject_id}",
               "/".join([output_dir, subject_id]), recursive=True)
        fs.put(f"{local_output_dir}/qsiprep/{subject_id}.html",
               "/".join([output_dir, subject_id + ".html"]))
        return {subject_id: True}
    else:
        # No dMRI data for this subject — nothing to preprocess.
        return {subject_id: False}
# # Create a cloudknot DockerImage instance
# This Docker image was previously created for the initial production runs. Instead of creating a new one, retrieve the information from the cloudknot config file. If you haven't done this previously on your local machine, you'll have to set `recover_from_config = False`.
recover_from_config = True
if not recover_from_config:
di = ck.DockerImage(
name="preprocess-hbn-curated",
func=preprocess_hbn,
base_image="qsiprep:direct-0.12.1",
github_installs=["https://github.com/yeatmanlab/pyAFQ.git@master",
"https://github.com/matplotlib/matplotlib.git@v2.2.3",
"https://github.com/bids-standard/pybids.git@0.9.3"],
overwrite=True,
)
else:
di = ck.DockerImage(name="preprocess-hbn-curated")
# ## Build, tag, and push the Docker image
di.repo_uri
if not recover_from_config:
di.build(tags=["hbn-preproc-curated"])
if not recover_from_config:
repo = ck.aws.DockerRepo(name=ck.get_ecr_repo())
if not recover_from_config:
print(repo.repo_uri)
if not recover_from_config:
# The very first time you run this, this command could take
# a few hours because the docker image is large
di.push(repo=repo)
# ## Create the Knots
# Specify bid_percentage to use Spot instances
# And make sure the volume size is large enough. 50-55 GB seems about right for HBN preprocessing. YMMV.
# Also be sure to set the Project tag in ``aws_resource_tags`` for billing transparency
knot = ck.Knot(
name=f"qsiprep-hbn-curated-3",
docker_image=di,
pars_policies=('AmazonS3FullAccess','AllowFcpIndiKeyAccess'),
bid_percentage=100,
memory=64000,
job_def_vcpus=8,
volume_size=90,
max_vcpus=8192,
retries=3,
aws_resource_tags={"Project": "HBN-FCP-INDI"},
)
# ## Submit the jobs and check on results
first_50_futures = knot.map(remaining_subs[50:])
# Argh, that was silly. That's all the subjects except the first 50. Oh well, let's see how it's running
knot.view_jobs()
print(remaining_subs[:50])
remaining_futures = knot.map(remaining_subs[:50])
knot.view_jobs()
# ## Results
#
# The results are dicts where the keys are the subject IDs and the values report success or failure
# Bug fix: `result_futures` was never defined — the two knot.map() calls above
# returned `first_50_futures` and `remaining_futures`.  Collect both batches.
first_50_futures.result()
remaining_futures.result()
# ## Figure out how many subjects we have in the entire study to support some cost estimates
# +
all_sites = {}
for site in ["Site-SI", "Site-CBIC", "Site-RU", "Site-CUNY"]:
all_sites[site] = afqd.HBNSite(site=site)
print(f"{site}: {len(all_sites[site]._all_subjects)}")
# -
# ## When you're done, clobber the knot
knot.clobber(clobber_pars=True)
| notebooks/2021-01-25-preprocess-remaining-hbn-curated.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Inspecting available data
#
# The `.info()` method provides an overview of the data in an opened run or file:
# +
from extra_data import RunDirectory
run = RunDirectory("/gpfs/exfel/exp/XMPL/201750/p700000/raw/r0010")
run.info()
# -
# The [lsxfel command](cli.rst#lsxfel) can give similar information at the command line.
#
# The train IDs included in the run are available as a simple list:
print(run.train_ids[:10])
# And the source names are available as a set:
run.all_sources
# You can see control and instrument sources separately,
# but for data analysis this distinction is often not important.
assert run.all_sources == (run.control_sources | run.instrument_sources)
# Within each source, the data is organised under keys.
# The `.keys_for_source()` method lists a source's keys:
run.keys_for_source('SA1_XTD2_XGM/XGM/DOOCS:output')
# Instrument sources may have multiple values recorded for each train,
# and may be missing data for some trains.
# You can see how many data points there are for each train with `.get_data_counts()`.
# E.g. for this AGIPD detector module, the counts are the number of frames in each train:
run.get_data_counts('SPB_DET_AGIPD1M-1/DET/11CH0:xtdf', 'image.data')
# This method returns a pandas series.
# The index (the numbers shown on the left) are train IDs.
| docs/inspection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# This is an example code for getting started with tensorflow.
# NOTE(review): this uses the TensorFlow 1.x graph/session API
# (tf.Session, tf.placeholder); it does not run under TF 2.x eager mode.
print("getting started with tensorflow\n")
# import tensorflow
print("import...\n")
import tensorflow as tf
sess = tf.Session()
# build your first computational graph
# with constant nodes
print("\nbuild computational graph with constant nodes...\n")
node1 = tf.constant(3.0, tf.float32)
node2 = tf.constant(4.0) # also tf.float32 implicitly
node3 = tf.add(node1, node2)
print(node1)
print(node2)
print(node3)
# this is just BUILDING the graph
# to actually evaluate the graph, you have to run it with a SESSION
print("\nrun the graph...\n")
print(sess.run([node1, node2, node3]))
# you may want a more flexible graph
# with nodes which can be FED with external inputs
# to do this we use a PLACEHOLDER
print("\nbuild and graph with exteral inputs...\n")
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder = a + b # operator + provides a shortcut for tf.add
# to FEED the PLACEHOLDERs
# use the feed_dict parameter
print("\nrun the graph...\n")
data1 = {a: 3, b: 2.5}
print(sess.run(adder,feed_dict = data1))
data2 = {a: [1, 2.5], b: [5, 4]}
print(sess.run(adder, data2)) # "feed_dict" can be omitted
# we can continue to make the graph more complex
print("\na more complex graph...\n")
add_and_triple = adder * 3 # for a shortcut just use "adder*3"
print(sess.run(add_and_triple, {a: 1.0, b: 3.5}))
# in machine learning there are various parameters (weights, bias, etc.)
# use a VARIABLE
# having parameters makes a model trainable
print("\nbuild a trainable model...\n")
W = tf.Variable(.3, tf.float32)
# we can also get W by:
# C = tf.constant(.3, tf.float32)
# W = tf.Variable(C)
b = tf.Variable(-.3, tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W * x + b
# you have to explicitly initialize the Variables
print("\ninitialize model parameters...\n")
init = tf.global_variables_initializer()
sess.run(init) # note that the Variables are uninitialized until we call sess.run
# now that we've initialized the Variables, we can compute
print(sess.run([W, b]))
# since x is a placeholder, we can feed it and evaluate the model
print("\nrun the model...\n")
print(sess.run(linear_model, {x: 1.0}))
print(sess.run(linear_model, {x: [1, 2, 3, 4]})) # we can evaluate model for several values of x simultaneously
# you can change the value of variables
# NOTE(review): tf.assign returns the assignment op; rebinding W/b to it means
# the Python names no longer reference the Variables themselves.  Running the
# ops below does update the underlying Variables that linear_model reads, but
# keeping separate names for the assign ops would be clearer — confirm intent.
print("\nassign new values to variables...\n")
W = tf.assign(W, 1.0)
b = tf.assign(b, -1.0)
sess.run([W, b]) # remember to run !
print(sess.run(linear_model, {x: [1, 2, 3, 4]})) # the result is computed with new values
| tensorflow/exp1_basic_usage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Rock Paper Scissors
#
# Let's play the famous game against our computer.
#
# https://en.wikipedia.org/wiki/Rock%E2%80%93paper%E2%80%93scissors
#
# The use of functions is recommended
#
# ## Goals
# 1. Use of loop
# 2. Data capture by console
# 3. Use if-elif-else
# 4. Use of try-except
# 5. Definition of functions. Modular programming
# 6. Logical operators.
# 7. Print
# 8. Import modules
# +
# Import the choice function of the random module
# https://stackoverflow.com/questions/306400/how-to-randomly-select-an-item-from-a-list
# Assign to a list the 3 possible options: 'stone', 'paper' or 'scissors'.
# Assign a variable to the maximum number of games: 1, 3, 5, etc ...
# Assign a variable to the number of games a player must win to win.
# Preferably the value will be based on the number of maximum games
# Define a function that randomly returns one of the 3 options.
# This will correspond to the play of the machine. Totally random.
# Define a function that asks your choice: 'stone', 'paper' or 'scissors'
# you should only allow one of the 3 options. This is defensive programming.
# If it is not stone, paper or scissors keep asking until it is.
# Define a function that resolves a combat.
# Returns 0 if there is a tie, 1 if the machine wins, 2 if the human player wins
# Define a function that shows the choice of each player and the state of the game
# This function should be used every time accumulated points are updated
# Create two variables that accumulate the wins of each participant
# Create a loop that iterates while no player reaches the minimum of wins
# necessary to win. Inside the loop solves the play of the
# machine and ask the player's. Compare them and update the value of the variables
# that accumulate the wins of each participant.
# Print by console the winner of the game based on who has more accumulated wins
# -
# # Bonus: Stone, paper, scissors, lizard, spock
#
# Now the improvement begins.
#
# 
#
#
# http://www.samkass.com/theories/RPSSL.html
#
# You are asked to implement some improvements with respect to the simple previous game. In addition, the number of games (which must be ODD) will be requested via the console until a valid number is entered.
#
# Improvements:
# * 5 options: stone, paper, scissors, lizard, spock
# * The number of games is requested per console
# Tip: Reuse code that you already use. If you have programmed intelligently, the bonus are simple modifications to the original game.
# +
# Import the choice function of the random module
# Define a function that asks for an odd number on the keyboard, until it is not valid
# will keep asking
# Assign a list of 5 possible options.
# Assign a variable to the maximum number of games: 1, 3, 5, etc ...
# This time the previously defined function is used
# Assign a variable to the number of games a player must win to win.
# Preferably the value will be based on the number of maximum games
# Define a function that randomly returns one of the 5 options.
# This will correspond to the play of the machine. Totally random.
# Define a function that asks your choice between 5
# you should only allow one of the 5 options. This is defensive programming.
# If it is not valid, keep asking until it is valid.
# Define a function that resolves a combat.
# Returns 0 if there is a tie, 1 if the machine wins, 2 if the human player wins
# Now there are more options
# Define a function that shows the choice of each player and the state of the game
# This function should be used every time accumulated points are updated
# Create two variables that accumulate the wins of each participant
# Create a loop that iterates while no player reaches the minimum of wins
# necessary to win. Inside the loop solves the play of the
# machine and ask the player's. Compare them and update the value of the variables
# that accumulate the wins of each participant.
# Print by console the winner of the game based on who has more accumulated wins
# -
| rock-paper-scissors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:ap-northeast-2:806072073708:image/datascience-1.0
# ---
# +
import sagemaker
print(sagemaker.__version__)
session = sagemaker.Session()
# + language="sh"
# # https://s3.amazonaws.com/amazon-reviews-pds/readme.html
# aws s3 cp s3://amazon-reviews-pds/tsv/amazon_reviews_us_Camera_v1_00.tsv.gz /tmp
# +
prefix = 'amazon-reviews-camera'
input_data = session.upload_data(path='/tmp/amazon_reviews_us_Camera_v1_00.tsv.gz', key_prefix=prefix)
# +
from sagemaker.sklearn.processing import SKLearnProcessor
sklearn_processor = SKLearnProcessor(
framework_version='0.23-1',
role=sagemaker.get_execution_role(),
instance_type='ml.c5.2xlarge',
instance_count=1)
# +
# %%time
from sagemaker.processing import ProcessingInput, ProcessingOutput
sklearn_processor.run(
code='preprocessing.py',
inputs=[
ProcessingInput(
source=input_data,
destination='/opt/ml/processing/input')
],
outputs=[
ProcessingOutput(
output_name='train_data',
source='/opt/ml/processing/train'),
ProcessingOutput(
output_name='validation_data',
source='/opt/ml/processing/validation')
],
arguments=[
'--filename', 'amazon_reviews_us_Camera_v1_00.tsv.gz',
'--num-reviews', '100000',
'--split-ratio', '0.05',
]
)
# +
preprocessing_job_description = sklearn_processor.jobs[-1].describe()
output_config = preprocessing_job_description['ProcessingOutputConfig']
for output in output_config['Outputs']:
print(output['S3Output']['S3Uri'])
# -
| Chapter 06/blazingtext/Preprocessing Amazon Reviews for Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="j2Erm_eprvIi" colab_type="text"
# ## Install Openjdk and Pyspark
# + id="I-1NstgwZH3S" colab_type="code" colab={}
# !apt-get install openjdk-8-jdk-headless -qq > /dev/null
# !pip install pyspark==2.4.4
# + [markdown] id="D2Kp88Xl0mZj" colab_type="text"
# ## Set environment
# + id="754T0y6zaUQM" colab_type="code" colab={}
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
# + [markdown] id="94-UGCDx0tT5" colab_type="text"
# ## Create Spark Session
# + id="-9lQHyCEafpe" colab_type="code" colab={}
from pyspark.sql import SparkSession
spark=SparkSession.builder.appName("spark").getOrCreate()
# + [markdown] id="az8M4kjl00Xk" colab_type="text"
# ## Cloning and using the diabetes data set
# + id="Oi7DzO8mbVP0" colab_type="code" colab={}
# ! git clone https://github.com/education454/diabetes_dataset
# + id="yyuAk99AbjwJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5c08e16d-16b9-45e0-f546-787b6b69d457"
# ! ls diabetes_dataset
# + [markdown] id="ePWlaik70__T" colab_type="text"
# ## Read the data
# + id="_Uk_ERvSbz65" colab_type="code" colab={}
df = spark.read.csv('/content/diabetes_dataset/diabetes.csv',header = True , inferSchema= True)
# + [markdown] id="trtWKJzd1LLc" colab_type="text"
# ### Check 20 rows of data set
# + id="xsEK5sDvcWdS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 459} outputId="b19c6ab3-504b-4a91-c796-df2a7c574ce5"
df.show()
# + [markdown] id="LHT9Gsd21V5A" colab_type="text"
# ## Display data in root fashion
# + id="kbo7lbyIc7ml" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="55cab226-cf66-4d72-c8d8-75d3c8bec5a8"
df.printSchema()
# + [markdown] id="5mbQXRMU1cWu" colab_type="text"
# ## Check no of observations and total variables
# + id="ON5xklQ7dSje" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a7e169d2-622c-44d4-e9b9-e9bef5bfc552"
print((df.count(),len(df.columns)))
# + [markdown] id="tnVGgkn71ly1" colab_type="text"
# ## Check how many patients are diabetic and non diabetic ( 0 - means non diabetic and 1 means person is diabetic )
# + id="pattEbSDeDSf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="3a229d78-e69f-461b-d200-8fde0c51524a"
df.groupby('Outcome').count().show()
# + id="i64pXfI8eZ-L" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 207} outputId="fea9c239-d61d-4f3b-8eb9-dec26dcb8e67"
df.describe().show()
# + id="bWX34bHufAEN" colab_type="code" colab={}
# find the total no of zeroes in columns : Glucose| BloodPressure| SkinThickness| Insulin BMI
def count_zeroes():
    """Print, for each clinical measurement column, how many rows hold a zero.

    Zero is not a plausible value for these measurements, so a zero marks
    missing data (imputed with the column mean in a later cell).
    """
    for column in ('Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI'):
        zero_count = df.filter(df[column] == 0).count()
        print(column + ":" + str(zero_count))
# + id="oSV4D8mOgBsa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="647460c3-2c92-4cfc-99a4-f0a4be1a6841"
count_zeroes()
# + [markdown] id="xogvXq4H15mE" colab_type="text"
# ## Replacing the 0 values with the mean values to fill the columns for better prediction
# + id="nYZ-0gZqhLr6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="83084dce-7cf3-4186-8064-c37ee0f8087a"
from pyspark.sql.functions import *
for i in df.columns[1:6]:
data=df.agg({i:'mean'}).first()[0]
print("Mean value for {} is {}".format(i,int(data)))
df=df.withColumn(i,when(df[i]==0,int(data)).otherwise(df[i]))
# + id="45imWs6Shyjk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 459} outputId="1a255d45-ac54-453d-9d1e-26ee53804250"
df.show()
# + [markdown] id="mB-s5ELD2Fb8" colab_type="text"
# ## Getting Correlation value for all columns except Outcome
# + id="qOhff-0yjBZw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="1d04b23f-b82b-4298-d3c8-511dd3f1ab86"
for col in df.columns:
print("correlation to outcome for {} is {}".format(col,df.stat.corr('Outcome',col)))
# + [markdown] id="ktG0Iwnj2xhb" colab_type="text"
# ## Getting important features and transforming it into a single column as a vector
# + id="V3xZdQKekuPM" colab_type="code" colab={}
from pyspark.ml.feature import VectorAssembler
assembler = VectorAssembler(inputCols=['Pregnancies','Glucose','BloodPressure','SkinThickness','Insulin','BMI','DiabetesPedigreeFunction','Age'],outputCol='features' )
output_data = assembler.transform(df)
# + id="VJpWxLXPmhx6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="0c4e1ab4-e564-4b57-8d67-6b37d3af0c0f"
output_data.printSchema()
# + id="SW0_wKbAmuQ1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 459} outputId="5a5ca84a-7cae-4006-bb77-6aa1d202567a"
output_data.show()
# + [markdown] id="snzn3ZdP3GQn" colab_type="text"
# ## Import Logistic Regression
# + id="0ROLRwdym5B0" colab_type="code" colab={}
from pyspark.ml.classification import LogisticRegression
final_data = output_data.select('features','Outcome')
# + id="8cr2eZKYnOSj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="9b61136c-3107-4237-95d7-16a1e98feeec"
final_data.printSchema()
# + [markdown] id="0cj3i6YE3OO3" colab_type="text"
# ## 70 % training data and 30 % data
# + id="s8BUKeEunYKk" colab_type="code" colab={}
train , test = final_data.randomSplit([0.7 , 0.3])
models = LogisticRegression(labelCol = 'Outcome')
model = models.fit(train)
# + id="rNmyaxB1n2mX" colab_type="code" colab={}
summary = model.summary
# + [markdown] id="DXtDbVlH3pOG" colab_type="text"
# ## Training Prediction and Seeing Data Prediction Column
# + id="Jom2Y0-ioJMX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="e111b47f-5a96-4ccd-93e1-c79d37a002ec"
summary.predictions.describe().show()
# + [markdown] id="H7f1k7of3_5N" colab_type="text"
# ## Import BinaryClassificationEvaluator
# + id="wqGt1C0Aoj9z" colab_type="code" colab={}
from pyspark.ml.evaluation import BinaryClassificationEvaluator
predictions = model.evaluate(test)
# + [markdown] id="DkIILeQC4NmV" colab_type="text"
# ## Getting Prediction
# + id="pThj_HE5o3HL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 289} outputId="adcf8035-f39e-49a5-824b-b2a8d3b4f9c2"
predictions.predictions.show(10)
# + [markdown] id="tkuGI1N84cM6" colab_type="text"
# ## Getting prediction for Test
# + id="sawItV5KpnmI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="325a8f1f-390b-4edb-da76-2bf51303a774"
evaluator = BinaryClassificationEvaluator(rawPredictionCol='rawPrediction',labelCol='Outcome')
evaluator.evaluate(model.transform(test))
# + [markdown] id="oTRj1dQ74k5S" colab_type="text"
# ## Accuracy is approx 86 % which is quite good.
# + id="_DJbWYsiq4FA" colab_type="code" colab={}
# For saving the model
# model.save("save")
from pyspark.ml.classification import LogisticRegressionModel
model = LogisticRegressionModel.load('model')
| Diabetes_Prediction_Spark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ---
#
# _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---
# # The Series Data Structure
# +
import pandas as pd
# pd.Series?
# -
animals = ['Tiger', 'Bear', 'Moose']
pd.Series(animals)
numbers = [1, 2, 3]
pd.Series(numbers)
animals = ['Tiger', 'Bear', None]
pd.Series(animals)
numbers = [1, 2, None]
pd.Series(numbers)
import numpy as np
np.nan == None
np.nan == np.nan
np.isnan(np.nan)
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports)
s
s.index
s = pd.Series(['Tiger', 'Bear', 'Moose'], index=['India', 'America', 'Canada'])
s
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports, index=['Golf', 'Sumo', 'Hockey'])
s
# # Querying a Series
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports)
s
s.iloc[3]
s.loc['Golf']
s[3]
s['Golf']
sports = {99: 'Bhutan',
100: 'Scotland',
101: 'Japan',
102: 'South Korea'}
s = pd.Series(sports)
s[0] #This won't call s.iloc[0] as one might expect, it generates an error instead
s = pd.Series([100.00, 120.00, 101.00, 3.00])
s
total = 0
for item in s:
total+=item
print(total)
# +
import numpy as np
total = np.sum(s)
print(total)
# -
#this creates a big series of random numbers
s = pd.Series(np.random.randint(0,1000,10000))
s.head()
len(s)
# %%timeit -n 100
summary = 0
for item in s:
summary+=item
# %%timeit -n 100
summary = np.sum(s)
s+=2 #adds two to each item in s using broadcasting
s.head()
for label, value in s.iteritems():
s.loc[label] = value + 2
s.head()
# %%timeit -n 10
s = pd.Series(np.random.randint(0,1000,10000))
for label, value in s.iteritems():
s.loc[label]= value + 2
# %%timeit -n 10
s = pd.Series(np.random.randint(0,1000,10000))
s+=2
s = pd.Series([1, 2, 3])
s.loc['Animal'] = 'Bears'
s
original_sports = pd.Series({'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'})
cricket_loving_countries = pd.Series(['Australia',
'Barbados',
'Pakistan',
'England'],
index=['Cricket',
'Cricket',
'Cricket',
'Cricket'])
all_countries = original_sports.append(cricket_loving_countries)
original_sports
cricket_loving_countries
all_countries
all_countries.loc['Cricket']
# # The DataFrame Data Structure
import pandas as pd
purchase_1 = pd.Series({'Name': 'Chris',
'Item Purchased': 'Dog Food',
'Cost': 22.50})
purchase_2 = pd.Series({'Name': 'Kevyn',
'Item Purchased': 'Kitty Litter',
'Cost': 2.50})
purchase_3 = pd.Series({'Name': 'Vinod',
'Item Purchased': 'Bird Seed',
'Cost': 5.00})
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])
df.head()
df.loc['Store 2']
type(df.loc['Store 2'])
df.loc['Store 1']
df.loc['Store 1', 'Cost']
df.T
df.T.loc['Cost']
df['Cost']
df.loc['Store 1']['Cost']
df.loc[:,['Name', 'Cost']]
df.drop('Store 1')
df
copy_df = df.copy()
copy_df = copy_df.drop('Store 1')
copy_df
# +
# copy_df.drop?
# -
del copy_df['Name']
copy_df
df['Location'] = None
df
# # Dataframe Indexing and Loading
costs = df['Cost']
costs
costs+=2
costs
df
# !bzcat /data/courses/coursera/data-science-with-python/course-1-python-intro/olympics.csv.bz2|head
# !ls -la /data/courses/coursera/data-science-with-python/course-1-python-intro/olympics.csv.bz2
df = pd.read_csv('/data/courses/coursera/data-science-with-python/course-1-python-intro/olympics.csv.bz2')
df.head()
df = pd.read_csv('/data/courses/coursera/data-science-with-python/course-1-python-intro/olympics.csv.bz2', index_col = 0, skiprows=1)
df.head()
df.columns
# +
# Normalize the Olympics medal column names: the raw header encodes the medal
# type as a numeric prefix (01 = Gold, 02 = Silver, 03 = Bronze) and uses the
# '№' symbol for count columns, which we replace with '#'.
medal_names = {'01': 'Gold', '02': 'Silver', '03': 'Bronze'}
for col in df.columns:
    medal = medal_names.get(col[:2])
    if medal is not None:
        df.rename(columns={col: medal + col[4:]}, inplace=True)
    if col[:1] == '№':
        df.rename(columns={col: '#' + col[1:]}, inplace=True)
df.head()
# -
# # Querying a DataFrame
df['Gold'] > 0
only_gold = df.where(df['Gold'] > 0)
only_gold.head()
only_gold['Gold'].count()
df['Gold'].count()
only_gold = only_gold.dropna()
only_gold.head()
only_gold = df[df['Gold'] > 0]
only_gold.head()
len(df[(df['Gold'] > 0) | (df['Gold.1'] > 0)])
df[(df['Gold.1'] > 0) & (df['Gold'] == 0)]
# # Indexing Dataframes
df.head()
df['country'] = df.index
df = df.set_index('Gold')
df.head()
df = df.reset_index()
df.head()
# + language="markdown"
#
# For this part, we will be using census data from the [United States Census Bureau](https://www.census.gov/data/tables/2017/demo/popest/nation-detail.html).
# Counties are political and geographic subdivisions of states in the United States.
# This dataset contains population data for counties and states in the US from 2010 to 2015.
# [See this document](https://www2.census.gov/programs-surveys/popest/datasets/2010-2015/counties/totals/co-est2015-alldata.pdf)
# for a description of the variable names.
#
# -
df = pd.read_csv('/data/courses/coursera/data-science-with-python/course-1-python-intro/census.csv.bz2')
df.head()
df['SUMLEV'].unique()
df = df[df['SUMLEV'] == 50]
df.head()
columns_to_keep = ['STNAME',
'CTYNAME',
'BIRTHS2010',
'BIRTHS2011',
'BIRTHS2012',
'BIRTHS2013',
'BIRTHS2014',
'BIRTHS2015',
'POPESTIMATE2010',
'POPESTIMATE2011',
'POPESTIMATE2012',
'POPESTIMATE2013',
'POPESTIMATE2014',
'POPESTIMATE2015']
df = df[columns_to_keep]
df.head()
df = df.set_index(['STNAME', 'CTYNAME'])
df.head()
df.loc['Michigan', 'Washtenaw County']
df.loc[ [('Michigan', 'Washtenaw County'),
('Michigan', 'Wayne County')] ]
# # Missing values
df = pd.read_csv('/data/courses/coursera/data-science-with-python/course-1-python-intro/log.csv.bz2')
df
# +
# df.fillna?
# -
df = df.set_index('time')
df = df.sort_index()
df
df = df.reset_index()
df = df.set_index(['time', 'user'])
df
df = df.fillna(method='ffill')
df.head()
# + language="markdown"
# Interactive questions and answers from the videos
# +
purchase_1 = pd.Series({'Name': 'Chris',
'Item Purchased': 'Dog Food',
'Cost': 22.50})
purchase_2 = pd.Series({'Name': 'Kevyn',
'Item Purchased': 'Kitty Litter',
'Cost': 2.50})
purchase_3 = pd.Series({'Name': 'Vinod',
'Item Purchased': 'Bird Seed',
'Cost': 5.00})
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])
df['Item Purchased']
# +
purchase_1 = pd.Series({'Name': 'Chris',
'Item Purchased': 'Dog Food',
'Cost': 22.50})
purchase_2 = pd.Series({'Name': 'Kevyn',
'Item Purchased': 'Kitty Litter',
'Cost': 2.50})
purchase_3 = pd.Series({'Name': 'Vinod',
'Item Purchased': 'Bird Seed',
'Cost': 5.00})
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])
df['Cost'] = 0.8 * df['Cost']
df
# +
purchase_1 = pd.Series({'Name': 'Chris',
'Item Purchased': 'Dog Food',
'Cost': 22.50})
purchase_2 = pd.Series({'Name': 'Kevyn',
'Item Purchased': 'Kitty Litter',
'Cost': 2.50})
purchase_3 = pd.Series({'Name': 'Vinod',
'Item Purchased': 'Bird Seed',
'Cost': 5.00})
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])
df['Name'][df['Cost'] > 3.0]
# +
purchase_1 = pd.Series({'Name': 'Chris',
'Item Purchased': 'Dog Food',
'Cost': 22.50})
purchase_2 = pd.Series({'Name': 'Kevyn',
'Item Purchased': 'Kitty Litter',
'Cost': 2.50})
purchase_3 = pd.Series({'Name': 'Vinod',
'Item Purchased': 'Bird Seed',
'Cost': 5.00})
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])
# Re-index
df['Location'] = df.index
df = df.set_index(['Location', 'Name'])
# Add a record
df = df.append(pd.Series(data={'Cost': 3.00, 'Item Purchased': 'Kitty Food'}, name=('Store 2', 'Kevyn')))
df.head()
# + language="markdown"
# * [US Census Bureau pristine data CSV data file](https://www2.census.gov/programs-surveys/popest/datasets/2010-2015/counties/totals/co-est2015-alldata.csv)
# * [Copy of that data file, converted to UTF-8 (from ISO-8859-1), and bzipped](https://github.com/machine-learning-helpers/data-samples/blob/master/courses/coursera/data-science-with-python/course-1-python-intro/co-est2015-alldata.csv.bz2)
# * [Simplified version on Coursera](https://github.com/machine-learning-helpers/data-samples/blob/master/courses/coursera/data-science-with-python/course-1-python-intro/census.csv.bz2)
#
# -
# !bzcat /data/courses/coursera/data-science-with-python/course-1-python-intro/co-est2015-alldata.csv.bz2|head -3
census_df = pd.read_csv('/data/courses/coursera/data-science-with-python/course-1-python-intro/co-est2015-alldata.csv.bz2')
census_df
census_df['SUMLEV'].unique()
census_df_by_state = census_df[census_df['SUMLEV'] == 50].set_index(['STNAME', 'COUNTY'])
census_df_by_state.loc[[('Alabama', 1), ('Alabama', 3)]]
# Find the state with the most counties (SUMLEV == 50 rows are county-level records).
state_list = census_df[census_df['SUMLEV'] == 50]['STNAME'].unique()
max_county_nb = 0
max_county_state = ''
for state in state_list:
    # Distinct county codes within this state.
    county_list = census_df[(census_df['SUMLEV'] == 50) & (census_df['STNAME'] == state)]['COUNTY'].unique()
    nb_counties = len(county_list)
    # Strictly greater: on a tie, the first state encountered wins.
    if nb_counties > max_county_nb:
        max_county_nb = nb_counties
        max_county_state = state
    #print ('State: {}; Nb of counties: {}'.format(state, str(nb_counties)))
print('The max number of counties ({}) is in {}'.format(str(max_county_nb), max_county_state))
# + language="markdown"
# Iterative version (non optimized) for #6
# +
# For every state, sum the populations of its three most populous counties,
# then report the three states with the largest such totals.
state_list = census_df[census_df['SUMLEV'] == 50]['STNAME'].unique()

# Collect one dict per state and build the frame once at the end:
# DataFrame.append was removed in pandas 2.0, and growing a frame row by row
# is quadratic anyway.
state_totals = []
for state in state_list:
    columns = ['STNAME', 'COUNTY', 'CENSUS2010POP']
    sorted_census_df = census_df[(census_df['SUMLEV'] == 50) & (census_df['STNAME'] == state)].nlargest(3, 'CENSUS2010POP')[columns]
    state_total_census = sorted_census_df['CENSUS2010POP'].sum()
    state_totals.append({'State': state, 'Census 2010 Pop': state_total_census})
tot_census_df = pd.DataFrame(state_totals)

top_state_list = tot_census_df.nlargest(3, 'Census 2010 Pop')['State'].tolist()
top_state_list
# + language="markdown"
# Iterative version (non optimized) for #7
# +
# For each county, find the min/max of its 2010-2015 population estimates and
# report the county with the largest absolute change.
census_df['FULLCTYNAME'] = census_df['CTYNAME'] + ', ' + census_df['STNAME']

# Accumulate plain dicts and build the frame once: DataFrame.append was
# removed in pandas 2.0 and per-row appends are quadratic.
min_max_rows = []
county_list = census_df[census_df['SUMLEV'] == 50]['FULLCTYNAME'].unique()
for county in county_list:
    columns = ['FULLCTYNAME', 'STNAME', 'CTYNAME', 'POPESTIMATE2010',
               'POPESTIMATE2011', 'POPESTIMATE2012',
               'POPESTIMATE2013', 'POPESTIMATE2014', 'POPESTIMATE2015']
    cty_census_df = census_df[(census_df['SUMLEV'] == 50) & (census_df['FULLCTYNAME'] == county)][columns]
    # .item() requires exactly one row per county, which FULLCTYNAME ensures.
    pop_values = [cty_census_df['POPESTIMATE' + str(year)].item()
                  for year in range(2010, 2016)]
    pop_min, pop_max = min(pop_values), max(pop_values)
    min_max_rows.append({'County': county,
                         'Min Pop': pop_min, 'Max Pop': pop_max,
                         'Abs Change': abs(pop_max - pop_min)})
tot_census_df = pd.DataFrame(min_max_rows)

top_county_list = tot_census_df.nlargest(1, 'Abs Change')['County'].tolist()
print (str(top_county_list))
# + language="markdown"
# Optimized/compact version for #7
# +
# Vectorized version: take the row-wise min/max over the six estimate columns
# instead of looping over counties.
pop_estimate_cols = ['POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012',
                     'POPESTIMATE2013', 'POPESTIMATE2014', 'POPESTIMATE2015']

census_df['FULLCTYNAME'] = census_df['CTYNAME'] + ', ' + census_df['STNAME']
census_df['Pop Max'] = census_df[pop_estimate_cols].max(axis=1)
census_df['Pop Min'] = census_df[pop_estimate_cols].min(axis=1)
census_df['Abs Change'] = (census_df['Pop Max'] - census_df['Pop Min']).abs()

top_county_list = census_df[census_df['SUMLEV'] == 50].nlargest(1, 'Abs Change')['FULLCTYNAME'].tolist()
top_county_list
# -
| courses/coursera/data-science-with-python/course-1-python-intro/Week 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from src.embedding_manager import EmbeddingManager
# Load the 300-d fastText embeddings via the project-local manager.
# NOTE(review): assumes EmbeddingManager exposes `.vectors` (2-D array) and
# `.words` (array of tokens), as used throughout this notebook — confirm.
em = EmbeddingManager(path='../fasttext.wiki-news-300d-1M.vec')
# -
# # Perform cleaning according to analysis, conducted in embeddings-analysis.ipynb
def filter_values(array, limits, title):
    """Return a boolean mask marking entries of `array` that fall outside
    the closed range [limits[0], limits[1]], printing how many were flagged.
    """
    lower, upper = limits
    outside = (array < lower) | (array > upper)
    print(f'Filtered out by {title} in {limits}: {array[outside].shape[0]}')
    return outside
# +
# Per-entry (per-word) limits: a vector whose statistic falls outside its
# range is treated as an outlier and dropped.
mean_limits = (-0.03, 0.03)
median_limits = (-0.04, 0.04)
max_limits = (0, 1)
min_limits = (-1, 0)

entry_masks = [
    filter_values(np.mean(em.vectors, axis=1), mean_limits, 'mean'),
    filter_values(np.median(em.vectors, axis=1), median_limits, 'median'),
    filter_values(np.max(em.vectors, axis=1), max_limits, 'max'),
    filter_values(np.min(em.vectors, axis=1), min_limits, 'min'),
]
# A word is removed if ANY of the four statistics flagged it.
total_entry_filter_idx = np.logical_or.reduce(entry_masks)

print(f'Total filtered out entries count: {em.vectors[total_entry_filter_idx].shape[0]}')

# Keep only the rows (words) that passed every per-entry check.
keep_rows = ~total_entry_filter_idx
em.vectors = em.vectors[keep_rows]
em.words = em.words[keep_rows]
# +
# Per-dimension limits: a coordinate whose statistic (taken across all words)
# falls outside its range is flagged for removal from every vector.
mean_dim_limits = (-0.1, 0.1)
median_dim_limits = (-0.1, 0.1)
max_dim_limits = (0, 2)
min_dim_limits = (-2, 0)

dim_masks = [
    filter_values(np.mean(em.vectors, axis=0), mean_dim_limits, 'mean per dimension'),
    filter_values(np.median(em.vectors, axis=0), median_dim_limits, 'median per dimension'),
    filter_values(np.max(em.vectors, axis=0), max_dim_limits, 'max per dimension'),
    filter_values(np.min(em.vectors, axis=0), min_dim_limits, 'min per dimension'),
]
# A dimension is dropped if ANY of the four statistics flagged it.
total_dimension_filter_idx = np.logical_or.reduce(dim_masks)

print(f'Total filtered out dimensions count: {em.vectors[0][total_dimension_filter_idx].shape[0]}')
# +
# Drop the rejected dimensions by boolean-indexing along axis 1 (equivalent
# to swapping axes, filtering rows, and swapping back).
em.vectors = em.vectors[:, ~total_dimension_filter_idx]
print(em.vectors.shape)

# Squeeze out a few more dimensions with PCA.
from sklearn.decomposition import PCA

pca = PCA(n_components=290)
em.vectors = pca.fit_transform(em.vectors)
print(em.vectors.shape)
# -
# ## Save filtered embeddings
# +
from tqdm import tqdm

# Persist the cleaned embeddings in the standard .vec text format:
# a "<vocab_size> <dim>" header, then one "<word> v1 v2 ..." line per word.
rounded = em.vectors.round(5)
with open('../fasttext.wiki-news-cleaned-290d.vec', 'w', encoding='UTF-8') as vec_file:
    vec_file.write(f'{em.vectors.shape[0]} {em.vectors.shape[1]}\n')
    for i, word in enumerate(tqdm(em.words)):
        values = ' '.join(str(val) for val in rounded[i])
        vec_file.write(word + ' ' + values + '\n')
# -
| 2. embeddings-cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.10 64-bit (''.venv'': poetry)'
# name: python3
# ---
# # Классификация - копаем глубже
# > 🚀 В этой практике нам понадобятся: `numpy==1.21.2, pandas==1.3.3, scikit-learn==0.24.2`
#
# > 🚀 Установить вы их можете с помощью команды: `!pip install numpy==1.21.2, pandas==1.3.3, scikit-learn==0.24.2`
#
# ## Содержание
#
# * [Посмотрим на данные](#Посмотрим-на-данные)
# * [Лирический отступ на стратификацию](#Лирический-отступ-на-стратификацию)
# * [Обратно к самому главному](#Обратно-к-самому-главному)
# * [Confusion matrix (Матрица ошибок)](#Confusion-matrix-Матрица-ошибок)
# * [Вернёмся к accuracy](#Вернёмся-к-accuracy)
# * [Двигаемся глубже](#Двигаемся-глубже)
# * [Почему гармоническое среднее?](#Почему-гармоническое-среднее?)
# * [Инструментарий](#Инструментарий)
# * [Вывод](#Вывод)
# * [Для затравки](#Для-затравки)
# * [Вопросы для закрепления](#Вопросы-для-закрепления)
# * [Полезные ссылки](#Полезные-ссылки)
#
# Приветствуем! Вы уже познакомились с тем, что такое задача классификации и даже смогли сделать свою первую классифицирующую модель - замечательно!
#
# Сегодня мы поговорим о том, как более подробно изучить поведение модели классификации. Ведь, по одной только точности (числу accuracy) слишком сложно что-то оценить. Давайте посмотрим другие способы анализа качества работы модели.
# + _cell_id="v6MqNbmiDIx8weCu"
# Visualisation settings.
# If a dark theme is used, it is better to render the text in white.
import matplotlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
import seaborn as sns

TEXT_COLOR = 'black'

# Apply all figure/text defaults in one call instead of key-by-key.
matplotlib.rcParams.update({
    'figure.figsize': (15, 10),
    'text.color': TEXT_COLOR,
    'font.size': 14,
    'lines.markersize': 15,
    'axes.labelcolor': TEXT_COLOR,
    'xtick.color': TEXT_COLOR,
    'ytick.color': TEXT_COLOR,
})
sns.set_style('darkgrid')

# Fix the random state for reproducibility.
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
random.seed(RANDOM_SEED)
# -
# ## Посмотрим на данные
# Допустим датасет собран из магазина, так вот предсказывать мы хотим факт покупки товаров, чтобы в будущем говорить, купят товар или нет.
# + _cell_id="rngKob9Pvat2Mys2"
from sklearn.datasets import make_classification

# Synthetic "shop" dataset: 600 points, 2 informative features, classes
# weighted 60/40 (refusal / purchase).
X_data, y_data = make_classification(
    n_samples=600, n_features=2, n_redundant=0, n_informative=2,
    n_clusters_per_class=1, random_state=RANDOM_SEED,
    shift=[5, 3], weights=[0.6, 0.4],
)

class_names = {0: "Отказ", 1: "Покупка"}
df_data = pd.DataFrame(X_data, columns=["Качество", "Размер"])
df_data['Факт покупки'] = y_data
df_data['Факт покупки'] = df_data['Факт покупки'].map(class_names)

pnts_scatter = plt.scatter(X_data[:, 0], X_data[:, 1], marker='o', c=y_data,
                           s=50, edgecolor='k')
plt.xlabel('Качество продуктов, балл')
plt.ylabel('Размер поставки, ящиков')
plt.grid(True)
plt.legend(handles=pnts_scatter.legend_elements()[0], labels=['Отказ', 'Покупка'])
plt.show()
# -
# Смотрите, это наши данные, которые мы получили из базы данных (например) и **наша задача** заключается в том, чтобы предсказать, купят ли новые товары, поставка которого имеет определенный размер и оценка качества этой поставки имеет какой-то балл, или нет.
#
# Для начала, как обычно смотрим на данные:
# + _cell_id="Bq82OJINme1w9YXP"
# Column dtypes and non-null counts — quick structural overview.
df_data.info()
# + _cell_id="0lBB9vS2iiHgCpAd"
# Full table: two numeric features plus the categorical target.
df_data
# -
# Как видим, данные представляют собой две независимые переменные (числовые) и одна колонка зависимая (целевая), которая имеет **категориальный тип**. Пропусков в данных нет.
#
# > 🤓 **Числовой** тип переменной - данные представлены вещественными или целыми числами.
#
# > 🤓 **Категориальный** тип переменной - данные являются повторяющимися классами. То есть, на всю колонку есть несколько уникальных значений, которые и заполняют колонку.
#
# Мы ещё позже подробнее узнаем новый тип переменной (категориальный) и на практике его достаточно часто будем применять! А сейчас, давайте посмотрим, какие уникальные значения есть в целевой колонке и какое их соотношение:
# + _cell_id="mqjU0U2qbb2sMD48"
# Class balance of the target (expect roughly 60/40 from the weights above).
df_data['Факт покупки'].value_counts()
# -
# Отлично, количество классов в данных сбалансировано.
#
# Что такое **дисбаланс в данных** - тема отдельная, а пока мы видим, что соотношение примеров одного класса к другому близко к единице. Значит, можно не беспокоиться. Вот если у вас в данных соотношение количество примеров одного класса к количеству примеров другого больше 3-5 раз, то надо принимать меры.
#
# > ⚠️ Как правило, нельзя назвать определённый порог, при котором мы видим дисбаланс. Обычно, соотношение 1:1 - отсутствие дисбаланса, а вот превышение одного класса над другим раза в 3 уже можно считать дисбалансом. Мы сегодня будем знакомиться с новыми показателями, которые будем часто применять. Так как даже в случае соотношения 1:2 могут быть негативные последствия, то новые метрики позволят увидеть это.
#
# Чего нам не хватает, чтобы попробовать обучить простую модель? Правильно! В наших данных целевая колонка имеет строковый тип, но при этом категориальный, так как на всю колонку всего два уникальных значения. Значит, нужно разделить данные и закодировать целевую колонку!
# + _cell_id="QM8nY3bmhJDGEIij"
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split

# Hold out 30% for testing; stratify so both splits keep the class ratio.
df_train, df_test = train_test_split(df_data, test_size=0.3,
                                     stratify=df_data['Факт покупки'],
                                     random_state=RANDOM_SEED)

# pop() removes the target column from the features in the same step.
y_train = df_train.pop('Факт покупки')
y_test = df_test.pop('Факт покупки')

# Encode the string labels as integers, fitting on the training labels only.
target_enc = LabelEncoder().fit(y_train)
y_train_enc = target_enc.transform(y_train)
y_test_enc = target_enc.transform(y_test)

print(f"Train/test shapes: {df_train.shape}, {df_test.shape}")
print(target_enc.classes_)
# -
# ## Лирический отступ на стратификацию
# Отлично, целевая колонка закодирована, чтобы в модель пошли не строки, а числа.
#
# Мы честно выделили выборку данных для тестирования, но что такое флаг `stratify`??
#
# А вы знаете, что такое **стратификация** при разделении данных?
#
# Это очень важный аспект, когда мы работаем с данными для классификации!
#
# Представьте, что у нас 1000 примеров (из них всего 20 примеров класса "Отказ", остальные - "Покупка") в датасете и надо выдели 30% - это 300 примеров. Как вы считаете, какая вероятность того, что все 20 примеров "Отказ" попадут в тест? К чему это приведет?
#
# > ✨ Попробуйте обсудить это с преподавателем
#
# <p align="center"><img src="https://raw.githubusercontent.com/AleksDevEdu/ml_edu/master/assets/think_about_it_image.png" width=600/></p>
# А приведёт это к тому, что обучающая выборка будет состоять только из примеров "Покупка"! Так и чему будет учиться модель?
#
# **Ничему** - она будет думать, что всегда надо просто предсказывать "Покупка" и даже не будет знать об "Отказах". Это очень плохо!
#
# Так вот **стратификация** позволяет сохранить соотношение классов изначальной выборки и присвоить такое же соотношение как train, так и test выборке.
#
# В случае с 1000 классами в train обязательно попадет 14 примеров "отказа" и 686 примеров "покупки". В test - 6 примеров "отказа" и 294 примера "покупки".
#
# > ⚠️ Изначально "отказ" имел соотношение $20/1000 = 0.02$, "покупка" - $980/1000 = 0.98$. Теперь смотрим train: $14/700 = 0.02$, $686/700 = 0.98$. Аналогично вы увидите в test выборке.
#
# > ⚠️ Таким образом, в задаче классификации крайне рекомендуется использовать стратификацию!
#
# Отлично, мы разобрались, что такое стратификация и зачем её применять, а теперь вернёмся в обучению!
# ## Обратно к самому главному
# + _cell_id="txCDaNXmx6F2iUGa"
# TODO - реализуйте функцию train_logreg_model(), которая на вход принимает DataFrame, целевую колонку и выдаёт обученную модель логистической регрессии
# Не забывайте фиксировать random_state !
# (exercise cell: implement train_logreg_model(X, y) -> fitted LogisticRegression)
# + _cell_id="yC8atdlECWGrPyF9"
# TEST
# Fixture: small synthetic dataset; the expected coefficients pin down the
# exact model configuration (fixed random_state).
_test_X_data, _test_y_data = make_classification(
    n_samples=100,
    n_features=2,
    n_redundant=0,
    random_state=RANDOM_SEED,
)
_test_model = train_logreg_model(_test_X_data, _test_y_data)
np.testing.assert_array_almost_equal(_test_model.coef_, [[3.23218612, -0.84604963]])
print("Well done!")
# -
# Замечательно! Давайте посмотрим, какая точность нашего решения!
# + _cell_id="Ir9RW46zwmx96rsb"
from sklearn.metrics import accuracy_score
# Train the exercise model and score it on the held-out test set.
logreg = train_logreg_model(df_train, y_train_enc)
# Hard 0/1 predictions; `y_pred` is reused by several later cells.
y_pred = logreg.predict(df_test)
accuracy_value = accuracy_score(y_test_enc, y_pred)
print(f"Accuracy value: {accuracy_value}")
# -
# Вооу, вы только поглядите! Мы получили больше 90% точности! Так чего нам жаловаться? Давайте скажем, что это очень высокая точность и двинем дальше.
#
# Ну нет, в анализе данных важно стараться как можно лучше понять, по каким причинам происходит то или иное предсказание. Давайте попробуем разобраться!
#
# Один и самых простых подходов - взять и посмотреть, где и как предсказывает модель в случае ошибок:
# + _cell_id="WhemYtC9CJkAhH9n"
# Collect the mismatched (predicted, true) pairs first, then report them.
mismatches = [(y_p, y_t) for y_p, y_t in zip(y_pred, y_test_enc) if y_t != y_p]
for y_p, y_t in mismatches:
    print(f"Predict {y_p} vs True {y_t}")
# -
# Смотрите, у нас в большинстве случаев модель предсказывает 1 (Покупка), хотя на деле был 0 (Отказ). Мы смогли таким образом вывести пару примеров, но что если данных будет под миллион записей и ошибок будет куча?
#
# Это очень кропотливый труд и есть более удобные способы **анализа модели классификации**!
#
# Так как же нам проанализировать поведение модели, если при таком плохом случае accuracy говорит, что у нас очень даже хорошо?
#
# Начнем с основ!
# ## Confusion matrix (Матрица ошибок)
# Confusion matrix (CM) - это матрица, которая показывает не только ошибки (как можно подумать из названия), но и правильные предсказания.
#
# Давайте посмотрим, как она выглядит в общем виде:
#
# <table align="center">
# <thead>
# <tr>
# <th colspan=2></th>
# <th colspan=2>Предсказание</th>
# </tr>
# </thead>
# <tbody>
# <tr>
# <td rowspan=3>Истинное</td>
# <td></td>
# <td>0</td>
# <td>1</td>
# </tr>
# <tr>
# <td>0</td>
# <td>TN</td>
# <td>FP</td>
# </tr>
# <tr>
# <td>1</td>
# <td>FN</td>
# <td>TP</td>
# </tr>
# </tbody>
# </table>
#
#
# <p align="center"><img src="https://raw.githubusercontent.com/AleksDevEdu/ml_edu/master/assets/21_confusion_matrix.png" width=600/></p>
#
# > Помните, что 0 - Отказ, 1 - Покупка
#
# В разборе нам пригодится понятие **положительного** и **отрицательного** классов. Эти понятия связаны с конечной задачей. То есть, если мы хотим проанализировать, как модель предсказывает "Покупку", то мы говорим, что класс "Покупка" (индекс 1) - **положительный**. Так как задача состоит из предсказания 2-х классов, то другой становится **отрицательным**.
#
# <details>
# <summary>А что, если хочется сделать класс 0 положительным?</summary>
# На самом деле нам ничего не мешает сказать, что класс 0 (Отказ) - положительный, класс 1 (Покупка) становится отрицательным. Тогда анализ производится похожим образом, но инверсно, и мы будем анализировать качество работы модели на предсказание класса "Отказ".
# </details>
#
# Так вот, определили, что класс 1 - положительный, тогда разбор четырёх показателей станет намного проще!
#
# - **TP** - количество правдиво положительных (true positive), это сколько раз мы верно предсказали положительный класс. То есть, сколько раз мы предсказали примеры с истинным классом 1 как 1.
# - **TN** - количество правдиво отрицательных (true negative), тоже самое, но про отрицательный класс, сколько раз мы правильно предсказали истинные 0 как 0. Не путайтеся в том, что здесь есть слово отрицательный! Это не *плохой* показатель, а просто про отрицательный класс (не положительный).
# - **FP** - количество ложных положительных (false positive), а вот это уже количество примеров, которые были предсказаны как положительный класс (класс Покупка - 1), но на деле таковым не являющимся. Мы предсказали как положительный, но ошиблись. То есть, сколько раз мы истинны Отказ предсказали как Покупка.
# - **FN** - количество ложных отрицательных (false negative), это обратная к FP ситуация. Количество ошибок, когда мы предсказали как отрицательны класс, но это неправильно. То есть, сколько раз мы предсказали истинную Покупку как Отказ.
#
# > 🤓 **FP** еще называется **ошибкой первого рода**
#
# > 🤓 **FN** еще называется **ошибкой второго рода**
#
# Если всё ещё не очень понятно, что это за зверь такой, можете глянуть дружелюбный [видосик](https://www.youtube.com/watch?v=CXahhigetdA).
# Так вот, такая матрица показывает, в каких ситуациях мы ошибаемся.
#
# Давайте напишем код, который на основе двух векторов (вектор истинных индексов и вектор предсказанных индексов) создаёт матрицу 2х2 с заполненными значениями:
# + _cell_id="80seClL0nCsBzbDS"
# TODO - напишите код расчета CM. CM - матрица размером 2х2. Функция create_CM(), которая принимает y_true - вектор истинных индексов, y_pred - вектор предсказанных
# (exercise: create_CM(y_true, y_pred) -> 2x2 array; rows index the true class, columns the predicted class)
# + _cell_id="bKESK5GEm3C4B2r6"
# TEST
_test_y_true = np.array([1, 1, 1, 0, 0, 1, 1, 0, 0])
_test_y_pred = np.array([1, 1, 1, 1, 0, 1, 0, 1, 0])
result_cm = create_CM(_test_y_true, _test_y_pred)
# For the fixture above: TN=2, FP=2, FN=1, TP=4.
_test_expected_cm = np.array([
    [2, 2], [1, 4]
])
np.testing.assert_array_almost_equal(_test_expected_cm, result_cm)
print("Well Done!")
# -
# Отлично, молодцы, теперь смотрим, какие у нас показатели:
# + _cell_id="biBz8Rf07UpojuZB"
data_cm = create_CM(y_test_enc, y_pred)
data_cm
# + _cell_id="GH9NcPTOJ1mgYZnz"
# A CM is usually easier to read rendered as an annotated heatmap.
def draw_heatmap(y_true, y_pred):
    """Render the 2x2 confusion matrix of y_true vs y_pred as a heatmap."""
    class_labels = ['Отказ', 'Покупка']
    ax = sns.heatmap(create_CM(y_true, y_pred), annot=True, fmt='.0f',
                     xticklabels=class_labels, yticklabels=class_labels)
    ax.set_ylabel('Истинное')
    ax.set_xlabel('Предсказание')

draw_heatmap(y_test_enc, y_pred)
# -
# Как видим, CM - отличный простой способ отображения того, как модель себя ведёт!
#
# Тут мы сразу увидели, что модель ошибается, предсказывая Покупку, хотя на деле был Отказ.
#
# Далее, мы разберём другие показатели, которые это покажут, а сейчас важно запомнить, что CM - это хороший вариант, чтобы посмотреть базовые аспекты поведения модели вне зависимости от количества данных!
#
# Всего мы видим: 70 TP, 101 TN, 2 FN и 7 FP.
#
# > ⚠️ Confusion Matrix - это наиболее универсальный способ посмотреть на поведение классифицирующей модели. Не пренебрегайте им, так как он прост и очень показателен!
#
# > ⚠️ Хотите маленький трюк? Если сложить значения в каждой строке матрицы CM, то можно узнать сколько примеров одного и другого класса. Например, мы сразу узнаем, что класс "Покупка" - 2+70 примеров, а класса "Отказ" - 101+7. Можете это проверить!
# ## Вернёмся к accuracy
#
# Так как мы уже знаем термины TP, FN и другие, то мы можем более научно сформулировать формулу accuracy! Вот же она:
#
# $$
# Accuracy = \frac{TP+TN}{TP+TN+FP+FN}
# $$
#
# По сути, это количество всех правильно предсказанных примеров, деленное на количество всех примеров!
#
# Что важно понимать о Accuracy - эта метрика не зависит от того, какой класс выбран положительным. Это и хорошо и плохо, так как это позволяет быстро и в общем виде сделать оценку, **но** этот показатель не дает подробностей. Просто, сколько раз мы правильно предсказали.
# ## Двигаемся глубже
#
# Так что мы имеем на данный момент?
# - Мы можем оценить одним числом (accuracy) то, насколько модель попадает в предсказания, но при определённых условиях это число может быть обманчиво.
# - Мы можем построить визуальное представление количеств правильно и неправильно предсказанных примеров (матрица ошибок). По нему уже можно видеть, в каких классах и насколько модель ошибается, но что с этим делать дальше??
#
# По сути, все эти TP, FP и другие в виде heatmap показывают, где модель ошибается, но эти показатели в чистом виде трудно обобщить.
#
# А что если мы попробуем покомбинировать показатели? Начнём с первого варианта:
#
# $$
# \frac{TP}{TP+FN} = \frac{3}{3+3} = 0.5
# $$
#
# Что такое соотношение даёт? Мы видим, насколько полно предсказание заполнено положительным классом.
#
# То есть, FN в знаменателе указывает, что если бы мы никогда не предсказывали FN (предсказать 0, а на деле это 1), то есть никогда не предсказывали бы 0 вообще, а всегда 1, то даже в таком случае показатель был бы **единица**!
#
# Давайте на примере:
# ```
# y_true = [0, 1, 1, 0, 0, 1, 1, 1]
# y_pred = [1, 1, 1, 1, 1, 1, 1, 1]
# ```
#
# Сколько в этом примере ложно положительных? FN = 0!
#
# Тогда $\frac{TP}{TP+FN} = 1$! То есть, такое соотношение говорит, насколько **полно** мы предсказываем положительный класс - насколько предсказания плотно заполняют истинные положительные примеры.
#
# Название этому показателю - **Recall** (полнота).
#
# $$
# Recall = \frac{TP}{TP+FN}
# $$
# Другой показатель немного по-другому работает, но формула очень похожа!
#
# $$
# \frac{TP}{TP+FP} = \frac{3}{3+0} = 1.0
# $$
#
# Смотрите, в нашем случае именно этот показатель равен 1!
#
# Но почему так? Потому что этот показатель оценивает, насколько модель **попадает в предсказании** положительного класса.
#
# ```
# y_true = [0, 1, 1, 0, 0, 1, 1, 1]
# y_pred = [0, 1, 1, 0, 0, 0, 0, 0]
# ```
#
# Вот в этом примере FP=0!
#
# То есть, модель никогда случайно не назовёт отрицательный класс (0) положительным (1). Да, бывает, что не узнает истинные положительные, но помните, полноту предсказания положительного класса оценивает другой показатель - Recall.
#
# А этот показатель акцентируется на **отсутствии промахов** в предсказании положительного. Такой эффект называют высокой **точностью**, но не путайте с accuracy! Название ему Precision:
#
# $$
# Precision = \frac{TP}{TP+FP}
# $$
# > ⚠️ Recall хорош для оценки, когда нам нужно понимать, насколько модель ведёт себя уверенно. Например, в медицине, нам важнее сказать, что истинно больной (1) пациент - больной (1), так как если назвать здорового человека больным, то он просто сходит и сдаст анализы ещё раз, а вот пропустить больного - может быть очень опасно.
#
# > Если мы совершим ошибку и скажем, что здоровый (0) пациент болен (1), то мы совершим ошибку FP и это уменьшит Precision. Если скажем, что больной пациент (1) здоров (0), то это будет ошибка FN - это уменьшит Recall.
#
# > Так вот, так как нам важно не пропустить больных пациентов, то в таких задачах надо добиваться максимального Recall, чтобы был минимум ошибок FN!
#
# ---
#
# > ⚠️ Для Precision может быть примером другая задача. Разрабатывается система полива сада. Наша модель классифицирует, надо поливать сад (1) или ещё нет (0). Если в ситуации, когда не надо было поливать (0), наша система неправильно это классифицировала и начала полив (1), то это может навредить растениям и они завянут (FP). Если наоборот, надо полить (1), а мы классифицировали, что нет (0), то это не так страшно, так как может прийти человек и перепроверить (FN). Так вот, мы пытаемся свести FP к минимуму, поэтому максимизируем Precision!
#
# Так что нам дают эти два показателя? По сути, один показывает, насколько мы уверенно предсказываем положительный класс, а другой, насколько **не** промахиваемся. По отдельности эти два показателя тоже имеют плюсы, но мы попробуем их совместить с помощью математического трюка под названием **гармоническое среднее**!
#
# $$
# F_1 = 2*\frac{Recall*Precision}{Recall+Precision} = 2*\frac{1.0*0.5}{1.0+0.5} = 0.667
# $$
# Гармоническое среднее Precision и Recall зовётся **F1**. Это способ усреднить Precision и Recall, чтобы оценить, насколько хорошо модель работает в целом.
#
# Как видите, из-за того, что наша моделька имеет хорошую точность, но полнота сильно страдает, то F1 = 0.667. Это уже ближе к правде, нежели оценка accuracy = 95%!
#
# > 🤓 Если интересно, вы ещё можете почитать о $F_2$ и $F_{0.5}$, которые делают смещение в определённые акценты. Они не так часто распространены, но о них можно изредка услышать.
# Таким образом, мы научились оценивать систему с помощью показателя, который не обманешь дисбалансом!
#
# Он сразу выявляет, что система не совсем точна и надо что-то делать!
#
# Но всё это пустые разговоры без кода, погнали:
# + _cell_id="UsFg6pEuJ7sCBJZP"
# TODO - напишите функцию compute_classification_metrics(), который принимает y_true - вектор истинных индексов, y_pred - вектор предсказанных
# Функция должна возвращать dict с ключами recall, precision, f1, accuracy
# (exercise: compute the four metrics for the positive class and return them in a dict)
# + _cell_id="UEMRUbeNqfPAh6tG"
# TEST
_test_y_true = np.array([1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1])
_test_y_pred = np.array([1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1])
_test_metrics = compute_classification_metrics(_test_y_true, _test_y_pred)
# Reference values for the fixture above (exact float equality is expected).
_test_expected_metrics = {
    'recall': 0.8333333333333334,
    'precision': 0.625,
    'f1': 0.7142857142857143,
    'accuracy': 0.6363636363636364
}
assert _test_expected_metrics == _test_metrics
print("Well done!")
# -
# Отлично, молодцы! А теперь проверим на наших данных!
# + _cell_id="GBNNwGJjuIC9nrui"
# Recall/precision/f1/accuracy of the exercise model on the test set.
metrics = compute_classification_metrics(y_test_enc, y_pred)
metrics
# -
# Глядите, всё так, как мы посчитали!
#
# Теперь у нас есть инструмент - показатель, который позволяет оценить то, насколько действительно хорошо наша модель работает! Шикарные новости!
#
# Как видим, из-за того, что модель иногда на Отказ предсказывает Покупку, показатель Precision немного просел. В результате, F1 значение меньше, чем accuracy и это важно, так как F1 показывает качество предсказания модели в данном случае только по классу "Покупка".
# ## Почему гармоническое среднее?
#
# Вы, наверное, обратили внимание, что F1 - это не просто усреднение Recall и Precision, а именно гармоническое среднее! Давайте попробуем разобраться, почему так, и сделаем это через графики!
#
# Вот так выглядит график среднего, если усредняем x и y в диапазоне [0; 100]:
#
# <p align="center"><img src="https://raw.githubusercontent.com/AleksDevEdu/ml_edu/master/assets/plot_mean.jpg" width=600/></p>
# А вот так выглядит график гармонического среднего со степенью 1:
#
# <p align="center"><img src="https://raw.githubusercontent.com/AleksDevEdu/ml_edu/master/assets/plot_harmonic_mean.jpg" width=600/></p>
# Если вы ещё не заметили разницу, то обратите внимание на края графиков:
#
# - в случае среднего, даже если x = 0, а y = 100, то мы получаем 50
# - в случае гармонического среднего близость хотя бы одного показателя к 0 даёт в результате ноль вне зависимости от второго.
#
# Это даёт важное свойство, что в случае, например, когда модель просто всегда предсказывает положительный класс, Recall = 1, но Precision близок к 0. Это не даёт нам обмануться значением F1 равным 0.5, а понять, что модель всё ещё работает "плохо" и вернуть F1 близкий к 0.
#
# Вот такой просто математический трюк, а какой интересный результат!
# ## Инструментарий
#
# Конечно же, никто не будет каждый раз реализовывать функции вычисления recall, precision, так как они уже давно существуют.
#
# Давайте проделаем маленькое задание и познакомимся с готовыми функциями, а также сравним наши реализации с готовыми:
#
# - [recall_score()](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html)
# - [precision_score()](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html)
# - [f1_score()](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html)
#
# Также, можно использовать реализацию расчёта CM: [confusion_matrix()](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html), [plot_confusion_matrix()](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html).
#
# В следующих нескольких ячейках произведите применение функций и сравните результаты с собственными реализациями:
# + _cell_id="KyK8hDDYfiHavvNV"
# TODO - посчитайте показатели и CM с помощью своих функций и с помощью функций sklearn
# Сравните результаты
# -
# ## Вывод
#
# В этой главе мы познакомились с очень важными инструментами!
#
# Во-первых, стратификация - маленький аспект, но с очень важным значением в задаче классификации! (равномерное распределение классов по выборкам)
#
# Во-вторых, новые способы проанализировать модель в целом - accuracy и Confusion Matrix, а ещё новые способы провести анализ качества предсказания конкретного класса - Recall, Precision, F1.
#
# В-третьих, пополнили свои навыки пониманием того, что часто задача не просто сводится к увеличению accuracy, а часто, в зависимости от специфики задачи, важно оценивать и максимизировать ещё и отдельные показатели.
# ## Для затравки
#
# Вы помните, что в предыдущей практике говорили о сигмоиде и принятии решения?
#
# Немного напомним, в логистической регрессии (бинарной классификации) применяется функция сигмоиды:
# + _cell_id="KODjATQaMrhc0luf"
# Sigmoid curve: maps any real activation to a probability in (0, 1).
_plot_x = np.linspace(-6, 6, 1000)
_plot_y = 1 / (1 + np.exp(-_plot_x))

plt.plot(_plot_x, _plot_y)
plt.xlabel('x')
plt.ylabel('Вероятность предсказания')
# Horizontal guides at the two class levels, each labelled with its class.
for level, label in ((1, 'Покупка'), (0, 'Отказ')):
    plt.axhline(level, color='g')
    plt.text(0, level, label, fontsize=20, va='center', ha='center', backgroundcolor='w')
# -
# Мы видели, что модель предсказывает только 0 или 1, но на деле внутри модели до **принятия решения** существует вероятность предсказания (Y ось на графике). Эта вероятность говорит, к какому классу больше относится предсказание - "Покупка" или "Отказ".
#
# По-умолчанию, метод `predict()` принимает решение на основе границы 0.5, то есть, если вероятность выше 0.5 - это класс Покупка, а если ниже - Отказ.
#
# Давайте проверим, для начала посмотрим вывод метода [predict_proba()](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression.predict_proba), который возвращает сырые вероятности:
# + _cell_id="U9QwxyR5pS0l1eyD"
# Raw class probabilities: an (N, 2) matrix, one column per class.
y_probs = logreg.predict_proba(df_test)
print(y_probs)
# -
# Как видите, `predict_proba()` возвращает матрицу Nx2, в которой N - количество примеров в данных, а 2 - количество классов, которое мы предсказываем. В бинарной классификации мы можем воспользоваться вторым столбцом матрицы, так как первый - вероятность предсказания класса 0 (Отказ) и равна обратной вероятности предсказания класса 1 (Покупка).
#
# > 🔥 Если вы не верите последнему факту - отлично! Критическое мышление важно - вы можете создать ячейку кода и проверить это самостоятельно. Достаточно сложить каждую строку в матрице и убедиться, что каждое сложение даёт единицу (или близкое к 1 значение до 6-го знака - могут быть неточности float вычислений).
#
# Давайте попробуем вывести пару значений вероятностей и результатов `predict()`:
# + _cell_id="1QZ84DxDmzTLmSOO"
y_pred = logreg.predict(df_test)
match_matrix = np.c_[y_probs[:,1], y_pred]
_show_df = pd.DataFrame(match_matrix, columns=['Вероятность предсказания', 'Предсказанный predict() класс'])
_show_df
# -
# Видите, если вероятность высока, то `predict()` возвращает 1, в ином случае - 0.
#
# Но это только просмотр части данных, давайте напишем свою функцию, которая на основе вероятностей будет давать вектор предсказаний:
# + _cell_id="kQgtasmBmtUwNdSG"
# TODO - напишите функцию принятия решения predict_threshold() на основе вероятностей из predict_proba().
# Функция должна принимать матрицу вероятностей y_probs и порог принятия решения threshold
# + _cell_id="1AwgG1A3Dv6rmwl8"
# TEST
# Sanity check for the student's predict_threshold() implementation.
# Column 1 holds the positive-class probabilities; with threshold 0.75 only
# 0.9 and 1.0 should map to class 1 (strict vs non-strict comparison does
# not matter for these particular values).
_test_y_probs = np.array([
    [0.4, 0.6],
    [0.1, 0.9],
    [0.8, 0.2],
    [0.0, 1.0],
    [0.7, 0.3]
])
_test_result = predict_threshold(_test_y_probs, 0.75)
_test_expected = np.array([0, 1, 0, 1, 0])
# Raises AssertionError when the implementation disagrees with the expectation.
np.testing.assert_allclose(_test_result, _test_expected)
print("Well done!")
# -
# Замечательно, а теперь проверим, действительно ли `predict()` даёт то же самое, что и `predict_proba()` с порогом 0.5?
# + _cell_id="zz7EIYrp9sFwssWH"
y_pred_05 = predict_threshold(y_probs, 0.5)
np.allclose(y_pred, y_pred_05)
# + _cell_id="eZf8wp3MxaQ9mQwG"
# Так а теперь проверим, что при смене порога что-то меняется:
y_pred_06 = predict_threshold(y_probs, 0.6)
np.allclose(y_pred, y_pred_06)
# -
# Замечательно! Вот так мы раскрыли хитрость работы логистической регрессии!
#
# Только зачем нам это нужно? А давайте взглянем на метрики после изменения порога:
# + _cell_id="ozfpEX0dORJsGTwj"
compute_classification_metrics(y_test_enc, y_pred_05)
# + _cell_id="O7uqkWI0fDlxndGc"
compute_classification_metrics(y_test_enc, y_pred_06)
# + _cell_id="eTOs1cvceNKFREmO"
y_pred_07 = predict_threshold(y_probs, 0.7)
compute_classification_metrics(y_test_enc, y_pred_07)
# -
# Смотрите! С увеличением порога растёт Precision, но падает Recall! Получается, с уменьшением порога, растёт Recall, но падает Precision.
#
# Вот это да! Получается, что мы можем управлять интересующей нас метрикой с помощью порога принятия решения! Вау, так просто!
#
# Но есть и обратная сторона этой радости, получается, нам нужно как-то настроить параметр на подходящее значение...
#
# Не переживайте, скоро мы научимся парочке трюков, а пока просто осознайте и запомните, что предсказания - это не просто конкретный индекс класса, а вероятности, с которыми можно работать!
# ## Вопросы для закрепления
#
# А теперь пара вопросов, чтобы закрепить материал!
#
# 1. Какие могут быть причины высокого accuracy, но низкого F1?
# 2. Какую метрику лучше максимизировать, если вы разрабатываете систему предсказания возврата кредита для банка. Если человек возвращает (1), то это очень хорошо и банк получает прибыль, если нет (0) - плохо, банк теряет деньги. Вам важно предсказывать, что человек действительно вернёт и лучше лишний раз отказать (0) тому, кто вернёт (1), чем дать кредит тому (1), кто не вернёт (0).
# 3. Методы sklearn по расчету scores считают показатели для класса 1 как положительного. Что нужно сделать, чтобы посчитать метрики для класса 0 как положительного?
# 4. Попробуйте предположить, как принимается решение, если предсказание делается не на двух классах, а, например, пяти?
# 5. Как, используя [LabelEncoder](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html), из индексов получить исходные названия классов?
# ## Полезные ссылки
# * [The Confusion Matrix от StatQuest](https://www.youtube.com/watch?v=Kdsp6soqA7o)
# * [Sensitivity and Specificity от StatQuest](https://www.youtube.com/watch?v=vP06aMoz4v8)
#
| notebooks/21_Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Test Dispersion
#
# <NAME> | January 2016
import sys
sys.path.append('../')
import numpy as np
from zephyr.backend import ViscoMultiFreq, MultiFreq, MiniZephyrHD, SparseKaiserSource
# +
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
# %matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png')
matplotlib.rcParams['savefig.dpi'] = 150 # Change this to adjust figure size
# +
# Modelling grid and physical parameters consumed by the zephyr
# discretizations/solvers constructed below.
systemConfig = {
    'dx': 1., # m
    'dz': 1., # m
    'c': 2500., # m/s
    'rho': 1., # kg/m^3
    'nx': 100, # count
    'nz': 200, # count
    'freqs': [2e2, 3e2], # Hz
    'Disc': MiniZephyrHD, # discretization class passed to the MultiFreq wrappers
    'Q': 20, # presumably the attenuation quality factor — TODO confirm against zephyr docs
    'freqBase': 200., # Hz
}
# Shorthand copies used repeatedly for reshaping and plot extents below.
nx = systemConfig['nx']
nz = systemConfig['nz']
dx = systemConfig['dx']
dz = systemConfig['dz']
# +
MF = MultiFreq(systemConfig)
VMF = ViscoMultiFreq(systemConfig)
SKS = SparseKaiserSource(systemConfig)
xs, zs = 25, 25
sloc = np.array([xs, zs]).reshape((1,2))
q = SKS(sloc)
uMF = list(MF*q)
uVMF = list(VMF*q)
# +
clip = 10
fid = 1
plotopts = {
'vmin': -np.pi,
'vmax': np.pi,
'extent': [0., dx * nx, dz * nz, 0.],
'cmap': cm.bwr,
}
fig = plt.figure()
ax1 = fig.add_subplot(1,4,1)
plt.imshow(np.angle(uMF[fid].reshape((nz, nx))), **plotopts)
plt.title('MF Phase')
ax2 = fig.add_subplot(1,4,2)
plt.imshow(np.angle(uVMF[fid].reshape((nz, nx))), **plotopts)
plt.title('VMF Phase')
plotopts.update({
'vmin': -clip,
'vmax': clip,
})
ax3 = fig.add_subplot(1,4,3)
plt.imshow(uMF[fid].reshape((nz, nx)).real, **plotopts)
plt.title('MF Real')
ax4 = fig.add_subplot(1,4,4)
plt.imshow(uVMF[fid].reshape((nz, nx)).real, **plotopts)
plt.title('VMF Real')
fig.tight_layout()
# ---
fig = plt.figure()
ax = fig.add_subplot(2,1,1, aspect=5)
plt.plot(np.angle(uMF[fid]).reshape((nz, nx))[:,xs], '.', label='MultiFreq')
plt.plot(np.angle(uVMF[fid]).reshape((nz, nx))[:,xs], '.', label='ViscoMultiFreq')
plt.legend(loc=4)
plt.title('Phase through xs=%d'%xs)
ax = fig.add_subplot(2,1,2, aspect=3)
plt.plot(uMF[fid].real.reshape((nz, nx))[:,xs], label='MultiFreq')
plt.plot(uVMF[fid].real.reshape((nz, nx))[:,xs], label='ViscoMultiFreq')
plt.legend(loc=4)
plt.title('Real part through xs=%d'%xs)
fig.tight_layout()
# -
| notebooks/Test Dispersion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Rizwan-Ahmed-Surhio/Accountants/blob/main/1_Variables.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="76dDOWmxIF50"
# # Variables
#
# ---
#
#
# + [markdown] id="JhdTB_5A8veS"
# Use this file to practice the code in Colab and save a copy in your google drive.
# + [markdown] id="70wqMsWZvRGc"
# #### Python is a great programming language
# + id="dBhk3FyZv5sy"
# + id="VwNU6TbTHwaq"
x = "Python is a great programming language"
# + id="7alPo0IYUn6W" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="30ec320d-3093-41a6-cd93-eaf86ed30dd7"
x
# + id="TYV2U_2yrWsf"
x = 5
# + id="BIDuJLvLrYAB" colab={"base_uri": "https://localhost:8080/"} outputId="90b01fab-51c8-4368-deb6-63e92d92cbe7"
x
# + id="eGLBCJuJrbka"
X = 3 * 5
# + id="uTt7sVIXvdYr" colab={"base_uri": "https://localhost:8080/"} outputId="35c97626-15df-4b32-928d-5e156fddaf62"
x
# + id="IqyfW6fVvd8S" colab={"base_uri": "https://localhost:8080/"} outputId="6df0e280-5e27-4756-b703-ed1567161d75"
X
| 1_Variables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ejercicios Graphs, Paths & Components
#
# Ejercicios básicos de Grafos.
# ## Ejercicio - Número de Nodos y Enlaces
#
# _ (resuelva en código propio y usando la librería NetworkX (python) o iGraph (R)) _
#
# Cuente el número de nodos y enlaces con los siguientes links (asumiendo que el grafo puede ser dirigido Y no dirigido):
# Edge list shared by the undirected and directed examples below.
edges = set([(1, 2), (3, 1), (3, 2), (2, 4)])
import networkx as nx
G=nx.Graph() # Create an empty, undirected graph
G.add_edges_from(edges)
G2=nx.DiGraph() # Create an empty, DIRECTED graph (the original comment wrongly said undirected)
G2.add_edges_from(edges)
numNodes = G.number_of_nodes()
numEdges = G.number_of_edges() # undirected graph: 4 edges, each traversable in both directions
print("Grafo no dirigido")
print("La tupla (1,2) es no dirigida: ", G.has_edge(1,2))# True when an edge between 1 and 2 exists (direction is irrelevant in an undirected graph)
print("Es dirigido: ", G.is_directed())# whether the graph is directed
print("Numero de nodos: "+str(numNodes))
print("Numero de enlaces(no dirigido): "+str(numEdges))# prints 4 undirected edges
print("")
print("Grafo Dirigido")
print("Numero de nodos: "+str(G2.number_of_nodes()))
print("Numero de enlaces(dirigido): "+str(G2.number_of_edges()))# prints 4 directed edges
print(G[3])# adjacency view of node 3 in the undirected graph
# ## Ejercicio - Matriz de Adyacencia
#
# _ (resuelva en código propio y usando la librería NetworkX (python) o iGraph (R)) _
#
# Cree la matriz de adyacencia del grafo del ejercicio anterior (para dirigido y no-dirigido)
A = nx.adjacency_matrix(G)
print("Grafo no dirigido")
print(A.todense())
print("")
A2 = nx.adjacency_matrix(G2)
print("Grafo dirigido")
print(A2.todense())
# ## Ejercicio - Sparseness
#
# Calcule la proporción entre número de links existentes en 3 redes reales (http://snap.stanford.edu/data/index.html) contra el número de links posibles.
# En la matriz de adyacencia de cada uno de las redes elegidas, cuantos ceros hay?
# ## Ejercicio - Redes Bipartitas
#
# Defina una red bipartita y genere ambas proyecciones, explique qué son los nodos y links tanto de la red original como de las proyeccciones
# ## Ejercicio - Paths
#
# Cree un grafo de 5 nodos con 5 enlaces. Elija dos nodos cualquiera e imprima:
# + 5 Paths diferentes entre los nodos
# + El camino mas corto entre los nodos
# + El diámetro de la red
# + Un self-avoiding path
#
# # Ejercicio - Componentes
#
# Baje una red real (http://snap.stanford.edu/data/index.html) y lea el archivo
# Utilizando NetworkX o iGraph descubra el número de componentes
# Implemente el algorithmo Breadth First para encontrar el número de componentes (revise que el resultado es el mismo que utilizando la librería)
# ## Ejercicio - Degree distribution
#
# _ (resuelva en código propio y usando la librería NetworkX (python) o iGraph (R)) _
#
# Haga un plot con la distribución de grados de la red real
# Calcule el grado promedio
# ## Ejercicio - Diámetro
N = 5
# Cree un grafo de N nodos con el máximo diámetro posible
# Cree un grafo de N nodos con el mínimo diámetro posible
# Cree un grafo de N nodos que sea un ciclo simple
# ## Ejercicio - Pregunta "real"
#
# Una aerolínea tiene las siguientes rutas desde las ciudades a las que sirve (cada par tiene servicio en ambas direcciones).
routemap = [('St. Louis', 'Miami'),
('St. Louis', 'San Diego'),
('St. Louis', 'Chicago'),
('San Diego', 'Chicago'),
('San Diego', 'San Francisco'),
('San Diego', 'Minneapolis'),
('San Diego', 'Boston'),
('San Diego', 'Portland'),
('San Diego', 'Seattle'),
('Tulsa', 'New York'),
('Tulsa', 'Dallas'),
('Phoenix', 'Cleveland'),
('Phoenix', 'Denver'),
('Phoenix', 'Dallas'),
('Chicago', 'New York'),
('Chicago', 'Los Angeles'),
('Miami', 'New York'),
('Miami', 'Philadelphia'),
('Miami', 'Denver'),
('Boston', 'Atlanta'),
('Dallas', 'Cleveland'),
('Dallas', 'Albuquerque'),
('Philadelphia', 'Atlanta'),
('Denver', 'Minneapolis'),
('Denver', 'Cleveland'),
('Albuquerque', 'Atlanta'),
('Minneapolis', 'Portland'),
('Los Angeles', 'Seattle'),
('San Francisco', 'Portland'),
('San Francisco', 'Seattle'),
('San Francisco', 'Cleveland'),
('Seattle', 'Portland')]
# Cuál es el máximo número de intercambios que tendría que hacer un pasajero en un solo viaje entre dos ciudades servidas? (suponiendo rutas óptimas)
# Si usted necesitara viajar mucho en esta aerolínea, cual sería el lugar óptimo para vivir? (i.e. minimizar el número de intercambios para llegar a cualquier ciudad)
# Visualize la red
| DiderGonzalez/Ejercicios 1.1/Ejercicios 1.1 - Graphs, Paths & Components.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pandas
#
# Pandas is a powerful, open source Python library for data analysis, manipulation, and visualization. If you're working with data in Python and you're not using pandas, you're probably working too hard!
#
# There are many things to like about pandas: It's well-documented, has a huge amount of community support, is under active development, and plays well with other Python libraries (such as matplotlib, scikit-learn, and seaborn).
#
# There are also things you might not like: pandas has an overwhelming amount of functionality (so it's hard to know where to start), and it provides too many ways to accomplish the same task (so it's hard to figure out the best practices).
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# read data in a table
orders = pd.read_csv('./dataset/chipotle.tsv', sep = "\t")
orders.head()
orders.tail()
# change columns
new_columns = ["Orden","Cantidad","Producto","Descripción","Precio"]
orders.columns = new_columns
orders.head()
#other form using rename. You can rename specific columns by using the 'rename' method
orders.rename(columns={'Orden':'OrdenID', 'Descripción':'Detalle'}, inplace=True)
orders.head()
# get copy of pandas
df = orders.copy()
#rename index
df = df.rename(index={1:'a', 2:'b', 3:'c'})
df.head()
orders.head()
orders.index.names = ['INDICE']
orders.columns.names = ['ATRIBUTOS']
orders.head()
# To acces a specific column you can use bracket notation or dot notation. Bracket notation will always work, whereas dot notation has limitations:
#
# + Dot notation doesn't work if there are spaces in the Series name
# + Dot notation doesn't work if the Series has the same name as a DataFrame method or attribute (like 'head' or 'shape')
# + Dot notation can't be used to define the name of a new Series (see below)
# two way to access specific columns
orders.Producto.head()
orders["Producto"].head()
# cut off the "$" character
# BUG FIX: with regex=True the pattern "$" is the end-of-string anchor, which
# matches a zero-width position and removes nothing — the "$" sign stayed in
# the strings and pd.to_numeric(..., errors='coerce') turned every price into
# NaN. Replace the literal character instead.
f = orders["Precio"].str.replace("$", "", regex=False)
# convert to float variable (unparseable values become NaN)
orders.Precio = pd.to_numeric(f, errors = 'coerce')
# other option: use .str.replace and then convert to float
#orders['Precio'] = orders.item_price.str.replace('$', '', regex=False).astype(float)
orders.Precio.head()
#verify tipes of variables into DataFrame
orders.dtypes
orders.Cantidad = orders.Cantidad.astype(float)
orders.dtypes
# create a new column Series (must use bracket notation to define the Series name)
orders['Total'] = orders.Cantidad * orders.Precio
orders.head(20)
# example method: calculate summary statistics
orders.describe()
# use an optional parameter to the describe method to summarize only 'object' columns
orders.describe(include=['object'])
# use an optional parameter to the describe method to summarize all columns
orders.describe(include='all')
# example attribute: number of rows and columns
orders.shape
#set the index with some of the columns, then the column name will become the new index level name.
orders = orders.set_index(['OrdenID'], append=False)
orders.head()
# ## Remove columns or rows
df.drop('Detalle',axis=1, inplace=True)
df.head()
# remove multiple rows at once (axis=0 refers to rows)
df.drop(["a", "b"], axis=0, inplace=True)
df.head()
#get unique values
df.Producto.unique()
# count the number of unique values in the Series
df.Producto.nunique()
# sort the 'title' Series in ascending order (returns a Series)
orders.sort_values("Precio").head()
orders.sort_values("Precio", ascending = False).head()
# sort the DataFrame first by 'Cantidad', then by 'Precio'
orders.sort_values(['Cantidad', 'Precio'], ascending= False).head()
# ## filter rows of a pandas DataFrame by column value
#movies dataset
df_movies = pd.read_csv('./dataset/imdb_1000.csv')
df_movies.head()
df_movies.columns = ['Rating','Titulo', 'Clasificacion','Genero','Duracion','Reparto']
df_movies.head()
# **Exercise**: Filter the DataFrame rows to only show movies with a 'duration' of at least 200 minutes.
# +
# create a list in which each element refers to a DataFrame row: True if the row satisfies the condition, False otherwise
mask = []
for minutos in df_movies.Duracion:
if minutos >= 200:
mask.append(True)
else:
mask.append(False)
# convert the list to a Series
is_long = pd.Series(mask)
# use bracket notation with the boolean Series to tell the DataFrame which rows to display
df_movies[is_long].head(20)
# +
# simplify the steps above: no need to write a for loop to create 'is_long' since pandas will broadcast the comparison
is_long = df_movies.Duracion >= 200
df_movies[is_long]
# or equivalently, write it in one line (no need to create the 'is_long' object)
df_movies[df_movies.Duracion >= 200]
# -
# ## multiple filter criteria to a pandas DataFrame
#
# Rules for specifying multiple filter criteria in pandas:
#
# + use & instead of and
# + use | instead of or
# + add parentheses around each condition to specify evaluation order
# Exercise
# : Further filter the DataFrame of long movies (Duracion >= 200) to only show movies whose 'Genero' is also 'Crime'
# CORRECT: use the '&' operator to specify that both conditions are required
df_movies[(df_movies.Duracion >=200) & (df_movies.Genero == 'Crime')]
#using the '|' operator would have shown movies that are either long or dramas (or both)
df_movies[(df_movies.Duracion >=200) | (df_movies.Genero == 'Drama')].head()
#equivalently, use the 'isin' method to remplace '|' operator
df_movies[df_movies.Genero.isin(['Crime', 'Drama', 'Action'])].head(10)
df_movies.keys()
#iterate through a DataFrame
for index, row in df_movies.iterrows():
if index ==3:
break
print("the index {0} has the movie {1} with rating {2}".format(index, row.Titulo, row.Rating))
#print(row.keys())
#drop all non-numeric columns from a DataFrame
df_movies.select_dtypes(include=['object']).head()
df_movies.select_dtypes(include=[np.number]).head()
# string methods can be chained together
# BUG FIX: the original chain ended with .str.replace('u', ''), which deletes
# EVERY letter "u" inside the actor names (e.g. "Russell" -> "Rssell"). The
# intent is only to strip the brackets and the Python-2 u'...' unicode
# prefixes from the stringified list, so replace those literally instead.
df_movies.Reparto = (
    df_movies.Reparto.str.replace('[', '', regex=False)
    .str.replace(']', '', regex=False)
    .str.replace("u'", "'", regex=False)
)
df_movies.head()
# calculate the mean duration for each genero
df_movies.groupby('Genero').Duracion.mean()
# calculate the max rating for each genero
df_movies.groupby('Genero').Rating.max()
# multiple aggregation functions can be applied simultaneously
df_movies.groupby('Genero').Duracion.agg(['count', 'mean', 'min', 'max'])
# specifying a column to which the aggregation function should be applied is not required
df_movies.groupby('Genero').mean()
# side-by-side bar plot of the DataFrame directly above
df_movies.groupby('Genero').Duracion.mean().plot(kind='bar', ec='black')
# +
# histogram of the 'duration' Series (shows the distribution of a numerical variable)
df_movies.Duracion.plot(kind='hist', ec='black')
# -
# count how many times each value in the Series occurs
df_movies.Genero.value_counts()
# display percentages instead of raw counts
df_movies.Genero.value_counts(normalize=True)
# compute a cross-tabulation of two Series
pd.crosstab(df_movies.Genero, df_movies.Rating)
# compute a cross-tabulation of two Series
pd.crosstab([df_movies.Genero,df_movies.Clasificacion], df_movies.Rating)
# ## missing values in pandas
#
# What does "NaN" mean?
#
# + "NaN" is not a string, rather it's a special value: numpy.nan.
# + It stands for "Not a Number" and indicates a missing value.
df_ufo = pd.read_csv("./dataset/ufo.csv")
df_ufo.head()
# 'isnull' returns a DataFrame of booleans (True if missing, False if not missing)
df_ufo.isnull().tail(20)
# count the number of missing values in each Series
df_ufo.isnull().sum()
# 'nonnull' returns the opposite of 'isnull' (True if not missing, False if missing)
df_ufo.notnull().head()
# use the 'isnull' Series method to filter the DataFrame rows
df_ufo[df_ufo.notnull()].head()
# use the 'isnull' Series method to filter the DataFrame rows
df_ufo[df_ufo["Colors Reported"].notnull()].head()
# ‘any’ : If any NA values are present, drop that row or column. ‘all’ : If all values are NA, drop that row or column.
df_ufo.dropna(how='any').head()
# if 'any' values are missing in a row (considering only 'City' and 'Shape Reported'), then drop that row
df_ufo.dropna(subset=['City', 'Shape Reported'], how='any').head()
# fill missing values using "backward fill" strategy (doesn't affect the DataFrame since inplace=False)
df_ufo.fillna(method='bfill').tail()
# compare with "forward fill" strategy (doesn't affect the DataFrame since inplace=False)
df_ufo.fillna(method='ffill').tail()
# fill in missing values with a specified value
df_ufo['Colors Reported'].fillna(value='black', inplace=True)
df_ufo.head(10)
# selection: select a portion of the DataFrame using the index
df_ufo.loc[23, 'State']
# selection: select a portion of the DataFrame using the index
df_ufo.iloc[23,3]
df_ufo.set_index("Time", inplace = True)
df_ufo.head()
df_ufo.sort_values("Time", inplace = True)
df_ufo.head()
df_ufo.reset_index(inplace = True)
df_ufo.info()
df_ufo['Time'] = pd.to_datetime(df_ufo['Time'])
df_ufo.info()
df_ufo.set_index("Time", inplace = True)
df_ufo.head()
df_ufo.index.year[df_ufo.index.year==1980]
df_ufo.index.month
df = pd.DataFrame()
df['hour'] = df_ufo.index.hour
df['minute'] = df_ufo.index.minute
df['month'] = df_ufo.index.month
df['day'] = df_ufo.index.day
df['year'] = df_ufo.index.year
df.sort_values("year")
df.head()
df.tail(20)
# +
# Extract the hour from 10am on 1944-01-01 to 10am on 1950-01-01
ts1 = df_ufo.loc['1944-01-01 10:00:00':'1950-01-01 10:00:00']
# Extract 2000-9' from ts0: ts2
ts2 = df_ufo.loc['September, 2000']
# Extract data from '2010-12-15' to '2010-12-31': ts3
ts3 = df_ufo.loc['9/1/1980':'9/10/1981']
# -
ts1.head()
ts2.head()
ts3.head()
df_ufo2 = pd.read_csv("./dataset/ufo.csv", index_col='Time', parse_dates=True)
df_ufo2.head()
df_ufo2.loc["1930-06-01 22:00:00"]
df_ufo2.shape
df_ufo2["values_Random"] = np.random.randint(0,100,18241)
df_ufo2.head(20)
df_ufo2['year'] = df_ufo2.index.year
temp = df_ufo2.groupby('year').count()
temp.head()
scatt = pd.DataFrame(temp.City)
scatt.reset_index(inplace = True)
scatt.plot(kind='scatter',x='year', y = 'City')
# ## Resampling and frequency
#
# Pandas provides methods for resampling time series data. When downsampling or upsampling, the syntax is similar, but the methods called are different. Both use the concept of 'method chaining' - df.method1().method2().method3() - to direct the output from one method call to the input of the next, and so on, as a sequence of operations, one feeding into the next.
#
# For example, if you have hourly data, and just need daily data, pandas will not guess how to throw out the 23 of 24 points. You must specify this in the method. One approach, for instance, could be to take the mean, as in df.resample('D').mean().
#
# In this exercise, a data set containing hourly temperature data has been pre-loaded for you. Your job is to resample the data using a variety of aggregation methods to answer a few questions.
df = pd.read_csv('./dataset/weather_data_austin_2010.csv', index_col= 'Date', parse_dates= True)
df.head()
# +
# Downsample to 6 hour data and aggregate by mean: df1
df1 = df['Temperature'].resample('6H').mean()
# Downsample to daily data and count the number of data points: df2
df2 = df['Temperature'].resample('D').count()
# -
df1.head(10)
df2.head(10)
# ## Separating and resampling
# With pandas, you can resample in different ways on different subsets of your data. For example, resampling different months of data with different aggregations
# +
# Extract temperature data for August: august
august = df.loc['August 2010','Temperature']
# Downsample to obtain only the daily highest temperatures in August: august_highs
august_highs = august.resample('D').max()
august_lows = august.resample('D').min()
# -
august_highs.plot()
august_lows.plot()
# ## Rolling mean and frequency
#
# In statistics, a moving average (rolling average or running average) is a calculation to analyze data points by creating a series of averages of different subsets of the full data set. It is also called a moving mean (MM)[1] or rolling mean and is a type of finite impulse response filter. Variations include: simple, and cumulative, or weighted forms (described below).
#
# Given a series of numbers and a fixed subset size, the first element of the moving average is obtained by taking the average of the initial fixed subset of the number series. Then the subset is modified by "shifting forward"; that is, excluding the first number of the series and including the next value in the subset.
#
# A moving average is commonly used with time series data to smooth out short-term fluctuations and highlight longer-term trends or cycles. The threshold between short-term and long-term depends on the application, and the parameters of the moving average will be set accordingly. For example, it is often used in technical analysis of financial data, like stock prices, returns or trading volumes. It is also used in economics to examine gross domestic product, employment or other macroeconomic time series. Mathematically, a moving average is a type of convolution and so it can be viewed as an example of a low-pass filter used in signal processing. When used with non-time series data, a moving average filters higher frequency components without any specific connection to time, although typically some kind of ordering is implied. Viewed simplistically it can be regarded as smoothing the data.
# +
# Extract data from 2010-Aug-01 to 2010-Aug-15: unsmoothed
unsmoothed = df['Temperature']['August 1 2010':'August 15 2010']
# Apply a rolling mean with a 24 hour window: smoothed
smoothed = unsmoothed.rolling(window=24).mean()
# Create a new DataFrame with columns smoothed and unsmoothed: august
august = pd.DataFrame({'smoothed':smoothed, 'unsmoothed':unsmoothed})
# Plot both smoothed and unsmoothed data using august.plot().
august.plot()
plt.show()
| 5PandasFundations/.ipynb_checkpoints/1_2Pandas_Fundations-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# +
#Initializ with centroid data and the corresponding uncertainty generated from Feature Extraction
STAR_CENTROIDS = np.array([[-3,4], [8,-6], [10,10], [5,5]])
STAR_CENTROIDS_UNCERTAINTY = np.array([[0.06], [0.004], [0.001], [0.002]])
#Initialize
CATALOGUE = pd.read_csv("Modified Star Catalogue.csv")
PROCESSED_CATALOGUE= pd.read_csv('Processed_Catalogue.csv')
# +
def cos(row):
    """Cosine of the row's 'Ang_Distance' value, which is given in degrees."""
    angle_deg = row['Ang_Distance']
    return np.cos(np.deg2rad(angle_deg))
# Reference table of catalogue star pairs; 'Ang_Distance' is stored as
# cos(theta) (via the cos() helper above) and sorted ascending so the voting
# step below can filter by interval.
REFERENCE = pd.DataFrame(columns=['Star_ID1', 'Star_ID2', 'Ang_Distance'])
REFERENCE['Star_ID1'], REFERENCE['Star_ID2'] = PROCESSED_CATALOGUE['Star_ID1'], PROCESSED_CATALOGUE['Star_ID2']
REFERENCE['Ang_Distance'] = PROCESSED_CATALOGUE.apply(cos, axis = 1)
REFERENCE.sort_values('Ang_Distance' ,ascending=True, inplace=True)
REFERENCE.head()
# -
VOTING_DF = pd.DataFrame(columns=['Star_ID', 'Votes'])
VOTING_DF['Star_ID'] = CATALOGUE['StarID']
VOTING_DF.sort_values('Star_ID', inplace = True, ascending = True)
VOTING_DF['Votes'].values[:] = 0
VOTING_DF.head()
STAR_CENTROIDS
STAR_CENTROIDS_UNCERTAINTY
# +
def starVectorTransform(centroid, focal_length=10):
    '''
    Generates the unit 3D vector from a given 2D centroid of a star on the
    image frame, with the focal point as the origin.
    <Formula> - CubeStar Doc - Appendix B
    '''
    x, y = centroid
    # Back-project the image-plane point onto the plane z = focal_length
    # (expressed in focal-length units), then normalise to unit length.
    direction = np.array([x / focal_length, y / focal_length, 1.0])
    norm = np.power(direction[0] ** 2 + direction[1] ** 2 + 1.0, 0.5)
    return direction / norm
STAR_VECTORS = np.apply_along_axis(starVectorTransform, 1, STAR_CENTROIDS, focal_length=10 )
STAR_VECTORS
# +
def vectorAngularDistance(vect1, vect2):
    '''
    Returns the angular distance [cos(theta)] between two unit vectors
    separated by an angle theta, i.e. their dot product.
    '''
    elementwise = vect1 * vect2
    return elementwise.sum()
def uncertaintyAngularDistance(u1, u2):
    '''
    Combined uncertainty for the angular distance between two stars, given
    the individual centroid uncertainties u1 and u2.
    Assumes that the uncertainty is a simple addition.
    '''
    # NOTE(review): independent errors are usually combined in quadrature
    # (sqrt(u1^2 + u2^2)); plain addition gives a wider, more conservative
    # acceptance interval than quadrature would.
    return u1 + u2
# +
#Generating square symmetric matrix of order (row X row), where row is the number of stars in the STAR_CENTROID array
#Each element corresponds to the angular distance [cos(theta)] between vectors Star_i & Star_j separated by angle theta
#Thus the diagonal elements will always be one
# One row/column per detected star; the matrix holds cos(theta) between each
# pair of star vectors, so the diagonal is all ones.
rows, columns = STAR_CENTROIDS.shape
ANGULAR_DISTANCE = np.zeros((rows,rows))
for i in range(rows):
    # Only the upper triangle is computed; the mirrored entry is filled in
    # the same assignment, making the matrix symmetric by construction.
    for j in range(i, rows):
        dist_ij = vectorAngularDistance(STAR_VECTORS[i], STAR_VECTORS[j])
        ANGULAR_DISTANCE[i,j] = ANGULAR_DISTANCE[j,i] = dist_ij
#Symmetric Matrix - notebook display of an all-True boolean comparison
ANGULAR_DISTANCE == ANGULAR_DISTANCE.T
# -
ANGULAR_DISTANCE
# +
# Reset the vote tally in place before accumulating.
VOTING_DF['Votes'].values[:] = 0
for i in range(rows):
    for j in range(i+1, rows):
        # Measured angular distance [cos(theta)] between image stars i and j.
        dist_ij = ANGULAR_DISTANCE[i,j]
        uncert_ij = uncertaintyAngularDistance(STAR_CENTROIDS_UNCERTAINTY[i], STAR_CENTROIDS_UNCERTAINTY[j])
        # Acceptance interval [lower, upper]; each bound is a length-1 array
        # (the uncertainties are stored as [value]), hence the [0] indexing.
        r_ij = [dist_ij - uncert_ij, dist_ij + uncert_ij]
        # Catalogue pairs whose angular distance falls inside the interval.
        temp_df = REFERENCE[(REFERENCE['Ang_Distance']>r_ij[0][0]) & (REFERENCE['Ang_Distance']<r_ij[1][0])]
        # Every catalogue star appearing in a matching pair receives one vote.
        for k in range(temp_df.shape[0]):
            s1, s2, ad = temp_df.iloc[k]
            VOTING_DF.loc[VOTING_DF['Star_ID'] == s1, 'Votes'] = VOTING_DF.loc[VOTING_DF['Star_ID'] == s1, 'Votes'] + 1
            VOTING_DF.loc[VOTING_DF['Star_ID'] == s2, 'Votes'] = VOTING_DF.loc[VOTING_DF['Star_ID'] == s2, 'Votes'] + 1
# Highest-voted catalogue stars are the most likely identifications.
VOTING_DF.sort_values('Votes', inplace = True, ascending=False)
VOTING_DF.head(10)
# -
| Geometric_Voting_Runtime.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from sepal_ui import sepalwidgets as sw
from component import model
from component import tile
from component.message import cm
# -
frag_model = model.FragModel()
frag_title = sw.Tile(
frag_model.tile_id, cm.frag.title, [sw.Markdown(cm.frag.description)]
)
frag_convert = tile.ConvertByte(frag_model, 4)
frag_process = tile.FragTile(frag_model)
frag_title
# +
# from pathlib import Path
#
# frag_io.bin_map = Path('/home/prambaud/gwb_results/frag/example_bin_map.tif')
# frag_io.connectivity = 8
# frag_io.res = 100
# frag_io.window_size = '[17, 11, 7]'
# frag_io.option = 'FAD-APP2'
# frag_io.prescision = 1
#
# frag_io.__dict__
# -
frag_convert
frag_process
| frag_ui.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pretrained BERT models
# +
import sys
package_dir = "../input/ppbert/pytorch-pretrained-bert/pytorch-pretrained-BERT"
sys.path.append(package_dir)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data
import numpy as np
import pandas as pd
from tqdm import tqdm
import os
import warnings
from pytorch_pretrained_bert import BertTokenizer, BertForSequenceClassification, BertAdam
from pytorch_pretrained_bert import BertConfig
import gc
warnings.filterwarnings(action='once')
device = torch.device('cuda')
def convert_lines(example, max_seq_length, tokenizer):
    """Tokenize an iterable of texts into fixed-length BERT input-id rows.

    Each row is [CLS] + tokens + [SEP], zero-padded so every row has exactly
    ``max_seq_length`` ids; texts whose token count exceeds the budget are
    truncated (``longer`` counts how many, though it is not returned).
    """
    budget = max_seq_length - 2  # reserve two slots for [CLS] and [SEP]
    all_tokens = []
    longer = 0
    for text in tqdm(example):
        tokens = tokenizer.tokenize(text)
        if len(tokens) > budget:
            tokens = tokens[:budget]
            longer += 1
        ids = tokenizer.convert_tokens_to_ids(["[CLS]"] + tokens + ["[SEP]"])
        padding = [0] * (budget - len(tokens))
        all_tokens.append(ids + padding)
    return np.array(all_tokens)
MAX_SEQUENCE_LENGTH = 220
SEED = 1234
BATCH_SIZE = 32
BERT_MODEL_PATH = '../input/bert-pretrained-models/uncased_l-12_h-768_a-12/uncased_L-12_H-768_A-12/'
LARGE_BERT_MODEL_PATH = '../input/bert-pretrained-models/uncased_l-24_h-1024_a-16/uncased_L-24_H-1024_A-16/'
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
# +
# Pretrained BERT models - Google's pretrained BERT model
BERT_SMALL_PATH = '../input/bert-pretrained-models/uncased_l-12_h-768_a-12/uncased_L-12_H-768_A-12/'
BERT_LARGE_PATH = '../input/bert-pretrained-models/uncased_l-24_h-1024_a-16/uncased_L-24_H-1024_A-16/'
# JIGSAW fine-tuned BERT models
JIGSAW_BERT_SMALL_MODEL_PATH = '../input/bert-inference/bert/bert_pytorch.bin'
JIGSAW_BERT_LARGE_MODEL_PATH = '../input/jigsawpretrainedbertmodels/jigsaw-bert-large-uncased-len-220-fp16/epoch-1/pytorch_model.bin'
JIGSAW_BERT_SMALL_JSON_PATH = '../input/bert-inference/bert/bert_config.json'
JIGSAW_BERT_LARGE_JSON_PATH = '../input/jigsawpretrainedbertmodels/jigsaw-bert-large-uncased-len-220-fp16/epoch-1/config.json'
NUM_BERT_MODELS = 2
INFER_BATCH_SIZE = 64
train_df = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/train.csv')
test_df = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv')
test_preds = np.zeros((test_df.shape[0],NUM_BERT_MODELS))
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
print("Predicting BERT large model......")
# Prepare data
bert_config = BertConfig(JIGSAW_BERT_LARGE_JSON_PATH)
tokenizer = BertTokenizer.from_pretrained(BERT_LARGE_PATH, cache_dir=None,do_lower_case=True)
X_test = convert_lines(test_df["comment_text"].fillna("DUMMY_VALUE"), MAX_SEQUENCE_LENGTH, tokenizer)
test = torch.utils.data.TensorDataset(torch.tensor(X_test, dtype=torch.long))
# Load fine-tuned BERT model
gc.collect()
model = BertForSequenceClassification(bert_config, num_labels=1)
model.load_state_dict(torch.load(JIGSAW_BERT_LARGE_MODEL_PATH))
model.to(device)
for param in model.parameters():
param.requires_grad = False
model.eval()
# Predicting: run the fine-tuned BERT-large over the test set in batches.
model_preds = np.zeros((len(X_test)))
test_loader = torch.utils.data.DataLoader(test, batch_size=INFER_BATCH_SIZE, shuffle=False)
tk0 = tqdm(test_loader)
for i, (x_batch,) in enumerate(tk0):
    # attention_mask: convert_lines pads with id 0, so non-zero ids mark real tokens.
    pred = model(x_batch.to(device), attention_mask=(x_batch > 0).to(device), labels=None)
    model_preds[i * INFER_BATCH_SIZE:(i + 1) * INFER_BATCH_SIZE] = pred[:, 0].detach().cpu().squeeze().numpy()
# Logits -> probabilities; column 0 of the ensemble holds the large model.
test_preds[:,0] = torch.sigmoid(torch.tensor(model_preds)).numpy().ravel()
del model
gc.collect()
print("Predicting BERT small model......")
bert_config = BertConfig(JIGSAW_BERT_SMALL_JSON_PATH)
tokenizer = BertTokenizer.from_pretrained(BERT_SMALL_PATH, cache_dir=None,do_lower_case=True)
X_test = convert_lines(test_df["comment_text"].fillna("DUMMY_VALUE"), MAX_SEQUENCE_LENGTH, tokenizer)
test = torch.utils.data.TensorDataset(torch.tensor(X_test, dtype=torch.long))
# # # Load fine-tuned BERT model
model = BertForSequenceClassification(bert_config, num_labels=1)
model.load_state_dict(torch.load(JIGSAW_BERT_SMALL_MODEL_PATH))
model.to(device)
for param in model.parameters():
param.requires_grad = False
model.eval()
# Predicting
model_preds = np.zeros((len(X_test)))
test_loader = torch.utils.data.DataLoader(test, batch_size=INFER_BATCH_SIZE, shuffle=False)
tk0 = tqdm(test_loader)
for i, (x_batch,) in enumerate(tk0):
pred = model(x_batch.to(device), attention_mask=(x_batch > 0).to(device), labels=None)
model_preds[i * INFER_BATCH_SIZE:(i + 1) * INFER_BATCH_SIZE] = pred[:, 0].detach().cpu().squeeze().numpy()
test_preds[:,1] = torch.sigmoid(torch.tensor(model_preds)).numpy().ravel()
del model
gc.collect()
# Sub-model prediction
bert_submission = pd.DataFrame.from_dict({
'id': test_df['id'],
'prediction': test_preds.mean(axis=1)})
bert_submission.to_csv('bert_submission.csv', index=False)
# -
# **Credits**
# This notebook was mainly inspired by the following awesome kernel scripts:
#
# https://www.kaggle.com/gpreda/jigsaw-fast-compact-solution
#
# https://www.kaggle.com/christofhenkel/how-to-preprocessing-for-glove-part2-usage
#
# https://www.kaggle.com/shubham505/apply-by-simple-bilstm
#
#
#
# # Preparations
#
# ## Datasets
# You will need to add the following Kaggle dataset for pickled pretrained embeddings
#
# https://www.kaggle.com/chriscc/pickled-word-embedding
#
# ## Import packages
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import gc
import re
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
from keras.preprocessing import text, sequence
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Dense, Embedding, SpatialDropout1D, add, concatenate
from keras.layers import CuDNNLSTM, Bidirectional, GlobalMaxPooling1D, GlobalAveragePooling1D
from keras.preprocessing import text, sequence
from keras.callbacks import LearningRateScheduler
from keras.engine.topology import Layer
from keras import initializers, regularizers, constraints, optimizers, layers
from tqdm._tqdm_notebook import tqdm_notebook as tqdm
import pickle
tqdm.pandas()
# + [markdown] _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# ## Configurations
# +
EMBEDDING_PATHS = ['../input/pickled-word-embedding/crawl-300d-2M.pkl',
                  '../input/pickled-word-embedding/glove.840B.300d.pkl']
NUM_MODELS = 2 # The number of classifiers we want to train
BATCH_SIZE = 512 # can be tuned
LSTM_UNITS = 128 # can be tuned
DENSE_HIDDEN_UNITS = 4*LSTM_UNITS # can be tuned
EPOCHS = 4 # The number of epochs we want to train each classifier for
MAX_LEN = 220 # can be tuned
# Identity columns, used below for sample weighting.
IDENTITY_COLUMNS = [
    'transgender', 'female', 'homosexual_gay_or_lesbian', 'muslim', 'hindu',
    'white', 'black', 'psychiatric_or_mental_illness', 'jewish'
]
# Auxiliary targets predicted alongside the main toxicity score.
AUX_COLUMNS = ['target', 'severe_toxicity','obscene','identity_attack','insult','threat']
TEXT_COLUMN = 'comment_text'
TARGET_COLUMN = 'target'
# ## Utils
# +
#----------------------------------- Preprocessing-------------------------------------#
SYMBOLS_TO_ISOLATE = '.,?!-;*"…:—()%#$&_/@\・ω+=”“[]^–>\\°<~•≠™ˈʊɒ∞§{}·τα❤☺ɡ|¢→̶`❥━┣┫┗O►★©―ɪ✔®\x96\x92●£♥➤´¹☕≈÷♡◐║▬′ɔː€۩۞†μ✒➥═☆ˌ◄½ʻπδηλσερνʃ✬SUPERIT☻±♍µº¾✓◾؟.⬅℅»Вав❣⋅¿¬♫CMβ█▓▒░⇒⭐›¡₂₃❧▰▔◞▀▂▃▄▅▆▇↙γ̄″☹➡«φ⅓„✋:¥̲̅́∙‛◇✏▷❓❗¶˚˙)сиʿ✨。ɑ\x80◕!%¯−flfi₁²ʌ¼⁴⁄₄⌠♭✘╪▶☭✭♪☔☠♂☃☎✈✌✰❆☙○‣⚓年∎ℒ▪▙☏⅛casǀ℮¸w‚∼‖ℳ❄←☼⋆ʒ⊂、⅔¨͡๏⚾⚽Φ×θ₩?(℃⏩☮⚠月✊❌⭕▸■⇌☐☑⚡☄ǫ╭∩╮,例>ʕɐ̣Δ₀✞┈╱╲▏▕┃╰▊▋╯┳┊≥☒↑☝ɹ✅☛♩☞AJB◔◡↓♀⬆̱ℏ\x91⠀ˤ╚↺⇤∏✾◦♬³の|/∵∴√Ω¤☜▲↳▫‿⬇✧ovm-208'‰≤∕ˆ⚜☁'
SYMBOLS_TO_REMOVE = '\n🍕\r🐵\xa0\ue014\t\uf818\uf04a\xad😢🐶️\uf0e0😜😎👊\u200b\u200e😁عدويهصقأناخلىبمغر😍💖💵Е👎😀😂\u202a\u202c🔥😄🏻💥ᴍʏʀᴇɴᴅᴏᴀᴋʜᴜʟᴛᴄᴘʙғᴊᴡɢ😋👏שלוםבי😱‼\x81エンジ故障\u2009🚌ᴵ͞🌟😊😳😧🙀😐😕\u200f👍😮😃😘אעכח💩💯⛽🚄🏼ஜ😖ᴠ🚲‐😟😈💪🙏🎯🌹😇💔😡\x7f👌ἐὶήιὲκἀίῃἴξ🙄H😠\ufeff\u2028😉😤⛺🙂\u3000تحكسة👮💙فزط😏🍾🎉😞\u2008🏾😅😭👻😥😔😓🏽🎆🍻🍽🎶🌺🤔😪\x08‑🐰🐇🐱🙆😨🙃💕𝘊𝘦𝘳𝘢𝘵𝘰𝘤𝘺𝘴𝘪𝘧𝘮𝘣💗💚地獄谷улкнПоАН🐾🐕😆ה🔗🚽歌舞伎🙈😴🏿🤗🇺🇸мυтѕ⤵🏆🎃😩\u200a🌠🐟💫💰💎эпрд\x95🖐🙅⛲🍰🤐👆🙌\u2002💛🙁👀🙊🙉\u2004ˢᵒʳʸᴼᴷᴺʷᵗʰᵉᵘ\x13🚬🤓\ue602😵άοόςέὸתמדףנרךצט😒͝🆕👅👥👄🔄🔤👉👤👶👲🔛🎓\uf0b7\uf04c\x9f\x10成都😣⏺😌🤑🌏😯ех😲Ἰᾶὁ💞🚓🔔📚🏀👐\u202d💤🍇\ue613小土豆🏡❔⁉\u202f👠》कर्मा🇹🇼🌸蔡英文🌞🎲レクサス😛外国人关系Сб💋💀🎄💜🤢َِьыгя不是\x9c\x9d🗑\u2005💃📣👿༼つ༽😰ḷЗз▱ц🤣卖温哥华议会下降你失去所有的钱加拿大坏税骗子🐝ツ🎅\x85🍺آإشء🎵🌎͟ἔ油别克🤡🤥😬🤧й\u2003🚀🤴ʲшчИОРФДЯМюж😝🖑ὐύύ特殊作戦群щ💨圆明园קℐ🏈😺🌍⏏ệ🍔🐮🍁🍆🍑🌮🌯🤦\u200d𝓒𝓲𝓿𝓵안영하세요ЖљКћ🍀😫🤤ῦ我出生在了可以说普通话汉语好极🎼🕺🍸🥂🗽🎇🎊🆘🤠👩🖒🚪天一家⚲\u2006⚭⚆⬭⬯⏖新✀╌🇫🇷🇩🇪🇮🇬🇧😷🇨🇦ХШ🌐\x1f杀鸡给猴看ʁ𝗪𝗵𝗲𝗻𝘆𝗼𝘂𝗿𝗮𝗹𝗶𝘇𝗯𝘁𝗰𝘀𝘅𝗽𝘄𝗱📺ϖ\u2000үսᴦᎥһͺ\u2007հ\u2001ɩye൦lƽh𝐓𝐡𝐞𝐫𝐮𝐝𝐚𝐃𝐜𝐩𝐭𝐢𝐨𝐧Ƅᴨןᑯ໐ΤᏧ௦Іᴑ܁𝐬𝐰𝐲𝐛𝐦𝐯𝐑𝐙𝐣𝐇𝐂𝐘𝟎ԜТᗞ౦〔Ꭻ𝐳𝐔𝐱𝟔𝟓𝐅🐋ffi💘💓ё𝘥𝘯𝘶💐🌋🌄🌅𝙬𝙖𝙨𝙤𝙣𝙡𝙮𝙘𝙠𝙚𝙙𝙜𝙧𝙥𝙩𝙪𝙗𝙞𝙝𝙛👺🐷ℋ𝐀𝐥𝐪🚶𝙢Ἱ🤘ͦ💸ج패티W𝙇ᵻ👂👃ɜ🎫\uf0a7БУі🚢🚂ગુજરાતીῆ🏃𝓬𝓻𝓴𝓮𝓽𝓼☘﴾̯﴿₽\ue807𝑻𝒆𝒍𝒕𝒉𝒓𝒖𝒂𝒏𝒅𝒔𝒎𝒗𝒊👽😙\u200cЛ‒🎾👹⎌🏒⛸公寓养宠物吗🏄🐀🚑🤷操美𝒑𝒚𝒐𝑴🤙🐒欢迎来到阿拉斯ספ𝙫🐈𝒌𝙊𝙭𝙆𝙋𝙍𝘼𝙅ﷻ🦄巨收赢得白鬼愤怒要买额ẽ🚗🐳𝟏𝐟𝟖𝟑𝟕𝒄𝟗𝐠𝙄𝙃👇锟斤拷𝗢𝟳𝟱𝟬⦁マルハニチロ株式社⛷한국어ㄸㅓ니͜ʖ𝘿𝙔₵𝒩ℯ𝒾𝓁𝒶𝓉𝓇𝓊𝓃𝓈𝓅ℴ𝒻𝒽𝓀𝓌𝒸𝓎𝙏ζ𝙟𝘃𝗺𝟮𝟭𝟯𝟲👋🦊多伦🐽🎻🎹⛓🏹🍷🦆为和中友谊祝贺与其想象对法如直接问用自己猜本传教士没积唯认识基督徒曾经让相信耶稣复活死怪他但当们聊些政治题时候战胜因圣把全堂结婚孩恐惧且栗谓这样还♾🎸🤕🤒⛑🎁批判检讨🏝🦁🙋😶쥐스탱트뤼도석유가격인상이경제황을렵게만들지않록잘관리해야합다캐나에서대마초와화약금의품런성분갈때는반드시허된사용🔫👁凸ὰ💲🗯𝙈Ἄ𝒇𝒈𝒘𝒃𝑬𝑶𝕾𝖙𝖗𝖆𝖎𝖌𝖍𝖕𝖊𝖔𝖑𝖉𝖓𝖐𝖜𝖞𝖚𝖇𝕿𝖘𝖄𝖛𝖒𝖋𝖂𝕴𝖟𝖈𝕸👑🚿💡知彼百\uf005𝙀𝒛𝑲𝑳𝑾𝒋𝟒😦𝙒𝘾𝘽🏐𝘩𝘨ὼṑ𝑱𝑹𝑫𝑵𝑪🇰🇵👾ᓇᒧᔭᐃᐧᐦᑳᐨᓃᓂᑲᐸᑭᑎᓀᐣ🐄🎈🔨🐎🤞🐸💟🎰🌝🛳点击查版🍭𝑥𝑦𝑧NG👣\uf020っ🏉ф💭🎥Ξ🐴👨🤳🦍\x0b🍩𝑯𝒒😗𝟐🏂👳🍗🕉🐲چی𝑮𝗕𝗴🍒ꜥⲣⲏ🐑⏰鉄リ事件ї💊「」\uf203\uf09a\uf222\ue608\uf202\uf099\uf469\ue607\uf410\ue600燻製シ虚偽屁理屈Г𝑩𝑰𝒀𝑺🌤𝗳𝗜𝗙𝗦𝗧🍊ὺἈἡχῖΛ⤏🇳𝒙ψՁմեռայինրւդձ冬至ὀ𝒁🔹🤚🍎𝑷🐂💅𝘬𝘱𝘸𝘷𝘐𝘭𝘓𝘖𝘹𝘲𝘫کΒώ💢ΜΟΝΑΕ🇱♲𝝈↴💒⊘Ȼ🚴🖕🖤🥘📍👈➕🚫🎨🌑🐻𝐎𝐍𝐊𝑭🤖🎎😼🕷grntidufbk𝟰🇴🇭🇻🇲𝗞𝗭𝗘𝗤👼📉🍟🍦🌈🔭《🐊🐍\uf10aლڡ🐦\U0001f92f\U0001f92a🐡💳ἱ🙇𝗸𝗟𝗠𝗷🥜さようなら🔼'
ISOLATE_DICT = {ord(c):f' {c} ' for c in SYMBOLS_TO_ISOLATE}
REMOVE_DICT = {ord(c):f'' for c in SYMBOLS_TO_REMOVE}
CHARS_TO_REMOVE = '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n“”’\'∞θ÷α•à−β∅³π‘₹´°£€\×™√²—'
CONTRACTION_MAPPING = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as", "this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", 
"we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have","you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have" }
def handle_punctuation(text):
    """Drop the characters in REMOVE_DICT, then surround those in
    ISOLATE_DICT with spaces so they tokenize as separate symbols."""
    return text.translate(REMOVE_DICT).translate(ISOLATE_DICT)
def clean_contractions(text, mapping=CONTRACTION_MAPPING):
    """Expand English contractions.

    Fancy apostrophe variants are first folded to a plain ``'``; then each
    space-separated token found in ``mapping`` is replaced by its expansion.
    """
    for quote in ("’", "‘", "´", "`"):
        text = text.replace(quote, "'")
    expanded = (mapping.get(token, token) for token in text.split(" "))
    return ' '.join(expanded)
def preprocess(x):
    """Clean one comment string before tokenization.

    Currently only punctuation handling is applied; contraction expansion
    is available but deliberately disabled below.
    """
    x = handle_punctuation(x)
    # x = clean_contractions(x)
    return x
#----------------------------------- Embedding -------------------------------------#
def get_coefs(word, *arr):
    """Split one pretrained-embedding row into (word, float32 vector)."""
    vector = np.asarray(arr, dtype='float32')
    return word, vector
def load_embeddings(path):
    """Load a word -> vector mapping from disk.

    Supports plain-text embedding files (``.txt``/``.vec``, one
    space-separated row per word) and pickled dictionaries (``.pkl``,
    which load much faster). Returns None for any other extension.
    """
    ext = path.split('.')[-1]
    if ext in ['txt','vec']: # for original pretrained embedding files (extension .txt, .vec)
        # BUG FIX: these files were opened in binary mode, but the rows were
        # split with a str separator — bytes.split(' ') raises TypeError in
        # Python 3. Read in text mode instead.
        with open(path, encoding='utf-8', errors='ignore') as f:
            return dict(get_coefs(*line.strip().split(' ')) for line in f)
    if ext =='pkl': # for pickled pretrained embedding files; loading pickles is faster than text
        with open(path,'rb') as f:
            return pickle.load(f)
def build_matrix(word_index, path):
    """Build an embedding matrix aligned with a tokenizer's word index.

    Row ``i`` holds the pretrained 300-d vector for the word whose tokenizer
    index is ``i``; words absent from the pretrained vocabulary keep an
    all-zero row.
    """
    pretrained = load_embeddings(path)
    matrix = np.zeros((len(word_index) + 1, 300))
    for word, idx in word_index.items():
        vector = pretrained.get(word)
        if vector is not None:
            matrix[idx] = vector
    return matrix
# -
# ## Define LSTM model
# +
class Attention(Layer):
    """Soft attention pooling over timesteps (custom Keras layer).

    Scores each timestep by a learned weight vector ``W`` (plus an optional
    per-timestep bias ``b``), softmax-normalises the scores across the
    ``step_dim`` timesteps, and returns the score-weighted sum of the input:
    (batch, step_dim, features) -> (batch, features).
    """
    def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        # step_dim: number of timesteps attended over (MAX_LEN in this file).
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0  # set once the input shape is known, in build()
        super(Attention, self).__init__(**kwargs)
    def build(self, input_shape):
        # Expects a rank-3 input: (batch, steps, features).
        assert len(input_shape) == 3
        self.W = self.add_weight((input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.features_dim = input_shape[-1]
        if self.bias:
            self.b = self.add_weight((input_shape[1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None
        self.built = True
    def compute_mask(self, input, input_mask=None):
        # The time axis is collapsed by this layer, so no mask is propagated.
        return None
    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim
        # Per-timestep score: dot product of each feature vector with W,
        # reshaped back to (batch, step_dim).
        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),
                        K.reshape(self.W, (features_dim, 1))), (-1, step_dim))
        if self.bias:
            eij += self.b
        eij = K.tanh(eij)
        # Manual softmax over timesteps, zeroing masked positions if a mask
        # is supplied; K.epsilon() guards against division by zero.
        a = K.exp(eij)
        if mask is not None:
            a *= K.cast(mask, K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)
    def compute_output_shape(self, input_shape):
        return input_shape[0], self.features_dim
def build_model(embedding_matrix, num_aux_targets):#, loss_weight):
    """Assemble the bidirectional-LSTM toxicity classifier.

    Frozen pretrained embeddings feed two bidirectional LSTM layers; their
    outputs are global max- and average-pooled, passed through two residual
    dense blocks, and projected to one main sigmoid output plus
    ``num_aux_targets`` auxiliary sigmoid outputs.
    """
    token_ids = Input(shape=(MAX_LEN,))
    # Embedding weights come straight from the pretrained matrix and stay frozen.
    embedded = Embedding(*embedding_matrix.shape,
                         weights=[embedding_matrix],
                         trainable=False)(token_ids)
    embedded = SpatialDropout1D(0.2)(embedded)
    lstm_out = Bidirectional(CuDNNLSTM(LSTM_UNITS, return_sequences=True))(embedded)
    lstm_out = Bidirectional(CuDNNLSTM(LSTM_UNITS, return_sequences=True))(lstm_out)
    pooled = concatenate([
        GlobalMaxPooling1D()(lstm_out),
        GlobalAveragePooling1D()(lstm_out),
    ])
    # Two residual dense blocks.
    hidden = add([pooled, Dense(DENSE_HIDDEN_UNITS, activation='relu')(pooled)])
    hidden = add([hidden, Dense(DENSE_HIDDEN_UNITS, activation='relu')(hidden)])
    main_output = Dense(1, activation='sigmoid')(hidden)
    aux_output = Dense(num_aux_targets, activation='sigmoid')(hidden)
    model = Model(inputs=token_ids, outputs=[main_output, aux_output])
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model
# -
# # Data preprocessing
# Preprocess comment texts
train_df['comment_text'] = train_df['comment_text'].progress_apply(lambda x:preprocess(x))
test_df['comment_text'] = test_df['comment_text'].progress_apply(lambda x:preprocess(x))
gc.collect()
# +
x_train = train_df[TEXT_COLUMN].astype(str)
y_train = train_df[TARGET_COLUMN].values
y_aux_train = train_df[AUX_COLUMNS].values
x_test = test_df[TEXT_COLUMN].astype(str)
# Convert target probability to 1 or 0 so they can be used for classification
for column in IDENTITY_COLUMNS + [TARGET_COLUMN]:
train_df[column] = np.where(train_df[column] >=0.5, True, False)
# -
# # Tokenize comment texts
# Return a Keras tokenizer class
tokenizer = text.Tokenizer(filters = CHARS_TO_REMOVE)
tokenizer.fit_on_texts(list(x_train)+ list(x_test))
# Turn text to sequences of tokens
x_train = tokenizer.texts_to_sequences(x_train)
x_test = tokenizer.texts_to_sequences(x_test)
#Pad sequences to the same length
x_train = sequence.pad_sequences(x_train,maxlen=MAX_LEN)
x_test= sequence.pad_sequences(x_test, maxlen=MAX_LEN)
# Per-sample training weights (identity columns are boolean after the
# conversion above):
sample_weights = np.ones(len(x_train), dtype=np.float32)
# +1 for every identity flagged on the row
sample_weights += train_df[IDENTITY_COLUMNS].sum(axis=1)
# + (toxic) * (number of identities NOT flagged)
sample_weights += train_df[TARGET_COLUMN]*(~train_df[IDENTITY_COLUMNS]).sum(axis=1)
# + (non-toxic) * (number of identities flagged) — this combination gets the
# largest boost
sample_weights += (~train_df[TARGET_COLUMN])*train_df[IDENTITY_COLUMNS].sum(axis=1)
# Normalise so the mean weight is 1
sample_weights/=sample_weights.mean()
# ## Create embedding matrix
embedding_matrix = np.concatenate([build_matrix(tokenizer.word_index,f) for f in EMBEDDING_PATHS], axis =-1)
print("Embedding matrix shape:", embedding_matrix.shape)
del train_df, tokenizer
gc.collect()
# # Model training
#
# * 2 models will be trained (NUM_MODELS=2)
# * Make predictions at the end of each epoch
# * Weighted averaging epoch predictions
# * Weights = 2 ** epoch
checkpoint_predictions = []
weights = []
NUM_MODELS = 1
for model_idx in range(NUM_MODELS):
#Passes embedding matrix and aux outputs shape
model = build_model(embedding_matrix, y_aux_train.shape[-1]) #1/sample_weights.mean())
for global_epoch in range(EPOCHS):
model.fit(
x_train,
[y_train, y_aux_train],
batch_size=BATCH_SIZE,
epochs=1,
verbose=1,
sample_weight=[sample_weights.values, np.ones_like(sample_weights)],
callbacks = [
LearningRateScheduler(lambda _: 1e-3*(0.55**global_epoch)) # Decayed learning rate
]
)
# model.save_weights("model_%d_%d.h5" % (model_idx, global_epoch)) # Save model weights
checkpoint_predictions.append(model.predict(x_test, batch_size=2048)[0].flatten())
weights.append(2 ** global_epoch)
del model # If a model didn't get deleted Keras will continue training it eventhough build_model() was used to initialize a model
gc.collect() # It's a good practice to use gc.collect() once the training is done to free up RAM
print (weights)
# Weighted average of the per-epoch checkpoint predictions (weights = 2**epoch).
predictions = np.average(checkpoint_predictions, weights=weights, axis=0)
lstm_submission = pd.DataFrame.from_dict({
    'id': test_df.id,
    'prediction': predictions
})
# NOTE(review): this file is overwritten by the blended submission below —
# rename to e.g. 'lstm_submission.csv' if the LSTM-only output should survive.
lstm_submission.to_csv('submission.csv', index=False)
# Rank-based blend: 40% LSTM, 60% BERT ensemble.
submission = pd.DataFrame.from_dict({
    'id': test_df['id'],
    'prediction': lstm_submission['prediction'].rank(pct=True)*0.4 + bert_submission['prediction'].rank(pct=True)*0.6})
submission.to_csv('submission.csv', index=False)
| 4 jigsaw/jigsaw-starter-blend.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Files
# ## Opening Files
help(open)
fh = open('mailbox.txt')
type(fh)
# print(fh)
# ## Reading file contents
fh.read()
fh.readlines()
# ## Closing the file handler
fh.closed
fh.close()
fh.closed
# ## Using with statement for file handler
print(dir(str))
help(str.strip)
# Collect the domain of every 'From:' address, then count distinct domains.
lst_mails = list()
with open('mailbox.txt') as fh:
    for line in fh.readlines():
        if line.startswith('From:'):
            # BUG FIX: line.strip('From: ') strips ANY of the characters
            # {F, r, o, m, :, space} from both ends — it does not remove the
            # literal prefix, and it can eat leading characters of the address
            # itself (e.g. 'rob@…' -> 'b@…'). The domain is simply everything
            # after the '@', so take it directly.
            lst_mails.append(line.strip().split('@')[1])
len(set(lst_mails))
# Functions
# * Getting the usernames
# * Getting the domain
# * Getting the count of each username
# * Getting the count of each domain name
# * Handle exceptions
| Day_11/files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The plots of the scores
#
# I changed the epsilon from 0.01 to 0.001 at the 6035th game.
import matplotlib.pyplot as plt
import pickle
import numpy as np
with open('mem.pickle','rb') as f:
(_,_,_,scores)=pickle.load(f)
def moving_avg(x, p=10):
    """Return the rolling mean of ``x`` over windows of length ``p``.

    Produces ``len(x) - p + 1`` values; empty when ``x`` is shorter than ``p``.
    """
    return [np.mean(x[start:start + p]) for start in range(len(x) - p + 1)]
np.max(scores)
# Raw per-game scores plus moving averages over 3, 5 and 10 games.
plt.plot(scores)
plt.plot(moving_avg(scores,3))
plt.plot(moving_avg(scores,5))
plt.plot(moving_avg(scores,10))
# FIX: the axis labels were swapped — the x axis is the game index and the
# y axis is the score achieved in that game.
plt.xlabel('games')
plt.ylabel('score')
plt.savefig('plot.png')
| 5_5/.ipynb_checkpoints/analyze-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Constructing and fitting models
#
# The [astropy.modeling](http://docs.astropy.org/en/stable/modeling/index.html) sub-package provides a convenient interface for defining and fitting models. There are of course a number of other packages in Python for fitting/optimization, such as [scipy.optimize](https://docs.scipy.org/doc/scipy/reference/optimize.html) and [lmfit](https://lmfit.github.io/lmfit-py/), but astropy.modeling provides simple to use pre-defined models as well as a common interface for different fitting algorithms. In addition, it simplifies the task of fitting models to multi-dimensional data.
#
# <section class="objectives panel panel-warning">
# <div class="panel-heading">
# <h2><span class="fa fa-certificate"></span> Objectives</h2>
# </div>
#
#
# <div class="panel-body">
#
# <ul>
# <li>Use built-in 1D and 2D models</li>
# <li>Fit 1D and 2D data</li>
# <li>Extracting information about the fit</li>
# <li>Defining compound models</li>
# <li>Working with unitful quantities</li>
# </ul>
#
# </div>
#
# </section>
#
# ## Documentation
#
# This notebook only shows a subset of the functionality in astropy.modeling. For more information about the features presented below as well as other available features, you can read the
# [astropy.modeling documentation](https://docs.astropy.org/en/stable/modeling/).
# %matplotlib inline
import matplotlib.pyplot as plt
plt.rc('image', origin='lower')
plt.rc('figure', figsize=(10, 6))
# ## Using built-in models
#
# The astropy.modeling sub-package includes a number of built-in models, which can be accessed from the astropy.modeling.models module:
# Once you create a model, you can think of it as a parameterized function:
# You can access and set parameters as attributes:
# Multi-dimensional models behave the same except that more inputs have to be passed when calling the model, as expected:
# This also demonstrates that models can be initialized without parameter values, in which case a set of defaults will be chosen. To call the model:
# Models can be called with arrays as input:
# ## Fitting 1D data
#
# Above, we saw how to initialize models and use them as parametrized functions. In some cases, you may want to fit these models to data - for this you can use the astropy.modeling.fitting sub-package:
# We can set up fake data to fit for now:
import numpy as np  # FIX: np is used below but numpy is never imported in this notebook

# Fake 1D dataset: a Gaussian (amplitude 3, mean 1.3, stddev 0.8) plus noise.
x = np.linspace(-5., 5., 200)
y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
y += np.random.normal(0., 0.2, x.shape)
yerr = np.random.uniform(0.15, 0.25, x.shape)
_ = plt.errorbar(x, y, yerr=yerr, fmt='.')
# We start off by creating a fitter - there are a number of different fitters available, and here we will use the Levenberg-Marquardt algorithm (which is implemented by [scipy](https://www.scipy.org/) behind the scenes):
# We set up an initial model to fit to the data:
# and fitting consists of calling the fitter with the initial model and the data:
# or taking errors into account:
# Note that this fitter instance should have an attribute ``fitter.fit_info`` which contains information about the fit. Whether this kind of attribute is available or not depends on the fitter you are using, so be sure to check the documentation for each fitter:
#
# <section class="challenge panel panel-success">
# <div class="panel-heading">
# <h2><span class="fa fa-pencil"></span> Challenge</h2>
# </div>
#
#
# <div class="panel-body">
#
# <p>Try fitting a Lorentzian and a Trapezoidal model to the same data. You can use tab-completion to find these models, or search in the <a href="http://docs.astropy.org/en/stable/modeling/index.html">documentation page</a>. Overplot these models on the data along with the Gaussian fit.</p>
#
# </div>
#
# </section>
#
# ## Fitting 2D data
#
# Fitting 2D data is similar to fitting 1D data, except that the inputs to the fitting need to be two-dimensional arrays. To demonstrate this we start off by generating a fake 2D dataset:
# Fake 2D dataset for the polynomial fit below.
y2, x2 = np.mgrid[:128, :128]
# NOTE(review): the first two terms reduce to 1.5 * x2**2 — the second term was
# probably meant to be y2**2; confirm against the original tutorial source.
z2 = 2. * x2 ** 2 - 0.5 * x2 ** 2 + 1.5 * x2 * y2 - 1.
z2 += np.random.normal(0., 0.1, z2.shape) * 50000.
plt.imshow(z2)
# We now set up a 2D polynomial model:
# and fit it in a similar way to before:
#
# <section class="challenge panel panel-success">
# <div class="panel-heading">
# <h2><span class="fa fa-pencil"></span> Challenge</h2>
# </div>
#
#
# <div class="panel-body">
#
# <p>Read in the GAIA source density FITS file we used in previous tutorials, and try fitting a 2D Gaussian to it.</p>
#
# </div>
#
# </section>
#
# ## Compound models
#
# In real-life cases, models such as simple Gaussians may be too simple, and you may need to fit combinations of models to data. To illustrate how to do this, we modify the simulated data we set up before:
# To fit this, we set up a compound model which is the sum of two model *instances*:
# and we fit it as before:
# Note that the parameter names of a compound model include numerical suffixes to indicate which model they belong to:
# It is also possible to get the individual models back, with the final parameters set:
#
# <section class="challenge panel panel-success">
# <div class="panel-heading">
# <h2><span class="fa fa-pencil"></span> Challenge</h2>
# </div>
#
#
# <div class="panel-body">
#
# <ol>
# <li>Modify the plot of the combined fit to show the individual model components for the best-fit parameters.</li>
# <li>Continue the previous challenge to fit the LMC source density map by fitting two Gaussians to it. Be aware that especially with compound models, initial values matter! [advanced]</li>
# </ol>
#
# </div>
#
# </section>
#
# ## Working with units and quantities
#
# Most models can work with unitful quantities. For example, we can create a Gaussian model with physical units for the input and output values:
# ## Additional functionality
#
# The astropy.modeling sub-package contains a lot more functionality, including:
#
# * Defining your own models
# * Fixing or tying parameters
# * Using custom statistics
#
# See the [documentation](http://docs.astropy.org/en/stable/modeling/index.html) for more!
# <center><i>This notebook was written by <a href="https://aperiosoftware.com/">Aperio Software Ltd.</a> © 2019, and is licensed under a <a href="https://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License (CC BY 4.0)</a></i></center>
#
# 
| 13-modeling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Análisis de los datos obtenidos
# Compararación de tres filamentos distintos
#
# * Filamento de BQ
# * Filamento de formfutura
# * Filamento de filastriuder
# %pylab inline
#Importamos las librerías utilizadas
import numpy as np
import pandas as pd
import seaborn as sns
#Mostramos las versiones usadas de cada librerías
print ("Numpy v{}".format(np.__version__))
print ("Pandas v{}".format(pd.__version__))
print ("Seaborn v{}".format(sns.__version__))
#Abrimos los ficheros con los datos
conclusiones = pd.read_csv('Conclusiones.csv')
columns=['bq','formfutura','filastruder']
#Mostramos un resumen de los datos obtenidos
conclusiones[columns].describe()
# Representamos ambos diámetro y la velocidad de la tractora en la misma gráfica
graf=conclusiones[columns].plot(figsize=(16,10),ylim=(1.5,2.5))
graf.axhspan(1.65,1.85, alpha=0.2)
#datos['RPM TRAC'].plot(secondary_y='RPM TRAC')
conclusiones[columns].boxplot(return_type='axes')
# Aumentando la velocidad se ha conseguido que disminuya el valor máxima, sin embargo ha disminuido el valor mínimo. Para la siguiente iteracción, se va a volver a las velocidades de 1.5- 3.4 y se van a añadir más reglas con unos incrementos de velocidades menores, para evitar saturar la velocidad de traccción tanto a nivel alto como nivel bajo.
# Comparativa de Diametro X frente a Diametro Y para ver el ratio del filamento
# #Filtrado de datos
# Las muestras tomadas $d_x >= 0.9$ or $d_y >= 0.9$ las asumimos como error del sensor, por ello las filtramos de las muestras tomadas.
# NOTE(review): `datos` is not defined anywhere in this notebook — the CSV was
# loaded as `conclusiones` above, so this cell raises NameError as written.
datos_filtrados = datos[(datos['Diametro X'] >= 0.9) & (datos['Diametro Y'] >= 0.9)]
# +
#datos_filtrados.ix[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes')
# -
# ##Representación de X/Y
plt.scatter(x=datos_filtrados['Diametro X'], y=datos_filtrados['Diametro Y'], marker='.')
# #Analizamos datos del ratio
ratio = datos_filtrados['Diametro X']/datos_filtrados['Diametro Y']
ratio.describe()
# FIX: pd.rolling_mean / pd.rolling_std were deprecated in pandas 0.18 and
# removed in 0.23; the Series.rolling(...) accessor is the equivalent API.
rolling_mean = ratio.rolling(50).mean()
rolling_std = ratio.rolling(50).std()
rolling_mean.plot(figsize=(12,6))
# plt.fill_between(ratio, y1=rolling_mean+rolling_std, y2=rolling_mean-rolling_std, alpha=0.5)
ratio.plot(figsize=(12,6), alpha=0.6, ylim=(0.5,1.5))
# #Límites de calidad
# Calculamos el número de veces que traspasamos unos límites de calidad.
# $Th^+ = 1.85$ and $Th^- = 1.65$
Th_u = 1.85
Th_d = 1.65
data_violations = datos[(datos['Diametro X'] > Th_u) | (datos['Diametro X'] < Th_d) |
(datos['Diametro Y'] > Th_u) | (datos['Diametro Y'] < Th_d)]
data_violations.describe()
data_violations.plot(subplots=True, figsize=(12,12))
| ipython_notebooks/07_conclusiones/.ipynb_checkpoints/Conclusiones-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# 
# # Account management
# Qiskit Runtime is available on both IBM Cloud and IBM Quantum. The former requires an IBM Cloud account and the latter an IBM Quantum account. If you don't have these accounts, please refer to [01_introduction_ibm_cloud_runtime.ipynb](01_introduction_ibm_cloud_runtime.ipynb) or [02_introduction_ibm_quantum_runtime.ipynb](02_introduction_ibm_quantum_runtime.ipynb) on how to set one up.
#
#
# There are a number of methods for handling account management. Your account credentials can be saved to disk, or used in a session and never saved.
#
# - `save_account()`: Save your account to disk for future use.
# - `delete_account()`: Delete the saved account from disk.
# - `active_account()`: List the account currently in the session.
# - `saved_account()`: List the account stored on disk.
# ## Storing credentials
# The [save_account()](https://qiskit.org/documentation/partners/qiskit_ibm_runtime/stubs/qiskit_ibm_runtime.QiskitRuntimeService.html#qiskit_ibm_runtime.QiskitRuntimeService.save_account) method can be used to store your account credentials on disk, in the `$HOME/.qiskit/qiskit-ibm.json` file. Once the credentials are saved, you will only need to use `QiskitRuntimeService()` to initialize your account in the future.
# <div class="alert alert-block alert-info">
# <b>Note:</b> Account credentials are saved in plain text, so only do so if you are using a trusted device.
# </div>
# Below are examples of saving an IBM Cloud and an IBM Quantum accounts. The `channel` parameter allows to distinguish between different account types. If you are saving multiple accounts per channel, consider using the `name` parameter to differentiate them.
#
# +
from qiskit_ibm_runtime import QiskitRuntimeService
# Save an IBM Cloud account on disk.
# QiskitRuntimeService.save_account(channel="ibm_cloud", token=<IBM Cloud API key>, instance=<IBM Cloud CRN> or <IBM Cloud service name>)
# Save an IBM Quantum account on disk.
# QiskitRuntimeService.save_account(channel="ibm_quantum", token=<IBM Quantum API token>)
# -
# ## Initializing
# You need to initialize your account in a Python session before you can start using Qiskit Runtime. If you have the credentials already saved, you can initialize an `QiskitRuntimeService` instance without additional parameters.
# Read default credentials from disk.
service = QiskitRuntimeService()
# If you have both an IBM Cloud and an IBM Quantum accounts saved, `QiskitRuntimeService()` by default will load the IBM Cloud account. To load the IBM Quantum account, you can specify `QiskitRuntimeService(channel="ibm_quantum")` instead.
#
# Alternatively, if you specified a `name` for your account when saving it, you can also specify the name of the account to load.
# +
# Save an IBM Cloud account on disk and give it a name.
# QiskitRuntimeService.save_account(channel="ibm_cloud", token=<IBM Cloud API key>, instance=<IBM Cloud CRN>, name="prod")
# service = QiskitRuntimeService(name="prod")
# -
# If you want to use your credentials for just the session instead of saving it, you can pass the credentials in when initializing the `QiskitRuntimeService` instance:
# +
# Initialize an IBM Cloud account without saving it.
# service = QiskitRuntimeService(channel="ibm_cloud", token=<IBM Cloud API key>, instance=<IBM Cloud CRN>)
# +
from qiskit.tools.jupyter import *
# %qiskit_copyright
| docs/tutorials/04_account_management.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
class Number(object):
    """Integer utilities implemented with recursion."""

    def reverse(self, n):
        """Return n with its decimal digits reversed (e.g. 1234 -> 4321).

        Trailing zeros are dropped (100 -> 1). Negative inputs are
        supported by reversing the magnitude and restoring the sign; the
        original code recursed forever for n < 0 because floor division
        of a negative number stalls at -1 instead of reaching 0.
        """
        if n < 0:
            return -self.reverse(-n)
        if n // 10:
            # The last digit becomes the leading digit of the result,
            # then recurse on the remaining (left-hand) digits.
            return 10 ** (len(str((n))) - 1) * (n % 10) + self.reverse(n // 10)
        else:
            return n
    print('Created')
# -
# Exercise the Number class: reverse the digits of 1234.
n = Number()
print(n)
rev_n = n.reverse(1234)
print(rev_n)
print(rev_n + 9)
import pandas as pd
import numpy as np
# 100 evenly spaced sample points over 5*pi (two and a half sine periods).
x = np.linspace(0, 5 * np.pi, 100)
x
y = np.sin(x)
y
import os
# Install matplotlib from inside the notebook if it is missing.
os.system('pip3.7 install matplotlib')
from matplotlib import pyplot as plt
# import matplotlib
# import sys
# sys.version
plt.plot(x, y)
plt.title('New Sine Wave')
plt.plot(x, y)
| docs/.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # VacationPy
# ----
#
# #### Note
# * Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.
#
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
# Dependencies and Setup
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
output_data_file = r"C:\Git\06-Python-APIs\Instructions\output_data\cities.csv"
# Import API key
from api_keys import g_key
import gmaps.datasets
gmaps.configure(api_key=g_key)
# Range of latitudes and longitudes
# -
# ### Store Part I results into DataFrame
# * Load the csv exported in Part I to a DataFrame
# read the cities.csv to create dataframe, previous cells are not needed to be executed as long as cities.csv was created
cities_df = pd.read_csv(r"C:\Git\python-api-challenge\output_data\cities_2.csv", encoding="utf-8")
# Drop NaN Values within dataframe
cities_df = cities_df.dropna()
cities_df.head(10)
len(cities_df)
# ### Humidity Heatmap
# * Configure gmaps.
# * Use the Lat and Lng as locations and Humidity as the weight.
# * Add Heatmap layer to map.
# +
# Store latitude and longitude in locations
locations = cities_df[["Lat", "Lng"]]
# Fill NaN values and convert to float
humidity = cities_df["Humidity"].astype(float)
#Define center location for map by finding coordinates for Bossangoa in Central African Republic
center_location = cities_df.loc[cities_df['City'] == "bossangoa", ["Lat","Lng"]]
#Get coordinates for Bossangoa map centering
center_loc = (6.4926,17.4552)
# Plot Heatmap
fig = gmaps.figure(center=center_loc, zoom_level=2)
# Create heat layer
heat_layer = gmaps.heatmap_layer(locations, weights=humidity,
dissipating=False, max_intensity=max(cities_df["Humidity"]),
point_radius=2)
# Add layer
fig.add_layer(heat_layer)
# Display figure
fig
# -
#Display map center location
center_location
# ### Create new DataFrame fitting weather criteria
# * Narrow down the cities to fit weather conditions.
# * Drop any rows will null values.
# +
# Narrow down the cities to specific temperature range between 70-80F, Wind less than 10mph and 0 Cloudiness
vac_cities_df = cities_df
# Drop any rows that don't contain all three conditions. Want to be sure the weather is ideal.
#Find cities with ideal Max Temperature 70-80F, Wind Spped less than 10mph, and Cloudiness at 0
vac_cities_df = vac_cities_df.loc[(vac_cities_df["Max Temp"] < 80) & (vac_cities_df["Max Temp"] > 70) &\
(vac_cities_df["Wind Speed"] < 10) &\
(vac_cities_df["Cloudiness"] == 0)]
# Drop any empty cells and reset the index
vac_cities_df = vac_cities_df.dropna().reset_index()
vac_cities_df
# -
# ### Hotel Map
# * Store into variable named `hotel_df`.
# * Add a "Hotel Name" column to the DataFrame.
# * Set parameters to search for hotels with 5000 meters.
# * Hit the Google Places API for each city's coordinates.
# * Store the first Hotel result into the DataFrame.
# * Plot markers on top of the heatmap.
# This section is used just to review the JSON response for its attributes.
lat = -16.2325
lng = 39.9086
params = {
    "location": f"{lat},{lng}",
    "radius": 5000,
    "types" : "hotel",
    "key": g_key
}
url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
# Bug fix: the original referenced undefined names `base_url` and
# `requested`, which raised NameError; use the `url` defined above and
# the `response` object returned by requests.get.
response = requests.get(url, params=params)
resp_jsn = response.json()
resp_jsn
#Create hotel_df dataframe from vac_cities_df dataframe
hotel_df = vac_cities_df
#Create an empty hotels list that will be populated later
hotels = []
#Use for loop to cycle through hotel_df and obtain values for latitude, longitude and city
#Additionally, make search based on parameters below to find nearest hotel
for i in range(len(hotel_df)):
    lat = hotel_df.loc[i]["Lat"]
    lng = hotel_df.loc[i]["Lng"]
    city = hotel_df.loc[i]["City"]
    #Assign search parameters for gmaps (5000 m radius around the city)
    params = {
        "location": f"{lat},{lng}",
        "radius": 5000,
        "types" : "hotel",
        "key": g_key
    }
    #Assign url for search
    url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
    response = requests.get(url, params=params)
    resp_jsn = response.json()
    print(f"Retrieving Results for index: {[i]} : {city}.")
    #Error Handling: a city may have no hotel within the search radius,
    #in which case 'results' is missing or empty; record "" for that city.
    try:
        hotels.append(resp_jsn['results'][0]['name'])
    except (KeyError, IndexError):
        hotels.append("")
        print(f"Index not found: {[i]}")
# Attach the collected hotel names and drop rows with any missing values.
hotel_df["Hotel Name"] = hotels
hotel_df = hotel_df.dropna(how='any')
hotel_df
# +
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# +
# Create hotel symbol layer
hotel_layer = gmaps.symbol_layer(
    locations, fill_color='rgba(0, 150, 0, 0.4)',
    stroke_color='rgba(0, 0, 150, 0.4)', scale=2,
    info_box_content=hotel_info
)
# Add the layer to the map
# Bug fix: `markers` was never defined anywhere in this notebook, so the
# original `fig.add_layer(markers)` raised NameError; only the hotel
# symbol layer needs to be added on top of the existing heatmap figure.
fig.add_layer(hotel_layer)
fig
# -
| starter_code/VacationPy_Solved.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Spotify project
# ## Dataset Description
#
# The dataset I will be using for this analysis is from Kaggle and can be accessed here:
# https://www.kaggle.com/zaheenhamidani/ultimate-spotify-tracks-db
#
#
# Most of the columns are self-explanatory, and represent a feature of a song (e.g. genre, name, loudness, acousticness, duration, etc.) Popularity column is an integer number between 0 and 100.
#
#
#
# The goal of this project is to predict how popular a song would be, based on its features. Spotify might be interested in popularity prediction to decide which songs to recommend to their users. Moreover, this analysis would help them make data-driven decisions when deciding things like how much to pay for song licenses.
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn as skl
from scipy.stats import norm
songs = pd.read_csv('/Users/aliyazhaxylykbayeva/Documents/GitHub/personal-projects/SpotifyFeatures.csv')
songs = songs.dropna(thresh=1)
songs.head()
# -
# ### Data Exploration and Cleaning
# I'll do some data exploration and clean the dataset before analyzing it any further to get a better idea of what I'm working with.
genres = songs.groupby('genre').count()
genres.head()
genres = genres.dropna(thresh=1)
genres = genres[['artist_name']]
genres = genres.rename(columns={'artist_name':'count'})
genres.plot.bar()
songs = songs.replace("Children’s Music", "Children's Music")
genres = songs.groupby('genre').count()
genres = genres[['artist_name']]
genres = genres.rename(columns={'artist_name':'count'})
genres.plot.bar()
# +
x = songs[['popularity','danceability','duration_ms','energy','instrumentalness','loudness','liveness','speechiness','tempo', 'valence']]
corr_mat = x.corr()
f, ax = plt.subplots(figsize =(7, 7))
sns.heatmap(corr_mat)
# -
# ### Random Forest
#
# I'll train the algorithm and fit the model to get a better idea of feature importance when it comes to predicting popularity.
#
# #### Edit:
# As I was doing this part and ran two random forests and CV Grid Search, I realized that the most logical approach was to analyze this dataset splitting it up by genre. People who are into Electronic Dance Music probably care about danceability the most, while people who are into classical music put more importance on instrumentalness.
y = songs['popularity']
x = songs[['danceability','duration_ms','energy','instrumentalness','loudness','liveness','speechiness','tempo', 'valence']]
# +
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)
randomforest = RandomForestRegressor(max_depth=10, random_state=0, n_estimators=200, n_jobs=-1)
randomforest.fit(x_train, y_train)
# -
yhat_test=randomforest.predict(x_test)
res_hat = yhat_test-y_test
rmse1=np.sqrt(sum(res_hat**2)/len(yhat_test))
# Root mean squared error is 14.6878, which, in my opinion, is very good given that we only analyzed the song by its quantitative features.
y.describe()
randomforest.feature_importances_
# Based on feature importance, I'll engineer new features that will allow for a more accurate prediction. In this case, duration, loudness, and speechiness are the most important.
x = x.assign(loudness_duration=(x['loudness']*x['duration_ms']))
x = x.assign(loudness_speechiness=(x['loudness']*x['speechiness']))
x = x.assign(speechiness_duration=(x['speechiness']*x['duration_ms']))
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)
randomforest2 = RandomForestRegressor(max_depth=10, random_state=0, n_estimators=200, n_jobs=-1)
randomforest2.fit(x_train, y_train)
yhat_test=randomforest2.predict(x_test)
res_hat = yhat_test-y_test
rootmeansq=np.sqrt(sum(res_hat**2)/len(yhat_test))
# ### Analyzing popularity patterns by genre
#
# I could see from running 2 random forests that accuracy of the predictions was not getting significantly better with the engineered features, so I decided to analyze each genre on its own.
# Train one random forest per genre and record its held-out RMSE.
genre=np.unique(songs['genre'])
rmse2=[]
for i in genre:
    # Restrict the dataset to the current genre.
    temp=songs[songs.genre == i]
    y = temp['popularity']
    x = temp[['danceability','duration_ms','energy','instrumentalness','loudness','liveness','speechiness','tempo', 'valence']]
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)
    randomforest = RandomForestRegressor(max_depth=10, random_state=0, n_estimators=200, n_jobs=-1)
    randomforest.fit(x_train, y_train)
    yhat_test=randomforest.predict(x_test)
    res_hat = yhat_test-y_test
    # Root mean squared error on the 20% test split for this genre.
    rmse2.append((round(np.sqrt(sum(res_hat**2)/len(yhat_test)),3)))
genre=genre.tolist()
# +
genre.append("Overall")
rmse2.append(rmse1)
import seaborn
import matplotlib
plt.figure(figsize=(10, 15))
seaborn.barplot(x=rmse2,y=genre)
# -
# Even with less data in each training set, when analyzing by genre, the testing error (RMSE) was lower than the overall error for every single genre. While some genres like Children's Music or Reggaeton are harder to predict with high accuracy, this experiment has proven that it is very important to take descriptive features like genre into account.
#
# Now, I'd like to ask another interesting question - what is the effect of each song characteristic on its popularity on average. To perform that analysis, I'll use linear regression.
# +
import statsmodels.api as sm
# Add important features
x=songs[['danceability','duration_ms','energy','instrumentalness','loudness','liveness','speechiness','tempo', 'valence']]
x['duration']=x['duration_ms'].div(60000)
# Popularity is likely nonlinear in duration, so I added a column with squared values of duration.
x['duration_sq']= np.square(x['duration'])
x=x.drop(axis=1,columns='duration_ms')
y=songs['popularity']
# Train and fit linear regression
lm=sm.OLS(exog=x, endog=y, hasconst=True)
lm_res = lm.fit()
lm_res.summary()
# -
# ## Conclusion
#
# The linear regression analysis performed above estimates that danceability had the largest per-unit effect on popularity, followed by energy. Speechiness and valence, on the other hand, had the biggest negative effects on popularity.
#
# Longer songs are more popular, but there's a diminishing return, indicated by the negative quadratic term (duration_sq).
| Spotify Project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:cme193]
# language: python
# name: conda-env-cme193-py
# ---
# # Lecture 6 - Scikit-learn
#
# Today we're going to cover the popular machine learning library [Scikit-learn](http://scikit-learn.org/stable/)
#
# First:
# ```bash
# conda install scikit-learn
# ```
#
# Note, when importing scikit-learn, you do
# ```python
# import sklearn # not "scikit-learn"
# ```
#
# This library can do just about anything you would learn in an introductory machine learning class (although it doesn't really do deep learning). This includes:
#
# * Regression
# * SVMs
# * Decision trees/random forests
# * dimensionality reduction
# * clustering
# * validation
# * ...
#
# Supervised learning:
# * Regression and classification methods
# * All types of models: logistic regression, ridge, SVM, lasso regression, decision trees... up to Neural networks (no GPU support)
# * Stochastic Gradient Descent, Nearest-Neighbors,
# * Also features semi-supervised learning, ensemble methods, feature selection methods, Naive Bayes, and Isotonic Regression
#
# Unsupervised learning:
# * Gaussian Mixture Models, Manifold Learning
# * Clustering, Bi-clustering
# * PCA, LDA, Outlier detection, Covariance estimation
#
# You may wish to check out [some examples](http://scikit-learn.org/stable/auto_examples/)
#
# As usual, this class will assume you have some passing familiarity with at least something above, since this class isn't really trying to tell you *why* you may wish to classify something or do a regression, just *how* to do it in Python (or at least point you in that direction).
# ## Loading an example dataset
#
# First we will load some data to play with. The data we will use is a very simple
# flower database known as the Iris dataset.
#
# We have 150 observations of the iris flower specifying some measurements:
#
# - sepal length, sepal width, petal length and petal width together with its subtype:
# *Iris setosa*, *Iris versicolor*, *Iris virginica*.
#
# Yes, we saw this last class as well.
import numpy as np
import sklearn
from sklearn import datasets
iris = datasets.load_iris()
# This data is stored in the `.data` member, which is a `(n_samples, n_features)`
# array.
end_string = '\n' + '--'*25 + '\n'
print(iris.keys(), end = end_string)
print(iris.target.shape, end = end_string)
# The class of each observation is stored in the `.target` attribute of the
# dataset. This is an integer 1D array of length `n_samples`:
print(iris.target.shape)
np.unique(iris.target)
# # The Scikit-learn Paradigm
#
# Almost everything you do in scikit learn will be a variation of the same basic pattern, regardless of the specifics of what you're actually doing
#
# 1. Load the model class
# 2. Initialize a model (this is where you specify parameters)
# 3. Fit your model to data
# 4. (Context dependent) - predict, visualize, explore your fit model
# # Basic Classification
#
# We'll start with a nearest neighbor classifier.
#
# ## k-Nearest neighbors classifier
#
# The simplest possible classifier is the nearest neighbor: given a new
# observation, take the label of the training samples closest to it in
# *n*-dimensional space, where *n* is the number of *features* in each sample.
#
# The k-nearest neighbors classifier internally uses an algorithm based on
# ball trees to represent the samples it is trained on.
#
# 
#
# Note that most functionality in `sklearn` lives in modules, so you'll need to do something like
# ```python
# from sklearn import neighbors
# ```
# +
from sklearn import datasets
iris = datasets.load_iris()
from sklearn import neighbors # access to model class
knn = neighbors.KNeighborsClassifier() # initialize a model (default parameters)
knn.fit(iris.data, iris.target) # fit the model
knn.predict([[0.1, 0.2, 0.3, 0.4]]) # do something with the model
# -
knn.get_params()
# ## Training set and testing set
#
# When experimenting with learning algorithms, it is important not to test the
# prediction of an estimator on the data used to fit the estimator.
#
# Indeed, with the kNN estimator, we would always get perfect prediction on the training set.
### Manually
perm = np.random.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
knn.fit(iris.data[:100], iris.target[:100])
knn.score(iris.data[100:], iris.target[100:])
# +
# Preferred
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
# split holding out 40 %
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.4, random_state=0)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
# We are drastically reducing the size of our training data, better to do k-fold cross validation
scores = cross_val_score(knn, iris.data, iris.target, cv=5)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# -
# ## Other Classifiers
#
# SVM, decision trees, random forests...
# +
from sklearn import svm
model = svm.SVC() # you can pass in various key-word arguments
model.fit(X_train, y_train)
# -
model.score(X_test, y_test)
# # Clustering
#
# ## K-means
#
# A simple clustering algorithm is k-means. This divides a set into *k*
# clusters, assigning each observation to a cluster so as to minimize the distance
# of that observation (in *n*-dimensional space) to the cluster's mean; the means
# are then recomputed. This operation is run iteratively until the clusters
# converge, for a maximum for `max_iter` rounds.
#
# (An alternative implementation of k-means is available in SciPy's `cluster`
# package. The `scikit-learn` implementation differs from that by offering an
# object API and several additional features, including smart initialization.)
#
# [sklearn kmeans](http://scikit-learn.org/stable/modules/clustering.html#k-means)
from sklearn import cluster
k_means = cluster.KMeans(n_clusters=3)
labels= k_means.fit_predict(iris.data)
print(labels[::10])
print(iris.target[::10])
# ## Other Clustering Methods
#
# Most standard clustering algorithms are available in scikit-learn
#
# [clustering in sklearn](http://scikit-learn.org/stable/modules/clustering.html)
# Agglomerative (hierarchical) clustering on the same data, single linkage.
agglom = cluster.AgglomerativeClustering(n_clusters=3, linkage="single")
# Bug fix: the original called k_means.fit_predict here (copy-paste from the
# previous cell), so the agglomerative model was never actually used; cluster
# with the model constructed on the line above instead.
labels = agglom.fit_predict(iris.data)
print(labels[::10])
print(iris.target[::10])
# # Regression
#
# In regression, we're looking to predict a response $y$ from data $X$. Scikit learn implements most basic regression models, as well as some less standard ones.
#
# If you're familiar with R, the models should be familiar, but the API is new.
#
# [sklearn regression](http://scikit-learn.org/stable/modules/linear_model.html)
#
# ## Logistic Regression
#
# Let's do something a little less trivial than what we have above. We'll use the pandas, patsy, and statsmodels packages
#
# ```bash
# conda install pandas statsmodels patsy
# ```
#
# [statsmodels](https://www.statsmodels.org/stable/index.html) is another python library for statistical estimation problems. We'll use it for an included dataset.
#
# [patsy](https://patsy.readthedocs.io/en/latest/) helps specify statistical models.
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
import numpy as np
# We'll use the following data set, which contains data about the incidence of extramarital affairs in marriages
print(sm.datasets.fair.SOURCE)
print(sm.datasets.fair.NOTE)
# +
# load dataset
data = sm.datasets.fair.load_pandas().data
# add "affair" column: 1 represents having affairs, 0 represents not
data['affair'] = (data.affairs > 0).astype(int)
data
# -
print("Affair proportion by children: \n \n {}\n".format(data.groupby('children')['affair'].mean()))
print("Affair proportion by age: \n \n {}".format(data.groupby('age')['affair'].mean()))
# groups marriages by how they are rated by the couple
data.groupby('rate_marriage').mean()
# We'll visualize a histogram of education levels of the courples
data.educ.hist()
plt.title('Histogram of Education')
plt.xlabel('Education Level')
_ = plt.ylabel('Frequency')
data.rate_marriage.hist()
plt.title('Histogram of Marriage Rating')
plt.xlabel('Marriage Rating')
_ = plt.ylabel('Frequency')
data.corr()
plt.imshow(data.corr())
plt.colorbar()
plt.show()
pd.plotting.scatter_matrix(data, figsize=(15, 15))
plt.show()
affair_yrs_married = pd.crosstab(data.yrs_married, data.affair.astype(bool))
affair_yrs_married.div(affair_yrs_married.sum(1).astype(float), axis=0).plot(kind='bar', stacked=True, figsize=(10,10))
plt.title('Affair Percentage by Years Married')
plt.xlabel('Years Married')
plt.ylim([0,1.25])
_ = plt.ylabel('Percentage')
# Now we'll use the [patsy](https://patsy.readthedocs.io/en/latest/) library to create some data matrices from our data frames.
#
# Our features will be all the (non-affair) features in the original dataset, and the response will be the presence of an affair.
from patsy import dmatrices
# create dataframes with an intercept column and dummy variables for
# occupation and occupation_husb
y, X = dmatrices('affair ~ rate_marriage + age + yrs_married + children + \
religious + educ + C(occupation) + C(occupation_husb)',
data, return_type="dataframe")
print(X.columns)
X = X.rename(columns = {'C(occupation)[T.2.0]':'occ_2',
'C(occupation)[T.3.0]':'occ_3',
'C(occupation)[T.4.0]':'occ_4',
'C(occupation)[T.5.0]':'occ_5',
'C(occupation)[T.6.0]':'occ_6',
'C(occupation_husb)[T.2.0]':'occ_husb_2',
'C(occupation_husb)[T.3.0]':'occ_husb_3',
'C(occupation_husb)[T.4.0]':'occ_husb_4',
'C(occupation_husb)[T.5.0]':'occ_husb_5',
'C(occupation_husb)[T.6.0]':'occ_husb_6'})
# flatten y dataframe into a response array in numpy
y = np.ravel(y)
X
# Now we're ready to fit a logistic regression model
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
# +
# instantiate a logistic regression model, and fit with X and y
model = LogisticRegression()
model = model.fit(X, y)
# check the accuracy on the training set
model.score(X, y)
# -
y.mean()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
model2 = LogisticRegression()
model2.fit(X_train, y_train)
# predict class labels for the test set
predicted = model2.predict(X_test)
print("Predicted {} affairs in {} points".format(predicted.sum(), X_test.shape[0]))
# generate class probabilities
probs = model2.predict_proba(X_test)
print(probs)
# Now we'll create an [ROC curve](https://en.wikipedia.org/wiki/Receiver_operating_characteristic) to evaluate the model's ability to predict.
# +
# generate evaluation metrics
roc_auc = metrics.roc_auc_score(y_test, probs[:, 1])
acc = metrics.accuracy_score(y_test, predicted)
print("Accuracy score: {}".format(acc))
print("ROC-AUC score {}".format(roc_auc))
# -
fpr, tpr, thresholds = metrics.roc_curve(y_test, probs[:, 1], pos_label=1)
plt.figure(figsize=(10,10))
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
conf_matrix= metrics.confusion_matrix(y_test, predicted)
print(conf_matrix)
# Classification report
report =metrics.classification_report(y_test, predicted)
print(report)
# ## Attribution
#
# Portions of this notebook ar a Jupyter Notebook port of the `scikit-learn` lecture from the
# open source [Scipy Lecture Notes][scipy-lec-notes] by <NAME> and Gael
# Varoquaux.
#
# [scipy-lec-notes]: http://www.scipy-lectures.org/
| nb/2018_fall/Lecture6-Scikit-learn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="3gZxLw3s-iNR"
import cv2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import datasets,layers,models
from tensorflow import keras
# + id="mDqur3AGC5G6" colab={"base_uri": "https://localhost:8080/"} outputId="c03980cf-1ead-469a-82d4-fb63d4faaf0f"
# loading cifar10 database
(x_train,y_train),(x_test,y_test)=datasets.cifar10.load_data()
# + colab={"base_uri": "https://localhost:8080/"} id="uvl1HOcxDxdO" outputId="91ff691e-9961-47f9-8a1e-fd66b25c7532"
x_train.shape # we have 50,000 samples each sample is 32 by 32 image and 3 is for rgb channels
# + colab={"base_uri": "https://localhost:8080/"} id="Zj5VF22sEOHK" outputId="d39efe32-b4a1-41da-e4ff-f5c288bd3069"
x_test.shape
# + colab={"base_uri": "https://localhost:8080/"} id="GMUW_ANcEZZ9" outputId="9e489faf-1e64-4fc9-f33a-d2f8015cfe87"
# we are checking the data
x_train[0]
# + colab={"base_uri": "https://localhost:8080/"} id="4ejKpa13GLAw" outputId="e5b227e4-4f4c-41ee-d74c-75c88c532eb8"
y_train.shape
# + colab={"base_uri": "https://localhost:8080/"} id="BQfIPvhDEgrp" outputId="aada2a5c-2388-4007-95fc-dbc4cd2ddf27"
y_train[:5] # print first 5 labels like in 6th we have aeroplane in 9 we have bird
# we get a 2-d array but we dont need a 2-d array so we reshape thsi into 1-d
# + colab={"base_uri": "https://localhost:8080/"} id="arMzjvVLGmeL" outputId="52d75de3-b105-48b9-f0b7-3126ef114f88"
y_train=y_train.reshape(-1,) # -1 means we keep the first thing as it is like 50000 and changing 1
y_train[:5]
# + id="-MffGZTkHPXh"
classes=['aeroplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="VBFeNjU7Hz-1" outputId="286d5341-c68d-4a77-a75c-ba0df980c56a"
classes[7]
# + id="LyzNGVapEv02"
# this function is taking x ,y and index and showing image of a particular index
def plot_sample(x, y, index):
    """Display the image at position `index` of `x`, captioned with its class name."""
    # A small figure keeps the low-resolution 32x32 image from being blown up.
    plt.figure(figsize=(15, 2))
    image = x[index]
    label_text = classes[y[index]]
    plt.imshow(image)
    plt.xlabel(label_text)
# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="OJHms_QyExDn" outputId="0e057d17-95be-4ebe-9c86-e7890dda044f"
plot_sample(x_train,y_train,0)
# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="0KnEmfkNExFk" outputId="daec26eb-76be-421f-fa33-eae4ecad03a4"
plot_sample(x_train,y_train,20)
# + [markdown] id="jEzzL972LH5A"
# ***Preprocessing***
# + id="qdmSh8UlExKH"
# Now we want to normalize our data
# we divide each pixel by 255 coz pixel ranges from 0 to 255 so to normalize them into 0 to 1
# + id="L8KqK11jExM7"
x_train=x_train/255
x_test=x_test/255
# + colab={"base_uri": "https://localhost:8080/"} id="ke_XkHgdLkRV" outputId="f3494752-1b65-4c5f-a6fa-c5544670ca6c"
ann= models.Sequential([
layers.Flatten(input_shape=(32,32,3)), #flatten layer is a first layer which accept 32 by 32 by 3
layers.Dense(3000,activation='relu'),
layers.Dense(1000,activation='relu') , # we have 2 deep layers one having 3k neurons and other have 1k neurons
layers.Dense(10,activation='softmax') # last layer have 10 coz we have 10 categories
])
ann.compile(optimizer='SGD',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
ann.fit(x_train,y_train,epochs=5)
# + colab={"base_uri": "https://localhost:8080/"} id="ISLlkchp2yDV" outputId="5d215596-f61c-4362-8efc-01f9622d832f"
ann.evaluate(x_test,y_test)
# + id="knGhOCLV21fE" colab={"base_uri": "https://localhost:8080/"} outputId="9e2f7c4c-3562-4375-b3d7-0e59c488a1fd"
cnn= models.Sequential([
#cnn
layers.Conv2D(filters=32,activation='relu',kernel_size=(3,3),input_shape=(32,32,3)), #convolution is detecting the features in your image
layers.MaxPooling2D((2,2)),
#is a type of operation that is typically added to CNNs following individual convolutional layers.
# When added to a model, max pooling reduces the dimensionality of images by reducing the number of pixels in the output
#from the previous convolutional layer
layers.Conv2D(filters=32,kernel_size=(3,3),activation='relu',input_shape=(32,32,3)), #convolution is detecting the features in your image
layers.MaxPooling2D((2,2)),
#dense
layers.Flatten(),
layers.Dense(3000,activation='relu'),
layers.Dense(10,activation='softmax') # last layer have 10 coz we have 10 categories
])
cnn.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
cnn.fit(x_train,y_train,epochs=5)
# + id="HjgBsTEv21hI"
# + id="IUpvf0mR21k_"
# + id="0FugRlmQ21n5"
# hyper tunning
# + id="IiZQcaEW62gg"
def build_model(dropout_rate):
    """Build and compile a three-conv-block CNN for CIFAR-10 classification.

    Parameters
    ----------
    dropout_rate : float
        Fraction of units dropped before the final softmax layer; this is
        the hyperparameter tuned by the grid search below.

    Returns
    -------
    keras.Sequential
        A compiled model ready to be fit.
    """
    modeel=keras.Sequential() # Sequential container: layers are applied in order.
    modeel.add(layers.Conv2D(filters=64,kernel_size=(3,3),activation='relu',input_shape=(32,32,3))),
    modeel.add(layers.MaxPooling2D((2,2))),
    modeel.add(layers.Conv2D(filters=64,kernel_size=(3,3),activation='relu')),
    modeel.add(layers.MaxPooling2D((2,2))),
    modeel.add(layers.Conv2D(filters=64,kernel_size=(3,3),activation='relu')),
    modeel.add(layers.MaxPooling2D((2,2))),
    # Flatten the feature maps, then one wide dense layer before classification.
    modeel.add(layers.Flatten()),
    modeel.add(layers.Dense(3000,activation='relu'))
    # Dropout only before the output layer; rate comes from the grid search.
    modeel.add(layers.Dropout(dropout_rate))
    modeel.add(layers.Dense(10,activation='softmax'))
    modeel.compile(optimizer='adam',
            #loss='sparse_categorical_crossentropy',
            loss=tf.keras.losses.SparseCategoricalCrossentropy(
                from_logits=False, reduction="auto", name="sparse_categorical_crossentropy"
            ) ,
            metrics=['accuracy'] )
    return modeel
# + id="i3w2qUJd66B-"
from sklearn.model_selection import GridSearchCV
# NOTE(review): keras.wrappers.scikit_learn was removed in recent Keras
# releases; this import only works on older TF/Keras versions.
from keras.wrappers.scikit_learn import KerasClassifier
# Wrap the Keras model factory so scikit-learn's GridSearchCV can drive it.
moodel=KerasClassifier(build_fn=build_model,verbose=1)
# Search space: dropout rates to try; each candidate trains for 5 epochs.
dropout_rate=[0.1,0.2,0.3,0.4,0.6]
epochs=[5]
paramgrid=dict(dropout_rate=dropout_rate,epochs=epochs)
# 3-fold cross-validated grid search over the dropout rates.
grid=GridSearchCV(estimator=moodel,param_grid=paramgrid,cv=3)
# + colab={"base_uri": "https://localhost:8080/"} id="r_YGttht66uO" outputId="4c8570f9-291f-4038-d339-5a435e97e554"
# Run the full search (trains one model per fold per candidate).
grid_result=grid.fit(x_train,y_train)
# + id="6LjuKSom66xB" colab={"base_uri": "https://localhost:8080/"} outputId="9be57550-8d48-47b2-bea2-4240326f4d9d"
# summarise result: best configuration, then mean/std CV accuracy per candidate.
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
# + id="atu59jMW66z3"
| Code/c.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mepix/CropWeedClassification/blob/main/CropCNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Gm60WrL_EXKB"
# # Crop & Weed Classification
#
# + id="K7W-5R21WhP2"
# Import Libaries
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.data import Subset
import torchvision
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda, Compose
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from torch.optim import lr_scheduler
import time
import os
import copy
from torch.utils.data.sampler import WeightedRandomSampler
import seaborn as sn
import pandas as pd
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import confusion_matrix
# + colab={"base_uri": "https://localhost:8080/"} id="lhjfB7ZZXcm6" outputId="74266894-2576-402a-bb93-84acfca35663"
# Connect to Google Drive: mounts the user's Drive at /content/gdrive so the
# dataset folders and saved-model output path (defined below) are reachable.
from google.colab import drive
drive.mount('/content/gdrive')
# + [markdown] id="YaV4JXgqC5s7"
# # Data Preparation
#
# The following section sets the network parameters and loads the data from the desired folders.
# + colab={"base_uri": "https://localhost:8080/"} id="Qlq5nSNDXFMa" outputId="b1a1fc11-4c02-444a-cee4-c4a17ac0f3c2"
# Set the Batch Size & Hyperparameters
batch_size = 32
epochs = 100
eta = 0.01  # learning rate
imgDim = 224  # input image side length (matches the Resize transform below)
step_size = 25  # epochs between StepLR learning-rate decays
gamma = 0.01  # multiplicative LR decay factor
model_type = "ResNet" # "ResNet", "AlexNet", "Simple"
optimizer_type = "Adam" #"Adam", "SGD"
scheduler_type = "StepLR"
do_weighted_random_sample = True
do_three_classes = False
use_train_test_split = False # Should Use Weighted Random Sampling for Better Performance
# Record Keeping
do_save_model = True
path_to_saved_model = '/content/gdrive/MyDrive/EE244/output/'
# Use the function train_test_split to divide data that is in one folder
if use_train_test_split:
    do_three_classes = False
    path_to_data = '/content/gdrive/MyDrive/EE244/data/Split-Classes'
# use the data that is already presplit into folders manually
if do_three_classes:
    path_to_data_train = "/content/gdrive/MyDrive/EE244/data/Split-Classes-X3/Data-Train"
    path_to_data_test = "/content/gdrive/MyDrive/EE244/data/Split-Classes-X3/Data-Test"
    labels = ["Crop","Weed_Big","Weed_Small"]
else: #do the full dataset
    path_to_data_train = "/content/gdrive/MyDrive/EE244/data/Data-Train"
    path_to_data_test = "/content/gdrive/MyDrive/EE244/data/Data-Test"
    labels = ["Crop","Weed1","Weed2","Weed3","Weed4","Weed5","Weed6","Weed7","Weed8","Weed9"]
# Normalize to Zero Mean: the Simple model works on 1-channel grayscale,
# the pretrained-style backbones expect 3-channel ImageNet statistics.
if model_type == "Simple":
    transform = transforms.Compose(
        [
         transforms.Resize([224,224]),
         transforms.ToTensor(),
         transforms.Grayscale(),
         transforms.Normalize((0.449), (0.226)) #MEAN & STDDEV for ResNet
        ])
else:
    transform = transforms.Compose(
        [
         transforms.Resize([224,224]),
         transforms.ToTensor(),
         transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) #MEAN & STDDEV for ResNet
        ])
# Check GPU or CPU: use CUDA when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Compute samples weight (each sample should get its own weight)
def getSampleWeights(dataset, verbose=True):
    """Build a WeightedRandomSampler that rebalances a class-imbalanced dataset.

    Each sample is weighted by the inverse frequency of its class, so every
    class is drawn with roughly equal probability during training.

    Args:
        dataset: object with a ``targets`` sequence of integer class labels
            (e.g. a torchvision ImageFolder).
        verbose: when True (default, matching the original behavior), print
            the intermediate label/count/weight tensors for inspection.

    Returns:
        torch.utils.data.WeightedRandomSampler drawing len(dataset.targets)
        samples per epoch.
    """
    target = torch.Tensor(dataset.targets).type(torch.LongTensor)
    if verbose:
        print("Target Labels:", target)
    # Number of occurrences of each class, ordered by class index.
    class_sample_count = torch.tensor(
        [(target == t).sum() for t in torch.unique(target, sorted=True)])
    if verbose:
        print("Sample Count:", class_sample_count)
    # Inverse-frequency weight per class.
    weight = 1. / class_sample_count.float()
    if verbose:
        print("Weights:", weight)
    # Expand class weights to one weight per individual sample.
    # NOTE(review): weight[t] indexing assumes labels are 0..K-1 with no gaps,
    # which holds for ImageFolder targets.
    samples_weight = torch.tensor([weight[t] for t in target])
    if verbose:
        print("Samples Weights:", samples_weight)
    sampler = WeightedRandomSampler(samples_weight, len(samples_weight))
    return sampler
# Build train/test datasets and dataloaders. Two strategies:
#  - random split of one folder (use_train_test_split), or
#  - pre-split folders, optionally with class-rebalancing weighted sampling.
if use_train_test_split:
    # Load the Images from the Image Folder
    dataset = datasets.ImageFolder(root=path_to_data, transform=transform)
    # Split into Train and Test Set (25% held out for testing)
    pct_test = 0.25
    train_idx, test_idx = train_test_split(list(range(len(dataset))), test_size=pct_test)
    data = {}
    data['train'] = Subset(dataset,train_idx)
    data['test'] = Subset(dataset,test_idx)
    # Build the Train and Test DataLoaders
    dataloaders = {x:DataLoader(data[x],batch_size, num_workers=2,shuffle=True) for x in ['train','test']}
else:
    # Get Train & Test Set (With Weighted Random Sampling)
    data = {}
    data['train'] = datasets.ImageFolder(root=path_to_data_train, transform=transform)
    data['test'] = datasets.ImageFolder(root=path_to_data_test, transform=transform)
    if do_weighted_random_sample:
        # Rebalance only the training set; the test loader keeps natural order.
        sampler = {}
        sampler['train'] = getSampleWeights(data['train'])
        sampler['test'] = None #getSampleWeights(data['test'])
        # Build the Train and Test DataLoaders
        dataloaders = {x:DataLoader(data[x],batch_size, num_workers=2,sampler=sampler[x]) for x in ['train','test']}
    else:
        # Build the Train and Test DataLoaders
        dataloaders = {x:DataLoader(data[x],batch_size, num_workers=2,shuffle=True) for x in ['train','test']}
# def multi2tri(data,verbose=False):
#   """Turns a MultiClass Dataset into a 3-Class Dataset"""
#https://discuss.pytorch.org/t/change-labels-in-data-loader/36823/13
#   if verbose: print("Input Target Classes:",np.unique(data.targets))
#   data.targets = torch.tensor(data.targets).type(torch.LongTensor)
#   data.targets[data.targets > 2] = 2
#   if verbose: print("Output Target Classes:",np.unique(data.targets))
#   return data
# for k in range(10):
#   if verbose: print(data.targets[data.targets == k])
#   data.targets[data.targets == k] = 2
# # data.targets[data.targets > 2] = 2
#   return data
# Pull one batch to sanity-check shapes: x is (batch, C, H, W), y is (batch,).
x,y = next(iter(dataloaders['train']))
# Rebundle Datasets: sizes used later for per-epoch loss/accuracy averaging.
dataset_sizes = {
    'train':len(data['train']),
    'test':len(data['test'])}
# Check Sizes
print(x.shape, y.shape)
# print("Original Dataset",len(dataset))
print("Training Dataset",len(data['train']))
print("Testing Dataset",len(data['test']))
# for i, (x, y) in enumerate(dataloaders['train']):
#   print("batch index {}, 0/1: {}/{}".format(
#       i, (y == 0).sum(), (y == 1).sum()))
# + colab={"base_uri": "https://localhost:8080/", "height": 573} id="WO0uhVTDA0lO" outputId="6fb5d4dd-6df1-4d93-cd66-a1114a6bc8af"
def plotHistogram(data, number_bins, title=None):
    """Draw a normalized histogram of the dataset's target labels.

    Visualizes the class balance of `data` (any object exposing `.targets`).
    """
    label_values = data.targets
    # density=True normalizes bar heights to a probability distribution.
    plt.hist(label_values, number_bins, density=True, facecolor='b', alpha=0.75)
    plt.xlabel('Target Label')
    plt.ylabel('Probability')
    plt.title(title)
    # Fix both axes so the train and test histograms are directly comparable.
    plt.xlim(0, number_bins - 1)
    plt.ylim(0, 1)
    plt.grid(True)
    plt.show()


plotHistogram(data["train"], len(labels), "Histogram of Training Data")
plotHistogram(data["test"], len(labels), "Histogram of Test Data")
# + id="JLAX3QUzfRWy"
# Determine the MEAN and STD Manually and then re-run block
# mean = 0.0
# std = 0.0
# nb_samples = 0.0
# for data in dataloaders['train']: #assume same for test and train data
# batch_samples = data.size(0)
# data = data.view(batch_samples, data.size(1), -1)
# mean += data.mean(2).sum(0)
# std += data.std(2).sum(0)
# nb_samples += batch_samples
# mean /= nb_samples
# std /= nb_samples
# print(mean)
# print(std)
#print(dataset.data.shape)
# print(data['train'].mean(axis=(0,1,2))/255)
# print(trainset.data.std(axis=(0,1,2))/255)
#[0.49139968 0.48215841 0.44653091]
#[0.24703223 0.24348513 0.26158784]
# https://discuss.pytorch.org/t/computing-the-mean-and-std-of-dataset/34949/15
# https://pytorch.org/docs/stable/generated/torch.nn.functional.normalize.html
# https://discuss.pytorch.org/t/about-normalization-using-pre-trained-vgg16-networks/23560/6?u=ptrblck
# + colab={"base_uri": "https://localhost:8080/", "height": 231} id="6AtiMGfBdi8g" outputId="84bbc103-da42-4919-e2b1-38c68f82752a"
def imshow(inp, title=None):
    """Display a (C, H, W) image tensor, undoing the ImageNet normalization."""
    # Tensor layout (C, H, W) -> numpy (H, W, C) expected by matplotlib.
    img = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    # Invert Normalize(mean, std), then clamp to the displayable [0, 1] range.
    img = np.clip(img * std + mean, 0, 1)
    plt.imshow(img)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # give the GUI backend a moment to refresh


# Pull one training batch: images [batch_size, C, H, W] plus class indices.
inputs, classes = next(iter(dataloaders['train']))
# Tile the batch into a single grid image and show it with its integer labels,
# verifying the data pipeline imported everything correctly.
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[int(classes[x]) for x in range(batch_size)])
# + [markdown] id="hB7WdkAyE4rb"
# # Build & Train the Network Model
#
# This code is based upon the PyTorch [Transfer Learning for Computer Vision Tutorial](https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html) and related documentation for image classification with PyTorch.
# + id="tCqmuRgbPYem"
# Define Simple Newtork model
class SimpleNeuralNetwork(nn.Module):
    """Baseline MLP classifier: flattens a 224x224 single-channel image and
    passes it through two hidden ReLU layers to 10 class logits.

    (The 224*224 input size matches the Grayscale "Simple" transform
    configured earlier in the notebook.)
    """

    def __init__(self):
        super(SimpleNeuralNetwork, self).__init__()
        self.flatten = nn.Flatten()
        hidden = 512
        # Attribute name and layer order preserved so state_dict keys match.
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(224 * 224, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 10),
        )

    def forward(self, x):
        # (N, 1, 224, 224) -> (N, 50176) -> (N, 10) raw logits
        return self.linear_relu_stack(self.flatten(x))
# + id="MeVQZG7ynqLF"
def getNetworkModel(model_type="ResNet",step_size=7,gamma=0.1,optimizer_type=None,scheduler_type="StepLR"):
    """Create a (model, criterion, optimizer, scheduler) bundle for training.

    Args:
        model_type: "ResNet" or "AlexNet" (fetched untrained from torch.hub)
            or "Simple" (the local SimpleNeuralNetwork MLP).
        step_size: epochs between learning-rate decays for StepLR.
        gamma: multiplicative LR decay factor for StepLR.
        optimizer_type: "Adam", "SGD", or anything else for no optimizer.
        scheduler_type: "StepLR" or anything else for no scheduler.

    Returns:
        (model, criterion, optimizer, scheduler); returns None for an unknown
        model_type. optimizer/scheduler may be None depending on the options.

    NOTE(review): relies on the notebook globals `eta` (learning rate) and
    `device` defined in earlier cells.
    """
    # Load in the requested backbone; pretrained=False trains from scratch.
    if model_type == "ResNet":
        model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet18', pretrained=False)
    elif model_type == "AlexNet":
        model = torch.hub.load('pytorch/vision:v0.10.0', 'alexnet', pretrained=False)
    elif model_type == "Simple":
        model = SimpleNeuralNetwork()
    else:
        return None  # TODO: Try other Models
    # Push to GPU (or CPU fallback, per the global `device`).
    model = model.to(device)
    # Multi-class classification loss.
    criterion = nn.CrossEntropyLoss()
    # Observe that all parameters are being optimized.
    if optimizer_type == "Adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=eta) #https://pytorch.org/docs/master/generated/torch.optim.Adam.html
    elif optimizer_type == "SGD":
        optimizer = torch.optim.SGD(model.parameters(), lr=eta)
    else:
        optimizer = None
    # Decay LR by `gamma` every `step_size` epochs.
    # FIX: only build the scheduler when an optimizer exists; the original
    # passed optimizer=None into StepLR and crashed for unknown optimizer_type.
    if scheduler_type == "StepLR" and optimizer is not None:
        scheduler = lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
    else:
        scheduler = None
    return model, criterion, optimizer, scheduler
# + id="dQgMQvECmOT3"
def train_model(model, criterion, optimizer, scheduler, num_epochs=5):
    """Train `model`, tracking per-epoch metrics and keeping the best weights.

    Runs a train phase and a test (validation) phase each epoch using the
    notebook globals `dataloaders`, `dataset_sizes` and `device`. The model
    weights with the best test accuracy are restored before returning.

    Returns:
        (best_model, training_acc, training_loss, validation_acc) where the
        three lists hold one value per epoch.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    # Store training values in lists (one entry per epoch)
    training_acc = []
    training_loss = []
    validation_acc = []
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'test']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train (grads disabled during test)
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels.long())
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics: loss is re-weighted by batch size for averaging
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                # Advance the learning-rate schedule once per training epoch.
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # Store Performance in Arrays
            if phase == 'train':
                training_loss.append(epoch_loss)
                training_acc.append(epoch_acc.item())
            else:  # must be validation
                validation_acc.append(epoch_acc.item())
            # deep copy the model whenever test accuracy improves
            if phase == 'test' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best Test Acc: {:4f}'.format(best_acc))
    # load best model weights before returning
    model.load_state_dict(best_model_wts)
    return model, training_acc, training_loss, validation_acc
# + id="o8uSw0h0o0XX"
def plotResults(training_loss, training_acc, test_acc, titleStr=""):
    """Plot training loss, training accuracy and test accuracy side by side
    in a single three-panel figure with the given title."""
    plt.figure(figsize=(15, 3))
    plt.suptitle(titleStr, fontsize=15)
    # (subplot position, data series, y-axis label) for each panel,
    # drawn left to right in the same order as the original layout.
    panels = [
        (131, training_loss, 'Training Loss'),
        (132, training_acc, 'Training Accuracy'),
        (133, test_acc, 'Test Accuracy'),
    ]
    for position, series, ylabel in panels:
        plt.subplot(position)
        plt.plot(series)
        plt.xlabel('Epoch', fontsize=10)
        plt.ylabel(ylabel, fontsize=10)
        plt.grid(True)
    # Widen the gap between panels so the y-labels don't collide.
    plt.subplots_adjust(wspace=.5)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="I5YXgFjKpCim" outputId="492ccc57-6a07-4252-ab77-040213cfb23c"
# Get a Clean Model for No Augmentation (fresh weights, per the globals above)
model, criterion, optimizer, scheduler = getNetworkModel(model_type=model_type,step_size=step_size,gamma=gamma,optimizer_type=optimizer_type,scheduler_type=scheduler_type)
# Train the Network and collect per-epoch metrics
model_trained, train_acc, train_loss, test_acc = train_model(model, criterion, optimizer,scheduler, num_epochs=epochs)
# Plot the Results (loss + train/test accuracy curves)
plotResults(train_loss, train_acc, test_acc,titleStr="ResNet for Crop Detection")
# Save Model (entire module object, reloaded later with torch.load)
if do_save_model:
    torch.save(model_trained,path_to_saved_model+"model.pth")
# + [markdown] id="n9UZnxwx4Yxv"
# # Evaluate Classifier Performance
#
# Key metrics:
#
# $$Pr = \frac{N_{TP}}{N_{TP}+N_{FP}}$$
#
# $$Re = \frac{N_{TP}}{N_{TP}+N_{FN}}$$
#
# $$F_{\beta} = \frac{(\beta^2+1)\times Pr \times Re}{\beta^2 \times Pr + Re}$$
#
# $$F_{1} = \frac{2\times Pr \times Re}{Pr + Re}$$
#
# **Note:** the rows of the "normalized" confusion matrix represent the recall values for the specified class.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="enOL_71L428N" outputId="663a8f9f-8fd2-468b-bdb3-cbdfe8b58cb8"
def getConfusionMatrix(model,dataloader,number_classes,verbose=True):
    """Calculates the Confusion Matrix for the given dataloader.

    Runs `model` over every batch (no gradients), accumulating a
    number_classes x number_classes count matrix plus flat label/prediction
    arrays for use with sklearn metrics. Relies on the notebook global
    `device` for tensor placement.

    Returns:
        (confusion_matrix ndarray, y_label 1-D ndarray, y_predict 1-D ndarray)
    """
    # https://stackoverflow.com/questions/53290306/confusion-matrix-and-test-accuracy-for-pytorch-transfer-learning-tutorial
    # Create a placeholder matrix
    confusion_matrix = torch.zeros(number_classes, number_classes)
    # Fill out the confusion Matrix; labels/predictions accumulate batch by batch
    y_label = np.array([])
    y_predict = np.array([])
    with torch.no_grad():
        for i, (inputs, classes) in enumerate(dataloader):
            inputs = inputs.to(device)
            classes = classes.to(device)
            outputs = model(inputs)
            # Gets the predicted label (argmax over class logits)
            _, preds = torch.max(outputs, 1)
            # np.vstack([ys, xs]) if ys.size else xs
            temp_labels = classes.view(1,-1).cpu().detach().numpy()
            temp_preds = preds.view(1,-1).cpu().detach().numpy()
            if verbose:
                print("Labels:",temp_labels)
                # print(temp_labels.shape)
                # print(y_label.size)
                print("Predictions:",temp_preds)
            # First batch initializes the arrays; later batches concatenate.
            y_label = np.hstack([y_label,temp_labels]) if y_label.size else temp_labels
            y_predict = np.hstack([y_predict,temp_preds]) if y_predict.size else temp_preds
            # Iterates through a zipped tuple and increments the confusion_matrix
            for t, p in zip(classes.view(-1), preds.view(-1)):
                confusion_matrix[t.long(), p.long()] += 1
    # Print the array outputs (counts, per-class recall, raw label arrays)
    if verbose:
        print(confusion_matrix)
        print(confusion_matrix.diag()/confusion_matrix.sum(1))
        print("Y_labels",y_label.flatten())
        print("Y_predictions",y_predict.flatten())
    # Returns as NumPy Arrays
    return confusion_matrix.cpu().detach().numpy(), y_label.reshape(-1), y_predict.reshape(-1)
def plotConfusionMatrix(cm, normalize=True, labels=None):
    """Render a confusion matrix as a seaborn heatmap.

    When `normalize` is True each row is divided by its sum, so cell (i, j)
    shows the fraction of actual class i predicted as class j (rows = recall).
    """
    if normalize:
        # Row-normalize; fewer decimals since values are fractions.
        row_totals = cm.sum(axis=1)[:, np.newaxis]
        cmn = cm.astype('float') / row_totals
        fmt = '.2f'
    else:
        cmn = cm
        fmt = '.3g'
    fig, ax = plt.subplots(figsize=(10, 7))
    sn.heatmap(cmn, annot=True, cmap='Blues', fmt=fmt, xticklabels=labels, yticklabels=labels)
    plt.ylabel('Actual')
    plt.xlabel('Predicted')
    plt.show(block=False)
# Reload the Model from disk when it was saved by the training cell above.
if do_save_model:
    model_trained = torch.load(path_to_saved_model+"model.pth")
model_trained.eval() # Convert to evaluation only, this is faster
# Determine the Confusion Matrix on the held-out test loader (verbose=False)
conf_mat, y_true, y_pred = getConfusionMatrix(model_trained,dataloaders['test'],len(labels),False)
plotConfusionMatrix(conf_mat,normalize=False,labels=labels)
plotConfusionMatrix(conf_mat,normalize=True,labels=labels)
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html
# https://stackoverflow.com/questions/31421413/how-to-compute-precision-recall-accuracy-and-f1-score-for-the-multiclass-case
# https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient
# https://stackoverflow.com/questions/20927368/how-to-normalize-a-confusion-matrix/66678924#66678924
# https://stackoverflow.com/questions/33965560/r-markdown-table-with-a-caption
# Calculate the Metrics: per-class precision/recall/F1 (average=None)
precision, recall, fscore, _ = precision_recall_fscore_support(y_true, y_pred,average=None)
# Bundle as a DataFrame for a readable per-class summary table
stats = {
    'Labels' : labels,
    'Precision' : precision,
    'Recall' : recall,
    'F1 Score' : fscore
}
df = pd.DataFrame(stats)
# Print the output.
print()
print()
print(df)
print()
print()
# print(df.to_markdown())
| CropCNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Time Series Analysis
#
import warnings
import itertools
import numpy as np
import matplotlib.pyplot as plt
# Silence statsmodels/pandas deprecation chatter in notebook output.
warnings.filterwarnings("ignore")
plt.style.use('fivethirtyeight')
import pandas as pd
import statsmodels.api as sm
import matplotlib
# Larger labels/ticks for readability in the notebook plots.
matplotlib.rcParams['axes.labelsize']=14
matplotlib.rcParams['xtick.labelsize']=12
matplotlib.rcParams['ytick.labelsize']=12
matplotlib.rcParams['text.color']='k'
# Load the Superstore sales data from a local path.
df=pd.read_excel('/home/anilla/DataScience/TimeSeries/Sample - Superstore.xls',parse_dates=True)
df.head()
df.columns.tolist()
# ### Time series analysis and forecasting of different categories
# #### i) Forecasting Furniture sales
# Restrict to the Furniture category only.
furniture=df.loc[df['Category']== 'Furniture']
furniture.head()
## checking time range of the sale of the furniture
start=furniture['Order Date'].min()
end=furniture["Order Date"].max()
start
end
# ## Data Preprocessing
furniture.columns.isna()
cols=furniture.columns.tolist()
cols
# Index by order date so the series can be resampled over time.
furniture=furniture.set_index('Order Date')
furniture.index
# Monthly mean sales ('MS' = month-start frequency).
y=furniture['Sales'].resample('MS').mean()
y['2017':]
# ## Visualization of Furniture Sales Time Series Data
y.plot(figsize=(15,8))
plt.show()
# ### one can also visualize data using time-series decomposition where the time series is decomposed to trend, seasonality and noise
from pylab import rcParams
rcParams['figure.figsize'] =18,8
decomposition=sm.tsa.seasonal_decompose(y,model='additive')
fig=decomposition.plot()
plt.show()
# ## Time series forecasting with ARIMA (Autoregressive Integrated Moving Average)
p=d=q=range(0,2)
#p=number of lag observations (autoregressive order)
#d=number of times the raw observations are differenced
#q=order of the moving average
# All (p, d, q) combinations with each parameter in {0, 1}.
pdq=list(itertools.product(p,d,q))
# Candidate seasonal terms with a yearly (12-month) cycle.
# NOTE(review): these are 3-tuples (P, D, 12) while SARIMAX's seasonal_order
# expects (P, D, Q, s) — verify before fitting downstream.
seasonal_pdq=[(x[0],x[1],12) for x in list(itertools.product(p,d,q))]
w=list(itertools.product(p,q))
w
list(p),list(q),list(d)
pdq
# FIX: corrected the garbled wording of the printed messages
# (was "Eaxamples of parameter combinations for saesona ARIMA...").
print('Examples of parameter combinations for seasonal ARIMA...')
print('SARIMAX:{} x {}'.format(pdq[1],seasonal_pdq[1]))
print('SARIMAX:{} x {}'.format(pdq[1],seasonal_pdq[2]))
print('SARIMAX:{} x {}'.format(pdq[2],seasonal_pdq[3]))
print('SARIMAX:{} x {}'.format(pdq[2],seasonal_pdq[4]))
| .ipynb_checkpoints/Timeseries-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Machine learning and Statistics - Project
#
# ### Student: <NAME>
# The aim of this project is to create a web service that uses machine learning to make predictions based on the data set powerproduction. The goal is to produce a model that accurately predicts wind turbine power output from wind speed values, as in the data set. I will then develop a web service that will respond with predicted power values based on speed values sent as HTTP requests.
import numpy as np
# pandas is used to read in the csv and display in a pandas dataframe
import pandas as pd
# matplotlib pyplot will be used for plotting the data set
import matplotlib.pyplot as plt
# seaborn will be used for plotting the data set
import seaborn as sns
# reading in the data set with pandas
df = pd.read_csv("powerproduction.csv")
# displaying the data set (500 rows: wind speed vs power output)
df
# information about the dataset: column dtypes and non-null counts
df.info()
# shape of the data set. It has 500 rows and 2 columns
df.shape
# first 10 rows of dataset
df.head(10)
# summary statistics for each column (count, mean, std, quartiles, min/max)
df.describe()
# Firstly, I am going to explain each of these functions independently. As we can see, there are only 2 variables in this table, speed and power. It shows frame or a series of numeric values.
#
# - count: In the table above count is 500 which means that count represents the size of the data set.
#
# - mean: Mean or Average is a central tendency of the data i.e. a number around which a whole data is spread out. In a way, it is a single number which can estimate the value of whole data set.
#
# - std: Standard deviation is the measurement of average distance between each quantity and mean. That is, how data is spread out from mean. A low standard deviation indicates that the data points tend to be close to the mean of the data set, while a high standard deviation indicates that the data points are spread out over a wider range of values.
#
# - min: Shows the smallest data figure collected, in this case it is zero.
#
# - 25%: Also called first quartile. It means that 25% of data falls behind this measurement, and 75% is above it.
#
# - 50%: The 50 percentile is the same as the median. Half of the data collected falls behind this measurement.
#
# - 75%: Also called third quartile. It means that 75% of data falls behind this measurement, and 25% is above it.
#
# - max: Shows the highest data figure collected, ie. maximum speed and power.[1]
#
#
# +
# Code adapted from: https://matplotlib.org/tutorials/introductory/customizing.html [2]
# Set a larger default figure size and line width for this notebook.
plt.rcParams['figure.figsize'] = (18, 8)
plt.rcParams['lines.linewidth'] = 2.0
# matplotlib.pyplot used for plotting speed and power as a blue scatter
plt.plot(df['speed'], df['power'], '.b')
# adding a title to the plot
plt.title('Powerproduction data set', fontsize=18)
# adding labels to the x (speed) and y (power) axis
plt.xlabel('Speed', fontsize=16)
plt.ylabel('Power', fontsize=16)
# displaying grid on the plot
plt.grid()
# showing the plot
plt.show()
# -
# Pairwise scatter/histogram plots of the two variables (speed, power).
sns.pairplot(df)
# Same, with kernel-density estimates on the diagonal instead of histograms.
sns.pairplot(df, diag_kind = 'kde')
# +
# cleaning the dataset by removing all data points where the power output is zero.
df = df[df['power'] !=0]
df
# +
# Code adapted from: https://matplotlib.org/tutorials/introductory/customizing.html
# Re-plot after cleaning (zero-power rows removed above).
plt.rcParams['figure.figsize'] = (12, 6)
plt.rcParams['lines.linewidth'] = 1.0
# matplotlib.pyplot used for plotting speed and power as a blue scatter
plt.plot(df['speed'], df['power'], '.b')
# adding a title to the plot
plt.title('Powerproduction data set', fontsize=18)
# adding labels to the x (speed) and y (power) axis
plt.xlabel('Speed', fontsize=16)
plt.ylabel('Power', fontsize=16)
# displaying grid on the plot
plt.grid()
# showing the plot
plt.show()
# -
# #### Model 1: Linear regression
# Linear regression is a basic and commonly used type of predictive analysis. The overall idea of regression is to examine two things:
#
# 1. Does a set of predictor variables do a good job in predicting an outcome?
# 2. Which variables in particular are significant predictors of the outcome variable?
#
# These regression estimates are used to explain the relationship between one dependent variable and one or more independent variables. [3]
#
# With simple linear regression when we have a single input, we can use statistics to estimate the coefficients. This requires that you calculate statistical properties from the data such as means, standard deviations, correlations and covariance. All of the data must be available to traverse and calculate statistics. [4]
#
# Preparing Data For Linear Regression:
#
# 1. Linear Assumption. Linear regression assumes that the relationship between input and output is linear. It does not support anything else. Sometimes data needs to be transformed to make the relationship linear.
#
# 2. Remove Noise. Linear regression assumes that your input and output variables are not noisy.In this case we should consider using data cleaning operations that clarify data. This is most important for the output variable.
#
# 3. Remove Collinearity. Linear regression will over-fit your data when you have highly correlated input variables. We could calculate pairwise correlations for input data and remove the most correlated.
#
# 4. Gaussian Distributions. Linear regression will make more reliable predictions if input and output variables have a Gaussian distribution. We can benefit using transforms on variables to make their distribution more Gaussian looking.
#
# 5. Rescale Inputs: Linear regression will often make more reliable predictions if we rescale input variables using standardization or normalization.
#
#
# Pairwise correlation matrix for speed and power (the markdown below reports
# a correlation of about 0.94, a strong positive relationship).
df.corr()
# +
import numpy as np
from sklearn.linear_model import LinearRegression
# For evaluating model performance.
from sklearn.metrics import mean_squared_error, r2_score
# Code adjusted from:
# https://scikit-learn.org/stable/modules/linear_model.html#ordinary-least-squares [5]
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression.fit[6]
# https://scikit-learn.org/stable/auto_examples/linear_model/plot_ols.html#sphx-glr-auto-examples-linear-model-plot-ols-py [7]
# https://scikit-learn.org/stable/modules/linear_model.html [8]
# Creating linear regression model
reg = LinearRegression()
# defining X (feature matrix, one column) and y (target) from the dataset
X = df[["speed"]]
y = df["power"]
print(X.shape)
print(y.shape)
# Training the model on the full (cleaned) dataset
reg.fit(X, y)
# Making predictions on the same data the model was fitted on, so the
# metrics below are in-sample (no held-out test set in this cell)
y_pred = reg.predict(X)
# The coefficients (slope of the fitted line)
print('Coefficients: \n', reg.coef_)
# The mean squared error
print('Mean squared error: %.3f' % mean_squared_error(y, y_pred))
# The coefficient of determination (R^2)
print('Coefficient of determination: %.3f' % r2_score(y, y_pred))
# -
# Inspecting the fit using matplotlib: data scatter (green) with the
# fitted regression line (black) overlaid.
plt.plot(df['speed'], df['power'], '.g')
plt.plot(df['speed'], y_pred, 'k-', label="model")
# ##### Importance of Coefficient of determination and correlation coefficient
#
# If the cost is greater than 0 a quantity closely related to the cost is called the coefficient of determination, also known as the R-squared value. The purpose of the R-squared value is to measure how much of the variance in y (power) is determined by x (speed).
#
# In this case the coefficient of determination is 0.898, which means that about 89% of the variance in power is explained by speed.
#
# Coefficent of determination is calculated:
#
# $$ R^2 = 1 - \frac{\sum_i (y_i - m x_i - c)^2}{\sum_i (y_i - \bar{y})^2} $$
#
# Taken from the lectures: https://github.com/ianmcloughlin/jupyter-teaching-notebooks/blob/master/simple-linear-regression.ipynb [9]
#
# Other than the coefficient of determination, I calculated the correlation between the two variables, speed and power. The correlation is 0.94, which indicates a strong positive correlation between speed and power. The correlation coefficient is a statistical measure of the strength of the relationship between the relative movements of two variables. Values range between -1.0 and 1.0. A calculated number greater than 1.0 or less than -1.0 means that there was an error in the correlation measurement. A correlation of -1.0 shows a perfect negative correlation, while a correlation of 1.0 shows a perfect positive correlation. A correlation of 0.0 shows no linear relationship between the movement of the two variables.[10]
#
# According to these numbers — the coefficient of determination and the correlation coefficient — we could say that the strength of the relationship between the relative movements of speed and power is linear, which would mean that as speed increases, power increases too. But that is not what we see from the plot: the regression line does not follow the data, so their relationship is not linear after all.
#
# +
# For splitting data into train / test sets.
from sklearn.model_selection import train_test_split
# Code adjusted from: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html [11]
def train_test(model, X, y):
    """Plot learning curves: train/test RMSE as the training set grows.

    The data is split 80/20, then `model` is refitted on progressively
    larger prefixes of the training portion; the RMSE on the seen prefix
    and on the fixed hold-out set are plotted against prefix size.
    """
    tr_X, te_X, tr_y, te_y = train_test_split(X, y, test_size=0.2)
    rmse_train = []
    rmse_test = []
    for size in range(1, len(tr_X)):
        model.fit(tr_X[:size], tr_y[:size])
        prefix_pred = model.predict(tr_X[:size])
        holdout_pred = model.predict(te_X)
        rmse_train.append(np.sqrt(mean_squared_error(tr_y[:size], prefix_pred)))
        rmse_test.append(np.sqrt(mean_squared_error(te_y, holdout_pred)))
    plt.plot(rmse_train, "r-", label="train")
    plt.plot(rmse_test, "b-.", label="test")
    plt.legend()
    plt.xlabel("Training dataset size")
    plt.ylabel("Root mean squared error")
    plt.grid()
    plt.xlim(-10, 100)
# Draw the learning curves for a plain linear model on the full data.
linear_reg = LinearRegression()
train_test(linear_reg, X, y)
# -
# ##### What is Root Mean Square Error (RMSE)?
#
#
# Root Mean Square Error (RMSE) is the standard deviation of the prediction errors. They are a measure of how far from the regression line data points are. RMSE is a measure of how spread out these residuals are. In other words, it tells us how concentrated the data is around the line of best fit.[12] From the plot above we can see that RMSE is big when the training set is small, but as the training set increases error is getting smaller. This is something that should be investigated further as it doesn't give us precise data.
# ##### Underfitting and Overfitting
#
# Splitting a dataset might also be important for detecting if the model suffers from one of two very common problems, called underfitting and overfitting:
#
# - Underfitting is usually the consequence of a model being unable to encapsulate the relations among data. For example, this can happen when trying to represent nonlinear relations with a linear model. Underfitted models will likely have poor performance with both training and test sets.
#
# - Overfitting usually takes place when a model has an excessively complex structure and learns both the existing relations among data and noise. Such models often have bad generalization capabilities. Although they work well with training data, they usually yield poor performance with unseen (test) data. [13]
#
# In this case we are dealing with the problem of underfitting, that is to say, this model is being unable to encapsulate the relation among data. We can clearly see that in "Model 1: linear regression" plot as the regression line doesn't fit the data as it is straight and linear while data structure is non linear. Accordingly, I will look into other models to see if they are going to fit data structure more accurately.
# ##### Model 2: Polynomial regression
# Polynomial regression is one of the most fundamental concepts used in data analysis and prediction. This is one of the regression techniques which is used to predict the outcome. It is defined as the relationship between the independent and dependent variables when the dependent variable is related to the independent variable having some degree. It does not require the relationship between dependent and independent variables to be linear, so if the line is a curve than it may have any polynomial term.
#
# The main difference between linear and polynomial regression is that linear regression requires the dependent and independent variables to be linearly related while this may better fit the line if we include any higher degree to the independent variable term in the equation. This is the equation:
#
# Y= b0+a1x+a2x^2+a3x^3+…. anx^n
#
#
# If we add higher degrees, then it turns the line into a curve that better fits this data. Generally, it is used when the points in the data set are scattered and the linear model is not able to describe the result clearly. We should always keep an eye on overfitting and underfitting while considering these degrees to the equation.
#
# There are two techniques which are used in deciding the degree of the equation:
#
# - Forward Selection: It is the method of increasing the degree until it is significant enough to define the model.
#
# - Backward Selection: It is the method of decreasing the degree until it is significant enough to define the model.[14]
#
# As we have already tried linear regression and it didn't give us accurate prediction of data, polynomial regression has better conditions as the data structure/regression line is a curve which means it could have polynomial term and it would fit the data better. As we need to set the degree to the independent variable ourselves, I have tried with several different degrees and degree 3 showed the best fit. Firstly, I will show the linear regression and then polynomial regression to compare the models and see which one is a better fit.
# +
# Fit an ordinary least-squares model on the full data set and overlay
# its predictions on a scatter plot of the observations.
lin_reg = LinearRegression()
model_pred = lin_reg.fit(X, y).predict(X)
plt.figure(figsize=(10, 8));
plt.scatter(X, y);
plt.plot(X, model_pred);
# R^2 (coefficient of determination) of the linear fit.
print(r2_score(y, model_pred))
# +
# Code adjusted from: https://medium.com/kharpann/performing-polynomial-regression-using-python-840eb666bfd8 [15]
from sklearn.preprocessing import PolynomialFeatures
# Expand the speed feature into polynomial terms (up to degree 3) and
# fit an ordinary linear model on the expanded feature matrix.
poly_reg = PolynomialFeatures(degree=3)
X_poly = poly_reg.fit_transform(X)
lin_reg_2 = LinearRegression()
y_pred = lin_reg_2.fit(X_poly, y).predict(X_poly)
plt.figure(figsize=(10, 8));
plt.scatter(X, y);
plt.plot(X, y_pred, 'k-');
plt.xlabel('speed (m/s)', fontsize='16')
plt.ylabel('power (kW)', fontsize='16')
plt.title('Model 2: Polynomial regression', fontsize='16')
plt.grid()
# R^2 (coefficient of determination) of the polynomial fit.
print(r2_score(y, y_pred))
# -
# Looking at the plot, polynomial regression seems to be a good fit comparing it to linear regression as polynomial regression is a better fit when it comes to data that is represented in a curve line. Comparing r2_score polynomial regression shows more precise results as R^2 (coefficient of determination) regression score function in polynomial regression is closer to 1. Best possible score is 1.0.
# ##### Model 3: Neural networks
#
# Neural Networks are a class of models within the general machine learning literature. Neural networks are a specific set of algorithms that have revolutionized machine learning. They are inspired by biological neural networks and the current so-called deep neural networks have proven to work quite well. Neural Networks are themselves general function approximations, which is why they can be applied to almost any machine learning problem about learning a complex mapping from the input to the output space.[16]
#
#
#
# ##### 1. Training set:
#
# It’s the set of data used to train the model. During each epoch, our model will be trained over and over again on this same data in our training set, and it will continue to learn about the features of this data.The hope with this is that later we can deploy our model and have it accurately predict on new data that it’s never seen before. It will be making these predictions based on what it has learned about the training data.
#
# ##### 2. Validation set:
#
# The validation set is a set of data, separate from the training set, that is used to validate our model during training. This validation process helps give information that may assist us with adjusting our hyperparameters.
#
# With each epoch during training, the model will be trained on the data in the training set. Well, it will also simultaneously be validated on the data in the validation set. During the training process, the model will be classifying the output for each input in the training set. After this classification occurs, the loss will then be calculated, and the weights in the model will be adjusted. Then, during the next epoch, it will classify the same input again.One of the major reasons we need a validation set is to ensure that our model is not overfitting to the data in the training set.
#
# ##### 3. Test set:
#
# The test set is a set of data that is used to test the model after the model has already been trained. The test set is separate from both the training set and validation set.
#
# After our model has been trained and validated using our training and validation sets, we will then use our model to predict the output of the unlabeled data in the test set.
#
# One major difference between the test set and the two other sets is that the test set should not be labeled. The training set and validation set have to be labeled so that we can see the metrics given during training, like the loss and the accuracy from each epoch. [17]
#
#
# +
# Split the data: 20% is held out as the test set, then 25% of the
# remaining 80% becomes validation, giving a 60:20:20
# train/test/validation ratio.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.25)
# Report the shape of every partition as a sanity check.
for part in (X_train, y_train, X_test, y_test, X_val, y_val):
    print(part.shape)
# -
# ### Neuron network in keras:
#
# In the further text I will use keras library to analyse data. Keras is a powerful and easy-to-use free open source Python library for developing and evaluating deep learning models. It wraps the efficient numerical computation libraries like Theano and TensorFlow and allows us to define and train neural network modelse.
#
# Steps in Keras:
#
# ##### 1. Load Data.
#
# I have already loaded powerproduction data set that will be used for analysis.
#
# ##### 2. Define Keras Model.
#
# Model used is Sequential. Models in Keras are defined as a sequence of layers. We create a Sequential model and add layers one at a time until we are happy with our network architecture. A Sequential model is appropriate for a plain stack of layers where each layer has exactly one input tensor and one output tensor.
#
# ###### 3. Compile Keras Model.
#
# When the model is defined, we can compile it. Compiling the model uses the efficient numerical libraries under the covers (the so-called backend) such as Theano or TensorFlow. In this case I will use TensorFlow. When compiling, we must specify some additional properties required when training the network. We must specify the loss function to use to evaluate a set of weights, the optimizer is used to search through different weights for the network and any optional metrics we would like to collect and report during training. I will use optimizer Adam. Adam optimization is a stochastic gradient descent method that is based on adaptive estimation of first-order and second-order moments.This is a popular version of gradient descent because it automatically tunes itself and gives good results in a wide range of problems.
#
# ##### 4. Fit Keras Model.
#
# Now the model is ready for efficient computation and we can execute it. Firstly, we can train or fit our model on our loaded data by calling the fit() function on the model.
#
# Training occurs over epochs and each epoch is split into batches.
#
# - Epoch: One pass through all of the rows in the training dataset.
# - Batch: One or more samples considered by the model within an epoch before weights are updated.
#
# One epoch is comprised of one or more batches, based on the chosen batch size and the model is fit for many epochs.
# The training process will run for a fixed number of iterations through the dataset called epochs, that we must specify using the epochs argument. We must also set the number of dataset rows that are considered before the model weights are updated within each epoch, called the batch size and set using the batch_size argument.
#
# For this problem, I will run 500 numbers of epochs and use a relatively small batch size of 10.
#
# These configurations can be chosen experimentally by trial and error. We want to train the model enough so that it learns a good (or good enough) mapping of rows of input data to the output classification. The model will always have some error, but the amount of error will level out after some point for a given model configuration. This is called model convergence.
#
#
# ##### 5. Make Predictions
#
# Making predictions is as easy as calling the predict() function on the model. We are using a sigmoid activation function on the output layer, so the predictions will be a probability in the range between 0 and 1. We can easily convert them into a crisp binary prediction for this classification task by rounding them. [21]
# Code adapted from the lectures: https://github.com/ianmcloughlin/jupyter-teaching-notebooks/blob/master/keras-neurons.ipynb [18]
import tensorflow.keras as kr
# Sequential network for scalar regression: one hidden sigmoid layer of
# 50 units on a single scalar input, followed by one linear output unit.
model = kr.models.Sequential()
model.add(kr.layers.Dense(50, input_shape=(1,), activation='sigmoid', kernel_initializer="glorot_uniform", bias_initializer="zeros"))
model.add(kr.layers.Dense(1, activation='linear', kernel_initializer="glorot_uniform", bias_initializer="glorot_uniform"))
# Adam optimization is a stochastic gradient descent method that is based on adaptive estimation of first-order and second-order
# moments.
# Code adapted from: https://keras.io/api/optimizers/adam/ [19]
# NOTE(review): `lr` is the legacy keyword; recent Keras versions prefer
# `learning_rate` — confirm the installed version still accepts `lr`.
model.compile(kr.optimizers.Adam(lr=0.001), loss="mean_squared_error")
model.summary()
# +
model.fit(X_train, y_train, epochs=500, batch_size=10, validation_data=(X_val, y_val))
# -
# Loss and loss function:
#
# Loss is a prediction error of Neural Net. Method to calculate the loss is called Loss Function.
# Loss is used to calculate the gradients. And gradients are used to update the weights of the Neural Net. This is how a Neural Net is trained.[20] As we can see from above, in each epoch loss is smaller than the previous one which means that prediction error is smaller as well. In the next part I will make predictions for the test data and show it on the plot.
# Prediction for the test data - it was not used in training.
# Plot the full data set as a line and the network's test-set
# predictions as black dots for visual comparison.
plt.plot(X, y, 'm', label="data")
plt.plot(X_test, model.predict(X_test), 'k.', label="neural n/w")
plt.xlabel('Speed (m/s)', fontsize='14')
plt.ylabel('power (kW)', fontsize='14')
plt.title("Model 3: Neural network prediction on test data")
plt.grid()
plt.legend()
# Predict the power output for a single speed value (20 m/s).
# The model expects a 2-D input, hence the nested list.
wind = [[20]]
print(model.predict(wind))
# To conclude, the neural network prediction model seems to be accurate as it gives us reasonable data, which the code above confirms. We can easily make predictions based on the model. If, for example, the speed is 20 m/s, the power prediction will be 98 kW. When we look at the plot, the prediction seems accurate and it shows us data that we could rely on. When we compare all three models, we can clearly see that there is a difference between them. Linear regression hasn't been proved to be a good fit for this type of data structure, as I was unable to make any predictions based on the model. That is why I went with a second model that would be similar to linear regression, but also more accurate: polynomial regression. Polynomial regression has been shown to be a good model for this data structure since the line fits the dataset and it could easily give us some predictions. The third model was the neural network model, which gave us even better insight into the dataset. It seems an even better fit to the data and more accurate. The problem with polynomial regression and the neural network is that their predictions don't seem to be the same, so it is hard to tell which one is more precise. Since neural networks are very popular in machine learning, their algorithm seems to be more accurate than the polynomial regression model.
# References:
#
# - Descriptive statistics: [1] https://www.investopedia.com/terms/d/descriptive_statistics.asp
# - Matplotlib:[2] https://matplotlib.org/tutorials/introductory/customizing.html
# - Linear regression: [3] https://www.statisticssolutions.com/what-is-linear-regression/
# - Linear reression: [4] https://machinelearningmastery.com/linear-regression-for-machine-learning/
# - Ordinary least squares: [5] https://scikit-learn.org/stable/modules/linear_model.html#ordinary-least-squares
# - Sklearn, linear regression: [6] https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression.fit
# - Example of liner model:[7] https://scikit-learn.org/stable/auto_examples/linear_model/plot_ols.html#sphx-glr-auto-examples-linear-model-plot-ols-py
# - Scikit-learn: [8] https://scikit-learn.org/stable/modules/linear_model.html
# - Lectures: [9] https://github.com/ianmcloughlin/jupyter-teaching-notebooks/blob/master/simple-linear-regression.ipynb
# - Correlation coefficient: [10] https://www.investopedia.com/terms/c/correlationcoefficient.asp
# - Train, test, split: [11] https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
# - Root mean square error: [12] https://www.geeksforgeeks.org/root-mean-square-error-in-r-programming/
# - Train, test, split: [13] https://realpython.com/train-test-split-python-data/
# - Polynomial regression: [14] https://www.educba.com/polynomial-regression/
# - Polynomial regression in python: [15] https://medium.com/kharpann/performing-polynomial-regression-using-python-840eb666bfd8
# - Neura networks: [16] https://www.codementor.io/@james_aka_yale/a-gentle-introduction-to-neural-networks-for-machine-learning-hkijvz7lp
# - Test, train, validation: [17] https://deeplizard.com/learn/video/Zi-0rlM4RDs
# - Lectures: [18] https://github.com/ianmcloughlin/jupyter-teaching-notebooks/blob/master/keras-neurons.ipynb
# - Keras: [19] https://keras.io/api/optimizers/adam/
# - Loss function: [20] https://towardsdatascience.com/understanding-different-loss-functions-for-neural-networks-dd1ed0274718
# - Keras: [21] https://machinelearningmastery.com/adam-optimization-algorithm-for-deep-learning/#:~:text=Adam%20is%20a%20replacement%20optimization,sparse%20gradients%20on%20noisy%20problems.
| Project_2020.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="rQph610BeO4I"
# # BERT pretraining on Japanese wiki
#
# This notebook is assumed to be executed on Colaboratory notebook with TPU.
#
# [](https://colab.research.google.com/drive/14Ky8w5NodVyfk7tm13u6vdaGPl5qvPxL)
#
# + colab={} colab_type="code" id="3Cv0EJiZ8vi6"
import tensorflow as tf
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="R3AyT0Fq8yzD" outputId="71a538d2-f8c9-4621-ab1b-756350d95093"
tf.__version__
# + colab={"base_uri": "https://localhost:8080/", "height": 238} colab_type="code" id="EWkrwgQV80JK" outputId="357ab0b1-80f4-4450-c493-e554bfdb3be4"
# !git clone --recursive https://github.com/yoheikikuta/bert-japanese.git
# + [markdown] colab_type="text" id="rYnDgi2xgDS7"
# Authentication to use TPU.
# + colab={} colab_type="code" id="lwRm42uDt8SN"
from google.colab import auth
auth.authenticate_user()
# + [markdown] colab_type="text" id="yLWTT6NZ_ZYi"
# ## Check TPU devices
# + colab={"base_uri": "https://localhost:8080/", "height": 255} colab_type="code" id="pShRjorw_Lla" outputId="b32783fd-5aef-4b87-8b91-07f9cfdd413f"
import datetime
import json
import os
import pprint
import random
import string
import sys
import tensorflow as tf
# Fail fast when the notebook is not attached to a TPU runtime.
assert 'COLAB_TPU_ADDR' in os.environ, 'ERROR: Not connected to a TPU runtime; please see the first cell in this notebook for instructions!'
TPU_ADDRESS = 'grpc://' + os.environ['COLAB_TPU_ADDR']
print('TPU address is', TPU_ADDRESS)
with tf.Session(TPU_ADDRESS) as sess:
    print('TPU devices:')
    pprint.pprint(sess.list_devices())
    # Upload credentials to TPU so it can read/write the GCS bucket.
    with open('/content/adc.json', 'r') as adc_file:
        auth_info = json.load(adc_file)
    tf.contrib.cloud.configure_gcs(sess, credentials=auth_info)
    # Now credentials are set for all future sessions on this TPU.
# + [markdown] colab_type="text" id="iQM0ChEhg08g"
# ## Set input and output
#
# Need to put `all-maxseq(128|512).tfrecord` data for pre-traning on your GCS bucket.
# Trained objects will be saved into a specified GCS bucket.
# + colab={} colab_type="code" id="54yVw-J4iVq6"
INPUT_DATA_GCS = 'gs://bert-wiki-ja/data'
# + colab={} colab_type="code" id="V3-OQkauSdXe"
# Wiki-extractor output shard directories to read: AA..AZ followed by
# BA and BB — 28 two-letter directory names in lexicographic order.
TARGET_DIRS = [first + second
               for first in 'AB'
               for second in string.ascii_uppercase][:28]
# + colab={} colab_type="code" id="BG46L7cKB9uT"
# MAX_SEQ_LEN = 128
MAX_SEQ_LEN = 512
# + colab={} colab_type="code" id="sa9C1sAdS0tZ"
INPUT_FILE = ','.join( [ '{}/{}/all-maxseq{}.tfrecord'.format(INPUT_DATA_GCS, elem, MAX_SEQ_LEN) for elem in TARGET_DIRS] )
# + colab={} colab_type="code" id="xK-4oH2ViNzW"
OUTPUT_GCS = 'gs://bert-wiki-ja/model'
# + [markdown] colab_type="text" id="jQlc9NQOjihY"
# ## Execute pre-training
#
# NOTE that you have to give `<EMAIL>` the following permissions on the specified GCS bucket:
# - Storage Legacy Bucket Reader
# - Storage Legacy Bucket Writer
# - Storage Legacy Object Reader
# - Storage Object Viewer
#
# + colab={"base_uri": "https://localhost:8080/", "height": 14011} colab_type="code" id="ejRY72r3BL4W" outputId="eb655aee-4ea0-4c14-c43a-ac0fbf596ee1"
# # !python bert-japanese/src/run_pretraining.py \
# # --input_file={INPUT_FILE} \
# # --output_dir={OUTPUT_GCS} \
# # --use_tpu=True \
# # --tpu_name={TPU_ADDRESS} \
# # --num_tpu_cores=8 \
# # --do_train=True \
# # --do_eval=True \
# # --train_batch_size=256 \
# # --max_seq_length={MAX_SEQ_LEN} \
# # --max_predictions_per_seq=20 \
# # --num_train_steps=1000000 \
# # --num_warmup_steps=10000 \
# # --save_checkpoints_steps=10000 \
# # --learning_rate=1e-4
# + colab={"base_uri": "https://localhost:8080/", "height": 6820} colab_type="code" id="d3OEEo3XR93B" outputId="5ad89f26-d8ed-402a-a670-9c8ef7d3ce22"
# !python bert-japanese/src/run_pretraining.py \
# --input_file={INPUT_FILE} \
# --output_dir={OUTPUT_GCS} \
# --use_tpu=True \
# --tpu_name={TPU_ADDRESS} \
# --num_tpu_cores=8 \
# --do_train=True \
# --do_eval=True \
# --train_batch_size=64 \
# --max_seq_length={MAX_SEQ_LEN} \
# --max_predictions_per_seq=20 \
# --num_train_steps=1400000 \
# --num_warmup_steps=10000 \
# --save_checkpoints_steps=10000 \
# --learning_rate=1e-4
# + colab={} colab_type="code" id="GBNgHk-s4epb"
# (removed) A stray `t` expression was left in this cell; `t` is never
# defined anywhere in the notebook, so evaluating the cell raised a
# NameError. The token has been commented out.
| notebook/pretraining.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # AceleraDev DataScience
#
# ## Setup
#
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data
#
#
#
#lendo os pacotes
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv('train.csv')
# ## Analysis
# ### Selecao por completude
# Build an auxiliary dataframe summarising the consistency of every
# column of `df`: name, dtype, missing count, row count and cardinality.
cons = pd.DataFrame({'colunas': df.columns,
                     'tipo': df.dtypes,
                     'missing': df.isna().sum(),
                     'size': df.shape[0],
                     'unicos': df.nunique()})
# Fraction of missing values per column, rounded to 2 decimal places.
cons['percentual'] = (cons['missing'] / cons['size']).round(2)
cons.percentual.plot.hist(bins=5)
# ### Removendo colunas com dados missing
# Count how many columns are (almost) complete.
print('Contagem de colunas com ATÉ 20% de dados faltantes', cons[cons.percentual < 0.2].shape[0])
print('Contagem de colunas com 0% de dados faltantes', cons[cons.percentual == 0].shape[0])
cons[cons.percentual == 0]['tipo'].value_counts()
# Flag each column as fully populated ('completa') or not ('faltante').
cons['completa'] = np.where(cons['percentual'] == 0, 'completa', 'faltante')
# Keep only the fully populated columns of the original dataframe.
mantem = list(cons[cons['completa'] == 'completa']['colunas'])
df = df[mantem]
# Names of the columns that are both complete and numeric.
colunas_numericas = list(cons.loc[(cons['tipo'] != 'object')
                                  & (cons['completa'] == 'completa'), 'colunas'])
# ## Exploração
# Univariate analysis: a log-scaled histogram for every numeric column.
for coluna in colunas_numericas:
    print(coluna)
    df[coluna].plot.hist(bins=50, log=True)
    plt.show()
# Heatmap of the pairwise correlations between the numeric variables.
plt.figure(figsize=(20, 20))
sns.heatmap(df[colunas_numericas].corr().round(2), annot=True)
# Features hand-picked from the heatmap as strongly correlated.
correlacionadas = ['GarageArea', 'GarageCars', 'GrLivArea', 'OverallQual']
# ## Analisando as features com yellowbrick
# A bare `pip install ...` line is not valid Python; use the IPython
# %pip magic so the package is installed into the running kernel.
# %pip install yellowbrick
#Removendo a coluna ID
colunas_numericas.remove('Id')
df = df[colunas_numericas]
y_train = df['SalePrice']
X_train = df.drop(columns = 'SalePrice')
# +
from yellowbrick.features import Rank1D
visualizer = Rank1D(algorithm='shapiro')
visualizer.fit(X_train, y_train)
visualizer.transform(X_train)
visualizer.show()
# +
from yellowbrick.features import PCA
visualizer = PCA(scale=True, proj_features=True, projection=2)
visualizer.fit_transform(X_train[correlacionadas], y_train)
visualizer.show()
# +
from yellowbrick.target import FeatureCorrelation
features = list(X_train.columns)
visualizer = FeatureCorrelation(labels=features)
visualizer.fit(X_train, y_train)
visualizer.show()
# -
# ## Treinando o modelo
# A bare `pip install ...` line is not valid Python; use the IPython
# %pip magic. Also, the PyPI package is named scikit-learn
# ("sklearn" is a deprecated alias that PyPI now rejects).
# %pip install scikit-learn
from sklearn.linear_model import LinearRegression
# Fit ordinary least squares on the complete numeric training columns.
reg= LinearRegression()
reg.fit(X_train, y_train)
colunas_treinamento = X_train.columns
X_test = pd.read_csv('test.csv')
# NOTE(review): sample_submission.csv is used here as if it contained
# ground-truth prices; it is a Kaggle placeholder — the resulting error
# metric should be interpreted with care.
y_test = pd.read_csv('sample_submission.csv')
y_test = y_test['SalePrice']
# Keep only the columns seen at training time and fill missing test
# values with the TRAINING-set column means (df still holds train data).
X_test= X_test[colunas_treinamento].fillna(df[colunas_treinamento].mean())
y_pred = reg.predict(X_test)
from sklearn.metrics import mean_squared_error
# Baseline mean squared error with all features.
erro_normal = mean_squared_error(y_pred=y_pred, y_true=y_test)
erro_normal
# ## Aplicando o Feature Selection
from sklearn.feature_selection import RFE
rfe = RFE(reg)
rfe.fit(X_train, y_train)
pd.DataFrame({'coluna':X_train.columns,
'bool': rfe.get_support(),
'coeficientes': pd.Series(reg.coef_)})
X_train_importante = rfe.transform(X_train)
X_test_importante = rfe.transform(X_test)
reg.fit(X_train_importante, y_train)
y_pred_imp = reg.predict(X_test_importante)
erro_imp = mean_squared_error(y_pred=y_pred_imp, y_true=y_test)
erro_imp
# +
from yellowbrick.model_selection import RFECV
# Instantiate RFECV visualizer with a linear SVM classifier
visualizer = RFECV(reg)
visualizer.fit(X_train, y_train) # Fit the data to the visualizer
visualizer.show() # Finalize and render the figure
# -
# ## Aplicando PCA
from sklearn.decomposition import PCA
pca = PCA(0.95)
pca.fit(X_train)
pca.explained_variance_ratio_
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
reg = LinearRegression()
reg.fit(X_train_pca, y_train)
y_pred_pca = reg.predict(X_test_pca)
erro_pca = mean_squared_error(y_pred=y_pred_pca, y_true=y_test)
pd.DataFrame({'erro' : [erro_normal, erro_imp, erro_pca]}).plot(kind = 'bar', log = True)
| module_6/Semana 6 - Aceleradev.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Test Apache Spark
# ## Test 1
import pyspark
from pyspark.sql import SparkSession
# Create (or reuse) a SparkSession, run a trivial SQL query to verify
# the installation works, then release the session's resources.
spark = SparkSession.builder.getOrCreate()
df = spark.sql("select 'spark' as hello ")
df.show()
spark.stop()
# ## Test 2
import pyspark
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
# Single-node local context to exercise the low-level RDD API.
conf = pyspark.SparkConf().setAppName('appName').setMaster('local')
sc = pyspark.SparkContext(conf=conf)
spark = SparkSession(sc)
# Square each element; collect() pulls the results back to the driver.
nums = sc.parallelize([1,2,3,4])
nums.map(lambda x: x*x).collect()
# Stopping the context also invalidates the session built on top of it.
sc.stop()
| spark-flavor/resources/tutorials/test-spark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# ## Hyper parameter tuning by SageMaker
#
# 1. OS level setting
# 2. Prepare Requirements
# 3. Hyperparameter Tuning
#
# **Reference**
#
# * [Hyperparameter Tuning using SageMaker PyTorch Container](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/hyperparameter_tuning/pytorch_mnist/hpo_pytorch_mnist.ipynb)
# ## OS level setting
#
# Install packages.
#
# ```
# sudo yum install gcc72-c++.x86_64
# sudo yum install clang
# ```
#
# g++: Install & link same version of gcc.
# ## Prepare Requirements
# !git pull origin master
# !pip install pipenv
# ! export PIPENV_VENV_IN_PROJECT=1 && cd ../ && pipenv install --python=3.6
# +
import os
def set_pythonpath():
    """Append the pipenv site-packages dir and the project root to sys.path.

    The virtualenv is assumed to live at ../.venv relative to the current
    working directory (created with PIPENV_VENV_IN_PROJECT=1).
    """
    import sys
    # e.g. "python3.6" for the interpreter running this notebook.
    ver = "python{}.{}".format(sys.version_info.major, sys.version_info.minor)
    here = os.path.realpath(".")
    site_packages = os.path.join(here, "../.venv/lib/{}/site-packages".format(ver))
    sys.path.append(site_packages)
    sys.path.append(os.path.join(here, "../"))
# -
def execute_example():
    """Smoke-test the project's training entry point on the AllenNLP tagger tutorial data."""
    from example.train import train as train_fn
    base_url = 'https://raw.githubusercontent.com/allenai/allennlp/master/tutorials/tagger/'
    # Tiny dimensions and a single epoch: we only check the pipeline runs.
    train_fn(base_url + 'training.txt',
             base_url + 'validation.txt',
             6, 6, num_epochs=1)
execute_example()
# ## Hyperparameter Tuning
#
# ### Create Session
# +
import sagemaker
from sagemaker.tuner import IntegerParameter, CategoricalParameter, ContinuousParameter, HyperparameterTuner
sagemaker_session = sagemaker.Session()
bucket = "sagemaker.tech-sketch.jp"
prefix = "allennlp_test"
role = sagemaker.get_execution_role()
role = role.replace("service-role/", "")
# -
# ### Upload data
# +
from allennlp.common.file_utils import cached_path
root = "https://raw.githubusercontent.com/allenai/allennlp/master/tutorials/tagger/"
urls = [(root + file_name) for file_name in ("training.txt", "validation.txt")]
paths = [cached_path(u) for u in urls]
s3_paths = []
for path in paths:
s3_path = sagemaker_session.upload_data(path=path, bucket=bucket, key_prefix=prefix)
print("input spec (in this case, just an S3 path): {}".format(s3_path))
s3_paths.append(s3_path)
# -
# ### Make Estimator
# +
from sagemaker.pytorch import PyTorch
def from_root(path):
    """Resolve `path` relative to the project root (one level above cwd) as an absolute path."""
    project_root = os.path.join(os.path.realpath("."), "../")
    return os.path.abspath(os.path.join(project_root, path))
estimator = PyTorch(entry_point="tuning.py",
source_dir="../../allennlp-sagemaker-tuning",
dependencies=[from_root("example"), from_root(".venv")],
role=role,
framework_version="1.0.0",
train_instance_count=1,
train_instance_type="ml.p2.8xlarge",
hyperparameters={
"train-file-name": os.path.basename(s3_paths[0]),
"validation": os.path.basename(s3_paths[1]),
"epochs": 10
})
# -
# ### Define Parameter Range
hyperparameter_ranges = {
"lr": ContinuousParameter(0.01, 0.1),
"embedding-dim": CategoricalParameter([6, 12]),
"hidden-dim": CategoricalParameter([6, 12])
}
# ### Define Target Metrics
objective_metric_name = "validation loss"
objective_type = "Minimize"
metric_definitions = [
{"Name": objective_metric_name,
"Regex": "validation_loss=([0-9\\.]+)"}
]
# ### Create Tuner
tuner = HyperparameterTuner(
estimator,
objective_metric_name,
hyperparameter_ranges,
metric_definitions,
max_jobs=3,
max_parallel_jobs=1,
objective_type=objective_type)
# ### Execute Tuning
train_dir = os.path.dirname(s3_paths[0])
print(train_dir)
tuner.fit({"training": train_dir})
| notebooks/hyper_parameter_tuning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:mwl]
# language: python
# name: conda-env-mwl-py
# ---
# # Table of Contents
# This index notebook serves as a table of contents for live notebooks hosted on [Binder](http://mybinder.org/)
# ### 2016 - 2017
# [Meeting 1: pca-mnist.ipynb](./2016-2017.Meetings/01.pca-mnist.ipynb)
#
# [Meeting 2: hello_machine2.ipynb](./2016-2017.Meetings/02.hello_machine2.ipynb)
#
# [Meeting 5: naive_bayes_tweet_classifier_solution.ipynb](./2016-2017.Meetings/05.DIY_naive_bayes/naive_bayes_classifier_solution/naive_bayes_tweet_classifier.ipynb)
#
# [Meeting 6: Iris Data Analysis Solution.ipynb](./2016-2017.Meetings/06.K_Nearest_Neighbors/Iris/Iris%20Data%20Analysis%20Solution.ipynb)
#
# [Meeting 6: Shuttle Data Analysis Solution.ipynb](./2016-2017.Meetings/06.K_Nearest_Neighbors/Shuttle/Shuttle%20Data%20Analysis%20Solution.ipynb)
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: meaning
# language: python
# name: meaning
# ---
import numpy as np
from pprint import pprint
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rc('font', size=17)
# # Dishonest Answering Machine
# Falsy sentinel used for "silent" utterances and "blocked" eating states.
EMPTYSET = frozenset(set())
# +
def food_dynamics(food_presence):
    """Replenishes food with probability `p_food`, capped at 2 units."""
    replenished = rng.random() < p_food
    if not replenished:
        return food_presence
    return min(2, food_presence + 1)
def hunger_dynamics(hunger, time_since_eat):
    """Increases hunger by 1 / 2**time_since_eat (increment halves each step)."""
    increment = 1 / np.power(2, time_since_eat)
    return hunger + increment
def question_dynamics(q):
    """Returns 1 (a question is asked) with probability `p_q`, else 0."""
    asked = rng.random() < p_q
    return 1 if asked else 0
def utter_dynamics(u):
    # Utterances last a single step: whatever was said, reset to silent.
    return EMPTYSET
def eating_dynamics(e):
    # Eating state machine: 1 (eating) -> EMPTYSET (blocked),
    # EMPTYSET (blocked) -> 0 (inactive), 0 stays 0.
    # Note: frozenset() is falsy, so `if e:` only matches e == 1.
    if e:
        return EMPTYSET
    elif e == EMPTYSET:
        return 0
    return e
# Per-variable transition function applied at every timestep.
dynamics = dict(f=food_dynamics, q=question_dynamics, h=hunger_dynamics, u=utter_dynamics, e=eating_dynamics)
def plot_food(axis, states, mnings):
    """Plots the food level over time.

    Pre-action values are drawn as circles coloured by that step's meaning
    (module-level `meaning_colors`); post-action values as purple stars,
    both slightly offset on the x axis so they stay distinguishable.
    """
    times = list(range(len(states['post'])))
    for t in times:
        if t:
            tpre = t - pre_post_offset
            tpost = t + pre_post_offset
            axis.plot(tpre, states['pre'][t]['f'], 'o', color=meaning_colors[mnings[t]])
            axis.plot(tpost, states['post'][t]['f'], '*', color='purple')
        else:
            # t == 0: only the start state exists, plot it once.
            axis.plot(t, states['post'][t]['f'], '*', color='purple')
    axis.set_ylim([0 - margin, 2 + margin])
    axis.set_ylabel('f')
    axis.set_xticks(time_steps)
    axis.grid()
def plot_question_or_hunger(axis, states, mnings, key):
    """works for question or hunger"""
    # Same pre/post circle-and-star convention as plot_food.
    times = list(range(len(states['post'])))
    for t in times:
        if t:
            tpre = t - pre_post_offset
            tpost = t + pre_post_offset
            axis.plot(tpre, states['pre'][t][key], 'o', color=meaning_colors[mnings[t]])
            axis.plot(tpost, states['post'][t][key], '*', color='purple')
        else:
            axis.plot(t, states['post'][t][key], '*', color='purple')
    axis.set_ylabel(key)
    axis.set_xticks(time_steps)
    # Questions are binary, so fix the y range and grid; hunger is unbounded
    # and instead gets a horizontal line at the hunger threshold.
    if key == 'q':
        axis.set_ylim([0 - margin, 1 + margin])
        axis.grid()
    elif key == 'h':
        axis.axhline(h_threshold)
def plot_utterance_or_eating(axis, states, mnings, key):
    """works for utterance or eating"""
    # Map the utterance/eating states onto y-axis levels 0..2; the tick
    # labels below give each level its meaning for the selected key.
    m = {EMPTYSET: 0, 'yes': 2, 'no': 1, 0: 1, 1: 2}
    for t in time_steps:
        if t:
            tpre = t - pre_post_offset
            tpost = t + pre_post_offset
            axis.plot(tpre, m[states['pre'][t][key]], 'o', color=meaning_colors[mnings[t]])
            axis.plot(tpost, m[states['post'][t][key]], '*', color='purple')
        else:
            # Fix: read the 'post' logger at t == 0 for consistency with the
            # other plotting helpers (both loggers hold the same start state,
            # so the plotted value is unchanged).
            axis.plot(t, m[states['post'][t][key]], '*', color='purple')
    axis.set_ylabel(key)
    axis.set_ylim([0 - margin, 2 + margin])
    axis.set_yticks([0, 1, 2])
    if key == 'u':
        axis.set_yticklabels(['silent', 'no', 'yes'])
    else:
        axis.set_yticklabels(['blocked', 'inactive', 'eating'])
    axis.set_xticks(time_steps)
    axis.grid()
# Dispatch table mapping each state variable to its plotting helper.
plotter = dict(
    f=plot_food,
    q=plot_question_or_hunger,
    h=plot_question_or_hunger,
    u=plot_utterance_or_eating,
    e=plot_utterance_or_eating
)
# +
def eat(sstate):
    """Eat action: consume one food unit and reduce hunger.

    Triggers only when food is present, the machine is hungry, it is silent
    (u == EMPTYSET) and eating is not blocked (e in {0, 1}); otherwise raises
    ValueError, which the caller catches to fall back on other actions.
    """
    if sstate['f'] > 0 and sstate['h'] and sstate['u'] == EMPTYSET and sstate['e'] in {0, 1}:
        new = sstate.copy()
        new['u'] = EMPTYSET
        new['e'] = 1
        if sstate['f'] > 0:  # NOTE(review): always true here (checked above)
            new['f'] -= 1
        new['h'] = max(sstate['h'] - h_decrease, 0)
        return new
    else:
        raise(ValueError('not triggered'))
def utter(sstate):
    """Utter action: answer a pending question about food availability.

    When not hungry the machine answers truthfully ('yes'/'no' depending on
    food presence); when hungry it always answers 'no' — possibly a lie —
    which the main loop later detects by comparing the answer with the
    pre-action food state. Raises ValueError when no question is pending or
    the machine is currently eating.
    """
    new = sstate.copy()
    if sstate['h'] <= h_threshold:  # not hungry
        # there is food
        if sstate['f'] > 0 and sstate['q'] and sstate['e'] in {EMPTYSET, 0}:
            new['u'] = 'yes'
            new['e'] = 0
            return new
        # there is no food
        elif sstate['f'] == 0 and sstate['q'] and sstate['e'] in {EMPTYSET, 0}:
            new['u'] = 'no'
            new['e'] = 0
            return new
        else:
            raise(ValueError('not triggered'))
    else:  # hungry
        if sstate['q'] and sstate['e'] in {EMPTYSET, 0}:
            new['u'] = 'no'
            new['e'] = 0
            return new
        else:
            raise(ValueError('not triggered'))
# -
# Search over seeds for a run that exhibits a lie plus both 'yes' and 'no'
# answers, then plot that run's trajectory.
for iii in range(10000):
    rng = np.random.default_rng(iii)
    start_time = 0
    time_steps = [start_time]
    time_horizon = 10
    pre_post_offset = .08  # x-offset separating pre/post markers in plots
    last_eat = 0  # counter timesteps since last eaten
    p_q = .7  # prob ask question
    p_food = .7  # prob food replenishes
    h_threshold = .55  # hunger threshold
    h_decrease = 8/10  # hunger decrement step
    start_state = dict(f=0, q=0, h=0, u=EMPTYSET, e=0)
    # pprint(start_state)
    # pprint(start_state.copy())
    pre_action_state_logger = {start_time: start_state}
    post_action_state_logger = {start_time: start_state}
    meanings = ['neutral']
    meaning_colors = {'neutral': 'k', 'eat': 'red', 'yes': 'green', 'no': 'blue'}
    margin = .2
    while start_time < time_horizon:
        assert start_state['h'] >= 0
        start_time += 1
        time_steps.append(start_time)
        last_eat += 1
        new_state = start_state.copy()
        # apply independent dynamics
        for k, s in start_state.items():
            if k == 'h':
                new_state[k] = dynamics[k](s, last_eat)
            else:
                new_state[k] = dynamics[k](s)
            if new_state[k] is None:
                print(f'pb at time {start_time}')
                raise(ValueError(f'{k}'))
        pre_action_state_logger[start_time] = new_state
        # act
        try:  # first try to eat (priority action)
            new_state = eat(new_state)
            last_eat = 0
            meanings.append('eat')
        except ValueError:
            try:  # if eating fails, try to utter
                new_state = utter(new_state)
                meanings.append(new_state['u'])
            except ValueError:
                meanings.append('neutral')
        post_action_state_logger[start_time] = new_state
        start_state = new_state
    # print(f'total number of timesteps {start_time + 1}')
    assert start_time+1 == len(post_action_state_logger)
    assert start_time+1 == len(pre_action_state_logger)
    assert start_time+1 == len(meanings)
    assert start_time+1 == len(time_steps)
    # A lie: the machine answered 'no' while food was actually present.
    lies = []
    for t, m in zip(time_steps, meanings):
        if pre_action_state_logger[t]['f'] and m == 'no':
            lies.append(True)
        else:
            lies.append(False)
    if any(lies) and 'yes' in meanings and 'no' in meanings:
        break
# Notebook-cell echo of the stopping condition for the chosen seed.
any(lies) and 'yes' in meanings and 'no' in meanings
print(f'working seed was {iii}')
pprint(meaning_colors)
all_states = {'pre': pre_action_state_logger, 'post': post_action_state_logger}
fig, axes = plt.subplots(len(start_state), 1, figsize=(15, 12))
plotter['f'](axes[0], all_states, meanings)
plotter['q'](axes[1], all_states, meanings, 'q')
plotter['h'](axes[2], all_states, meanings, 'h')
plotter['u'](axes[3], all_states, meanings, 'u')
plotter['e'](axes[4], all_states, meanings, 'e')
fig.tight_layout()
| dishonest_machine.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **This notebook is an exercise in the [Data Cleaning](https://www.kaggle.com/learn/data-cleaning) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/character-encodings).**
#
# ---
#
# In this exercise, you'll apply what you learned in the **Character encodings** tutorial.
#
# # Setup
#
# The questions below will give you feedback on your work. Run the following cell to set up the feedback system.
# Bind the Kaggle Learn feedback system to this notebook's globals.
from learntools.core import binder
binder.bind(globals())
from learntools.data_cleaning.ex4 import *
print("Setup Complete")
# # Get our environment set up
#
# The first thing we'll need to do is load in the libraries we'll be using.
# +
# modules we'll use
import pandas as pd
import numpy as np
# helpful character encoding module
import chardet
# set seed for reproducibility
np.random.seed(0)
# -
# # 1) What are encodings?
#
# You're working with a dataset composed of bytes. Run the code cell below to print a sample entry.
sample_entry = b'\xa7A\xa6n'
print(sample_entry)
print('data type:', type(sample_entry))
# You notice that it doesn't use the standard UTF-8 encoding.
#
# Use the next code cell to create a variable `new_entry` that changes the encoding from `"big5-tw"` to `"utf-8"`. `new_entry` should have the bytes datatype.
# +
# Decode from Big5 to str, then re-encode (str.encode defaults to UTF-8).
before = sample_entry.decode("big5-tw")
new_entry = before.encode()
# Check your answer
q1.check()
# -
# Lines below will give you a hint or solution code
q1.hint()
q1.solution()
# # 2) Reading in files with encoding problems
#
# Use the code cell below to read in this file at path `"../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv"`.
#
# Figure out what the correct encoding should be and read in the file to a DataFrame `police_killings`.
# +
# TODO: Load in the DataFrame correctly.
police_killings = pd.read_csv("../input/fatal-police-shootings-in-the-us/PoliceKillingsUS.csv", encoding='Windows-1252')
# Check your answer
q2.check()
# -
# Feel free to use any additional code cells for supplemental work. To get credit for finishing this question, you'll need to run `q2.check()` and get a result of **Correct**.
# +
# (Optional) Use this code cell for any additional work.
# -
# Lines below will give you a hint or solution code
q2.hint()
q2.solution()
# # 3) Saving your files with UTF-8 encoding
#
# Save a version of the police killings dataset to CSV with UTF-8 encoding. Your answer will be marked correct after saving this file.
#
# Note: When using the `to_csv()` method, supply only the name of the file (e.g., `"my_file.csv"`). This saves the file at the filepath `"/kaggle/working/my_file.csv"`.
# +
# TODO: Save the police killings dataset to CSV (to_csv writes UTF-8 by default)
police_killings.to_csv("my_file.csv")
# Check your answer
q3.check()
# -
# Lines below will give you a hint or solution code
q3.hint()
q3.solution()
# # (Optional) More practice
#
# Check out [this dataset of files in different character encodings](https://www.kaggle.com/rtatman/character-encoding-examples). Can you read in all the files with their original encodings and then save them out as UTF-8 files?
#
# If you have a file that's in UTF-8 but has just a couple of weird-looking characters in it, you can try out the [ftfy module](https://ftfy.readthedocs.io/en/latest/#) and see if it helps.
#
# # Keep going
#
# In the final lesson, learn how to [**clean up inconsistent text entries**](https://www.kaggle.com/alexisbcook/inconsistent-data-entry) in your dataset.
# ---
#
#
#
#
# *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/172650) to chat with other Learners.*
| Platforms/Kaggle/Courses/Data_Cleaning/4.Character_Encodings/exercise-character-encodings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Web Scrapping Activities
# Import Dependencies
from bs4 import BeautifulSoup as bs
from splinter import Browser
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
# # 1. NASA Mars News
# +
# Create an executable path
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
# Visit the NASA mars news site
url = 'https://mars.nasa.gov/news/'
#print(browser)
browser.visit(url)
# Convert the browser html to a soup object and then quit the browser
html = browser.html
soup = bs(html, 'html.parser')
# Grab the latest news slide, then its title and teaser paragraph.
slide_element = soup.select_one('ul.item_list li.slide')
slide_element.find("div", class_='content_title')
news_title = slide_element.find("div", class_='content_title').get_text()
print(news_title)
news_p = slide_element.find("div", class_='article_teaser_body').get_text()
print(news_p)
# -
# # 2. JPL Mars Space Images - Featured Image
# +
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
url = "https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html"
browser.visit(url)
#use splinter code to click the second button for the full image
full_image_element = browser.find_by_tag('button')[1]
full_image_element.click()
html = browser.html
image_soup = bs(html, 'html.parser')
# The relative image URL is joined onto the site root to get a full URL.
img_url_rel = image_soup.find('img', class_='fancybox-image').get('src')
img_url = f'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/{img_url_rel}'
# -
# # 3. Mars Facts
# read_html returns all tables on the page; the first is the facts table.
df = pd.read_html('https://space-facts.com/mars/')[0]
df.columns=['Description', 'Mars']
df.set_index('Description', inplace=True)
df
# # 4. Mars Hemispheres
# +
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
# +
# Click through each hemisphere result, collect its title and full-size
# sample image link, then navigate back to the results page.
hemisphere_image_urls = []
links = browser.find_by_css("a.product-item h3")
for index in range(len(links)):
    hemisphere = {}
    # Re-query the links each pass: the old references go stale after back().
    browser.find_by_css("a.product-item h3")[index].click()
    sample_element = browser.links.find_by_text('Sample').first
    # title = browser.find_by_css("h2.title").text
    # link = sample_element["href"]
    hemisphere['title'] = browser.find_by_css("h2.title").text
    hemisphere['link'] = sample_element['href']
    hemisphere_image_urls.append(hemisphere)
    print("Retrieve the title and link")
    browser.back()
print(hemisphere_image_urls)
# -
browser.quit()
| Missions_to_Mars/mission_to_mars_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from bs4 import BeautifulSoup
import requests
import json
# Article URLs to scrape (a set, so iteration order is arbitrary).
url_list = {'https://www.wikihow.com/Make-a-Cheap-Terrarium','https://www.wikihow.com/Clean-a-Typewriter',
            'https://www.wikihow.com/Make-a-Thank-You-Card','https://www.wikihow.com/Research-the-Market-Before-Launching-a-Product-or-Service'}
rawtxt = []
texts = {}
text_set = []
newdict = {}
count = 0
for url in url_list:
    print('getting from '+url)
    source = requests.get(url).text
    soup = BeautifulSoup(source,'lxml')
    # Second <p> holds the article summary; <title> holds the page title.
    description = [p1.get_text() for p1 in soup.find_all('p')][1]
    title = soup.find('title').get_text()
    #text_set.append(soup.find_all('b').get_text())
    # The step-by-step content lives in the page's JSON-LD metadata blocks.
    step_set = soup.find_all('script',{'type':'application/ld+json'})
    step_list = [json.loads(p1.get_text()) for p1 in step_set]
    step_text = []
    # NOTE(review): assumes the second JSON-LD block (index 1) carries the
    # HowTo 'step' data — confirm against the current page structure.
    for i in range(len(step_list[1]['step'])):
        for j in range(len(step_list[1]['step'][i]['itemListElement'])):
            step_text.append(BeautifulSoup(step_list[1]['step'][i]['itemListElement'][j]['itemListElement']['text']).get_text())
    texts[count] = {}
    texts[count]['url'] = url
    texts[count]['title'] = title
    texts[count]['description'] = description
    texts[count]['steps'] = ' '.join(step_text)
    count = count +1
# Building rawtxt string
rawtxt = ' '.join([texts[i]['steps'] for i in range(len(texts))])
len(rawtxt)
# Saving texts dictionary in a pickle file for later splitting to test and train.
# Fix: the original line `with open()` was an incomplete statement (syntax
# error: no arguments and no body); completed the save it describes.
import pickle

with open('texts.pkl', 'wb') as f:
    pickle.dump(texts, f)
| scraping_web_data/Scrape_wikihowto.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tkinter as tk
from tkinter import *
from tkinter import messagebox
# +
# Main application window: one entry box for the digit label plus four
# buttons driving the capture -> dataset -> train -> predict workflow.
window=tk.Tk()
window.title("Handwritten digit recognition")
l1=tk.Label(window,text="Digit",font=('Algerian',20))
l1.place(x=5,y=0)
t1=tk.Entry(window,width=20, border=5)
t1.place(x=150, y=0)
def screen_capture():
    """Opens MS Paint and saves 5 screenshots of its canvas region.

    The digit typed into the entry box `t1` names the destination sub-folder
    under captured_images/. NOTE(review): the Paint executable and output
    paths are hard-coded to this machine — adjust before running elsewhere.
    """
    import pyscreenshot as ImageGrab
    import time
    import os
    os.startfile("C:/ProgramData/Microsoft/Windows/Start Menu/Programs/Accessories/Paint")
    s1=t1.get()
    os.chdir("E:/DS and ML/Untitled Folder/Untitled Folder/captured_images")
    os.mkdir(s1)
    os.chdir("E:/DS and ML/Untitled Folder/Untitled Folder/")
    images_folder="captured_images/"+s1+"/"
    time.sleep(15)  # give the user time to start drawing in Paint
    for i in range(0,5):
        time.sleep(8)  # one capture every 8 seconds
        im=ImageGrab.grab(bbox=(60,170,400,550)) #x1,y1,x2,y2
        print("saved......",i)
        im.save(images_folder+str(i)+'.png')
        print("clear screen now and redraw now........")
    messagebox.showinfo("Result","Capturing screen is completed!!")
b1=tk.Button(window,text="1. Open paint and capture the screen", font=('Algerian',15),bg="orange",fg="black",command=screen_capture)
b1.place(x=5, y=50)
def generate_dataset():
    """Converts the captured digit screenshots into rows of dataset.csv.

    Each image is grayscaled, blurred, resized to 28x28 and binarised
    (pixel > 100 -> 1, else 0); one CSV row is [label, pixel0..pixel783].
    """
    import cv2
    import csv
    import glob
    header = ["label"]
    for i in range(0, 784):
        header.append("pixel" + str(i))
    # Fix: files handed to csv.writer must be opened with newline='' —
    # without it every row is followed by a blank line on Windows.
    with open('dataset.csv', 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(header)
    for label in range(10):
        dirList = glob.glob("captured_images/" + str(label) + "/*.png")
        for img_path in dirList:
            im = cv2.imread(img_path)
            im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            im_gray = cv2.GaussianBlur(im_gray, (15, 15), 0)
            roi = cv2.resize(im_gray, (28, 28), interpolation=cv2.INTER_AREA)
            data = [label]
            rows, cols = roi.shape
            ## Add pixels one by one into the data array, binarised.
            for i in range(rows):
                for j in range(cols):
                    data.append(1 if roi[i, j] > 100 else 0)
            with open('dataset.csv', 'a', newline='') as f:
                writer = csv.writer(f)
                writer.writerow(data)
    messagebox.showinfo("Result", "Generating dataset is completed!!")
b2=tk.Button(window,text="2. Generate dataset", font=('Algerian',15),bg="pink",fg="blue",command=generate_dataset)
b2.place(x=5, y=100)
def train_save_accuracy():
    """Trains a linear SVM on dataset.csv, saves it and reports accuracy."""
    import pandas as pd
    from sklearn.utils import shuffle
    data =pd.read_csv('dataset.csv')
    data=shuffle(data)
    X = data.drop(["label"],axis=1)
    Y= data["label"]
    from sklearn.model_selection import train_test_split
    # 80/20 train/test split.
    train_x,test_x,train_y,test_y = train_test_split(X,Y, test_size = 0.2)
    import joblib
    from sklearn.svm import SVC
    classifier=SVC(kernel="linear", random_state=6)
    classifier.fit(train_x,train_y)
    # Persist the fitted model for the live-prediction button.
    joblib.dump(classifier, "model/digit_recognizer")
    from sklearn import metrics
    prediction=classifier.predict(test_x)
    acc=metrics.accuracy_score(prediction, test_y)
    messagebox.showinfo("Result",f"Your accuracy is {acc}")
b3=tk.Button(window,text="3. Train the model, save it and calculate accuracy", font=('Algerian',15),bg="green",fg="white",command=train_save_accuracy)
b3.place(x=5, y=150)
def prediction():
    """Opens Paint and continuously classifies the digit drawn on its canvas.

    Grabs the canvas region in a loop, preprocesses it the same way as the
    training data (grayscale, blur, threshold, 28x28, binarise) and overlays
    the saved model's prediction on the captured frame.
    """
    import joblib
    import cv2
    import numpy as np #pip install numpy
    import time
    import pyscreenshot as ImageGrab
    import os
    os.startfile("C:/ProgramData/Microsoft/Windows/Start Menu/Programs/Accessories/Paint")
    model=joblib.load("model/digit_recognizer")
    images_folder="img/"
    time.sleep(15)  # give the user time to draw
    while True:
        img=ImageGrab.grab(bbox=(60,170,400,500))
        img.save(images_folder+"img.png")
        im = cv2.imread(images_folder+"img.png")
        im_gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
        im_gray =cv2.GaussianBlur(im_gray, (15,15), 0)
        #Threshold the image
        ret, im_th = cv2.threshold(im_gray,100, 255, cv2.THRESH_BINARY)
        roi = cv2.resize(im_th, (28,28), interpolation =cv2.INTER_AREA)
        rows,cols=roi.shape
        X = []
        ## Add pixel one by one into data array
        for i in range(rows):
            for j in range(cols):
                k = roi[i,j]
                if k>100:
                    k=1
                else:
                    k=0
                X.append(k)
        predictions =model.predict([X])
        print("Prediction:",predictions[0])
        cv2.putText(im, "Prediction is: "+str(predictions[0]), (20,20), 0, 0.8,(0,255,0),2,cv2.LINE_AA)
        cv2.startWindowThread()
        cv2.namedWindow("Result")
        cv2.imshow("Result",im)
        cv2.waitKey(10000)
        # NOTE(review): the 10s waitKey above already consumes key events, so
        # this Enter check rarely fires — confirm the intended exit-key flow.
        if cv2.waitKey(1)==13: #27 is the ascii value of esc, 13 is the ascii value of enter
            break
    cv2.destroyAllWindows()
b4=tk.Button(window,text="4. Live prediction", font=('Algerian',15),bg="white",fg="red",command=prediction)
b4.place(x=5, y=200)
window.geometry("600x300")
window.mainloop()
# -
| .ipynb_checkpoints/GUI HDR-checkpoint.ipynb |
# + [markdown] colab_type="text" id="kcw2prXvB0QK"
# * Builds a feed-forward agent, the I2AAgent.
# * Includes the Copy-model agent from the paper, where the GTM simply copies frames that are passed in.
# * Includes a model with size preserving convolutional network.
# * A fully fledged I2A agent would require a pre-trained environment model.
#
# This colab shows how to build the agent, and how to distill the I2A agent policy into its internal rollout policy. This colab does not show how to implement full A3C training with the A2C loss.
# + [markdown] colab_type="toc" id="hiWkhj0UCIY5"
# >>[The PillEater environment](#scrollTo=3ZAuNuy8E2lg&uniqifier=15)
#
# >>[The components](#scrollTo=nY3LE6KkCCKR&uniqifier=15)
#
# >>[Putting it all together](#scrollTo=R4NTKiOXrHQU&uniqifier=15)
#
#
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 251, "output_extras": [{"item_id": 6}]} colab_type="code" executionInfo={"elapsed": 2835, "status": "ok", "timestamp": 1512781859127, "user": {"displayName": "S\u00<NAME>", "photoUrl": "//lh4.googleusercontent.com/-7lP1T3HsCAw/AAAAAAAAAAI/AAAAAAAAABY/fWvmCeoJXTk/s50-c-k-no/photo.jpg", "userId": "102775271233982205447"}, "user_tz": 480} id="PPgmZpt9O8bj" outputId="4d98b856-b852-427d-a765-0687ee6e45af"
# !pip install dm-sonnet
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="Cz8J6DM5MXqe" slideshow={"slide_type": "slide"}
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import matplotlib.pyplot as plt
import numpy as np
import sonnet as snt
import tensorflow as tf
import time
# + [markdown] colab_type="text" id="3ZAuNuy8E2lg"
# ## The PillEater environment
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="KNUaDXN1GoXp"
# 15x19 default maze layout: 1 = wall, 0 = free corridor. The border is all
# walls, which parse_map/get_random_position rely on.
STANDARD_MAP = np.array([
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
    [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1],
    [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
    [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1],
    [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1],
    [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1],
    [1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1],
    [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1],
    [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1],
    [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1],
    [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
    [1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1],
    [1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
def get_random_position(map_array):
  """Gets a random available (zero-valued) position in a binary map array.

  Args:
    map_array: numpy array of the map to search an available position on;
      entries equal to 1 are occupied, 0 are free.

  Returns:
    The chosen random position as an np.int32 array (one entry per dimension).

  Raises:
    ValueError: if there is no available space in the map.
  """
  # Bug fix: the original raised when map_array.sum() <= 0 — i.e. when the map
  # was entirely FREE — and looped forever on a fully occupied map. The
  # correct "no space" condition is that no cell is zero.
  if not (map_array == 0).any():
    raise ValueError("There is no available space in the map.")
  map_dims = len(map_array.shape)
  pos = np.zeros(map_dims, dtype=np.int32)
  # Rejection-sample uniformly until a free cell is hit; guaranteed to
  # terminate because at least one zero cell exists.
  while True:
    result = map_array
    for i in range(map_dims):
      pos[i] = np.random.randint(map_array.shape[i])
      result = result[pos[i]]
    if result == 0:
      break
  return pos
def update_2d_pos(array_map, pos, action, pos_result):
  """Writes into `pos_result` the position reached from `pos` by `action`.

  `action` is 1-based; the transition table `array_map` (as built by
  `parse_map`) is indexed with `action - 1`. Returns `pos_result`.
  """
  destination = array_map[pos[0]][pos[1]][action - 1]
  pos_result[0], pos_result[1] = destination[0], destination[1]
  return pos_result
def parse_map(map_array):
  """Parses a map when there are actions: stay, right, up, left, down.

  Args:
    map_array: 2D numpy array that contains the map (0 corridor, 1 wall).

  Returns:
    A (height, width, actions, 2) numpy array giving the resulting position
    for each position + action (moves into a wall keep the actor in place),
    and a 2D numpy array (height, width) with the walls of the map.

  Raises:
    ValueError: if the map does not contain only zeros and ones.
  """
  offsets = [[0, 0], [0, 1], [-1, 0], [0, -1], [1, 0]]
  walls = np.zeros_like(map_array)
  transitions = []
  for i in range(map_array.shape[0]):
    row = []
    for j in range(map_array.shape[1]):
      cell = map_array[i, j]
      if cell == 0:
        candidates = [[i + di, j + dj] for di, dj in offsets]
      elif cell == 1:
        candidates = [[i, j] for _ in offsets]
        walls[i, j] = 1
      else:
        raise ValueError("Option not understood, %d" % cell)
      # Any move that would land on a wall leaves the actor where it is.
      for target in candidates:
        if map_array[target[0]][target[1]] == 1:
          target[0] = i
          target[1] = j
      row.append(candidates)
    transitions.append(row)
  return np.array(transitions), walls
def observation_as_rgb(obs):
  """Collapses the 6 planes of `obs` into a 3-channel RGB image.

  Priority per pixel: Pillman (green) > ghosts (red, fading to yellow when
  edible) > pill (cyan) > food (blue) > wall (white); everything else black.

  Args:
    obs: the observation as a numpy array of shape (height, width, 6).

  Returns:
    An RGB image as a float32 numpy array with values between 0 and 1.
  """
  height, width = obs.shape[0], obs.shape[1]
  rgb = np.zeros((height, width, 3), dtype=np.float32)
  for x in range(height):
    for y in range(width):
      cell = obs[x, y]
      if cell[PillEater.PILLMAN] == 1:
        rgb[x, y] = [0, 1, 0]
      elif cell[PillEater.GHOSTS] > 0. or cell[PillEater.GHOSTS_EDIBLE] > 0.:
        g = cell[PillEater.GHOSTS]
        ge = cell[PillEater.GHOSTS_EDIBLE]
        rgb[x, y] = [g + ge, ge, 0]
      elif cell[PillEater.PILL] == 1:
        rgb[x, y] = [0, 1, 1]
      elif cell[PillEater.FOOD] == 1:
        rgb[x, y] = [0, 0, 1]
      elif cell[PillEater.WALLS] == 1:
        rgb[x, y] = [1, 1, 1]
  return rgb
class PillEater(object):
WALLS = 0
FOOD = 1
PILLMAN = 2
GHOSTS = 3
GHOSTS_EDIBLE = 4
PILL = 5
NUM_ACTIONS = 5
MODES = ('regular', 'avoid', 'hunt', 'ambush', 'rush')
def __init__(self, mode, frame_cap=3000):
assert mode in PillEater.MODES
self.nghosts_init = 1
self.ghost_speed_init = 0.5
self.ghost_speed = self.ghost_speed_init
self.ghost_speed_increase = 0.1
self.end_on_collect = False
self.npills = 2
self.pill_duration = 20
self.seed = 123
self.discount = 1
self.stochasticity = 0.05
self.obs_is_rgb = True
self.frame_cap = frame_cap
self.safe_distance = 5
map_array = STANDARD_MAP
self.map, self.walls = parse_map(map_array)
self.map = np.array(self.map)
self.nactions = self.map.shape[2]
self.height = self.map.shape[0]
self.width = self.map.shape[1]
self.reverse_dir = (4, 5, 2, 3)
self.dir_vec = np.array([[0, 1], [-1, 0], [0, -1], [1, 0]])
self.world_state = dict(
pillman=self._make_pillman(),
ghosts=[],
food=np.zeros(shape=(self.height, self.width), dtype=np.float32),
pills=[None] * self.npills,
power=0
)
self.nplanes = 6
self.image = np.zeros(
shape=(self.height, self.width, self.nplanes), dtype=np.float32)
self.color_image = np.zeros(shape=(3, self.height, self.width),
dtype=np.float32)
self.frame = 0
self.reward = 0.
self.pcontinue = 1.
self._init_level(1)
self._make_image()
self.mode = mode
self.timer = 0
if self.mode == 'regular':
self.step_reward = 0
self.food_reward = 1
self.big_pill_reward = 2
self.ghost_hunt_reward = 5
self.ghost_death_reward = 0
self.all_pill_terminate = False
self.all_ghosts_terminate = False
self.all_food_terminate = True
self.timer_terminate = -1
elif self.mode == 'avoid':
self.step_reward = 0.1
self.food_reward = -0.1
self.big_pill_reward = -5
self.ghost_hunt_reward = -10
self.ghost_death_reward = -20
self.all_pill_terminate = False
self.all_ghosts_terminate = False
self.all_food_terminate = True
self.timer_terminate = 128
elif self.mode == 'hunt':
self.step_reward = 0
self.food_reward = 0
self.big_pill_reward = 1
self.ghost_hunt_reward = 10
self.ghost_death_reward = -20
self.all_pill_terminate = False
self.all_ghosts_terminate = True
self.all_food_terminate = False
self.timer_terminate = -1
elif self.mode == 'ambush':
self.step_reward = 0
self.food_reward = -0.1
self.big_pill_reward = 0
self.ghost_hunt_reward = 10
self.ghost_death_reward = -20
self.all_pill_terminate = False
self.all_ghosts_terminate = True
self.all_food_terminate = False
self.timer_terminate = -1
elif self.mode == 'rush':
self.step_reward = 0
self.food_reward = -0.1
self.big_pill_reward = 10
self.ghost_hunt_reward = 0
self.ghost_death_reward = 0
self.all_pill_terminate = True
self.all_ghosts_terminate = False
self.all_food_terminate = False
self.timer_terminate = -1
def _make_pillman(self):
return self._make_actor(0)
def _make_enemy(self):
return self._make_actor(self.safe_distance)
def _make_actor(self, safe_distance):
"""Creates an actor.
An actor is a `ConfigDict` with a positions `pos` and a direction `dir`.
The position is an array with two elements, the height and width. The
direction is an integer representing the direction faced by the actor.
Args:
safe_distance: a `float`. The minimum distance from Pillman.
Returns:
A `ConfigDict`.
"""
actor = {}
if safe_distance > 0:
occupied_map = np.copy(self.walls)
from_ = (self.world_state['pillman']['pos'] - np.array(
[self.safe_distance, self.safe_distance]))
to = (self.world_state['pillman']['pos'] + np.array(
[self.safe_distance, self.safe_distance]))
from_[0] = max(from_[0], 1)
from_[1] = max(from_[1], 1)
to[0] = min(to[0], occupied_map.shape[0])
to[1] = min(to[1], occupied_map.shape[1])
occupied_map[from_[0]:to[0], from_[1]:to[1]] = 1
actor['pos'] = get_random_position(occupied_map)
actor['dir'] = np.random.randint(4)
else:
actor['pos'] = get_random_position(self.walls)
actor['dir'] = np.random.randint(4)
return actor
def _make_pill(self):
pill = dict(
pos=get_random_position(self.walls)
)
return pill
def _init_level(self, level):
"""Initialises the level."""
self.level = level
self._fill_food(self.walls, self.world_state['food'])
self.world_state['pills'] = [self._make_pill() for _ in range(self.npills)]
self.world_state['pillman']['pos'] = get_random_position(self.walls)
self.nghosts = int(self.nghosts_init + math.floor((level - 1) / 2))
self.world_state['ghosts'] = [self._make_enemy() for _ in range(self.nghosts)]
self.world_state['power'] = 0
self.ghost_speed = (
self.ghost_speed_init + self.ghost_speed_increase * (level - 1))
self.timer = 0
def _fill_food(self, walls, food):
food.fill(-1)
food *= walls
food += 1
self.nfood = food.sum()
def _get_food(self, posx, posy):
self.reward += self.food_reward
self.world_state['food'][posx][posy] = 0
self.nfood -= 1
if self.nfood == 0 and self.all_food_terminate:
self._init_level(self.level + 1)
def _get_pill(self, pill_index):
self.world_state['pills'].pop(pill_index)
self.reward += self.big_pill_reward
self.world_state['power'] = self.pill_duration
if (not self.world_state['pills']) and self.all_pill_terminate:
self._init_level(self.level + 1)
def _kill_ghost(self, ghost_index):
self.world_state['ghosts'].pop(ghost_index)
self.reward += self.ghost_hunt_reward
if (not self.world_state['ghosts']) and self.all_ghosts_terminate:
self._init_level(self.level + 1)
def _die_by_ghost(self):
self.reward += self.ghost_death_reward
self.pcontinue = 0
def _move_pillman(self, action):
"""Moves Pillman following the action in the proto `action_proto`."""
action += 1 # our code is 1 based
pos = self.world_state['pillman']['pos']
pillman = self.world_state['pillman']
update_2d_pos(self.map, pos, action, pos)
if self.world_state['food'][pos[0]][pos[1]] == 1:
self._get_food(pos[0], pos[1])
for i, pill in enumerate(self.world_state['pills']):
pos = pill['pos']
if pos[0] == pillman['pos'][0] and pos[1] == pillman['pos'][1]:
self._get_pill(i)
break
def _move_ghost(self, ghost):
"""Moves the given ghost."""
pos = ghost['pos']
new_pos = np.zeros(shape=(2,), dtype=np.float32)
pillman = self.world_state['pillman']
available = []
for i in range(2, self.nactions + 1):
update_2d_pos(self.map, pos, i, new_pos)
if pos[0] != new_pos[0] or pos[1] != new_pos[1]:
available.append(i)
n_available = len(available)
if n_available == 1:
ghost['dir'] = available[0]
elif n_available == 2:
if ghost['dir'] not in available:
if self.reverse_dir[ghost['dir'] - 2] == available[0]:
ghost['dir'] = available[1]
else:
ghost['dir'] = available[0]
else:
rev_dir = self.reverse_dir[ghost['dir'] - 2]
for i in range(n_available):
if available[i] == rev_dir:
available.pop(i)
n_available -= 1
break
prods = np.zeros(n_available, dtype=np.float32)
x = np.array(
[pillman['pos'][0] - pos[0], pillman['pos'][1] - pos[1]], dtype=np.float32)
norm = np.linalg.norm(x)
if norm > 0:
x *= 1. / norm
for i in range(n_available):
prods[i] = np.dot(x, self.dir_vec[available[i] - 2])
if self.world_state['power'] == 0:
if self.stochasticity > np.random.uniform():
j = np.random.randint(n_available)
else:
# move towards pillman:
j = np.argmax(prods)
else:
# run away from pillman:
j = np.argmin(prods)
ghost['dir'] = available[j]
update_2d_pos(self.map, pos, ghost['dir'], pos)
def _make_image(self):
"""Represents world in a `height x width x 6` `Tensor`."""
self.image.fill(0)
self.image[:, :, PillEater.WALLS] = self.walls
self.image[:, :, PillEater.FOOD] = self.world_state['food']
self.image[self.world_state['pillman']['pos'][0], self.world_state['pillman']['pos'][1],
PillEater.PILLMAN] = 1
for ghost in self.world_state['ghosts']:
edibility = self.world_state['power'] / float(self.pill_duration)
self.image[ghost['pos'][0], ghost['pos'][1], PillEater.GHOSTS] = 1. - edibility
self.image[ghost['pos'][0], ghost['pos'][1], PillEater.GHOSTS_EDIBLE] = edibility
for pill in self.world_state['pills']:
self.image[pill['pos'][0], pill['pos'][1], PillEater.PILL] = 1
return self.image
  def start(self):
    """Starts a new episode.

    Returns:
      A (image, reward, pcontinue) tuple for the first time-step.
    """
    self.frame = 0
    self._init_level(1)
    self.reward = 0
    self.pcontinue = 1
    self.ghost_speed = self.ghost_speed_init
    return self._make_image(), self.reward, self.pcontinue
  def step(self, action):
    """Advances environment one time-step following the given action.

    Updates `self.reward`, `self.pcontinue` and `self.image`; results are read
    back through `observation()`.
    """
    self.frame += 1
    pillman = self.world_state['pillman']
    self.pcontinue = self.discount
    self.reward = self.step_reward
    self.timer += 1
    # Update world state
    self.world_state['power'] = max(0, self.world_state['power']-1)
    # move pillman
    self._move_pillman(action)
    for i, ghost in enumerate(self.world_state['ghosts']):
      # first check if pillman went onto a ghost
      pos = ghost['pos']
      if pos[0] == pillman['pos'][0] and pos[1] == pillman['pos'][1]:
        if self.world_state['power'] == 0:
          self._die_by_ghost()
        else:
          self._kill_ghost(i)
          break
      # Then move ghosts
      speed = self.ghost_speed
      if self.world_state['power'] != 0:
        # Edible ghosts move at half speed.
        speed *= 0.5
      if np.random.uniform() < speed:
        self._move_ghost(ghost)
        pos = ghost['pos']
        # check if ghost went onto pillman
        if pos[0] == pillman['pos'][0] and pos[1] == pillman['pos'][1]:
          if self.world_state['power'] == 0:
            self._die_by_ghost()
          else:
            self._kill_ghost(i)
            # assume you can only eat one ghost per turn:
            break
    self._make_image()
    # Check if level over
    if self.timer == self.timer_terminate:
      self._init_level(self.level + 1)
    # Check if framecap reached
    if self.frame_cap > 0 and self.frame >= self.frame_cap:
      self.pcontinue = 0
  def observation(self, agent_id=0):
    """Returns the (reward, pcontinue, rgb_frame) of the last step.

    `agent_id` is accepted for interface compatibility but is unused here.
    """
    return (self.reward,
            self.pcontinue,
            observation_as_rgb(self.image))
# + [markdown] colab_type="text" id="nY3LE6KkCCKR"
# ## The components
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="Hy0obCpXHfvU"
def update_frame_stack(old_stack, new_frame):
  """Drops the oldest RGB frame (first 3 channels) and appends the new frame."""
  remaining = old_stack[..., 3:]
  return tf.concat([remaining, new_frame], axis=-1)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="bj4MovHWMczy" slideshow={"slide_type": "-"}
class I2AAgent(snt.RNNCore):
  """The I2A agent imagines possible futures and learns how to interpret these
  imaginations.

  The state of the agent is made of the last few frames observed from the
  environment. This state is initialised to black frames.
  """

  def __init__(self, num_actions, model_free_path, imag_path,
               height, width, stack_size, name='i2a_agent'):
    super(I2AAgent, self).__init__(name=name)
    self.num_actions = num_actions
    self.model_free_path = model_free_path  # e.g. a FrameProcessing module
    self.imag_path = imag_path  # e.g. ImagPath, or NoImagPath for model-free
    self.height = height
    self.width = width
    self.stack_size = stack_size  # number of stacked RGB frames in the state

  def _build(self, frame, prev_state):
    """Single recurrent step: maps (frame, frame-stack state) to
    ((action, policy_logits, baseline), next frame-stack state)."""
    # Add a batch dimension
    frame = tf.expand_dims(frame, axis=0)
    next_state = update_frame_stack(prev_state, frame)
    # Compute features from imagination and model free paths
    imag_feature = self.imag_path(next_state)
    model_free_feature = self.model_free_path(next_state)
    policy_feature = tf.concat([imag_feature, model_free_feature], axis=1)
    # Compute logits and baseline (needed for A2C loss)
    value_and_logits = snt.Linear(
        output_size=1 + self.num_actions,
        name='value_and_logits')(policy_feature)
    baseline = value_and_logits[:, 0]
    policy_logits = value_and_logits[:, 1:]
    # Sample an action from the policy distribution.
    action = tf.multinomial(policy_logits, 1)
    action = tf.cast(action, tf.int32)
    return (action, policy_logits, baseline), next_state

  def initial_state(self, batch_size=1):
    # Black (all-zero) frame stack; only batch size 1 is supported.
    assert batch_size == 1
    return tf.zeros((batch_size, self.height, self.width, 3*self.stack_size))
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="63HW22E5OMlK" slideshow={"slide_type": "slide"}
class FrameProcessing(snt.AbstractModule):
  """Small convolutional encoder mapping a frame (stack) to a flat Tensor.

  Can be used for model-free path for example.
  """

  def __init__(self, output_size, name='frame_processing'):
    super(FrameProcessing, self).__init__(name=name)
    self.output_size = output_size

  def _build(self, frame):
    # Two 3x3 conv layers (the second with stride 2), each followed by ReLU.
    net = snt.Conv2D(output_channels=16, kernel_shape=3, stride=1)(frame)
    net = tf.nn.relu(net)
    net = snt.Conv2D(output_channels=16, kernel_shape=3, stride=2)(net)
    net = tf.nn.relu(net)
    # Flatten and project to the requested feature size.
    flat = snt.BatchFlatten()(net)
    return tf.nn.relu(snt.Linear(self.output_size)(flat))
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="_ldpVKNLPvxO"
class ImagPath(snt.AbstractModule):
  """Imagines possible futures and encodes all imaginations into a single flat
  Tensor."""

  def __init__(self, num_actions, rollout_depth,
               env_model, rollout_policy, single_imag_feature,
               name='imag_path'):
    super(ImagPath, self).__init__(name=name)
    self.num_actions = num_actions  # one rollout is imagined per action
    self.rollout_depth = rollout_depth  # imagined time-steps per rollout
    self.env_model = env_model  # maps (frame_stack, action) -> (frame, reward, ...)
    self.rollout_policy = rollout_policy  # chooses actions after the first step
    self.single_imag_feature = single_imag_feature  # encodes one imagined step

  def _tile_by_actions(self, tensor):
    # Replicates `tensor` along a new axis 1, once per action.
    return tf.concat(
        [tf.expand_dims(tensor, axis=1)] * self.num_actions, axis=1)

  def _build(self, frame_stack):
    imag_features = []
    # We need to do 'num_actions' rollouts. For efficiency reasons, we need to
    # batch all rollouts. If frame_stack has a batch dimension of B, then we
    # will have an effective batch dimension of B * num_actions.
    batch_size = frame_stack.get_shape()[0].value
    # frame_stack has shape (B, height, width, num_channels).
    # Tile it to have shape (B, num_actions, height, width, num_channels).
    frame_stack = self._tile_by_actions(frame_stack)
    # We force the first action
    action = tf.constant(range(self.num_actions),
                         shape=(self.num_actions,),
                         dtype=tf.int32)
    action = tf.stack([action] * batch_size)
    # Use snt.BatchApply to collapse the first two dimensions into a single
    # dimension.
    env_model = snt.BatchApply(self.env_model)
    imag_feature = snt.BatchApply(self.single_imag_feature)
    frame, reward, _ = env_model(frame_stack, action)
    frame_stack = update_frame_stack(frame_stack, frame)
    imag_features.append(imag_feature(frame, reward, action))
    # Subsequent actions come from the rollout policy.
    for _ in range(self.rollout_depth - 1):
      action, _ = snt.BatchApply(self.rollout_policy)(frame)
      frame, reward, _ = env_model(frame_stack, action)
      frame_stack = update_frame_stack(frame_stack, frame)
      imag_features.append(imag_feature(frame, reward, action))
    # Process all imagination features in reverse order to encode the rollouts
    lstm = snt.LSTM(256)
    lstm_state = [self._tile_by_actions(t) for t in lstm.initial_state(batch_size)]
    for feature in imag_features[::-1]:
      lstm_output, lstm_state = snt.BatchApply(lstm)(feature, lstm_state)
    encoded_rollouts = lstm_output
    # The encoded_rollouts has shape (B, num_actions, lstm_output_size).
    # Flatten it.
    # NOTE(review): the reshape to [1, -1] hard-codes a batch size of 1.
    return tf.reshape(encoded_rollouts, [1, -1])
class NoImagPath(snt.AbstractModule):
  """Drop-in replacement for ImagPath that yields an empty feature.

  Use this instead of ImagPath to get a model-free agent.
  """

  def __init__(self, name='no_imag_path'):
    super(NoImagPath, self).__init__(name=name)

  def _build(self, frame_stack):
    # A zero-width feature: concatenating it contributes nothing to the policy.
    num_rows = frame_stack.get_shape()[0].value
    return tf.zeros((num_rows, 0))
# + [markdown] colab_type="text" id="ua05Vpx-CsSM"
# Either of the models below can be used in I2A. The first model implements one of the
# baselines considered in the paper.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="__mjCMTLgqi9"
class SizePreservingConvNetModel(snt.AbstractModule):
  """A simple model that predicts the next frame using a size preserving
  convolutional network. The action is tiled and broadcasted to the whole
  input frame stack as additional channels."""

  def __init__(self, num_actions, reward_bins,
               name='size_preserving_conv_net_model'):
    super(SizePreservingConvNetModel, self).__init__(name=name)
    self.num_actions = num_actions
    self.num_reward_bins = len(reward_bins)
    # Row vector of bin centres, used to turn predicted bin probabilities into
    # a scalar expected reward.
    self.reward_bins = tf.constant(
        reward_bins, shape=(1, len(reward_bins)), dtype=tf.float32)

  def _build(self, frame_stack, action):
    """Returns (predicted frame, expected reward, reward-bin probabilities)."""
    # One-hot actions are broadcasted to all locations in the frames.
    action_one_hot = tf.one_hot(action, depth=self.num_actions)
    action_2d = tf.expand_dims(action_one_hot, axis=1)
    action_2d = tf.expand_dims(action_2d, axis=1)
    height, width = [d.value for d in frame_stack.get_shape()[1:3]]
    action_tiled = snt.TileByDim(dims=[1, 2], multiples=[height, width])(
        action_2d)
    conv_input = tf.concat([frame_stack, action_tiled], axis=3)
    hidden = snt.Conv2D(
        output_channels=16, kernel_shape=3, stride=1)(conv_input)
    hidden = tf.nn.relu(hidden)
    # Rewards are binned to transform the problem of learning a reward predictor
    # into a classification problem.
    hidden_for_reward = snt.Conv2D(
        output_channels=8, kernel_shape=3, stride=2)(hidden)
    hidden_for_reward = tf.nn.relu(hidden_for_reward)
    hidden_for_reward = snt.Linear(self.num_reward_bins)(
        snt.BatchFlatten()(hidden_for_reward)
    )
    # Note: this local `reward_bins` holds softmax probabilities and shadows
    # the constant of bin centres stored in self.reward_bins.
    reward_bins = tf.nn.softmax(hidden_for_reward)
    reward = tf.reduce_sum(reward_bins * self.reward_bins, axis=1)
    # Pixels are treated as Bernoulli variables.
    hidden_for_frame = snt.Conv2D(
        output_channels=3, kernel_shape=3, stride=1)(hidden)
    frame = tf.nn.sigmoid(hidden_for_frame)
    return frame, reward, reward_bins
class CopyModel(snt.AbstractModule):
  """A dummy model that ignores the action and outputs the input frame."""

  def __init__(self, num_reward_bins, name='copy_model'):
    super(CopyModel, self).__init__(name=name)
    self.num_reward_bins = num_reward_bins

  def _build(self, frame_stack, action):
    batch = frame_stack.get_shape()[0]
    # Predicted reward and reward-bin logits are identically zero.
    dummy_reward = tf.constant(0, shape=(batch,), dtype=tf.float32)
    dummy_reward_bins = tf.constant(
        0, shape=(batch, self.num_reward_bins), dtype=tf.float32)
    # The "predicted" next frame is just the newest RGB frame of the stack.
    last_frame = frame_stack[:, :, :, -3:]
    return last_frame, dummy_reward, dummy_reward_bins
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="f-CRKFAjh3Fc"
class SingleImagFeature(snt.AbstractModule):
  """This transforms a tuple of (imagined frame, predicted reward, action)
  into a feature."""

  def __init__(self, output_size, num_actions, name='single_imag_feature'):
    super(SingleImagFeature, self).__init__(name=name)
    self.output_size = output_size  # size of the conv frame feature
    self.num_actions = num_actions  # depth of the one-hot action encoding

  def _build(self, frame, reward, action):
    # Flat feature: [conv frame feature | scalar reward | one-hot action].
    frame_feature = FrameProcessing(self.output_size)(frame)
    action_one_hot = tf.one_hot(action, depth=self.num_actions)
    return tf.concat(
        [frame_feature, tf.expand_dims(reward, axis=1), action_one_hot],
        axis=1)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="OllwVFZMqOwO"
class RolloutPolicy(snt.AbstractModule):
  """The rollout policy maps imagined frames to actions."""

  def __init__(self, num_actions, hidden_size, name='rollout_policy'):
    super(RolloutPolicy, self).__init__(name=name)
    self.num_actions = num_actions
    self.hidden_size = hidden_size  # size of the intermediate frame feature

  def _build(self, frame):
    """Returns (sampled action of shape (B,), policy logits)."""
    frame_feature = FrameProcessing(self.hidden_size)(frame)
    logits = snt.Linear(
        output_size=self.num_actions,
        name='logits')(frame_feature)
    # Sample one action per batch element from the logits.
    action = tf.multinomial(logits, 1)
    action = tf.cast(action, tf.int32)
    # We need to output the logits to be able to distill another policy
    # into our rollout policy
    return tf.squeeze(action, axis=1), logits
# + [markdown] colab_type="text" id="R4NTKiOXrHQU"
# ## Putting it all together
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 474, "output_extras": [{"item_id": 1}, {"item_id": 2}]} colab_type="code" executionInfo={"elapsed": 2707, "status": "ok", "timestamp": 1512781901966, "user": {"displayName": "S\u00e9<NAME>", "photoUrl": "//lh4.googleusercontent.com/-7lP1T3HsCAw/AAAAAAAAAAI/AAAAAAAAABY/fWvmCeoJXTk/s50-c-k-no/photo.jpg", "userId": "102775271233982205447"}, "user_tz": 480} id="bnYF6XuYrKdN" outputId="6f5c71f6-3977-42be-fa39-cfc93a233d55"
def run():
  """Builds the I2A agent and the PillEater environment, plays one episode
  while training the rollout policy by distillation, and plots every frame."""
  tf.reset_default_graph()
  # Hyper-parameters / environment dimensions.
  num_actions = 5
  rollout_depth = 3
  output_size = 128
  height = 15
  width = 19
  rgb = 3
  frame_cap = 39
  learning_rate = 1e-5
  stack_size = 4
  env_model_type = CopyModel
  # Model free path
  model_free_path = FrameProcessing(output_size)
  # Imagination path
  if env_model_type == CopyModel:
    env_model = CopyModel(num_reward_bins=3)
  elif env_model_type == SizePreservingConvNetModel:
    env_model = SizePreservingConvNetModel(num_actions=num_actions,
                                           reward_bins=(-1., 0., 1.))
  rollout_policy = RolloutPolicy(num_actions, 128)
  single_imag_feature = SingleImagFeature(output_size, num_actions)
  imag_path = ImagPath(
      num_actions, rollout_depth,
      env_model, rollout_policy, single_imag_feature)
  # The I2A agent
  i2a_agent = I2AAgent(num_actions, model_free_path, imag_path,
                       height=height, width=width, stack_size=stack_size)
  input_frame = tf.placeholder(
      shape=(height, width, rgb), dtype=tf.float32, name='input_frame')
  # We connect the agent to the initial state. We can override this by using
  # a feed_dict with agent_state as key at session runtime.
  agent_state = i2a_agent.initial_state()
  (action, policy_logits, baseline), agent_next_state = i2a_agent(input_frame,
                                                                  agent_state)
  # The environment
  env = PillEater(mode='regular', frame_cap=frame_cap)
  env.start()
  # Distillation loss. Sonnet handles weight sharing naturally, so we can simply
  # apply the rollout_policy on the frames.
  _, rollout_logits = rollout_policy(tf.expand_dims(input_frame, axis=0))
  distill_loss = tf.nn.softmax_cross_entropy_with_logits(
      logits=rollout_logits,
      labels=tf.stop_gradient(tf.nn.softmax(policy_logits)))
  optim_step = tf.train.RMSPropOptimizer(
      learning_rate=learning_rate, epsilon=0.1).minimize(distill_loss)
  # Run until termination of an episode, and display all observed frames.
  init_global = tf.global_variables_initializer()
  tf.get_default_graph().finalize()
  with tf.Session() as sess:
    sess.run(init_global)
    env_pcontinue = 1.
    # Lay the episode's frames out on a grid of subplots.
    num_frames = frame_cap + 1
    num_horiz = 10
    num_vert = (num_frames + num_horiz - 1) // num_horiz
    fig, axes_grid = plt.subplots(num_vert, num_horiz)
    fig.set_figwidth(2 * num_horiz)
    fig.set_figheight(2 * num_vert)
    axes = []
    for ax in axes_grid:
      axes.extend(ax)
    for axis in axes:
      axis.get_xaxis().set_visible(False)
      axis.get_yaxis().set_visible(False)
    step_count = 0
    total_reward = 0.
    prev_state = None
    while env_pcontinue == 1.:
      env_reward, env_pcontinue, env_frame = env.observation()
      total_reward += env_reward
      feed_dict = {input_frame: env_frame}
      # After the first step, feed the previous frame-stack back in.
      if prev_state is not None:
        feed_dict[agent_state] = prev_state
      # One sess.run both samples an action and applies a distillation update.
      action_out, _, prev_state = sess.run([action, optim_step, agent_next_state],
                                           feed_dict=feed_dict)
      env.step(action_out[0, 0])
      axis = axes[step_count]
      axis.imshow(env_frame)
      step_count += 1
  print('Total reward at end of episode: {}.'.format(total_reward))

run()
| mltrain-nips-2017/sebastien_racaniere/I2A - NIPS workshop.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.2 64-bit
# name: python3
# ---
# Exploratory analysis of the Tanzania tourism training data: load the data,
# inspect missing values, visualise distributions and impute.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.figure(figsize=(15,15))  # NOTE(review): creates an empty figure at import time
import matplotlib_inline
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# Widen pandas display limits so wide frames print in full.
pd.set_option("display.max_columns",7000)
pd.set_option("display.max_rows",None)
pd.set_option("display.width",90000)
# Load the training data and the variable definitions.
train=pd.read_csv("Train.csv")
vardfn=pd.read_csv("VariableDefinitions.csv")
train.head(10)
vardfn
train.shape
pd.set_option("display.max_rows",90000)
train.info()
train.dtypes
train.isnull().sum()
# Visualise where values are missing.
sns.heatmap(train.isnull(),yticklabels=False,cbar=False)
group=train.isnull().sum().sort_values().plot(kind='barh')
plt.title("MISSING VALUES IN DATA SET")
# Annotate each bar with the percentage of rows that are missing.
for p in group.patches:
    z=((p.get_width()/train.shape[0])*100)
    per="{:,.0f}%".format(z)
    width=p.get_width()
    height=p.get_height()
    x=p.get_x() + width + 0.02
    y=p.get_y() + height/2
    plt.annotate(per,(x,y))
train['most_impressing'].value_counts()
# +
sns.countplot(train['most_impressing'])
# -
# Correlation matrix of the numeric columns.
sns.heatmap(train.corr(),annot=True,square=True,vmax=1,vmin=-1)
# +
sns.countplot(train['total_female'])
plt.show()
# -
sns.displot(train['total_female'])
# +
sns.kdeplot(train['total_female'],color="green",shade=True)
# -
sns.kdeplot(train['total_male'],shade=True)
sns.countplot(train['total_male'])
# Impute numeric missing values with the column median.
misn_numerical=['total_male','total_female']
for x in misn_numerical:
    train[x]=train[x].fillna(train[x].median())
# Re-draw the missing-value bar chart after the numeric imputation.
group=train.isnull().sum().sort_values().plot(kind="barh")
plt.title("MISSING VALUES IN DATASET")
for p in group.patches:
    z=((p.get_width()/train.shape[0])*100)
    per="{:,.0f}%".format(z)
    width=p.get_width()
    height=p.get_height()
    x=p.get_x() + width + 0.02
    y=p.get_y() + height/2
    plt.annotate(per,(x,y))
train.isnull().sum()
# Impute categorical missing values with the literal string "None".
cater_missng=['travel_with','most_impressing']
for x in cater_missng:
    train[x]=train[x].fillna("None")
train.isnull().sum()
train.info()
| Tazatep dataset/TanzaTourAI.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import datetime
import pandas as pd
import spacy
import re
import string
import numpy as np
from matplotlib import cm
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
# %matplotlib inline
from spacy.tokens import Token
from tqdm import tqdm
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import nltk
from nltk.corpus import stopwords
from sklearn.decomposition import PCA
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import DBSCAN
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import silhouette_score
from sklearn.model_selection import cross_val_score , GridSearchCV,train_test_split
from sklearn.naive_bayes import MultinomialNB,GaussianNB
from sklearn.neighbors import KNeighborsClassifier
import enchant
pd.set_option('display.max_rows', 500)
# +
dict_check = enchant.Dict("en_US")
#### Importing the file ####
Path="src/"
Filename='projects_Preprocessed.csv'
df=pd.read_csv(Path+Filename)
Cat_File="category_hier.csv"
Cat_data=pd.read_csv(Path+Cat_File)
varcluster_file="variable_clusters.csv"
varcluster=pd.read_csv(Path+varcluster_file)
# Manual mapping from variable-cluster id to a readable name and a top-level
# subject category.
varcluster_info=pd.DataFrame(
    {'cluster_id' :[ '0' , '1' , '2' , '3' , '4' , '5' , '6' , '7' , '8' , '9' , '10', '11', '12', '13', '14', '15']
    ,'cluster_name':[ 'General' ,'Life Sciences' ,'Humanities and Social Sciences' ,'Engineering Sciences' ,'Natural Sciences' ,'Medicine' ,'Foundation & Acquisition' ,'Chemistry & Mechanical & Electrical' ,'Life Sciences' ,'Physics & Mathematical & Geometry & Analytical & Computer' ,'Eco System & Chemistry' ,'History & Cultural' ,'Climate & Earth' ,'Human & Experiment' ,'Biology & Genetics' ,'Text & Publish' ]
    ,'category'  :['General', 'Life Sciences', 'Humanities and Social Sciences', 'Engineering Sciences', 'Natural Sciences', 'Life Sciences', 'Uncategorized', 'Uncategorized', 'Life Sciences', 'Uncategorized' , 'Natural Sciences', 'Humanities and Social Sciences', 'Natural Sciences', 'Uncategorized', 'Uncategorized', 'General']
    })
# Drop the boiler-plate Heisenberg-Program abstract, which is identical for
# many projects and therefore uninformative.
df=df[df['Translates']!="The goal of the Heisenberg Program is to enable outstanding scientists who fulfill all the requirements for a long-term professorship to prepare for a scientific leadership role and to work on further research topics during this time. In pursuing this goal, it is not always necessary to choose and implement project-based procedures. For this reason, in the submission of applications and later in the preparation of final reports - unlike other support instruments - no 'summary' of project descriptions and project results is required. Thus, such information is not provided in GEPRIS."]
## Filtering the null abstracts & short description
df=df[(pd.isnull(df.PreProcessedDescription)==False) & (df.PreProcessedDescription.str.strip()!='abstract available')& (df.PreProcessedDescription.str.len()>100)]
# -
df.head()
# Attach the top-level category to each project and one-hot encode it.
merged_data=df.merge(Cat_data[["File_Categories","Category"]], how="left", left_on="SubjectArea", right_on="File_Categories")
merged_data=merged_data[pd.isnull(merged_data["Category"])==False]
dummies=pd.get_dummies(merged_data['Category'])
merged_data=pd.concat([merged_data,dummies], axis=1,ignore_index=False)
# +
# TF IDF Conversion
# Bigram TF-IDF features (top 1000), appended as columns to merged_data.
vectorizer = TfidfVectorizer(max_features=1000, ngram_range=(2, 2))
review_vectors = vectorizer.fit_transform(merged_data["PreProcessedDescription"])
features_df = pd.DataFrame(review_vectors.toarray(), columns = vectorizer.get_feature_names())
features_df.reset_index(drop=True, inplace=True)
merged_data.reset_index(drop=True, inplace=True)
merged_data=pd.concat([merged_data,features_df], axis=1,ignore_index=False)
# Column names of the TF-IDF bigram features only.
wordslist=merged_data.columns.tolist()[len(df.columns)+6:]
# -
# Top-20 bigrams per category.
plt.subplots(figsize=(14,8))
i=1
for cat,bucket in merged_data.groupby('Category'):
    plt.subplot(2,2,i)
    bucket[wordslist].sum().sort_values(ascending=False).head(20).plot(kind='bar',color='green')
    plt.title(cat)
    plt.xticks(rotation=60)
    i=i+1
plt.tight_layout()
# Prepend two PCA components (for visualisation) to the frame.
pca = PCA(n_components=2).fit_transform(merged_data[merged_data.columns[16:]])
concated_ds=pd.concat([pd.DataFrame(pca),merged_data], ignore_index=True, axis=1)
concated_ds.columns=['PCA_1','PCA_2']+merged_data.columns.tolist()
merged_data=concated_ds
merged_data.Category=merged_data.Category.str.strip()
# Join the manual cluster metadata onto the variable clusters.
varcluster_info.cluster_id=varcluster_info.cluster_id.astype('int32')
varcluster=varcluster.merge(varcluster_info, how='left',left_on='Cluster', right_on='cluster_id')
## Removing the General cluster columns
merged_data=merged_data.drop(columns=merged_data.columns[merged_data.columns.isin(varcluster[varcluster.category=='General'].Variable)])
## Removing the columns that are far away from of cluster
merged_data=merged_data.drop(columns=merged_data.columns[merged_data.columns.isin(varcluster[varcluster.RS_Ratio>=1].Variable)])
# +
## Other variable cluster with same category analysis
#catfiltered_ds=merged_data[merged_data.Category=='Engineering Sciences ']
#othercolumns=merged_data.columns[~merged_data.columns.isin(varcluster[varcluster.category=='Engineering Sciences'].Variable)]
#merged_data[['Description','File_Categories','Category']].loc[catfiltered_ds[othercolumns[15:]].head().T.sum().index]
# +
#Filename='DBScanCluster.csv'
#NewMergedDS=pd.read_csv(Path+Filename)
# -
merged_data.columns[18:].tolist()
# Parameter exploration: per category, reduce to `ps` PCA components, run
# DBSCAN, and report explained variance and cluster sizes.
for cat in merged_data.Category.unique():
    print('******'+cat+'******')
    FeatureCols=merged_data.columns[18:]
    CategoricalDS= merged_data[merged_data.Category==cat]
    for ps in np.linspace(500,500,1, dtype=int):
        pca =PCA(n_components=ps)
        pca_data= pd.DataFrame(pca.fit_transform(CategoricalDS[FeatureCols]))
        print('PCA components :',ps,'Variance coveragence' ,np.max(pca.explained_variance_ratio_.cumsum())*100)
        clusterer = DBSCAN(eps=.85, min_samples=15, n_jobs=4)
        preds = clusterer.fit_predict(pca_data)
        print(pd.Series(preds).value_counts())
        # = silhouette_score (pca_data, preds, metric='euclidean')
        #print("For n_clusters = {}, silhouette score is {}, Density {})".format(n_clusters, score,pd.Series(preds).value_counts().tolist()))
        #print(score)
# +
# Per-category PCA + DBSCAN clustering.
# The original cell repeated the same four statements once per category
# (CategoricalDS1..4 / pca_data1..4); they are folded into one loop here.
# Only NewMergedDS / NewMergedPCADS are consumed by later cells.
FeatureCols=merged_data.columns[18:]
comps=500       # PCA components kept per category
epsilon=.9      # DBSCAN eps (.875 was also tried)
min_sample=15   # DBSCAN min_samples
print(str(datetime.datetime.now())+" : Started ")
categories = ['Engineering Sciences', 'Humanities and Social Sciences',
              'Life Sciences', 'Natural Sciences']
clustered_frames = []
clustered_pca_frames = []
for category in categories:
    # .copy() so the cluster-label assignment writes to a real frame, not a view.
    cat_ds = merged_data[merged_data.Category == category].copy()
    pca = PCA(n_components=comps)
    pca_ds = pd.DataFrame(pca.fit_transform(cat_ds[FeatureCols]))
    labels = DBSCAN(eps=epsilon, min_samples=min_sample, n_jobs=4).fit_predict(pca_ds)
    cat_ds['DBScanCluster'] = labels
    pca_ds['DBScanCluster'] = labels
    print(category, cat_ds['DBScanCluster'].value_counts())
    clustered_frames.append(cat_ds)
    clustered_pca_frames.append(pca_ds)
# Same concatenation order as the original (Eng, Hum, Life, Nat).
NewMergedDS = pd.concat(clustered_frames)
NewMergedPCADS = pd.concat(clustered_pca_frames)
# Persist the metadata columns plus the cluster label.
NewMergedDS[NewMergedDS.columns[:14].tolist()+['DBScanCluster']].to_csv(Path+'DBScanClusterPCA.csv', index=False)
print(str(datetime.datetime.now())+" : Completed ")
# -
#NewMergedDS.DBScanCluster.value_counts()
NewMergedDS['DBScanCluster'].value_counts()
NewMergedDS['Category'].value_counts()
# Shift all PCA feature columns by +1 — presumably to make them non-negative
# for MultinomialNB below; TODO confirm the features stay >= 0 after the shift.
NewMergedPCADS[NewMergedPCADS.columns[:-1]]=NewMergedPCADS[NewMergedPCADS.columns[:-1]]+1
NewMergedDS=NewMergedDS.reset_index()[NewMergedDS.columns[0:]]
# +
# Integer-encode the category label as the classification target.
# The original used chained indexing (df[col][mask] = v), which pandas
# documents as unreliable (may write to a temporary copy); .loc writes are
# guaranteed to modify the frame.
NewMergedDS['CategoryConv'] = ''
NewMergedDS.loc[NewMergedDS['Category'] == 'Engineering Sciences', 'CategoryConv'] = 0
NewMergedDS.loc[NewMergedDS['Category'] == 'Humanities and Social Sciences', 'CategoryConv'] = 1
NewMergedDS.loc[NewMergedDS['Category'] == 'Natural Sciences', 'CategoryConv'] = 2
NewMergedDS.loc[NewMergedDS['Category'] == 'Life Sciences', 'CategoryConv'] = 3
NewMergedDS['CategoryConv'] = NewMergedDS['CategoryConv'].astype('int')
# +
#NewMergedDS.columns[18:len(NewMergedDS.columns)-1]
# +
# TF-IDF feature columns (everything after the metadata, minus CategoryConv).
Features=NewMergedDS.columns[18:len(NewMergedDS.columns)-1]
OP_Feature='CategoryConv'
# Train on the dense DBSCAN cluster 0; hold out the noise/other clusters.
X_Training_DS=NewMergedDS[Features][NewMergedDS.DBScanCluster==0]
#X_Training_DS=NewMergedPCADS[NewMergedPCADS.DBScanCluster==0]
y_Training_DS=NewMergedDS[OP_Feature][NewMergedDS.DBScanCluster==0]
X_Test_DS=NewMergedDS[Features][NewMergedDS.DBScanCluster!=0]
#X_Test_DS=NewMergedPCADS[NewMergedPCADS.DBScanCluster!=0]
y_Test_DS=NewMergedDS[OP_Feature][NewMergedDS.DBScanCluster!=0]
X_train, X_test, y_train, y_test = train_test_split(X_Training_DS,y_Training_DS, test_size=0.2, random_state=0)
# +
# Multinomial Naive Bayes on the TF-IDF features, with k-fold cross-validation
# on the DBSCAN-core training set.
modelNB = MultinomialNB(alpha=1)
modelNB.fit(X_train, y_train)
nfolds=5
scores=cross_val_score(modelNB, X_Training_DS,y_Training_DS, cv=nfolds, scoring="accuracy")
pd.Series(scores).plot(kind="box", label="Accuracy");
plt.title('Accuracy_score from '+str(nfolds)+' Folds (Accuracy) for '+str(round(pd.Series(scores).mean(), 2)))
y_pred = modelNB.predict(X_test)
print('Accuracy Score : '+str(accuracy_score(y_test,y_pred )))
# +
# K-nearest-neighbours baseline (its evaluation lines are kept commented out).
modelKBC = KNeighborsClassifier(n_neighbors=20)
modelKBC.fit(X_train, y_train)
#nfolds=1
#y_pred = modelKBC.predict(X_test)
#print('Accuracy Score : '+str(accuracy_score(y_test,y_pred )))
#scores=cross_val_score(modelKBC, X_Training_DS,y_Training_DS, cv=nfolds, scoring="accuracy")
#pd.Series(scores).plot(kind="box", label="Accuracy");
#plt.title('Accuracy_score from '+str(nfolds)+' Folds (Accuracy) for '+str(round(pd.Series(scores).mean(), 2)))
# +
def _category_matching(DF, colname):
    """Returns the name of the first category column whose value equals DF[colname].

    Shared implementation for the three rank-naming helpers below, which were
    previously three near-identical copies. Ties resolve in a fixed order
    (Engineering, Humanities, Natural, Life), matching the original chain of
    elif comparisons. Returns '' when no category column matches.
    """
    for category in ('Engineering Sciences',
                     'Humanities and Social Sciences',
                     'Natural Sciences',
                     'Life Sciences'):
        if DF[category] == DF[colname]:
            return category
    return ''

def name_max_value(DF):
    """Name of the category whose score equals the top score ('Category_1_Values')."""
    return _category_matching(DF, 'Category_1_Values')

def name_sec_max_value(DF):
    """Name of the category whose score equals the second-highest score."""
    return _category_matching(DF, 'Category_2_Values')

def name_3rd_max_value(DF):
    """Name of the category whose score equals the third-highest score."""
    return _category_matching(DF, 'Category_3_Values')
# +
#matching_category()
cols=['Engineering Sciences','Humanities and Social Sciences','Natural Sciences','Life Sciences']
# Per-document NB log-probabilities, the top-3 scores, their category names,
# and the gaps between consecutive ranks (used as confidence measures).
PredictedValues=pd.DataFrame(modelNB.predict_log_proba(NewMergedDS[Features]), columns=cols)
PredictedValues['Category_1_Values']=PredictedValues[cols].apply(np.max,axis=1)
PredictedValues['Category_2_Values']=PredictedValues[cols].apply(np.sort,axis=1).apply(lambda x:x[2])
PredictedValues['Category_3_Values']=PredictedValues[cols].apply(np.sort,axis=1).apply(lambda x:x[1])
PredictedValues['Category_1']=PredictedValues.apply(name_max_value,axis=1)
PredictedValues['Category_2']=PredictedValues.apply(name_sec_max_value,axis=1)
PredictedValues['Category_3']=PredictedValues.apply(name_3rd_max_value,axis=1)
PredictedValues['Category_12_Variance']=PredictedValues.apply(lambda x :x['Category_1_Values']-x['Category_2_Values'], axis=1)
PredictedValues['Category_23_Variance']=PredictedValues.apply(lambda x :x['Category_2_Values']-x['Category_3_Values'], axis=1)
# +
#& (NewMergedDSAligned['Category']!=NewMergedDSAligned['Category_1'])
# NOTE(review): this cell reads NewMergedDSAligned, which is only created a few
# lines below — the notebook cells must be executed out of order for this to run.
plt.subplots(figsize=(14,8))
j=1
for i,w in NewMergedDSAligned[(NewMergedDSAligned.DBScanCluster!=0) ].groupby(['Category']):
    print(i, w.count()['DBScanCluster'])
    print(w['Category_1'][(w['Category_1']!=i) & (w['Category_12_Variance']>0.5) & (w['Category_1_Values']>-0.35)].value_counts())
    print('')
    plt.subplot(2,4,j)
    w['Category_1_Values'][(w['Category_1']!=i) & (w['Category_12_Variance']>0.5) & (w['Category_1_Values']>-0.35)].hist()
    plt.title(i+' Only one Feature')
    j=j+1
    plt.subplot(2,4,j)
    w['Category_1_Values'][(w['Category_1']!=i) & (w['Category_12_Variance']<0.5) & (w['Category_1_Values']>-0.5)].hist()
    plt.title(i)
    j=j+1
plt.tight_layout()
# -
# Join the metadata columns with the NB prediction columns into one frame.
NewMergedDSAligned=pd.concat([NewMergedDS[NewMergedDS.columns.tolist()[2:14]+['DBScanCluster']],PredictedValues[PredictedValues.columns[4:]]], axis=1, ignore_index=False)
# +
#(NewMergedDSAligned.DBScanCluster!=0) &
NewMergedDSAligned['DBScanCluster'][ (NewMergedDSAligned['Category']!=NewMergedDSAligned['Category_1'])].value_counts()
# -
# Drop the boiler-plate Heisenberg-Program abstract again (same filter as on load).
NewMergedDSAligned=NewMergedDSAligned[NewMergedDSAligned['Translates']!="The goal of the Heisenberg Program is to enable outstanding scientists who fulfill all the requirements for a long-term professorship to prepare for a scientific leadership role and to work on further research topics during this time. In pursuing this goal, it is not always necessary to choose and implement project-based procedures. For this reason, in the submission of applications and later in the preparation of final reports - unlike other support instruments - no 'summary' of project descriptions and project results is required. Thus, such information is not provided in GEPRIS."]
# Inspect documents the NB model relabels away from their original category.
cats='Humanities and Social Sciences'
NewMergedDSAligned[['Translates','Category_1_Values']][(NewMergedDSAligned['Category_1']!=cats) & (NewMergedDSAligned['Category']==cats) & (NewMergedDSAligned.DBScanCluster!=0) & (NewMergedDSAligned['Category']!=NewMergedDSAligned['Category_1'])].sort_values('Category_1_Values', ascending=False).Translates.head(100).tolist()#.tail().
#cats='Natural Sciences'
NewMergedDSAligned[['Translates','Category']+NewMergedDSAligned.columns[13:].tolist()][(NewMergedDSAligned['Category_1']!=cats) & (NewMergedDSAligned['Category']==cats) & (NewMergedDSAligned.DBScanCluster!=0) & (NewMergedDSAligned['Category']!=NewMergedDSAligned['Category_1'])].sort_values('Category_1_Values', ascending=False).head(100)
#NewMergedDS[NewMergedDS.columns.tolist()[2:16]].head()
NewMergedDSAligned[NewMergedDSAligned.columns.tolist()[6:]].head()
# Persist the findings for downstream analysis.
NewMergedDSAligned.to_csv(Path+'DBScanNBFindingsPCA.csv', index=False)
NewMergedDSAligned[['Translates','Category_1_Values']][(NewMergedDSAligned.DBScanCluster!=0) & (NewMergedDSAligned['Category']!=NewMergedDSAligned['Category_1'])].sort_values('Category_1_Values', ascending=False).Translates.head(40).tolist()#.tail().
NewMergedDSAligned[['Translates','Category']+NewMergedDSAligned.columns[13:].tolist()][(NewMergedDSAligned.DBScanCluster!=0) & (NewMergedDSAligned['Category']!=NewMergedDSAligned['Category_1'])].sort_values('Category_1_Values', ascending=False).head(40)#.tail()
NewMergedDSAligned[['Translates','Category_12_Variance']][(NewMergedDSAligned.DBScanCluster!=0) & (NewMergedDSAligned['Category']!=NewMergedDSAligned['Category_1'])].sort_values('Category_12_Variance', ascending=False).Translates.head(5).tail().tolist()
NewMergedDSAligned[['Translates','Category']+NewMergedDSAligned.columns[15:].tolist()][(NewMergedDSAligned.DBScanCluster!=0) & (NewMergedDSAligned['Category']!=NewMergedDSAligned['Category_1'])].sort_values('Category_12_Variance', ascending=False).head(5).tail()
| Archieve/4.DBScan Clusters with PCA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Transfer learning with AlexNet
# ## import 하는 패키지와 라이브러리들
# +
from IPython.core.interactiveshell import InteractiveShell # 표를 이쁘게 만들어주는 기능
import seaborn as sns # 데이터 분포를 시각화해주는 라이브러리
# PyTorch
# torchvision : 영상 분야를 위한 패키지, ImageNet, CIFAR10, MNIST와 같은 데이터셋을 위한 데이터 로더와 데이터 변환기 등이 포함되어 있다.
from torchvision import transforms, datasets, models
import torch
# optim : 가중치를 갱신할 Optimizer가 정의된 패키지. SGD + momentum, RMSProp, Adam등과 같은 알고리즘이 정의되어 있다.
# cuda : CUDA 텐서 유형에 대한 지원을 추가하는 패키지이다. CPU텐서와 동일한 기능을 구현하지만 GPU를 사용하여 계산한다.
from torch import optim, cuda
# DataLoader : 학습 데이터를 읽어오는 용도로 사용되는 패키지.
# sampler : 데이터 세트에서 샘플을 추출하는 용도로 사용하는 패키지
from torch.utils.data import DataLoader, sampler
import torch.nn as nn
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
# Data science tools
import numpy as np
import pandas as pd # Pandas : Data science를 위한 패키지이다.
import os
# Image manipulations
from PIL import Image
# Useful for examining network
from torchsummary import summary
# Timing utility
from timeit import default_timer as timer
# Visualizations
import matplotlib.pyplot as plt # matplotlib를 쓸때 seaborn이 있는것과 없는것이 생긴게 다르다.
# %matplotlib inline
plt.rcParams['font.size'] = 14
# Printing out all outputs
InteractiveShell.ast_node_interactivity = 'all'
# -
# ## 초기화
# - 학습할 모델 명
# - 몇 epoch 학습할 것인지
# - 배치 크기는 몇으로 할 것인지
#
# ### alexnet, vgg11, vgg16, vgg19, resnet50
#
# +
# Which torchvision model to fine-tune (alexnet, vgg11, vgg16, vgg19, resnet50)
model_choice = 'resnet50'

# Maximum number of training epochs
training_epoch = 20

# Mini-batch size for all dataloaders
batch_size = 128
# -
# # [ Data Setup ]
#
# ## 데이터셋 경로 / GPU 학습 가능 여부 확인
#
# - 불러올 데이터셋의 경로를 지정한다.
# - train, validation, test 로 나눠져 있으므로, 각각의 경로를 지정한다.
# - 학습된 모델을 저장할 이름을 지정한다.
# - 배치크기를 지정한다.
# - GPU에서 학습이 가능한지 확인한다.
# +
# Location of data: the dataset root with train/valid/test subfolders
datadir = '/home/kunde/DeepLearningProject/ingredient_data_TR7_VA2_TE1/'
traindir = datadir + 'train/'
validdir = datadir + 'valid/'
testdir = datadir + 'test/'

# File names used when saving the best weights (.pt) / full checkpoint (.pth)
save_file_name = './ModelSave/' + model_choice + '-transfer.pt'
checkpoint_path = './ModelSave/' + model_choice + '-transfer.pth'

# Whether to train on a gpu
train_on_gpu = cuda.is_available()
if train_on_gpu:  # idiomatic truthiness test instead of `== True`
    print('GPU에서 학습이 가능합니다\n')
else:
    print('GPU에서 학습이 불가능합니다.\n')
# -
# ## DataFrame 설정 - 데이터셋의 구성을 보기 위한 부분이다
# +
# Empty lists to accumulate per-category counts and per-image dimensions
categories = []
img_categories = []
n_train = []
n_valid = []
n_test = []
hs = []
ws = []

# os.listdir(path): list the files/subfolders under path.
# Iterate through each category; each subfolder of train/ is one class.
for d in os.listdir(traindir):
    categories.append(d)  # the folder name doubles as the category name

    # Number of each image in every split for this category
    train_imgs = os.listdir(traindir + d)
    valid_imgs = os.listdir(validdir + d)
    test_imgs = os.listdir(testdir + d)
    n_train.append(len(train_imgs))
    n_valid.append(len(valid_imgs))
    n_test.append(len(test_imgs))

    # Find stats for train images (record height/width of each one)
    for i in train_imgs:
        img_categories.append(d)
        img = Image.open(traindir + d + '/' + i)
        img_array = np.array(img)
        # Shape: (height, width, channels) for an RGB image
        hs.append(img_array.shape[0])
        ws.append(img_array.shape[1])

# Dataframe of categories: image counts per split, one row per class
cat_df = pd.DataFrame({'category': categories,
                       'n_train': n_train,
                       'n_valid': n_valid,
                       'n_test': n_test}).sort_values('category')

# Dataframe of training images: one row per image with its dimensions
image_df = pd.DataFrame({
    'category': img_categories,
    'height': hs,
    'width': ws
})

cat_df.sort_values('n_train', ascending=False, inplace=True)
cat_df.head()
cat_df.tail()
# -
# ## DataSet 구성하기 - Data Augmentation, ImageFolder, DataLoader
# +
# Image transformations per split
image_transforms = {
    # Train uses data augmentation (random crop/rotation/color/flip)
    'train':
    transforms.Compose([
        transforms.RandomResizedCrop(size=256, scale=(0.8, 1.0)),
        transforms.RandomRotation(degrees=15),
        transforms.ColorJitter(),
        transforms.RandomHorizontalFlip(),
        transforms.CenterCrop(size=224),  # Image net standards
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])  # Imagenet standards
    ]),
    # Validation does not use augmentation
    'val':
    transforms.Compose([
        transforms.Resize(size=256),
        transforms.CenterCrop(size=224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    # Test does not use augmentation
    'test':
    transforms.Compose([
        transforms.Resize(size=256),
        transforms.CenterCrop(size=224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

# Datasets from each folder; ImageFolder infers labels from subfolder names
data = {
    'train':
    datasets.ImageFolder(root=traindir, transform=image_transforms['train']),
    'val':
    datasets.ImageFolder(root=validdir, transform=image_transforms['val']),
    'test':
    datasets.ImageFolder(root=testdir, transform=image_transforms['test'])
}

# Dataloader iterators over the datasets
dataloaders = {
    'train': DataLoader(data['train'], batch_size=batch_size, shuffle=True),
    'val': DataLoader(data['val'], batch_size=batch_size, shuffle=True),
    'test': DataLoader(data['test'], batch_size=batch_size, shuffle=True)
}

# Pull a single batch to sanity-check the tensor shapes
trainiter = iter(dataloaders['train'])
features, labels = next(trainiter)
features.shape, labels.shape

n_classes = len(cat_df)
print(f'There are {n_classes} different classes.')

len(data['train'].classes)
# -
# # [ CNN Model ]
# ## 모델 구조 확인 ( 모델 변경할때마다 실행할 것 )
# +
# Inspect the raw architecture only. Each torchvision model is structured
# differently, so the layer that must be replaced differs per model; this
# cell is just for looking and can be skipped.
if model_choice == 'alexnet':
    model = models.alexnet(pretrained=True)
elif model_choice == 'vgg11':
    model = models.vgg11(pretrained=True)
elif model_choice == 'resnet50':
    model = models.resnet50(pretrained=True)

model
# -
# ## Pretrained Model 불러오는 함수
def get_pretrained_model(model_name):
    """Retrieve a pre-trained torchvision model and adapt it for transfer learning.

    The convolutional backbone is frozen (requires_grad=False) and the final
    classification layer is replaced with a fresh Linear -> LogSoftmax head
    sized for the module-level `n_classes`.

    Params
    -------
    model_name (str): one of 'alexnet', 'vgg11', 'vgg16', 'vgg19', 'resnet50'
        (the original only handled alexnet/vgg16/resnet50 even though the
        notebook advertises vgg11 and vgg19 as choices)

    Return
    --------
    model (PyTorch model): cnn, moved to GPU when one is available

    Raises
    --------
    ValueError: for an unsupported model name (previously this crashed with
        an UnboundLocalError)
    """
    # AlexNet and the VGG family all keep their final Linear layer at
    # classifier[6], so they share one code path.
    classifier_constructors = {
        'alexnet': models.alexnet,
        'vgg11': models.vgg11,
        'vgg16': models.vgg16,
        'vgg19': models.vgg19,
    }

    if model_name in classifier_constructors:
        model = classifier_constructors[model_name](pretrained=True)

        # Freeze early layers so only the new head is trained
        for param in model.parameters():
            param.requires_grad = False

        n_inputs = model.classifier[6].in_features

        # Replace the last classifier layer with Linear -> LogSoftmax
        # (LogSoftmax pairs with the NLLLoss used for training)
        model.classifier[6] = nn.Sequential(
            nn.Linear(n_inputs, n_classes), nn.LogSoftmax(dim=1))

    elif model_name == 'resnet50':
        model = models.resnet50(pretrained=True)

        # ResNet exposes its classifier as the single `fc` layer
        for param in model.parameters():
            param.requires_grad = False

        n_inputs = model.fc.in_features
        model.fc = nn.Sequential(
            nn.Linear(n_inputs, n_classes), nn.LogSoftmax(dim=1))

    else:
        raise ValueError(f'Unsupported model name: {model_name}')

    # Move to gpu when available
    if train_on_gpu:
        model = model.to('cuda')

    return model
# ## Pretrained_model 호출, 호출된 모델 Summary 보기
# Build the transfer-learning model and print its layer summary
model = get_pretrained_model(model_choice)
summary(model, input_size=(3, 224, 224), batch_size=batch_size, device='cuda')

# Show only the replaced classification head
if model_choice == 'alexnet':
    print(model.classifier[6])
elif model_choice == 'resnet50':
    print(model.fc)

# +
# Attach the class-name -> index map from the dataset and build the inverse
# lookup used later to turn predicted indices back into class names.
model.class_to_idx = data['train'].class_to_idx
model.idx_to_class = {
    idx: class_
    for class_, idx in model.class_to_idx.items()
}

list(model.idx_to_class.items())[:30]
# -
# # [ Training Start ]
# ## Loss 함수 정의, Optimizer 정의
# +
# NLLLoss pairs with the LogSoftmax head added in get_pretrained_model
criterion = nn.NLLLoss()

# Adam's default learning rate is 1e-3 = 0.001
optimizer = optim.Adam(model.parameters())

# List the parameters that will actually be optimized
# (only the new head, since the backbone was frozen)
for p in optimizer.param_groups[0]['params']:
    if p.requires_grad:
        print(p.shape)
# -
# ## Model 학습 함수
def train(model,
          criterion,
          optimizer,
          train_loader,
          valid_loader,
          save_file_name,
          max_epochs_stop=3,
          n_epochs=20,
          print_every=2,
          early_stop=True):
    """Train a PyTorch Model

    Params
    --------
    model (PyTorch model): the model to train
    criterion (PyTorch loss): loss function whose value training minimizes
    optimizer (PyTorch optimizier): optimizer to compute gradients of model parameters
    train_loader (PyTorch dataloader): training dataloader to iterate through
    valid_loader (PyTorch dataloader): validation dataloader used for early stopping
    save_file_name (str ending in '.pt'): file path to save the model state dict
    max_epochs_stop (int): maximum number of epochs with no improvement in validation loss for early stopping
    n_epochs (int): maximum number of training epochs
    print_every (int): frequency of epochs to print training stats
    early_stop (bool): whether to stop once validation loss stops improving

    Returns
    --------
    model (PyTorch model): trained cnn with best weights
    history (DataFrame): history of train and validation loss and accuracy
    """

    # Early stopping intialization
    epochs_no_improve = 0  # epochs in a row without validation-loss improvement
    valid_loss_min = np.Inf  # best (lowest) validation loss seen so far
    valid_max_acc = 0  # NOTE(review): assigned but never read afterwards
    history = []

    # Number of epochs already trained (if using loaded in model weights)
    try:  # a fresh model has no `epochs` attribute, so this raises and except runs
        print(f'이미 {model.epochs} epochs 만큼 학습된 모델입니다.\n')
    except:
        model.epochs = 0
        print(f'첫 학습을 시작합니다.\n')

    overall_start = timer()  # wall-clock start of the whole training run

    # Main loop over the requested number of epochs
    for epoch in range(n_epochs):

        # Running totals of loss/accuracy for this epoch (weighted by batch size)
        train_loss = 0.0
        valid_loss = 0.0
        train_acc = 0
        valid_acc = 0

        # Set to training mode (enables dropout etc.)
        model.train()
        start = timer()  # start time of this epoch

        # Training loop
        # data: a batch of images, target: the corresponding class labels
        for ii, (data, target) in enumerate(train_loader):
            # Tensors to gpu
            if train_on_gpu:
                data, target = data.cuda(), target.cuda()

            # Clear gradients accumulated by the previous step
            optimizer.zero_grad()
            # Predicted outputs are log probabilities (LogSoftmax head)
            output = model(data)

            # Loss and backpropagation of gradients
            loss = criterion(output, target)
            loss.backward()
            # Update the trainable parameters
            optimizer.step()

            # Track train loss by multiplying average loss by number of examples in batch.
            # loss is a (1,) tensor; loss.item() extracts its scalar value and
            # data.size(0) is the batch size.
            train_loss += loss.item() * data.size(0)

            # Calculate accuracy by finding max log probability
            _, pred = torch.max(output, dim=1)
            correct_tensor = pred.eq(target.data.view_as(pred))
            # Need to convert correct tensor from int to float to average
            accuracy = torch.mean(correct_tensor.type(torch.FloatTensor))
            # Multiply average accuracy times the number of examples in batch
            train_acc += accuracy.item() * data.size(0)

            # Track training progress (overwrites the same console line)
            print(
                f'Epoch: {epoch}\t학습진행률 : {100 * (ii + 1) / len(train_loader):.2f}%' \
                +f'\t 현재 Epoch에서 걸린 시간 : {timer() - start:.2f}s'\
                +f'\t Train_loss : {train_loss/len(train_loader.dataset):.4f}' \
                +f'\t Train_Acc : {100 * (train_acc/len(train_loader.dataset)):.2f}%' ,
                end='\r')  # '\r' returns the cursor to the start of the line

        # After training loops ends, start validation ===============================================
        else:  # for-else: runs once the training loop completes (no break is ever hit)
            model.epochs += 1  # one more full epoch finished

            # Don't need to keep track of gradients during evaluation
            with torch.no_grad():
                # Set to evaluation mode; PyTorch only has train()/eval()
                # modes, and eval() disables dropout.
                model.eval()
                start_eval = timer()
                print('')

                # Validation loop
                for ii, (data, target) in enumerate(valid_loader):
                    # Tensors to gpu
                    if train_on_gpu:
                        data, target = data.cuda(), target.cuda()

                    # Forward pass only; no backpropagation during evaluation
                    output = model(data)

                    # Validation loss
                    loss = criterion(output, target)
                    # Multiply average loss times the number of examples in batch
                    valid_loss += loss.item() * data.size(0)

                    # Calculate validation accuracy
                    _, pred = torch.max(output, dim=1)
                    correct_tensor = pred.eq(target.data.view_as(pred))
                    accuracy = torch.mean(
                        correct_tensor.type(torch.FloatTensor))
                    # Multiply average accuracy times the number of examples
                    valid_acc += accuracy.item() * data.size(0)

                    print(
                        f'\t\t평가진행률 : {100 * (ii + 1) / len(valid_loader):.2f}%' \
                        +f'\t 현재 Epoch에서 걸린 시간 : {timer() - start_eval:.2f}s'\
                        +f'\t Vaild_loss : {valid_loss/len(valid_loader.dataset):.4f}' \
                        +f'\t Vaild_Acc : {100 * (valid_acc/len(valid_loader.dataset)):.2f}%' ,
                        end='\r')  # '\r' returns the cursor to the start of the line

                # Calculate average losses over the whole epoch
                train_loss = train_loss / len(train_loader.dataset)
                valid_loss = valid_loss / len(valid_loader.dataset)

                # Calculate average accuracy over the whole epoch
                train_acc = train_acc / len(train_loader.dataset)
                valid_acc = valid_acc / len(valid_loader.dataset)

                history.append([train_loss, valid_loss, train_acc, valid_acc])

                # Print training and validation results every `print_every` epochs
                if (epoch + 1) % print_every == 0:
                    print(
                        f'\n\t\tTraining Loss: {train_loss:.4f} \t\t Validation Loss: {valid_loss:.4f}'
                    )
                    print(
                        f'\t\tTraining Accuracy: {100 * train_acc:.2f}%\t Validation Accuracy: {100 * valid_acc:.2f}%'
                    )
                    print(
                        f'\t\t현재 Epochs에서 Train과 Vaild에 걸린 시간 : {timer() - start:.2f}s\n'
                    )

                # Save the model if validation loss decreases.
                # valid_loss_min starts at infinity, so epoch 0 always saves.
                # From then on, each epoch without improvement increments
                # epochs_no_improve; once that reaches max_epochs_stop the loss
                # is assumed to have converged and training stops early.
                if valid_loss < valid_loss_min:
                    # Save model (checkpoint on disk always holds the best epoch)
                    torch.save(model.state_dict(), save_file_name)

                    # Track improvement
                    epochs_no_improve = 0
                    valid_loss_min = valid_loss
                    valid_best_acc = valid_acc
                    best_epoch = epoch

                # Otherwise increment count of epochs with no improvement
                else:
                    epochs_no_improve += 1
                    # Trigger early stopping only when the option is enabled
                    if early_stop == True :
                        if epochs_no_improve >= max_epochs_stop:
                            print(
                                f'\n설정한 Epochs보다 빠르게 학습이 끝났습니다. 더 이상 학습해도 의미가 없을것 같습니다.\n' \
                                + f'현재까지 진행한 총 epochs : {epoch}\t 최상의 epochs : {best_epoch} (loss: {valid_loss_min:.2f} and acc: {100 * valid_acc:.2f}%)'
                            )
                            total_time = timer() - overall_start
                            print(f'\n[ 총 학습시간 : {total_time:.2f}s, Epoch당 평균 학습 시간 : {total_time / (epoch+1):.2f}s ]')

                            # Load the best state dict saved above
                            model.load_state_dict(torch.load(save_file_name))
                            # Attach the optimizer
                            model.optimizer = optimizer

                            # Format history as a DataFrame and return early
                            history = pd.DataFrame(
                                history,
                                columns=[
                                    'train_loss', 'valid_loss', 'train_acc',
                                    'valid_acc'
                                ])
                            return model, history

    # Attach the optimizer
    model.optimizer = optimizer
    # Record overall time and print out stats.
    # NOTE(review): this path returns the *last* epoch's weights; unlike the
    # early-stop path, the best checkpoint on disk is not reloaded here.
    total_time = timer() - overall_start
    print(
        f'\n최고 epoch: {best_epoch} with loss: {valid_loss_min:.2f} and acc: {100 * valid_acc:.2f}%'
    )
    print(f'\n[ 총 학습시간 : {total_time:.2f}s, Epoch당 평균 학습 시간 : {total_time / (epoch+1):.2f}s ]')

    # Format history as a DataFrame
    history = pd.DataFrame(
        history,
        columns=['train_loss', 'valid_loss', 'train_acc', 'valid_acc'])
    return model, history
# ## Model 학습 함수 호출
# +
cuda.empty_cache()  # free cached GPU memory before training starts

model, history = train(
    model,  # model to train
    criterion,  # loss function
    optimizer,  # optimizer
    dataloaders['train'],  # training dataset
    dataloaders['val'],  # validation dataset
    save_file_name=save_file_name,  # checkpoint path for the best weights
    max_epochs_stop=1,  # epochs without validation improvement before stopping
    n_epochs=training_epoch,  # maximum number of epochs
    print_every=1,  # print stats every N epochs
    early_stop=False)  # whether early stopping is enabled
# -
# # [ Training Results ]
# ## 그래프 그리기
# Loss curves for train and validation across epochs
plt.figure(figsize=(8, 6))
for c in ['train_loss', 'valid_loss']:
    plt.plot(
        history[c], label=c)
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('Average Negative Log Likelihood')
plt.title('Training and Validation Losses')

# Accuracy curves, converted to percent
plt.figure(figsize=(8, 6))
for c in ['train_acc', 'valid_acc']:
    plt.plot(
        100 * history[c], label=c)
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('Average Accuracy')
plt.title('Training and Validation Accuracy')
# # [ Model Save & Load ]
# ## 모델 저장
def save_checkpoint(model, path):
    """Save a PyTorch model checkpoint

    Params
    --------
    model (PyTorch model): model to save
    path (str): location to save model. Should end in '.pth'

    Returns
    --------
    None, save the `model` to `path`
    """
    # Basic details needed to rebuild the model at load time
    checkpoint = {
        'class_to_idx': model.class_to_idx,
        'idx_to_class': model.idx_to_class,
        'epochs': model.epochs,
    }

    # Save the replaced classification head. AlexNet and the VGG family keep
    # it in `classifier`; ResNet exposes it as `fc`. (The original code only
    # handled vgg16/resnet50, so with model_choice == 'alexnet' the
    # checkpoint was written WITHOUT any state_dict — silent weight loss.)
    if model_choice in ('alexnet', 'vgg11', 'vgg16', 'vgg19'):
        checkpoint['classifier'] = model.classifier
    elif model_choice == 'resnet50':
        checkpoint['fc'] = model.fc

    # Always persist the trained weights, regardless of architecture
    checkpoint['state_dict'] = model.state_dict()

    # Add the optimizer and its state so training can resume
    checkpoint['optimizer'] = model.optimizer
    checkpoint['optimizer_state_dict'] = model.optimizer.state_dict()

    # Save the data to the path
    torch.save(checkpoint, path)


save_checkpoint(model, path=checkpoint_path)
# ## 모델 불러오기
#
# Now we need to write the function to load in the checkpoint. This just takes in a `path` and returns a model from a saved checkpoint.
def load_checkpoint(path):
    """Load a PyTorch model checkpoint saved by `save_checkpoint`.

    Rebuilds the torchvision backbone named by the module-level
    `model_choice`, re-attaches the saved classification head, and restores
    the trained weights and optimizer state.

    Params
    --------
    path (str): saved model checkpoint (.pth file)

    Returns
    --------
    (model, optimizer): the restored model (moved to GPU when available)
    and its optimizer

    Raises
    --------
    ValueError: for an unsupported `model_choice` (the original only handled
        vgg16/resnet50 and crashed with UnboundLocalError on alexnet)
    """
    # Load in checkpoint
    checkpoint = torch.load(path)

    # AlexNet and the VGG family replace `classifier`; ResNet replaces `fc`.
    classifier_constructors = {
        'alexnet': models.alexnet,
        'vgg11': models.vgg11,
        'vgg16': models.vgg16,
        'vgg19': models.vgg19,
    }

    if model_choice in classifier_constructors:
        model = classifier_constructors[model_choice](pretrained=True)
        # Make sure to set backbone parameters as not trainable
        for param in model.parameters():
            param.requires_grad = False
        model.classifier = checkpoint['classifier']
    elif model_choice == 'resnet50':
        model = models.resnet50(pretrained=True)
        # Make sure to set backbone parameters as not trainable
        for param in model.parameters():
            param.requires_grad = False
        model.fc = checkpoint['fc']
    else:
        raise ValueError(f'Unsupported model name: {model_choice}')

    # Load in the trained weights
    model.load_state_dict(checkpoint['state_dict'])

    total_params = sum(p.numel() for p in model.parameters())
    print(f'{total_params:,} total parameters.')
    total_trainable_params = sum(
        p.numel() for p in model.parameters() if p.requires_grad)
    print(f'{total_trainable_params:,} total gradient parameters.')

    # Move to gpu when available
    if train_on_gpu:
        model = model.to('cuda')

    # Model basics: class maps and epoch counter
    model.class_to_idx = checkpoint['class_to_idx']
    model.idx_to_class = checkpoint['idx_to_class']
    model.epochs = checkpoint['epochs']

    # Optimizer and its saved state
    optimizer = checkpoint['optimizer']
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    return model, optimizer
# +
# Rebuild the model from the checkpoint and verify its structure
model, optimizer = load_checkpoint(path=checkpoint_path)
summary(model, input_size=(3, 224, 224), batch_size=batch_size)
# -
# # [ 추론 ]
def imshow_tensor(image, ax=None, title=None):
    """Display a normalized (C, H, W) image tensor on a matplotlib axis.

    Undoes the ImageNet normalization applied by the transform pipeline,
    then renders the result. Returns the axis and the de-normalized array.
    """
    if ax is None:
        _, ax = plt.subplots()

    # Channels-last layout for matplotlib: (C, H, W) -> (H, W, C)
    arr = image.numpy().transpose((1, 2, 0))

    # Reverse the ImageNet standardization, then clamp into [0, 1]
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])
    arr = np.clip(imagenet_std * arr + imagenet_mean, 0, 1)

    ax.imshow(arr)
    plt.axis('off')

    return ax, arr
def process_image(image_path):
    """Process an image path into a PyTorch tensor.

    Mirrors the 'test' transform pipeline: resize, center-crop to 224x224,
    scale to [0, 1], then apply ImageNet standardization.

    Params
    --------
    image_path (str): path of the image file to load

    Returns
    --------
    img_tensor (torch.Tensor): normalized (3, 224, 224) image
    """
    image = Image.open(image_path)

    # Resize. NOTE(review): this squashes to a 256x256 square, unlike
    # transforms.Resize(256) which preserves aspect ratio — confirm intended.
    img = image.resize((256, 256))

    # Center crop a 224x224 window out of the 256x256 image
    width = 256
    height = 256
    new_width = 224
    new_height = 224

    left = (width - new_width) / 2
    top = (height - new_height) / 2
    right = (width + new_width) / 2
    bottom = (height + new_height) / 2
    img = img.crop((left, top, right, bottom))

    # Convert to numpy, transpose color dimension and scale to [0, 1].
    # Divide by 255 (the maximum 8-bit channel value) to match
    # transforms.ToTensor(); the original divided by 256, slightly
    # darkening every pixel relative to the training pipeline.
    img = np.array(img).transpose((2, 0, 1)) / 255

    # Standardization with the ImageNet channel statistics
    means = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    stds = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))

    img = img - means
    img = img / stds

    img_tensor = torch.Tensor(img)

    return img_tensor
# +
# Seed the generator for reproducible test-image picks.
# The original `np.random.seed = 100` OVERWROTE the seed function with an
# int instead of calling it — no seeding actually happened (and any later
# np.random.seed(...) call would raise TypeError).
np.random.seed(100)


def random_test_image():
    """Pick a random test image path from the test directory.

    Chooses a random category from `cat_df`, then a random file inside
    that category's folder under `testdir`.
    """
    c = np.random.choice(cat_df['category'])
    root = testdir + c + '/'
    img_path = root + np.random.choice(os.listdir(root))
    return img_path

# _ = imshow_tensor(process_image(random_test_image()))
# -
def predict(image_path, model, topk=5):
    """Run a single image through a trained model and return its top-k predictions.

    Params
    --------
    image_path (str): filename of the image; the parent directory name is
        taken as the ground-truth class
    model (PyTorch model): trained model for inference
    topk (int): number of top predictions to return

    Returns
    --------
    (tensor, ndarray, list, str): preprocessed image tensor (CPU), top-k
    probabilities, top-k class names, and the true class name
    """
    true_class = image_path.split('/')[-2]

    # Preprocess and add the batch dimension; move to GPU when available
    batch = process_image(image_path).view(1, 3, 224, 224)
    if train_on_gpu:
        batch = batch.cuda()

    model.eval()
    with torch.no_grad():
        # The model emits log-probabilities (LogSoftmax head); exp -> probabilities
        probs = torch.exp(model(batch))

        # Highest-probability classes and their scores
        top_probs, top_idx = probs.topk(topk, dim=1)
        labels = [model.idx_to_class[i] for i in top_idx.cpu().numpy()[0]]
        scores = top_probs.cpu().numpy()[0]

        return batch.cpu().squeeze(), scores, labels, true_class
def display_prediction(image_path, model, topk):
    """Show an image next to a horizontal bar chart of the model's top-k predictions."""
    # Run inference and collect the predictions
    image_tensor, probs, class_names, actual = predict(image_path, model, topk)

    # Tabulate probabilities indexed by class name for plotting
    result = pd.DataFrame({'p': probs}, index=class_names)

    plt.figure(figsize=(16, 5))

    # Left panel: the image itself, titled with the true class
    left = plt.subplot(1, 2, 1)
    left, image_tensor = imshow_tensor(image_tensor, ax=left)
    left.set_title(actual, size=20)

    # Right panel: predicted probabilities as a bar chart
    right = plt.subplot(1, 2, 2)
    result.sort_values('p')['p'].plot.barh(color='blue', edgecolor='k', ax=right)
    plt.xlabel('Predicted Probability')
    plt.tight_layout()


display_prediction(random_test_image(), model, topk=5)
def accuracy(output, target, topk=(1, )):
    """Compute the top-k accuracy(s) of a batch of predictions.

    Params
    --------
    output (tensor): model scores, shape (batch, n_classes)
    target (tensor): true class indices, shape (batch,)
    topk (tuple of ints): the k values to evaluate

    Returns
    --------
    list of float: accuracy percentage for each requested k
    """
    if train_on_gpu:
        output = output.to('cuda')
        target = target.to('cuda')

    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # Find the predicted classes and transpose to (maxk, batch)
        _, pred = output.topk(k=maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()

        # Determine predictions equal to the targets
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        # For each k, find the percentage of correct predictions.
        # .reshape(-1) replaces the original .view(-1): the sliced
        # `correct[:k]` is non-contiguous, and Tensor.view raises a
        # RuntimeError on non-contiguous tensors in modern PyTorch.
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size).item())
        return res
# +
testiter = iter(dataloaders['test'])
# Get a batch of testing images and labels
features, targets = next(testiter)

# Sanity-check top-1 / top-5 accuracy on this single test batch
if train_on_gpu:
    accuracy(model(features.to('cuda')), targets, topk=(1, 5))
else:
    accuracy(model(features), targets, topk=(1, 5))
# -
def evaluate(model, test_loader, criterion, topk=(1, 5)):
    """Measure the performance of a trained PyTorch model

    Params
    --------
    model (PyTorch model): trained cnn for inference
    test_loader (PyTorch DataLoader): test dataloader
    criterion (PyTorch loss): loss function used to score each example
    topk (tuple of ints): accuracy to measure

    Returns
    --------
    results (DataFrame): per-class mean top-k accuracy and loss
    """

    classes = []
    losses = []
    # Hold accuracy results: one row per test example, one column per k
    acc_results = np.zeros((len(test_loader.dataset), len(topk)))
    i = 0

    model.eval()
    with torch.no_grad():

        # Testing loop over whole batches
        for data, targets in test_loader:

            # Tensors to gpu
            if train_on_gpu:
                data, targets = data.to('cuda'), targets.to('cuda')

            # Raw model output for the batch
            out = model(data)
            # Iterate through each example individually
            for pred, true in zip(out, targets):

                # Find topk accuracy for this single example (0 or 100 per k)
                acc_results[i, :] = accuracy(
                    pred.unsqueeze(0), true.unsqueeze(0), topk)
                classes.append(model.idx_to_class[true.item()])
                # Calculate the per-example loss
                loss = criterion(pred.view(1, n_classes), true.view(1))
                losses.append(loss.item())
                i += 1

    # Send results to a dataframe and calculate average across classes
    results = pd.DataFrame(acc_results, columns=[f'top{i}' for i in topk])
    results['class'] = classes
    results['loss'] = losses
    results = results.groupby(classes).mean()

    return results.reset_index().rename(columns={'index': 'class'})
# +
criterion = nn.NLLLoss()
# Evaluate the model on all the test data
results = evaluate(model, dataloaders['test'], criterion)
results.head()
results.tail()

# Sort classes by top-1 accuracy; the worst classes end up at the tail
results.sort_values('top1', ascending=False, inplace=True)
results.tail()
# +
# Attach per-class image counts so accuracy can be related to data volume
results = results.merge(cat_df, left_on='class', right_on='category').\
    drop(columns=['category'])

# Plot using seaborn: top-1 accuracy vs. number of training images
sns.lmplot(
    y='top1', x='n_train', data=results, height=6)
plt.xlabel('images')
plt.ylabel('Accuracy (%)')
plt.title('Top 1 Accuracy vs Number of Training Images')
plt.ylim(-5, 105)
# -

# Same relationship for top-5 accuracy
sns.lmplot(
    y='top5', x='n_train', data=results, height=6)
plt.xlabel('images')
plt.ylabel('Accuracy (%)')
plt.title('Top 5 Accuracy vs Number of Training Images')
plt.ylim(-5, 105)

# +
# Weighted column of test images (each class's share of the test set)
results['weighted'] = results['n_test'] / results['n_test'].sum()

# Create weighted accuracies
for i in (1, 5):
    results[f'weighted_top{i}'] = results['weighted'] * results[f'top{i}']

# Find final accuracy accounting for class frequencies
top1_weighted = results['weighted_top1'].sum()
top5_weighted = results['weighted_top5'].sum()
loss_weighted = (results['weighted'] * results['loss']).sum()

print(f'Final test cross entropy per image = {loss_weighted:.4f}.')
print(f'Final test top 1 weighted accuracy = {top1_weighted:.2f}%')
print(f'Final test top 5 weighted accuracy = {top5_weighted:.2f}%')
# -
| Deep_CNN_Project/AlexNet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # **PRINT** is used to display any text or numeric data written by the user.

# `print` also shows messages describing what the program is doing while it
# runs. Python executes a program from its first line downward. To print a
# piece of text (a string of characters), wrap it in quotation marks.

print('Hello World') # print a message to inform the user

# # Try to draw an imaginary dog using the print command.

print('')
print('*' * 3)

# Why multiplication, when we said print displays quoted text? Multiplying a
# string by a number repeats it that many times: the line above prints '*'
# three times, producing ***. Try your own variations below.
#
# # Practice

print('|' * 10) # change the 10 and the characters inside the quotation marks

# # Now what if I use addition instead of multiplication? Let's give it a try.

print('First word' + ' Second word')

# Addition joins (concatenates) the two strings into a single one, so the
# output is 'First word Second word'.
| Jupyter Notebook/.ipynb_checkpoints/01. Hello World-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: nlu3
# language: python
# name: nlu3
# ---
import os
tags = ["relationships","atheismbot","ukpolitics"]

# +
# # cp /home/cs224u/processed_10_1k/processed_atheismbot/atheismbot_story/* .
# # cp /home/cs224u/processed_10_1k/processed_relationships/relationships_story/* .
# # cp /home/cs224u/processed_10_1k/processed_ukpolitics/ukpolitics_story/* .

BASE = "/home/cs224u/processed_10_1k/"


def _read_list(tag, split):
    """Read one tag's split list, guaranteeing a newline-terminated last line.

    Uses a context manager so file handles are closed (the original left every
    reader open), and only appends a newline when the last line lacks one (the
    original appended unconditionally, inserting blank lines between tags
    whenever a source file already ended with a newline). Also tolerates an
    empty file, where the original raised IndexError.
    """
    path = BASE + "processed_" + tag + "/" + tag + "_" + split + "_list.txt"
    with open(path, "r") as f:
        lines = f.readlines()
    if lines and not lines[-1].endswith("\n"):
        lines[-1] = lines[-1] + "\n"
    return lines


# get train, val, test lists for every tag and concatenate them
train_story = []
val_story = []
test_story = []
for t in tags:
    train_story += _read_list(t, "train")
    val_story += _read_list(t, "val")
    test_story += _read_list(t, "test")


def _write_list(lines, split):
    """Write one combined split list for the merged 'combine2' dataset."""
    path = BASE + "processed_combine2/combine2_" + split + "_list.txt"
    with open(path, "w+") as f:
        f.writelines(lines)


# write the combined lists to file
_write_list(train_story, "train")
_write_list(val_story, "val")
_write_list(test_story, "test")
# -
# Spot-check the combined lists
test_story[0]
test_story[-1]

# Print a single example line (every line should end with a newline)
for t in train_story + val_story + test_story:
    print (t)
    break

train_story[0]
train_story[1]
| process_data/.ipynb_checkpoints/combine_3-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="uGd4NYQX1Rf_"
# *Note: You are currently reading this using Google Colaboratory which is a cloud-hosted version of Jupyter Notebook. This is a document containing both text cells for documentation and runnable code cells. If you are unfamiliar with Jupyter Notebook, watch this 3-minute introduction before starting this challenge: https://www.youtube.com/watch?v=inN8seMm7UI*
#
# ---
#
# In this challenge, you will create a book recommendation algorithm using **K-Nearest Neighbors**.
#
# You will use the [Book-Crossings dataset](http://www2.informatik.uni-freiburg.de/~cziegler/BX/). This dataset contains 1.1 million ratings (scale of 1-10) of 270,000 books by 90,000 users.
#
# After importing and cleaning the data, use `NearestNeighbors` from `sklearn.neighbors` to develop a model that shows books that are similar to a given book. The Nearest Neighbors algorithm measures distance to determine the “closeness” of instances.
#
# Create a function named `get_recommends` that takes a book title (from the dataset) as an argument and returns a list of 5 similar books with their distances from the book argument.
#
# This code:
#
# `get_recommends("The Queen of the Damned (Vampire Chronicles (Paperback))")`
#
# should return:
#
# ```
# [
# 'The Queen of the Damned (Vampire Chronicles (Paperback))',
# [
# ['Catch 22', 0.793983519077301],
# ['The Witching Hour (Lives of the Mayfair Witches)', 0.7448656558990479],
# ['Interview with the Vampire', 0.7345068454742432],
# ['The Tale of the Body Thief (Vampire Chronicles (Paperback))', 0.5376338362693787],
# ['The Vampire Lestat (Vampire Chronicles, Book II)', 0.5178412199020386]
# ]
# ]
# ```
#
# Notice that the data returned from `get_recommends()` is a list. The first element in the list is the book title passed in to the function. The second element in the list is a list of five more lists. Each of the five lists contains a recommended book and the distance from the recommended book to the book passed in to the function.
#
# If you graph the dataset (optional), you will notice that most books are not rated frequently. To ensure statistical significance, remove from the dataset users with less than 200 ratings and books with less than 100 ratings.
#
# The first three cells import libraries you may need and the data to use. The final cell is for testing. Write all your code in between those cells.
# + id="Y1onB6kUvo4Z"
# import libraries (you may add additional imports but you may not have to)
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# + id="iAQGqqO_vo4d" outputId="6b48de00-368d-4ce4-d21f-22a4bf7fe8bc" colab={"base_uri": "https://localhost:8080/"}
# get data files
# !wget https://cdn.freecodecamp.org/project-data/books/book-crossings.zip
# !unzip book-crossings.zip

# Paths to the unzipped Book-Crossings CSVs (';'-separated, Latin-1 encoded).
books_filename = 'BX-Books.csv'
ratings_filename = 'BX-Book-Ratings.csv'
# + id="NClILWOiEd6Q"
# import csv data into dataframes
# Books: isbn/title/author. Ratings: user/isbn/rating.
# Both files are ';'-separated and Latin-1 encoded.
df_books = pd.read_csv(
    books_filename,
    encoding = "ISO-8859-1",
    sep=";",
    header=0,
    names=['isbn', 'title', 'author'],
    usecols=['isbn', 'title', 'author'],
    dtype={'isbn': 'str', 'title': 'str', 'author': 'str'})
df_ratings = pd.read_csv(
    ratings_filename,
    encoding = "ISO-8859-1",
    sep=";",
    header=0,
    names=['user', 'isbn', 'rating'],
    usecols=['user', 'isbn', 'rating'],
    dtype={'user': 'int32', 'isbn': 'str', 'rating': 'float32'})
# + id="xAcXjkCFCh0A" outputId="624db992-70f3-4d76-9e02-04d2f1795794" colab={"base_uri": "https://localhost:8080/"}
# Join book metadata (title/author) onto the ratings via ISBN.
df = pd.merge(df_books, df_ratings, on='isbn')
print(df.head())
# + id="JFeRDPbp_F0d" outputId="80fcb6ce-4bb8-4d7b-f277-2fb76199fe67" colab={"base_uri": "https://localhost:8080/"}
# Keep only users with more than 200 ratings and books with more than 100
# ratings, per the challenge statement (statistical significance).
# The original second filter counted rating *values* (the 1-10 scale) instead
# of per-book counts, so sparsely rated books were never removed.
vc = df['user'].value_counts()
df = df[df['user'].isin(vc[vc > 200].index)]
vc = df['isbn'].value_counts()
df = df[df['isbn'].isin(vc[vc > 100].index)]
print(df.head())
# + id="ssOkB-n1EAcg"
# Keep one rating per (title, user) pair so the pivot has no duplicate cells.
df = df.drop_duplicates(['title', 'user'])
# + id="Sa3aIpdFAah_" outputId="76bbfd9a-4921-496c-d844-abc126de3786" colab={"base_uri": "https://localhost:8080/"}
# Title x user matrix of ratings; unrated cells become 0.
df_pivot = df.pivot_table(index = 'title', columns='user', values='rating').fillna(0)
print(df_pivot.head())
# + id="AX4TkqhnDwV_"
# Sparse representation for NearestNeighbors.
df_matrix = csr_matrix(df_pivot.values)
# + id="pbkRBTbQEzQ8" outputId="83cb52dd-a72c-4226-e7d5-bb9cf32e9507" colab={"base_uri": "https://localhost:8080/"}
# Cosine-distance KNN over per-book rating vectors.
model = NearestNeighbors(metric='cosine', n_neighbors=5)
model.fit(df_matrix)
# + id="f5ZUd-L1SQz7"
# function to return recommended books - this will be tested
def get_recommends(book = ""):
    """Return [book, [[title, distance] x 5]] for the 5 nearest books.

    Recommendations are ordered farthest-first (descending distance), which
    is the ordering shown in the challenge's expected output and checked by
    test_book_recommendation(); the original ascending loop returned
    closest-first.
    """
    # Row index of the requested title in the title x user pivot.
    row = df_pivot.index.get_loc(book)
    # kneighbors returns ascending distances; index 0 is the book itself,
    # so 6 neighbors yield the book plus its 5 nearest others.
    distances, indices = model.kneighbors(
        df_pivot.iloc[row, :].values.reshape(1, -1), n_neighbors=6)
    recommended_books = []
    for i in range(5, 0, -1):  # reversed: farthest of the 5 neighbors first
        recommended_books.append(
            [df_pivot.index[indices.flatten()[i]], distances.flatten()[i]])
    return [book, recommended_books]
# + id="G3phwemFHxAb" outputId="fc0547f1-750b-45ae-906c-a3f32046bf81" colab={"base_uri": "https://localhost:8080/", "height": 583}
# Quick smoke test of get_recommends on an arbitrary title from the dataset.
books = get_recommends("Always Have Popsicles")
print(books)
# + [markdown] id="eat9A2TKawHU"
# Use the cell below to test your function. The `test_book_recommendation()` function will inform you if you passed the challenge or need to keep trying.
# + id="jd2SLCh8oxMh" outputId="da9d1405-936c-4fa0-b77d-6d44f936091b" colab={"base_uri": "https://localhost:8080/"}
# Same title the challenge checker uses; printed for manual inspection.
books = get_recommends("Where the Heart Is (Oprah's Book Club (Paperback))")
print(books)
def test_book_recommendation():
    """Challenge checker: verify the returned title and the first two
    recommendations (title membership and approximate distance)."""
    title = "Where the Heart Is (Oprah's Book Club (Paperback))"
    recommends = get_recommends(title)
    expected_titles = ["I'll Be Seeing You", 'The Weight of Water', 'The Surgeon', 'I Know This Much Is True']
    expected_dists = [0.8, 0.77, 0.77, 0.77]
    passed = recommends[0] == title
    for i in range(2):
        rec_title, rec_dist = recommends[1][i][0], recommends[1][i][1]
        if rec_title not in expected_titles:
            passed = False
        if abs(rec_dist - expected_dists[i]) >= 0.05:
            passed = False
    if passed:
        print("You passed the challenge! 🎉🎉🎉🎉🎉")
    else:
        print("You haven't passed yet. Keep trying!")
test_book_recommendation()
| fcc_book_recommendation_knn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: aaai20-belafonte
# language: python
# name: aaai20-belafonte
# ---
# # Run Weka Experiment
# +
import PxW
import pandas as pd
import arff
import os
import numpy as np
from os.path import dirname
RANDOM_STATE = 42
def filename(basename, step=1, prefix="", suffix="", extension="arff", check=True):
    """Build the path of a dataset file inside ../data/step-XX.

    The file name is prefix-basename-suffix.extension with empty parts
    skipped. When `check` is true the step directory is created if missing.
    """
    parts = [p for p in (prefix, basename, suffix) if len(p) > 0]
    fname = "{}.{}".format("-".join(parts), extension)
    root_dir = dirname(os.getcwd())
    data_dir = os.path.relpath(os.path.join(root_dir, 'data'))
    step_dir = os.path.join(data_dir, "step-" + str(step).zfill(2))
    if check and not os.path.exists(step_dir):
        os.makedirs(step_dir)
    return os.path.join(step_dir, fname)
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.metrics import f1_score
def f1_weka(out, average='macro'):
    """Label-encode the 'actual'/'predicted' columns of a Weka output frame
    and return their f1 score (macro-averaged by default)."""
    encoded = out.apply(LabelEncoder().fit_transform)
    return f1_score(encoded['actual'], encoded['predicted'], average=average)
# +
root_dir = dirname(os.getcwd())
data_dir = os.path.relpath(os.path.join(root_dir, 'data'))
step = 2 # Where final datasets reside.
step_dir = os.path.join(data_dir, "step-"+str(step).zfill(2))
datasets = ['iris']
# -
res = []
for ds in datasets:
# Train
fn_train = filename(ds, step=1, suffix='train')
clf = PxW.J48()
clf.fit(fn_train, verbose=False)
# Test
fn_qry = [os.path.join(step_dir, fn) for fn in os.listdir(step_dir) if ds in fn]
fn_qry.sort()
for q_idx, fn in enumerate(fn_qry):
print(q_idx)
print(fn)
out = clf.predict(fn, verbose=True)
f1 = f1_weka(out, average='macro')
res.append(f1)
res
| note/experiments/weka-exp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Twitter Sentiment Analysis for Indian Election 2019
# **Abstract**<br>
# The goal of this project is to do sentiment analysis for the Indian Elections. The data used is the tweets that are extracted from Twitter. The BJP and Congress are the two major political parties that will be contesting the election. The dataset will consist of tweets for both the parties. The tweets will be labeled as positive or negative based on the sentiment score obtained using Textblob library. This data will be used to build models that can classify new tweets as positive or negative. The models built are a Bidirectional RNN and GloVe word embedding model.
# **Implementation**<br>
import os
import pandas as pd
import tweepy
import re
import string
from textblob import TextBlob
import preprocessor as p
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import nltk
nltk.download('punkt')
import pandas as pd
from nltk.tokenize import word_tokenize
from string import punctuation
from nltk.corpus import stopwords
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Embedding, SimpleRNN,Input
from keras.models import Sequential,Model
from keras.preprocessing import sequence
from keras.layers import Dense,Dropout
from keras.layers import Embedding, Flatten, Dense,Conv1D,MaxPooling1D
from sklearn import preprocessing
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import itertools
import seaborn as sns
from sklearn.metrics import confusion_matrix
from keras.utils import to_categorical
from collections import Counter
import tensorflow as tf
from keras.layers import LSTM, Bidirectional, Dropout
# **Data Creation**
# We use Tweepy API to access Twitter and download tweets. Tweepy supports accessing Twitter via Basic Authentication and the newer method, OAuth. Twitter has stopped accepting Basic Authentication so OAuth is now the only way to use the Twitter API.
# The below code downloads the tweets from Twitter based on the keyword that we pass. The tweets sentiment score is obtained using the textblog library. The Tweets are then preprocessed. The preprocessing involved removing emoticons, removing stopwords.
# +
# Twitter API credentials for tweepy OAuth.
# WARNING(review): never commit real API keys — load them from environment
# variables or a config file excluded from version control.
consumer_key= '9oO3eQOBkuvCRPqMsFvnShRrq'
consumer_secret= '<KEY>'
access_token='<KEY>'
access_token_secret='<KEY>'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#file location changed to "data/telemedicine_data_extraction/" for clearer path
congress_tweets = "C:/Users/Abhishek/Election Twitter Sentiment analysis/congress_test.csv"
bjp_tweets = "C:/Users/Abhishek/Election Twitter Sentiment analysis/bjp_test_new.csv"
#set two date variables for date range
start_date = '2019-04-1'
end_date = '2019-04-20'
# -
# **Data cleaning scripts**
# +
# Happy Emoticons — tokens matching these are dropped by clean_tweets().
emoticons_happy = set([
    ':-)', ':)', ';)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}',
    ':^)', ':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D',
    '=-3', '=3', ':-))', ":'-)", ":')", ':*', ':^*', '>:P', ':-P', ':P', 'X-P',
    'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b', '>:)', '>;)', '>:-)',
    '<3'
    ])
# Sad Emoticons
emoticons_sad = set([
    ':L', ':-/', '>:/', ':S', '>:[', ':@', ':-(', ':[', ':-||', '=L', ':<',
    ':-[', ':-<', '=\\', '=/', '>:(', ':(', '>.<', ":'-(", ":'(", ':\\', ':-c',
    ':c', ':{', '>:\\', ';('
    ])
#Emoji patterns — compiled once; used by clean_tweets() to strip emoji codepoints
emoji_pattern = re.compile("["
         u"\U0001F600-\U0001F64F"  # emoticons
         u"\U0001F300-\U0001F5FF"  # symbols & pictographs
         u"\U0001F680-\U0001F6FF"  # transport & map symbols
         u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
         u"\U00002702-\U000027B0"
         u"\U000024C2-\U0001F251"
         "]+", flags=re.UNICODE)
#combine sad and happy emoticons
emoticons = emoticons_happy.union(emoticons_sad)
# method clean_tweets()
def clean_tweets(tweet):
    """Clean a raw tweet and return its remaining tokens joined by spaces.

    Strips leftover ':' from tweepy mention/RT removal, ellipsis characters,
    non-ASCII runs and emojis, then tokenizes and drops stopwords, emoticons
    and punctuation tokens.
    """
    stop_words = set(stopwords.words('english'))
    # Clean the raw text BEFORE tokenizing. The original tokenized first,
    # so none of the regex/emoji cleanup below ever reached the output.
    # after tweepy preprocessing the colon remains after removing mentions
    # or the RT sign at the beginning of the tweet
    tweet = re.sub(r':', '', tweet)
    tweet = re.sub(r'…', '', tweet)
    # replace consecutive non-ASCII characters with a space
    tweet = re.sub(r'[^\x00-\x7F]+', ' ', tweet)
    # remove emojis from tweet
    tweet = emoji_pattern.sub(r'', tweet)
    word_tokens = word_tokenize(tweet)
    # check tokens against stop words, emoticons and punctuation
    filtered_tweet = [w for w in word_tokens
                      if w not in stop_words
                      and w not in emoticons
                      and w not in string.punctuation]
    return ' '.join(filtered_tweet)
#print(word_tokens)
#print(filtered_sentence)
# -
# method write_tweets()
def write_tweets(keyword, file):
    """Search Twitter for `keyword`, clean and sentiment-score each tweet
    with TextBlob, and persist the accumulated rows to the CSV at `file`.

    NOTE(review): COLS (the column list) is defined in a cell not shown
    here — confirm it matches the fields appended below.
    """
    # If the file exists, then read the existing data from the CSV file.
    if os.path.exists(file):
        df = pd.read_csv(file, header=0)
    else:
        df = pd.DataFrame(columns=COLS)
    #page attribute in tweepy.cursor and iteration
    for page in tweepy.Cursor(api.search, q=keyword,
                              count=200, include_rts=False, since=start_date).pages(50):
        for status in page:
            new_entry = []
            status = status._json
            ## check whether the tweet is in english or skip to the next tweet
            if status['lang'] != 'en':
                continue
            # For tweets already saved, refresh favorite/retweet counts that
            # changed since the last download, then skip re-adding the row.
            if status['created_at'] in df['created_at'].values:
                i = df.loc[df['created_at'] == status['created_at']].index[0]
                if status['favorite_count'] != df.at[i, 'favorite_count'] or \
                   status['retweet_count'] != df.at[i, 'retweet_count']:
                    df.at[i, 'favorite_count'] = status['favorite_count']
                    df.at[i, 'retweet_count'] = status['retweet_count']
                continue
            #call clean_tweets method for extra preprocessing
            filtered_tweet = clean_tweets(status['text'])
            #pass textBlob method for sentiment calculations
            blob = TextBlob(filtered_tweet)
            Sentiment = blob.sentiment
            #separate polarity and subjectivity in to two variables
            polarity = Sentiment.polarity
            subjectivity = Sentiment.subjectivity
            #new entry append
            new_entry += [status['id'], status['created_at'],
                          status['source'], status['text'], filtered_tweet, Sentiment, polarity, subjectivity, status['lang'],
                          status['favorite_count'], status['retweet_count']]
            #to append original author of the tweet
            new_entry.append(status['user']['screen_name'])
            try:
                is_sensitive = status['possibly_sensitive']
            except KeyError:
                is_sensitive = None
            new_entry.append(is_sensitive)
            # hashtags and mentions are saved comma separated
            hashtags = ", ".join([hashtag_item['text'] for hashtag_item in status['entities']['hashtags']])
            new_entry.append(hashtags)
            mentions = ", ".join([mention['screen_name'] for mention in status['entities']['user_mentions']])
            new_entry.append(mentions)
            #get location of the tweet if possible
            try:
                location = status['user']['location']
            except TypeError:
                location = ''
            new_entry.append(location)
            try:
                coordinates = [coord for loc in status['place']['bounding_box']['coordinates'] for coord in loc]
            except TypeError:
                coordinates = None
            new_entry.append(coordinates)
            single_tweet_df = pd.DataFrame([new_entry], columns=COLS)
            # pd.concat replaces DataFrame.append (deprecated, removed in pandas 2.0)
            df = pd.concat([df, single_tweet_df], ignore_index=True)
    # Overwrite the file with the full frame. The original opened the file in
    # append mode and wrote the whole frame (including rows read back at the
    # top), duplicating previously saved tweets on every run — and never
    # closed the handle it opened.
    df.to_csv(file, mode='w', columns=COLS, index=False, encoding="utf-8")
#declare keywords as a query for the two parties
Congress_keywords = '#IndianNationalCongress OR #RahulGandhi OR #SoniaGandhi OR #INC'
BJP_keywords = '#BJP OR #Modi OR #AmitShah OR #BhartiyaJantaParty'
# Creates two CSV files: the first call saves tweets for Congress, the second for BJP.
#call main method passing keywords and file path
write_tweets(Congress_keywords, congress_tweets)
write_tweets(BJP_keywords, bjp_tweets)
# **LABELING TWEETS AS POSITIVE NEGATIVE**<br>
#
# The TextBlob library gives out sentiment polarity in the range of -1 to +1. For our topic of election prediction, neutral tweets would be of no use as they do not provide any valuable information. Thus, for simplicity, I have labeled tweets as only positive and negative. Tweets with polarity of 0 or less are labeled negative (0) and those greater than 0 are labeled positive (1).
# Binarize TextBlob polarity: strictly positive -> 1, zero or negative -> 0.
# NOTE(review): bjp_df / congress_df are loaded in a cell not shown here.
bjp_df['polarity'] = bjp_df['polarity'].apply(lambda x: 1 if x > 0 else 0)
congress_df['polarity'] = congress_df['polarity'].apply(lambda x: 1 if x > 0 else 0)
# Class balance check for BJP labels (output shown in the image below).
bjp_df['polarity'].value_counts()
# 
congress_df['polarity'].value_counts()
# 
# ## **RESAMPLING THE DATA** <br>
# Since the ratio of the negative tweets to positive tweets is not proportional. Our data set is not balanced. This will create a bias while training the model. To avoid this I have resampled the data. New data was downloaded from twitter using the above procedure. For both the parties only positive tweets were sampled and appened to the main files to balance the data. After balancing the data. The count of positive and negative tweets for both the parties is as follows. The code for the resampling procedure can be found in the notebook Data_Labeling.ipynb
# 
# **CREATING FINAL DATASET**
# Stack the two (rebalanced) party frames into the final training dataset.
# NOTE(review): bjp / congress come from the resampling notebook, not shown here.
frames = [bjp, congress]
election_data = pd.concat(frames)
# The final dataset that will be used for our analysis saved in a csv file. That file can be loaded used to run our models. The final dataset looks as follows.
# 
# **TOKENIZING DATA**
# We tokenize the text and keep the maximum length of the the vector 1000.
# 
# **TRAIN TEST SPLIT WITH 80:20 RATIO**
# +
# Shuffle samples and labels together, then hold out the last 20% for validation.
# NOTE(review): `data`, `labels` and `np` are defined in earlier cells not
# visible here — `import numpy as np` does not appear in this notebook's imports.
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(.20 * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_val = data[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
# -
# **CREATING EMBEDDING MATRIX WITH HELP OF PRETRAINED MODEL: GLOVE**
#
# Word Embeddings are text converted into numbers. There are number of ways to represent the numeric forms.<br>
#
# Types of embeddings: Frequency based, Prediction based.<br>Frequency Based: Tf-idf, Co-occurrence matrix<br>
#
# Prediction-Based: BOW, Skip-gram model
# Using Pre-trained word vectors: Word2vec, Glove
#
# Word Embedding is done for the experiment with the pre trained word vector Glove.
#
# Glove version used : 100-dimensional GloVe embeddings of 400k words computed on a 2014 dump of English Wikipedia. Training is performed on an aggregated global word-word co-occurrence matrix, giving us a vector space with meaningful substructures
# 
# Build the GloVe embedding matrix: row i holds the 100-d vector of word i.
# NOTE(review): `word_index` and `embeddings_index` come from cells not shown here.
embedding_matrix = np.zeros((len(word_index) + 1, 100))
for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
# Creating an embedding layer using GloVe (frozen: trainable=False)
embedding_layer = Embedding(len(word_index) + 1,
                            100,
                            weights=[embedding_matrix],
                            input_length=1000,
                            trainable=False)
# # Model 1
# **Glove Word Embedding model**
# GloVe is an unsupervised learning algorithm for obtaining vector representations for words. Training is performed on aggregated global word-word co-occurrence statistics from a corpus, and the resulting representations showcase inter-esting linear substructures of the word vector space. GloVe can be used to find relations between words like synonyms, company - product relations, zip codes, and cities etc. It is also used by the spaCy model to build semantic word em-beddings/feature vectors while computing the top list words that match with distance measures such as Cosine Similar-ity and Euclidean distance approach.
def model_creation():
    """Build and compile the GloVe-embedding feed-forward classifier.

    Input: padded token sequences of length 1000. Output: a single sigmoid
    unit (probability of the positive class). Uses the module-level
    `embedding_layer` and `learning_rate` globals.
    """
    # keras.regularizers / keras.optimizers are referenced below, but only
    # keras submodules are imported at the top of the notebook.
    import keras
    input_layer = Input(shape=(1000,), dtype='int32')
    embed_layer = embedding_layer(input_layer)
    x = Dense(100, activation='relu')(embed_layer)
    x = Dense(50, activation='relu', kernel_regularizer=keras.regularizers.l2(0.002))(x)
    x = Flatten()(x)
    x = Dense(50, activation='relu', kernel_regularizer=keras.regularizers.l2(0.002))(x)
    x = Dropout(0.5)(x)
    x = Dense(50, activation='relu')(x)
    x = Dropout(0.5)(x)
    final_layer = Dense(1, activation='sigmoid')(x)
    opt = keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999,
                                epsilon=None, decay=0.0, amsgrad=False)
    model = Model(input_layer, final_layer)
    # A single sigmoid unit is binary classification: 'categorical_crossentropy'
    # over one output collapses to a constant loss, so use binary crossentropy.
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  metrics=['acc'])
    return model
# **MODEL 1 Architecture**
# Training hyperparameters for the GloVe model (learning_rate is read
# inside model_creation()).
learning_rate = 0.0001
batch_size = 1024
epochs = 10
model_glove = model_creation()
# 
# 
# **SAVE BEST MODEL AND WEIGHTS for Model1**
# serialize model to JSON
# NOTE(review): the file holds JSON text despite the .h5 extension; the
# weights file below is the actual HDF5 payload.
model_json = model_glove.to_json()
with open(".\\SavedModels\\Model_glove.h5", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model_glove.save_weights(".\\SavedModels\\Weights_glove.h5")
# **MODEL1 LOSS AND ACCURAY**
# 
# **MODEL1 PERFORMANCE**
# +
def plot_modelacc(fit_model):
    """Plot train vs validation accuracy per epoch for a Keras History."""
    with plt.style.context('ggplot'):
        history = fit_model.history
        plt.plot(history['acc'])
        plt.plot(history['val_acc'])
        plt.ylim(0, 1)
        plt.title("MODEL ACCURACY")
        plt.xlabel("# of EPOCHS")
        plt.ylabel("ACCURACY")
        plt.legend(['train', 'test'], loc='upper left')
        return plt.show()


def plot_model_loss(fit_model):
    """Plot train vs validation loss per epoch for a Keras History."""
    with plt.style.context('ggplot'):
        history = fit_model.history
        plt.plot(history['loss'])
        plt.plot(history['val_loss'])
        plt.title("MODEL LOSS")
        plt.xlabel("# of EPOCHS")
        plt.ylabel("LOSS")
        plt.legend(['train', 'test'], loc='upper left')
        return plt.show()
# -
# 
# **CONFUSION MATRIX**<br>
# A confusion matrix will show us the how the model predicted with respect to the acutal output.
# 
# True Positives: 870 (Predicted True and True in reality)<br>
# True Negative: 1141(Predicted False and False in realtity)<br>
# False Positive: 33 (Predicted Positve but Negative in reality)<br>
# False Negative: 29 (Predicted Negative but Positive in reality)
# # Model 2
# **Bidirectional RNN model**
# Bidirectional Recurrent Neural Networks (BRNN) connect two hidden layers of opposite directions to the same output. With this form of generative deep learning, the output layer can get information from past (backwards) and future (forward) states simultaneously.Invented in 1997 by Schuster and Paliwal,BRNNs were introduced to increase the amount of input information available to the network. For example, multilayer perceptron (MLPs) and time delay neural network (TDNNs) have limitations on the input data flexibility, as they require their input data to be fixed. Standard recurrent neural network (RNNs) also have restrictions as the future input information cannot be reached from the current state. On the contrary, BRNNs do not require their input data to be fixed. Moreover, their future input information is reachable from the current state.
#
# BRNN are especially useful when the context of the input is needed. For example, in handwriting recognition, the performance can be enhanced by knowledge of the letters located before and after the current letter.
# **MODEL 1 Architecture**
# 
# 
# **SAVING BEST MODEL2 AND ITS WEIGHTS**
# serialize model to JSON (file contains JSON text despite the .h5 extension)
model_json = model.to_json()
with open(".\\SavedModels\\Model_Bidir_LSTM.h5", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(".\\SavedModels\\Weights_bidir_LSTM.h5")
print("Saved model to disk")
# **MODEL 2 LOSS AND ACCURACY**
# 
# 
# **MODEL 2 CONFUSION MATRIX**
# 
# True Positives: 887(Predicted True and True in reality)
# True Negative: 1140(Predicted False and False in realtity)
# False Positive: 35 (Predicted Positve but Negative in reality)
# False Negative: 11 (Predicted Negative but Positive in reality)
# **PREDICTION USING THE BEST MODEL**
# The models were compared based on the Test loss and Test Accuracy. The Bidirectional RNN performed slightly better than the GloVe model. The RNN despite its simple architec-ture performed better than the Glove model. We use the Bidirectional RNN to make the predictions for the tweets that will be used to infer election results.
# Load the test data on which the predictions will be made using our best model. The data for both the parties was collected using the same procedure like above.
# Load held-out tweets per party for prediction with the best model.
congress_test = pd.read_csv('congress_test.csv')
bjp_test = pd.read_csv('bjp_test.csv')
# We took equal samples from both files: 2000 tweets for Congress and 2000 for BJP. The party that gets the greater number of positive tweets can be inferred to have the higher probability of winning the 2019 election.
# Keep 2000 tweets per party so the comparison is balanced.
congress_test =congress_test[:2000]
bjp_test = bjp_test[0:2000]
# Tokenize the tweets in the same was that were used for the Bidirectional RNN model.
# NOTE(review): tokenze_data (sic) and the *_inputs variables are defined in
# cells not shown here — confirm they exist before running.
congress_inputs = tokenze_data(congress_inputs)
bjp_inputs = tokenze_data(bjp_inputs)
# **LOAD THE BEST MODEL (BIDIRECTIONAL LSTM)**
from keras.models import model_from_json
# load json and create model (the .h5-named file actually stores JSON text)
json_file = open(".\\SavedModels\\Model_Bidir_LSTM.h5", 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(".\\SavedModels\\Weights_bidir_LSTM.h5")
print("Loaded model from disk")
# **SENTIMENT PREDICTION USING THE MODEL**
# Predict sentiment probabilities for each party's held-out tweets.
congress_prediction = loaded_model.predict(congress_inputs)
bjp_prediction = loaded_model.predict(bjp_inputs)
# If the probabilty of the outcome is greater than 0.5 for any class then the sentiment belongs to that particular class. Since we are concerned with only the count of positive sentiments. We will check the second column variables for our inference.
# +
# Threshold the predicted probabilities at 0.5 into boolean class flags.
congress_pred = (congress_prediction>0.5)
bjp_pred = (bjp_prediction>0.5)
def get_predictions(party_pred):
    # Count rows flagged positive.
    # NOTE(review): i[1] assumes each prediction row has two columns (one per
    # class); a single-sigmoid model yields shape (n, 1) and this would raise
    # IndexError — confirm the loaded model's output shape.
    x = 0
    for i in party_pred:
        if(i[1]==True):
            x+=1
    return x
# -
# 
# **CONCLUSION**
# Just like the training data, the majority of the tweets have a negative sentiment attached to them. After feeding 2000 tweets each for the Congress and BJP, the model predicted that BJP has 660 positive tweets while Congress has 416 positive tweets.<br><br> This indicates that the contest this year would be close and the chances of BJP winning on a majority like the 2015 elections are lower. This has been corroborated by the poor performance of the BJP in the recent state elections, where it lost power in three major Hindi-speaking states: Rajasthan, Madhya Pradesh and Chhattisgarh. <br><br>
# **FUTURE SCOPE**
# For this project only, a small sample of twitter data was considered for the analysis. It is difficult to give an estimate based on the limited amount of information we had access to. For future work, we can start by increasing the size of our dataset. In addition to Twitter, data can also be obtained from websites like Facebook, News websites. Apart from these we can try different models like Bidirectional RNN with attention mechanism. We can implement BERT which is currently the state of the art for solving various Natural Language Pro-cessing problems.
# **LISCENCE**
# **REFERENCES**
# [1] <NAME> and <NAME>, “Long short- ¨ term memory,” Neural computation, vol. 9, no. 8, pp. 1735–1780,1997.<br>
# [2] <NAME> and <NAME>, “Bidirectional recurrentneural networks,” Signal Processing, IEEE Transactions on, vol. 45, no. 11, pp. 2673–2681, 1997.<br>
# [3] <NAME>, <NAME>, <NAME>.GloVe: Global Vectors for Word Representation <br>
# [4] <NAME> <NAME> <NAME> Sentiment Analysis of Twitter Data <br>
# [5] <NAME> and <NAME>, “Framewise ¨ phoneme classification with bidirectional LSTM and other neural network
# architectures,” Neural Networks, vol. 18, no. 5, pp. 602–610,2005
| Portfolio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
## importing the required libraries ##
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
import h5py
import numpy as np
import keras.utils as kut
import cv2
import matplotlib.pyplot as plt
## get the data set from saved hdf file ##
# Reads the training images (X_train) and labels (Y_train) stored in data.h5.
with h5py.File('data.h5', 'r') as hdf:
    X_train = np.array(hdf.get('X_train'))
    Y_train = np.array(hdf.get('Y_train'))
    print(Y_train.shape)
# -
# One-hot encode the integer class labels for the softmax/crossentropy setup.
Y_onehot = kut.to_categorical(Y_train) ## converting the categories into one_hot list
print(Y_onehot[:10])
# +
X = tf.keras.utils.normalize(X_train, axis=1) ## normalizing the data set
## Convolution Neural Network ##
# Two conv/pool stages, then dense layers ending in a 10-way softmax
# (one class per digit); trained with a 10% validation split.
model = Sequential()
model.add(Conv2D(64, (5,5), input_shape =X.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(32, (5,5)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(64, activation=tf.nn.relu))
model.add(Dense(32, activation=tf.nn.relu))
model.add(Dense(10, activation=tf.nn.softmax))
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=['accuracy'])
model.fit(X, Y_onehot, epochs= 20, validation_split = 0.1)
# -
## saving the model ##
# NOTE(review): with no .h5 extension, Keras/TF chooses the save format by
# version — confirm the intended format.
model.save('num_reader.model')
# +
# predictions = model.predict([X])
# +
# print(predictions)
# +
# print(np.argmax(predictions[4]))
# -
## loading the model ##
model = tf.keras.models.load_model("num_reader.model")
# predictions = model.predict([X])
# print(predictions)
# +
final_string = ""
## preprocessing the test image ##
def preprocess(filepath):
    """Load a grayscale image, normalize it, resize to 25x25, display it,
    and return it shaped (1, 25, 25, 1) for model.predict()."""
    IMG_SIZE = 25
    img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
    img_array = tf.keras.utils.normalize(img_array, axis=1)
    new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
    plt.imshow(new_array, cmap ='gray')
    plt.show()
    # The original printed the global `prediction` here, which raises
    # NameError on the first call (it is only assigned in the loop below);
    # the print has been moved next to the assignment.
    return new_array.reshape(-1,IMG_SIZE, IMG_SIZE, 1)
for square in range(81):
    prediction = np.argmax(model.predict([preprocess("<file path>")])) # predicting on test images
    print(prediction)
    final_string+=str(prediction)
# -
# Concatenated digit predictions; length should be 81 (one per loop iteration).
print(final_string)
print(len(final_string))
| CNN_model/Conv_mod_train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import lightgbm as lgb
from catboost import CatBoostClassifier
# %matplotlib inline
# +
# Feature columns fed to every model below (includes one-hot Sex/Embarked).
features = ["Age", "Pclass", "Sex_male", "Family", "Alone", "Fare", "Embarked_S", "Embarked_C", "Embarked_Q"]
def data_preprocessing(train, test):
    """Clean and feature-engineer the concatenated train+test Titanic frames,
    then split them back at the original train length and return both."""
    dataset = pd.concat([train, test], axis=0)
    # Add a column for the number of family members aboard
    dataset['Family'] = dataset['SibSp'] + dataset['Parch']
    dataset.loc[dataset['Family'] == 0, 'Alone'] = 1
    dataset['Alone'] = dataset['Alone'].fillna(value=0)
    # Handling of Age and honorific *********************************************************************************
    # Split the name on "," or "." (max 2 splits); field 1 is the honorific
    dataset['honorific'] = dataset['Name'].str.split('[,.]', 2, expand = True)[1].str.strip()
    # Kept for data visualization
    #hist_honorific = dataset[['honorific', 'Survived', 'PassengerId']].groupby(['honorific', 'Survived']).count().unstack()
    #hist_honorific.plot.bar(stacked=True)
    # Group by honorific and compute the mean age per title
    average_age = dataset[['honorific', 'Age']].groupby(['honorific']).mean()
    # Store the honorific-based mean age in a temporary column 'abc'
    dataset['abc'] = dataset['honorific'].apply(lambda x: average_age.loc[x,'Age'])
    # Fill missing ages with the mean age for the passenger's honorific
    dataset['Age'] = dataset['Age'].fillna(dataset['abc'])
    # Drop the temporary replacement column
    dataset = dataset.drop(columns = ['abc'])
    # Collapse all honorifics except the common ones into 'Other'
    dataset['honorific'] = dataset['honorific'].where((dataset['honorific'] == 'Mr') | (dataset['honorific'] == 'Miss') | (dataset['honorific'] == 'Mrs') | (dataset['honorific'] == 'Master'), other = 'Other')
    from sklearn.preprocessing import LabelEncoder
    encoder = LabelEncoder()
    dataset['honorific'] = encoder.fit_transform(dataset['honorific'])
    # **********************************************************************************************
    # Use scikit-learn imputers (rather than pandas) for remaining missing values
    from sklearn.impute import SimpleImputer
    imputer = SimpleImputer(missing_values = np.nan, strategy = 'median')
    dataset['Fare'] = imputer.fit_transform(dataset['Fare'].values.reshape(-1,1))
    imputer = SimpleImputer(missing_values = np.nan, strategy = 'most_frequent')
    dataset['Embarked'] = imputer.fit_transform(dataset['Embarked'].values.reshape(-1,1))
    dataset = pd.get_dummies(dataset, columns=['Embarked'], drop_first=False)
    # Label-encoding Sex could introduce multicollinearity? Switched to one-hot encoding
    dataset = pd.get_dummies(dataset, columns=['Sex'], drop_first=True)
    # Features Scaling
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler()
    dataset['Age'] = scaler.fit_transform(dataset['Age'].values.reshape(-1,1))
    dataset['Fare'] = scaler.fit_transform(dataset['Fare'].values.reshape(-1,1))
    # Split back into the original train/test partitions
    dataset_train = dataset.iloc[:train.shape[0], :]
    dataset_test = dataset.iloc[train.shape[0]:, :]
    return dataset_train, dataset_test
dataset_train = pd.read_csv('data/train.csv', sep=',')
dataset_test = pd.read_csv('data/test.csv', sep=',')
dataset_train, dataset_test = data_preprocessing(dataset_train, dataset_test)
X_train = dataset_train[features].values
y_train = dataset_train['Survived'].values
Id = dataset_test['PassengerId']
X_submit = dataset_test[features].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2)
# +
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
class MajorityVote(BaseEstimator, RegressorMixin, TransformerMixin):
    """Hard-voting ensemble: fit a clone of each model and predict the
    per-row mode of their predictions."""
    def __init__(self, models):
        self.models = models

    def fit(self, X, y):
        # Fit clones so the estimators passed in remain untouched.
        self.models_ = [clone(m) for m in self.models]
        for m in self.models_:
            m.fit(X, y)
        return self

    def predict(self, X):
        # One column per fitted model, named "0", "1", ...
        votes = pd.DataFrame()
        for idx, m in enumerate(self.models_):
            votes[str(idx)] = m.predict(X)
        # Row-wise mode = majority vote (ties resolved by pandas ordering).
        return votes.mode(axis=1)[0]
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
# Needed here: the original only imported these in a LATER cell, so running
# the notebook top-to-bottom raised NameError on confusion_matrix below.
from sklearn.metrics import confusion_matrix, accuracy_score
# Base estimators for the hard-voting ensemble (k-NN constructed but unused here).
logi_classifier = LogisticRegression()
k_classifier = KNeighborsClassifier(n_neighbors=2, metric='minkowski', p=1)
svm_classifier = SVC(kernel='linear', random_state=0)
naive_classifier = GaussianNB()
rndm_classifier = RandomForestClassifier(n_estimators=10, criterion='gini', bootstrap=False, max_features=2, min_samples_leaf=1, min_samples_split=20, random_state=0)
majority = MajorityVote(models=(logi_classifier, svm_classifier, naive_classifier, rndm_classifier))
majority.fit(X_train, y_train)
y_pred = majority.predict(X_test)
# Evaluate the ensemble on the held-out split.
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(f'accuracy_score = {accuracy_score(y_test,y_pred):.5f}')
# +
# Logistic Regression
from sklearn.linear_model import LogisticRegression
logi_classifier = LogisticRegression()
logi_classifier.fit(X_train, y_train)
y_pred = logi_classifier.predict(X_submit)
from sklearn.metrics import confusion_matrix, accuracy_score
def create_confusion_matrix(y_test, y_pred):
    """Print the confusion matrix and the accuracy score for predictions."""
    print(confusion_matrix(y_test, y_pred))
    print(f'accuracy_score = {accuracy_score(y_test, y_pred):.5f}')
#create_confusion_matrix(y_test, y_pred)
output = pd.DataFrame({'PassengerId': Id, 'Survived': y_pred})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!")
# +
# K-Nearest Neighbors
from sklearn.neighbors import KNeighborsClassifier
k_classifier = KNeighborsClassifier(n_neighbors=2, metric='minkowski', p=1)
k_classifier.fit(X_train, y_train)
y_pred = k_classifier.predict(X_test)
create_confusion_matrix(y_test, y_pred)
# + tags=["outputPrepend"]
# # 過学習?のため結果はいまいち。EDAを見直す必要あり?
cat_classifier = CatBoostClassifier(iterations=1000, use_best_model=True, eval_metric='Accuracy', od_pval=0, learning_rate=0.0001, depth=10)
cat_classifier.fit(X_train, y_train, eval_set=(X_test, y_test))
prediction = cat_classifier.predict(X_submit)
#y_pred = cat_classifier.predict(X_test)
output = pd.DataFrame({'PassengerId': Id, 'Survived': prediction})
output.to_csv('my_submission.csv', index=False)
#print("Your submission was successfully saved!")
# +
# Support Vector Machine
from sklearn.svm import SVC
svm_classifier = SVC(kernel='linear', random_state=0)
svm_classifier.fit(X_train, y_train)
y_pred = svm_classifier.predict(X_test)
create_confusion_matrix(y_test, y_pred)
# +
# Naive Bayes
from sklearn.naive_bayes import GaussianNB
naive_classifier = GaussianNB()
naive_classifier.fit(X_train, y_train)
y_pred = naive_classifier.predict(X_test)
create_confusion_matrix(y_test, y_pred)
# +
# Random Forest Classification
from sklearn.ensemble import RandomForestClassifier
rndm_classifier = RandomForestClassifier(n_estimators=10, criterion='gini', bootstrap=False, max_features=2, min_samples_leaf=1, min_samples_split=20, random_state=0)
rndm_classifier.fit(X_train, y_train)
y_pred = rndm_classifier.predict(X_test)
create_confusion_matrix(y_test, y_pred)
# -
# KFold cross-validation: estimate accuracy over 10 folds.
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
# shuffle=True is required when random_state is given (scikit-learn raises otherwise).
kfold = KFold(n_splits=10, shuffle=True, random_state=42)
# Bug fix: the original `result = cross_val_score = cross_val_score(...)` rebound
# the imported cross_val_score function to its own return value.
result = cross_val_score(RandomForestClassifier(), X_train, y_train, cv=kfold, scoring='accuracy')
print(result.mean())
# grid search
'''
from sklearn.model_selection import GridSearchCV
grid_parameters = [
{'n_estimators': [1, 2, 5, 10, 100, 1000],
'criterion': ['gini', 'entropy'],
'max_features': [1, 2, 5, 10, 20],
'min_samples_split': [1, 2, 5, 10, 20],
'min_samples_leaf': [1, 2, 5, 10, 20],
'bootstrap': [True, False],
}
]
grid_search = GridSearchCV(RandomForestClassifier(), grid_parameters, cv=5, scoring='accuracy', n_jobs = -1)
grid_search.fit(X_train, y_train)
grid_search.best_params_
# LightGBM
lgb_train = lgb.Dataset(X_train, y_train)
lgb_test = lgb.Dataset(X_test, y_test)
parameter = {
'objective': 'binary',
'random_seed': 1234,
'num_iterations': 100,
'max_depth': -1,
'num_leaves':20,
'max_bin': 500,
'min_data_in_leaf': 57
}
model = lgb.train(parameter, train_set=lgb_train, valid_sets=lgb_test, num_boost_round=200,
early_stopping_rounds=20, verbose_eval=10)
# + tags=[]
y_pred = model.predict(X_test)
metric = []
range_index = 10000
for x in range(0, range_index, 1):
y_adjust = np.where(y_pred > x/range_index, 1, 0)
metric.append(accuracy_score(y_test, y_adjust))
#create_confusion_matrix(y_test, y_pred)
print(f'\n\nmax accuracy is {max(metric):.4f} at {metric.index(max(metric))/range_index}')
y_pred = np.where(y_pred > 0.5429, 1, 0)
create_confusion_matrix(y_test, y_pred)
# +
# Predict
test_dataset = pd.read_csv('data/test.csv')
X, y, test_dataset = data_preprocessing(test_dataset)
y_pred = model.predict(X)
y_pred = np.where(y_pred > 0.4128, 1, 0)
output = pd.DataFrame({'PassengerId': test_dataset['PassengerId'], 'Survived': y_pred})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!")
# -
| Titanic - Machine Learning from Disaster/Titanic - Machine Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''env_movshon'': conda)'
# language: python
# name: python38564bitenvmovshonconda86904444310c4a0e8be1c65ba6d14699
# ---
# %load_ext autoreload
# %autoreload 2
# %config Completer.use_jedi = False
# # Movshon lab - Blackrock Converter
from movshon_lab_to_nwb import MovshonBlackrockNWBConverter
from pynwb import NWBFile, NWBHDF5IO
from nwbwidgets import nwb2widget
from pathlib import Path
import yaml
import pprint
# +
# Source data
base_path = Path('/home/luiz/storage/taufferconsulting/client_ben/project_movshon/data_blackrock/')
file_recording_raw = str(base_path / 'XX_LE_textures_20191128_002.ns6')
file_recording_processed = str(base_path / 'XX_LE_textures_20191128_002.ns3')
file_sorting = str(base_path / 'XX_LE_textures_20191128_002.nev')
source_data = dict(
BlackrockRaw=dict(filename=file_recording_raw),
BlackrockProcessed=dict(filename=file_recording_processed),
BlackrockSorting=dict(
filename=file_sorting,
nsx_to_load=6
)
)
# Initialize converter
converter = MovshonBlackrockNWBConverter(source_data=source_data)
# Get metadata from source data and modify any values you want
metadata = converter.get_metadata()
metadata['NWBFile']['session_description'] = 'my example conversion'
# Get conversion options and modify any values you want
conversion_options = converter.get_conversion_options()
conversion_options['BlackrockRaw'] = dict(stub_test=True)
conversion_options['BlackrockProcessed'] = dict(stub_test=True)
# OPTIONAL - Validate source_data, metadata and conversion_options
converter.validate_source(source_data)
converter.validate_metadata(metadata)
converter.validate_conversion_options(conversion_options)
# Run conversion
output_file = 'out_example_blackrock.nwb'
converter.run_conversion(
metadata=metadata,
nwbfile_path=output_file,
save_to_file=True,
overwrite=True,
conversion_options=conversion_options
)
# -
# ## Check NWB file with widgets
io = NWBHDF5IO(output_file, 'r')
nwbfile = io.read()
nwb2widget(nwbfile)
| tutorials/blackrock_nwb_conversion_simple.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Summary of common problems with *Link List*
#
# [简体中文JavaScript版](https://mp.weixin.qq.com/s/ZtF0fkQa8wZGXmeRDIvHtg)
# ## *Idle Time*
#
# The *Daily Problem* channel has been updated continuously for 60 days. Subscribers know that the quality of the topic is very high. I will do it every day, in order not to waste money to buy this topic every day.
#
# Yep, I paid for these questions. Although most of them are available on [*LeetCode*][1], I still feel it's more suitable for you and me after selection and daily distribution.
#
# For 60 days, some people have been following me, such as [*@Dragon1573*][2]. He recorded every question and answer I pushed on [*Github*][3] and merged it by week. Post a picture and everyone feels.
#
# 
#
# The URL address of the repository is **<https://github.com/Dragon1573/Daily-Problem>**
#
# [1]: https://leetcode.com/
# [2]: https://github.com/Dragon1573
# [3]: https://github.com/
#
# ****
# ## Revision Time
#
# Today I won't push any questions. I'll sort out the basics of the linked list to ensure that everyone is addicted.
# ### 0x0 Summary
#
# 1. *Linked list* usually is a string of *Node*, which a *Node* contains a value `val` and a pointer `next` referring the next node.
# 2. Most of the time, we should use multiple pointers to solve the question.
# 3. When the head node of the linked list is uncertain (it may change or be removed), use a fake head node `dummyHead`.
# 4. Master the *reverse list*!
# 5. **Skills:** In order to judge the *circular list*, let a pointer moves twice as fast as the other.
# 6. A *doubly linked list*, that is, a linked list with 2 pointers. One of them pointing to the next and the other pointing to the previous.
#
# Here, we prepared 5 questions for everyone. We recommended you can write it down by hand if you have free time.
#
# - Reverse the linked list
# - Remove all elements with a specified value from the linked list
# - Generate an odd-even linked list
# - Check a palindrome linked list
# - Realize a doubly linked list on your own
# ### 0x1 Define a Node structure
#
# Apart from *JavaScript*, *Python 3* is an object-oriented programming language. In order to express the algorithm accurately, we have to define a *Node* first.
class Node:
    ''' A singly linked list node: a value plus a reference to its successor. '''
    def __init__(self, value, next_=None):
        ''' Store the payload and the (optional) next node. '''
        self.value = value
        self.next_ = next_
    def __str__(self):
        ''' Render the chain starting at this node as "v1->v2->...->vn". '''
        parts = [str(self.value)]
        node = self.next_
        while node is not None:
            parts.append(str(node.value))
            node = node.next_
        return '->'.join(parts)
# ### 0x2 Reverse a linked list
#
# In this part, we provide both iterably and recursively algorithms.
class ReverseLinkedList:
    ''' Reverse a singly linked list, both iteratively and recursively. '''
    def iterably(self, array: Node):
        ''' Reverse iteratively by re-pointing each node at its predecessor. '''
        reversed_head = None
        node = array
        while node is not None:
            following = node.next_
            node.next_ = reversed_head
            reversed_head = node
            node = following
        return reversed_head
    def recursively(self, array: Node):
        ''' Reverse recursively; the deepest call returns the new head. '''
        # Empty list or single node: already reversed.
        if array is None or array.next_ is None:
            return array
        new_head = self.recursively(array.next_)
        array.next_.next_ = array
        array.next_ = None
        return new_head
    def test(self):
        ''' Round-trip a five-element list through both implementations. '''
        chain = Node(1, Node(2, Node(3, Node(4, Node(5)))))
        print(chain)
        once = self.iterably(chain)
        print(once)
        twice = self.recursively(once)
        print(twice)
''' Main Scripts '''
ReverseLinkedList().test()
# ### 0x3 Remove Element(s) in a linked list
#
# ```text
# Input: 1->2->6->3->4->5->6
# Output: 1->2->3->4->5
# ```
class RemoveElement:
    ''' Remove all elements with a given value from a linked list '''
    def remove(self, array: 'Node', value):
        '''Return the list head with every node whose value equals `value` unlinked.

        Bug fix: the previous version always advanced the cursor after
        unlinking a node, so a run of consecutive matches (e.g. 6->6) left
        every second match in the list. Here the cursor only advances when
        no deletion happened, so the new `cursor.next_` is re-checked.
        Removed nodes are fully detached (their next_ is cleared).
        '''
        # Drop matching nodes at the head of the list first (no dummy needed).
        while array is not None and array.value == value:
            detached = array
            array = array.next_
            detached.next_ = None
        cursor = array
        while cursor is not None and cursor.next_ is not None:
            if cursor.next_.value == value:
                detached = cursor.next_
                cursor.next_ = detached.next_
                detached.next_ = None
                # Do not advance: the new cursor.next_ may also match.
            else:
                cursor = cursor.next_
        return array
    def test(self):
        ''' Test Cases '''
        array = Node(1, Node(2, Node(6, Node(3, Node(4, Node(5, Node(6)))))))
        print(array)
        result = self.remove(array, 6)
        print(result)
''' Main Scripts '''
RemoveElement().test()
# ### 0x4 Generate an odd-even linked list
#
# Link all odd and even index nodes together separately, then connect the even linked list after the odd one. Return the regrouped linked list.
#
# ```text
# Input: 1->2->3->4->5
# Output: 1->3->5->2->4
# ```
#
# ```text
# Input: 2->1->3->5->6->4->7
# Output: 2->3->6->7->1->5->4
# ```
#
# **Notes:** *The first element is odd and the second element is even, etc..*
#
# You should finish it with $O(1)$ space complexity and $O(n)$ time complexity.
class OddEvenList:
    ''' Regroup a linked list so all odd-indexed nodes precede even-indexed ones. '''
    def group(self, array: Node):
        ''' Rewire the list in place: 1st, 3rd, 5th... nodes first, then 2nd, 4th... '''
        # Empty list: nothing to regroup.
        if array is None:
            return array
        # `odd` walks the 1st, 3rd, ... nodes; `even` walks the 2nd, 4th, ...
        odd = array
        even = array.next_
        # Remember the first even node so the even sublist can be appended later.
        even_head = even
        # Each pass splices one odd and one even node onto their sublists.
        # The order of these four pointer updates matters; do not rearrange.
        # (For a single-node list `even` is None, but odd.next_ is checked
        # first, so the loop body is never entered with even == None.)
        while odd.next_ is not None and even.next_ is not None:
            odd.next_ = even.next_
            odd = odd.next_
            even.next_ = odd.next_
            even = even.next_
        # Attach the even sublist after the last odd node.
        odd.next_ = even_head
        return array
    def test(self):
        ''' Test Cases: odd- and even-length example lists. '''
        array_1 = Node(1, Node(2, Node(3, Node(4, Node(5)))))
        print(array_1)
        grouped_1 = self.group(array_1)
        print(grouped_1)
        array_2 = Node(2, Node(1, Node(3, Node(5, Node(6, Node(4, Node(7)))))))
        print(array_2)
        grouped_2 = self.group(array_2)
        print(grouped_2)
''' Main Scripts '''
OddEvenList().test()
# ### 0x5 Check a palindrome linked list
class PalindromeLinkedList:
    ''' Check whether a linked list is a palindrome. '''
    def reverse(self, array: 'Node'):
        ''' Reverse a linked list in place and return the new head. '''
        previous = None
        current = array
        while current is not None:
            next_ = current.next_
            current.next_ = previous
            previous = current
            current = next_
        return previous
    def isPalindrome(self, array: 'Node'):
        '''Return True when the list reads the same forwards and backwards.

        Bug fix: the previous version reversed the list *in place* and then
        walked the mutated original head, whose next_ had become None — so
        only the first and last values were ever compared (e.g. 1->2->3->1
        wrongly reported True). Collecting the values first leaves the input
        intact and compares the whole sequence.
        '''
        values = []
        node = array
        while node is not None:
            values.append(node.value)
            node = node.next_
        return values == values[::-1]
    def test(self):
        ''' Test Cases '''
        print(self.isPalindrome(Node(1, Node(2))))
        print(self.isPalindrome(Node(1, Node(2, Node(2, Node(1))))))
''' Main Scripts '''
PalindromeLinkedList().test()
# ### 0x6 Realize a doubly linked list on your own
#
# Such as `get()`, `addHead()`, `addTail()`, `deleteAtIndex()` and so on.
class DoublyNode:
    ''' A doubly linked list node with forward and backward references. '''
    def __init__(self, value, next_=None, prev=None):
        ''' Store the payload and both neighbour references. '''
        self.value = value
        self.next_ = next_
        self.prev = prev
    def __str__(self):
        '''Render the chain from this node forward as "v1<->v2<->...".

        Bug fix: the previous version never advanced past the first node
        inside the loop (and rebound `self` before reading the first value),
        so any list longer than one element looped forever.
        '''
        string = str(self.value)
        node = self.next_
        while node:
            string += ('<->' + str(node.value))
            node = node.next_
        return string
class DoublyLinkedList:
    ''' Doubly linked list built around two sentinel nodes (head and tail).

    NOTE(review): the nodes are created with the singly linked `Node` class
    and get their `prev` attribute assigned dynamically; the `DoublyNode`
    class defined above appears to be the intended node type — confirm.
    '''
    def __init__(self):
        ''' Create an empty list: head sentinel linked directly to tail sentinel. '''
        self.head = Node(None)
        self.tail = Node(None)
        self.head.next_ = self.tail
        self.tail.prev = self.head
    def getNode(self, index):
        ''' Return the data node at `index` (0-based), or None if out of range.

        The walk starts after the head sentinel; reaching the tail sentinel
        means the index is past the last element. Note this excludes the
        tail, so addAtIndex cannot append at index == length.
        '''
        current = self.head.next_
        while current and index > 0:
            current = current.next_
            index -= 1
        if current == self.tail or not current or index != 0:
            return None
        return current
    def get(self, index):
        ''' Return the value stored at `index`, or None if out of range. '''
        node = self.getNode(index)
        if node:
            return node.value
        else:
            return None
    def addHead(self, value):
        ''' Insert a new element right after the head sentinel. '''
        node = Node(value)
        node.prev = self.head
        node.next_ = self.head.next_
        # Link the old first node back to the new one, then splice it in.
        self.head.next_.prev = node
        self.head.next_ = node
    def addTail(self, value):
        ''' Insert a new element right before the tail sentinel. '''
        node = Node(value)
        node.prev = self.tail.prev
        node.next_ = self.tail
        # Link the old last node forward to the new one, then splice it in.
        self.tail.prev.next_ = node
        self.tail.prev = node
    def addAtIndex(self, index, value):
        ''' Insert a new element before the node currently at `index`.

        Raises IndexError when `index` has no existing node (so this cannot
        be used to append past the last element; use addTail for that).
        '''
        current = self.getNode(index)
        if current is None:
            raise IndexError('Index out of bounds!')
        node = Node(value)
        node.prev = current.prev
        node.next_ = current
        current.prev.next_ = node
        current.prev = node
    def deleteAtIndex(self, index):
        ''' Unlink the element at `index`; raises IndexError if out of range.

        The removed node is fully detached (both links cleared).
        '''
        current = self.getNode(index)
        if current is None:
            raise IndexError('Index out of bounds!')
        current.prev.next_ = current.next_
        current.next_.prev = current.prev
        current.next_ = None
        current.prev = None
    def test(self):
        ''' Test Cases: build [0..5] by mixed insertions, then delete both ends.

        The prints rely on Node.__str__, so the head sentinel's None value
        appears at the start of each printed chain.
        '''
        self.addHead(0)
        self.addTail(5)
        print(self.head)
        self.addAtIndex(1, 1)
        self.addAtIndex(2, 2)
        print(self.head)
        self.addAtIndex(3, 3)
        self.addAtIndex(4, 4)
        print(self.head)
        self.deleteAtIndex(0)
        print(self.head)
        self.deleteAtIndex(4)
        print(self.head)
''' Main Scripts '''
DoublyLinkedList().test()
| Revision/Link_List.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
housing = pd.read_csv("housing_prices.csv")
housing.shape
housing.head()
housing.columns
# ### Exploratory Analysis
# ### Univariate
# +
# Analyze the spread of the "LotArea" column
sns.distplot(housing["LotArea"], kde=False);
# +
# What are the different types of housing exterios
plot = sns.countplot(x = "Exterior1st", data = housing)
plot.set_xticklabels(plot.get_xticklabels(), rotation=40);
# -
# ### Bi-variate Analysis
# <b> Q: Does the price of the house depend on the lot area? </b>
sns.regplot(x = "LotArea", y = "SalePrice", data = housing)
# Are outliers skewing the relationship.. Redraw the relationshp after removing very large values
housing["LotArea"].quantile([0.5,0.95,0.99])
housing_sub = housing.loc[housing["LotArea"] < housing["LotArea"].quantile(0.95)]
# +
sns.regplot(x = "LotArea", y = "SalePrice", data = housing_sub)
# -
housing["LotArea"].corr(housing["SalePrice"])
housing_sub["LotArea"].corr(housing_sub["SalePrice"])
# ### Plotting multiple graphs
# <b> Q: Analyze the relationship between SalesPrice and all "Square Feet (SF)" related columns </b>
sf_cols = [col_name for col_name in housing.columns if "SF" in col_name]
len(sf_cols)
fig, axs = plt.subplots(nrows = 3,ncols = 3, figsize = (10,10))
for i in range(0, len(sf_cols)):
rows = i // 3
cols = i % 3
ax = axs[rows, cols]
plot = sns.regplot(x = sf_cols[i], y = "SalePrice", data = housing, ax=ax)
### Is the price of the house impacted by the Exterior covering on house
housing["Exterior1st"].value_counts()
fig, axs = plt.subplots(figsize = (10,10))
sns.boxplot(x = "Exterior1st", y = "SalePrice", data = housing, ax=axs);
# <b>Q1. Rather than plotting the relationship between all the different types of Exterior, do the following:</b>
#
# a) Create a new column - <i>"Exterior_New"</i> with only the top 3 values of Exterior in it. All other values are converted to "Others"
#
# b) Plot the relationship between Sales price and this new columns
#
# <b>Q2: Does the relationship between House exterior and Price change depending on how old the house is. For this question simply categorize houses into Old and New based on the condition: YearBuilt > 2000 "New" else "Old"</b>
# <b> Q1. Rather than plotting the relationship between all the different types of Exterior, do the following:</b>
top_3_exterior = housing["Exterior1st"].value_counts().head(3)
top_3_exterior = list(top_3_exterior.index)
housing["Exterior_new"] = np.where(housing["Exterior1st"].isin(top_3_exterior),housing["Exterior1st"],"Others")
housing["Exterior_new"].value_counts()
plot = sns.boxplot(x = "Exterior_new", y = "SalePrice", data = housing);
plot.set_xticklabels(plot.get_xticklabels(), rotation=40);
# <b> Does the relationship between House exterior and Price change depending on how old the house is </b>
housing["new_house"] = np.where(housing["YearBuilt"] > 2000,"New","Old")
housing["new_house"].value_counts()
sns.boxplot(x = "new_house", y = "SalePrice", hue = "Exterior_new", data = housing);
housing.groupby(["Exterior_new","new_house"]).agg({"SalePrice":["count", "median"]})
# <b> Has the type of exterior changed for newer homes compared to older houses </b>
#
#
sns.countplot(x = "new_house", data = housing, hue = "Exterior_new");
| python_visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %load_ext rpy2.ipython
# + language="R"
# library(ggplot2)
# library(dplyr)
# library(tidyr)
# library(gridExtra)
# library(phyloseq)
# library(vegan)
# library(scatterplot3d)
# + language="R"
# otu.tbl.file1 = '/home/nick/notebook/SIPSim/dev/bac_genome1210/atomIncorp_taxaIncorp/0/10/1/OTU_n2_abs1e9_sub-norm_filt.physeq'
# otu.tbl.file2 = '/home/nick/notebook/SIPSim/dev/bac_genome1210/atomIncorp_taxaIncorp/100/10/1/OTU_n2_abs1e9_sub-norm_filt.physeq'
#
# physeq1 = readRDS(otu.tbl.file1)
# physeq2 = readRDS(otu.tbl.file2)
# + language="R"
#
# ord1 = ordinate(physeq1, method='NMDS', distance='bray')
# #p1 = plot_ordination(physeq1, ord1, justDF=T)
#
# ord2 = ordinate(physeq2, method='NMDS', distance='bray')
# #p2 = plot_ordination(physeq2, ord2, justDF=T)
#
# ord1 %>% head %>% print
# ord2 %>% head %>% print
# + language="R"
# otu.tbl = physeq1 %>% otu_table %>% t
# bc1 = vegdist(otu.tbl)
#
# bc1.col = data.frame(t(combn(rownames(otu.tbl),2)), as.numeric(bc1))
# colnames(bc1.col) = c('X1', 'X2', 'dist')
#
# bc1.col$X1.lib = gsub('__.+', '', bc1.col$X1)
# bc1.col$X2.lib = gsub('__.+', '', bc1.col$X2)
#
# bc1.col$X1.F.start = gsub('.+__([0-9.]+)-.+', '\\1', bc1.col$X1)
# bc1.col$X2.F.start = gsub('.+__([0-9.]+)-.+', '\\1', bc1.col$X2)
#
# bc1.col$X1.F.end = gsub('.+-', '', bc1.col$X1)
# bc1.col$X2.F.end = gsub('.+-', '', bc1.col$X2)
#
# bc1.col %>% head
# + language="R"
# otu.tbl = physeq2 %>% otu_table %>% t
# bc2 = vegdist(otu.tbl)
#
# bc2.col = data.frame(t(combn(rownames(otu.tbl),2)), as.numeric(bc2))
# colnames(bc2.col) = c('X1', 'X2', 'dist')
#
#
# bc2.col$X1.lib = gsub('__.+', '', bc2.col$X1)
# bc2.col$X2.lib = gsub('__.+', '', bc2.col$X2)
#
# bc2.col$X1.F.start = gsub('.+__([0-9.]+)-.+', '\\1', bc2.col$X1)
# bc2.col$X2.F.start = gsub('.+__([0-9.]+)-.+', '\\1', bc2.col$X2)
#
# bc2.col$X1.F.end = gsub('.+-', '', bc2.col$X1)
# bc2.col$X2.F.end = gsub('.+-', '', bc2.col$X2)
#
# bc2.col %>% head
# + language="R"
# bc1.col$file = 1
# bc2.col$file = 2
#
# asNum = function (x){ as.numeric(as.character(x)) }
#
# tbl.j = rbind(bc1.col, bc2.col)
# tbl.j = tbl.j %>%
# filter(X1.lib != X2.lib) %>%
# mutate(X1.F.start = asNum(X1.F.start),
# X2.F.start = asNum(X2.F.start),
# F.start.dist = abs(X1.F.start - X2.F.start),
# dist = asNum(dist),
# file = as.character(file))
#
# #scatterplot3d(tbl.j$X1.F.start, tbl.j$X2.F.start, tbl.j$dist, color=tbl.j$file)
# + magic_args="-w 500 -h 400" language="R"
#
# ggplot(tbl.j, aes(F.start.dist, dist, color=file)) +
# geom_point()
# -
# # Mapping fractions between gradient communities in order to perform procrustes
# + language="R"
# otu.tbl.file1 = '/home/nick/notebook/SIPSim/dev/bac_genome1210/atomIncorp_taxaIncorp/0/10/1/OTU_n2_abs1e9_sub-norm_filt.physeq'
# otu.tbl.file2 = '/home/nick/notebook/SIPSim/dev/bac_genome1210/atomIncorp_taxaIncorp/100/10/1/OTU_n2_abs1e9_sub-norm_filt.physeq'
#
# physeq1 = readRDS(otu.tbl.file1)
# physeq2 = readRDS(otu.tbl.file2)
# + language="R"
#
# ord1 = ordinate(physeq1, method='NMDS', distance='bray')
# ord2 = ordinate(physeq2, method='NMDS', distance='bray')
#
# ord1 %>% scores %>% head %>% print
# ord2 %>% scores %>% head %>% print
# + language="R"
#
# get.fracs = function(ord){
# fracs = gsub('.+__', '', rownames(ord %>% scores)) %>% as.data.frame()
# colnames(fracs) = c('fractions')
# fracs = fracs %>%
# separate(fractions, c('start','end'), sep='-', convert=T) %>%
# mutate(start = start * 1000,
# end = end * 1000)
# return(fracs)
# }
#
# ord1.f = get.fracs(ord1)
# ord2.f = get.fracs(ord2)
# + language="R"
# library(IRanges)
# + language="R"
#
# ord1.r = IRanges(start=ord1.f$start, end=ord1.f$end)
# ord2.r = IRanges(start=ord2.f$start, end=ord2.f$end)
# + language="R"
#
# ov = findOverlaps(ord1.r, ord2.r, select='first')
# ov
# + language="R"
#
# ov = findOverlaps(ord1.r, ord2.r)
# ov
# -
# # Calculating centroid of binned fraction samples
#
# * centroid of all 20 replicates for fraction samples that fall into the BD-range bin
# * trying oriellipse() function from vegan package
# + language="R"
# otu.tbl.file1 = '/home/nick/notebook/SIPSim/dev/bac_genome1210/atomIncorp_taxaIncorp/0/10/1/OTU_n2_abs1e9_sub-norm_filt.physeq'
# otu.tbl.file2 = '/home/nick/notebook/SIPSim/dev/bac_genome1210/atomIncorp_taxaIncorp/100/10/1/OTU_n2_abs1e9_sub-norm_filt.physeq'
#
# physeq1 = readRDS(otu.tbl.file1)
# physeq2 = readRDS(otu.tbl.file2)
# + language="R"
#
# ord1 = ordinate(physeq1, method='NMDS', distance='bray')
# ord2 = ordinate(physeq2, method='NMDS', distance='bray')
# + language="R"
#
# grps = as.character(rep(seq(1,nrow(ord1$points) / 2), 2))
# grps = append(grps, '2')
#
# plot(ord1, type = "p", display='sites')
# elps = ordiellipse(ord1, grps, kind="se", conf=0.95, lwd=2, col="blue")
#
# elps = elps %>% summary %>% t %>% as.data.frame
# elps
# + language="R"
#
# ggplot(elps, aes(NMDS1, NMDS2)) +
# geom_point()
# + language="R"
#
# get.ellipse = function(ord){
# grps = as.character(rep(seq(1,nrow(ord$points) / 2), 2))
# grps = append(grps, '2')
#
# plot(ord, type = "p", display='sites')
# elps = ordiellipse(ord, grps, kind="se", conf=0.95, lwd=2, col="blue")
#
# elps = elps %>% summary %>% t %>% as.data.frame
# return(elps)
# }
#
#
# get.ellipse(ord1)
# + language="R"
#
# BD.range = seq(1.6, 1.9, 0.004)
#
# mid = function(x, y){ (x + y)/2 }
#
# get.BD.range = function(tbl, BD.range){
# tbl = as.data.frame(tbl)
# tbl$lib = gsub('__.+', '', rownames(tbl)) %>% as.character
# tbl$BD.start = gsub('.+__([0-9.]+)-.+', '\\1', rownames(tbl)) %>% as.numeric
# tbl$BD.end = gsub('.+-', '', rownames(tbl)) %>% as.numeric
# tbl$BD.mid = mapply(mid, tbl$BD.start, tbl$BD.end)
# tbl$BD.bin = cut(tbl$BD.mid, breaks=BD.range)
#
# tbl = tbl %>%
# unite(group, lib, BD.bin, sep='__')
#
# return(tbl)
# }
#
# ord.BD = get.BD.range(ord1 %>% scores, BD.range=BD.range)
# ord.BD %>% head
# + language="R"
# ?tidyr::unite
# + language="R"
# # making fixed BD-range & binning by BD.mid
#
# BD.range = seq(1.6, 1.9, 0.004)
# BD.range
# + language="R"
# ord.BD$BD.bin = cut(ord.BD$BD.mid, breaks=BD.range)
# ord.BD %>% head
# -
| ipynb/sandbox/procrustes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: simex
# language: python
# name: simex
# ---
from timeit import default_timer as timer
import os
import SimEx
from SimEx import *
# # Data path setup
data_path = '../../data/simulation/'
prop_path = 'prop/prop_out_9fs_5keV_from_CHY.h5'
os.chdir(data_path)
os.getcwd()
# # Wave propogration
prop_analysis=XFELPhotonAnalysis(input_path=prop_path)
prop_analysis.plotTotalPower(spectrum=True)
prop_analysis.plotTotalPower()
prop_analysis.plotIntensityMap()
import h5py
with h5py.File(prop_path, 'r') as f:
grp = f['misc']
print(grp.keys())
print (grp['xFWHM'][...]*1e6)
print (grp['yFWHM'][...]*1e6)
# # Photon-matter interaction (form factor calculation)
# Lysozyme sample from [Sugahara Nmethods 2015](https://www.nature.com/articles/nmeth.3172), [PDB File](https://www.rcsb.org/structure/3WUL)
pmi_parameters={"number_of_trajectories" : 1,
"random_rotation" : False}
photon_matter_interactor=XMDYNDemoPhotonMatterInteractor(parameters=pmi_parameters,
input_path=prop_path,
output_path='pmi',
sample_path='3WUL.pdb')
photon_matter_interactor.backengine()
photon_matter_interactor.saveH5()
# # Scattering
# ## Configure Detector geometry
panel = DetectorPanel(ranges={"fast_scan_min" : 0, "fast_scan_max" : 100,
"slow_scan_min" : 0, "slow_scan_max" : 100},
pixel_size=6*220.0e-6*meter,
energy_response=1.0/electronvolt,
distance_from_interaction_plane=0.13*meter,
corners={"x" : -49, "y": -49},
saturation_adu=1.e6,
)
detector_geometry = DetectorGeometry(panels=panel,)
# # Configure the Diffractor Parameters
diffraction_parameters = SingFELPhotonDiffractorParameters(
uniform_rotation=False,
slice_interval=100,
number_of_slices=100,
number_of_diffraction_patterns=1,
detector_geometry=detector_geometry,
forced_mpi_command='mpirun -np 1',
)
diffractor = SingFELPhotonDiffractor(parameters=diffraction_parameters,
input_path='pmi',
output_path="diffr")
diffractor.backengine()
diffractor.saveH5()
spi_analysis = DiffractionAnalysis(diffractor.output_path,
pattern_indices=[1],
poissonize=False)
spi_analysis.plotPattern(logscale=True)
| src/controller/singFEL/Benchmark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from shamanai import *
shamanrt = ShamanRT()
# +
print(f'Welcome to Shaman-AI')
# Bug fix: the f-string below was missing its closing quote (SyntaxError).
print(f'Create New Project: 1 \n Load Project: 2 ')
answer = input('Please Select from list')
if answer == '1':
    # The original `if` had no suite (the cell ended here), which is a
    # SyntaxError; keep the menu valid until the creation flow is implemented.
    pass
# +
import os
import numpy as np
# Import the cropping widget and example images
from interactivecrop.interactivecrop import main as crop
from interactivecrop.samples import sample_images, sample_names
PATH_TO_SAMPLES = 'shamanai/saves/image_samples/'
# +
from PIL import Image, ImageDraw, ImageFont
from PIL import Image
image_list = [np.array(Image.open(PATH_TO_SAMPLES+i)) for i in os.listdir(PATH_TO_SAMPLES)]
image_name_list = [i[:-4].title() for i in os.listdir(PATH_TO_SAMPLES)]
def crop2(image_path, coords, saved_location):
    """
    Crop an image file to a bounding box and save the result.

    @param image_path: The path to the image to edit
    @param coords: A tuple of x/y coordinates (x1, y1, x2, y2)
    @param saved_location: Path to save the cropped image
    """
    source = Image.open(image_path)
    source.crop(coords).save(saved_location)
# Define the callback to save the image
def callback(image_name, im):
    """Crop-widget callback: crop the selected region out of `im` and save it.

    `image_name` is the display name from the widget (currently unused).
    `im` is the widget's shape object exposing `.image` (a PIL image) and
    `.size` — assumed here to be (start_x, start_y, width, height);
    TODO confirm against the interactivecrop API.
    The commented-out lines below are earlier text-overlay experiments
    kept for reference.
    """
    #fnt = ImageFont.truetype('static/arial.ttf', 50)
    # Get a font
    #im.draw() # Draw the shape onto the PIL image; Otherwise we print the unmodified image. Note draw() is inplace operator
    PIL_im = im.image # Access the PIL image from the 'shape' object
    #d = ImageDraw.Draw(PIL_im)
    #print(im.shape)
    #PIL_im.save('Text.jpg')
    #cropped_image = Image.open('Text.jpg').convert('L')
    #cropped_image = cropped_image.crop(im.size)
    #print(im.size)
    cropped_image = PIL_im
    # Unpack the crop rectangle from the widget's (x, y, width, height) tuple.
    start_x = im.size[0]
    start_y = im.size[1]
    width = im.size[2]
    height = im.size[3]
    # Convert to PIL's (left, upper, right, lower) box format.
    box = (start_x, start_y, start_x + width, start_y + height)
    cropped_image = cropped_image.crop(box)
    #cropped_image = cropped_image.crop(im.size)
    #cropped_image.save(saved_location)
    # NOTE(review): the output name is hard-coded, so each crop overwrites
    # the previous file regardless of image_name.
    cropped_image.save('Text2.jpg')
    #print(im.size)
    #d.text((10,10), text=str(im.size), fill='white') # Draw the crop shape onto the image
    #cropped_image(image_name+'_w_Text.jpg')
    #crop2(image_name+'_w_Text.jpg', im.size, 'cropped.jpg')
    #PIL_im.save(image_name+'_w_Text.jpg')
crop(image_list, image_name_list=image_name_list, callback=callback, optimize=True)
# +
if __name__ == '__main__':
image = 'grasshopper.jpg'
crop2(PIL_im, (161, 166, 706, 1050), 'cropped.jpg')
# +
image_list = [PATH_TO_SAMPLES+i for i in os.listdir(PATH_TO_SAMPLES)]
image_name_list = [i[:-4].title() for i in os.listdir(PATH_TO_SAMPLES)]
crop(image_list, image_name_list=image_name_list, optimize=False)
# -
from interactivecrop.interactivecrop import main as crop
from interactivecrop.samples import sample_images, sample_names
crop(sample_images,image_name_list = sample_names)
crop(sample_images, sample_names, optimize=False)
# +
#mouselogger = MouseLogger()
#mouselogger.grab_screen()
# -
| .ipynb_checkpoints/Shaman-AI-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.12 ('numpy-tutorial')
# language: python
# name: python3
# ---
# # Numpy Tutorial Notes
#
# The [tutorial slides](https://github.com/enthought/Numpy-Tutorial-SciPyConf-2019/blob/master/slides.pdf)
# provide a useful summary of numpy features and concepts. This notebook captures similar examples, notes and comments.
#
# ```
# git clone https://github.com/enthought/Numpy-Tutorial-SciPyConf-2019.git
# # cd Numpy-Tutorial-SciPyConf-2019
# ```
#
# See [YouTube Video](https://youtu.be/ZB7BZMhfPgk).
import numpy as np
a = np.array(range(1, 5))
a
type(a)
a.dtype
f = np.array([1.2, 2.3, 4.5, 5.6])
f.dtype
a[0]
a[0] = 10
a
# floats are truncated to int
a[0] = 11.5
a
a.ndim
# tuple of N elements in each dimension; like a.size in matlab
a.shape
# size is the total number of elements
a.size
# ### element-wise or vectorized operations
a + f
a * f
a / f
f ** a
# vectorized operations work with constants
a * 10
# ### Universal Functions (ufuncs)
np.sin(a)
# ## Indexing and Slicing Examples
a = np.arange(25).reshape(5, 5)
a
# slice columns - red
a[:, [1, 3]]
# slice columns - red
a[:, 1::2]
# last row - yellow
a[-1, :]
# fifth row
a[4, :]
# blue — same selection as below; the original line was corrupted by an
# automated scrubber (an IPv6-looking token replaced the row slice), so it
# is reconstructed here as odd rows / even columns, stopping before the last.
a[1::2, :-1:2]
# blue
a[1::2, :3:2]
# ### Slice Assignments
a = np.array([0, 1, 2, 3, 4])
a[-2:]
a[-2:] = range(2)
a
# assign a constant to slice
a[-2:] = 99
a
# assignment shape must equal shape assigned to
try:
a[-2:] = range(5)
except ValueError as err:
print(err)
# ### Indexing Shared Memory
a = np.arange(1,5)
a
# a slice assignment points to shared memory
b = a[:2]
b
assert id(a[0]) == id(b[0])
b[0] = -1
b
# note this also changes the shared data in a
a
# ### Indexing Syntax Explained
a[0] == a.__getitem__(0)
a[0] = 100
a
a.__setitem__(0, 100)
a
# ### Data Copies
a = np.arange(1,5)
b = a.copy()
b[0] = -1
b[0], a[0]
id(b[0]), id(a[0])
# ### Fancy Indexes and Masks
#
# Fancy indexes return copies instead of views into the original array
a = np.array([-1, -3, 1, 4, -6, 9, 3])
a
# threshold data below zero
negative = a < 0
negative
# get all the items that are negative
a[negative]
# set all the negative items to zero
a[negative] = 0
a
# ### Fancy Indexes in 2D
#
# Each dimension is indexed to create tuple-indexes
a = np.arange(25).reshape(5,5)
a
# select items with values: 2, 13, 16, 19
a[[0,2,3,3],[2,3,1,4]]
# Read the indexes above as tuples like:
# (0, 2), (2,3), (3, 1), (3, 4)
#
# This is like zipping the index arrays
list(zip([0,2,3,3],[2,3,1,4]))
# use a mask to select all items divisible by 3
mask = (a % 3) == 0
mask
a[mask]
# ### Multidimensional Indexing
#
# See
# - [YouTube Segment](https://youtu.be/ZB7BZMhfPgk?t=4735)
# - [tutorial slides](https://github.com/enthought/Numpy-Tutorial-SciPyConf-2019/blob/master/slides.pdf) (slide 31)
#
# Numpy dimension indexes are row-major
# - numpy is row-major system
# - Fortran and Matlab are column-major systems
# - in a row-major system, each new dimension is *pre-pended*
# - 1D: the axis=0 increases by columns
# - 2D:
# - the axis=0 increases by rows
# - the axis=1 increases by columns
# - 3D:
# - the axis=0 increases by depth
# - the axis=1 increases by rows
# - the axis=2 increases by columns
# - 4D:
# - the axis=0 increases by blocks of 3D (e.g. time dimension)
# - the axis=1 increases by depth
# - the axis=2 increases by rows
# - the axis=3 increases by columns
#
# In the `.shape` tuple, the last dimension (`-1`) is always the columns of the array. These items in the last dimension are stored in a contiguous block of memory.
# ## Creating Arrays
#
# See slide 32 of the [tutorial slides](https://github.com/enthought/Numpy-Tutorial-SciPyConf-2019/blob/master/slides.pdf).
# ## Array Computation Methods
#
# See:
# - [YouTube Segment](https://youtu.be/ZB7BZMhfPgk?t=5120)
# - [tutorial slides](https://github.com/enthought/Numpy-Tutorial-SciPyConf-2019/blob/master/slides.pdf) (slide 37)
#
# For the example data
#
# `a = np.arange(-15, 15).reshape(5, 6) ** 2`
#
# compute:
# - the maximum of each row (one max per row)
# - the mean of each column (one mean per column)
# - the position of the overall minimum (requires several steps)
# example data
a = np.arange(-15, 15).reshape(5, 6) ** 2
a
# a rule of thumb is to choose an axis by using the
# element of the array shape that will be collapsed, e.g.
a.shape # (5, 6)
# axis=0 will collapse over 5 rows (drop rows)
# axis=1 will collapse over 6 columns (drop columns)
# max of each row will collapse over 6 columns or index=1
np.max(a, axis=1)
# the axis=-1 can be used, which is always columns in numpy
np.max(a, axis=-1)
# mean of columns will collapse over 5 rows or index=0
np.mean(a, axis=0)
min_idx = np.argmin(a)
min_idx
min_arridx = np.unravel_index(min_idx, a.shape)
min_arridx
# ## Array Broadcasting
#
# See:
# - [YouTube - Computation Rules](https://youtu.be/ZB7BZMhfPgk?t=5112)
# - [tutorial slides](https://github.com/enthought/Numpy-Tutorial-SciPyConf-2019/blob/master/slides.pdf) (slide 38 & 45)
# example of distance from center
import matplotlib.pyplot as plt
a = np.linspace(0, 1, 15) - 0.5
b = a[:, np.newaxis] # b.shape == (15, 1)
dist2 = a**2 + b**2 # broadcasting sum.
dist = np.sqrt(dist2)
plt.imshow(dist)
plt.colorbar()
# ## Numpy Arrays in Memory
#
# See:
# - [YouTube Segment](https://youtu.be/ZB7BZMhfPgk?t=7296)
# - [tutorial slides](https://github.com/enthought/Numpy-Tutorial-SciPyConf-2019/blob/master/slides.pdf) (slide 59)
| numpy_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from fqfa.fastq.fastq import parse_fastq_reads
from fqfa.util.file import open_compressed
# # Benchmark 1: list of reads
#
# This code creates a list containing all the reads in the file.
# Note that the data structures for the reads are quite different, with two being package-specific objects and one being a tuple.
#
# Because pyfastx does not support bzip2, these results are most useful for comparing with fqfa's gzip benchmarks.
# ## fqfa
#
# Unlike pyfastx, fqfa takes an open file handle rather than a file name.
# In these examples, this is addressed using a context created by a with statement.
with open_compressed("BRCA1_input_sample.fq.bz2") as handle:
# %time reads = [x for x in parse_fastq_reads(handle)]
for x in reads[:5]:
print(x)
del reads
# # Benchmark 2: summarized quality statistics
#
# This code calculates the median average read quality for all reads in the file.
from statistics import median
# ## fqfa
#
# This code uses the ``average_quality()`` method implemented by the FastqRead class.
with open_compressed("BRCA1_input_sample.fq.bz2") as handle:
# %time read_quals = [x.average_quality() for x in parse_fastq_reads(handle)]
print(f"Median average quality is {median(read_quals)}")
del read_quals
# # Benchmark 3: filtering reads on quality
#
# This code creates a list of reads for which all bases are at least Q20.
# The performance and usage in this section is quite similar to Benchmark 2.
# ## fqfa
#
# This code uses the ``min_quality()`` method implemented by the FastqRead class.
with open_compressed("BRCA1_input_sample.fq.bz2") as handle:
# %time filt_reads = [x for x in parse_fastq_reads(handle) if x.min_quality() >= 20]
print(f"Kept {len(filt_reads)} reads after applying filter.")
del filt_reads
| docs/notebooks/benchmarks_bz2.ipynb |
# +
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import confusion_matrix,accuracy_score
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM, Activation
from keras.callbacks import EarlyStopping
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# %matplotlib inline
# -
dataset_train=pd.read_csv('PM_train.txt',sep=' ',header=None).drop([26,27],axis=1)
col_names = ['id','cycle','setting1','setting2','setting3','s1','s2','s3','s4','s5','s6','s7','s8','s9','s10','s11','s12','s13','s14','s15','s16','s17','s18','s19','s20','s21']
dataset_train.columns=col_names
print('Shape of Train dataset: ',dataset_train.shape)
dataset_train.head()
# Load the test set; the trailing all-NaN columns 26/27 are dropped, as in training.
dataset_test = pd.read_csv('PM_test.txt', sep=' ', header=None).drop([26, 27], axis=1)
dataset_test.columns = col_names
#dataset_test.head()
# Bug fix: this previously printed dataset_train.shape under a "Test dataset" label.
print('Shape of Test dataset: ', dataset_test.shape)
dataset_train.head()  # NOTE(review): likely meant dataset_test.head() — confirm intent
pm_truth=pd.read_csv('PM_truth.txt',sep=' ',header=None).drop([1],axis=1)
pm_truth.columns=['more']
pm_truth['id']=pm_truth.index+1
pm_truth.head()
# generate column max for test data
rul = pd.DataFrame(dataset_test.groupby('id')['cycle'].max()).reset_index()
rul.columns = ['id', 'max']
rul.head()
# run to failure
pm_truth['rtf']=pm_truth['more']+rul['max']
pm_truth.head()
pm_truth.drop('more', axis=1, inplace=True)
dataset_test=dataset_test.merge(pm_truth,on=['id'],how='left')
dataset_test['ttf']=dataset_test['rtf'] - dataset_test['cycle']
dataset_test.drop('rtf', axis=1, inplace=True)
dataset_test.head()
dataset_train['ttf'] = dataset_train.groupby(['id'])['cycle'].transform(max)-dataset_train['cycle']
dataset_train.head()
df_train=dataset_train.copy()
df_test=dataset_test.copy()
period=30
df_train['label_bc'] = df_train['ttf'].apply(lambda x: 1 if x <= period else 0)
df_test['label_bc'] = df_test['ttf'].apply(lambda x: 1 if x <= period else 0)
df_train.head()
features_col_name=['setting1', 'setting2', 'setting3', 's1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11',
's12', 's13', 's14', 's15', 's16', 's17', 's18', 's19', 's20', 's21']
target_col_name='label_bc'
sc=MinMaxScaler()
df_train[features_col_name]=sc.fit_transform(df_train[features_col_name])
df_test[features_col_name]=sc.transform(df_test[features_col_name])
# +
def gen_sequence(id_df, seq_length, seq_cols):
    """Generate sliding-window feature sequences for one machine id.

    Pads ``seq_length - 1`` all-zero rows in front of ``id_df`` so early
    cycles also yield a full-length window, then slides a window of
    ``seq_length`` rows over the selected feature columns.

    Parameters
    ----------
    id_df : pd.DataFrame
        Rows for a single machine id, in cycle order.
    seq_length : int
        Number of timesteps per LSTM input sequence.
    seq_cols : list of str
        Feature column names included in each sequence.

    Returns
    -------
    np.ndarray
        Array of shape ``(len(id_df) - 1, seq_length, len(seq_cols))``.
    """
    # Zero-padding frame with the same columns as id_df.
    df_zeros = pd.DataFrame(np.zeros((seq_length - 1, id_df.shape[1])),
                            columns=id_df.columns)
    # DataFrame.append was removed in pandas 2.0; pd.concat is the
    # supported equivalent of the original append call.
    id_df = pd.concat([df_zeros, id_df], ignore_index=True)
    data_array = id_df[seq_cols].values
    num_elements = data_array.shape[0]
    lstm_array = []
    for start, stop in zip(range(0, num_elements - seq_length),
                           range(seq_length, num_elements)):
        lstm_array.append(data_array[start:stop, :])
    return np.array(lstm_array)
# function to generate labels
def gen_label(id_df, seq_length, seq_cols, label):
    """Generate the per-window label array for one machine id.

    Mirrors :func:`gen_sequence`: after padding ``seq_length - 1`` zero
    rows in front, each window ending at row ``stop`` is labelled with
    the ``label`` value at that row.

    Parameters
    ----------
    id_df : pd.DataFrame
        Rows for a single machine id, in cycle order.
    seq_length : int
        Window length used by ``gen_sequence``.
    seq_cols : list of str
        Kept for signature parity with ``gen_sequence``; not used here.
    label : str
        Name of the label column.

    Returns
    -------
    np.ndarray
        1-D array of ``len(id_df) - 1`` label values.
    """
    df_zeros = pd.DataFrame(np.zeros((seq_length - 1, id_df.shape[1])),
                            columns=id_df.columns)
    # DataFrame.append was removed in pandas 2.0; pd.concat is the
    # supported equivalent of the original append call.
    id_df = pd.concat([df_zeros, id_df], ignore_index=True)
    # The original loop appended id_df[label][stop] for stop in
    # range(seq_length, num_elements) — i.e. every label from row
    # seq_length onward of the padded frame.
    return np.array(id_df[label].values[seq_length:])
# -
seq_length=50
seq_cols=features_col_name
X_train=np.concatenate(list(list(gen_sequence(df_train[df_train['id']==id], seq_length, seq_cols)) for id in df_train['id'].unique()))
print(X_train.shape)
# generate y_train
y_train=np.concatenate(list(list(gen_label(df_train[df_train['id']==id], 50, seq_cols,'label_bc')) for id in df_train['id'].unique()))
print(y_train.shape)
X_test=np.concatenate(list(list(gen_sequence(df_test[df_test['id']==id], seq_length, seq_cols)) for id in df_test['id'].unique()))
print(X_test.shape)
# generate y_test
y_test=np.concatenate(list(list(gen_label(df_test[df_test['id']==id], 50, seq_cols,'label_bc')) for id in df_test['id'].unique()))
print(y_test.shape)
# +
nb_features =X_train.shape[2]
timestamp=seq_length
model = Sequential()
model.add(LSTM(
input_shape=(timestamp, nb_features),
units=100,
return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(
units=50,
return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(units=1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# -
# Train with early stopping on validation loss, then evaluate on train and test.
model.fit(X_train, y_train, epochs=10, batch_size=200, validation_split=0.05, verbose=1,
          callbacks=[EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto')])
scores = model.evaluate(X_train, y_train, verbose=1, batch_size=200)
print('Accuracy: {}'.format(scores[1]))
# Sequential.predict_classes was removed in TensorFlow 2.6; for a single
# sigmoid output, threshold the predicted probabilities at 0.5 instead.
y_pred = (model.predict(X_test) > 0.5).astype('int32')
print('Accuracy of model on test data: ', accuracy_score(y_test, y_pred))
print('Confusion Matrix: \n', confusion_matrix(y_test, y_pred))
def prob_failure(machine_id):
    """Return the predicted failure probability (percent) for one machine.

    Relies on the module-level ``df_test``, ``model``, ``seq_length`` and
    ``seq_cols``; scores every window for the machine and reports the
    prediction for the most recent one.
    """
    # Select the rows belonging to this machine and window them.
    rows_for_machine = df_test[df_test.id == machine_id]
    sequences = gen_sequence(rows_for_machine, seq_length, seq_cols)
    # Score all windows; the last window reflects the machine's latest state.
    predictions = model.predict(sequences)
    return (predictions[-1] * 100)[0]
machine_id=16
print('Probability that machine will fail within 30 days: ',prob_failure(machine_id))
print("hello")
| Pred-Maintainance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="VzG3C-sXEsQ7" colab_type="text"
# # Decision Tree Classifier for Iris Dataset Classification
# + [markdown] id="lE4uPjxcFDz2" colab_type="text"
# ### Import the required libraries
# + id="SlW0wdgW2qgQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="c91900a6-a462-4fe3-a5f4-dc67e49de085"
# import required libraries
import sklearn.datasets as datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, plot_confusion_matrix
from sklearn import tree
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# + [markdown] id="SdqogEtxFKoH" colab_type="text"
# ### Load Iris dataset
# + id="SC4uzFut8IUL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0b7a6a1f-145c-4e1e-fd34-75d598e263a3"
from google.colab import drive
drive.mount('/content/drive')
# + id="2KDmzREg8K_L" colab_type="code" colab={}
iris_df = pd.read_csv("/content/drive/My Drive/Colab Notebooks/Iris.csv")
# + id="ShwF9OqcDm4-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="cf22eb0f-8174-4d79-87af-b607caa61f25"
iris_df.head()
# + [markdown] id="Ms2Z99NhF6f4" colab_type="text"
# ### Explore the dataset
# + id="D9AgmjY08hPu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} outputId="897b3224-ebb9-4074-876d-3a3cf0b9ba03"
iris_df.isnull().any()
# + id="QSjqn_Bh8l4b" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} outputId="6095e9d8-3286-4432-9e5b-d61fdc870fb6"
iris_df.dtypes
# + id="5uo_XCfI8r80" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 284} outputId="e761ef0d-cbb3-4068-e920-fc7dd4cca4b7"
iris_df.describe()
# + [markdown] id="yWqdPyiGGCLa" colab_type="text"
# ### Perform pair plotting to view relationship between the features present in the data
# + id="qG05Da668vJM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 944} outputId="15bf732c-60d4-4c8d-ba11-d984720cc736"
sns.pairplot(iris_df, hue='Species')
# + [markdown] id="CdW2UTKxGJ3k" colab_type="text"
# ### Split data into train and test set
# + id="irXgIQdV9HIl" colab_type="code" colab={}
X = iris_df[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']].values
y = iris_df['Species'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.9, random_state=1)
# + [markdown] id="ojlzjLytGRMG" colab_type="text"
# ### Train the decision tree classifier
# + id="YntSNeu-9HVt" colab_type="code" colab={}
#scikit learn decision tree model training
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
# + [markdown] id="iSExYV61GZxk" colab_type="text"
# ### Test the model trained on test set
# + id="atc-OIFh6yKz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 101} outputId="27d05b66-6699-42af-e2d5-ec7c9fb6bcad"
prediction = clf.predict(X_test)
prediction
# + [markdown] id="bneIK8NtGm3i" colab_type="text"
# ##### Perform Evaluation on Test set
# + id="apMt2fRd9HNM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="06ae1bb0-fdee-415b-ad23-facc5c028de9"
# evaluation for multi class classification
print(classification_report(y_test, prediction))
# + [markdown] id="Rpz0iw21G4uE" colab_type="text"
# ##### Plot Confusion Matrix
# + id="vh3Gzb4Q9HAN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 300} outputId="433709a6-7a99-4cca-cc68-0d924637a9a6"
plot_confusion_matrix(clf, X_test, y_test)
# + [markdown] id="okI771f0HOJB" colab_type="text"
# ### Visualize the Decision Tree
# + id="tnFbfEQp2xY6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="c775a6dc-d35d-4a86-9b19-b39a61873515"
# Install required libraries
# !pip install pydotplus
# !apt-get install graphviz -y
# + id="FU3FR1Sb2zFB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 846} outputId="12ee48ff-50dd-4a3c-b3df-eb358049a778"
# Import necessary libraries for graph viz
# sklearn.externals.six was removed in scikit-learn 0.23; the standard
# library's io.StringIO is the drop-in replacement.
from io import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
# Visualize the graph: export the fitted tree to DOT, render to PNG with
# pydotplus and display it inline.
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data, feature_names=['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm'],
                filled=True, rounded=True,
                special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
# + [markdown] id="A3cqBQJXDG3H" colab_type="text"
# ### Feeding new data to the classifier to predict the right class
# + id="5YesJjzIDLjh" colab_type="code" colab={}
SepalLengthCm = 4.8
SepalWidthCm = 2.9
PetalLengthCm = 1.3
PetalWidthCm = 0.2
# + id="Wo9mUCpQDgqQ" colab_type="code" colab={}
x = [[SepalLengthCm, SepalWidthCm, PetalLengthCm, PetalWidthCm]]
res = clf.predict(x)
# + id="EtirmzusDwOe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2e3b3d72-8fe7-423e-abae-c62f86797deb"
print("The class predicted is --> " + str(*res))
| Decision_Tree_Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/BaishaliChetia/CapsNet-Keras/blob/master/VGGprunedMnist.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="TY3FXDEqMlER" outputId="27e88225-a3ea-4c32-c22c-3aca80a25d57"
import tempfile
import os
import tensorflow as tf
import numpy as np
from tensorflow import keras
import pandas as pd
import matplotlib.pyplot as plt
# %load_ext tensorboard
# + id="1lMhMBepNP62" colab={"base_uri": "https://localhost:8080/"} outputId="f5ddf920-d587-4432-dcd5-0209c3282a5f"
pip install -q tensorflow-model-optimization
# + id="2v8C4zg_7714"
import tensorflow_model_optimization as tfmot
# + id="XF5afecBVrsY"
tf.random.set_seed(500000)
# + id="I6XK1tAnMw6l"
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 and 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# + colab={"base_uri": "https://localhost:8080/"} id="DlLCmXVPOIeo" outputId="d0c0a2cc-e892-47a2-8324-9c8c419ff219"
train_X =[]
for i in range(train_images.shape[0]):
train_pad = np.pad(train_images[i], pad_width=10 , mode= 'edge')
#print(train_pad)
train_X.append(train_pad)
#PADDING test images
test_X = []
for i in range(test_images.shape[0]):
test_pad = np.pad(test_images[i], pad_width=10 , mode= 'edge')
#print(train_pad)
test_X.append(test_pad)
train_X = np.array(train_X)
test_X = np.array(test_X)
train_X.shape, test_X.shape
# + colab={"base_uri": "https://localhost:8080/"} id="Rx4ohVZLOYuO" outputId="b0df7112-3ec5-43e9-98d9-bf3475a04f2c"
train_X=np.dstack([train_X] * 3)
test_X=np.dstack([test_X]*3)
train_X.shape,test_X.shape
# + colab={"base_uri": "https://localhost:8080/"} id="lxln7tiSOlW1" outputId="91f37343-9a16-409d-e074-1b50590407aa"
train_X = train_X.reshape(-1, 48,48,3)
test_X= test_X.reshape (-1,48,48,3)
train_X = train_X.astype('float32')
test_X = test_X.astype('float32')
train_X.shape,test_X.shape
# + id="aC2Z6I3_WXXm"
from keras.callbacks import ModelCheckpoint, CSVLogger
#comparison_metric = MyAccuracy()
#checkpoint_filepath = "/content/drive/MyDrive/Weights/weights-improvement-{epoch:02d}-{val_my_accuracy:.2f}.hdf5"
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath = "/content/drive/MyDrive/MnistResults/vggfine_best_weights4.hdf5",
save_weights_only=True,
monitor="val_accuracy",
#monitor="val_my_accuracy_19",
mode='max',
save_best_only=True)
model_checkpoint_callback2 = tf.keras.callbacks.ModelCheckpoint(
filepath = "/content/drive/MyDrive/MnistResults/vggfine_latest_weights4.hdf5",
save_weights_only=True,
monitor="val_accuracy",
mode='max',
save_best_only=False)
log_csv = CSVLogger("/content/drive/MyDrive/MnistResults/vggfine_mylogs4.csv", separator = ",", append = False)
callback_list = [model_checkpoint_callback, model_checkpoint_callback2, log_csv]
# + colab={"base_uri": "https://localhost:8080/"} id="bWeSAUAYWh1r" outputId="302f62cd-35a9-416b-e38d-93151fae7590"
from google.colab import drive
drive.mount('/content/drive')
# + id="5Sa9pwRbOxkQ"
vggModel = tf.keras.applications.VGG16(weights='imagenet', include_top=False, input_shape=(48, 48, 3))
vggModel.trainable = False
# + colab={"base_uri": "https://localhost:8080/"} id="TrGc2YL1M5cQ" outputId="93f9511e-e386-4bdd-8967-d756094f5271"
# Define the model architecture.
model = keras.Sequential([
vggModel,
keras.layers.Flatten(),
tf.keras.layers.Dropout(0.5),
keras.layers.Dense(256, activation='relu'),
keras.layers.Dense(50, activation = 'relu'),
keras.layers.Dense(10, activation = 'softmax')
])
# Train the digit classification model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_X,
train_labels,
epochs= 50,
validation_split=0.1, callbacks = callback_list
)
# + colab={"base_uri": "https://localhost:8080/", "height": 648} id="S-9w1EDn1Und" outputId="46418550-008c-424f-a94f-016a02887a6f"
vggAccData = pd.read_csv("/content/drive/MyDrive/MnistResults/vggfine_mylogs4.csv")
plt.rcParams["figure.figsize"] = (30, 10)
plt.rcParams["font.size"] = 20
fig, ax = plt.subplots(1, 2)
ax[0].plot(vggAccData['accuracy'])
ax[0].plot(vggAccData['val_accuracy'])
ax[0].set_title('Model Accuracy')
ax[0].set_ylabel('Accuracy')
ax[0].set_xlabel('Epoch')
ax[0].legend(['Training Accuracy', 'Validation Accuracy'], loc='best')
ax[1].plot(vggAccData['loss'])
ax[1].plot(vggAccData['val_loss'])
ax[1].set_title('Model Loss')
ax[1].set_ylabel('Loss')
ax[1].set_xlabel('Epoch')
ax[1].legend(['Training Loss', 'Validation Loss'], loc='best')
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="9qjv5YOxoeDO" outputId="0d27cc73-5065-4a14-e398-f213b2a33a4f"
_, baseline_model_accuracy = model.evaluate(
test_X, test_labels, verbose=1)
# + colab={"base_uri": "https://localhost:8080/"} id="OZpv30WnTh-r" outputId="7b0501d8-d513-4d3a-b088-6b51099b27f3"
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="uA5PCZcANZ3S" outputId="fb090bae-db6a-49ee-f720-4f67755c20a8"
import tensorflow_model_optimization as tfmot
prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude
# Compute end step to finish pruning after 2 epochs.
batch_size = 128
epochs = 2
validation_split = 0.1 # 10% of training set will be used for validation set.
num_images = train_images.shape[0] * (1 - validation_split)
end_step = np.ceil(num_images / batch_size).astype(np.int32) * epochs
pruning_params = {
'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0.50,
final_sparsity=0.80,
begin_step=0,
end_step=end_step)
}
# Define model for pruning.
model_for_pruning = prune_low_magnitude(model, **pruning_params)
# `prune_low_magnitude` requires a recompile.
model_for_pruning.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
#model_for_pruning.summary()
logdir = tempfile.mkdtemp()
callbacks = [
tfmot.sparsity.keras.UpdatePruningStep(),
tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
]
model_for_pruning.fit(train_X, train_labels,
batch_size=batch_size, epochs=epochs, validation_split=validation_split,
callbacks=callbacks)
_, model_for_pruning_accuracy = model_for_pruning.evaluate(
test_X, test_labels, verbose=0)
# + colab={"base_uri": "https://localhost:8080/"} id="uLdUID3ToNRf" outputId="80cc09cc-ec2d-4b38-f4d1-e5d62d7c00bc"
vggfine_baseModel = model.save("/content/drive/MyDrive/MnistResults/vggfine_save_basemodel4.tf", save_format='tf')
# + colab={"base_uri": "https://localhost:8080/"} id="fPzVhtnweIkX" outputId="1f699a06-a97f-4719-b117-9f5b7658a76b"
finalSparsity = 0.05
list_accuracy0 = []
while finalSparsity <= 0.95:
model.load_weights('/content/drive/MyDrive/MnistResults/vggfine_best_weights4.hdf5')
pruning_params = {
'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0.0,
final_sparsity=finalSparsity,
begin_step=0,
end_step=end_step)
}
# Define model for pruning.
model_for_pruning = prune_low_magnitude(model, **pruning_params)
# `prune_low_magnitude` requires a recompile.
model_for_pruning.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
#model_for_pruning.summary()
logdir = tempfile.mkdtemp()
callbacks = [
tfmot.sparsity.keras.UpdatePruningStep(),
tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
]
model_for_pruning.fit(train_X, train_labels,
batch_size=batch_size, epochs=epochs, validation_split=validation_split,
callbacks=callbacks)
_, model_for_pruning_accuracy = model_for_pruning.evaluate(
test_X, test_labels, verbose=0)
print(f'Pruned test accuracy at final_sparsity: at {finalSparsity} is {model_for_pruning_accuracy}')
list_accuracy0.append(model_for_pruning_accuracy)
finalSparsity += 0.05
# + colab={"base_uri": "https://localhost:8080/"} id="OCy5M5AguljN" outputId="b855caa8-219c-4853-85fc-972fdfb80594"
model_for_pruning.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="jYGxHae9u9O8" outputId="caf98611-0d05-456d-bd63-1f0335c756a9"
modelP = model_for_pruning
model_for_export = tfmot.sparsity.keras.strip_pruning(modelP)
model_for_export.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="EXiauLihvOVW" outputId="6c4869b7-9d62-4f16-c69e-9c2fae9cd283"
model1 = keras.models.load_model("/content/drive/MyDrive/MnistResults/vggfine_save_basemodel4.tf")
model1.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="E9bToATCwGvR" outputId="10e80daf-acb3-4c13-f65c-dd782963d4ba"
import tensorflow_model_optimization as tfmot
prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude
# Compute end step to finish pruning after 2 epochs.
batch_size = 128
epochs = 2
validation_split = 0.1 # 10% of training set will be used for validation set.
num_images = train_images.shape[0] * (1 - validation_split)
end_step = np.ceil(num_images / batch_size).astype(np.int32) * epochs
finalSparsity = 0.55
list_accuracy50 = []
while finalSparsity <= 0.95:
model1.load_weights('/content/drive/MyDrive/MnistResults/vggfine_best_weights4.hdf5')
pruning_params = {
'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0.50,
final_sparsity=finalSparsity,
begin_step=0,
end_step=end_step)
}
# Define model for pruning.
model_for_pruning = prune_low_magnitude(model1, **pruning_params)
# `prune_low_magnitude` requires a recompile.
model_for_pruning.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
#model_for_pruning.summary()
logdir = tempfile.mkdtemp()
callbacks = [
tfmot.sparsity.keras.UpdatePruningStep(),
tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
]
model_for_pruning.fit(train_X, train_labels,
batch_size=batch_size, epochs=epochs, validation_split=validation_split,
callbacks=callbacks)
_, model_for_pruning_accuracy = model_for_pruning.evaluate(
test_X, test_labels, verbose=0)
print(f'Pruned test accuracy at final_sparsity: at {finalSparsity} is {model_for_pruning_accuracy}')
list_accuracy50.append(model_for_pruning_accuracy)
finalSparsity += 0.05
print(list_accuracy50)
# + colab={"base_uri": "https://localhost:8080/"} id="8urNrRJe7geS" outputId="391689b0-cd1e-4c58-f4fe-c74a8874b805"
def prune_all_sparsity(initial, final):
    """Prune the fine-tuned VGG model over a range of final sparsities.

    Starting at ``final``, steps the final sparsity up by 0.05 until it
    exceeds 0.95.  For each target, reloads the best fine-tuned weights
    into ``model1``, wraps it with magnitude pruning, briefly retrains,
    evaluates on the test set and records the accuracy.

    Relies on module-level names: ``model1``, ``end_step``,
    ``prune_low_magnitude``, ``tfmot``, ``tf``, ``batch_size``,
    ``epochs``, ``validation_split`` and the train/test arrays.

    Returns the list of test accuracies, one per final sparsity.
    """
    initialSparsity = initial
    finalSparsity = final
    list_accuracy = []
    while finalSparsity <= 0.95:
        # Reset to the best fine-tuned weights before each pruning run.
        model1.load_weights('/content/drive/MyDrive/MnistResults/vggfine_best_weights4.hdf5')
        schedule = tfmot.sparsity.keras.PolynomialDecay(
            initial_sparsity=initialSparsity,
            final_sparsity=finalSparsity,
            begin_step=0,
            end_step=end_step)
        pruning_params = {'pruning_schedule': schedule}
        # Define model for pruning; prune_low_magnitude requires a recompile.
        model_for_pruning = prune_low_magnitude(model1, **pruning_params)
        model_for_pruning.compile(
            optimizer='adam',
            loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
            metrics=['accuracy'])
        logdir = tempfile.mkdtemp()
        callbacks = [
            tfmot.sparsity.keras.UpdatePruningStep(),
            tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
        ]
        model_for_pruning.fit(train_X, train_labels,
                              batch_size=batch_size, epochs=epochs,
                              validation_split=validation_split,
                              callbacks=callbacks)
        _, model_for_pruning_accuracy = model_for_pruning.evaluate(
            test_X, test_labels, verbose=0)
        print(f'Pruned test accuracy at final_sparsity: at {finalSparsity} is {model_for_pruning_accuracy}')
        list_accuracy.append(model_for_pruning_accuracy)
        finalSparsity += 0.05
    print(list_accuracy)
    return list_accuracy
# Call prune_all_sparsity for different initial sparsities (0.0 to 0.50 in
# steps of 0.10), each sweeping the final sparsity from 0.55 upward.
initial = 0.0
while initial <= 0.50:
    # Typo fix: the progress message previously read "Pruniing".
    print(f"Pruning for initial sparsity: {initial}")
    final = 0.55
    prune_result = prune_all_sparsity(initial, final)
    print(f"accuracy list at {initial} sparsity is {prune_result}")
    initial += 0.10
# + colab={"base_uri": "https://localhost:8080/"} id="SAldXthakq1G" outputId="fd07ef2f-5d8c-44c4-e90e-67925552843c"
# !du -sh /content/drive/MyDrive/MnistResults/*
# + [markdown] id="gnBa4RaGve0q"
# ## Compression of VGG
# + id="NQn4wGIHvd0o"
| VGGprunedMnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Transformations
# !pip install napari
# !pip install SimpleITK
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# %gui qt
import os
if 'TEST_ENV' in os.environ:
TEST_ENV = os.environ['TEST_ENV'].lower() == "true"
else:
TEST_ENV = 0
print(f"Running test environment: {bool(TEST_ENV)}")
# +
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
resp = urlopen("http://www.fmrib.ox.ac.uk/primers/intro_primer/ExBox3/ExBox3.zip")
zipfile = ZipFile(BytesIO(resp.read()))
img_file = zipfile.extract("ExBox3/T1_brain.nii.gz")
mask_file = zipfile.extract("ExBox3/T1_brain_seg.nii.gz")
# +
import SimpleITK as sitk
import numpy as np
# load image and mask
img_file = "./ExBox3/T1_brain.nii.gz"
mask_file = "./ExBox3/T1_brain_seg.nii.gz"
img = sitk.GetArrayFromImage(sitk.ReadImage(img_file))
img = img.astype(np.float32)
mask = mask = sitk.GetArrayFromImage(sitk.ReadImage(mask_file))
mask = mask.astype(np.float32)
assert mask.shape == img.shape
print(f"Image shape {img.shape}")
print(f"Image shape {mask.shape}")
# -
# napari opens a GUI viewer, which is unavailable in the headless test
# environment — there view_batch becomes a no-op stand-in.
if TEST_ENV:
    def view_batch(batch):
        # No-op: headless CI cannot open a GUI window.
        pass
else:
    # %gui qt
    import napari
    def view_batch(batch):
        # Show the image volume and overlay the mask semi-transparently.
        viewer = napari.view_image(batch["data"].cpu().numpy(), name="data")
        viewer.add_image(batch["mask"].cpu().numpy(), name="mask", opacity=0.2)
# +
import torch
from rising.transforms import *
batch = {
"data": torch.from_numpy(img).float()[None, None],
"mask": torch.from_numpy(mask).long()[None, None],
}
def apply_transform(trafo, batch):
    """Apply ``trafo`` to ``batch``, print summary statistics of the
    transformed ``data`` tensor, and return the transformed batch."""
    result = trafo(**batch)
    data = result['data']
    print(f"Transformed data shape: {data.shape}")
    print(f"Transformed mask shape: {result['mask'].shape}")
    print(f"Transformed data min: {data.min()}")
    print(f"Transformed data max: {data.max()}")
    print(f"Transformed data mean: {data.mean()}")
    return result
# -
print(f"Transformed data shape: {batch['data'].shape}")
print(f"Transformed mask shape: {batch['mask'].shape}")
print(f"Transformed data min: {batch['data'].min()}")
print(f"Transformed data max: {batch['data'].max()}")
print(f"Transformed data mean: {batch['data'].mean()}")
trafo = Scale(1.5, adjust_size=False)
transformed = apply_transform(trafo, batch)
view_batch(transformed)
trafo = Rotate([0, 0, 45], degree=True, adjust_size=False)
transformed = apply_transform(trafo, batch)
view_batch(transformed)
trafo = Translate([0.1, 0, 0], adjust_size=False)
transformed = apply_transform(trafo, batch)
view_batch(transformed)
| notebooks/transformations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import os
import pandas as pd
import geopandas as gpd
os.chdir("C:/Users/aroy29/Dropbox (ASU)/Strava Analysis/Data/MAG2016/Analysis_Generated_Data")
train_data = pd.read_csv("MAG_train_data_GLM_global.csv")
train_data.head()
train_data.columns
#Read predictions and assign classes to the predicted volume for TBAG Data
def assign_class(filepath, scenario):
    """Read model predictions from ``filepath`` and bin the predicted volume
    into qualitative classes for the given scenario.

    Parameters
    ----------
    filepath : str
        Path to a CSV file containing a ``predicted`` column.
    scenario : int
        Scenario id (1-5); selects the class boundaries.

    Returns
    -------
    list of str
        One label per prediction: 'VL', 'L', 'M', 'H' or 'VH'.

    Raises
    ------
    KeyError
        If ``scenario`` is not one of 1-5 (the original silently did nothing).
    """
    # Upper bounds (exclusive) for the VL/L/M/H classes per scenario; values
    # at or above the last bound are classed 'VH'.
    # BUG FIX: the original tested `item in range(a, b)`, which is always
    # False for non-integer predictions, so every float fell through to 'VH'.
    # It also appended to an undefined list `scale` (NameError on first call)
    # and never returned anything.
    bounds_by_scenario = {
        1: (151, 450, 800, 1500),
        2: (250, 700, 1200, 1600),
        3: (65, 250, 700, 1400),
        4: (500, 1500, 2000, 2500),
        5: (125, 350, 800, 1500),
    }
    bounds = bounds_by_scenario[scenario]
    labels = ('VL', 'L', 'M', 'H')
    df = pd.read_csv(filepath)
    scale = []
    for item in df["predicted"]:
        # Find the first bound the prediction falls under; 'VH' otherwise.
        for bound, label in zip(bounds, labels):
            if item < bound:
                scale.append(label)
                break
        else:
            scale.append('VH')
    return scale
tbag_counts = pd.read_csv("TBAG_Counts_2016.csv")
tbag_counts.head()
strava_segments_tbag = gpd.read_file("Shapefiles/TBAG_Strava_Segments2016.shp")
strava_segments_tbag.columns
df = strava_segments_tbag.merge(tbag_counts,left_on="LocID",right_on="Site_ID")
df.head()
df["Site_ID"] = df["EDGE_ID"]
df["TBAG_Daily"] = df["Count_y"]
df["Strava_Daily"] = df["TATHCNT"]
df1 = df[["Site_ID","Longitude","Latitude","TBAG_Daily","Strava_Daily","Month","dist_to_green_spaces","dist_to_residential_areas","dist_to_commercial_areas"]]
df1 = df1.groupby(["Site_ID","Longitude","Latitude","Month"],as_index=False).mean()
df1.head()
tbag_strava_counts = df1
tbag_demographics = gpd.read_file("Shapefiles/Export_Output_2.shp")
tbag_demographics.head()
# ### Calculate Demographic variables
# Rename shapefile-truncated columns to readable names (ESRI shapefiles cap
# attribute field names at 10 characters) and build aggregate demographics.
tbag_demographics["Site_ID"] = tbag_demographics.maricopa_2
tbag_demographics["Pop_per_sq_mile"] = tbag_demographics.POP_PER_SQ
tbag_demographics["MEDIAN_HOUSEHOLD_INCOME"] = tbag_demographics.MEDIAN_HOU
tbag_demographics["PCT_BICYCLE"] = tbag_demographics.PCT_BICYCL
# NOTE(review): these features divide the SUM of percentage buckets by the
# bucket count (3/6/5), i.e. they compute the MEAN share per bucket, not the
# combined share ("one or more vehicles" would normally be a plain sum of
# the one/two/three-vehicle shares). Confirm this averaging is intentional.
tbag_demographics["PCT_ONE_OR_MORE_VEH"] = (tbag_demographics.PCT_ONE_VE+tbag_demographics.PCT_TWO_VE+
                                            tbag_demographics.PCT_THREE_)/3
tbag_demographics["PCT_NON_WHITE"] = (tbag_demographics.PCT_HISPAN+tbag_demographics.PCT_BLACK_+
                                      tbag_demographics.PCT_NATIVE+tbag_demographics.PCT_ASIAN_+
                                      tbag_demographics.PCT_PACIFI+tbag_demographics.PCT_OTHER_)/6
tbag_demographics["EDU_ABV_HS"] = (tbag_demographics.PCT_HSGRAD+tbag_demographics.PCT_SOMECO+
                                   tbag_demographics.PCT_ASSOCI+tbag_demographics.PCT_BACHEL+
                                   tbag_demographics.PCT_GRADPR)/5
# Keep only the engineered columns plus the merge key.
tbag_demographics = tbag_demographics[["Site_ID","Pop_per_sq_mile","MEDIAN_AGE","MEDIAN_HOUSEHOLD_INCOME",
                                       "PCT_BICYCLE","PCT_ONE_OR_MORE_VEH","PCT_NON_WHITE","EDU_ABV_HS"]]
tbag_train_data = tbag_strava_counts.merge(tbag_demographics,on="Site_ID")
tbag_train_data = tbag_train_data.groupby(["Site_ID","Longitude","Latitude","Month","MEDIAN_AGE",
"MEDIAN_HOUSEHOLD_INCOME","PCT_BICYCLE","PCT_ONE_OR_MORE_VEH",
"PCT_NON_WHITE","EDU_ABV_HS"],as_index=False).mean()
tbag_train_data.head()
# ### Calculate AADT variables
tbag_aadt = gpd.read_file("Shapefiles/Tempe Shapefiles/Tempe_strava_edges_counts_aadt.shp")
tbag_aadt = tbag_aadt[["edge_id","aadt","speed_limi"]].groupby(["edge_id"],as_index=False).mean()
tbag_aadt["AADT"] = tbag_aadt.aadt
tbag_aadt["Avg_segment_speed_limit"] = tbag_aadt.speed_limi
tbag_aadt = tbag_aadt[["edge_id","AADT","Avg_segment_speed_limit"]]
tbag_aadt.head()
tbag_train_data = tbag_train_data.merge(tbag_aadt,left_on="Site_ID",right_on="edge_id")
tbag_train_data.head()
tbag_train_data.to_csv("Tempe_TBAG_Test_data.csv")
# +
### Train the model using MAG data and test with TBAG data
| Notebooks/Generate TBAG test data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Regresión lineal simple (RLS)
#
# La RLS, es la aproximación más simple al aprendizaje supervisado. En particular, la regresión lineal es una herramienta útil para predecir una respuesta cuantitativa.
#
# Es un método que tiene muchos años y está presente en toda la bibliografía.
#
# Aunque parezca super simple comparado con las técnicas modernas de machine learning, la regresión lineal aún es un método útil y ampliamente usado.
#
# Principalmente, sirve como un buen punto de partida para aproximaciones más nuevas: muchas de las técnicas **fancy** pueden interpretarse como generalizaciones o extensiones de la regresión lineal.
#
# Por lo tanto es súper importante tener una buena comprensión de la regresión lineal antes de estudiar los algoritmos más complejos de machine learning.
#
#
# ## Dataset Advertising
#
# Supongamos que que somos consultores estadísticos, y nos contratan con el objetivo de aumentar las ventas de un determinado producto.
# El dataset Advertising consiste en las ventas del producto en 200 mercados, y el presupuesto dedicado en publicidad en 3 medios: TV, radio y diario.
#
# Si logramos identificar una relación entre la inversión en publicidad y las ventas, podremos recomendarle a nuestro cliente hacia dónde debe dirigir su inversión en publicidad.
#
# La variables predictoras serán los presupuestos para cada canal y la variable de respuesta será las ventas.
#
# <u>Exploremos un poco los datos:</u>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
plt.rcParams["figure.figsize"] = (20,5)
df = pd.read_csv('https://datasets-humai.s3.amazonaws.com/datasets/advertising.csv')
df.head()
# Veamos la relación entre las ventas y la publicidad en cada uno de los medios
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
df.plot.scatter(x='TV', y='Sales', ax=ax1)
df.plot.scatter(x='Radio', y='Sales', ax=ax2)
df.plot.scatter(x='Newspaper', y='Sales', ax=ax3);
# Pensemos en estos datos. Algunas preguntas que podrían surgir:
# <ul>
# <li>¿Hay alguna relación entre el presupuesto en publicidad y las ventas?</li>
# <li>¿Qué tan fuerte es esa relación?</li>
# <li>¿Cuáles de los medios mencionados contribuyen a las ventas?</li>
# <li>¿Con cuánta precisión podemos predecir las ventas futuras?</li>
# <li>¿Es esta relación lineal?</li>
# </ul>
#
# Resulta que la regresión lineal puede ser usada para responder cada una de estas preguntas y algunas más.
# Veamos algunos conceptos y luego intentaremos responderlas.
#
# La regresión lineal simple intenta predecir una respuesta cuantitativa Y en base a una única variable predictora X.
# Asume que hay aproximadamente una relación lineal entre X e Y.
#
# Matemáticamente: $$ Sales \approx \hat {\beta}_{0} + \hat {\beta}_{1} TV $$
#
# 𝛽0 y 𝛽1 son dos constantes que representan el intercepto y la pendiente en el modelo lineal.
#
# Juntos, 𝛽0 y 𝛽1 son conocidos como los **parámetros del modelo**.
#
# Una vez que hemos usado nuestro set de entrenamiento para producir los estimadores y para los coeficientes del modelo, podemos predecir futuras ventas en base a un valor particular de TV.
#
# ## ¿Cómo calculamos los parámetros del modelo?
#
# Vamos a elegir el par 𝛽0 y 𝛽1 tales que minimizan la distancia entre la línea recta y los verdaderos valores que observamos:
#
# <img src="https://i.ibb.co/8c2zbDy/mco.png" alt="Girl in a jacket" width="80%">
#
# Ahora con Python:
#
from sklearn.linear_model import LinearRegression
#Mi modelo será una instancia de la clase LinearRegression (¡Recuerden Programación Orientada a Objetos!)
model = LinearRegression(fit_intercept=True)
# Definimos la "X" y la "y" con las que vamos a entrenar nuestro modelo
X = df.loc[:,['TV']]
y = df['Sales']
# Noten que alrededor de tv hay dos corchetes, mientras que alrededor de Sales hay uno sólo.
#
# Miren lo siguiente:
type(X)
type(y)
X.shape
y.shape
# En scikit learn las variables explicativas se expresan en un DataFrame y la variable explicada es siempre una serie.
#Los coeficientes (Betas) del modelo todavía no están definidos
model.coef_
# +
# Usamos el método fit para entrenar el modelo
# -
model.fit(X,y)
model.coef_
model.intercept_
# <strong> ¿Cómo interpretamos estos coeficientes? </strong>
#
# $ \hat {\beta}_{0} = 6.9748214882298925 $
#
# Este coeficiente indica que cuando la publicidad en TV es de 0, de todas maneras las ventas son de 6.97 unidades.
#
# $ \hat {\beta}_{1} = 0.05546477 $
#
# Este coeficiente indica que cuando agregamos 1 unidad de publicidad en TV, las ventas aumentan en 0.05 unidades.
#
# ## Ejercicio
#
# ¿Cuántas ventas esperaríamos con una inversión en televisión de 4 unidades?
# # Precisión de los coeficientes estimados
#
# La matemática que soporta la regresión lineal simple, se basa en suponer que la variable explicativa (X) y la explicada (y) guardan una relación lineal perfecta perturbada por **ruido aleatorio**: fenómenos que no podemos o no queremos explicar dentro del modelo y que no dependen de X.
#
# Los fenómenos del mundo real nunca son exactamente así, pero vamos a encontrar que esta simplificación es útil en muchos casos para, por ejemplo, estudiar la relación entre X e y.
#
# Lo bueno de Python es que podemos simular datos que sí cumplen estrictamente este supuesto de linealidad + ruido aleatorio y observar qué pasa con las derivaciones estadísticas.
#
# Supongamos que el precio de los departamentos de una ciudad es de 10000usd de base + usd2000/m2 más una perturbación aleatoria. Nuestra ciudad está compuesta por 1000 departamentos.
#
# Vamos a simular esa población:
# Las superficies de los departamentos se distribuyen normalmente y
# tienen una media de 100 mts2 con un desvío estándar de 20mts2
superficies = np.random.normal(loc=100, scale=20, size=1000).astype(int)
print(superficies[0:30])
# Los errores aleatorios tienen un promedio de $0 y un desvío estándar de usd80000
errores = np.random.normal(loc=0, scale=80000, size=1000).astype(int)
print(errores[0:30])
# Generamos nuestra "población" de 1000 departamentos
precios_departamentos = (superficies * 2000 + 10000 + errores).astype(int)
print(precios_departamentos[0:30])
# Ahora supongamos que somos un grupo de relevadoras de precios y esa población es completamente desconocida para nosotras. Tenemos la posiblidad de tocar el timbre a algunos vecinos de la ciudad y preguntarles cuánto pagaron por su casa, pero esto nos cuesta tiempo y esfuerzo.
#
# Nos preguntamos entonces:
#
#
#
# * Dada una casa de 100mts2 ¿Cuál es su precio? ¿Cuánta confianza puedo tener en ese valor? ¿Y dada una casa de 500mts2?
# * ¿Puedo afirmar con X% de confianza, que a mayor número de mts2 mayor precio?
# * ¿Cuántas casas tenemos que conocer para poder estimar los precios con un X% de confianza?
# * ¿Cuántas casas tenemos que conocer para entender cuánto influyen los mts2 en el precio con un X% de confianza?
#
# Todas estas preguntas se pueden responder si suponemos que en nuestra población se cumplen los supuestos de la regresión lineal (vamos a entrar en detalle en la próxima clase) y aplicamos técnicas estadísticas.
# ## 1. La confianza en las predicciones
#
# ¿Qué pasa si tomamos una muestra de 30 departamentos? ¿Cómo se vería nuestra regresión?
# +
df_poblacion = pd.DataFrame({'superficies':superficies,'precios':precios_departamentos})
df_muestra = df_poblacion.sample(30)
# -
model.fit(df_muestra[['superficies']],df_muestra['precios'])
coeficiente = model.coef_
print(coeficiente)
f'Según el modelo que podemos construir con esta muestra, por cada mts2 de superficie, el precio aumenta ${coeficiente[0]}'
# ¿Qué pasa si tomamos otra muestra?
df_muestra = df_poblacion.sample(30)
model.fit(df_muestra[['superficies']],df_muestra['precios'])
print(model.coef_)
# Ahora tomemos 100 muestras y vamos a graficarlas. También veamos en rojo la verdadera función generadora de los datos:
#
# precio_venta = 10000 + 2000 * superficie
# +
for i in range(100):
    # Draw a random sample of 30 apartments from the population
    df_muestra = df_poblacion.sample(30)
    # Fit the linear model on this sample
    model.fit(df_muestra[['superficies']],df_muestra['precios'])
    # Use the fitted model to predict prices for every apartment
    predicciones = model.predict(df_poblacion[['superficies']])
    # Plot each of the 100 fitted regressions (semi-transparent blue lines)
    plt.plot(df_poblacion['superficies'],predicciones,color='blue',alpha=0.1)
# True data-generating process (red line): price = 10000 + 2000 * surface
proceso_generador_perfecto = 10000 + df_poblacion['superficies'] * 2000
plt.plot(df_poblacion['superficies'],proceso_generador_perfecto,color='red')
plt.show()
# -
# Todas las regresiones son distintas, pero las predicciones se parecen mucho más alrededor de 100 que en los extremos ¿Recuerdan cuál era la superficie promedio de los departamentos en nuestra ciudad?
#
#
# ### Conclusión 1
#
# Las predicciones son más precisas cerca del centroide de los datos que en los extremos. En otras palabras, nuestro modelo conoce bien lo que vio y más allá de eso, sólo puede hacer extrapolaciones cada vez más imprecisas.
#
# ¿Qué pasa si en lugar de 30, tomamos muestras más grandes? Es decir, aunque cueste más esfuerzo hacemos un relevamiento más exhaustivo...
# +
for i in range(100):
    # Now draw a larger sample of 150 apartments
    df_muestra = df_poblacion.sample(150)
    # Fit the linear model on this sample
    model.fit(df_muestra[['superficies']],df_muestra['precios'])
    # Use the fitted model to predict prices for every apartment
    predicciones = model.predict(df_poblacion[['superficies']])
    # Plot each of the 100 fitted regressions (semi-transparent blue lines)
    plt.plot(df_poblacion['superficies'],predicciones,color='blue',alpha=0.1)
# True data-generating process (red line): price = 10000 + 2000 * surface
proceso_generador_perfecto = 10000 + df_poblacion['superficies'] * 2000
plt.plot(df_poblacion['superficies'],proceso_generador_perfecto,color='red')
plt.show()
# -
# ### Conclusión 2
# Si tomamos muestras más grandes, las regresiones son todas más parecidas entre sí.
#
# La interpretación estadística de esta incerteza en las predicciones, está dada por los intervalos de confianza.
# Algunas librerías de Python permiten calcular los intervalos de confianza de un modelo. No es el caso de scikit learn porque esta librería está pensada para machine learning en general, no sólo para regresiones y busca crear una interfaz común para todos los modelos.
#
# Para acceder a estimaciones estadísticas como los intervalos de confianza, en el próximo encuentro vamos a utilizar statsmodel.
| MachineLearning/1_Introduccion/rls.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Breast Cancer
#
# * [Breast Cancer Wisconsin (Diagnostic) Data Set - Kaggle](https://www.kaggle.com/uciml/breast-cancer-wisconsin-data)
# * https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29
#
# 1) ID 번호
# 2) 진단 (M = 악성, B = 양성)
# 3-32)
#
# 각 세포 핵에 대해 10 개의 실수 값 기능이 계산됩니다.
# ```
# a) 반경 (중심에서 주변 지점까지의 거리 평균)
# a) radius (mean of distances from center to points on the perimeter)
#
# b) 텍스처 (회색조 값의 표준 편차)
# b) texture (standard deviation of gray-scale values)
#
# c) 둘레
# c) perimeter
#
# d) 면적
# d) area
#
# e) 부드러움 (반경 길이의 국부적 변동)
# e) smoothness (local variation in radius lengths)
#
# f) 콤팩트 함 (둘레 ^ 2 / 면적-1.0)
# f) compactness (perimeter^2 / area - 1.0)
#
# g) 오목 함 (윤곽의 오목한 부분의 심각도)
# g) concavity (severity of concave portions of the contour)
#
# h) 오목한 점 (윤곽의 오목한 부분의 수)
# h) concave points (number of concave portions of the contour)
#
# i) 대칭
# i) symmetry
#
# j) 프랙탈 차원 ( "해안선 근사치"-1)
# j) fractal dimension ("coastline approximation" - 1)
#
# ```
# 평균, 표준 오류 및 "최악"또는 최대 (3 개 중 평균
# 이러한 특징 중 가장 큰 값)은 각 이미지에 대해 계산되었습니다.
# 결과적으로 30 개의 기능이 있습니다. 예를 들어 필드 3은 평균 반경, 필드
# 13은 반경 SE이고 필드 23은 최악의 반경입니다.
#
# 모든 특성 값은 4 자리 유효 숫자로 다시 코딩됩니다.
#
# 누락 된 속성 값 : 없음
#
# 등급 분포 : 357 양성, 212 악성
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use("seaborn")
# -
df = pd.read_csv("data/data.csv")
df.shape
df.head()
df.tail()
df.info()
df.isnull().sum()
del df["Unnamed: 32"]
df.nunique()
df.describe()
df["diagnosis"].value_counts().plot.barh()
h = df.hist(figsize=(20, 20))
df.describe().columns
num_cols = ['radius_mean', 'texture_mean', 'perimeter_mean', 'area_mean',
'smoothness_mean', 'compactness_mean', 'concavity_mean',
'concave points_mean', 'symmetry_mean', 'fractal_dimension_mean',
'radius_se', 'texture_se', 'perimeter_se', 'area_se', 'smoothness_se',
'compactness_se', 'concavity_se', 'concave points_se', 'symmetry_se',
'fractal_dimension_se', 'radius_worst', 'texture_worst',
'perimeter_worst', 'area_worst', 'smoothness_worst',
'compactness_worst', 'concavity_worst', 'concave points_worst',
'symmetry_worst', 'fractal_dimension_worst']
len(num_cols)
fig, ax = plt.subplots(6, 5, figsize=(20, 20))
h = df.loc[df["diagnosis"] == "M", num_cols].hist(alpha=0.5, ax=ax)
h = df.loc[df["diagnosis"] == "B", num_cols].hist(alpha=0.5, ax=ax)
plt.legend(['M', 'B'], shadow=True)
# Pairwise correlation matrix of the dataframe's numeric columns.
# NOTE(review): on pandas >= 2.0 df.corr() raises when non-numeric columns
# (here the string column 'diagnosis') are present unless numeric_only=True
# is passed — confirm the pandas version this notebook runs against.
corr = df.corr()
# Upper-triangle mask so the redundant symmetric half of the heatmap is hidden.
mask = np.triu(np.ones_like(corr, dtype=bool))
plt.figure(figsize=(20, 20))
sns.heatmap(corr, annot=True, fmt=".2f", cmap="coolwarm", vmax=1, vmin=-1, mask=mask)
# Standardise every numeric feature to zero mean / unit variance (z-scores),
# then carry the diagnosis label over so it can serve as the hue below.
numeric = df[num_cols]
df_num_std = numeric.sub(numeric.mean()).div(numeric.std())
df_num_std["diagnosis"] = df["diagnosis"]
df_melt = pd.melt(df_num_std,id_vars="diagnosis",
var_name="features",
value_name="value")
df_melt.head()
plt.figure(figsize=(20,10))
sns.violinplot(x="features", y="value", hue="diagnosis", data=df_melt, split=True, inner="quart")
plt.xticks(rotation=90)
| breast-cancer/eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from os import path
from matplotlib import pyplot as plt
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.pipeline import make_pipeline, make_union
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn import metrics
from functions import load_bad_words, build_data_path, print_report
from constants import LABEL_COLS
# -
training_data_path = build_data_path('train.csv')
df = pd.read_csv(training_data_path)
X = df['comment_text']
# df['not_toxic'] = df[LABEL_COLS].apply(not_toxic, axis=1)
# LABEL_COLS.append('not_toxic')
y = df[LABEL_COLS]
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.33)
# +
# One-vs-rest wrapper: trains one independent binary SVC per label column.
clf = OneVsRestClassifier(SVC(gamma='scale'))
# Bag-of-words TF-IDF features; lowercases text and drops English stop words.
tfidf = TfidfVectorizer(lowercase=True, stop_words='english')
# End-to-end model: raw comment text -> TF-IDF vectors -> one-vs-rest SVC.
pipeline = make_pipeline(tfidf, clf)
pipeline.fit(X_train, y_train)
y_predictions = pipeline.predict(X_valid)
# -
# Print evaluation metrics on the held-out validation split.
print_report(y_valid, y_predictions)
| src/SVC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
pred = pd.read_csv('predictors.csv')
msf = pd.read_csv('msf.csv')
# shift the return so that we have the future return to predict
msf['ret'] = msf.groupby(['permno']).ret.shift(-1)
df = pred.merge(msf).query('yyyymm>200000').dropna(subset=['ret'])
from pycaret.regression import *
df['ret'] = (df.ret*10000).astype(int)
training_data = df.query("yyyymm<201800")
test_data = df.query("yyyymm>201800")
pred_ret = setup(data = training_data, target = 'ret', session_id=123,
normalize = True,
combine_rare_levels = True, rare_level_threshold = 0.05,
remove_multicollinearity = True, multicollinearity_threshold = 0.95,
log_experiment = True, experiment_name = 'pred_ret',
test_data=test_data
)
compare_models()
# Train an XGBoost regressor inside the pycaret experiment set up above.
xgb = create_model('xgboost')
# Diagnostic plot (residuals by default) for the fitted model.
plot_model(xgb)
# BUG FIX: the original called finalize_model(cat), but `cat` was never
# defined anywhere (NameError). Finalize the xgboost model trained above;
# the variable name cat_final is kept for any downstream references.
cat_final = finalize_model(xgb)
# Score the finalized model on the held-out post-2018 test split.
predictions = predict_model(cat_final,data=test_data)
predictions
| ML/ml_pred.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
fname = "cs205-final-project/data/filtered.txt"
# +
#http://stackoverflow.com/questions/30088006/cant-figure-out-how-to-fix-the-error-in-the-following-code
# Read the file of newline-delimited JSON objects into a list of strings.
# PORTABILITY FIX: the original opened the file in binary mode and later
# joined the lines with a str separator, which raises TypeError on Python 3
# (and py3's lazy map() would add a second pitfall). Text mode plus a list
# comprehension behaves identically on Python 2 and 3.
with open(fname, 'r') as f:
    data = f.readlines()
# remove the trailing "\n" (and any other trailing whitespace) from each line
data = [line.rstrip() for line in data]
# each element of 'data' is an individual JSON object; wrap the comma-joined
# objects in square brackets so the result is one large JSON array.
data_json_str = "[" + ','.join(data) + "]"
# now, load it into pandas
df = pd.read_json(data_json_str)
df
# -
| misc/getting-to-know.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "83c1ea98-bc26-4c13-a514-62b4d3462931"}
# 
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "d8290bca-6607-42d6-95b2-1c814f91d5b5"}
# # 7. Text Preprocessing Annotators with Spark NLP
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "a9c612ac-aa41-47bc-aa00-dac43bb6ef21"}
import sparknlp
import pandas as pd
print("Spark NLP version", sparknlp.version())
print("Apache Spark version:", spark.version)
spark
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "4c4f42b2-9b7b-4c61-86d9-559e2cf062a3"}
# **Note** Read this article if you want to understand the basic concepts in Spark NLP.
#
# https://towardsdatascience.com/introduction-to-spark-nlp-foundations-and-basic-components-part-i-c83b7629ed59
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "fa5fdc91-a422-42ae-b74f-19062d629e60"}
# ## Annotators and Transformer Concepts
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "22290061-5150-4307-a842-9aeb848033da"}
# In Spark NLP, all Annotators are either Estimators or Transformers as we see in Spark ML. An Estimator in Spark ML is an algorithm which can be fit on a DataFrame to produce a Transformer. E.g., a learning algorithm is an Estimator which trains on a DataFrame and produces a model. A Transformer is an algorithm which can transform one DataFrame into another DataFrame. E.g., an ML model is a Transformer that transforms a DataFrame with features into a DataFrame with predictions.
# In Spark NLP, there are two types of annotators: AnnotatorApproach and AnnotatorModel
# AnnotatorApproach extends Estimators from Spark ML, which are meant to be trained through fit(), and AnnotatorModel extends Transformers which are meant to transform data frames through transform().
# Some of Spark NLP annotators have a Model suffix and some do not. The model suffix is explicitly stated when the annotator is the result of a training process. Some annotators, such as Tokenizer are transformers but do not contain the suffix Model since they are not trained, annotators. Model annotators have a pre-trained() on its static object, to retrieve the public pre-trained version of a model.
# Long story short, if it trains on a DataFrame and produces a model, it’s an AnnotatorApproach; and if it transforms one DataFrame into another DataFrame through some models, it’s an AnnotatorModel (e.g. WordEmbeddingsModel) and it doesn’t take Model suffix if it doesn’t rely on a pre-trained annotator while transforming a DataFrame (e.g. Tokenizer).
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "c58d8fd3-02bd-40fb-84b7-da8873a65d93"}
# !wget -q https://gist.githubusercontent.com/vkocaman/e091605f012ffc1efc0fcda170919602/raw/fae33d25bd026375b2aaf1194b68b9da559c4ac4/annotators.csv
dbutils.fs.cp("file:/databricks/driver/annotators.csv", "dbfs:/")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b4758ee1-ca82-4636-af9e-595fca0496d7"}
import pandas as pd
df = pd.read_csv("annotators.csv")
display(df)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "4a5efe03-26d5-42f1-927f-4ef864e533b4"}
# By convention, there are three possible names:
#
# Approach — Trainable annotator
#
# Model — Trained annotator
#
# nothing — Either a non-trainable annotator with pre-processing
# step or shorthand for a model
#
# So for example, Stemmer doesn’t say Approach nor Model, however, it is a Model. On the other hand, Tokenizer doesn’t say Approach nor Model, but it has a TokenizerModel(). Because it is not “training” anything, but it is doing some preprocessing before converting into a Model.
# When in doubt, please refer to official documentation and API reference.
# Even though we will do many hands-on practices in the following articles, let us give you a glimpse to let you understand the difference between AnnotatorApproach and AnnotatorModel.
# As stated above, Tokenizer is an AnnotatorModel. So we need to call fit() and then transform().
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "2b27c9f4-074b-4225-9cd2-51616c73a2f9"}
# Now let’s see how this can be done in Spark NLP using Annotators and Transformers. Assume that we have the following steps that need to be applied one by one on a data frame.
#
# - Split text into sentences
# - Tokenize
# - Normalize
# - Get word embeddings
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "f825a023-af10-4d45-9e4b-11f593a8ab8a"}
# What’s actually happening under the hood?
#
# When we fit() on the pipeline with Spark data frame (df), its text column is fed into DocumentAssembler() transformer at first and then a new column “document” is created in Document type (AnnotatorType). As we mentioned before, this transformer is basically the initial entry point to Spark NLP for any Spark data frame. Then its document column is fed into SentenceDetector() (AnnotatorApproach) and the text is split into an array of sentences and a new column “sentences” in Document type is created. Then “sentences” column is fed into Tokenizer() (AnnotatorModel) and each sentence is tokenized and a new column “token” in Token type is created. And so on.
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "4178dda5-1331-41e6-a5c4-ca13fb427d66"}
# ### Create Spark Dataframe
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "441e51b3-9864-4427-b6ed-c7e129ec2884"}
text = '<NAME> is a nice guy and lives in New York'
spark_df = spark.createDataFrame([[text]]).toDF("text")
spark_df.show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "7ed2e5ab-d29a-4f34-96bd-3537d254c646"}
from pyspark.sql.types import StringType, IntegerType
# if you want to create a spark datafarme from a list of strings
text_list = ['<NAME> is a nice guy and lives in New York.', '<NAME> is also a nice guy and lives in Gotham City.']
# Build a single-column string DataFrame from a Python list, then rename the column to "text"
# (Spark NLP's DocumentAssembler expects a plain string column as input).
spark.createDataFrame(text_list, StringType()).toDF("text").show(truncate=80)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "aaab5ae4-4ab2-4163-950f-6f1f9fc3a0e1"}
from pyspark.sql import Row
# Alternative: wrap each string in Row(text=...) so the column name is set without toDF().
spark.createDataFrame(list(map(lambda x: Row(text=x), text_list))).show(truncate=80)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "ed0ece6a-4f79-4ffc-bd67-49ed30e65fa4"}
# !wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jupyter/annotation/english/spark-nlp-basics/sample-sentences-en.txt
# Copy the downloaded file from the driver's local filesystem into DBFS (Databricks-only utility).
dbutils.fs.cp("file:/databricks/driver/sample-sentences-en.txt", "dbfs:/")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "4b5f3cef-5e21-4c82-ac9f-7383826813f5"}
# Inspect the local copy with plain Python I/O.
with open('sample-sentences-en.txt') as f:
  print (f.read())
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "1035abd9-f941-464f-946d-f06a2cdafefb"}
# Read the DBFS copy as a DataFrame: one row per line, column renamed to "text".
spark_df = spark.read.text('/sample-sentences-en.txt').toDF('text')
spark_df.show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "1e23cf62-5fb7-4d6a-afd7-8b0e0e147726"}
spark_df.select('text').show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "1e5faeec-e860-47b6-a4c2-d238c36d8e1c"}
# or we can even create a spark dataframe from pandas dataframe
# NOTE(review): `df` is a pandas DataFrame defined in an earlier cell, not visible here.
temp_spark_df = spark.createDataFrame(df)
temp_spark_df.show()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "c01c4ae6-f7d3-4f88-a0da-f06eed0a387a"}
# Register the DataFrame as a temporary SQL view so it can be queried from other language cells.
temp_spark_df.createOrReplaceTempView("table1")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "470c6993-4631-4d7b-a6f7-45e3df3aa5f2"}
# %scala
# The two lines below are Scala, run via the %scala cell magic on Databricks;
# they are not valid Python and only execute inside the notebook environment.
var scalaDF = spark.sql("select * from table1")
scalaDF.show(2)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "c240affb-c520-4f7b-b77c-6b523b2b6b4c"}
# Same query from Python: the temp view is shared across language contexts in the session.
pythonDF = spark.sql("select * from table1")
pythonDF.show(3)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b1afe0d3-c576-42a3-bdc0-dcb3d75fb932"}
# wholeTextFiles reads each matching file as one (path, content) pair; 4 = minimum partitions.
textFiles = spark.sparkContext.wholeTextFiles("./*.txt",4)
spark_df_folder = textFiles.toDF(schema=['path','text'])
spark_df_folder.show()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "fceaeb3d-8167-46f3-9009-2feb21a474ee"}
spark_df_folder.select('text').take(1)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "a1698c43-5e14-4554-859f-85cbde02e950"}
# ### Transformers
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "e341e9fa-bc2b-48e7-bf93-e3111e944a7a"}
# What are we going to do if our DataFrame doesn’t have columns in those type? Here comes transformers. In Spark NLP, we have five different transformers that are mainly used for getting the data in or transform the data from one AnnotatorType to another. Here is the list of transformers:
#
# `DocumentAssembler`: To get through the NLP process, we need to get raw data annotated. This is a special transformer that does this for us; it creates the first annotation of type Document which may be used by annotators down the road.
#
# `TokenAssembler`: This transformer reconstructs a Document type annotation from tokens, usually after these have been lemmatized, normalized, spell checked, etc., to use this document annotation in further annotators.
#
# `Doc2Chunk`: Converts DOCUMENT type annotations into CHUNK type with the contents of a chunkCol.
#
# `Chunk2Doc` : Converts a CHUNK type column back into DOCUMENT. Useful when trying to re-tokenize or do further analysis on a CHUNK result.
#
# `Finisher`: Once we have our NLP pipeline ready to go, we might want to use our annotation results somewhere else where it is easy to use. The Finisher outputs annotation(s) values into a string.
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "21249bdd-ab61-4808-a6d1-b9afc2efaec7"}
# each annotator accepts certain types of columns and outputs new columns in another type (we call this AnnotatorType).
#
# In Spark NLP, we have the following types:
#
# `Document`, `token`, `chunk`, `pos`, `word_embeddings`, `date`, `entity`, `sentiment`, `named_entity`, `dependency`, `labeled_dependency`.
#
# That is, the DataFrame you have needs to have a column from one of these types if that column will be fed into an annotator; otherwise, you’d need to use one of the Spark NLP transformers.
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "37f7d22b-e223-4631-b9a6-a44fcfa34a13"}
# ## Document Assembler
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "1faa3027-3ff6-47ae-90cc-5427cbe34a68"}
# In Spark NLP, we have five different transformers that are mainly used for getting the data in or transform the data from one AnnotatorType to another.
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "1d4da2d2-dd47-4a1a-bae4-7ebc8f6a6126"}
# That is, the DataFrame you have needs to have a column from one of these types if that column will be fed into an annotator; otherwise, you’d need to use one of the Spark NLP transformers. Here is the list of transformers: DocumentAssembler, TokenAssembler, Doc2Chunk, Chunk2Doc, and the Finisher.
#
# So, let’s start with DocumentAssembler(), an entry point to Spark NLP annotators.
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "81481bba-c77a-4d87-91b4-a01471781351"}
# To get through the process in Spark NLP, we need to get raw data transformed into Document type at first.
#
# DocumentAssembler() is a special transformer that does this for us; it creates the first annotation of type Document which may be used by annotators down the road.
#
# DocumentAssembler() comes from sparknlp.base class and has the following settable parameters. See the full list here and the source code here.
#
# `setInputCol()` -> the name of the column that will be converted. We can specify only one column here. It can read either a String column or an Array[String]
#
# `setOutputCol()` -> optional : the name of the column in Document type that is generated. We can specify only one column here. Default is ‘document’
#
# `setIdCol()` -> optional: String type column with id information
#
# `setMetadataCol()` -> optional: Map type column with metadata information
#
# `setCleanupMode()` -> optional: Cleaning up options,
#
# possible values:
# ```
# disabled: Source kept as original. This is a default.
# inplace: removes new lines and tabs.
# inplace_full: removes new lines and tabs but also those which were converted to strings (i.e. \n)
# shrink: removes new lines and tabs, plus merging multiple spaces and blank lines to a single space.
# shrink_full: remove new lines and tabs, including stringified values, plus shrinking spaces and blank lines.
# ```
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "27e5489c-021c-43e9-909e-2ca24b4ca717"}
spark_df.show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "7d114d6e-4a18-4328-bde7-07457e7d5cfa"}
from sparknlp.base import *
# DocumentAssembler is the entry point of every Spark NLP pipeline: it reads the raw
# "text" column and emits a "document" column of Document-type annotations.
# cleanupMode "shrink" removes new lines and tabs and merges repeated spaces/blank lines.
documentAssembler = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")\
    .setCleanupMode("shrink")
doc_df = documentAssembler.transform(spark_df)
doc_df.show(truncate=30)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "cf4610a4-52c9-4b88-9cee-18f4b85a5f70"}
# At first, we define DocumentAssembler with desired parameters and then transform the data frame with it. The most important point to pay attention to here is that you need to use a String or String[Array] type column in .setInputCol(). So it doesn’t have to be named as text. You just use the column name as it is.
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "4fab2c15-c99c-47c5-b7df-cf7929ef04a2"}
doc_df.printSchema()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "935ed9c0-d651-4fdf-8e51-33058a091a7b"}
# Access the annotation's struct fields with dotted paths: result text plus begin/end offsets.
doc_df.select('document.result','document.begin','document.end').show(truncate=False)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "9a49d269-cb03-4a75-b53a-ac6e5cfd63f3"}
# The new column is in an array of struct type and has the parameters shown above. The annotators and transformers all come with universal metadata that would be filled down the road depending on the annotators being used. Unless you want to append other Spark NLP annotators to DocumentAssembler(), you don’t need to know what all these parameters mean for now. So we will talk about them in the following articles. You can access all these parameters with {column name}.{parameter name}.
#
# Let’s print out the first item’s result.
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b04729d6-880e-49e9-b4b1-9d5da6b72f35"}
doc_df.select("document.result").take(1)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "d4665e32-9eed-4391-be36-93c08d702f78"}
# If we would like to flatten the document column, we can do as follows.
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "7233eacd-3dce-4b79-8066-96d533739863"}
import pyspark.sql.functions as F
# explode() turns the array of annotation structs into one row per annotation;
# "tmp.*" then promotes each struct field to its own column.
doc_df.withColumn(
    "tmp",
    F.explode("document"))\
    .select("tmp.*")\
    .show(truncate=False)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "fdbcde6c-785c-4050-9adf-5d6748af4358"}
# ## Sentence Detector
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "d462f297-0501-4f88-8dea-d839cf684cc5"}
# Finds sentence bounds in raw text.
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "636758df-8118-4bb2-bfd9-df890d0ad89a"}
# `setCustomBounds(string)`: Custom sentence separator text e.g. `["\n"]`
#
# `setUseCustomOnly(bool)`: Use only custom bounds without considering those of Pragmatic Segmenter. Defaults to false. Needs customBounds.
#
# `setUseAbbreviations(bool)`: Whether to consider abbreviation strategies for better accuracy but slower performance. Defaults to true.
#
# `setExplodeSentences(bool)`: Whether to split sentences into different Dataset rows. Useful for higher parallelism in fat rows. Defaults to false.
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "51602eca-0169-44e1-b9b3-96d1f19a859c"}
from sparknlp.annotator import *
# we feed the document column coming from Document Assembler
sentenceDetector = SentenceDetector()\
    .setInputCols(['document'])\
    .setOutputCol('sentences')
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b1b6854e-6ebe-4188-aa3f-ce33cc0fc59d"}
sent_df = sentenceDetector.transform(doc_df)
sent_df.show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "ea255770-5d50-4dc0-a29c-e9a5dbd92392"}
sent_df.select('sentences').take(3)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "3f6a6e0a-3b49-46b4-b59f-d963bd42de2e"}
# Multi-sentence sample (clinical-style text, kept verbatim as test data for sentence splitting).
text ='The patient was prescribed 1 capsule of Advil for 5 days. He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night, 12 units of insulin lispro with meals, and metformin 1000 mg two times a day. It was determined that all SGLT2 inhibitors should be discontinued indefinitely fro 3 months.'
text
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b91a6f48-de9e-4713-b9e6-5f86b9ee4bd4"}
# A one-row DataFrame: note the nested list [[text]] so the single string becomes one row/column.
spark_df = spark.createDataFrame([[text]]).toDF("text")
spark_df.show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "bcead8bf-d5a1-4442-9637-e14ecb4aae83"}
spark_df.show(truncate=50)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "bec84776-b53f-4550-a408-6fd91ad23963"}
# Re-run the two stages manually on the new DataFrame (no Pipeline object yet).
doc_df = documentAssembler.transform(spark_df)
sent_df = sentenceDetector.transform(doc_df)
sent_df.show(truncate=True)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "7877d2ba-ba32-4d70-a176-ae3f18476979"}
sent_df.select('sentences.result').take(1)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "6693944c-4d63-4146-80c4-8106e259daa7"}
# NOTE(review): this mutates the shared sentenceDetector in place — every later
# transform with it will now emit one DataFrame row per sentence.
sentenceDetector.setExplodeSentences(True)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "8d9b279e-9f63-4ecd-b3b7-11519a7b2922"}
sent_df = sentenceDetector.transform(doc_df)
sent_df.show(truncate=50)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "ef9d1858-d913-4379-90af-e04ce5b2f343"}
sent_df.select('sentences.result').show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "651a5f4c-0725-467b-8aa3-8b2e6e7edf99"}
from pyspark.sql import functions as F
sent_df.select(F.explode('sentences.result')).show(truncate=False)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "2637ac02-a310-48ee-838e-6305e764e130"}
# ### Sentence Detector DL
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "afb4d447-c30b-4b52-a134-00b9cc25eb29"}
# Deep-learning sentence splitter; pretrained() downloads the English model on first use.
sentencerDL = SentenceDetectorDLModel\
  .pretrained("sentence_detector_dl", "en") \
  .setInputCols(["document"]) \
  .setOutputCol("sentences")
sent_dl_df = sentencerDL.transform(doc_df)
sent_dl_df.select(F.explode('sentences.result')).show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "56e67e59-5027-4ca1-9a20-e01d2388e5f2"}
# Build two parallel pipelines — rule-based vs. DL sentence detection — to compare outputs.
documenter = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")
sentenceDetector = SentenceDetector()\
    .setInputCols(['document'])\
    .setOutputCol('sentences')
sentencerDL = SentenceDetectorDLModel\
  .pretrained("sentence_detector_dl", "en") \
  .setInputCols(["document"]) \
  .setOutputCol("sentences")
sd_pipeline = PipelineModel(stages=[documenter, sentenceDetector])
# LightPipeline runs the fitted pipeline on plain Python strings, without a DataFrame.
sd_model = LightPipeline(sd_pipeline)
# DL version
sd_dl_pipeline = PipelineModel(stages=[documenter, sentencerDL])
sd_dl_model = LightPipeline(sd_dl_pipeline)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "3a6e0168-3739-48ce-9916-bca13bb76637"}
# Deliberately messy text (missing spaces after periods, odd separators) to stress both splitters.
text = """John loves Mary.Mary loves Peter
Peter loves Helen .Helen loves John;
Total: four people involved."""
# Print sentence index, character offsets and text for each detected sentence (rule-based).
for anno in sd_model.fullAnnotate(text)[0]["sentences"]:
  print("{}\t{}\t{}\t{}".format(
    anno.metadata["sentence"], anno.begin, anno.end, anno.result))
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "d5e9c00c-4af5-484c-b968-ce61b7b54208"}
# Same report for the DL detector, for side-by-side comparison.
for anno in sd_dl_model.fullAnnotate(text)[0]["sentences"]:
  print("{}\t{}\t{}\t{}".format(
    anno.metadata["sentence"], anno.begin, anno.end, anno.result))
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "e51ef60b-828d-4fe7-9bed-4d48022bb73d"}
# ## Tokenizer
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "3f1954c2-4e89-4103-8d60-273ce7c98ce4"}
# Identifies tokens with tokenization open standards. It is an **Annotator Approach, so it requires .fit()**.
#
# A few rules will help customizing it if defaults do not fit user needs.
#
# setExceptions(StringArray): List of tokens to not alter at all. Allows composite tokens like two worded tokens that the user may not want to split.
#
# `addException(String)`: Add a single exception
#
# `setExceptionsPath(String)`: Path to txt file with list of token exceptions
#
# `caseSensitiveExceptions(bool)`: Whether to follow case sensitiveness for matching exceptions in text
#
# `contextChars(StringArray)`: List of 1 character string to rip off from tokens, such as parenthesis or question marks. Ignored if using prefix, infix or suffix patterns.
#
# `splitChars(StringArray)`: List of 1 character string to split tokens inside, such as hyphens. Ignored if using infix, prefix or suffix patterns.
#
# `splitPattern (String)`: pattern to separate from the inside of tokens. takes priority over splitChars.
# `setTargetPattern`: Basic regex rule to identify a candidate for tokenization. Defaults to \\S+ which means anything not a space
#
# `setSuffixPattern`: Regex to identify subtokens that are in the end of the token. Regex has to end with \\z and must contain groups (). Each group will become a separate token within the prefix. Defaults to non-letter characters. e.g. quotes or parenthesis
#
# `setPrefixPattern`: Regex to identify subtokens that come in the beginning of the token. Regex has to start with \\A and must contain groups (). Each group will become a separate token within the prefix. Defaults to non-letter characters. e.g. quotes or parenthesis
#
# `addInfixPattern`: Add an extension pattern regex with groups to the top of the rules (will target first, from more specific to the more general).
#
# `minLength`: Set the minimum allowed length for each token
#
# `maxLength`: Set the maximum allowed length for each token
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "65f5a81d-44bd-43cc-826a-028db41575fd"}
tokenizer = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "8346fed8-5d81-4d91-872a-b56f28039d31"}
# Sample text with a hyphenated word, parentheses and a multi-word name to exercise the rules below.
text = '<NAME> (Spiderman) is a nice guy and lives in New York but has no e-mail!'
spark_df = spark.createDataFrame([[text]]).toDF("text")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "efbfc338-bbde-48df-9935-8dfd56284df8"}
# Tokenizer is an AnnotatorApproach, so it needs .fit() before it can transform.
doc_df = documentAssembler.transform(spark_df)
token_df = tokenizer.fit(doc_df).transform(doc_df)
token_df.show(truncate=50)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "3c8ea348-b8f7-4981-ae6b-2b60a5a93327"}
token_df.select('token.result').take(1)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "bd873cb6-8dfa-4710-8929-01af041c67d2"}
# Customized tokenizer: split inside tokens on '-', strip '?', '!', '(' from token
# edges, and keep "New York" as a single token via the exception list.
tokenizer = Tokenizer()\
    .setInputCols(["document"])\
    .setOutputCol("token")\
    .setSplitChars(['-'])\
    .setContextChars(['?', '!', '('])\
    .addException("New York")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "4a3ac2fc-3fee-4f1b-be68-f53f78d56f41"}
token_df = tokenizer.fit(doc_df).transform(doc_df)
token_df.select('token.result').take(1)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "a9ec30ed-dd3f-4e6a-857d-3994504ec4a3"}
# ## Regex Tokenizer
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "66ae6b55-7ed2-4a4e-b7be-77d76eaadaf4"}
from pyspark.sql.types import StringType
# Sample line mixing dates, prices and punctuation to show regex-driven splitting.
content = "1. T1-T2 DATE**[12/24/13] $1.99 () (10/12), ph+ 90%"
# Split on whitespace, or immediately before/after any of the listed punctuation chars
# (lookahead/lookbehind keep the punctuation as its own token).
pattern = "\\s+|(?=[-.:;*+,$&%\\[\\]])|(?<=[-.:;*+,$&%\\[\\]])"
df = spark.createDataFrame([content], StringType()).withColumnRenamed("value", "text")
documenter = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")
sentenceDetector = SentenceDetector()\
    .setInputCols(['document'])\
    .setOutputCol('sentence')
# RegexTokenizer splits purely by the pattern, unlike the rule-based Tokenizer above.
regexTokenizer = RegexTokenizer() \
    .setInputCols(["sentence"]) \
    .setOutputCol("regexToken") \
    .setPattern(pattern) \
    .setPositionalMask(False)
docPatternRemoverPipeline = Pipeline().setStages([
        documenter,
        sentenceDetector,
        regexTokenizer])
result = docPatternRemoverPipeline.fit(df).transform(df)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "589c559c-ad93-4795-adb1-c5a2513c76f0"}
result.show(10,30)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "7fc00b44-d665-4c0d-a42a-802f377c3542"}
import pyspark.sql.functions as F
# One token per pandas row, for easy inspection in the notebook.
result_df = result.select(F.explode('regexToken.result').alias('regexToken')).toPandas()
result_df
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "2173234d-6b4b-4b8a-a686-ca95ba128505"}
# ## Stacking Spark NLP Annotators in Spark ML Pipeline
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "38d817ef-8da6-4fbb-b79d-9aea8d093b0b"}
# Spark NLP provides an easy API to integrate with Spark ML Pipelines and all the Spark NLP annotators and transformers can be used within Spark ML Pipelines. So, it’s better to explain Pipeline concept through Spark ML official documentation.
#
# What is a Pipeline anyway? In machine learning, it is common to run a sequence of algorithms to process and learn from data.
#
# Apache Spark ML represents such a workflow as a Pipeline, which consists of a sequence of PipelineStages (Transformers and Estimators) to be run in a specific order.
#
# In simple terms, a pipeline chains multiple Transformers and Estimators together to specify an ML workflow. We use Pipeline to chain multiple Transformers and Estimators together to specify our machine learning workflow.
#
# The figure below is for the training time usage of a Pipeline.
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "de662b8f-8bd2-4a82-bd7e-4da79b32f19d"}
# A Pipeline is specified as a sequence of stages, and each stage is either a Transformer or an Estimator. These stages are run in order, and the input DataFrame is transformed as it passes through each stage. That is, the data are passed through the fitted pipeline in order. Each stage’s transform() method updates the dataset and passes it to the next stage. With the help of Pipelines, we can ensure that training and test data go through identical feature processing steps.
#
# Now let’s see how this can be done in Spark NLP using Annotators and Transformers. Assume that we have the following steps that need to be applied one by one on a data frame.
#
# - Split text into sentences
# - Tokenize
#
# And here is how we code this pipeline up in Spark NLP.
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "ad6d45a0-f43f-488b-97f9-6b50955e4c1b"}
from pyspark.ml import Pipeline
# Three-stage pipeline: raw text -> document -> sentences -> tokens.
documentAssembler = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")
sentenceDetector = SentenceDetector()\
    .setInputCols(['document'])\
    .setOutputCol('sentences')
tokenizer = Tokenizer() \
    .setInputCols(["sentences"]) \
    .setOutputCol("token")
nlpPipeline = Pipeline(stages=[
    documentAssembler,
    sentenceDetector,
    tokenizer
 ])
# Fitting on an empty DataFrame works because none of these stages learn from data;
# it just produces a reusable PipelineModel.
empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "1cf7b856-a09a-460a-9b9d-da3cbf9d6f66"}
spark_df = spark.read.text('/sample-sentences-en.txt').toDF('text')
spark_df.show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b387387e-9366-4bf2-9c9d-d6175fc9fb22"}
result = pipelineModel.transform(spark_df)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "a4efe029-9836-435c-8a58-fa8b99041a51"}
result.show(truncate=20)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "53306690-28e3-4bdb-a5fd-237e06920b4b"}
result.printSchema()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "e13d8428-17d0-40a2-a0ee-040e97856e38"}
result.select('sentences.result').take(3)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "45244802-e534-4f40-baf0-2b2f3841ce8b"}
result.select('token').take(3)[2]
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "e2cd9251-310f-44fe-873a-c8ae690431c1"}
# ## Normalizer
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "c55d1c55-69d9-480f-80e7-32103be3fcd7"}
# Removes all dirty characters from text following a regex pattern and transforms words based on a provided dictionary
#
# `setCleanupPatterns(patterns)`: Regular expressions list for normalization, defaults [^A-Za-z]
#
# `setLowercase(value)`: lowercase tokens, default false
#
# `setSlangDictionary(path)`: txt file with delimited words to be transformed into something else
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b33157cf-156f-4b47-a6e0-ac8a42a174ec"}
import string
# Show the punctuation characters the cleanup pattern below will strip.
string.punctuation
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b5bff50f-b163-4fe2-9a77-3b05661ce968"}
from sparknlp.base import *
from sparknlp.annotator import *
# Pipeline: raw text -> document -> tokens -> lowercased, punctuation-free tokens.
documentAssembler = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")
tokenizer = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token")
# FIX: the cleanup regex is now a raw string. In a plain literal, "\w" and "\d" are
# invalid escape sequences (DeprecationWarning, SyntaxWarning on newer Python);
# the resulting value is unchanged, so normalization behaves exactly as before.
normalizer = Normalizer() \
    .setInputCols(["token"]) \
    .setOutputCol("normalized")\
    .setLowercase(True)\
    .setCleanupPatterns([r"[^\w\d\s]"]) # remove punctuations (keep alphanumeric chars)
# if we don't set CleanupPatterns, it will only keep alphabet letters ([^A-Za-z])
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "03ccdcc5-0da4-4c38-802a-b88e93611b0e"}
nlpPipeline = Pipeline(stages=[
     documentAssembler,
     tokenizer,
     normalizer
 ])
# None of these stages learn from data, so fitting on an empty frame just builds the model.
empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "193245f2-2a58-440b-a8ef-1d0eee68ae9c"}
pipelineModel.stages
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "68490423-96d4-4c14-9a6a-560a7932a945"}
result = pipelineModel.transform(spark_df)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "f7b6d42b-ea24-4e5c-b306-d14d57daa668"}
result.show(truncate=20)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "7c603748-1235-452c-afef-0d987622ba34"}
result.select('token').take(3)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "befba439-67c8-450c-b7b8-bd3e311c839e"}
result.select('normalized.result').take(3)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "0b890b23-04b9-4b6e-8c86-139a822ecabb"}
result.select('normalized').take(3)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "73324c77-c998-4bba-9f3b-5c90292f0110"}
# ## Document Normalizer
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b190b712-9b9d-4985-a8f5-1460f9acf6b9"}
# The DocumentNormalizer is an annotator that can be used after the DocumentAssembler to normalize documents once they have been processed and indexed.
# It takes as input annotated documents of type Array[AnnotatorType.DOCUMENT] and gives as output annotated documents of type AnnotatorType.DOCUMENT.
#
# Parameters are:
# - inputCol: input column name string which targets a column of type Array(AnnotatorType.DOCUMENT).
# - outputCol: output column name string which targets a column of type AnnotatorType.DOCUMENT.
# - action: action string to perform applying regex patterns, i.e. (clean | extract). Default is "clean".
# - cleanupPatterns: normalization regex patterns which match will be removed from document. Default is "<[^>]*>" (e.g., it removes all HTML tags).
# - replacement: replacement string to apply when regexes match. Default is " ".
# - lowercase: whether to convert strings to lowercase. Default is False.
# - removalPolicy: removalPolicy to remove patterns from text with a given policy. Valid policy values are: "all", "pretty_all", "first", "pretty_first". Defaults is "pretty_all".
# - encoding: file encoding to apply on normalized documents. Supported encodings are: UTF_8, UTF_16, US_ASCII, ISO-8859-1, UTF-16BE, UTF-16LE. Default is "UTF-8".
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "7841718d-c77e-49fd-810c-43a74289c066"}
# Raw HTML sample (kept verbatim) used to demonstrate tag stripping.
text = '''
<div id="theworldsgreatest" class='my-right my-hide-small my-wide toptext' style="font-family:'Segoe UI',Arial,sans-serif">
THE WORLD'S LARGEST WEB DEVELOPER SITE
<h1 style="font-size:300%;">THE WORLD'S LARGEST WEB DEVELOPER SITE</h1>
<p style="font-size:160%;">Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum..</p>
</div>
</div>'''
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "9648ec65-bac0-4c6a-807a-b104400d86fe"}
spark_df = spark.createDataFrame([[text]]).toDF("text")
spark_df.show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "5ccc6e22-4075-44aa-9b7c-fbb26d063f02"}
# Default-configured normalizer; extractParamMap() lists every parameter and its default.
documentNormalizer = DocumentNormalizer() \
    .setInputCols("document") \
    .setOutputCol("normalizedDocument")
documentNormalizer.extractParamMap()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "0fb4fda8-30b0-4512-84b0-93e37d72a29c"}
documentAssembler = DocumentAssembler() \
    .setInputCol('text') \
    .setOutputCol('document')
#default
# "<[^>]*>" matches any HTML/XML tag, so "clean" removes all markup from the document.
cleanUpPatterns = ["<[^>]*>"]
documentNormalizer = DocumentNormalizer() \
    .setInputCols("document") \
    .setOutputCol("normalizedDocument") \
    .setAction("clean") \
    .setPatterns(cleanUpPatterns) \
    .setReplacement(" ") \
    .setPolicy("pretty_all") \
    .setLowercase(True)
docPatternRemoverPipeline = Pipeline() \
  .setStages([
    documentAssembler,
    documentNormalizer])
# Stages learn nothing from data, so fitting on an empty frame just builds the model.
empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = docPatternRemoverPipeline.fit(empty_df)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "c31adf33-a2c1-4849-9430-906a00ab9735"}
result = pipelineModel.transform(spark_df)
result.select('normalizedDocument.result').show(truncate=False)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "df4f86a8-383c-4bb4-a2cf-c745863498b5"}
# for more examples : https://github.com/JohnSnowLabs/spark-nlp-workshop/blob/master/jupyter/annotation/english/document-normalizer/document_normalizer_notebook.ipynb
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "f5df9b65-c8ac-44f9-af68-1b9de1a9df7b"}
# ## Stopwords Cleaner
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "5f8d048a-296c-45e7-a7f2-ab169a28674f"}
# This annotator excludes from a sequence of strings (e.g. the output of a Tokenizer, Normalizer, Lemmatizer, and Stemmer) and drops all the stop words from the input sequences.
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "139001f8-51d6-4f59-aa43-c61219cb5c9e"}
# Functions:
#
# `setStopWords`: The words to be filtered out. Array[String]
#
# `setCaseSensitive`: Whether to do a case sensitive comparison over the stop words.
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "9b4ac841-174f-4949-b3c8-7598aec661d4"}
# Remove English stop words from the token stream, case-insensitively.
# FIX: the original ended these method chains with a dangling "\" line
# continuation, which splices the following line onto the statement and is
# a syntax error in plain-script form; the stray backslashes are removed.
stopwords_cleaner = StopWordsCleaner() \
    .setInputCols("token") \
    .setOutputCol("cleanTokens") \
    .setCaseSensitive(False)
# .setStopWords(["no", "without"]) (e.g. read a list of words from a txt)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "6bb1fc7c-4fdb-4e92-9bc3-f62ce004cd26"}
# Show the default stop-word list.
stopwords_cleaner.getStopWords()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "752d0ab1-4969-4caf-810c-772f522f8903"}
documentAssembler = DocumentAssembler() \
    .setInputCol("text") \
    .setOutputCol("document")

tokenizer = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token")

stopwords_cleaner = StopWordsCleaner() \
    .setInputCols("token") \
    .setOutputCol("cleanTokens") \
    .setCaseSensitive(False)

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    stopwords_cleaner
])

# Fit on an empty frame: no trainable stage in this pipeline.
empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "ed88bd54-b6d3-458d-b620-1a816d9ca120"}
spark_df = spark.read.text('/sample-sentences-en.txt').toDF('text')
result = pipelineModel.transform(spark_df)
result.show()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "9289b548-8516-4576-a13a-f13e0ebed5e9"}
result.select('cleanTokens.result').take(1)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "4eaf8d13-a400-4836-908f-86c6ef7042a2"}
# ## Token Assembler
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "d8190cdf-30da-4013-9e17-75302b66d086"}
documentAssembler = DocumentAssembler() \
    .setInputCol("text") \
    .setOutputCol("document")

sentenceDetector = SentenceDetector() \
    .setInputCols(['document']) \
    .setOutputCol('sentences')

tokenizer = Tokenizer() \
    .setInputCols(["sentences"]) \
    .setOutputCol("token")

# FIX: the next two chains originally ended with a dangling "\" line
# continuation, which splices the following statement onto the chain (a
# syntax error in plain-script form); the stray backslashes are removed.
normalizer = Normalizer() \
    .setInputCols(["token"]) \
    .setOutputCol("normalized") \
    .setLowercase(False)

stopwords_cleaner = StopWordsCleaner() \
    .setInputCols("normalized") \
    .setOutputCol("cleanTokens") \
    .setCaseSensitive(False)

# Stitch the cleaned tokens back into one assembled document per sentence.
tokenassembler = TokenAssembler() \
    .setInputCols(["sentences", "cleanTokens"]) \
    .setOutputCol("clean_text")

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    sentenceDetector,
    tokenizer,
    normalizer,
    stopwords_cleaner,
    tokenassembler
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
result = pipelineModel.transform(spark_df)
result.show()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "c851e449-96da-4ec9-847e-efed42b1a699"}
result.select('clean_text').take(1)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "f4e39abd-85ba-46ca-b846-95b789c13f9c"}
# if we use TokenAssembler().setPreservePosition(True), the original borders will be preserved (dropped & unwanted chars will be replaced by spaces)
result.select('clean_text').take(1)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "6b147593-8c32-4d5f-b7c8-b1bc3a6bc121"}
# NOTE(review): F is used here but "import pyspark.sql.functions as F" only
# appears in the next cell — this works only if that import ran in an
# earlier session/cell; verify execution order.
result.select('text', F.explode('clean_text.result').alias('clean_text')).show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b2c25178-daf8-4a24-9142-ffbd48e70377"}
import pyspark.sql.functions as F

# Flatten each clean_text annotation into begin/end offsets, the assembled
# string, and the sentence index stored in the metadata map.
result.withColumn(
    "tmp",
    F.explode("clean_text")) \
    .select("tmp.*").select("begin","end","result","metadata.sentence").show(truncate = False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "0ac98e0a-a1b9-4167-89dd-f7531dfa6e73"}
result.select('text', F.explode('clean_text.result').alias('clean_text')).toPandas()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b64bbe0f-2796-461c-acc8-dda7a8206785"}
# if we hadn't used Sentence Detector, this would be what we got. (tokenizer gets document instead of sentences column)
tokenizer = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token")

tokenassembler = TokenAssembler() \
    .setInputCols(["document", "cleanTokens"]) \
    .setOutputCol("clean_text")

# Same pipeline as above, but without SentenceDetector; reuses the
# `normalizer` and `stopwords_cleaner` defined earlier.
nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    normalizer,
    stopwords_cleaner,
    tokenassembler
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
result = pipelineModel.transform(spark_df)
result.select('text', 'clean_text.result').show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "bd67a439-994b-4129-a404-ae938f67dfba"}
result.withColumn(
    "tmp",
    F.explode("clean_text")) \
    .select("tmp.*").select("begin","end","result","metadata.sentence").show(truncate = False)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "61ea7dad-7090-4972-ad9b-0760aefc10fa"}
# **IMPORTANT NOTE:**
#
# If you have some other steps & annotators in your pipeline that will need to use the tokens from cleaned text (assembled tokens), you will need to tokenize the processed text again as the original text is probably changed completely.
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "2c97f60c-d34d-4f46-8a6c-171bbed12917"}
# ## Stemmer
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "da6e00c8-7673-4dfb-9f17-24234bbe79d1"}
# Returns hard-stems out of words with the objective of retrieving the meaningful part of the word
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "955e8d19-b379-438c-a6a0-b25691c58341"}
# Stemmer: reduces each token to a hard stem.
stemmer = Stemmer() \
    .setInputCols(["token"]) \
    .setOutputCol("stem")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "a018531d-aeb0-49fb-b250-2870ebfbac3a"}
documentAssembler = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

tokenizer = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token")

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    stemmer
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "03729060-747f-4dc2-8bc9-e76c8a64f4a3"}
result = pipelineModel.transform(spark_df)
result.show()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "1941f1c0-fcbb-46fa-9bbe-14d0fc33671b"}
result.select('stem.result').show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "77f6f18d-6b97-49e9-a312-89e930e799e3"}
import pyspark.sql.functions as F

# Pair each token with its stem, side by side, as a pandas frame.
result_df = result.select(F.explode(F.arrays_zip(result.token.result, result.stem.result)).alias("cols")) \
    .select(F.expr("cols['0']").alias("token"),
            F.expr("cols['1']").alias("stem")).toPandas()

result_df.head(10)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "977a78c4-4fb8-47b8-822c-49879c4d36af"}
# ## Lemmatizer
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "237e8d97-38b4-43d2-a129-b88c1467b80d"}
# Retrieves lemmas out of words with the objective of returning a base dictionary word
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "9df584d5-0f47-4ece-ae92-6f7015b5a3f5"}
# Download a lemma dictionary and copy it into DBFS for the Lemmatizer.
# !wget -q https://raw.githubusercontent.com/mahavivo/vocabulary/master/lemmas/AntBNC_lemmas_ver_001.txt
dbutils.fs.cp("file:/databricks/driver/AntBNC_lemmas_ver_001.txt", "dbfs:/")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "c9f61716-a28c-4260-842c-1079c6133fe3"}
# Dictionary lines: key separated from values by "->", values tab-delimited.
lemmatizer = Lemmatizer() \
    .setInputCols(["token"]) \
    .setOutputCol("lemma") \
    .setDictionary("file:/databricks/driver/AntBNC_lemmas_ver_001.txt", value_delimiter ="\t", key_delimiter = "->")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "ea94e671-ac29-40ec-85e8-620a671c8991"}
documentAssembler = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

tokenizer = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token")

stemmer = Stemmer() \
    .setInputCols(["token"]) \
    .setOutputCol("stem")

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    stemmer,
    lemmatizer
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "4bb865d3-571a-425e-895a-ee50be1c9a59"}
result = pipelineModel.transform(spark_df)
result.show()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "dfeac277-b573-449c-b2c9-efca655233bb"}
result.select('lemma.result').show(truncate=False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "33a9aad9-5cf2-4691-b798-c62e2e6bdc58"}
# Compare token vs. stem vs. lemma side by side.
result_df = result.select(F.explode(F.arrays_zip(result.token.result, result.stem.result, result.lemma.result)).alias("cols")) \
    .select(F.expr("cols['0']").alias("token"),
            F.expr("cols['1']").alias("stem"),
            F.expr("cols['2']").alias("lemma")).toPandas()

result_df.head(10)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b621358f-f88d-4b2a-8b1f-cbe8a35781a9"}
# ## NGram Generator
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "ff8094e6-b451-417e-a3b0-4ec3fd97269c"}
# NGramGenerator annotator takes as input a sequence of strings (e.g. the output of a `Tokenizer`, `Normalizer`, `Stemmer`, `Lemmatizer`, and `StopWordsCleaner`).
#
# The parameter n is used to determine the number of terms in each n-gram. The output will consist of a sequence of n-grams where each n-gram is represented by a space-delimited string of n consecutive words with annotatorType `CHUNK` same as the Chunker annotator.
#
# Functions:
#
# `setN:` number elements per n-gram (>=1)
#
# `setEnableCumulative:` whether to calculate just the actual n-grams or all n-grams from 1 through n
#
# `setDelimiter:` Glue character used to join the tokens
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "71452e5b-c6c4-4078-8e10-f969543b1122"}
# N-gram generation joined with "_" instead of the default space delimiter.
ngrams_cum = NGramGenerator() \
    .setInputCols(["token"]) \
    .setOutputCol("ngrams") \
    .setN(3) \
    .setEnableCumulative(True)\
    .setDelimiter("_") # Default is space

# With setEnableCumulative(True), setN(3) produces all 1-, 2- and 3-grams
# (see the cumulative parameter description above).
nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    ngrams_cum
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
result = pipelineModel.transform(spark_df)
result.select('ngrams.result').show(truncate=150)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "cc34dd5e-94ce-435e-9134-f877b1a74734"}
# Non-cumulative variant: only the 3-grams themselves.
ngrams_nonCum = NGramGenerator() \
    .setInputCols(["token"]) \
    .setOutputCol("ngrams_v2") \
    .setN(3) \
    .setEnableCumulative(False)\
    .setDelimiter("_") # Default is space

ngrams_nonCum.transform(result).select('ngrams_v2.result').show(truncate=150)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "11f195c3-7f63-494b-aff6-3a93e5be7b33"}
# ## TextMatcher
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "1e42e00d-8f89-4d12-b91e-cd2b8eaf60d1"}
# Annotator to match entire phrases (by token) provided in a file against a Document
#
# Functions:
#
# `setEntities(path, format, options)`: Provides a file with phrases to match. Default: Looks up path in configuration.
#
# `path`: a path to a file that contains the entities in the specified format.
#
# `readAs`: the format of the file, can be one of {ReadAs.LINE_BY_LINE, ReadAs.SPARK_DATASET}. Defaults to LINE_BY_LINE.
#
# `options`: a map of additional parameters. Defaults to {“format”: “text”}.
#
# `entityValue` : Value for the entity metadata field to indicate which chunk comes from which textMatcher when there are multiple textMatchers.
#
# `mergeOverlapping` : whether to merge overlapping matched chunks. Defaults false
#
# `caseSensitive` : whether to match regardless of case. Defaults true
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "887ded9d-b002-4570-8111-fcb6726a885a"}
# Download a news dataset and copy it into DBFS.
# !wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Public/data/news_category_train.csv
dbutils.fs.cp("file:/databricks/driver/news_category_train.csv", "dbfs:/")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "8d0fdb5d-9c59-4b50-a1cf-850eb95aed1e"}
news_df = spark.read \
    .option("header", True) \
    .csv("/news_category_train.csv")

news_df.show(5, truncate=50)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "45fff651-b7c3-4c00-a3ce-810dbe2f896a"}
# write the target entities to txt file (one phrase per line)
entities = ['Wall Street', 'USD', 'stock', 'NYSE']
with open ('financial_entities.txt', 'w') as f:
    for i in entities:
        f.write(i+'\n')

entities = ['soccer', 'world cup', 'Messi', 'FC Barcelona']
with open ('sport_entities.txt', 'w') as f:
    for i in entities:
        f.write(i+'\n')
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "21e4b216-9f70-4352-bf4b-7155aa1b420c"}
dbutils.fs.cp("file:/databricks/driver/financial_entities.txt", "dbfs:/")
dbutils.fs.cp("file:/databricks/driver/sport_entities.txt", "dbfs:/")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "9a6df1f5-05cc-43cc-8fa2-5bed97fc083c"}
documentAssembler = DocumentAssembler()\
    .setInputCol("description")\
    .setOutputCol("document")

tokenizer = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token")

# Two TextMatchers over the same tokens; entityValue labels which matcher
# produced each chunk (useful when multiple matchers run in one pipeline).
financial_entity_extractor = TextMatcher() \
    .setInputCols(["document",'token'])\
    .setOutputCol("financial_entities")\
    .setEntities("file:/databricks/driver/financial_entities.txt")\
    .setCaseSensitive(False)\
    .setEntityValue('financial_entity')

sport_entity_extractor = TextMatcher() \
    .setInputCols(["document",'token'])\
    .setOutputCol("sport_entities")\
    .setEntities("file:/databricks/driver/sport_entities.txt")\
    .setCaseSensitive(False)\
    .setEntityValue('sport_entity')

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    financial_entity_extractor,
    sport_entity_extractor
])

empty_df = spark.createDataFrame([['']]).toDF("description")
pipelineModel = nlpPipeline.fit(empty_df)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "c6e059d6-c8f3-41b7-b3b7-0bf2f618c771"}
result = pipelineModel.transform(news_df)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "804aabcc-24bf-4aaa-8386-08b86d498197"}
result.select('financial_entities.result','sport_entities.result').take(2)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "fa8ba972-3687-4c0a-874b-ef97df2c5cab"}
# Rows where at least one matcher found more than one phrase.
result.select('description','financial_entities.result','sport_entities.result')\
    .toDF('text','financial_matches','sport_matches').filter((F.size('financial_matches')>1) | (F.size('sport_matches')>1))\
    .show(truncate=70)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "643d962a-8143-427c-a3bf-f078216e0e00"}
# FIX: the alias was "clinical_entities" (a copy-paste slip from a clinical
# notebook); this column holds the financial entity matches.
result_df = result.select(F.explode(F.arrays_zip(result.financial_entities.result,
                                                 result.financial_entities.begin,
                                                 result.financial_entities.end)).alias("cols")) \
    .select(F.expr("cols['0']").alias("financial_entities"),
            F.expr("cols['1']").alias("begin"),
            F.expr("cols['2']").alias("end")).toPandas()

result_df.head(10)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "e16edc30-6fab-488f-a007-cea10d981f8e"}
# ## RegexMatcher
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "ddb75086-1f9a-46d8-a6d7-267df8799689"}
# Download a PubMed abstracts sample and copy it into DBFS.
# ! wget -q https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/pubmed/pubmed-sample.csv
dbutils.fs.cp("file:/databricks/driver/pubmed-sample.csv", "dbfs:/")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "800b8a3a-9409-4217-94ba-0b244fd40021"}
# Keep only rows with a non-null abstract (column AB), renamed to "text".
pubMedDF = spark.read\
    .option("header", "true")\
    .csv("/pubmed-sample.csv")\
    .filter("AB IS NOT null")\
    .withColumnRenamed("AB", "text")\
    .drop("TI")

pubMedDF.show(truncate=50)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "1acb9284-72e5-41af-9fa1-7e9ccefcede1"}
# Rule file format: one "<regex>, <identifier>" pair per line (delimiter ',').
rules = '''
renal\s\w+, started with 'renal'
cardiac\s\w+, started with 'cardiac'
\w*ly\b, ending with 'ly'
\S*\d+\S*, match any word that contains numbers
(\d+).?(\d*)\s*(mg|ml|g), match medication metrics
'''

with open('regex_rules.txt', 'w') as f:
    f.write(rules)

dbutils.fs.cp("file:/databricks/driver/regex_rules.txt", "dbfs:/")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "e669a4af-209c-4497-b89b-fa0b3be03613"}
# Inspect RegexMatcher's default parameters.
RegexMatcher().extractParamMap()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "5b47399e-7459-4006-831d-5d4898b3ab5e"}
documentAssembler = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

# "MATCH_ALL" strategy: keep every match of every rule in the document.
regex_matcher = RegexMatcher()\
    .setInputCols('document')\
    .setStrategy("MATCH_ALL")\
    .setOutputCol("regex_matches")\
    .setExternalRules(path="file:/databricks/driver/regex_rules.txt", delimiter=',')

nlpPipeline = Pipeline(stages=[
    documentAssembler,
    regex_matcher
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
match_df = pipelineModel.transform(pubMedDF)
match_df.select('regex_matches.result').take(3)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "d156f59a-4c1b-4634-8f22-ea30698cc94b"}
match_df.select('text','regex_matches.result')\
    .toDF('text','matches').filter(F.size('matches')>1)\
    .show(truncate=70)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "979a88e0-7851-4e06-ac0c-d2f79115aac8"}
# ## MultiDateMatcher
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "1d63498a-869d-49b2-9cb1-6f328ab62944"}
# Extract exact & normalize dates from relative date-time phrases. The default anchor date will be the date the code is run.
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "e394bf1c-c99f-4dc8-b574-007ac624d83e"}
# Inspect MultiDateMatcher defaults (the anchor date is the run date).
MultiDateMatcher().extractParamMap()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "5cb17b0b-1c62-4d8f-8e49-348c03c8f4ea"}
documentAssembler = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

# Normalize any date expressions found to yyyy/MM/dd.
date_matcher = MultiDateMatcher() \
    .setInputCols('document') \
    .setOutputCol("date") \
    .setOutputFormat("yyyy/MM/dd")\
    .setSourceLanguage("en")

# PipelineModel (not Pipeline): no fitting step is required here.
date_pipeline = PipelineModel(stages=[
    documentAssembler,
    date_matcher
])

sample_df = spark.createDataFrame([['I saw him yesterday and he told me that he will visit us next week']]).toDF("text")
result = date_pipeline.transform(sample_df)
result.select('date.result').show(truncate=False)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "d09f413c-65a0-4859-8edc-d968c4bd5ea8"}
# Let's set the input format and output format to a specific format
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "778ef7a4-cd68-4925-a38d-3596d7904c80"}
documentAssembler = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

# Restrict parsing to dd/MM/yyyy inputs and emit dates as yyyy/MM/dd.
date_matcher = MultiDateMatcher() \
    .setInputCols('document') \
    .setOutputCol("date")\
    .setInputFormats(["dd/MM/yyyy"])\
    .setOutputFormat("yyyy/MM/dd")\
    .setSourceLanguage("en")

date_pipeline = PipelineModel(stages=[
    documentAssembler,
    date_matcher
])

sample_df = spark.createDataFrame([["the last payment date of this invoice is 21/05/2022"]]).toDF("text")
result = date_pipeline.transform(sample_df)
result.select('date.result').show(truncate=False)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "573a2b5a-d233-40ce-b9d3-77979983e4c7"}
# ## Text Cleaning with UDF
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b976839f-997a-4ed7-bb56-50cfaadb8026"}
# Small HTML sample for the UDF-based cleaning demos below.
text = '<h1 style="color: #5e9ca0;">Have a great <span style="color: #2b2301;">birth</span> day!</h1>'
text_df = spark.createDataFrame([[text]]).toDF("text")

import re
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType, IntegerType
clean_text = lambda s: re.sub(r'<[^>]*>', '', s)
# Apply the plain-Python cleaner as a Spark UDF over the "text" column.
text_df.withColumn('cleaned', udf(clean_text, StringType())('text')).select('text','cleaned').show(truncate= False)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "d0cd3190-2067-4f72-bd2c-0a8333786cdc"}
# Count characters that are neither alphanumeric nor a plain space.
def find_not_alnum_count(s):
    count = 0
    for ch in s:
        if not ch.isalnum() and ch != ' ':
            count += 1
    return count

find_not_alnum_count("it's your birth day!")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "87e28ec7-b37d-4d92-a33a-3817900457a3"}
# Count of non-alphanumeric, non-space characters in the HTML sample.
text = '<h1 style="color: #5e9ca0;">Have a great <span style="color: #2b2301;">birth</span> day!</h1>'
find_not_alnum_count(text)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "abec629a-eee9-4f5d-9f6e-b6748987204c"}
# Same counter applied as a Spark UDF returning an integer column.
text_df.withColumn('cleaned', udf(find_not_alnum_count, IntegerType())('text')).select('text','cleaned').show(truncate= False)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "d2e1dd3c-d47f-4f90-a3f7-c0c0f3154d87"}
# ## Finisher
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "ee905adc-3f3a-4aa0-95ab-2da6cbb1b5b7"}
# ***Finisher:*** Once we have our NLP pipeline ready to go, we might want to use our annotation results somewhere else where it is easy to use. The Finisher outputs annotation(s) values into a string.
#
# If we just want the desired output column in the final dataframe, we can use Finisher to drop previous stages in the final output and get the `result` from the process.
#
# This is very handy when you want to use the output from Spark NLP annotator as an input to another Spark ML transformer.
#
# Settable parameters are:
#
# `setInputCols()`
#
# `setOutputCols()`
#
# `setCleanAnnotations(True)` -> Whether to remove intermediate annotations
#
# `setValueSplitSymbol(“#”)` -> split values within an annotation character
#
# `setAnnotationSplitSymbol(“@”)` -> split values between annotations character
#
# `setIncludeMetadata(False)` -> Whether to include metadata keys. Sometimes useful in some annotations.
#
# `setOutputAsArray(False)` -> Whether to output as Array. Useful as input for other Spark transformers.
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "14af4d1c-a8c3-403b-9011-6d84be469dc6"}
# Finisher flattens annotation structs to plain strings and (by default)
# drops the intermediate annotation columns.
finisher = Finisher() \
    .setInputCols(["regex_matches"]) \
    .setIncludeMetadata(False) # set to False to remove metadata

# Reuses documentAssembler and regex_matcher from the RegexMatcher section.
nlpPipeline = Pipeline(stages=[
    documentAssembler,
    regex_matcher,
    finisher
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
match_df = pipelineModel.transform(pubMedDF)
match_df.show(truncate = 50)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "4f375408-0f1a-4e25-b890-cff84b504dac"}
match_df.printSchema()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "6c4d07ff-6234-4775-921e-8adffd77cf00"}
# The finished output column is named "finished_<inputCol>".
match_df.filter(F.size('finished_regex_matches')>2).show(truncate = 50)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "de4b6d0f-1ab9-4007-926b-8c727946c5aa"}
# ## LightPipeline
#
# https://medium.com/spark-nlp/spark-nlp-101-lightpipeline-a544e93f20f1
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "5b3535a7-4e3d-4b34-b0b3-948150d5ce55"}
# LightPipelines are Spark NLP specific Pipelines, equivalent to Spark ML Pipeline, but meant to deal with smaller amounts of data. They’re useful working with small datasets, debugging results, or when running either training or prediction from an API that serves one-off requests.
#
# Spark NLP LightPipelines are Spark ML pipelines converted into a single machine but the multi-threaded task, becoming more than 10x times faster for smaller amounts of data (small is relative, but 50k sentences are roughly a good maximum). To use them, we simply plug in a trained (fitted) pipeline and then annotate a plain text. We don't even need to convert the input text to DataFrame in order to feed it into a pipeline that's accepting DataFrame as an input in the first place. This feature would be quite useful when it comes to getting a prediction for a few lines of text from a trained ML model.
#
# **It is nearly 10x faster than using Spark ML Pipeline**
#
# `LightPipeline(someTrainedPipeline).annotate(someStringOrArray)`
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "7a50ac78-e1fe-48d9-99a4-dbf22bfcb483"}
documentAssembler = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

tokenizer = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token")

stemmer = Stemmer() \
    .setInputCols(["token"]) \
    .setOutputCol("stem")

# Reuses `lemmatizer` defined in the Lemmatizer section above.
nlpPipeline = Pipeline(stages=[
    documentAssembler,
    tokenizer,
    stemmer,
    lemmatizer
])

empty_df = spark.createDataFrame([['']]).toDF("text")
pipelineModel = nlpPipeline.fit(empty_df)
pipelineModel.transform(spark_df).show()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "a9f974e6-2e42-430a-8daa-3fbff06fecf7"}
from sparknlp.base import LightPipeline

# annotate() takes a plain string (no DataFrame needed) and returns a dict
# mapping output column name -> list of result strings.
light_model = LightPipeline(pipelineModel)
light_result = light_model.annotate("John and Peter are brothers. However they don't support each other that much.")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "63656c1f-fb62-4ab0-8e56-0e2f50579786"}
light_result.keys()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "45166c74-9d76-4c11-9c3c-a5a028c54100"}
list(zip(light_result['token'], light_result['stem'], light_result['lemma']))
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "e1e5cbc5-5a2e-4651-bec7-71d2d3cbe350"}
# fullAnnotate() returns full annotation objects rather than bare strings.
light_result = light_model.fullAnnotate("John and Peter are brothers. However they don't support each other that much.")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "47e66689-5d60-4e86-81b4-7d0ea320bc11"}
light_result
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "af2a1445-1bba-45be-a39b-bb65d979053e"}
# annotate() also accepts a list of strings.
text_list= ["How did serfdom develop in and then leave Russia ?",
            "There will be some exciting breakthroughs in NLP this year."]

light_model.annotate(text_list)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "b63c7abc-5591-4971-9d90-0a0073d344e2"}
# **important note:** When you use Finisher in your pipeline, regardless of setting `cleanAnnotations` to False or True, LightPipeline will only return the finished columns.
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "edc724a7-36d2-4005-b4b1-a47b789f10ac"}
# End of Notebook #
| tutorials/Certification_Trainings/Public/databricks_notebooks/7. Text Preprocessing Annotators with Spark NLP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # N-ary Tree Preorder Traversal
#
# Given an n-ary tree, return the preorder traversal of its nodes' values.
#
# Nary-Tree input serialization is represented in their level order traversal, each group of children is separated by the null value (See examples).
#
# ## 解析
#
# 题目来源:[LeetCode - N-ary Tree Preorder Traversal - 589](https://leetcode.com/problems/n-ary-tree-preorder-traversal/)
#
# 题目非常简单,和二叉树前序遍历是一样的,只不过子节点变成多个。
def preorder(root):
    """Return the preorder traversal of an n-ary tree as a list of values.

    Visits the root first, then each child subtree left to right.

    Args:
        root: the root node (exposing .val and .children), or None.

    Returns:
        List of node values in preorder; [] for an empty tree.
    """
    result = []

    def walk(node):
        # No `nonlocal` needed: we only mutate `result` via append, never
        # rebind it. (The original also had a stray `;` after `return`.)
        if node is None:
            return
        result.append(node.val)
        # `or ()` guards leaves whose `children` is None instead of [].
        for child in node.children or ():
            walk(child)

    walk(root)
    return result
| N-ary-Tree-Preorder-Traversal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + slideshow={"slide_type": "slide"}
# -*- coding: utf-8 -*-
"""
Example script to forward/back-project some data.
In this example, we will create projection data and images purely
from within Python.
Note that the code that use geometric shapes below needs a version of STIR
later than 3Nov2018. There are no other prerequisites (i.e. no script to run first!).
Author: <NAME>
"""
# -
# %matplotlib notebook
# + [markdown] slideshow={"slide_type": "slide"}
# # Initial imports
# + slideshow={"slide_type": "slide"}
import stir
import stirextra
#import numpy
import matplotlib.pyplot as plt
# + [markdown] slideshow={"slide_type": "slide"}
# # We first need to define a scanner
# + slideshow={"slide_type": "slide"}
# STIR has multiple scanners predefined.
print(stir.Scanner.list_all_names())
# + [markdown] slideshow={"slide_type": "slide"}
# # Let's use an old scanner that doesn't have too many detectors (for speed)
# + slideshow={"slide_type": "slide"}
scanner=stir.Scanner.get_scanner_from_name("ECAT 931")
print(scanner.parameter_info())
# + [markdown] slideshow={"slide_type": "slide"}
# # Now we need to describe the actual size of the projection data
# + slideshow={"slide_type": "slide"}
# We call this the `projection data information`.
#
# We will use a "2D" PET acquisition in this example.
# This corresponds to `span=3`, with only 1 "segment".
# You can ignore this terminology now, or check it out at
# http://stir.sourceforge.net/documentation/STIR-glossary.pdf
span=3;
max_ring_diff=1;
# use default number of "views" (or "azimutal angles")
# fix: integer division — in Python 3, `/` yields a float, which the
# SWIG-wrapped STIR constructor below rejects for an integer argument.
num_views=scanner.get_num_detectors_per_ring()//2;
# construct the object using ProjDataInfoCTI
# (the naming of this function was related to the scanner manufacturer, but will be changed in the future)
proj_data_info=stir.ProjDataInfo.ProjDataInfoCTI(scanner,
                                                 span, max_ring_diff,
                                                 num_views, scanner.get_default_num_arccorrected_bins());
# + [markdown] slideshow={"slide_type": "slide"}
# # Create an empty image with suitable dimensions and voxel sizes
# + slideshow={"slide_type": "slide"}
# This image will cover the whole FOV and having the "traditional"
# z-spacing of half the scanner ring-distance. (STIR needs this at the moment).
# For illustration, we use smaller voxels than the default (we "zoom in")
zoom=1.2;
image_data=stir.FloatVoxelsOnCartesianGrid(proj_data_info, zoom);
# + [markdown] slideshow={"slide_type": "slide"}
# # initialise a projection matrix
# + slideshow={"slide_type": "slide"}
# Using ray-tracing here
# Note that the default is to restrict the projection to a cylindrical FOV
projmatrix=stir.ProjMatrixByBinUsingRayTracing();
projmatrix.set_up(proj_data_info, image_data);
# + [markdown] slideshow={"slide_type": "slide"}
# # construct projectors
# + slideshow={"slide_type": "slide"}
forwardprojector=stir.ForwardProjectorByBinUsingProjMatrixByBin(projmatrix);
forwardprojector.set_up(proj_data_info, image_data);
backprojector=stir.BackProjectorByBinUsingProjMatrixByBin(projmatrix);
backprojector.set_up(proj_data_info, image_data);
# + [markdown] slideshow={"slide_type": "slide"}
# # create projection data for output of forward projection
# + slideshow={"slide_type": "slide"}
# We'll create the data in memory here
exam_info=stir.ExamInfo();
projdataout=stir.ProjDataInMemory(exam_info, proj_data_info);
# Note: we could write to file, but it is right now a bit complicated to open a
# projection data file for read/write:
# inout=stir.ios.trunc|stir.ios.ios_base_in|stir.ios.out;
# projdataout=stir.ProjDataInterfile(exam_info, proj_data_info, 'my_test_python_projection.hs',inout);
# + [markdown] slideshow={"slide_type": "slide"}
# # Done creating data and projectors!
# + slideshow={"slide_type": "slide"}
# Let's now create an interesting image with 2 cylinders
# + [markdown] slideshow={"slide_type": "slide"}
# # create a first cylinder (note: units are in mm)
# + slideshow={"slide_type": "slide"}
# we'll put it in the middle of the scanner
# This is currently a bit difficult in STIR due to its
# choice of origin (the middle of the first ring).
# cylinder dimensions in mm
length=60
radius=40
# fix: integer division — middle_slice is used as a numpy slice index in
# later cells, and numpy indices must be ints in Python 3 (`/` gives float)
middle_slice=(image_data.get_max_z()+image_data.get_min_z())//2
z_centre=middle_slice*image_data.get_voxel_size().z()
# create a coordinate for the centre (note the z,y,x order)
centre=stir.FloatCartesianCoordinate3D(z_centre,0,0)
# create a geometrical shape
shape=stir.EllipsoidalCylinder(length, radius, radius,
                               centre)
# + [markdown] slideshow={"slide_type": "slide"}
# # we set the image to a discretised version of this shape
# + slideshow={"slide_type": "slide"}
# (the last argument means we'll sample every voxel only once)
shape.construct_volume(image_data, stir.IntCartesianCoordinate3D(1,1,1))
# + [markdown] slideshow={"slide_type": "slide"}
# # Let's add another translated cylinder
# + slideshow={"slide_type": "slide"}
# The way to do this is currently still awkward. Apologies.
shape.translate(stir.FloatCartesianCoordinate3D(15,90,40))
# make a clone and fill that one with the second shape
image_data2=image_data.clone()
shape.construct_volume(image_data2, stir.IntCartesianCoordinate3D(1,1,1))
# now add that to the previous one (currently messy as we need to pass through numpy, sorry)
image_data_array=stirextra.to_numpy(image_data);
image_data_array+=stirextra.to_numpy(image_data2);
image_data.fill(image_data_array.flat)
# + [markdown] slideshow={"slide_type": "slide"}
# # display 2 transaxial slices of the volume
# + slideshow={"slide_type": "slide"}
maxforplot=image_data.find_max(); # save for display
image_data_array=stirextra.to_numpy(image_data);
plt.figure();
plt.subplot(1,2,1)
# int(...) guards against middle_slice being a float (numpy indices must be int)
plt.imshow(image_data_array[int(middle_slice),:,:]);
plt.clim(0,maxforplot)
plt.title('slice %d' % middle_slice)
plt.subplot(1,2,2)
plt.imshow(image_data_array[int(middle_slice)+5,:,:]);
plt.title('slice %d' % (middle_slice+5))
plt.clim(0,maxforplot)
# + [markdown] slideshow={"slide_type": "slide"}
# # forward project the image!
# + slideshow={"slide_type": "slide"}
forwardprojector.forward_project(projdataout, image_data);
# + [markdown] slideshow={"slide_type": "slide"}
# # get the output
# + slideshow={"slide_type": "slide"}
# With the above settings, we are simulating an acquisition in "2D" mode for a
# scanner with "rings" of detector crystals. There will therefore be only a
# single "segment", corresponding to LORs which are (almost) orthogonal to the
# scanner axis.
seg=projdataout.get_segment_by_sinogram(0);
# + [markdown] slideshow={"slide_type": "slide"}
# # A note on projection data sizes
# + slideshow={"slide_type": "slide"}
# Segment 0 is a 3D array of size
# num_sinograms x num_views x num_tangential_positions.
# with the order of the indices as above if we ask for the data "by sinogram"
# As we used "span">1, there will be both "direct" and "indirect" sinograms.
# (Don't worry if you don't know the relevant terminology.)
# We will therefore have the same number of sinograms as slices
# in the image (both equal to 2*num_rings-1).
print(seg.shape())
print(image_data.shape())
print(scanner.get_num_rings())
# + [markdown] slideshow={"slide_type": "slide"}
# # Display the data
# + slideshow={"slide_type": "slide"}
# We'll display a single sinogram and a horizontal profile (i.e. projections
# for a single "slice" and "view"), but you could display the data in another cut of course.
seg_array=stirextra.to_numpy(seg);
plt.figure();
plt.subplot(1,3,1)
# int(...) guards against middle_slice being a float (numpy indices must be int)
plt.imshow(seg_array[int(middle_slice),:,:]);
plt.title('projection as sinogram')
plt.xlabel('tangential')
plt.ylabel('view')
plt.subplot(1,3,2)
plt.imshow(seg_array[:,0,:]);
plt.title('projection at first view')
plt.xlabel('tangential')
plt.ylabel('plane')
plt.subplot(1,3,3)
plt.plot(seg_array[int(middle_slice),0,:])
# fix: `//` — `/` would produce a float index (TypeError in Python 3)
plt.plot(seg_array[int(middle_slice),proj_data_info.get_num_views()//2,:])
plt.title('Horizontal profiles\n(middle sinogram)');
plt.legend(('first view', 'middle view'));
# + [markdown] slideshow={"slide_type": "slide"}
# # display all sinograms in a (repeated) loop
# + slideshow={"slide_type": "slide"}
#%% It might make more sense to you if you display every view in the animation
# (as in the evaluate_simulation* scripts), so you could try this as well.
import matplotlib.animation as animation
bitmaps=[]
fig=plt.figure()
for sino in range(seg_array.shape[0]):
bitmap=plt.imshow(seg_array[sino,:,:]);
plt.clim(0,seg_array.max())
plt.axis('off');
bitmaps.append([bitmap])
ani = animation.ArtistAnimation(fig, bitmaps, interval=100, blit=True, repeat_delay=1000)
# + [markdown] slideshow={"slide_type": "slide"}
# # backproject this projection data
# + slideshow={"slide_type": "slide"}
# we will do this into a new image of the same geometry as the original
back_projection=image_data.get_empty_copy()
backprojector.back_project(back_projection, projdataout);
# + [markdown] slideshow={"slide_type": "slide"}
# # display the same slices as above
# + slideshow={"slide_type": "slide"}
maxforplot=back_projection.find_max(); # save for display
back_projection_array=stirextra.to_numpy(back_projection);
plt.figure();
plt.subplot(1,2,1)
# int(...) guards against middle_slice being a float (numpy indices must be int)
plt.imshow(back_projection_array[int(middle_slice),:,:]);
plt.clim(0,maxforplot)
plt.title('Back-projection, plane %d' % middle_slice)
plt.subplot(1,2,2)
plt.imshow(back_projection_array[int(middle_slice)+5,:,:]);
plt.title('Back-projection, plane %d' % (middle_slice+5))
plt.clim(0,maxforplot);
# + [markdown] slideshow={"slide_type": "slide"}
# # Preliminary conclusion
# + slideshow={"slide_type": "slide"}
# This should show that simple backprojection gives a smooth version of the
# original image back. It's therefore useful as a sanity check.
# Actual image reconstruction needs to work a bit harder...
# + [markdown] slideshow={"slide_type": "slide"}
# # Let's do a few minimal experiments on changing the projector
# + slideshow={"slide_type": "slide"}
#
# + [markdown] slideshow={"slide_type": "slide"}
# # For this experiment, we will use a larger cylinder in the centre
# + slideshow={"slide_type": "slide"}
# larger cylinder for the projector experiments (dimensions in mm)
length=60
radius=200
# fix: integer division — middle_slice is used as a numpy slice index below
middle_slice=(image_data.get_max_z()+image_data.get_min_z())//2
z_centre=middle_slice*image_data.get_voxel_size().z()
centre=stir.FloatCartesianCoordinate3D(z_centre,0,0)
shape=stir.EllipsoidalCylinder(length, radius, radius,
                               centre)
shape.construct_volume(image_data, stir.IntCartesianCoordinate3D(1,1,1))
# + [markdown] slideshow={"slide_type": "slide"}
# # forward project it and display
# + slideshow={"slide_type": "slide"}
forwardprojector.forward_project(projdataout, image_data);
seg=projdataout.get_segment_by_sinogram(0);
seg_array=stirextra.to_numpy(seg);
plt.figure();
# int(...) guards against middle_slice being a float (numpy indices must be int)
plt.imshow(seg_array[int(middle_slice),:,:]);
plt.title('Forward projection');
# + [markdown] slideshow={"slide_type": "slide"}
# # back-projection and display
# + slideshow={"slide_type": "slide"}
# This shows a beautiful pattern, a well-known feature of a ray-tracing matrix
back_projection.fill(0) # set to zero, otherwise it will add to the previous results
backprojector.back_project(back_projection, projdataout);
back_projection_array=stirextra.to_numpy(back_projection);
plt.figure();
# int(...) guards against middle_slice being a float (numpy indices must be int)
plt.imshow(back_projection_array[int(middle_slice),:,:]);
# + [markdown] slideshow={"slide_type": "slide"}
# # Let's use more LORs per sinogram bin (which will be a bit slower of course)
# + slideshow={"slide_type": "slide"}
projmatrix.set_num_tangential_LORs(10);
# Need to call set_up again
projmatrix.set_up(proj_data_info, image_data);
# + [markdown] slideshow={"slide_type": "slide"}
# # You could re-run the forward projection, but we'll skip that for now
# + slideshow={"slide_type": "slide"}
# The output will be almost identical anyway.
# forwardprojector.forward_project(projdataout, image_data);
# + [markdown] slideshow={"slide_type": "slide"}
# # Run another backprojection and display
# + slideshow={"slide_type": "slide"}
back_projection_10=back_projection.get_empty_copy()
backprojector.back_project(back_projection_10, projdataout);
back_projection_10_array=stirextra.to_numpy(back_projection_10);
plt.figure();
plt.subplot(1,2,1)
# int(...) guards against middle_slice being a float (numpy indices must be int)
plt.imshow(back_projection_10_array[int(middle_slice),:,:]);
plt.title('Back-projection with 10 LORs per bin')
plt.subplot(1,2,2)
plt.plot(back_projection_10_array[int(middle_slice),80,:]);
# + [markdown] slideshow={"slide_type": "slide"}
# # compare profiles to check if overall features are fine
# + slideshow={"slide_type": "slide"}
plt.figure()
# int(...) guards against middle_slice being a float (numpy indices must be int)
plt.plot(back_projection_array[int(middle_slice),80,:])
plt.plot(back_projection_10_array[int(middle_slice),80,:])
plt.title('comparing both profiles');
# + [markdown] slideshow={"slide_type": "slide"}
# # What now?
# + slideshow={"slide_type": "slide"}
# You have all the basic tools to do a simple analytic PET simulation
# (no attenuation etc here yet though).
# You can also add other shapes (stir.Ellipsoid etc), or just make them yourself
# using numpy commands
| notebooks/projection_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="nhfQ8xFEbeoT" colab_type="text"
# 使用colab运行的配置代码
# + id="TfdLx68Qw0bD" colab_type="code" outputId="3063edeb-d872-450a-df50-13e26b55c990" colab={"base_uri": "https://localhost:8080/", "height": 281}
# !apt-get install -y -qq software-properties-common python-software-properties module-init-tools
# !add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null
# !apt-get update -qq 2>&1 > /dev/null
# !apt-get -y install -qq google-drive-ocamlfuse fuse
from google.colab import auth
auth.authenticate_user()
from oauth2client.client import GoogleCredentials
creds = GoogleCredentials.get_application_default()
import getpass
# !google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} < /dev/null 2>&1 | grep URL
vcode = getpass.getpass()
# !echo {vcode} | google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret}
# + id="m8O8YXkEIRvW" colab_type="code" colab={}
# !mkdir -p drive
# !google-drive-ocamlfuse drive -o nonempty
import os
os.chdir("drive/2048-api")
# + id="wisldqSQsR0n" colab_type="code" outputId="cb5e6deb-dc5e-4ad7-fc14-2860f66108b4" colab={"base_uri": "https://localhost:8080/", "height": 35}
import keras
from game2048.game import Game
from game2048.displays import Display, IPythonDisplay
from game2048.agents import ExpectiMaxAgent
from keras.models import Sequential,load_model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D,BatchNormalization
from keras.optimizers import Adam
import numpy as np
from sklearn.model_selection import train_test_split
# + [markdown] id="u6JzwL2TbnPW" colab_type="text"
# 定义Agent和model
# + id="jrKTvSg4WSIg" colab_type="code" colab={}
from game2048.agents import Agent
class MyAgent(Agent):
    """Agent that chooses moves by querying the global CNN `model`."""

    def __init__(self, game, display=None):
        self.game = game
        self.display = display

    def step(self):
        # Encode the board: tile value v -> trunc(log2(v + 1)), then one-hot
        # into 12 classes and reshape to the network's (1, 4, 4, 12) input.
        board = np.array(self.game.board)
        encoded = np.trunc(np.log2(board + 1))
        features = keras.utils.to_categorical(encoded, 12).reshape(1, 4, 4, 12)
        # Predict move scores and return the index of the first maximum,
        # exactly as list.index(max(list)) did in the original.
        scores = model.predict(features, batch_size=128)[0].tolist()
        return scores.index(max(scores))
# + id="bKoC8wCKcJUv" colab_type="code" colab={}
# Input: one-hot encoded 4x4 board with 12 tile classes
# (matches the `reshape(1, 4, 4, 12)` used in MyAgent.step).
# fix: `input_shape` was referenced below but never defined (NameError).
input_shape = (4, 4, 12)
model=Sequential()
model.add(Conv2D(filters= 128, kernel_size=(4,1),kernel_initializer='he_uniform', padding='Same', activation='relu',input_shape=input_shape)) # first conv layer: 128 filters spanning whole columns, SAME padding, relu, input 4*4*12
model.add(Conv2D(filters= 128, kernel_size=(1,4),kernel_initializer='he_uniform', padding='Same', activation='relu')) # filters spanning whole rows
model.add(Conv2D(filters= 128, kernel_size=(1,1),kernel_initializer='he_uniform', padding='Same', activation='relu'))
model.add(Conv2D(filters= 128, kernel_size=(2,2),kernel_initializer='he_uniform', padding='Same', activation='relu'))
model.add(Conv2D(filters= 128, kernel_size=(3,3),kernel_initializer='he_uniform', padding='Same', activation='relu'))
model.add(Conv2D(filters= 128, kernel_size=(4,4),kernel_initializer='he_uniform', padding='Same', activation='relu'))
model.add(Flatten())
model.add(BatchNormalization())
model.add(Dense(256, kernel_initializer='he_uniform',activation='relu'))
model.add(BatchNormalization())
model.add(Dense(128, kernel_initializer='he_uniform',activation='relu'))
model.add(Dense(4, activation='softmax')) # 4 move directions
model.summary()
model.compile(optimizer='adam', loss = 'categorical_crossentropy',metrics=['accuracy'])
# + id="advwh8b7ttB9" colab_type="code" colab={}
model=load_model('model.h5')
# + [markdown] id="8wA2Fx43bM6E" colab_type="text"
# 主要训练部分
# + id="hEIqXj-_GQWG" colab_type="code" colab={}
model=load_model('2048_new2_2048.h5')
# DAgger-style training loop: play games following the *student* model's
# moves while recording the *expert* (ExpectiMax) move for every board seen,
# then train on the collected (board, expert move) pairs each round.
count=0
while count<500:
    count=count+1
    # fix: was `count+1`, which printed rounds 2..501 instead of 1..500
    print('第',count,'轮:')
    # reset the board/direction buffers for this round
    image=[]
    label=[]
    for _ in range(0,50):
        game = Game(4, score_to_win=1024, random=False)
        agent1 = ExpectiMaxAgent(game)
        while not game.end:
            direction=agent1.step()  # expert label for the current board
            # encode board for the network: trunc(log2(v+1)) -> one-hot (1,4,4,12)
            x=np.array(game.board)
            x=np.log2(x+1)
            x=np.trunc(x)
            x = keras.utils.to_categorical(x, 12)
            x = x.reshape(1, 4, 4, 12)
            pred=model.predict(x,batch_size=128)
            r=pred[0]
            r1=r.tolist()
            mydirection=r1.index(max(r1))  # student's own move
            image.append(game.board)
            label.append(direction)
            # follow the student policy so training data covers its mistakes
            game.move(mydirection)
        print(np.max(game.board))  # best tile reached this game
    x_train=np.array(image)
    y_train=np.array(label)
    x_train=np.log2(x_train+1)
    x_train=np.trunc(x_train)
    x_train = keras.utils.to_categorical(x_train, 12)
    print(x_train.shape)
    y_train = keras.utils.to_categorical(y_train,4)
    model.train_on_batch(x_train, y_train)
    model.save('model.h5')  # checkpoint after every round
# + [markdown] id="LAja05QFbbED" colab_type="text"
# 评测50次分数
# + id="CYJh3q0HthF_" colab_type="code" outputId="1b4a971b-cef0-458d-cc4b-b5a7e2b8e3a4" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Evaluate the trained agent over 50 games; report mean and best final tile.
i=[]
for _ in range(50):
    game = Game(4, random=False)
    # fix: the class defined above is MyAgent; `MyOwnAgent` is undefined (NameError)
    agent1 = MyAgent(game)
    agent1.play()
    i.append(np.max(game.board))
print(np.mean(i),np.max(i))
# + id="uP_Gam1z05HC" colab_type="code" colab={}
model.save('model666.h5')
| train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#This script is used to split ASV table into genus specific ASV tables. Use the rarefied feature table and taxonomy from Qiita analysis
from biom import Table
from biom.util import biom_open
from os.path import abspath, join
from qiime2 import Artifact
from os import makedirs
from qiime2.plugins import diversity
import pandas as pd
# +
# get biom qza
biom_fp = '/Users/jenniferhoutz/Dropbox/moeller_rotation/manuscript_analysis/primate_micro_filtered_rarefied_table.qza'
# get taxonomy qza
tax_fp = '/Users/jenniferhoutz/Dropbox/moeller_rotation/manuscript_analysis/taxonomy_assignment_primate_micro_rarefied.qza'
# +
# read biom qza into qiime2 Artifact class
biom_art = Artifact.load(abspath(biom_fp))
# load the qiime2 artifact into biom Table class
biom = biom_art.view(Table)
# +
# read biom tax into qiime2 Artifact class
tax_art = Artifact.load(abspath(tax_fp))
# read taxonomy artifact as Pandas DF
tax_df = tax_art.view(pd.DataFrame)
# +
# read in metadata
md_fp = '/Users/jenniferhoutz/Dropbox/moeller_rotation/manuscript_analysis/primate_micro_filtered_metadata.txt'
from qiime2 import Metadata
metadata = Metadata.load(md_fp)
# -
tax_df.head()
# +
tax_cols = tax_df['Taxon'].str.split('; ', expand=True)
tax_cols.columns = ['Kingdom',
'Phylum',
'Class',
'Order',
'Family',
'Genus',
'Species']
tax_cols.head()
# -
genus_str = tax_cols[['Kingdom',
'Phylum',
'Class',
'Order',
'Family',
'Genus']].fillna(' ').apply(lambda x: '; '.join(x), axis=1)
genus_str
genus_str.value_counts()
# +
# grab list of genus_str values where value_counts >= threshold
threshold = 5
# fix: use the `threshold` variable instead of the hard-coded 5 it was meant to replace
genus_thr = pd.Series(genus_str.value_counts()).where(lambda x : x >= threshold).dropna().index
# +
# make an output dir
outdir = './genus_otu_tables'
makedirs(outdir, exist_ok=True)
# +
# for each family_thr value, filter the OTU table and write to file
for f in genus_thr:
f_ids = pd.Series(genus_str).where(lambda x : x == f).dropna().index
genus_otu = biom.filter(f_ids, axis='observation', inplace=False)
output_f = f.replace(';','_').replace(' ','')
output_fn = 'genus.%s.qza' % output_f
output_fp = join(outdir, output_fn)
# export as q2 artifact
genus_art = Artifact.import_data("FeatureTable[Frequency]", genus_otu)
genus_art.save(output_fp)
# # export as hdf5 biom
# with biom_open(output_fp, 'w') as t: # doctest: +SKIP
# family_otu.to_hdf5(t, "%s table" % f)
# +
# group all the code into a single method to facilitate rerunning
def split_otu_tables_by_tax(biom_t, tax_df, output_dir,
                            metadata,
                            threshold=5,
                            level=5,
                            tax_names=['Kingdom',
                                       'Phylum',
                                       'Class',
                                       'Order',
                                       'Family',
                                       'Genus',
                                       'Species'],
                            sampling_depth=5):
    """Split a feature (OTU/ASV) table into per-taxon tables and export
    diversity ordination visualizations for each.

    Parameters
    ----------
    biom_t : biom.Table
        Feature table to split.
    tax_df : pandas.DataFrame
        Taxonomy with a 'Taxon' column of '; '-separated rank strings.
    output_dir : str
        Directory for the per-taxon .qza/.qzv outputs (created if missing).
    metadata : qiime2.Metadata
        Sample metadata passed to the core-metrics pipeline.
    threshold : int
        Minimum number of features a taxon must have to be exported.
    level : int
        Index into `tax_names` for the rank to split at (5 == Genus).
    tax_names : list of str
        Rank names assigned to the split taxonomy columns.
    sampling_depth : int
        Rarefaction depth for the core-metrics pipeline.

    Returns
    -------
    qiime2.Artifact
        NOTE(review): only the artifact from the *last* loop iteration is
        returned — presumably for interactive inspection; confirm intent.
    """
    # fix the taxonomy: split the 'Taxon' strings into one column per rank
    tax_cols = tax_df['Taxon'].str.split('; ', expand=True)
    tax_cols.columns = tax_names
    # make concatenated tax string at appropriate level
    cat_cols = tax_names[:level+1]
    print(cat_cols)  # debug: show which ranks are being concatenated
    tax_str = tax_cols[cat_cols].fillna(' ').apply(lambda x: '; '.join(x), axis=1)
    # find taxa above threshold number of OTUs
    tax_thr = pd.Series(tax_str.value_counts()).where(lambda x : x >= threshold).dropna().index
    # make output dir
    makedirs(output_dir, exist_ok=True)
    print(tax_thr)  # debug: list of taxa that will be exported
    # for each retained taxon, filter the OTU table and write to file
    for t in tax_thr:
        # feature IDs assigned to this taxon
        t_ids = pd.Series(tax_str).where(lambda x : x == t).dropna().index
        tax_otu = biom_t.filter(t_ids, axis='observation', inplace=False)
        tax_otu.remove_empty(inplace=True)
        # build a filesystem-safe name like 'Genus.k__..._g__X.qza'
        output_f = t.replace(';','_').replace(' ','')
        output_fn = '{0}.{1}.qza'.format(tax_names[level], output_f)
        output_fp = join(output_dir, output_fn)
        # export as q2 artifact
        tax_art = Artifact.import_data("FeatureTable[Frequency]", tax_otu)
        tax_art.save(output_fp)
        # export the Bray-Curtis and Jaccard emperor visualizations
        (rarefied_table,
         observed_otus_vector,
         shannon_vector,
         evenness_vector,
         jaccard_distance_matrix,
         bray_curtis_distance_matrix,
         jaccard_pcoa_results,
         bray_curtis_pcoa_results,
         jaccard_emperor,
         bray_curtis_emperor) = diversity.pipelines.core_metrics(table=tax_art,
                                                                 sampling_depth=sampling_depth,
                                                                 metadata=metadata)
        jaccard_fp = join(output_dir, '{0}.{1}.emperor.jaccard.qzv'.format(tax_names[level], output_f))
        bc_fp = join(output_dir, '{0}.{1}.emperor.braycurtis.qzv'.format(tax_names[level], output_f))
        jaccard_emperor.save(jaccard_fp)
        bray_curtis_emperor.save(bc_fp)
    return(tax_art)
# -
# !rm -r ./test_split_otus
biom_t = biom
tax_df = tax_df
output_dir = './test_split_otus'
foo = split_otu_tables_by_tax(biom_t, tax_df, output_dir, metadata,
level=5,
threshold=5)
# !ls -l test_split_otus/
# +
table=foo
metric = 'braycurtis'
# fix: `met` was undefined (NameError); q2-diversity's beta action takes the
# feature table and the metric name
bar = diversity.methods.beta(table=table, metric=metric)
# -
biom_t = biom
tax_df = tax_df
output_dir = './test_split_otus'
# fix: `metadata` is a required positional argument of split_otu_tables_by_tax
# (see its signature above); omitting it raises TypeError
split_otu_tables_by_tax(biom_t, tax_df, output_dir, metadata,
                        level=3,
                        threshold=50)
(rarefied_table,
observed_otus_vector,
shannon_vector,
evenness_vector,
jaccard_distance_matrix,
bray_curtis_distance_matrix,
jaccard_pcoa_results,
bray_curtis_pcoa_results,
jaccard_emperor,
bray_curtis_emperor) = diversity.pipelines.core_metrics(table=foo, sampling_depth=100, metadata=metadata)
bray_curtis_pcoa_results
bray_curtis_emperor.save('./split_otu_tables/test.qzv')
# !jupyter serverextension enable --py qiime2 --sys-prefix
| genus-level-asv-tables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # Soccer Analytics
# -
# Welcome to a Jupyter notebook on soccer analytics. This notebook is a free resource and is part of the Callysto project, which brings data science skills to grades 5 to 12 classrooms.
#
# In this notebook, we answer the question: How do ball possession and scoring relate?
#
#
# Visualizations will be coded using Python, a computer programming language. Python contains words from English and is used by data scientists. Programming languages are how people communicate with computers.
# “Run” the cells to see the graphs.
# Click “Cell” and select “Run All.” This will import the data and run all the code to create the data visualizations (scroll back to the top after you’ve run the cells).
#
# 
# + slideshow={"slide_type": "skip"}
# import Python libraries
import pandas as pd
import plotly.express as px
# -
# ### Making a csv file
# Data source: https://www.uefa.com/uefachampionsleague/standings/
# <br>Data was collected for the group phase (6 games per team) for the 2020-2021 season from the Champions League website. The data was inputted into the cell below by reading tables on the website. Notice that the values are separated by commas; this format is needed for the computer to read the data. The `writefile` command is used to create the file.
# %%writefile possession.csv
Total goals,Goal difference,Average ball possession (%),Team
18,16,61,Bayern
16,11,57,Barcelona
16,7,44,Monchengladbach
15,5,50,Man. United
14,12,54,Chelsea
14,10,51,Juventus
13,12,59,Man. City
13,7,54,Paris
12,7,56,Dortmund
11,2,58,Real Madrid
11,-1,51,Leipzig
11,4,47,Lazio
10,7,53,Liverpool
10,7,41,Porto
10,-7,48,RB Salzburg
10,2,47,Atalanta
9,1,57,Sevilla
8,-2,51,Club Brugge
7,0,55,Ajax
7,-2,51,Inter Milan
7,-1,50,Atletico Madrid
7,-11,45,Istanbul Basaksehir
6,-5,40,Krasnodar
5,-12,47,Ferencvaros
5,-7,47,Shakhtar Donetsk
5,-5,42,Lokomotiv Moskva
4,-9,47,Zenit
4,-9,46,Midtjylland
4,-9,45,Dynamo Kyiv
3,-8,50,Rennes
2,-8,50,Olympiacos
2,-11,50,Marseille
# The Python library pandas is used to tell the computer to read and then display the data in a table, or dataframe. Pandas is a library used to organize data. The dataframe below is organized from most to least total goals per team.
# + slideshow={"slide_type": "-"}
possession_df = pd.read_csv('possession.csv')
possession_df.sort_values('Average ball possession (%)', ascending=False)
# -
# Since we are exploring how possession and scoring relate, let's calculate some measures of spread and central tendency on average ball possession (%) to better understand the shape of the data.
# + slideshow={"slide_type": "slide"}
# Compute min, max, range, mean and median
# Min average ball possession
min_df = possession_df['Average ball possession (%)'].min() # change to 'Total goals' or 'Goal difference' for different calculations
# Max average ball possession
max_df = possession_df['Average ball possession (%)'].max()
# Range average ball possession (reuse min/max computed above instead of recomputing both)
range_df = max_df - min_df
# Mean of average ball possession
mean_df = possession_df['Average ball possession (%)'].mean()
# Median of average ball possession
median_df = possession_df['Average ball possession (%)'].median()
# Print results
print("The minimum value is", min_df)
print("The maximum value is", max_df)
print("The range is", range_df)
print("The mean is", mean_df)
print("The median is", median_df)
# -
# Notice that the mean and median are 50, and the range is 21.
#
# You can update or change the code. Follow the directions after the # in the code cell above.
#
# Now, let's visualize the range with a bar graph.
bar_df = px.bar(possession_df,
x='Team',
y='Average ball possession (%)', # change y to Total goals or Goal difference to visualize different variables
title='Average ball possession (%) by team') # update title, if needed
bar_df.update_layout(xaxis={'categoryorder':'total descending'})
# Notice that the x-axis represents teams, and the y-axis represents average ball possession (%). Bayern has the highest average ball possession at 61%, and Krasnodar has the lowest at 40%. Man. United, Atletico Madrid, Rennes, Olympiacos, and Marseille all have ball possession of 50%, which is the mean and the median. These measures of central tendency can help us divide the dataset into teams with more ball possession and teams with less ball possession.
#
# Now that we've explored the centre and spread of average ball possession (%), let's examine how average ball possession (%) relates to total goals. The scatter plot displays average ball possession (%) on the x-axis and total goals on the y-axis. Total goals range from Marseille with 2 to Bayern with 18. Hover over the data points to view more information.
# + slideshow={"slide_type": "slide"}
scatter_total_df = px.scatter(possession_df,
x="Average ball possession (%)",
y="Total goals", # change y to Goal difference
hover_data=["Team"],
trendline="ols",
title="Relationship between average ball possession (%) and total goals")
scatter_total_df.show()
# -
# Notice that the line of best fit indicates a positive trend with total goals increasing with average ball possession.
#
# Hover over the data points to find out more information. The data points further from the line seem to tell a different story. Bayern has the highest ball possession at 61% and the most total goals at 18. Marseille, on the other hand, has the least amount of total goals at 2 with ball possession of 50%.
#
# While total goals can help understand how successful teams are, the idea of possession involves keeping the ball to score and keeping the ball to prevent the other team from scoring. It might be interesting to explore the relationship between average ball possession and goal difference.
#
# Goal difference is the addition of total goals scored minus goals that other teams have scored against the team. The scatter plot below visualizes the relationship between average ball possession (%) and goal difference by team. The goal difference on the y-axis contains negative values; the negative values mean that a team has more goals scored against than more goals scored. Hover over the data points to view more information.
# + slideshow={"slide_type": "slide"}
scatter_difference_df = px.scatter(possession_df,
x="Average ball possession (%)",
y="Goal difference",
size="Total goals",
color="Team",
title="Relationship between average ball possession (%) and goal difference by team")
scatter_difference_df.show()
# -
# Notice that Bayern leads in ball possession at 61% as well as in both total goals at 18 with a goal difference of 16 -- that means only 2 goals were scored against Bayern within the 6 games prior to knock-outs.
#
# Ferencvaros has the lowest goal difference of -12 and ball possession of 47%. Marseille with the lowest total goals of 2 has the second lowest goal difference of -11 and ball possession 50% of game play.
# This cell prevents the next section from running automatically
# %%script false
#❗️Run this cell with Shift+Enter
import interactive as i
i.challenge1()
#❗️Run this cell with Shift+Enter
import interactive as i
i.challenge2()
#❗️Run this cell with Shift+Enter
import interactive as i
i.challenge3()
# To reset the last three interactive questions, select Kernel and then Restart & Clear Output from the menu.
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| notebooks/sports/soccer-partIII.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.3 64-bit (''base'': conda)'
# name: pycharm-6784cf5
# ---
# + pycharm={"name": "#%%\n"}
import sys, os
root_dir = '\\'.join(os.getcwd().split('\\')[:-1])
sys.path.append(root_dir)
from copy import deepcopy
from functools import reduce
from buildingBlocks.Synthesis import Chain
from buildingBlocks.Synthesis.Synthesizer import Synthesizer
from buildingBlocks.default.Tokens import Constant, Sin, Product, Imp, Power, ImpComplex
from buildingBlocks.Globals.GlobalEntities import set_constants, get_full_constant
from buildingBlocks.default.EvolutionEntities import Equation
from buildingBlocks.default.EvolutionEntities import PopulationOfEquations
from buildingBlocks.Globals.supplementary.FrequencyProcessor import FrequencyProcessor4TimeSeries as fp
import buildingBlocks.Globals.GlobalEntities as Bg
import buildingBlocks.Builder.OperatorsBuilder as Ob
from load_data import get_data
from moea_dd.src.moeadd import *
from moea_dd.src.moeadd_supplementary import *
from copy import deepcopy
import moea_dd.forMoeadd.entities.EvolutionaryEntities as Ee
import moea_dd.forMoeadd.entities.Objectives as Objs
import numpy as np
import matplotlib.pyplot as plt
from time import perf_counter
# -
# ## Set tokens from which algorithm will be built model-expression
# Constant token is the target that will be approximated by other tokens
# ImpComplex is a set of splitted single pulses obtained from periodic impulse
# +
token1 = Constant(val=None, name_='target', mandatory=1)
token2 = Sin(optimize_id=1, name_='Sin')
token3 = Imp(optimize_id=1, name_='Imp')
token4 = Power(optimize_id=2, name_='Power')
pattern = Imp(optimize_id=1)
impComplex_token = ImpComplex(pattern=pattern, optimize_id=3)
# -
# ## Choose dataset
# There are 3 datasets of series with different structure. Good meta parameters (build_settings) of the algorithm are selected for each of them.
# ### Time series with strong seasonality
data = get_data(0)
build_settings = {
'mutation': {
'simple': dict(intensive=1, increase_prob=1),
'complex': dict(prob=0., threshold=0.1, complex_token=impComplex_token)
},
'crossover': {
'simple': dict(intensive=1, increase_prob=0.3)
},
'tokens': [token1, token2, token3, token4],
'population': {
'size': 10
}
}
# ### Time series without seasonality
i = 2 #3
data = get_data(i)
build_settings = {
'mutation': {
'simple': dict(intensive=1, increase_prob=1),
'complex': dict(prob=0.5, threshold=0.5, complex_token=impComplex_token)
},
'crossover': {
'simple': dict(intensive=1, increase_prob=0.3)
},
'tokens': [token1, token3],
'population': {
'size': 10
}
}
# ## Get target and grid on which target will be approximated
# +
grid = data['grid']
target = data['target']
target -= target.mean()
set_constants(target=target)
# -
# ## Confirm build_settings and set info about individual into evolutionary operators
# max_tokens is a regularization parameter; without it the model overfits
individ = Equation(max_tokens=10)
Ob.set_operators(grid, individ, build_settings)
# # Choose type of algorithm
# Evaluate only one of the next two cells.
# ## Single-objective optimization
# optimizing only approximated quality of the model
# +
population = PopulationOfEquations(iterations=2)
time = perf_counter()
population.evolutionary()
time = perf_counter() - time
inds = population.structure
idxsort = np.argsort(list(map(lambda x: x.fitness, inds)))
inds = [inds[i] for i in idxsort]
time
# -
# ## Multi-objective optimization
# This is an add-on to the previous algorithm, additionally optimizing model complexity (the number of tokens in the model). Has additional multi_build_settings.
# +
multi_build_settings = {
'weights_num': 5,
"pop_size": 20,
"epochs": 5
}
multi_individ = Ee.MoeaddIndividTS(x=individ, obj_funs=[Objs.objective1, Objs.objective2])
pop_constr = Ee.PopulationConstructor(pattern=multi_individ)
optimizer = moeadd_optimizer(pop_constructor=pop_constr,
optimized_functionals=[Objs.objective1, Objs.objective2],
solution_params=None,
delta=1 / 50.,
neighbors_number=5,
weights_num=multi_build_settings['weights_num'],
pop_size=multi_build_settings['pop_size'])
operator = Ee.EvolutionaryOperator()
optimizer.set_evolutionary(operator=operator)
optimizer.pass_best_objectives(0, 0)
def simple_selector(sorted_neighbors, number_of_neighbors=4):
    """Neighborhood selector: keep only the first ``number_of_neighbors`` entries
    of an already-sorted neighbor sequence."""
    head = slice(0, number_of_neighbors)
    return sorted_neighbors[head]
optimizer.optimize(neighborhood_selector=simple_selector, delta=0.95,
neighborhood_selector_params=(4,), epochs=multi_build_settings['epochs'], PBI_penalty=0.75)
inds = list(map(lambda x: x.vals, optimizer.pareto_levels.levels[0]))
idxsort = np.argsort(list(map(lambda x: x.fitness, inds)))
inds = [inds[i] for i in idxsort]
# -
# ## Visualize Pareto-front (for multi-objective optimization)
# +
prec = []
length = []
for idx, ind in enumerate(inds):
prec.append(ind.fitness)
length.append(len(ind.structure))
plt.plot(length[1:], prec[1:], '-o', color='brown')
plt.title('Pareto frontier')
plt.ylabel('Model quality')
plt.xlabel('Model complexity')
# -
# ## Choose one model from proposed individuals
# They are sorted by their quality (the first objective)
# +
n = 0
ind = deepcopy(inds[n])
print(ind.formula(), ind.fitness)
residuals = ind.value(grid)
model = target + residuals
model -= model.mean()
residuals -= residuals.mean()
# -
# # Generate synthetics based on the model
# Use parameters threshold_value and threshold_gaps (see sklearn.cluster.AgglomerativeClustering) to control stochasticity of synthetics (only for weakly seasonal time series), control amplitude noise of synthetic by adding custom residuals (np.array with realization of a random variable (any length, preferably more))
# +
tmp_ind = deepcopy(ind)
synth_settings = {
"threshold_value": 2,
"threshold_gaps": 0.5,
"residuals": residuals # or None
}
split_imps= {
'make': False,
'min_ampl': 0.05,
'max_freq': float('inf')
}
clusterer_value = Chain.ClustererPulses(
distance_threshold=synth_settings["threshold_value"],
params=dict(grid=grid)
)
clusterer_gaps = Chain.ClustererGaps(distance_threshold=synth_settings["threshold_gaps"])
coder = Chain.Coder2(
clusterer_value=clusterer_value,
clusterer_gaps=clusterer_gaps,
individ=tmp_ind,
params=dict(grid=grid)
)
mc = Chain.BayesianChain()
syc = Synthesizer(
individ=tmp_ind,
grid=grid,
coder=coder,
markov_chain=mc,
residuals=synth_settings['residuals'],
split_imps=split_imps
)
# print(tmp_ind.formula())
syc.fit()
# -
# # Having fitted generative model - start generate
# +
target_spec = fp.fft(grid, target)
model_spec = fp.fft(grid, model)
dt = grid[1] - grid[0]
new_grid = np.arange(grid.min(), 1.2 * grid.max(), dt)
# -
# ## Generate synthetic sample
#
# Every evaluation of this cell gives different results
synthetic = syc.predict(new_grid)[:len(grid)]
synthetic -= synthetic.mean()
synthetic_spec = fp.fft(grid, synthetic)
synthetic_quality = (np.var((np.abs(target_spec[1]) - np.abs(synthetic_spec[1])))
/ np.var(np.abs(target_spec[1])))/(np.var(target - synthetic) / np.var(target))
# ## See what you obtain
# ### In the time domain
# +
fig = plt.figure('orig and synthetic')
axs = fig.subplots(3, 1, sharex=True, sharey=True)
ts = [target, model]
# ax = [None for _ in range(3)]
labels = ['original', 'model', 'synthetic']
colors = ['blue', 'orange', 'green']
for i in range(3):
if i == 2:
axs[i].plot(grid, synthetic, color='red', linewidth=0.5, label='synthetic: quality {}'.format(round(synthetic_quality, 3)))
else:
axs[i].plot(grid, ts[i], label=labels[i], color=colors[i], linewidth=0.5)
axs[i].grid(True)
axs[i].set_xlabel('time')
axs[i].set_ylabel('amplitude')
axs[i].legend()
fig.align_labels(axs)
fig.tight_layout()
# -
# ### Spectra
# +
fig_sp = plt.figure('spectra')
axs = fig_sp.subplots(3, 1, sharex=True, sharey=True)
specs = [target_spec, model_spec]
for i in range(3):
if i == 2:
axs[i].plot(synthetic_spec[0], np.abs(synthetic_spec[1]), label=labels[i], color='red', linewidth=0.5)
else:
axs[i].plot(specs[i][0], np.abs(specs[i][1]), label=labels[i], color=colors[i], linewidth=0.5)
axs[i].grid(True)
axs[i].set_xlabel('frequency')
axs[i].set_ylabel('amplitude')
axs[i].legend()
fig_sp.align_labels(axs)
fig_sp.tight_layout()
plt.show()
# -
| examples/multi_objective_algebraic_expression/ex1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.10 64-bit (''blackjax'': conda)'
# name: python3710jvsc74a57bd0da83a268153c84bc9ca9703c2cea4521d98c583fc99858c9f792a1bd45c27e3c
# ---
# + [markdown] id="397995ab"
# # Use BlackJAX with PyMC3
# Author: <NAME>
# + [markdown] id="bb51846d"
# BlackJAX can take any log-probability function as long as it is compatible with JAX's JIT. In this notebook we show how we can use PyMC as a modeling language and BlackJAX as an inference library.
#
# For this notebook to run you will need to install PyMC3:
#
# ```bash
# pip install pymc3
# ```
# + id="ujBgnmNdOwRG"
# Higher versions will have omnistaging disabled which will throw errors when using theano
# !pip install jax==0.2.10
# + id="3a905211"
import jax
import numpy as np
import pymc3 as pm
import pymc3.sampling_jax
import blackjax.nuts as nuts
import blackjax.stan_warmup as stan_warmup
print(f"Running on PyMC3 v{pm.__version__}")
# + [markdown] id="8VrYFaoIX--y"
# ## Data
#
# Please refer to the [original TFP example](https://www.tensorflow.org/probability/examples/Eight_Schools) for a description of the problem and the model that is used.
# + id="imotOe9sUNYF"
# Data of the Eight Schools Model
J = 8
y = np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0])
sigma = np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0])
# + [markdown] id="aabSQ11iYGZw"
# # Model
#
# + id="PiBv9iOvRK0f"
with pm.Model() as model:
    mu = pm.Normal("mu", mu=0.0, sigma=10.0)
    tau = pm.HalfCauchy("tau", 5.0)
    # Standardized (non-centered) school effects.
    theta = pm.Normal("theta", mu=0, sigma=1, shape=J)
    # Actual school effects: mu + tau * theta (non-centered parameterization).
    theta_1 = mu + tau * theta
    # Bug fix: the likelihood must use the transformed effects theta_1 —
    # using raw `theta` left theta_1 unused and disconnected mu/tau from the data.
    obs = pm.Normal("obs", mu=theta_1, sigma=sigma, shape=J, observed=y)
# + [markdown] id="VKMdLEu1Y5jb"
# # Sampling using PyMC NUTS Sampler
# + colab={"base_uri": "https://localhost:8080/", "height": 244} id="0ZyMxwLFY_ZI" outputId="793af037-31e4-4e55-9c76-231c9d78532d"
# %%time
with model:
posterior = pm.sample(50_000, chains=1)
# + [markdown] id="3I6zXC-JZCfs"
# # Sampling using PyMC JAX Numpyro NUTS sampler
# + colab={"base_uri": "https://localhost:8080/"} id="daQ5OO6aZS9t" outputId="d865c9dc-45ae-4baa-c643-f145492ea4ab"
# %%time
with model:
hierarchical_trace_jax = pm.sampling_jax.sample_numpyro_nuts(
50_000, target_accept=0.9, chains=1
)
# + [markdown] id="h8cMqFwiZjxS"
# # Sampling using BlackJax
#
# ## Configuring the model for BlackJax
#
# + id="cTlcZCYmidZ6"
from theano.graph.fg import FunctionGraph
from theano.link.jax.jax_dispatch import jax_funcify
seed = jax.random.PRNGKey(1234)
chains = 1
# Get the FunctionGraph of the model.
fgraph = FunctionGraph(model.free_RVs, [model.logpt])
# Jax funcify builds Jax variant of the FunctionGraph.
fns = jax_funcify(fgraph)
logp_fn_jax = fns[0]
# Now we build a Jax variant of the initial state/inputs to the model.
rv_names = [rv.name for rv in model.free_RVs]
init_state = [model.test_point[rv_name] for rv_name in rv_names]
init_state_batched = jax.tree_map(
lambda x: np.repeat(x[None, ...], chains, axis=0), init_state
)
# + id="x_TiUUVMifeL"
# Then we transform the Jaxified input and FunctionGraph to a BlackJax NUTS sampler
potential = lambda x: -logp_fn_jax(*x)
initial_position = init_state
initial_state = nuts.new_state(initial_position, potential)
# + [markdown] id="tsnfayfaispl"
# ## Sampling
# + colab={"base_uri": "https://localhost:8080/"} id="6ByULStmWDA2" outputId="26fa3f89-9b55-46f6-c89c-9edaca155c98"
# %%time
kernel_factory = lambda step_size, inverse_mass_matrix: nuts.kernel(
potential, step_size, inverse_mass_matrix
)
last_state, (step_size, inverse_mass_matrix), _ = stan_warmup.run(
seed, kernel_factory, initial_state, 1000
)
def inference_loop(rng_key, kernel, initial_state, num_samples):
    """Draw ``num_samples`` successive states by repeatedly applying ``kernel``,
    compiled into a single ``jax.lax.scan`` loop.

    Returns the stacked states and the per-step info objects.
    """

    def step(carry, key):
        next_state, info = kernel(key, carry)
        return next_state, (next_state, info)

    sample_keys = jax.random.split(rng_key, num_samples)
    _, (states, infos) = jax.lax.scan(step, initial_state, sample_keys)
    return states, infos
# Build the kernel using the step size and inverse mass matrix returned from the window adaptation
kernel = kernel_factory(step_size, inverse_mass_matrix)
# Sample from the posterior distribution
states, infos = inference_loop(seed, kernel, last_state, 50_000)
# + id="HoDBN7FX4amy"
| notebooks/use_with_pymc3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="zsWZU7NMi2vJ"
# ### ModelCheckpoint.
# Callback to save the Keras model or model weights at some frequency.
#
# ```py
# tf.keras.callbacks.ModelCheckpoint(
# filepath, monitor='val_loss', verbose=0, save_best_only=False,
# save_weights_only=False, mode='auto', save_freq='epoch',
# options=None, **kwargs
# )
# ```
#
# ### Imports
# + id="7InLgJZzivXz"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
import numpy as np
# + [markdown] id="vocIXDXRnldb"
# ### Configuring the ``device`` for the environment.
#
# + id="Albz4LFHnkve"
# Enable on-demand GPU memory growth so TensorFlow does not grab all VRAM up front.
physical_devices = tf.config.list_physical_devices("GPU")
# Guard: on a CPU-only host the GPU list is empty and indexing [0] would raise.
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
# + [markdown] id="deJx0d64nPlS"
# ### Let's create a model that will train on the `MNIST` dataset.
# + colab={"base_uri": "https://localhost:8080/"} id="n9ZTB1WtjadF" outputId="e0ea99b3-2f1d-44e3-c1fa-ae9786f27342"
(X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
X_train.shape
# + id="kgsWuJXLlYdB"
def normalize(image):
    """Convert a uint8 image array to a float32 tensor scaled into [0, 1]."""
    as_float = image.astype('float32')
    return tf.convert_to_tensor(as_float) / 255
# + id="pRo-bFwtlJIn"
X_train_tensors =tf.convert_to_tensor(list(map(normalize, X_train)))
X_test_tensors = tf.convert_to_tensor(list(map(normalize, X_test)))
y_test_tensors = tf.convert_to_tensor(y_test)
y_train_tensors = tf.convert_to_tensor(y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="9XgJ-wjHlI_W" outputId="2e34bde1-e78a-4478-ea84-6322cbc28cf3"
y_test_tensors[:2]
# + [markdown] id="YX8ftKQiHIqt"
# ### Creating a `ModelCheckpoint` callback.
# + id="FdPfEESOlI1B"
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath="checkpoint.h5",
save_weights_only=True,
monitor='val_accuracy',
mode='max',
save_best_only=True
)
# + colab={"base_uri": "https://localhost:8080/"} id="dsWfwQMKjq7q" outputId="ea2b82af-dc63-4090-9011-93ef5650e427"
model = keras.Sequential([
keras.layers.Input(shape=(28, 28,)),
keras.layers.Flatten(),
keras.layers.Dense(64, activation="relu"),
keras.layers.Dense(128, activation="relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss=keras.losses.SparseCategoricalCrossentropy(),
optimizer="adam",
metrics=["accuracy"]
)
history = model.fit(X_train_tensors, y_train_tensors, epochs=10,
verbose=1, batch_size=32,
validation_data=(X_test_tensors, y_test_tensors),
callbacks=[model_checkpoint_callback]
)
# + id="truuUdfIHyV8"
| 03_Callbacks/05_Model_Checkpoint/01_ModelCheckpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div>
# <ul>
# <li><a href="https://blog.dominodatalab.com/lesser-known-ways-of-using-notebooks/" target="_blank">lesser-known-ways-of-using-notebooks</a></li>
# <li><a href="https://ipywidgets.readthedocs.io/en/latest/embedding.html" target="_blank">Embedding Widgets</a></li>
# </ul>
# </div>
# # %lsmagic
from IPython.display import display
from ipywidgets import widgets
# ### Create a text input
# +
text = widgets.Text()
display(text)
def handle_submit(sender):
    # Echo the text widget's current contents when the user submits (presses Enter).
    # NOTE(review): relies on the `text` widget defined in this cell.
    print(text.value)
text.on_submit(handle_submit)
# -
# ## Buttons!
# +
# Create output widget for capturing event data.
out = widgets.Output()
# Two buttons: One to increment our counter and one to reset the counter
btn_incr = widgets.Button(
description="Click!",
layout = {"border": "5px"}
)
btn_reset = widgets.Button(
description="Reset counter!",
layout = {"border": "5px"}
)
out.value = 0
def incr():
    """Increment the click counter stored on the shared Output widget and report it."""
    out.value = out.value + 1
    print(f"Button clicked {out.value} times!")
display(btn_incr, btn_reset, out)
def on_btn_clicked(b):
    # Increment-button handler: delegate to the shared counter helper.
    incr()
def on_reset_clicked(b):
    # Clear output before resetting to zero
    out.clear_output()
    out.value = 0
btn_incr.on_click(on_btn_clicked)
btn_reset.on_click(on_reset_clicked)
# + [InteractExamples](https://github.com/jupyter-widgets/ipywidgets/blob/bb1c473f60454809209f006870e2785e9028e03e/docs/source/examples/Using%20Interact.ipynb)
# + [Using Three.js](https://github.com/jupyter-widgets/pythreejs)
# +
# callbacks require the signature handler(change)
# Here's an output of that event.
int_range = widgets.IntSlider()
output2 = widgets.Output()
display(int_range, output2)
def on_value_change(change):
    # Slider observer: render each change event dict inside the Output widget.
    with output2:
        # print(change['new'])
        print(change)
int_range.observe(on_value_change, names='value')
# -
| notebook-samples/.ipynb_checkpoints/interactivity-1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### "What just happened???"
#
# Here we take an existing modflow model and set up a very complex parameterization system for arrays and boundary conditions. All parameters are set up as multipliers: the original inputs from the modflow model are saved in separate files and, during the forward run, they are multiplied by the parameters to form new model inputs. The forward run script ("forward_run.py") is also written. And a somewhat meaningful prior covariance matrix is constructed from geostatistical structures without any additional arguments.
# %matplotlib inline
import os
import platform
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import flopy
import pyemu
# +
nam_file = "freyberg.nam"
org_model_ws = "freyberg_sfr_update"
temp_model_ws = "temp"
new_model_ws = "template"
# load the model, change the working directory, and run once just to make sure everything is working
m = flopy.modflow.Modflow.load(nam_file,model_ws=org_model_ws,check=False, exe_name="mfnwt",
forgive=False,verbose=True)
m.change_model_ws(temp_model_ws,reset_external=True)
m.write_input()
EXE_DIR = os.path.join("..","bin")
if "window" in platform.platform().lower():
EXE_DIR = os.path.join(EXE_DIR,"win")
elif "darwin" in platform.platform().lower():
EXE_DIR = os.path.join(EXE_DIR,"mac")
else:
EXE_DIR = os.path.join(EXE_DIR,"linux")
[shutil.copy2(os.path.join(EXE_DIR,f),os.path.join(temp_model_ws,f)) for f in os.listdir(EXE_DIR)]
# Run the model once to verify the written inputs work; a failure here is non-fatal.
try:
    m.run_model()
except Exception:
    # Bug fix: the original `except():` is an EMPTY tuple of exception types and
    # catches nothing, so any run failure propagated despite the intent to ignore it.
    pass
# -
# You want some pilot points? We got that...how about one set of recharge multiplier pilot points applied to all stress periods? and sy in layer 1?
m.get_package_list()
# ## Parameterization
pp_props = [["upw.sy",0], ["rch.rech",None]]
# You want some constants (uniform value multipliers)? We got that too....
const_props = []
for iper in range(m.nper): # recharge for past and future
const_props.append(["rch.rech",iper])
for k in range(m.nlay):
const_props.append(["upw.hk",k])
const_props.append(["upw.ss",k])
# You want grid-scale parameter flexibility for hk in all layers? We got that too...and how about sy in layer 1 and vka in layer 2 while we are at it
grid_props = [["upw.sy",0],["upw.vka",1]]
for k in range(m.nlay):
grid_props.append(["upw.hk",k])
# Some people like using zones...so we have those too
zn_array = np.loadtxt(os.path.join("Freyberg_Truth","hk.zones"))
plt.imshow(zn_array)
zone_props = [["upw.ss",0], ["rch.rech",0],["rch.rech",1]]
k_zone_dict = {k:zn_array for k in range(m.nlay)}
# But wait, boundary conditions are uncertain too...Can we add some parameter to represent that uncertainty? You know it!
bc_props = []
for iper in range(m.nper):
bc_props.append(["wel.flux",iper])
# ## Observations
#
# Since observations are "free", we can carry lots of them around...
# here were are building a list of stress period, layer pairs (zero-based) that we will use
# to set up observations from every active model cell for a given pair
hds_kperk = []
for iper in range(m.nper):
for k in range(m.nlay):
hds_kperk.append([iper,k])
# ## Here it goes...
# Now we will use all these args to construct a complete PEST interface - template files, instruction files, control file and even the forward run script! All parameters are set up as multipliers against the existing inputs in the modflow model - the existing inputs are extracted (with flopy) and saved in a sub directory for safekeeping and for multiplying against during a forward model run. The constructor will also write a full (covariates included) prior parameter covariance matrix, which is needed for all sorts of important analyses.
# +
mfp_boss = pyemu.helpers.PstFromFlopyModel(nam_file,new_model_ws,org_model_ws=temp_model_ws,
pp_props=pp_props,spatial_list_props=bc_props,
zone_props=zone_props,grid_props=grid_props,
const_props=const_props,k_zone_dict=k_zone_dict,
remove_existing=True,pp_space=4,sfr_pars=True,
sfr_obs=True,hds_kperk=hds_kperk)
EXE_DIR = os.path.join("..","bin")
if "window" in platform.platform().lower():
EXE_DIR = os.path.join(EXE_DIR,"win")
elif "darwin" in platform.platform().lower():
EXE_DIR = os.path.join(EXE_DIR,"mac")
else:
EXE_DIR = os.path.join(EXE_DIR,"linux")
[shutil.copy2(os.path.join(EXE_DIR,f),os.path.join(new_model_ws,f)) for f in os.listdir(EXE_DIR)]
# -
# The ``mpf_boss`` instance containts a ``pyemu.Pst`` object (its already been saved to a file, but you may want to manipulate it more)
pst = mfp_boss.pst
pst.npar,pst.nobs
# That was crazy easy - this used to take me weeks to get a PEST interface set up with this level of complexity
pst.template_files
pst.instruction_files
# Lets look at that important prior covariance matrix
cov = pyemu.Cov.from_ascii(os.path.join(new_model_ws,m.name+".pst.prior.cov"))
cov = cov.x
cov[cov==0] = np.NaN
plt.imshow(cov)
# ### adjusting parameter bounds
# Let's say you don't like the parameter bounds in the new control file (note you can pass a par_bounds arg to the constructor).
pst.parameter_data
# Let's change the ``welflux`` pars
par = pst.parameter_data #get a ref to the parameter data dataframe
wpars = par.pargp=="welflux_k02"
par.loc[wpars]
par.loc[wpars,"parubnd"] = 1.1
par.loc[wpars,"parlbnd"] = 0.9
pst.parameter_data
# now we need to rebuild the prior parameter covariance matrix
cov = mfp_boss.build_prior()
# # Boom!
x = cov.x
x[x==0.0] = np.NaN
plt.imshow(x)
| examples/MODFLOW_to_PEST_even_more_boss.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cfollmer
# language: python
# name: cfollmer
# ---
# +
import torch
import torch.nn.functional as F
import torchsde
from torchvision import datasets, transforms
import math
import numpy as np
import pandas as pd
from tqdm import tqdm
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from cfollmer.evaluation_utils import ECE
import cfollmer.functional as functional
from cfollmer.objectives import relative_entropy_control_cost
from cfollmer.drifts import SimpleForwardNetBN
from cfollmer.sampler_utils import FollmerSDE
# -
device = "cuda" if torch.cuda.is_available() else "cpu"
a9a_train = pd.read_csv("../data/a9a.csv", header=None)
a9a_test = pd.read_csv("../data/a9a_t.csv", header=None)
# +
X_train = a9a_train.values[:, :-1]
X_test = a9a_test.values[:, :-1]
y_train = a9a_train.values[:, -1]
y_test = a9a_test.values[:, -1]
X_train = torch.tensor(X_train, device=device, dtype=torch.float)
X_test = torch.tensor(X_test, device=device, dtype=torch.float)
y_train = torch.tensor(y_train, device=device, dtype=torch.float)
y_test = torch.tensor(y_test, device=device, dtype=torch.float)
X_train = F.pad(X_train, (0, 1), value=1.0)
X_test = F.pad(X_test, (0, 1), value=1.0)
N_train, dim = X_train.shape
N_test = X_test.shape[0]
# -
N_train, N_test, dim
# +
def log_prior(beta):
    """Unnormalized log-density of an i.i.d. Laplace prior on the weights."""
    return -beta.abs().sum()

def log_likelihood(x, y, beta):
    """Bernoulli log-likelihood of labels ``y`` under a logistic model with weights ``beta``."""
    logits = x @ beta
    return -F.binary_cross_entropy_with_logits(logits, y, reduction="sum")

def log_likelihood_batch(x, y, beta_batch):
    """Vectorized ``log_likelihood`` over a batch of weight vectors (leading axis)."""
    return torch.vmap(lambda b: log_likelihood(x, y, b))(beta_batch)

def log_posterior(x, y, params):
    """Minibatch estimate of the unnormalized log-posterior (likelihood rescaled to full data)."""
    scale = N_train / x.shape[0]
    return log_prior(params) + scale * log_likelihood(x, y, params)

def log_posterior_batch(x, y, beta_batch):
    """Vectorized ``log_posterior`` over a batch of weight vectors (leading axis)."""
    return torch.vmap(lambda b: log_posterior(x, y, b))(beta_batch)
# -
gamma = 0.2**2
n_epochs = 300
data_batch_size = N_train
param_batch_size = 32
def train(gamma, n_epochs, data_batch_size, param_batch_size, dt=0.05):
    """Fit the Föllmer SDE drift network by minimizing the relative-entropy control cost.

    Args:
        gamma: diffusion scale passed through to FollmerSDE.
        n_epochs: number of passes over the training data.
        data_batch_size: minibatch size for (X_train, y_train).
        param_batch_size: number of SDE trajectories sampled per loss evaluation.
        dt: Euler discretization step for the SDE integration.

    Returns:
        (sde, losses): the trained FollmerSDE and a numpy array of per-minibatch
        losses with shape (n_epochs, batches_per_epoch).

    NOTE(review): relies on module-level globals (X_train, y_train, N_train,
    dim, device) defined earlier in the notebook.
    """
    sde = FollmerSDE(gamma, SimpleForwardNetBN(input_dim=dim, width=300)).to(device)
    optimizer = torch.optim.Adam(sde.parameters(), lr=1e-4)
    losses = []
    for _ in tqdm(range(n_epochs)):
        epoch_losses = []
        # Fresh random permutation each epoch so minibatches differ between epochs.
        perm = torch.randperm(N_train)
        for i in range(0, N_train, data_batch_size):
            x = X_train[perm[i:min(i + data_batch_size, N_train)]]
            y = y_train[perm[i:min(i + data_batch_size, N_train)]]
            optimizer.zero_grad()
            # Log-posterior restricted to the current minibatch.
            partial_log_p = lambda params_batch: log_posterior_batch(x, y, params_batch)
            loss = relative_entropy_control_cost(sde, partial_log_p, param_batch_size=param_batch_size, dt=dt, device=device)
            loss.backward()
            epoch_losses.append(loss.detach().cpu().numpy())
            optimizer.step()
        losses.append(epoch_losses)
    losses = np.array(losses)
    return sde, losses
sde, losses = train(gamma, n_epochs, data_batch_size, param_batch_size, dt=0.05)
def evaluate(beta_samples):
    """Score posterior weight samples on the held-out test set.

    Args:
        beta_samples: tensor of shape (num_samples, dim) of posterior draws.

    Returns:
        (ece, logp, acc): expected calibration error, mean test log-likelihood
        per test example, and accuracy of the posterior-mean prediction.

    NOTE(review): uses module-level globals X_test, y_test, N_test and the ECE helper.
    """
    with torch.no_grad():
        predict_func = lambda beta : X_test @ beta
        predict_func = torch.vmap(predict_func)
        out = predict_func(beta_samples)
        out = torch.sigmoid(out)
        # Posterior predictive probability: average over the weight samples.
        probs = torch.mean(out, dim=0)
        preds = torch.round(probs)
        logp = log_likelihood_batch(X_test, y_test, beta_samples)
        logp = torch.mean(logp) / N_test
        # Confidence of the predicted class (max of p and 1 - p).
        conf = torch.max(probs, 1 - probs)
        logp = logp.cpu().numpy()
        probs = probs.cpu().numpy()
        preds = preds.cpu().numpy()
        conf = conf.cpu().numpy()
        ece = ECE(conf, preds, y_test.cpu().numpy())
        acc = np.mean(preds == y_test.cpu().numpy())
    return ece, logp, acc
with torch.no_grad():
beta_samples = sde.sample(100, dt=0.01, device=device)
ece, logps, acc = evaluate(beta_samples)
ece, logps, acc
# +
n_runs = 5
eces, logps, accs = [], [], []
for i in range(n_runs):
sde, losses = train(gamma, n_epochs, data_batch_size, param_batch_size, dt=0.05)
with torch.no_grad():
beta_samples = sde.sample(100, dt=0.01, device=device)
ece, logp, acc = evaluate(beta_samples)
eces.append(ece)
logps.append(logp)
accs.append(acc)
# -
SBP_df = pd.DataFrame({"ECE": eces, "log predictive": np.array(logps), "accuracy": accs})
SBP_df
SBP_df.describe()
@torch.enable_grad()
def gradient(x, y, beta):
    """Evaluate the log-posterior at ``beta`` and its gradient.

    Returns (loss, grad): the log-posterior value as a numpy scalar and the
    gradient w.r.t. ``beta`` as a tensor.
    """
    leaf = beta.clone().requires_grad_(True)
    value = log_posterior(x, y, leaf)
    (grad,) = torch.autograd.grad(value, leaf)
    return value.detach().cpu().numpy(), grad
def step_size(n):
    """Polynomially decaying SGLD step-size schedule: 0.001 / sqrt(1 + n)."""
    decay = (1 + n) ** 0.5
    return 0.001 / decay
def sgld(n_epochs, data_batch_size):
    """Stochastic Gradient Langevin Dynamics over the logistic-regression posterior.

    Args:
        n_epochs: number of passes over the training data.
        data_batch_size: minibatch size.

    Returns:
        (beta_samples, losses, accuracies, logps): one weight sample and one
        log-posterior value per minibatch step, plus per-epoch test accuracy
        and mean test log-likelihood.

    NOTE(review): relies on module-level globals X_train, y_train, X_test,
    y_test, N_train, dim, device, and the gradient/step_size helpers.
    """
    step = 0
    beta = torch.zeros(dim).float().to(device)
    losses = []
    accuracies = []
    logps = []
    beta_samples = []
    for _ in tqdm(range(n_epochs)):
        perm = torch.randperm(N_train)
        for i in range(0, N_train, data_batch_size):
            x = X_train[perm[i:min(i + data_batch_size, N_train)]]
            y = y_train[perm[i:min(i + data_batch_size, N_train)]]
            eps = step_size(step)
            loss, grad = gradient(x, y, beta)
            # Langevin update: half-step gradient ascent plus Gaussian noise of scale sqrt(eps).
            beta = beta + 0.5 * eps * grad + np.sqrt(eps) * torch.randn_like(beta)
            step += 1
            losses.append(loss)
            beta_samples.append(beta)
        # End-of-epoch evaluation on the held-out test set.
        with torch.no_grad():
            test_pred = torch.round(torch.sigmoid(X_test @ beta))
            test_acc = torch.mean(1 - torch.abs(y_test - test_pred))
            logp = log_likelihood(X_test, y_test, beta) / X_test.shape[0]
            accuracies.append(test_acc.cpu().numpy())
            logps.append(logp.cpu().numpy())
    return beta_samples, losses, accuracies, logps,
beta_samples, losses, accuracies, logps = sgld(n_epochs, data_batch_size)
plt.plot(losses)
plt.plot(accuracies)
plt.plot(logps)
beta_samples = torch.stack(beta_samples[-100:])
evaluate(beta_samples)
# +
n_runs = 5
eces, logps, accs = [], [], []
for i in range(n_runs):
beta_samples, _, _, _ = sgld(n_epochs, data_batch_size)
beta_samples = torch.stack(beta_samples[-100:])
ece, logp, acc = evaluate(beta_samples)
eces.append(ece)
logps.append(logp)
accs.append(acc)
# -
SGLD_df = pd.DataFrame({"ECE": eces, "log predictive": np.array(logps), "accuracy": accs})
SGLD_df
SGLD_df.describe()
| notebooks/Logistic Regression - a9a.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import requests
import pandas as pd
tx_url = "https://api.etherscan.io/api?module=account&action=txlist&address=0xb1332307bd818db68e372269fd515f6523cd4bce&sort=asc&apikey=<KEY>"
token_url = "https://api.etherscan.io/api?module=account&action=tokennfttx&address=0xb1332307bd818db68e372269fd515f6523cd4bce&sort=asc&apikey=<KEY>"
#tx_url = "https://api.etherscan.io/api?module=account&action=txlist&address=0x9bc27a47b4413c51f884ae4e4b9170f3a0d7f742&sort=asc&apikey=<KEY>"
#token_url = "https://api.etherscan.io/api?module=account&action=tokennfttx&address=0x9bc27a47b4413c51f884ae4e4b9170f3a0d7f742&sort=asc&apikey=<KEY>"
tx_url = "https://api.etherscan.io/api?module=account&action=txlist&address=0x0239769a1adf4def9f07da824b80b9c4fcb59593&sort=asc&apikey=<KEY>"
token_url = "https://api.etherscan.io/api?module=account&action=tokennfttx&address=0x0239769a1adf4def9f07da824b80b9c4fcb59593&sort=asc&apikey=<KEY>"
# -
response = requests.get(tx_url)
data = response.json()
txlist_df = pd.DataFrame(data["result"])
txlist_df
response = requests.get(token_url)
data = response.json()
tokennfttx_df = pd.DataFrame(data["result"])
tokennfttx_df
txlist_df
tokennfttx_df
tokennfttx_df.columns
df = txlist_df.merge(tokennfttx_df, how='inner', on=["timeStamp"])
df
# +
df1 = pd.DataFrame(df[["value", "timeStamp", "tokenSymbol", "gas_x", "gasPrice_x", "gasUsed_x", "tokenName", "tokenID"]].groupby(["timeStamp", "tokenSymbol"])["tokenID"].apply(list)).reset_index()
df2 = df[["value", "timeStamp", "tokenSymbol", "gas_x", "gasPrice_x", "gasUsed_x", "tokenName", "tokenID"]].drop_duplicates(subset=['timeStamp'], keep='last').reset_index()
#[''].apply(list)
# -
df = pd.merge(df1,df2,how="inner", on="timeStamp")
df = df.rename(columns={"tokenSymbol_x": "tokenSymbol", "tokenID_x": "tokenID", "tokenSymbol_y": "tokenSymbol", "gas_x": "gas", "gasPrice_x": "gasPrice", "gasUsed_x": "gasUsed" })
df
# +
#df = df[["value", "timeStamp", "tokenSymbol", "gas_x", "gasPrice_x", "gasUsed_x", "tokenName", "tokenID"]]
#.drop_duplicates(subset=['timeStamp'], keep='last')
# -
df.reset_index()
# +
df["value"] = df["value"].astype(int)/1000000000000000000
# /1000000000000000000
df
# -
total = sum(df["value"])
total
#0.003577626003325 Ether
4230000000000000000/1000000000000000000
sum(df["value"].astype(int)/1000000000000000000)
gas = sum((df["gasPrice"].astype(int) * df["gasUsed"].astype(int))/1000000000000000000)
print(gas)
print(gas + total)
sum(df[df["isError"] == "1"]["gasPrice"].astype(int) * df[df["isError"] == "1"]["gasUsed"].astype(int))/1000000000000000000
max(df["value"].astype(int)/1000000000000000000)
min(df["value"].astype(int)/1000000000000000000)
# +
from datetime import datetime
import requests
def convertToDate(data):
    """Render a unix timestamp (seconds since epoch, str or int) as an MM-DD-YYYY UTC date."""
    seconds = int(data)
    return datetime.utcfromtimestamp(seconds).strftime('%m-%d-%Y')
#.strftime('%m-%d-%Y %H:%M:%S')
df["timeStamp"]
# +
from datetime import datetime
import requests
def convertToDate(data):
    """Render a unix timestamp (seconds since epoch, str or int) as a DD-MM-YYYY UTC date."""
    seconds = int(data)
    return datetime.utcfromtimestamp(seconds).strftime('%d-%m-%Y')
#.strftime('%m-%d-%Y %H:%M:%S')
#df["timeStamp"].apply(convertToDate)
price = []
for i in range(len(df["timeStamp"])):
data = requests.get(f'https://api.coingecko.com/api/v3/coins/ethereum/history?date={df["timeStamp"].apply(convertToDate)[i]}')
print(f'https://api.coingecko.com/api/v3/coins/ethereum/history?date={df["timeStamp"].apply(convertToDate)[i]}')
response = data.json()
#print(response["market_data"])
price.append(response["market_data"]["current_price"]["usd"])
price_df = pd.DataFrame(price)
# -
price_df = price_df.rename(columns={ 0:"eth price"})
price_df
sum(df["value"] * price_df["eth price"])
# +
price_df
df
df.join(price_df)
# -
# +
(sum(df["value"] * price_df["eth price"])) + sum(((df["gasPrice"].astype(int) * df["gasUsed"].astype(int))/1000000000000000000) * price_df["eth price"])
# +
#(sum(df["value"] * price_df["eth price"])) + sum(((df["gasPrice"].astype(int) * df["gasUsed"].astype(int))/1000000000000000000) * price_df["eth price"])
# -
# Bug fix: the expression below was missing its closing factor/parenthesis
# (SyntaxError); completed to match the total-cost formula used above (L.h.s.
# value in USD plus gas cost in USD).
(sum(df["value"] * price_df["eth price"])) + sum(((df["gasPrice"].astype(int) * df["gasUsed"].astype(int))/1000000000000000000) * price_df["eth price"])
# The shell command below was pasted into a Python cell (another SyntaxError);
# kept as a comment for reference only.
# curl -X 'GET' \
#   'https://api.coingecko.com/api/v3/coins/bitcoin/history?date=05-09-2020' \
#   -H 'accept: application/json'
# +
url = "https://api.coingecko.com/api/v3/coins/bitcoin/history?date="
# -
import datetime
import requests
import sys
import json
# Walk day-by-day through a date range and print the Coinbase ETH-USD spot
# price for each date (the `date` query param selects a historical quote).
requestDate = datetime.datetime(2015,7,30)
endDate = datetime.datetime(2015,12,31)
# NOTE(review): the bearer token is empty — presumably this endpoint works
# unauthenticated; confirm against the Coinbase API docs.
headers = { 'Authorization':'Authorization: Bearer '}
while requestDate <= endDate:
    query = {'date': requestDate.strftime('%Y-%m-%d') }
    response = requests.get('https://api.coinbase.com/v2/prices/ETH-USD/spot', params=query,headers=headers)
    if response.status_code==200:
        data = response.json()
        ethUSDPrice = float(data['data']['amount'])
        print("{},{:.2f}\n".format(requestDate.strftime('%Y-%m-%d'),ethUSDPrice))
    else:
        # Stop on the first failed request rather than hammering the API.
        print("Response status code {} . Exiting".format(response.status_code))
        print(response.json())
        break
    requestDate += datetime.timedelta(days=1)
# +
from datetime import datetime
import requests
def convertToDate(data):
    """Render a Unix timestamp (seconds; str or int) as YYYY-MM-DD in UTC."""
    when = datetime.utcfromtimestamp(int(data))
    return when.strftime('%Y-%m-%d')
#.strftime('%m-%d-%Y %H:%M:%S')
#df["timeStamp"].apply(convertToDate)
headers = { 'Authorization':'Authorization: Bearer '}
price = []
for i in range(len(df["timeStamp"])):
query = {'date': df["timeStamp"].apply(convertToDate)[i] }
print(query)
response = requests.get('https://api.coinbase.com/v2/prices/ETH-USD/spot', params=query,headers=headers)
if response.status_code==200:
data = response.json()
ethUSDPrice = float(data['data']['amount'])
price.append(ethUSDPrice)
else:
print(response.json())
break
# data = requests.get(f'https://api.coingecko.com/api/v3/coins/ethereum/history?date={df["timeStamp"].apply(convertToDate)[i]}')
# print(f'https://api.coinbase.com/v2/prices/ETH-USD/spot', {df["timeStamp"].apply(convertToDate)[i]})
# response = requests.get('https://api.coinbase.com/v2/prices/ETH-USD/spot', params=query,headers=headers)
# response = data.json()
# #print(response["market_data"])
# price.append(response["market_data"]["current_price"]["usd"])
# price_df = pd.DataFrame(price)
price_df = pd.DataFrame(price)
price_df
# -
price_df = price_df.rename(columns={ 0:"eth price"})
price_df
df_cleaned = pd.merge(price_df, df, left_index=True, right_index=True)
sum(df_cleaned["eth price"] * df_cleaned["value"])
df_cleaned
# +
import requests
import pandas as pd
from datetime import datetime
def convertToDate(data):
    """Unix timestamp (seconds; str or int) -> YYYY-MM-DD string in UTC."""
    stamp = datetime.utcfromtimestamp(int(data))
    return stamp.strftime('%Y-%m-%d')

def convertToDateFormat(data):
    """Unix timestamp (seconds; str or int) -> MM-DD-YYYY string in UTC."""
    stamp = datetime.utcfromtimestamp(int(data))
    return stamp.strftime('%m-%d-%Y')
tx_url = "https://api.etherscan.io/api?module=account&action=txlist&address=0xF717B30D486F3c2EBF77df02714e86d0da6Dbd8F&sort=asc&apikey=<KEY>"
token_url = "https://api.etherscan.io/api?module=account&action=tokennfttx&address=0xF717B30D486F3c2EBF77df02714e86d0da6Dbd8F&sort=asc&apikey=<KEY>"
response = requests.get(tx_url)
data = response.json()
txlist_df = pd.DataFrame(data["result"])
response = requests.get(token_url)
data = response.json()
tokennft_df = pd.DataFrame(data["result"])
df = txlist_df.merge(tokennft_df, how='inner', on=["timeStamp"])
dataframe_1 = pd.DataFrame(df[["value", "timeStamp", "tokenSymbol", "gas_x", "gasPrice_x", "gasUsed_x", "tokenName", "tokenID"]].groupby(["timeStamp", "tokenSymbol"])["tokenID"].apply(list)).reset_index()
dataframe_2 = df[["value", "timeStamp", "tokenSymbol", "gas_x", "gasPrice_x", "gasUsed_x", "tokenName", "tokenID", "contractAddress_y"]].drop_duplicates(subset=['timeStamp'], keep='last').reset_index()
df = pd.merge(dataframe_1,dataframe_2,how="inner", on="timeStamp")
df = df.rename(columns={"tokenSymbol_x": "tokenSymbol", "tokenID_x": "tokenID", "tokenSymbol_y": "tokenSymbol", "gas_x": "gas", "gasPrice_x": "gasPrice", "gasUsed_x": "gasUsed" })
df["value"] = df["value"].astype(int)/1000000000000000000
headers = { 'Authorization':'Authorization: Bearer '}
price = []
date = []
for i in range(len(df["timeStamp"])):
query = {'date': df["timeStamp"].apply(convertToDate)[i] }
#print(query)
response = requests.get('https://api.coinbase.com/v2/prices/ETH-USD/spot', params=query,headers=headers)
if response.status_code==200:
data = response.json()
ethUSDPrice = float(data['data']['amount'])
date.append(df["timeStamp"].apply(convertToDateFormat)[i])
price.append(ethUSDPrice)
else:
#print(response.json())
break
price_df = pd.DataFrame({ "USD": price, "date": date })
df_cleaned = pd.merge(price_df, df, left_index=True, right_index=True)
df_cleaned["gasPriceETH"] = (df_cleaned["gasPrice"].astype(int) * df_cleaned["gasUsed"].astype(int)/1000000000000000000)
df_cleaned["gasPriceUSD"] = (df_cleaned["gasPrice"].astype(int) * df_cleaned["gasUsed"].astype(int)/1000000000000000000) * df_cleaned["USD"]
response = { "data" : df_cleaned.to_dict("records") }
df_cleaned
# -
# +
from datetime import datetime
import requests
def convertToDate(data):
    """Format a Unix timestamp (seconds; str or int) as an ISO-style YYYY-MM-DD date in UTC."""
    utc_moment = datetime.utcfromtimestamp(int(data))
    return utc_moment.strftime('%Y-%m-%d')
#.strftime('%m-%d-%Y %H:%M:%S')
#df["timeStamp"].apply(convertToDate)
headers = { 'Authorization':'Authorization: Bearer '}
price = []
for i in range(len(df["timeStamp"])):
query = {'date': df["timeStamp"].apply(convertToDate)[i] }
#print(query)
response = requests.get('https://api.coinbase.com/v2/prices/ETH-USD/spot', params=query,headers=headers)
if response.status_code==200:
data = response.json()
ethUSDPrice = float(data['data']['amount'])
price.append(ethUSDPrice)
else:
#print(response.json())
break
# data = requests.get(f'https://api.coingecko.com/api/v3/coins/ethereum/history?date={df["timeStamp"].apply(convertToDate)[i]}')
# print(f'https://api.coinbase.com/v2/prices/ETH-USD/spot', {df["timeStamp"].apply(convertToDate)[i]})
# response = requests.get('https://api.coinbase.com/v2/prices/ETH-USD/spot', params=query,headers=headers)
# response = data.json()
# #print(response["market_data"])
# price.append(response["market_data"]["current_price"]["usd"])
# price_df = pd.DataFrame(price)
price_df = pd.DataFrame(price)
price_df
# -
price_df = price_df.rename(columns={ 0:"dollar price"})
price_df
df_cleaned = pd.merge(price_df, df, left_index=True, right_index=True)
df_cleaned
# +
#gas = sum((df_cleaned["gasPrice"].astype(int) * df_cleaned["gasUsed"].astype(int))/1000000000000000000)
# -
(df_cleaned["gasPrice"].astype(int) * df_cleaned["gasUsed"].astype(int)/1000000000000000000)
# +
(df_cleaned["gasPrice"].astype(int) * df_cleaned["gasUsed"].astype(int)/1000000000000000000) * df_cleaned["dollar price"]
# -
df_cleaned
# +
import requests
import pandas as pd
url = "https://api.opensea.io/api/v1/collection/wulfz-official"
response = requests.request("GET", url).json()
pd.DataFrame(response["collection"])
# -
# +
transaction_url = "https://api.etherscan.io/api?module=account&action=txlistinternal&txhash=0xfac2f3364536035f94df81c804732e5d6f7b3f009c39164ebd8355ca782288a5&apikey=<KEY>"
response = requests.get(transaction_url)
data = response.json()
data
# +
import requests
account="<KEY>"
url = f"https://api.opensea.io/api/v1/events?account_address={account}&limit=300"
headers = {
"Accept": "application/json",
"X-API-KEY": "b5e38a68201b473d8a4451dd894d1f03"
}
response = requests.request("GET", url, headers=headers).json()
df = pd.DataFrame(response["asset_events"])
df
# +
open_df = df[df["event_type"] == "successful"][["asset", "collection_slug", "dev_seller_fee_basis_points", "total_price", "quantity", "seller", "event_type", "winner_account", "listing_time", "transaction"]]
open_df["royalty_fee"] = (open_df["dev_seller_fee_basis_points"]) / 10000
open_df["opensea_fee"] = 250 / 10000
open_df["salesPriceETH"] = open_df["total_price"].astype(int) / 1000000000000000000
open_df = open_df.reset_index()
transaction_hash_list = []
for i in range(len(open_df["asset"])):
transaction_hash_list.append(open_df["transaction"][i]["transaction_hash"])
# transaction_url = f"https://api.etherscan.io/api?module=account&action=txlistinternal&txhash={transaction_hash_list[i]}&apikey=FKRKV4CZ9A6QGRZRYE5APJV3TP8JPVJIPT"
# response = requests.get(transaction_url)
# data = response.json()
# if(len(data["result"])):
# print(transaction_hash_list[i])
# print(data["result"][0]["value"])
# print(data["result"][1]["value"])
# print(data["result"])
# #
# # response = requests.get(transaction_url)
# # data = response.json()
# # data
open_df["transaction_hash"] = transaction_hash_list
open_df
# -
open_df.reset_index( inplace = True, col_level = 1)
open_df = unpack(open_df,"asset")
open_df.columns
def unpack(df, column, fillna="N/A"):
    """Expand a column of dicts into separate columns appended to ``df``.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame containing ``column``.
    column : str
        Name of the column whose values are dicts; each dict key becomes a
        new column.
    fillna : object, optional
        Value used for keys missing from some rows. ``None`` leaves NaN.

    Returns
    -------
    pandas.DataFrame
        ``df`` without ``column``, concatenated with the expanded columns.
    """
    # BUG FIX: Series.iteritems() was removed in pandas 2.0; iterate the
    # values directly. Also build the expanded frame on df's own index so
    # the axis=1 concat aligns even for non-default indexes.
    expanded = pd.DataFrame(list(df[column]), index=df.index)
    if fillna is not None:
        expanded = expanded.fillna(fillna)
    return pd.concat([df.drop(column, axis=1), expanded], axis=1)
open_df = unpack(open_df,"winner_account")
open_df = unpack(open_df,"user")
open_df = open_df.rename(columns={"profile_img_url": "winner_profile_img_url", "address": "winner_address", "config": "winner_config", "username": "winner_username"})
open_df = unpack(open_df,"seller")
open_df = unpack(open_df,"user")
open_df = open_df.rename(columns={"profile_img_url": "seller_profile_img_url", "address": "seller_address", "config": "seller_config", "username": "seller_username"})
open_df
open_df.columns
df1 = open_df[open_df["seller_address"].isin([account.lower()])]
df2 = open_df[~open_df["seller_address"].isin([account.lower()])]
df1["transaction_type"] = "sale"
df2["transaction_type"] = "buy"
open_df = df1.append(df2)[["collection_slug", "image_preview_url", "token_id", "image_url", "royalty_fee", "opensea_fee", "total_price", "quantity", "salesPriceETH", "winner_profile_img_url", "winner_address", "winner_username", "seller_profile_img_url", "seller_address", "seller_username", "transaction_type", "listing_time", "transaction_hash"]]
opend_df = open_df["listing_time"].replace(to_replace=[None], value="N/A", inplace=True)
#open_df[(open_df["username"] == "N/A") & (open_df["address"] == account.lower())]
# +
import datetime
price = []
date_list = []
for i in range(len(open_df["listing_time"])):
date = open_df["listing_time"][i][0:10]
if(date == "N/A"):
date = datetime.datetime.now().strftime('%Y-%m-%d')
query = {'date':date }
date_list.append(date)
response = requests.get('https://api.coinbase.com/v2/prices/ETH-USD/spot', params=query,headers=headers)
if response.status_code==200:
data = response.json()
ethUSDPrice = float(data['data']['amount'])
price.append(ethUSDPrice)
else:
#print(response.json())
break
price_df = pd.DataFrame({ "priceETHtoUSD" : price, "date": date_list })
price_df = price_df.rename(columns={ 0:""})
df_cleaned = pd.merge(price_df, open_df, left_index=True, right_index=True)
df_cleaned["salesPriceUSD"] = df_cleaned["salesPriceETH"] * df_cleaned["priceETHtoUSD"]
df_cleaned["opensea_feeETH"] = df_cleaned["opensea_fee"] * df_cleaned["salesPriceETH"]
df_cleaned["opensea_feeUSD"] = df_cleaned["opensea_feeETH"] * df_cleaned["priceETHtoUSD"]
df_cleaned["royalty_feeETH"] = df_cleaned["royalty_fee"] * df_cleaned["salesPriceETH"]
df_cleaned["royalty_feeUSD"] = df_cleaned["royalty_feeETH"] * df_cleaned["priceETHtoUSD"]
open_df_sale = df_cleaned[df_cleaned["transaction_type"] == "sale"]
open_df_sale
# +
open_df_buy = df_cleaned[df_cleaned["transaction_type"] == "buy"]
open_df_buy = open_df_buy.reset_index()
open_df_buy
for i in range(len(open_df_buy['transaction_hash'])):
transaction_url = f"https://api.etherscan.io/api?module=account&action=txlistinternal&txhash={open_df_buy['transaction_hash'][i]}&apikey=<KEY>"
print(transaction_url)
response = requests.get(transaction_url)
data = response.json()
print(data["result"])
# print(open_df_buy['transaction_hash'][i])
# print(data)
# transaction_url = f"https://api.etherscan.io/api?module=account&action=txlistinternal&txhash={transaction_hash_list[i]}&apikey=<KEY>"
# if(len(data["result"])):
# print(transaction_hash_list[i])
# print(data["result"][0]["value"])
# print(data["result"][1]["value"])
# print(data["result"])
#open_df_buy
# -
df_cleaned["image_url"][0]
# +
import requests
import pandas as pd
from datetime import datetime
def convertToDate(data):
    """Convert a Unix timestamp (seconds; str or int) into 'YYYY-MM-DD' (UTC)."""
    return datetime.utcfromtimestamp(int(data)).strftime('%Y' '-%m-%d')

def convertToDateFormat(data):
    """Convert a Unix timestamp (seconds; str or int) into 'MM-DD-YYYY' (UTC)."""
    parsed = int(data)
    return datetime.utcfromtimestamp(parsed).strftime('%m-%d-%Y')
tx_url = "https://api.etherscan.io/api?module=account&action=txlist&address=0xF717B30D486F3c2EBF77df02714e86d0da6Dbd8F&sort=asc&apikey=<KEY>"
token_url = "https://api.etherscan.io/api?module=account&action=tokennfttx&address=0xF717B30D486F3c2EBF77df02714e86d0da6Dbd8F&sort=asc&apikey=<KEY>"
response = requests.get(tx_url)
data = response.json()
txlist_df = pd.DataFrame(data["result"])
response = requests.get(token_url)
data = response.json()
tokennft_df = pd.DataFrame(data["result"])
df = txlist_df.merge(tokennft_df, how='inner', on=["timeStamp"])
dataframe_1 = pd.DataFrame(df[["value", "timeStamp", "tokenSymbol", "gas_x", "gasPrice_x", "gasUsed_x", "tokenName", "tokenID"]].groupby(["timeStamp", "tokenSymbol"])["tokenID"].apply(list)).reset_index()
dataframe_2 = df[["value", "timeStamp", "tokenSymbol", "gas_x", "gasPrice_x", "gasUsed_x", "tokenName", "tokenID", "contractAddress_y"]].drop_duplicates(subset=['timeStamp'], keep='last').reset_index()
df = pd.merge(dataframe_1,dataframe_2,how="inner", on="timeStamp")
df = df.rename(columns={"tokenSymbol_x": "tokenSymbol", "tokenID_x": "tokenID", "tokenSymbol_y": "tokenSymbol", "gas_x": "gas", "gasPrice_x": "gasPrice", "gasUsed_x": "gasUsed" })
df["value"] = df["value"].astype(int)/1000000000000000000
headers = { 'Authorization':'Authorization: Bearer '}
price = []
date = []
for i in range(len(df["timeStamp"])):
query = {'date': df["timeStamp"].apply(convertToDate)[i] }
#print(query)
response = requests.get('https://api.coinbase.com/v2/prices/ETH-USD/spot', params=query,headers=headers)
if response.status_code==200:
data = response.json()
ethUSDPrice = float(data['data']['amount'])
date.append(df["timeStamp"].apply(convertToDateFormat)[i])
price.append(ethUSDPrice)
else:
#print(response.json())
break
price_df = pd.DataFrame({ "USD": price, "date": date })
df_cleaned = pd.merge(price_df, df, left_index=True, right_index=True)
df_cleaned["gasPriceETH"] = (df_cleaned["gasPrice"].astype(int) * df_cleaned["gasUsed"].astype(int)/1000000000000000000)
df_cleaned["gasPriceUSD"] = (df_cleaned["gasPrice"].astype(int) * df_cleaned["gasUsed"].astype(int)/1000000000000000000) * df_cleaned["USD"]
#response = { "data" : df_cleaned.to_dict("records") }
df_cleaned
# +
import requests
# Fetch OpenSea metadata for every collection slug seen in open_df.
for i in range(len(open_df)):
    url = f"https://api.opensea.io/api/v1/collection/{open_df['collection_slug'][i]}"
    # NOTE(review): the decoded JSON is assigned to `response` but never used
    # (and overwritten each iteration) — only the URL is printed. Confirm
    # whether the response was meant to be collected.
    response = requests.request("GET", url).json()
    print(url)
# +
from datetime import datetime
import requests
def convertToDate(data):
    """Turn a Unix timestamp (seconds; str or int) into a UTC 'YYYY-MM-DD' string."""
    seconds = int(data)
    return datetime.utcfromtimestamp(seconds).strftime('%Y-%m-%d')
headers = { 'Authorization':'Authorization: Bearer '}
price = []
for i in range(len(df["timeStamp"])):
query = {'date': df["timeStamp"].apply(convertToDate)[i] }
#print(query)
response = requests.get('https://api.coinbase.com/v2/prices/ETH-USD/spot', params=query,headers=headers)
if response.status_code==200:
data = response.json()
ethUSDPrice = float(data['data']['amount'])
price.append(ethUSDPrice)
else:
#print(response.json())
break
# data = requests.get(f'https://api.coingecko.com/api/v3/coins/ethereum/history?date={df["timeStamp"].apply(convertToDate)[i]}')
# print(f'https://api.coinbase.com/v2/prices/ETH-USD/spot', {df["timeStamp"].apply(convertToDate)[i]})
# response = requests.get('https://api.coinbase.com/v2/prices/ETH-USD/spot', params=query,headers=headers)
# response = data.json()
# #print(response["market_data"])
# price.append(response["market_data"]["current_price"]["usd"])
# price_df = pd.DataFrame(price)
price_df = pd.DataFrame(price)
price_df
# +
import requests
import pandas as pd
account = "<KEY>"
tx_url = f"https://api.etherscan.io/api?module=account&action=txlist&address={account}&sort=asc&apikey=<KEY>"
token_url = f"https://api.etherscan.io/api?module=account&action=tokennfttx&address={account}&sort=asc&apikey=<KEY>"
# +
response = requests.get(tx_url)
data = response.json()
txlist_df = pd.DataFrame(data["result"])
response = requests.get(token_url)
data = response.json()
tokennfttx_df = pd.DataFrame(data["result"])
for i in range(len(tokennfttx_df["hash"])):
transaction_url = f"https://api.etherscan.io/api?module=account&action=txlistinternal&txhash={tokennfttx_df['hash'][i]}&apikey=<KEY>"
response = requests.get(transaction_url)
data = response.json()
print(tokennfttx_df['hash'][i])
print(data)
# response = requests.get(transaction_url)
# data = response.json()
# print(data["result"])
# print(open_df_buy['t
#tokennfttx_df["gas"].astype(int) * tokennfttx_df["gasPrice"].astype(int) /1000000000000000000
# -
mint_df = tokennfttx_df[tokennfttx_df["from"] == "0x0000000000000000000000000000000000000000"]
mint_df = txlist_df.merge(mint_df, how='inner', on=["timeStamp"])
mint_df
# +
buy_df = tokennfttx_df[(tokennfttx_df["to"] == account.lower()) & (tokennfttx_df["from"] != "0x0000000000000000000000000000000000000000")]
buy_df = txlist_df.merge(buy_df, how='inner', on=["timeStamp"])
buy_df
# +
txlist_df["hash"][7]
#0x7be8076f4ea4a4ad08075c2508e481d6c946d12b
#sale_df = tokennfttx_df[(tokennfttx_df["from"] == account.lower()) & (tokennfttx_df["to"] != account.lower())]
#df1 = pd.DataFrame(sale_df[[ "timeStamp", "tokenSymbol", "tokenName", "tokenID"]].groupby(["timeStamp", "tokenSymbol"])["tokenID"].apply(list)).reset_index()
#txlist_df.merge(df1, how='outer')
#df2 = sale_df[["value", "timeStamp", "tokenSymbol", "gas_x", "gasPrice_x", "gasUsed_x", "tokenName", "tokenID"]].drop_duplicates(subset=['timeStamp'], keep='last').reset_index()
# for i in range(len(txlist_df["hash"])):
# transaction_url = f"https://api.etherscan.io/api?module=account&action=txlistinternal&txhash={txlist_df['hash'][i]}&apikey=FKRKV4CZ9A6QGR<KEY>"
# response = requests.get(transaction_url)
# data = response.json()
# if(len(data["result"])):
# print(txlist_df["hash"][i])
# sale_df = txlist_df.merge(tokennfttx_df, how='outer')
# sale_df
# +
open_df = txlist_df.merge(tokennfttx_df, how='inner', on="hash")
transaction_hash_list = []
for i in range(len(open_df["hash"])):
#transaction_hash_list.append(open_df["hash"][i])
transaction_url = f"https://api.etherscan.io/api?module=account&action=txlistinternal&txhash={open_df['hash'][i]}&apikey=<KEY>"
response = requests.get(transaction_url)
data = response.json()
if(len(data["result"])):
for d in data['result']:
print(d["value"])
transaction_hash_list
open_df
# +
tokennfttx_df
# +
import requests
import pandas as pd
account="<KEY>"
url = f"https://api.opensea.io/api/v1/events?account_address={account}&limit=300"
headers = {
"Accept": "application/json",
"X-API-KEY": "b5e38a68201b473d8a4451dd894d1f03"
}
response = requests.request("GET", url, headers=headers).json()
event_df = pd.DataFrame(response["asset_events"])
event_df.columns
# -
def unpack(df, column, fillna="N/A"):
    """Expand a column of dicts into separate columns appended to ``df``.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame containing ``column``.
    column : str
        Name of the column whose values are dicts; each dict key becomes a
        new column.
    fillna : object, optional
        Value used for keys missing from some rows. ``None`` leaves NaN.

    Returns
    -------
    pandas.DataFrame
        ``df`` without ``column``, concatenated with the expanded columns.
    """
    # BUG FIX: Series.iteritems() was removed in pandas 2.0; iterate the
    # values directly. Also build the expanded frame on df's own index so
    # the axis=1 concat aligns even for non-default indexes.
    expanded = pd.DataFrame(list(df[column]), index=df.index)
    if fillna is not None:
        expanded = expanded.fillna(fillna)
    return pd.concat([df.drop(column, axis=1), expanded], axis=1)
event_df.reset_index( inplace = True, col_level = 1)
# +
transaction_hash_list = []
event_df = event_df[~event_df['transaction'].isna()]
for index, row in event_df.reset_index().iterrows():
transaction_hash_list.append(row["transaction"]["transaction_hash"])
event_df["hash"] = transaction_hash_list
# for i, row in event_df[~event_df['transaction'].isna()].reset_index().iterrows():
# print(row[i])
# -
t_df = tokennfttx_df.merge(event_df, left_on='hash', right_on='hash')
#
t_df.columns
test_df = t_df[["tokenName", "tokenID", 'tokenSymbol', "asset", 'total_price', 'timeStamp', 'gas', 'gasPrice', 'gasUsed',
"event_type",'contractAddress', 'dev_seller_fee_basis_points', "from", "to", "winner_account"]].fillna(0)
test_df[(test_df["from"] == account.lower()) & (test_df["total_price"] == 0)]
# +
mint_df = test_df[test_df["from"] == "0x0000000000000000000000000000000000000000"]
bought_df = test_df[(test_df["to"] == account.lower()) & (test_df["total_price"].astype(int) > 0) ]
sale_df = test_df[(test_df["to"] != account.lower()) & (test_df["total_price"].astype(int) > 0) ]
sale_df.reset_index( inplace = True, col_level = 1)
sale_df = unpack(sale_df,"winner_account")
sale_df = unpack(sale_df,"user")
bought_df.reset_index( inplace = True, col_level = 1)
bought_df = unpack(bought_df,"winner_account")
bought_df = unpack(bought_df,"user")
mint_df.reset_index( inplace = True, col_level = 1)
mint_df = unpack(mint_df, "asset")
sale_df = unpack(sale_df, "asset")
bought_df = unpack(bought_df, "asset")
mint_df["type"] = "mint"
bought_df["type"] = "buy"
sale_df["type"] = "sell"
opensea_df = pd.concat([mint_df, sale_df, bought_df], ignore_index=True, sort=False).sort_values(by="timeStamp")
def remove_duplicates(l):
    """Return ``l`` with duplicates removed, preserving first-occurrence order.

    IMPROVEMENT: list(set(l)) produced a nondeterministic element order
    (hash order); dict.fromkeys keeps insertion order on Python 3.7+, so the
    deduplicated tokenID lists are stable across runs.
    """
    return list(dict.fromkeys(l))
df1 = pd.DataFrame(opensea_df.groupby(["timeStamp", "tokenSymbol", "type"])["tokenID"].apply(remove_duplicates)).reset_index()
opensea_df = pd.merge(df1,opensea_df.drop_duplicates(subset=['timeStamp'], keep='last').reset_index(),how="inner", on="timeStamp")
opensea_df
#.apply(list)).reset_index()
#opensea_df.drop_duplicates(subset=['timeStamp'], keep='last').reset_index()
# -
t_df = unpack(t_df,"winner_account")
t_df["winner_account"]
#t_df = unpack(t_df,"user")
# t_df = t_df.rename(columns={"profile_img_url": "winner_profile_img_url", "address": "winner_address", "config": "winner_config", "username": "winner_username"})
# t_df = unpack(t_df,"seller")
# t_df = unpack(t_df,"user")
# t_df = t_df.rename(columns={"profile_img_url": "seller_profile_img_url", "address": "seller_address", "config": "seller_config", "username": "seller_username"})
# t_df
# +
address="0xF717B30D486F3c2EBF77df02714e86d0da6Dbd8F"
url = f"https://api.opensea.io/api/v1/events?account_address={address}&limit=300"
tx_url = f"https://api.etherscan.io/api?module=account&action=txlist&address={address}&sort=asc&apikey=<KEY>"
token_url = f"https://api.etherscan.io/api?module=account&action=tokennfttx&address={address}&sort=asc&apikey=<KEY>"
response = requests.get(tx_url)
data = response.json()
txlist_df = pd.DataFrame(data["result"])
response = requests.get(token_url)
data = response.json()
tokennfttx_df = pd.DataFrame(data["result"])
headers = {
"Accept": "application/json",
"X-API-KEY": "b5e38a68201b473d8a4451dd894d1f03"
}
response = requests.request("GET", url, headers=headers).json()
event_df = pd.DataFrame(response["asset_events"])
transaction_hash_list = []
event_df = event_df[~event_df['transaction'].isna()]
for index, row in event_df.reset_index().iterrows():
transaction_hash_list.append(row["transaction"]["transaction_hash"])
event_df["hash"] = transaction_hash_list
event_df.reset_index( inplace = True, col_level = 1)
t_df = tokennfttx_df.merge(event_df, left_on='hash', right_on='hash')
test_df = t_df[["tokenName", "tokenID", 'tokenSymbol', "asset", 'total_price', 'timeStamp', 'gas', 'gasPrice', 'gasUsed',
"event_type",'contractAddress', 'dev_seller_fee_basis_points', "from", "to", "winner_account"]].fillna(0)
mint_df = test_df[test_df["from"] == "0x0000000000000000000000000000000000000000"]
bought_df = test_df[(test_df["to"] == address.lower()) & (test_df["total_price"].astype(int) > 0) ]
sale_df = test_df[(test_df["to"] != address.lower()) & (test_df["total_price"].astype(int) > 0) ]
sale_df.reset_index( inplace = True, col_level = 1)
sale_df = unpack(sale_df,"winner_account")
bought_df.reset_index( inplace = True, col_level = 1)
bought_df = unpack(bought_df,"winner_account")
mint_df.reset_index( inplace = True, col_level = 1)
mint_df = unpack(mint_df, "winner_account")
mint_df["type"] = "mint"
bought_df["type"] = "buy"
sale_df["type"] = "sell"
opensea_df = pd.concat([mint_df, sale_df, bought_df], ignore_index=True, sort=False).sort_values(by="timeStamp")
def remove_duplicates(l):
    """Return ``l`` with duplicates removed, preserving first-occurrence order.

    IMPROVEMENT: list(set(l)) produced a nondeterministic element order
    (hash order); dict.fromkeys keeps insertion order on Python 3.7+, so the
    deduplicated tokenID lists are stable across runs.
    """
    return list(dict.fromkeys(l))
df1 = pd.DataFrame(opensea_df.groupby(["timeStamp", "tokenSymbol", "type"])["tokenID"].apply(remove_duplicates)).reset_index()
opensea_df = pd.merge(df1,opensea_df.drop_duplicates(subset=['timeStamp'], keep='last').reset_index(),how="inner", on="timeStamp")
opensea_df = opensea_df.drop(columns=[0, 'config', "level_0", "index", "event_type"])
opensea_df = opensea_df[["timeStamp", "tokenSymbol_x", "type_x", "tokenID_x", "gas", "asset", "total_price", "gasUsed", "gasPrice", "contractAddress", "user", "from", "to", "profile_img_url", "address", "dev_seller_fee_basis_points"]]
from datetime import datetime
import requests
def convertToDate(data):
    """Unix timestamp (seconds; str or int) rendered as 'YYYY-MM-DD' in UTC."""
    dt = datetime.utcfromtimestamp(int(data))
    return '{:%Y-%m-%d}'.format(dt)

def convertToDateFormat(data):
    """Unix timestamp (seconds; str or int) rendered as 'MM-DD-YYYY' in UTC."""
    dt = datetime.utcfromtimestamp(int(data))
    return '{:%m-%d-%Y}'.format(dt)
headers = { 'Authorization':'Authorization: Bearer '}
price = []
dates = []
for i in range(len(opensea_df["timeStamp"])):
query = {'date': opensea_df["timeStamp"].apply(convertToDate)[i] }
dates.append(opensea_df["timeStamp"].apply(convertToDateFormat)[i])
#print(query)
response = requests.get('https://api.coinbase.com/v2/prices/ETH-USD/spot', params=query,headers=headers)
if response.status_code==200:
data = response.json()
ethUSDPrice = float(data['data']['amount'])
price.append(ethUSDPrice)
else:
#print(response.json())
break
price_df = pd.DataFrame({ "ETHtoUSD": price, "date": dates})
df_cleaned = pd.merge(price_df, opensea_df, left_index=True, right_index=True)
df_cleaned["gasPriceETH"] = (opensea_df["gasPrice"].astype(int) * opensea_df["gasUsed"].astype(int)/1000000000000000000)
df_cleaned["salePriceETH"] = (opensea_df["total_price"].astype(int)/1000000000000000000)
#df_cleaned["salePriceUSD"] = (opensea_df["salePriceETH"].astype(int) * opensea_df["ETHtoUSD"].astype(float))
df_cleaned["gasPriceUSD"] = (df_cleaned["gasPriceETH"].astype(int) * df_cleaned["ETHtoUSD"])
df_cleaned["salePriceUSD"] = (df_cleaned["salePriceETH"].astype(float) * df_cleaned["ETHtoUSD"].astype(float))
# +
df_cleaned["asset"][0]
# +
# Normalise missing profile fields to the string "None" so downstream display
# code does not have to special-case NaN.
df_cleaned["profile_img_url"] = df_cleaned["profile_img_url"].fillna("None")
df_cleaned["address"] = df_cleaned["address"].fillna("None")
# Pull the image URL and NFT name out of the nested "asset" dict; rows whose
# event carried no asset (falsy after the earlier fillna(0)) get "None".
image_url_list = []
name_list = []
for i, row in df_cleaned.iterrows():
    if(row["asset"]):
        image_url_list.append(row["asset"]["image_url"])
        name_list.append(row["asset"]["name"])
    else:
        image_url_list.append("None")
        name_list.append("None")
# BUG FIX: the two assignments were swapped — NFT names were stored under
# "nft_image_url" and image URLs under "nft_collection_name".
df_cleaned["nft_image_url"] = image_url_list
df_cleaned["nft_collection_name"] = name_list
# dev_seller_fee_basis_points is in basis points (1/100 of a percent).
df_cleaned["royaltyFee"] = df_cleaned["dev_seller_fee_basis_points"]/10000
df_cleaned["royaltyFeeUSD"] = df_cleaned["salePriceUSD"] * df_cleaned["royaltyFee"]
df_cleaned["royaltyFeeETH"] = df_cleaned["salePriceETH"] * df_cleaned["royaltyFee"]
# BUG FIX: a missing comma after "nft_collection_name" concatenated two
# adjacent string literals into the bogus column name
# "nft_collection_namegasPriceUSD", which would raise a KeyError.
df_cleaned[["date","tokenSymbol_x", "timeStamp", "type_x", "tokenID_x", "gasUsed",
            "gasPrice", "contractAddress", "from", "to", "address", "gasPriceETH", "salePriceETH", "nft_collection_name",
            "gasPriceUSD", "salePriceUSD", "nft_image_url", "royaltyFee", "royaltyFeeUSD", "royaltyFeeETH"]]
# BUG FIX: the orient is "records" (plural); the old "record" abbreviation
# was deprecated and removed in pandas 2.0.
df_cleaned.to_dict("records")
#sale_df[~sale_df["tokenName"].isnull()]
# -
requests.get("").json()
| nifty-billy-dapp/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="JL3ZPJPgC4wC"
# # Wikipedia Text Generation (using RNN LSTM)
# + [markdown] colab_type="text" id="eoWGtkL8PPJ3"
# > - 🤖 See [full list of Machine Learning Experiments](https://github.com/trekhleb/machine-learning-experiments) on **GitHub**<br/><br/>
# > - ▶️ **Interactive Demo**: [try this model and other machine learning experiments in action](https://trekhleb.github.io/machine-learning-experiments/)
# + [markdown] colab_type="text" id="bui0MyTjv1Mp"
# ## Experiment overview
#
# In this experiment we will use character-based [Recurrent Neural Network](https://en.wikipedia.org/wiki/Recurrent_neural_network) (RNN) to generate a Wikipedia-like text based on the [wikipedia](https://www.tensorflow.org/datasets/catalog/wikipedia) TensorFlow dataset.
#
# 
# + [markdown] colab_type="text" id="3XxEZuRNIzHH"
# _Inspired by [Text generation with an RNN](https://www.tensorflow.org/tutorials/text/text_generation)_
# + [markdown] colab_type="text" id="IDJctDhhEDTD"
# ## Import dependencies
# + colab={} colab_type="code" id="Z2xZnxncD2Ep"
# Selecting Tensorflow version v2 (the command is relevant for Colab only).
# # %tensorflow_version 2.x
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" executionInfo={"elapsed": 3119, "status": "ok", "timestamp": 1586973888852, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/<KEY>", "userId": "03172675069638383074"}, "user_tz": -120} id="SpueB6zADYgE" outputId="18c59941-ac1f-4d02-c824-8499f71a8a70"
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
import numpy as np
import platform
import time
import pathlib
import os
print('Python version:', platform.python_version())
print('Tensorflow version:', tf.__version__)
print('Keras version:', tf.keras.__version__)
# + [markdown] colab_type="text" id="ciI_JnnNEGCw"
# ## Download the dataset
#
# [Wikipedia](https://www.tensorflow.org/datasets/catalog/wikipedia) dataset contains cleaned articles of all languages. The datasets are built from the [Wikipedia dump](https://dumps.wikimedia.org/) with one split per language. Each example contains the content of one full Wikipedia article with cleaning to strip markdown and unwanted sections (references, etc.).
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 3103, "status": "ok", "timestamp": 1586973888853, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AO<KEY>UKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="q3PDebo50-Le" outputId="7ca9b4e2-97fb-439d-8a53-dda4e9204275"
# List all available datasets to see how the wikipedia dataset is called.
tfds.list_builders()
# + [markdown] colab_type="text" id="f2qtWwKA0-Li"
# [`tfds.load`](https://www.tensorflow.org/datasets/api_docs/python/tfds/load) is a convenience method that's the simplest way to build and load a [`tf.data.Dataset`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset).
# + colab={} colab_type="code" id="gsqkLYgZ0-Lj"
# Loading the wikipedia dataset.
DATASET_NAME = 'wikipedia/20190301.en'
# DATASET_NAME = 'wikipedia/20190301.uk'
dataset, dataset_info = tfds.load(
name=DATASET_NAME,
data_dir='tmp',
with_info=True,
split=tfds.Split.TRAIN,
)
# + colab={"base_uri": "https://localhost:8080/", "height": 496} colab_type="code" executionInfo={"elapsed": 3058, "status": "ok", "timestamp": 1586973888854, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="V5nkTrRZ0-Ln" outputId="4f3b0eb0-84ef-4fb5-9087-72c096119542"
print(dataset_info)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 3033, "status": "ok", "timestamp": 1586973888855, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="9A11MeAt0-Lr" outputId="749bc519-63ad-47af-ea41-7d6a83014172"
print(dataset)
# + [markdown] colab_type="text" id="eKTy6YS5Gx-g"
# ## Analyze the dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 3010, "status": "ok", "timestamp": 1586973888856, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="ldLeTmcM0-Lu" outputId="b36746a7-6016-44ca-ed40-d0fd36087e21"
TRAIN_NUM_EXAMPLES = dataset_info.splits['train'].num_examples
print('Total number of articles: ', TRAIN_NUM_EXAMPLES)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 3178, "status": "ok", "timestamp": 1586973889154, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="CHkz0ZHA0-Lz" outputId="16739986-fe0e-41d8-c57d-12abe90df9dd"
print('First article','\n======\n')
for example in dataset.take(1):
print('Title:','\n------')
print(example['title'].numpy().decode('utf-8'))
print()
print('Text:', '\n------')
print(example['text'].numpy().decode('utf-8'))
# + [markdown] colab_type="text" id="GqpuKh9HMNnf"
# ## Process the dataset
# + [markdown] colab_type="text" id="FK_FFy7P0-L3"
# ### Flatten the dataset
#
# Converting the dataset from the set of articles into the set of characters. We also are interested only in `text` of each article so we may drop the `title` along the way.
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" executionInfo={"elapsed": 3071, "status": "ok", "timestamp": 1586973889155, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="_AC6MHFC0-L3" outputId="22f5d674-3b5f-44b7-9b02-0e61855a4871"
def article_to_text(text):
    """Convert a scalar string tensor into a NumPy array of its individual characters."""
    decoded = text.numpy().decode('utf-8')
    return np.array(list(decoded))
# Converting each dataset item to a string ('text') instead of a dictionary ({'text', 'title'}).
dataset_text = dataset.map(
lambda article: tf.py_function(func=article_to_text, inp=[article['text']], Tout=tf.string)
)
for text in dataset_text.take(2):
print(text.numpy())
print('\n')
# + colab={"base_uri": "https://localhost:8080/", "height": 357} colab_type="code" executionInfo={"elapsed": 3964, "status": "ok", "timestamp": 1586973890071, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="oSsSbJbX0-L8" outputId="9e442c58-c3da-4ad8-d4c3-f297085ea312"
# Unbatch the text dataset into a more granular char dataset.
# Now each dataset item is one character instead of a big piece of text.
dataset_chars = dataset_text.unbatch()
for char in dataset_chars.take(20):
print(char.numpy().decode('utf-8'))
# + [markdown] colab_type="text" id="jHNkkXGz0-L_"
# ### Generating vocabulary
# + colab={"base_uri": "https://localhost:8080/", "height": 88} colab_type="code" executionInfo={"elapsed": 270306, "status": "ok", "timestamp": 1586974156434, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="B7rnYLeU0-MA" outputId="4312adbb-7dd5-44cc-9e4b-4a68efad7883"
vocab = set()
# Ideally we should take all dataset items into account here.
for text in dataset_text.take(1000):
vocab.update([char.decode('utf-8') for char in text.numpy()])
vocab = sorted(vocab)
print('Unique characters: {}'.format(len(vocab)))
print('vocab:')
print(vocab)
# + [markdown] colab_type="text" id="6dj4e-AGMaV4"
# ### Vectorize the text
#
# Before feeding the text to our RNN we need to convert the text from a sequence of characters to a sequence of numbers. To do so we will detect all unique characters in the text, form a vocabulary out of it and replace each character with its index in the vocabulary.
# + colab={"base_uri": "https://localhost:8080/", "height": 578} colab_type="code" executionInfo={"elapsed": 270275, "status": "ok", "timestamp": 1586974156436, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="xFFpuXfGMPq2" outputId="8c79b593-33d2-4761-e7cb-fb14f1f2cecc"
# Map characters to their indices in vocabulary.
char2index = {char: index for index, char in enumerate(vocab)}
print('{')
for char, _ in zip(char2index, range(30)):
print(' {:4s}: {:3d},'.format(repr(char), char2index[char]))
print(' ...\n}')
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 270255, "status": "ok", "timestamp": 1586974156438, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="lQB33zI7NkRo" outputId="e789efe1-b9c6-448c-91f8-02b6e985310c"
# Map character indices to characters from vocabulary.
index2char = np.array(vocab)
print(index2char)
# + colab={"base_uri": "https://localhost:8080/", "height": 476} colab_type="code" executionInfo={"elapsed": 270802, "status": "ok", "timestamp": 1586974157036, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="DXUAlYmvN_Rj" outputId="2e7683da-5683-4d7a-b9d0-fffd7add372e"
def char_to_index(char):
    """Map a scalar string tensor holding one character to its vocabulary index.

    Out-of-vocabulary characters fall back to the index of '?'; if '?' itself
    is not in the vocabulary, fall back to index 0 so the pipeline never
    raises a KeyError on unseen input (the original did when '?' was absent).
    """
    char_symbol = char.numpy().decode('utf-8')
    # dict.get chain avoids a KeyError when neither the char nor '?' is known.
    return char2index.get(char_symbol, char2index.get('?', 0))
dataset_chars_indexed = dataset_chars.map(
lambda char: tf.py_function(func=char_to_index, inp=[char], Tout=tf.int32)
)
print('ORIGINAL CHARS:', '\n---')
for char in dataset_chars.take(10):
print(char.numpy().decode())
print('\n\n')
print('INDEXED CHARS:', '\n---')
for char_index in dataset_chars_indexed.take(20):
print(char_index.numpy())
# + [markdown] colab_type="text" id="CHv5HhUuTQYS"
# ## Create training sequences
# + colab={} colab_type="code" id="rpdFJJc90-ML"
# The maximum length sentence we want for a single input in characters.
sequence_length = 200
# + colab={"base_uri": "https://localhost:8080/", "height": 207} colab_type="code" executionInfo={"elapsed": 273784, "status": "ok", "timestamp": 1586974160086, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="Ap71VjB2Vuct" outputId="9b468ed6-8941-433b-f2e5-10b318d69ca9"
# Generate batched sequences out of the char_dataset.
sequences = dataset_chars_indexed.batch(sequence_length + 1, drop_remainder=True)
# Sequences examples.
for item in sequences.take(10):
print(repr(''.join(index2char[item.numpy()])))
print()
# + colab={} colab_type="code" id="Y8spPCfe-iTn"
# sequences shape:
# - Each sequence is of length 201 (sequence_length + 1)
#
# 201 201 201
# [(.....) (.....) ... (.....)]
# + [markdown] colab_type="text" id="HdcrcUs4Xxso"
# For each sequence, duplicate and shift it to form the input and target text. For example, say `sequence_length` is `4` and our text is `Hello`. The input sequence would be `Hell`, and the target sequence `ello`.
# + colab={} colab_type="code" id="9fxvXsP0XFDh"
def split_input_target(chunk):
    """Duplicate a sequence into a one-step-shifted (input, target) pair.

    E.g. for "Hello": input is "Hell", target is "ello".
    """
    return chunk[:-1], chunk[1:]
# + colab={} colab_type="code" id="454rWIQYXXRY"
dataset_sequences = sequences.map(split_input_target)
# + colab={"base_uri": "https://localhost:8080/", "height": 122} colab_type="code" executionInfo={"elapsed": 274570, "status": "ok", "timestamp": 1586974160943, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="Kuoh4tCdYCck" outputId="a8eb75f1-a9a1-4795-acd5-e2129e81e080"
for input_example, target_example in dataset_sequences.take(1):
print('Input sequence size:', repr(len(input_example.numpy())))
print('Target sequence size:', repr(len(target_example.numpy())))
print()
print('Input:\n', repr(''.join(index2char[input_example.numpy()])))
print()
print('Target:\n', repr(''.join(index2char[target_example.numpy()])))
# + colab={} colab_type="code" id="cp0tl0sN807l"
# dataset shape:
# - Each sequence is a tuple of 2 sub-sequences of length 200 (input_text and target_text)
#
# 200 200 200
# /(.....)\ /(.....)\ ... /(.....)\ <-- input_text
# \(.....)/ \(.....)/ \(.....)/ <-- target_text
# + [markdown] colab_type="text" id="BDYHEJ0pY1ai"
# Each index of these vectors is processed as one time step. For the input at time step 0, the model receives the index for "F" and tries to predict the index for "i" as the next character. At the next timestep, it does the same thing but the RNN considers the previous step context in addition to the current input character.
# + colab={"base_uri": "https://localhost:8080/", "height": 357} colab_type="code" executionInfo={"elapsed": 274533, "status": "ok", "timestamp": 1586974160945, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="C-0zpv53Y2o4" outputId="8f41e91e-3ecb-4086-bdbd-9c466564729b"
for i, (input_idx, target_idx) in enumerate(zip(input_example[:5], target_example[:5])):
print('Step #{:1d}'.format(i))
print(' input: {} ({:s})'.format(input_idx, repr(index2char[input_idx])))
print(' expected output: {} ({:s})'.format(target_idx, repr(index2char[target_idx])))
print()
# + [markdown] colab_type="text" id="1iDlp40lC5YB"
# ## Split training sequences into batches
#
# We used `tf.data` to split the text into manageable sequences. But before feeding this data into the model, we need to shuffle the data and pack it into batches.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 274506, "status": "ok", "timestamp": 1586974160945, "user": {"displayName": "<NAME>b", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="eDq-wa5EC3wW" outputId="89c0b910-d022-46c0-b83b-3352b13b7a6f"
# Batch size.
BATCH_SIZE = 64
# Buffer size to shuffle the dataset (TF data is designed to work
# with possibly infinite sequences, so it doesn't attempt to shuffle
# the entire sequence in memory. Instead, it maintains a buffer in
# which it shuffles elements).
BUFFER_SIZE = 100
# How many items to prefetch before the next iteration.
PREFETCH_SIZE = 10
dataset_sequence_batches = dataset_sequences \
.shuffle(BUFFER_SIZE) \
.batch(BATCH_SIZE, drop_remainder=True) \
.prefetch(PREFETCH_SIZE)
dataset_sequence_batches
# + colab={"base_uri": "https://localhost:8080/", "height": 306} colab_type="code" executionInfo={"elapsed": 478249, "status": "ok", "timestamp": 1586974364716, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="b_kYgvQGBO0U" outputId="0a456e52-5f7c-4f85-dea6-cdf4f42057fb"
for input_text, target_text in dataset_sequence_batches.take(1):
print('1st batch: input_text:', input_text)
print()
print('1st batch: target_text:', target_text)
# + colab={} colab_type="code" id="UkDCH15v_2I6"
# dataset shape:
# - 64 sequences per batch
# - Each sequence is a tuple of 2 sub-sequences of length 200 (input_text and target_text)
#
#
# 200 200 200 200 200 200
# |/(.....)\ /(.....)\ ... /(.....)\| ... |/(.....)\ /(.....)\ ... /(.....)\| <-- input_text
# |\(.....)/ \(.....)/ \(.....)/| ... |\(.....)/ \(.....)/ \(.....)/| <-- target_text
#
# <------------- 64 ----------------> <------------- 64 ---------------->
# + [markdown] colab_type="text" id="ghB-VwLlD-Oz"
# ## Build the model
#
# Use [tf.keras.Sequential](https://www.tensorflow.org/api_docs/python/tf/keras/Sequential) to define the model. For this simple example three layers are used to define our model:
#
# - [tf.keras.layers.Embedding](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding): The input layer. A trainable lookup table that will map the numbers of each character to a vector with `embedding_dim` dimensions;
# - [tf.keras.layers.LSTM](https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM): A type of RNN with size units=rnn_units (You can also use a GRU layer here.)
# - [tf.keras.layers.Dense](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense): The output layer, with vocab_size outputs.
# + colab={"base_uri": "https://localhost:8080/", "height": 425} colab_type="code" executionInfo={"elapsed": 478210, "status": "ok", "timestamp": 1586974364719, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="0cg8DlO3QjuT" outputId="c66429df-8d32-42ec-e8b9-2aea1157cb3f"
# Let's do a quick detour and see how Embeding layer works.
# It takes several char indices sequences (batch) as an input.
# It encodes every character of every sequence to a vector of tmp_embeding_size length.
tmp_vocab_size = 10
tmp_embeding_size = 5
tmp_input_length = 8
tmp_batch_size = 2
tmp_model = tf.keras.models.Sequential()
tmp_model.add(tf.keras.layers.Embedding(
input_dim=tmp_vocab_size,
output_dim=tmp_embeding_size,
input_length=tmp_input_length
))
# The model will take as input an integer matrix of size (batch, input_length).
# The largest integer (i.e. word index) in the input should be no larger than 9 (tmp_vocab_size).
# Now model.output_shape == (None, 8, 5), where None is the batch dimension (8 = tmp_input_length, 5 = tmp_embeding_size).
tmp_input_array = np.random.randint(
low=0,
high=tmp_vocab_size,
size=(tmp_batch_size, tmp_input_length)
)
tmp_model.compile('rmsprop', 'mse')
tmp_output_array = tmp_model.predict(tmp_input_array)
print('tmp_input_array shape:', tmp_input_array.shape)
print('tmp_input_array:')
print(tmp_input_array)
print()
print('tmp_output_array shape:', tmp_output_array.shape)
print('tmp_output_array:')
print(tmp_output_array)
# + colab={} colab_type="code" id="I7ZuvZHBD_pS"
# Length of the vocabulary in chars.
vocab_size = len(vocab)
# The embedding dimension.
embedding_dim = 256
# Number of RNN units.
rnn_units = 1024
# + colab={} colab_type="code" id="-sojdDCAICWO"
def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    """Assemble the char-level language model: Embedding -> LSTM -> Dense.

    :param vocab_size: number of distinct characters (embedding rows / output logits)
    :param embedding_dim: size of each character's embedding vector
    :param rnn_units: number of LSTM units
    :param batch_size: fixed batch dimension (required because the LSTM is stateful)
    :return: an uncompiled tf.keras.Sequential; the Dense layer emits raw logits
        (no softmax), so pair it with a from_logits=True loss.
    """
    return tf.keras.models.Sequential([
        tf.keras.layers.Embedding(
            input_dim=vocab_size,
            output_dim=embedding_dim,
            batch_input_shape=[batch_size, None]
        ),
        tf.keras.layers.LSTM(
            units=rnn_units,
            return_sequences=True,
            stateful=True,
            recurrent_initializer=tf.keras.initializers.GlorotNormal()
        ),
        tf.keras.layers.Dense(vocab_size),
    ])
# + colab={} colab_type="code" id="XoPwxyAPEg6z"
model = build_model(vocab_size, embedding_dim, rnn_units, BATCH_SIZE)
# + colab={"base_uri": "https://localhost:8080/", "height": 255} colab_type="code" executionInfo={"elapsed": 478161, "status": "ok", "timestamp": 1586974364722, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="iLnlZFgU55bQ" outputId="b299c133-c38b-4636-a478-6c601a5a4275"
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 422} colab_type="code" executionInfo={"elapsed": 478751, "status": "ok", "timestamp": 1586974365328, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="CcaO_rO_8-GH" outputId="80e5f385-1da2-41fb-c06b-7eb51fcd8b52"
tf.keras.utils.plot_model(
model,
show_shapes=True,
show_layer_names=True,
)
# + [markdown] colab_type="text" id="JpL9idwZV6fL"
# For each character the model looks up the embedding, runs the LSTM one timestep with the embedding as input, and applies the dense layer to generate logits predicting the log-likelihood of the next character:
#
# 
#
# Image source: [Text generation with an RNN](https://www.tensorflow.org/tutorials/text/text_generation) notebook.
# + [markdown] colab_type="text" id="Npruiy2RAPkt"
# ## Try the model
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 678242, "status": "ok", "timestamp": 1586974564842, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="E4DCLA0GASL1" outputId="d6e39b67-d2da-46d2-c6a1-f62d81f64981"
for input_example_batch, target_example_batch in dataset_sequence_batches.take(1):
example_batch_predictions = model(input_example_batch)
print(example_batch_predictions.shape, "# (batch_size, sequence_length, vocab_size)")
# + [markdown] colab_type="text" id="MWebJXU9CEPd"
# To get actual predictions from the model we need to sample from the output distribution, to get actual character indices. This distribution is defined by the logits over the character vocabulary.
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" executionInfo={"elapsed": 678227, "status": "ok", "timestamp": 1586974564843, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="Y4Jgo-iECFWI" outputId="22b015fd-bc24-4a4b-c60c-177856033131"
print('Prediction for the 1st letter of the batch 1st sequense:')
print(example_batch_predictions[0, 0])
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 678211, "status": "ok", "timestamp": 1586974564844, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="0dOr0MwFHlRb" outputId="24a550a0-6a12-4e05-8f50-caf752888b58"
# Quick overview of how tf.random.categorical() works.
# logits is 2-D Tensor with shape [batch_size, num_classes].
# Each slice [i, :] represents the unnormalized log-probabilities for all classes.
# In the example below we say that the probability for class "0" is low but the
# probability for class "2" is much higher.
tmp_logits = [
[-0.95, 0, 0.95],
];
# Let's generate 5 samples. Each sample is a class index. Class probabilities
# are being taken into account (we expect to see more samples of class "2").
tmp_samples = tf.random.categorical(
logits=tmp_logits,
num_samples=5
)
print(tmp_samples)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 678151, "status": "ok", "timestamp": 1586974564845, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="JPzr0r4zCgS3" outputId="1ce0c5c8-addc-4777-f505-63663962d576"
sampled_indices = tf.random.categorical(
logits=example_batch_predictions[0],
num_samples=1
)
sampled_indices.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 678118, "status": "ok", "timestamp": 1586974564846, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="YaA7DclID8dz" outputId="cfb93eca-f822-49e7-dc37-3365a80bebdc"
sampled_indices = tf.squeeze(
input=sampled_indices,
axis=-1
).numpy()
sampled_indices.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 678080, "status": "ok", "timestamp": 1586974564847, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="_ubGQ0gVENhB" outputId="398ecbd2-5973-448c-f6f2-2c48a40f73f1"
sampled_indices
# + colab={"base_uri": "https://localhost:8080/", "height": 122} colab_type="code" executionInfo={"elapsed": 678033, "status": "ok", "timestamp": 1586974564848, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="Gi9HOzw9EajS" outputId="177578de-a6be-417a-e528-d470494f59fb"
print('Input:\n', repr(''.join(index2char[input_example_batch[0]])))
print()
print('Next char prediction:\n', repr(''.join(index2char[sampled_indices])))
# + colab={"base_uri": "https://localhost:8080/", "height": 357} colab_type="code" executionInfo={"elapsed": 678007, "status": "ok", "timestamp": 1586974564848, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="b87e0lsYMTsv" outputId="e562488a-f4b7-4a2f-9f56-f2a6c7dc5737"
# Preview the first 5 sampled next-char predictions for the batch's first sequence.
# Bug fix: the original printed `target_idx`, a stale variable left over from the
# earlier input/target inspection loop — the sampled prediction here is `sample_idx`.
for i, (input_idx, sample_idx) in enumerate(zip(input_example_batch[0][:5], sampled_indices[:5])):
    print('Prediction #{:1d}'.format(i))
    print(' input: {} ({:s})'.format(input_idx, repr(index2char[input_idx])))
    print(' next predicted: {} ({:s})'.format(sample_idx, repr(index2char[sample_idx])))
    print()
# + [markdown] colab_type="text" id="LqcBufKEE_p6"
# ## Train the model
#
# At this point the problem can be treated as a standard classification problem. Given the previous RNN state, and the input this time step, predict the class of the next character.
# + [markdown] colab_type="text" id="l4s0-PvrFub5"
# ### Attach an optimizer, and a loss function
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 677987, "status": "ok", "timestamp": 1586974564849, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="UOEUUm6JE95a" outputId="c4b60238-ced1-4150-db09-fc056b409555"
# An objective function.
# The function is any callable with the signature scalar_loss = fn(y_true, y_pred).
def loss(labels, logits):
    """Per-character cross-entropy between integer labels and raw model logits.

    :param labels: true character indices
    :param logits: unnormalized model outputs over the vocabulary
    :return: per-position loss tensor (not yet reduced to a scalar)
    """
    per_char_loss = tf.keras.losses.sparse_categorical_crossentropy(
        labels, logits, from_logits=True)
    return per_char_loss
example_batch_loss = loss(target_example_batch, example_batch_predictions)
print("Prediction shape: ", example_batch_predictions.shape, " # (batch_size, sequence_length, vocab_size)")
print("scalar_loss: ", example_batch_loss.numpy().mean())
# + colab={} colab_type="code" id="SXhJsB6eFgrJ"
adam_optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
model.compile(
optimizer=adam_optimizer,
loss=loss
)
# + [markdown] colab_type="text" id="MK3Cf-xZFwL4"
# ### Configure checkpoints
# + colab={} colab_type="code" id="7Jet-3Ps0-Ny"
# # %rm -rf tmp/checkpoints
# + colab={} colab_type="code" id="LUhXnHPJFy5q"
# Directory where the checkpoints will be saved.
checkpoint_dir = 'tmp/checkpoints'
os.makedirs(checkpoint_dir, exist_ok=True)
# Name of the checkpoint files
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt_{epoch}')
checkpoint_callback=tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_prefix,
save_weights_only=True
)
# + [markdown] colab_type="text" id="oFg9MFJoGZWf"
# ### Execute the training
# + colab={} colab_type="code" id="AVk-pARPGaja"
EPOCHS=150
STEPS_PER_EPOCH = 10
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 5855654, "status": "ok", "timestamp": 1586982233796, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="y0rveBdAGeEz" outputId="1844d63b-85f6-4c40-97fd-dcc5b3a1c879"
tmp_dataset = dataset_sequence_batches.repeat()
history = model.fit(
x=tmp_dataset.as_numpy_iterator(),
epochs=EPOCHS,
steps_per_epoch=STEPS_PER_EPOCH,
callbacks=[
checkpoint_callback
]
)
# + colab={} colab_type="code" id="mLdnOyvzMggJ"
def render_training_history(training_history):
    """Plot the training-set loss curve from a Keras History object."""
    training_losses = training_history.history['loss']
    plt.plot(training_losses, label='Training set')
    plt.title('Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.grid(linestyle='--', linewidth=1, alpha=0.5)
    plt.legend()
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" executionInfo={"elapsed": 617, "status": "ok", "timestamp": 1586982234407, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="4Ghveem_OQBV" outputId="b7e062f8-46ce-4669-e48a-9be5009d44e6"
render_training_history(history)
# + [markdown] colab_type="text" id="X-dhNP2OG2EM"
# ## Generate text
# + [markdown] colab_type="text" id="SU_YfP6sG3NC"
# ### Restore the latest checkpoint
#
# To keep this prediction step simple, use a batch size of 1.
#
# Because of the way the RNN state is passed from timestep to timestep, the model only accepts a fixed batch size once built.
#
# To run the model with a different `batch_size`, we need to rebuild the model and restore the weights from the checkpoint.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 23, "status": "ok", "timestamp": 1586982234408, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="BlG8o3wiG6f2" outputId="944da6fc-cf4e-4833-e14e-2526aaf05445"
tf.train.latest_checkpoint(checkpoint_dir)
# + colab={} colab_type="code" id="l7evN0LvH01P"
simplified_batch_size = 1
restored_model = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)
restored_model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
restored_model.build(tf.TensorShape([simplified_batch_size, None]))
# + colab={"base_uri": "https://localhost:8080/", "height": 255} colab_type="code" executionInfo={"elapsed": 60, "status": "ok", "timestamp": 1586982235202, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiiA4aUKCbFho88Jd0WWMoAqQUt3jbuCtfNYpHVOA=s64", "userId": "03172675069638383074"}, "user_tz": -120} id="Y3eduDtZI9zQ" outputId="461d4fb4-8059-444e-b602-9b9977336609"
restored_model.summary()
# + [markdown] colab_type="text" id="euNvAtr4JC3A"
# ### The prediction loop
#
# The following code block generates the text:
#
# - It Starts by choosing a start string, initializing the RNN state and setting the number of characters to generate.
#
# - Get the prediction distribution of the next character using the start string and the RNN state.
#
# - Then, use a categorical distribution to calculate the index of the predicted character. Use this predicted character as our next input to the model.
#
# - The RNN state returned by the model is fed back into the model so that it now has more context, instead of only one character. After predicting the next character, the modified RNN states are again fed back into the model, which is how it learns as it gets more context from the previously predicted characters.
#
# 
#
# Image source: [Text generation with an RNN](https://www.tensorflow.org/tutorials/text/text_generation) notebook.
# + colab={} colab_type="code" id="bOqdqGouJFf_"
# num_generate
# - number of characters to generate.
#
# temperature
# - Low temperatures results in more predictable text.
# - Higher temperatures results in more surprising text.
# - Experiment to find the best setting.
def generate_text(model, start_string, num_generate = 1000, temperature=1.0):
    """Generate ``num_generate`` characters from the model, seeded with ``start_string``.

    :param model: trained char-level model built with batch size 1
    :param start_string: prompt text; every character must exist in ``char2index``
        (an unknown character raises a KeyError here)
    :param num_generate: number of characters to generate
    :param temperature: scales the logits before sampling — low values give
        more predictable text, high values more surprising text
    :return: ``start_string`` concatenated with the generated characters
    """
    # Evaluation step (generating text using the learned model)
    # Converting our start string to numbers (vectorizing).
    input_indices = [char2index[s] for s in start_string]
    input_indices = tf.expand_dims(input_indices, 0)
    # Empty list to store our results.
    text_generated = []
    # Here batch size == 1; clear the stateful LSTM's carried-over hidden state.
    model.reset_states()
    for char_index in range(num_generate):
        predictions = model(input_indices)
        # remove the batch dimension
        predictions = tf.squeeze(predictions, 0)
        # Using a categorical distribution to predict the character returned by the model.
        predictions = predictions / temperature
        # [-1, 0]: sample only from the distribution of the LAST time step.
        predicted_id = tf.random.categorical(
            predictions,
            num_samples=1
        )[-1,0].numpy()
        # We pass the predicted character as the next input to the model
        # along with the previous hidden state (kept inside the stateful layers).
        input_indices = tf.expand_dims([predicted_id], 0)
        text_generated.append(index2char[predicted_id])
    return (start_string + ''.join(text_generated))
# +
num_generate = 300
temperatures = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2]
start_string = 'Science is'
for temperature in temperatures:
print("Temperature: {}".format(temperature))
print('---')
print(generate_text(restored_model, start_string, num_generate=num_generate, temperature=temperature))
print('\n')
# + [markdown] colab_type="text" id="0hh80MqEO_XI"
# ## Save the model
# + colab={} colab_type="code" id="VPE98xa8PA-u"
model_name = 'text_generation_wikipedia_rnn.h5'
restored_model.save(model_name, save_format='h5')
# + [markdown] colab_type="text" id="WYP08xbbTNKp"
# ## Converting the model to web-format
#
# To use this model on the web we need to convert it into the format that will be understandable by [tensorflowjs](https://www.tensorflow.org/js). To do so we may use [tfjs-converter](https://github.com/tensorflow/tfjs/tree/master/tfjs-converter) as following:
#
# ```
# tensorflowjs_converter --input_format keras \
# ./experiments/text_generation_wikipedia_rnn/text_generation_wikipedia_rnn.h5 \
# ./demos/public/models/text_generation_wikipedia_rnn
# ```
#
# You can find this experiment in the [Demo app](https://trekhleb.github.io/machine-learning-experiments) and play around with it right in your browser to see how the model performs in real life.
| experiments/text_generation_wikipedia_rnn/text_generation_wikipedia_rnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
__name__ = "k1lib.callbacks"
#export
from .callbacks import Callback, Callbacks, Cbs
import k1lib, numpy as np, math
from functools import partial
import matplotlib.pyplot as plt
from typing import Callable
__all__ = ["Loss", "Accuracy"]
#export
def plotF(losses, _slice): # actual function stored by the sliceable plot
    """Render train and valid losses side by side for the given slice.

    :param losses: object exposing ``.train`` and ``.valid`` loss sequences
    :param _slice: slice the user applied to the SliceablePlot; it addresses the
        train plot, and the valid plot's window is scaled proportionally to match
    """
    plt.figure(figsize=(10, 3), dpi=100); step = _slice.step or 1
    # Map the train-axis slice onto the (possibly different-length) valid axis.
    tR, vR = k1lib.Range.proportionalSlice(len(losses.train), len(losses.valid), _slice)
    try:
        plt.subplot(1, 2, 1); plt.plot(tR.range_[::step], losses.train[tR.slice_][::step]); plt.title(f"Train loss")
        plt.subplot(1, 2, 2); plt.plot(vR.range_[::step], losses.valid[vR.slice_][::step]); plt.title(f"Valid loss")
    except: pass # best-effort rendering: plotting errors (e.g. empty data) are deliberately swallowed
def commonPlot(obj):
    """Wrap :func:`plotF` bound to ``obj`` (anything with ``.train``/``.valid``) in a sliceable plot."""
    return k1lib.viz.SliceablePlot(partial(plotF, obj), docs="""\n\nReminder: the actual slice you put in is for the training plot. The valid loss's plot will update automatically to be in the same time frame""")
def nonEmptyList(_list):
    """Return ``_list`` unchanged, or ``[0]`` when it is empty (keeps ``np.mean`` well-defined downstream)."""
    if _list == []:
        return [0]
    return _list
#export
@k1lib.patch(Cbs)
class Loss(Callback):
    " "
    def __init__(self):
        """Records losses after each batch.
Expected variables in :class:`~k1lib.Learner`:
- loss: single float value"""
        super().__init__(); self.order = 20
        # Flat per-batch loss histories over the entire run.
        self.train = []; self.valid = [] # all stats all times
        # average stats for each epoch
        self.epoch = k1lib.Object.fromDict({"train": [], "valid": []})\
            .withRepr("Use...\n" +\
                "- `.train` for epoch-averaged training losses\n" +\
                "- `.valid` for epoch-averaged validation losses\n" +\
                "- `.plot()` to plot the 2 above")
        self.plot = partial(commonPlot, self)
        self.epoch.plot = partial(commonPlot, self.epoch)
        # Scratch buffers for the current epoch; flushed by endEpoch().
        self._trainLosses = []; self._validLosses = []
        self._landscape = k1lib.callbacks.Landscape(lambda l: l.loss, "_LossLandscape")
    def endLoss(self):
        # Route the just-computed batch loss by the model's training/eval mode.
        if self.l.model.training: self._trainLosses.append(self.l.loss)
        else: self._validLosses.append(self.l.loss)
    def endEpoch(self):
        # Flush scratch buffers into the flat histories and record the epoch
        # means (nonEmptyList guards np.mean against an empty buffer).
        self.train.extend(self._trainLosses); self.epoch.train.append(np.mean(nonEmptyList(self._trainLosses)))
        self.valid.extend(self._validLosses); self.epoch.valid.append(np.mean(nonEmptyList(self._validLosses)))
        self._trainLosses = []; self._validLosses = []
    @property
    def Landscape(self):
        """Gets loss-landscape-plotting Callback (attaches it on first access).
Example::
    l = k1lib.Learner.sample()
    l.cbs.add(Cbs.Loss())
    l.Loss.Landscape.plot()"""
        self.cbs.add(self._landscape); return self._landscape
    def detach(self): self._landscape.detach(); return super().detach()
    def __repr__(self):
        return f"""{super()._reprHead}, use...
- cb.train: for all training losses over all epochs and batches (#epochs * #batches)
- cb.valid: for all validation losses over all epochs and batches (#epochs * #batches)
- cb.plot(): to plot the 2 above
- cb.epoch: for average losses of each epochs
- cb.Landscape: for loss-landscape-plotting Callback
{super()._reprCan}"""
# Smoke test: attach Cbs.Loss to a sample Learner and plot the loss landscape.
l = k1lib.Learner.sample()
l.cbs.add(Cbs.Loss())
l.Loss.Landscape.plot()
#export
accFMsg = "You have to specify how to compute the accuracy with the AccF callback first"
@k1lib.patch(Cbs)
class Accuracy(Callback):
    " "
    def __init__(self):
        """Records accuracies after each batch.
Expected variables in :class:`~k1lib.Learner`:
- accuracy: single float value from 0 to 1"""
        super().__init__(); self.order = 20
        # Seeded with 0 so the histories are never empty; stays paused until
        # an AccF callback (which computes l.accuracy) is attached.
        self.train = [0]; self.valid = [0]; self.paused = True
        self._landscape = k1lib.callbacks.Landscape(lambda l: l.accuracy, "_AccuracyLandscape")
    @property
    def hasAccF(self):
        # True when some attached callback knows how to compute the accuracy.
        return any(isinstance(cb, Cbs.AccF) for cb in self.l.cbs.cbs)
    def startRun(self):
        self.paused = not self.hasAccF
        if not self.paused:
            # Switch back to plain lists so endLoss() can append during the run.
            self.train = list(self.train); self.valid = list(self.valid)
    def endRun(self):
        if not self.paused:
            # Freeze into arrays for fast slicing/plotting afterwards.
            self.train = np.array(self.train); self.valid = np.array(self.valid)
    def endLoss(self):
        # Route the batch accuracy by the model's training/eval mode.
        if not self.paused:
            (self.train if self.l.model.training else self.valid).append(self.l.accuracy)
    def plot(self):
        """Plot train/valid accuracies (in %) as a sliceable side-by-side figure."""
        if not self.hasAccF: raise RuntimeError(accFMsg)
        def plotF(_slice):
            plt.figure(figsize=(10, 3), dpi=100); step = _slice.step or 1
            # Map the train-axis slice proportionally onto the valid axis.
            tR, vR = k1lib.Range.proportionalSlice(len(self.train), len(self.valid), _slice)
            try:
                plt.subplot(1, 2, 1); plt.plot(tR.range_[::step], 100*self.train[tR.slice_][::step]); plt.title(f"Train accuracy")
                plt.subplot(1, 2, 2); plt.plot(vR.range_[::step], 100*self.valid[vR.slice_][::step]); plt.title(f"Valid accuracy")
            except: pass # best-effort rendering, same convention as the Loss plot
        return k1lib.viz.SliceablePlot(plotF)
    @property
    def Landscape(self):
        """Gets accuracy-landscape-plotting Callback.
Example::
    l = k1lib.Learner.sample()
    l.add(Cbs.Accuracy())
    l.Accuracy.Landscape.plot()
This exact example won't work, as the sample :class:`~k1lib.Learner` task is not
categorical, but the general idea still stands"""
        if self.hasAccF:
            self._landscape.parent = self
            self.cbs.add(self._landscape); return self._landscape
        else: raise RuntimeError(f"{accFMsg}, before you can view the landscape")
    def __repr__(self):
        # Fixed copy-paste errors in the original help text: the ".valid" line
        # said "train accuracies", and ".Landscape" said "loss-landscape".
        return f"""{super()._reprHead}{f" (.accuracyF not defined yet)" if not self.hasAccF else ""}, use...
- a.train: for train accuracies over all batches
- a.valid: for valid accuracies over all batches
- a.plot(): to plot the 2 above
- a.Landscape: for accuracy-landscape-plotting Callback
{super()._reprCan}"""
try:
l = k1lib.Learner.sample()
l.add(Cbs.Accuracy())
l.Accuracy.Landscape
assert False
except RuntimeError: pass
# !../../export.py callbacks/loss_accuracy
| k1lib/callbacks/loss_accuracy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import torch.nn as nn
# +
V = torch.randn(5, 10)
L = V.mm(V.t())
vecs, vals, _ = torch.svd(V)
vals = vals.pow(2)
n = vecs.size(0)
n_vals = vals.size(0)
subset = torch.FloatTensor([0,1,0,1,0])
subset_sum = subset.long().sum()
# +
## Best
grad_vals = 1 / vals
grad_vecs = vecs.new().resize_(n, n_vals).copy_(torch.zeros(n, n_vals))
ix = subset.new().resize_(n).copy_((subset * torch.arange(0,n))).nonzero().squeeze()
Pvecs = vecs[ix,:].squeeze(1)
submatrix = Pvecs.mm(vals.diag()).mm(Pvecs.t())
subinv = torch.inverse(submatrix)
grad_vals += Pvecs.t().mm(subinv).mm(Pvecs).diag()
grad_vecs[ix,:] += subinv.mm(Pvecs).mm(vals.diag())
print(grad_vals, grad_vecs)
# +
grad_vals = 1 / vals
grad_vecs = torch.zeros(n, n_vals)
matrix = vecs.mm(vals.diag()).mm(vecs.t())
P = torch.eye(n).masked_select(subset.expand(n,n).t().byte()).view(subset_sum, -1)
submatrix = P.mm(matrix).mm(P.t())
# ix = (subset * torch.arange(0,len(subset))).nonzero()
# submatrix = matrix[ix,].squeeze(1).t()[ix,].squeeze(1)
subinv = torch.inverse(submatrix)
Pvecs = P.mm(vecs)
# Pvecs = vecs[ix,:].squeeze(1)
grad_vals += Pvecs.t().mm(subinv).mm(Pvecs).diag()
grad_vecs += P.t().mm(subinv).mm(Pvecs).mm(vals.diag())
print(grad_vals, grad_vecs)
# +
grad_vals = 1 / vals
grad_vecs = vecs.new().resize_(n, n_vals).copy_(torch.zeros(n, n_vals))
ix = subset.new().resize_(n).copy_((subset * torch.arange(0,n))).nonzero()
Pvecs = vecs[ix,:].squeeze(1)
submatrix = Pvecs.mm(vals.diag()).mm(Pvecs.t())
subinv = torch.inverse(submatrix)
grad_vals += Pvecs.t().mm(subinv).mm(Pvecs).diag()
grad_vecs += P.t().mm(subinv).mm(Pvecs).mm(vals.diag())
print(grad_vals, grad_vecs)
# -
# +
import matplotlib.pyplot as plt
import numpy as np
def autolabel(rects):
    """Annotate each bar in ``rects`` with its height, centered just above it.

    Fix: the original body referenced a global ``ax`` which is never defined at
    module scope in this notebook (the `ax = ...` lines nearby are commented
    out), so calling this raised a NameError. Every matplotlib artist knows the
    axes it belongs to, so we label via ``rect.axes`` instead.
    """
    for rect in rects:
        height = rect.get_height()
        rect.axes.text(rect.get_x() + rect.get_width()/2., 1.01*height,
                       '%.2f' % float(height),
                       ha='center', va='bottom')
def curate_ax(idx, ax, my_data):
    """Draw one grouped-bar panel on ``ax``.

    For every series in ``my_data``, plot the value at position ``idx`` as a
    grayscale bar (light to dark, one shade per series). Returns the list of
    bar containers so the caller can build a legend from them.
    """
    base = 1            # x position of the first bar
    spacing = 0.06      # horizontal gap between consecutive bars
    bar_width = 0.05
    shades = ['0.9', '0.75', '0.6', '0.45', '0.3', '0.15']
    return [
        ax.bar(base + spacing * pos, series[idx], bar_width, color=shades[pos])
        for pos, series in enumerate(my_data)
    ]
# +
my_data = [[0.32, 0.4, 0.39],[0.30, 0.4, 0.2],[0.24,0.4, 0.1],
[0.7,0.4, 0.3],[0.75,0.4, 0.7],[0.8,0.4,0.6]]
f, (ax0, ax1, ax2) = plt.subplots(1, 3, sharey=True)
# Create subplots subplot
rects0 = curate_ax(0, ax0, my_data)
curate_ax(1, ax1, my_data)
curate_ax(2, ax2, my_data)
# Axis and Title Settings
plt.suptitle('Learning To Count Clusters')
# y-axis
ax0.set_yticks([0,1])
ax0.set_ylim([0,1])
ax0.set_ylabel('Accuracy')
# x-axis
ax0.set_xticks([],[])
ax1.set_xticks([],[])
ax2.set_xticks([],[])
ax0.set_xlabel('All Sets')
#ax0.set_title('y = 5')
# Legend
#ax.legend((rects1[0], rects2[0]), ('Men', 'Women'))
plt.legend((rects0[0], rects0[1], rects0[2], rects0[3], rects0[4], rects0[5]),
('5', '6', '7','8','9','10'),
loc = 'best', bbox_to_anchor = (0,-0.1,1.1,1),
bbox_transform = plt.gcf().transFigure)
plt.show()
# +
# Layout scaffold for a 2x2 panel figure (BCE loss, cardinality, accuracy,
# recall). Only the axes are configured here; data plots are commented out.
x = np.linspace(0,100)
y = np.sin(x)
y2 = np.cos(x)
f, axarr = plt.subplots(2, 2, sharex='col')
f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.4, hspace=None)
ax0 = axarr[0,0]  # top-left: loss
ax1 = axarr[0,1]  # top-right: cardinality
ax2 = axarr[1,0]  # bottom-left: accuracy
ax3 = axarr[1,1]  # bottom-right: recall
# Loss
ax0.set_ylabel('BCE', size=9)
ax0.set_yticks([0,0.25,0.5,0.75,1])
ax0.set_yticklabels(['0','','','','1'],size=8)
ax0.set_ylim([0,1])
#ax0.plot(x,y2)
# Subset Size
ax1.set_ylabel('Cardinality', size=9)
ax1.set_yticks([0,2.5,5,7.5,10])
ax1.set_yticklabels(['0','','','','10'],size=8)
ax1.set_ylim([0,10])
# Accuracy
ax2.set_ylabel('Accuracy', size=9)
ax2.set_yticks([0,0.25,0.5,0.75,1])
ax2.set_yticklabels(['0','','','','1'],size=8)
ax2.set_ylim([0,1])
# Precision
ax3.set_ylabel('Recall', size=9)
ax3.set_yticks([0,0.25,0.5,0.75,1])
ax3.set_yticklabels(['0','','','','1'],size=8)
ax3.set_ylim([0,1])
# x-axis labels belong on the bottom row only. The figure is a 2x2 grid, so
# the bottom axes are ax2 and ax3; the original referenced an undefined `ax4`
# here, which raised a NameError.
ax2.set_xlabel('t', size=9)
ax3.set_xlabel('t', size=9)
plt.show()
# +
f, (ax0, ax1, ax2, ax3, ax4) = plt.subplots(1, 5, sharey=True)
# Create subplots subplot
rects0 = curate_ax(0, ax0, super_loss)
curate_ax(1, ax1, super_loss)
curate_ax(2, ax2, super_loss)
curate_ax(3, ax3, super_loss)
curate_ax(4, ax4, super_loss)
# Axis and Title Settings
head = plt.suptitle('Learning To Count Clusters')
# y-axis
ax0.set_yticks([0,5,10,15,20,25])
ax0.set_ylim([0,25])
ax0.set_ylabel('Accuracy')
# x-axis
ax0.set_xticks([],[])
ax1.set_xticks([],[])
ax2.set_xticks([],[])
ax3.set_xticks([],[])
ax4.set_xticks([],[])
#ax5.set_xticks([],[])
ax0.set_xlabel('All Sets')
ax1.set_xlabel('y = 5')
ax2.set_xlabel('y = 10')
ax3.set_xlabel('y = 15')
ax4.set_xlabel('y = 20')
#ax0.set_title('y = 5')
# Legend
# Legend
lgd = plt.legend((rects0[0], rects0[1], rects0[2]),
(r'$\lambda = 10$', r'$\lambda = 15$', r'$\lambda = 20$'),
loc = 'best', bbox_to_anchor = (0,-0.1,1.1,1),
fontsize=9, numpoints=3, handlelength=1,
bbox_transform = plt.gcf().transFigure)
plt.savefig('odl.pdf', bbox_extra_artists=(lgd,head), bbox_inches='tight')
plt.show()
# -
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fast GP implementations
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
from matplotlib import rcParams
rcParams["figure.dpi"] = 100
rcParams["figure.figsize"] = 12, 4
# ## Benchmarking GP codes
# Implemented the right way, GPs can be super fast! Let's compare the time it takes to evaluate our GP likelihood and the time it takes to evaluate the likelihood computed with the snazzy ``george`` and ``celerite`` packages. We'll learn how to use both along the way. Let's create a large, fake dataset for these tests:
# +
import numpy as np
np.random.seed(0)
t = np.linspace(0, 10, 10000)
y = np.random.randn(10000)
sigma = np.ones(10000)
# -
# ### Our GP
# +
def ExpSquaredCovariance(t, A=1.0, l=1.0, tprime=None):
    """
    Return the ``N x M`` exponential squared covariance matrix, where
    entry ``(i, j)`` is ``A^2 exp(-0.5 (t_i - tprime_j)^2 / l^2)``.
    When ``tprime`` is omitted, the (square) auto-covariance of ``t``
    is returned.
    """
    if tprime is None:
        tprime = t
    # Pairwise differences via broadcasting: rows index t, columns index tprime.
    diff = np.asarray(t)[:, None] - np.asarray(tprime)[None, :]
    return A ** 2 * np.exp(-0.5 * diff ** 2 / l ** 2)
def ln_gp_likelihood(t, y, sigma=0, A=1.0, l=1.0):
    """
    Log marginal likelihood of a dataset ``y(t)`` with per-point uncertainty
    ``sigma``, under a GP with a Squared Exponential kernel of amplitude
    ``A`` and lengthscale ``l``:

        ln L = -1/2 [ y^T K^-1 y + ln|K| + N ln(2 pi) ]
    """
    npts = len(t)
    # Full covariance: kernel matrix plus white-noise diagonal.
    cov = ExpSquaredCovariance(t, A=A, l=l) + sigma ** 2 * np.eye(npts)
    result = -0.5 * np.dot(y.T, np.linalg.solve(cov, y))  # data-fit term
    result -= 0.5 * np.linalg.slogdet(cov)[1]             # complexity penalty
    result -= 0.5 * npts * np.log(2 * np.pi)              # normalization
    return result
# -
# Time to evaluate the GP likelihood:
# %%time
ln_gp_likelihood(t, y, sigma)
# ### george
# Let's time how long it takes to do the same operation using the ``george`` package (``pip install george``).
#
# The kernel we'll use is
#
# ```python
# kernel = amp ** 2 * george.kernels.ExpSquaredKernel(tau ** 2)
# ```
#
# where ``amp = 1`` and ``tau = 1`` in this case.
#
# To instantiate a GP using ``george``, simply run
#
# ```python
# gp = george.GP(kernel)
# ```
#
# The ``george`` package pre-computes a lot of matrices that are re-used in different operations, so before anything else, we'll ask it to compute the GP model for our timeseries:
#
# ```python
# gp.compute(t, sigma)
# ```
#
# Note that we've only given it the time array and the uncertainties, so as long as those remain the same, you don't have to re-compute anything. This will save you a lot of time in the long run!
#
# Finally, the log likelihood is given by ``gp.log_likelihood(y)``.
#
# How do the speeds compare? Did you get the same value of the likelihood?
import george
# %%time
kernel = george.kernels.ExpSquaredKernel(1.0)
gp = george.GP(kernel)
gp.compute(t, sigma)
# %%time
print(gp.log_likelihood(y))
# ``george`` also offers a fancy GP solver called the HODLR solver, which makes some approximations that dramatically speed up the matrix algebra. Let's instantiate the GP object again by passing the keyword ``solver=george.HODLRSolver`` and re-compute the log likelihood. How long did that take? Did we get the same value for the log likelihood?
# %%time
gp = george.GP(kernel, solver=george.HODLRSolver)
gp.compute(t, sigma)
# %%time
gp.log_likelihood(y)
# ### celerite
# The ``george`` package is super useful for GP modeling, and I recommend you read over the [docs and examples](https://george.readthedocs.io/en/latest/). It implements several different [kernels](https://george.readthedocs.io/en/latest/user/kernels/) that come in handy in different situations, and it has support for multi-dimensional GPs. But if all you care about are GPs in one dimension (in this case, we're only doing GPs in the time domain, so we're good), then ``celerite`` is what it's all about:
#
# ```bash
# pip install celerite
# ```
#
# Check out the [docs](https://celerite.readthedocs.io/en/stable/) here, as well as several tutorials. There is also a [paper](https://arxiv.org/abs/1703.09710) that discusses the math behind ``celerite``. The basic idea is that for certain families of kernels, there exist **extremely efficient** methods of factorizing the covariance matrices. Whereas GP fitting typically scales with the number of datapoints $N$ as $N^3$, ``celerite`` is able to do everything in order $N$ (!!!) This is a **huge** advantage, especially for datasets with tens or hundreds of thousands of data points. Using ``george`` or any homebuilt GP model for datasets larger than about ``10,000`` points is simply intractable, but with ``celerite`` you can do it in a breeze.
#
# Next we repeat the timing tests, but this time using ``celerite``. Note that the Exponential Squared Kernel is not available in ``celerite``, because it doesn't have the special form needed to make its factorization fast. Instead, we'll use the ``Matern 3/2`` kernel, which is qualitatively similar and can be approximated quite well in terms of the ``celerite`` basis functions:
#
# ```python
# kernel = celerite.terms.Matern32Term(np.log(1), np.log(1))
# ```
#
# Note that ``celerite`` accepts the **log** of the amplitude and the **log** of the timescale. Other than this, we can compute the likelihood using the same syntax as ``george``.
#
# How much faster did it run? Is the value of the likelihood different from what you found above? Why?
import celerite
from celerite import terms
# %%time
kernel = terms.Matern32Term(np.log(1), np.log(1))
gp = celerite.GP(kernel)
gp.compute(t, sigma)
# %%time
gp.log_likelihood(y)
# <div style="background-color: #D6EAF8; border-left: 15px solid #2E86C1;">
# <h1 style="line-height:2.5em; margin-left:1em;">Exercise (the one and only)</h1>
# </div>
#
# Let's use what we've learned about GPs in a real application: fitting an exoplanet transit model in the presence of correlated noise.
#
# Here is a (fictitious) light curve for a star with a transiting planet:
# +
import matplotlib.pyplot as plt
t, y, yerr = np.loadtxt("data/sample_transit.txt", unpack=True)
plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
plt.xlabel("time")
plt.ylabel("relative flux");
# -
# There is a transit visible to the eye at $t = 0$, which (say) is when you'd expect the planet to transit if its orbit were perfectly periodic. However, a recent paper claims that the planet shows transit timing variations, which are indicative of a second, perturbing planet in the system, and that a transit at $t = 0$ can be ruled out at 3 $\sigma$. **Your task is to verify this claim.**
#
# Assume you have no prior information on the planet other than the transit occurs in the observation window, the depth of the transit is somewhere in the range $(0, 1)$, and the transit duration is somewhere between $0.1$ and $1$ day. You don't know the exact process generating the noise, but you are certain that there's correlated noise in the dataset, so you'll have to pick a reasonable kernel and estimate its hyperparameters.
#
#
# Fit the transit with a simple inverted Gaussian with three free parameters:
#
# ```python
# def transit_shape(depth, t0, dur):
# return -depth * np.exp(-0.5 * (t - t0) ** 2 / (0.2 * dur) ** 2)
# ```
#
# *HINT: I borrowed heavily from [this tutorial](https://celerite.readthedocs.io/en/stable/tutorials/modeling/) in the celerite documentation, so you might want to take a look at it...*
| Sessions/Session13/Day2/02-Fast-GPs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Risshabh-ML/Vectorized-FNN-Performace_measure/blob/main/Sigmoid_NN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="p2sQ6hqTyRbD"
#
#
#
# #**Sigmoid function plotting in 3D and 2D space**
#
#
#
# + id="RYtf7iobNlUE"
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import pandas as pd
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_error
from tqdm import tqdm_notebook
# + [markdown] id="leB0b3oUfc9_"
# * *%matplotlib inline will make your plot outputs appear and be stored within the notebook*
# + [markdown] id="qaAdl5cYOBOj"
# $Sigmoid_{w,b}=\frac{1}{1+e^{-(w.x+b)}}$
# + id="3FUipUl_TikA"
def sigmoid(w,b,x):
    """Logistic function of the affine map ``w*x + b``; elementwise on arrays."""
    z = w * x + b
    return 1.0 / (1.0 + np.exp(-z))
# + colab={"base_uri": "https://localhost:8080/"} id="Y8fyDq9vPUt0" outputId="e43ab61c-4b85-427e-a430-85b8cdc02f64"
sigmoid(1,0.5,0)
# + colab={"base_uri": "https://localhost:8080/"} id="hH_97bsGUuLX" outputId="4f2786be-1dfc-49e0-c4bf-074f03b6e345"
w=0.8 #@param {type: "slider", min: -2, max: 2,step:0.1}
b=-0.8 #@param {type: "slider", min: -2, max: 2,step:0.1}
X=np.linspace(-12,+12,100)
#print(x)
Y=sigmoid(w,b,X)
plt.plot(X,Y)
plt.show()
# + [markdown] id="1FQc5nj1b9vp"
# $Sigmoid_{w1,x1.w2,x2}=\frac{1}{1+e^{-{w1.x1+w2.x2+b}}}$
# + id="f5FV2URvcpAm"
def sigmoid_2d(w1,w2,x1,x2,b):
    """Logistic function of the two-input affine map ``w1*x1 + w2*x2 + b``."""
    z = w1 * x1 + w2 * x2 + b
    return 1.0 / (1.0 + np.exp(-z))
# + colab={"base_uri": "https://localhost:8080/"} id="_TbrFINmdh0O" outputId="4504782c-69f5-4a34-c610-64a897206172"
sigmoid_2d(1,0,0.5,0,0)
# + [markdown] id="3EsxM-w8f3Jb"
# **Sigmoid function plotting in 3D space**
# + id="td6xd6hseAdm"
from mpl_toolkits import mplot3d
# + colab={"base_uri": "https://localhost:8080/"} id="dSj019eIhHB0" outputId="edde933e-277d-414a-891d-482b889ea43e"
fig=plt.figure()
ax=plt.axes(projection='3d')
# + colab={"base_uri": "https://localhost:8080/"} id="fG_9V2zZmkM4" outputId="cf36cada-9f84-4fd5-eeef-ad8d08c9cd6d"
x1 = np.linspace(-10, 10, 100)
x2 = np.linspace(-10, 10, 100)
xx1,xx2=np.meshgrid(x1,x2)
print(xx1.shape,xx2.shape)
w1=0.5 #@param {type: "slider", min: -2, max: 2,step:0.1}
w2=0.5 #@param {type: "slider", min: -2, max: 2,step:0.1}
b=0 #@param {type: "slider", min: -2, max: 2,step:0.1}
Y=sigmoid_2d(xx1,xx2,w1,w2,b)
# + colab={"base_uri": "https://localhost:8080/"} id="WYfqwaWXK_t6" outputId="8349b6ab-87bd-4168-bc6f-ffa92840b61a"
plt.contourf(xx1, xx2, Y)
plt.show()
# + [markdown] id="sqThJE2Qn4ck"
# *np.meshgrid turns 1D coordinate arrays into 2D coordinate grids*
# + colab={"base_uri": "https://localhost:8080/"} id="gVmtqX42oN0N" outputId="171a911e-4127-4627-d923-b602283e1f34"
ax = plt.axes(projection='3d')
ax.plot_surface(xx1, xx2, Y,cmap='viridis', edgecolor='none')
ax.set_xlabel('xx1')
ax.set_ylabel('xx2')
ax.set_zlabel('Y');
ax.view_init(30, 270)
# + [markdown] id="FwMKH03fx1eL"
# # **Compute the loss over a dataset and find the values of w and b that minimize it**
# + id="1cWFTX6cyOeU"
W_unknown=0.5
b_unknown=0.25
X=np.random.random(25) * 20 - 10
Y=sigmoid(W_unknown,b_unknown,X)
# + colab={"base_uri": "https://localhost:8080/"} id="hIj9Wubh1y_4" outputId="807ea454-57b9-465a-a782-901c9e3393d6"
plt.plot(X,Y)
plt.show()
# + id="hlZnRvqK3Gcg"
def calculate_loss(X,Y,w_est,b_est):
    """Sum of squared errors between targets ``Y`` and ``sigmoid(w_est, b_est, x)``
    over the paired dataset ``(X, Y)``."""
    return sum((y - sigmoid(w_est, b_est, x)) ** 2 for x, y in zip(X, Y))
# + colab={"base_uri": "https://localhost:8080/"} id="3IDBN79p2ELY" outputId="baf39c6a-9412-4dba-9a21-5128c7eb6698"
W=np.linspace(0,2,100)
B=np.linspace(-1,1,100)
WW,BB=np.meshgrid(W,B)
loss=np.zeros(WW.shape)
print(loss.shape)
print(WW.shape)
# + id="fbJ_OQaJ6afw"
for i in range(WW.shape[0]):
for j in range(BB.shape[1]):
loss[i,j]=calculate_loss(X,Y,WW[i,j],BB[i,j])
# + colab={"base_uri": "https://localhost:8080/"} id="yrxRYKe87q7c" outputId="ea1c6659-0bfd-46dd-9c98-0573d38d7214"
fig=plt.figure()
ax = plt.axes(projection='3d')
ax.plot_surface(WW,BB, loss,cmap='viridis')
ax.set_xlabel('w')
ax.set_ylabel('b')
ax.set_zlabel('loss')
#ax.view_init(30, 180)
# + id="zTjuKEzLgtaV"
ij=np.argmin(loss)
i=int(np.floor(ij/loss.shape[1]))
j=int(ij-i*loss.shape[1])
# + colab={"base_uri": "https://localhost:8080/"} id="sLreK3Z-iBSi" outputId="6da501bb-bb30-4b7c-8113-ff6518f3f63e"
print(i,j)
# + colab={"base_uri": "https://localhost:8080/"} id="SOm4NyQGiLKz" outputId="63203280-0a46-4ed8-8f76-2a9e374f1ede"
print(WW[i,j],BB[i,j])
# + [markdown] id="7SyI5hsaKQXJ"
# # Sigmoid **Class**
# + [markdown] id="3Ub_-fJ7NCAn"
# ## Fit For Toy Data
# + id="6mm2Sa7Q1fCQ"
class SigmoidNeuron:
    """A single sigmoid neuron trained with full-batch gradient descent on MSE."""
    def __init__(self):
        # Weight row-vector (1 x n_features) and scalar bias; set by fit().
        self.w = None
        self.b = None
    def perceptron(self, x):
        """Affine pre-activation: ``x . w^T + b``."""
        return np.dot(x, self.w.T) + self.b
    def sigmoid(self, x):
        """Logistic activation."""
        return 1.0/(1.0 + np.exp(-x))
    def grad_w(self, x, y):
        """dMSE/dw for one sample, via the chain rule through the sigmoid."""
        pred = self.sigmoid(self.perceptron(x))
        return (pred - y) * pred * (1 - pred) * x
    def grad_b(self, x, y):
        """dMSE/db for one sample."""
        pred = self.sigmoid(self.perceptron(x))
        return (pred - y) * pred * (1 - pred)
    def fit(self, X, Y, epochs=1, learning_rate=1, initialise=True, display_loss=False):
        """Full-batch gradient descent for ``epochs`` passes; optionally
        records the per-epoch MSE and plots the loss curve at the end."""
        if initialise:
            # Random weights, zero bias.
            self.w = np.random.randn(1, X.shape[1])
            self.b = 0
        if display_loss:
            loss = {}
        for epoch in tqdm_notebook(range(epochs), total=epochs, unit="epoch"):
            dw, db = 0, 0
            # Accumulate gradients over the whole batch before stepping.
            for x, y in zip(X, Y):
                dw += self.grad_w(x, y)
                db += self.grad_b(x, y)
            self.w -= learning_rate * dw
            self.b -= learning_rate * db
            if display_loss:
                loss[epoch] = mean_squared_error(self.sigmoid(self.perceptron(X)), Y)
        if display_loss:
            plt.plot(np.array(list(loss.values())).astype(float))
            plt.xlabel('Epochs')
            plt.ylabel('Mean Squared Error')
            plt.show()
    def predict(self, X):
        """Forward pass for every row of ``X``; returns an array of predictions."""
        return np.array([self.sigmoid(self.perceptron(x)) for x in X])
# + colab={"base_uri": "https://localhost:8080/"} id="PireevSOLwlP" outputId="7b843266-566a-4333-8385-213422d77591"
xxx1 = np.array([[2.5, 2.5], [4, -1], [1, -4], [-3, 1.25], [-2, -4], [1, 5]])
yyy1 = np.array([1, 1, 1, 0, 0, 0])
X = np.squeeze(np.asarray(xxx1))
Y = np.squeeze(np.asarray(yyy1))
X.shape, Y.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 582, "referenced_widgets": ["ec7e2bcc80504bd6a25be4c088ff1ec8", "abd574da053d4510a41f183f808f86d8", "857c7d93004c463b8e592b8d4a6be99f", "2bd2909741ce46f5ad41d9491cda1dbc", "bbadafe6d38d4b1f8e2c3e9ba85596f7", "<KEY>", "9f3e28a48062438eb47a3c5f29162473", "edfc24f2f0764170a5ee966fac3642b7", "fdc354253d3746a7a835108a69cc0535", "9792de78879e4c29a81f3cbe56c467a3", "aa8dcbbe4fe5492c987c4a87007d9f41"]} id="uORcmfP_17IA" outputId="8ad31954-0175-4a9b-f7b0-42132dc283d6"
sn = SigmoidNeuron()
sn.fit(X, Y, 1, 0.01, True)
N = 20
plt.figure(figsize=(10, N*5))
for i in range(N):
print(sn.w, sn.b)
ax = plt.subplot(N, 1, i+1)
plot_sn(X, Y, sn, ax)
sn.fit(X, Y, 1, 0.75, False)
plt.show()
# + id="8NpvRrm48P3l"
# + [markdown] id="3cru2dza8Nud"
# ## **Load Data**
# + colab={"base_uri": "https://localhost:8080/"} id="OW9ZkM3y9j4d" outputId="0e8ffa2a-8030-4b7a-ad33-859e978467c3"
from google.colab import drive
drive.mount('/content/gdrive')
#drive.mount("/content/gdrive", force_remount=True)
# + colab={"base_uri": "https://localhost:8080/"} id="FHN52-OjpLJh" outputId="981b3fa8-3593-4241-db9a-c9743e5e8de0"
drive.mount("/content/gdrive", force_remount=True)
# + colab={"base_uri": "https://localhost:8080/"} id="rO7aCpUF8gdE" outputId="b98c4b79-e360-4d81-e739-cf91c9f4e52d"
# !ls "My drive/Data/mobile_cleaned.csv"
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="_vxeT0iLsFHM" outputId="07e506a4-94a1-4964-b722-c5f09<PASSWORD>"
pwd
# + id="tIXvdgySB0Da"
df=pd.read_csv('/content/gdrive/MyDrive/Data/mobile_cleaned.csv')
# + id="MxoiPT88CG-C" colab={"base_uri": "https://localhost:8080/"} outputId="b9cbb458-3231-40a8-8567-4f9affe4dca7"
df.shape
# + id="O_1yiyPYCNFk" colab={"base_uri": "https://localhost:8080/", "height": 325} outputId="96c0864c-6cc7-4ffc-debf-9a6823693102"
df.head()
# + id="DqpxP1tzCVb9" colab={"base_uri": "https://localhost:8080/"} outputId="70256628-5045-4be9-cba5-79c25b007776"
X=df.drop('Rating',axis=1)
Y=df['Rating'].values
Y
# + id="sb4yHR9RC80o"
threshold=4.2 #@param {type: "slider", min: 3, max: 5,step:0.1}
df['Class']=(df['Rating']>=threshold).astype(np.int)
# + id="jj-yuu8zDgx3" colab={"base_uri": "https://localhost:8080/"} outputId="17606c07-5a27-4632-a47d-a225f76de7fa"
df['Class'].value_counts()
# + id="SxJzEDJPEZb6" colab={"base_uri": "https://localhost:8080/"} outputId="bc841fe3-1109-4901-e0d1-00262511e933"
df['Class'].value_counts(normalize=True)
# + id="RC695CPEI8km" colab={"base_uri": "https://localhost:8080/"} outputId="93f56d2a-f70c-4209-946a-9831b2b31c52"
Y_bainarised=df['Class'].values
Y_bainarised
# + [markdown] id="jXmREpC5JLRf"
# ## **Standardization**
# + id="neKhP3dnJcqK"
R=np.random.random([100,1])
#R
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="i3__m-dnKC2i" outputId="f11e3861-0711-48ae-b706-b97e7639c10a"
plt.plot(R)
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="9ujtdhkvKCzt" outputId="1c77ac2e-e5a1-49e1-d27c-3e5f2a15a2eb"
np.mean(R)
# + colab={"base_uri": "https://localhost:8080/"} id="BxuxwR39KCrv" outputId="a9a786e4-c9cb-4b5e-f6be-7cb0c3e3a539"
np.std(R)
# + id="oagQceRQKy6u"
scalar= StandardScaler()
# + colab={"base_uri": "https://localhost:8080/"} id="WJfmiO-5KCoq" outputId="7a09130c-bad1-457d-89cb-33cbad2d3377"
scalar.fit(R)
# + colab={"base_uri": "https://localhost:8080/"} id="ldhL--YYLpbI" outputId="9218d0e4-8034-4976-dd86-8432470fb86d"
scalar.mean_
# + id="cumb66H5Lx95"
RT=scalar.transform(R)
# + colab={"base_uri": "https://localhost:8080/"} id="KnRMwu3lL6Rg" outputId="ca52b5d0-ad79-4edd-bf1e-340226eca40e"
np.mean(RT)
# + colab={"base_uri": "https://localhost:8080/"} id="xcN7zz71OfLP" outputId="b68e1b24-e150-43e9-ca30-863a509b19c1"
np.std(RT)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="SDymOFhBOkPG" outputId="2fa1d372-ec27-44a0-f5da-e137a8616a47"
plt.plot(RT)
plt.show()
# + id="AbeNHWEJR4gU"
X_train,X_test,Y_train,Y_test=train_test_split(X,Y,random_state=0,stratify=Y_bainarised)
# + colab={"base_uri": "https://localhost:8080/"} id="u-rUO1wxR4X1" outputId="f9176775-b5a4-41ec-f625-a1645533324d"
print(X_train.shape,X_test.shape)
# + id="4qv0rxofSmWl"
scalar=StandardScaler()
# + id="kwkDmj5qSzm8"
x_scaled_train=scalar.fit_transform(X_train)
x_scaled_test=scalar.transform(X_test)
# + id="16SELBbjVSpq"
minmax_scaler = MinMaxScaler()
# + id="FeiRMLORVTAE"
Y_scaled_train = minmax_scaler.fit_transform(Y_train.reshape(-1, 1))
# + colab={"base_uri": "https://localhost:8080/"} id="DBufcHC3Vpsk" outputId="9f6e6234-a35a-454e-c624-ec094792ffdc"
np.min(Y_scaled_train)
# + id="RVdtH2y4Vppv"
Y_scaled_test = minmax_scaler.transform(Y_test.reshape(-1, 1))
# + colab={"base_uri": "https://localhost:8080/"} id="k9CwrRzYVpmv" outputId="7967bfe0-7e3c-4067-9a2b-596bb073b92b"
scaled_threshold = list(minmax_scaler.transform(np.array([threshold]).reshape(1, -1)))[0][0]
scaled_threshold
# + id="1n1j-QNTVpkJ"
Y_binarised_train = (Y_scaled_train > scaled_threshold).astype("int")
#Y_binarised_train
# + id="_4QRmOi9WiiB"
Y_binarised_test = (Y_scaled_test > scaled_threshold).astype("int").ravel()
# + id="ABliQ4FkWuZi"
sn=SigmoidNeuron()
# + id="w8PZmao7WuWU" colab={"base_uri": "https://localhost:8080/", "height": 346, "referenced_widgets": ["7f5e2ddb01ba49c49390112f077cf55e", "b51be88bb85c47d2a0e2dc39917cf61b", "acffa23c2aa7433f9971926367eefa10", "23c0c87532d14219af2117973848e7e1", "755fc46616ca4888a8c60e24982eaef6", "<KEY>", "<KEY>", "<KEY>", "07aa4e089dc044f89f7f112f8eb23ba5", "39157b741e34436382827a32f4dcd1a7", "ad779cf641a447f0b7fe717965af28ed"]} outputId="05e08752-e279-40bd-eda4-cc520ecf1e06"
sn.fit(x_scaled_train,Y_scaled_train,epochs=10000,learning_rate=.002,display_loss=True)
# + id="VYF_bCJKbn_v" colab={"base_uri": "https://localhost:8080/", "height": 183} outputId="02e69bea-f5a3-477b-9eef-8d5855c7ce58"
Y_pred_train = sn.predict(X_scaled_train)
Y_pred_test = sn.predict(X_scaled_test)
# + id="DFMJZTdVcvB-"
Y_pred_binarised_train = (Y_pred_train > scaled_threshold).astype("int").ravel()
Y_pred_binarised_test = (Y_pred_test > scaled_threshold).astype("int").ravel()
# + id="TFPiWOCBcE6C"
accuracy_train = accuracy_score(Y_pred_binarised_train, Y_binarised_train)
accuracy_test = accuracy_score(Y_pred_binarised_test, Y_binarised_test)
# + id="2nXtQxTFczj3"
print(accuracy_train, accuracy_test)
# + id="xQpgrvDxt1dx"
# + id="g9XNIHTft1Zp"
# + id="Gtm1RwMot1SF"
| Sigmoid_NN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
import sys
import nltk
import pandas as pd
import re
import string
import numpy as np
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.metrics import precision_score, recall_score, accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics.pairwise import cosine_similarity
def dataRead(inputFile):
    """Load a JSON dataset (path or file-like object) into a pandas DataFrame."""
    return pd.read_json(inputFile)
def dataProcessing(dataFrame, input):
    """Normalize every recipe's ingredient list (plus the query ``input``) into
    a cleaned, space-joined document: lowercased, digits stripped, and
    punctuation / English-stopword tokens removed.

    Returns a list of cleaned strings with the query document at index 0,
    followed by one document per row of ``dataFrame['ingredients']``.
    """
    punctuation = set(string.punctuation)
    stop_words = set(stopwords.words("english"))
    # One raw document per recipe, query first.
    documents = [" ".join(input)]
    documents += [" ".join(ingredients) for ingredients in dataFrame['ingredients']]
    ingredientList = []
    for doc in documents:
        cleaned = re.sub(r"(\d)", "", doc.lower())
        tokens = nltk.word_tokenize(cleaned)
        kept = [word for word in tokens
                if word not in punctuation and word not in stop_words]
        ingredientList.append(" ".join(kept))
    return ingredientList
def vectorization(data):
    """TF-IDF vectorize the documents in ``data``.

    Row 0 of ``data`` is the query document; returns ``(query_row, corpus_rows)``
    as sparse matrices.
    """
    tfidf = TfidfVectorizer(stop_words='english')
    matrix = tfidf.fit_transform(data)
    print("Vectorization Completed")
    # Split off the query row (index 0) from the recipe rows.
    return matrix[0], matrix[1:]
def randomForestModel(inputMatrix, dataMatrix, dataFrame):
    """Train a random forest on the TF-IDF corpus to predict cuisine, print a
    held-out accuracy estimate, and print the predicted cuisine for the query
    row ``inputMatrix``."""
    encoder = preprocessing.LabelEncoder()
    encoder.fit(dataFrame['cuisine'])
    labels = encoder.transform(dataFrame['cuisine'])
    # Hold out 30% of recipes to estimate accuracy.
    x_train, x_test, y_train, y_test = train_test_split(dataMatrix, labels, test_size=0.3)
    forest = RandomForestClassifier(n_estimators = 100, criterion = 'entropy', random_state = 0)
    forest.fit(x_train, y_train)
    print("Model Accuracy :", accuracy_score(y_test, forest.predict(x_test)) * 100)
    # Decode the numeric prediction for the query back into a cuisine name.
    cuisine = encoder.inverse_transform(forest.predict(inputMatrix))
    print("Cuisine: ", cuisine)
def closestRecipe(inputMatrix, dataMatrix, dataFrame):
    """Print the 10 recipes most similar to the query by cosine similarity.

    NOTE: mutates the caller's DataFrame by attaching a 'Scores' column.
    """
    similarity = cosine_similarity(inputMatrix, dataMatrix).transpose()
    dataFrame['Scores'] = similarity
    top_matches = dataFrame[['id', 'Scores']].nlargest(10, ['Scores'])
    print("Closest 10 Recipes \n", top_matches)
# --- Driver: classify a query ingredient list against the Yummly dataset ---
# NOTE: `input` shadows the Python builtin of the same name in this script.
input = ['paprika', 'banana','rice krispies','plain flour', 'ground pepper', 'salt', 'tomatoes']
inputFile = "yummly.json"
dataFrame = dataRead(inputFile)
# Clean/tokenize; element 0 of `data` is the query, the rest are recipes.
data = dataProcessing(dataFrame, input)
inputMatrix, dataMatrix = vectorization(data)
# Train the classifier and print the query's predicted cuisine.
randomForestModel(inputMatrix, dataMatrix, dataFrame)
# Print the 10 recipes closest to the query by cosine similarity.
closestRecipe(inputMatrix, dataMatrix, dataFrame)
| analyser.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lecture 32: Markov chains (cont.), irreducibility, recurrence, transience, reversibility, random walk on an undirected network
#
#
# ## Stat 110, Prof. Joe Blitzstein, Harvard University
#
# ----
# ## Examples of Markov Chains
#
# Markov chains are memoryless, in a way, since the past doesn't really inform the future; only the present counts. Recall that the future is conditionally independent of the past, given the present.
#
# ### Some key concepts
#
# * A chain is **irreducible** if it is possible to get from any state to another.
# * A state is **recurrent** if, starting from that state, the chain returns to it with probability 1. Note that in a Markov chain this implies the chain will return to that state _infinitely_ many times with probability 1.
# * Otherwise, the state is **transient**.
# ### Example 1
#
# 
#
# * This Markov chain is _irreducible_, as it is indeed possible to go from any one state to another.
# * All of the states in this Markov chain are _recurrent_.
# ### Example 2
#
# 
#
# * In this example, the chain is _reducible_; notice how there are actually two chains (1-2-3 and 4-5-6).
# * However, note that all of the states are _recurrent_.
#
# And if we connected states 3 and 6...
#
# 
#
# * This example is still not _irreducible_.
# * But states 1, 2 and 3 are now _transient_, since there is no way to return to any of those states once that edge from 3 to 6 is traversed.
# * The chain would become _irreducible_ and all states _recurrent_ if we added yet another edge from 4 to 1.
# ### Example 3
#
# 
#
# * The Markov chain in this example is _reducible_.
# * States 1 and 2 are _transient_.
# * States 0 and 3 are _recurrent_, but once you reach states 0 or 3, you cannot leave; these states are called _absorbing states_.
# * In case you didn't notice, the Markov chain in this example is the Gambler's Ruin, where a player either loses all her money (say state 0) or wins all the money (state 3).
# ### Example 4
#
# 
#
# * This is a _periodic_ Markov chain.
# * It is _irreducible_.
# * All states are _recurrent_.
#
# ----
# ## Stationary Distributions
#
# Recall the definition of a stationary distribution from the last lecture.
#
# $\vec{s}$, a probability row vector (PMF), is _stationary_ for a Markov chain with transition matrix $Q$ if $\vec{s} \, Q = \vec{s}$.
# ### Theorems of Stationary Distributions
#
# For any _irreducible_ Markov chain with finitely many states:
#
#
# 1. A stationary distribution $\vec{s}$ exists.
# 1. It is unique.
# 1. $\vec{s}_i = \frac{1}{r_i}$, where $r_i$ is the average return time for returning back to $i$.
# 1. If we also assume there is no _periodicity_ in the chain, where $Q^m$ is strictly positive for some $m$, then $P(X_n = i) \rightarrow s_i$ as $n \rightarrow \infty$
#
# Regarding 4, if we take any probability vector $\vec{t}$, then $\vec{t} \, Q^n \rightarrow \vec{s}$ as $n \rightarrow \infty$.
#
# So the above theorems on stationary distributions are worthy of study, since
#
# * they assure existence and uniqueness of the stationary distribution under certain assumptions
# * they capture long-run behavior
# * they show the relation between the stationary probabilities and the average number of steps needed to return to a state
#
# _But how would we compute the stationary distribution?_
# ### Reversible Markov Chains
#
# **Definition** Markov chains with transition matrix $Q = \left[ q_{ij} \right]$ is _reversible_ if there is a probability vector $\vec{s}$ such that $s_i \, q_{ij} = s_j \, q_{ji}$ for all states $i,j$.
# ### Theorem: Reversible transition matrices and Stationary distribution
#
# If a transition matrix is _reversible_ with respect to $\vec{s}$, then that $\vec{s}$ is _stationary_. This reversibility is with reference to time, so it is also called _time reversible_.
#
# For intuition, imagine a video tape of some particle changing states. If you ran that video backwards and show that to someone, and that person could not tell if the action was moving forwards or backwards, then that would be an example of _time reversiblity_.
#
#
# **Proof**
#
# Let $s_i \, q_{ij} = s_j \, q_{ji}$ for all $i,j$; show that $\vec{s} \, Q = \vec{s}$.
#
# \begin{align}
# \sum_i s_i \, q_{ij} &= \sum_i s_j \, q_{ji} \\
# &= s_j \sum_i q_{ji} \\
# &= s_j &\text{ but this is just the definition of matrix multiplication} \\
# \\\\
# \Rightarrow \vec{s} \, Q &= \vec{s}
# \end{align}
#
# ### Example of reversible Markov chain
#
# A random walk on an undirected network is an example of a reversible Markov chain.
#
# 
#
# In the diagram above, the nodes 1 through 4 are joined in an undirected graph. The degree of each node $d_i$ is the number of edges emanating from said node, so $d_1=2, d_2=2, d_3=3, d_4=1$.
#
# With transition matrix $Q$ for the graph above, then $d_i \, q_{ij} = d_j \, q_{ji}$.
#
# **Proof**
#
# Let $i \ne j$.
#
# Then $q_{ij}, q_{ji}$ are either both 0 or both non-zero. _The key is that we are talking about an undirected graph, and all edges are two-way streets._
#
# If there is an edge joining $i$ and $j$, then $q_{ij} = \frac{1}{d_i}$.
#
# So in a graph with $M$ nodes $1, 2, \dots , M$, where each node has degree $d_i$, then $\vec{s}$ with $s_i = \frac{d_i}{\sum_{j} d_j}$ is stationary.
#
# ----
# View [Lecture 32: Markov Chains Continued | Statistics 110](http://bit.ly/2McjRIq) on YouTube.
| Lecture_32.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 6
#
# ## Question 9
#
# Regularisation on the `College` data set
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.cross_decomposition
import sklearn.decomposition
import sklearn.linear_model
import sklearn.metrics
import sklearn.model_selection
import sklearn.pipeline
import sklearn.preprocessing
import statsmodels.api as sm
# Fetch the ISLR 'College' dataset (777 US colleges) via statsmodels' Rdatasets.
college = sm.datasets.get_rdataset("College", "ISLR").data
# Encode the Private flag as a boolean so sklearn can consume it numerically.
college["Private"] = college["Private"] == "Yes"
college.head()
# ### (a) Split the data set into a training set and a test set
# Target: number of applications received (Apps); predictors: everything else.
X = college.drop(columns=["Apps"])
y = college["Apps"]
# NOTE: no random_state is fixed, so the split (and every error below) varies run to run.
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, test_size=0.33)
# ### (b) Fit a linear model using least squares, and report the test error
least_squares = sklearn.linear_model.LinearRegression()
least_squares.fit(X_train,y_train)
y_pred = least_squares.predict(X_test)
# Test-set mean squared error for ordinary least squares.
least_squares_mse = sklearn.metrics.mean_squared_error(y_test, y_pred)
print(least_squares_mse)
# +
# The above is vast!
# -
# ### (c) Fit a ridge regression model, choosing $\lambda$ by CV. Report the test error
# RidgeCV selects the penalty from the supplied alpha grid by cross-validation.
ridge = sklearn.linear_model.RidgeCV(alphas= np.linspace(0.001,10,num=1000))
ridge.fit(X_train,y_train)
y_pred = ridge.predict(X_test)
ridge_mse = sklearn.metrics.mean_squared_error(y_test, y_pred)
print(ridge_mse)
# ### (d) Fit a lasso model, choosing $\lambda$ by CV. Report the test error, along with the number of non-zero coefficients.
# +
# Use the LassoCV
# LassoCV picks alpha by 5-fold CV over its automatically generated path.
lasso_model = sklearn.linear_model.LassoCV(cv=5, max_iter=1e6)
lasso_model.fit(X_train,y_train)
# Average the CV error across folds for each alpha on the regularisation path.
mses = list(map(np.mean,lasso_model.mse_path_))
alphas = lasso_model.alphas_
plt.plot(alphas,np.log(mses))
plt.ylabel("log(mse)")
plt.xlabel("alpha")
plt.show()
# -
# Coefficients set exactly to zero here show lasso's variable selection.
print(lasso_model.coef_)
print(lasso_model.intercept_)
y_pred = lasso_model.predict(X_test)
lasso_mse = sklearn.metrics.mean_squared_error(y_test, y_pred)
lasso_mse
plt.scatter(college["Accept"], college["Apps"]) # Lasso suggests a roughly one-to-one mapping between these
# ### (e) Fit a PCR model, with M chosen by cross-validation. Report the test error, along with the value of M selected.
# +
# Standardise each predictor:
scaler = sklearn.preprocessing.StandardScaler()
regressor = sklearn.linear_model.LinearRegression()
pca = sklearn.decomposition.PCA()
# PCR = scale -> PCA -> OLS, assembled as a single pipeline so CV refits
# the whole chain on each fold without leaking test data into the scaler.
pipe = sklearn.pipeline.Pipeline(steps=[("scaling", scaler), ("pca", pca), ("linear regression", regressor)])
p = len(X_train.columns)
# Search every possible number of components M = 1..p with 5-fold CV.
params = {"pca__n_components": list(range(1,p+1))}
search = sklearn.model_selection.GridSearchCV(pipe, params, cv=5, return_train_score=True)
search.fit(X_train, y_train)
# +
# Bug fix: the original called pca.fit(X_train_scaled), but X_train_scaled was
# never defined anywhere (NameError). Scale the training data here, matching
# the pipeline's preprocessing, before inspecting explained variance.
pca.fit(scaler.fit_transform(X_train))

fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))
# Scree plot: fraction of variance explained by each principal component.
ax0.plot(pca.explained_variance_ratio_, linewidth=2)
ax0.set_ylabel('PCA explained variance')
# Mark the component count GridSearchCV selected as best.
ax0.axvline(search.best_estimator_.named_steps['pca'].n_components,
            linestyle=':', label='n_components chosen')
ax0.legend(prop=dict(size=12))

# For each number of components, find the best classifier results
results = pd.DataFrame(search.cv_results_)
components_col = 'param_pca__n_components'
best_clfs = results.groupby(components_col).apply(
    lambda g: g.nlargest(1, 'mean_test_score'))

best_clfs.plot(x=components_col, y='mean_test_score', yerr='std_test_score',
               legend=False, ax=ax1)
# Label fix: this is a regression task, so mean_test_score is the CV R² score,
# not "classification accuracy" as the original axis label claimed.
ax1.set_ylabel('CV score (R^2)')
ax1.set_xlabel('n_components')

plt.tight_layout()
plt.show()
# +
# The above graph suggests PCR with 5 components.
# Refit the full pipeline with the chosen M on all training data.
pipe.set_params(pca__n_components=5).fit(X_train, y_train)
y_pred = pipe.predict(X_test)
# Test-set MSE for principal components regression.
pcr_mse = sklearn.metrics.mean_squared_error(y_test, y_pred)
pcr_mse
# -
# ### (f) Fit a PLS model, with M chosen by cross-validation. Report the test error, along with the value of M selected.
# +
# Standardise each predictor:
# scaler = sklearn.preprocessing.StandardScaler()
# NOTE(review): no explicit scaler is used here — presumably relying on
# PLSRegression scaling X internally by default; confirm against sklearn docs.
pls = sklearn.cross_decomposition.PLSRegression()
# pipe = sklearn.pipeline.Pipeline(steps=[("scaling", scaler), ("pca", pca), ("linear regression", regressor)])
p = len(X_train.columns)
# Search the number of PLS components M = 1..p with 5-fold CV.
params = {"n_components": list(range(1,p+1))}
search = sklearn.model_selection.GridSearchCV(pls, params, cv=5, return_train_score=True)
search.fit(X_train, y_train)
# -
best_pls = search.best_estimator_
print(f"Number of components: {best_pls.n_components}")
y_pred = best_pls.predict(X_test)
# Test-set MSE for the CV-selected PLS model.
pls_mse = sklearn.metrics.mean_squared_error(y_test, y_pred)
pls_mse
# +
fig, ax = plt.subplots()
results = pd.DataFrame(search.cv_results_)
results.head()  # no visible effect mid-cell; only the last expression displays
plt.errorbar(results.param_n_components, results.mean_test_score, yerr=results.std_test_score)
# -
# Actually, M=6 looks best here.
# Refit with the manually chosen component count and report its test MSE.
pls = sklearn.cross_decomposition.PLSRegression(n_components=6)
pls.fit(X_train, y_train)
y_pred = pls.predict(X_test)
pls_mse = sklearn.metrics.mean_squared_error(y_test, y_pred)
pls_mse
| Chapter6/question9.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
# Pin GPU selection: enumerate CUDA devices by PCI bus ID and expose only GPU 0.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID";
os.environ["CUDA_VISIBLE_DEVICES"]="0";
import ktrain
from ktrain import text
# ## Multi-Label Text Classification: Identifying Toxic Online Comments
#
# Here, we will classify Wikipedia comments into one or more categories of so-called *toxic comments*. Categories of toxic online behavior include toxic, severe_toxic, obscene, threat, insult, and identity_hate. The dataset can be downloaded from the [Kaggle Toxic Comment Classification Challenge](https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/data) as a CSV file (i.e., download the file ```train.csv```). We will load the data using the ```texts_from_csv``` method, which assumes the label_columns are already one-hot-encoded in the spreadsheet. Since *val_filepath* is None, 10% of the data will automatically be used as a validation set.
#
# Kaggle Toxic Comment Classification training CSV (labels are one-hot columns).
DATA_PATH = 'data/toxic-comments/train.csv'
NUM_WORDS = 50000  # vocabulary size cap for the tokenizer
MAXLEN = 150       # comments truncated/padded to this many tokens
# Multi-label setup: each comment may carry several of the six toxicity labels.
(x_train, y_train), (x_test, y_test), preproc = text.texts_from_csv(DATA_PATH,
                      'comment_text',
                      label_columns = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"],
                      val_filepath=None, # if None, 10% of data will be used for validation
                      max_features=NUM_WORDS, maxlen=MAXLEN,
                      ngram_range=1)
text.print_text_classifiers()
# We will employ a Bidirectional GRU with pretrained word vectors. The following code cell loads a BIGRU model and defines a ```Learner``` object based on that model. The file ```crawl-300d-2M.vec ``` contains 2 million word vectors trained by Facebook and will be automatically downloaded for use with this model.
# +
# Build the Bidirectional GRU classifier and wrap it in a ktrain Learner
# together with the train/validation splits produced above.
model = text.text_classifier('bigru', (x_train, y_train), preproc=preproc)
learner = ktrain.get_learner(model, train_data=(x_train, y_train), val_data=(x_test, y_test))
# -
# As before, we use our learning rate finder to find a good learning rate. In this case, a learning rate of 0.0007 appears to be good.
# Sweep learning rates, then plot loss vs. lr to pick a good value.
learner.lr_find()
learner.lr_plot()
# Finally, we will train our model using ```autofit``` with a learning rate of 0.001 for two epochs.
# define a custom callback for ROC-AUC
from keras.callbacks import Callback
from sklearn.metrics import roc_auc_score

class RocAucEvaluation(Callback):
    """Keras callback that scores ROC-AUC on a held-out set and prints it
    at the end of every `interval`-th epoch.

    validation_data: (X_val, y_val) tuple to score against.
    interval: evaluate every `interval` epochs (1 = every epoch).
    """

    def __init__(self, validation_data=(), interval=1):
        # Bug fix: the original called super(Callback, self).__init__(), which
        # skips Callback's own initializer in the MRO; chain from this class
        # so the base-class setup actually runs.
        super(RocAucEvaluation, self).__init__()
        self.interval = interval
        self.X_val, self.y_val = validation_data

    def on_epoch_end(self, epoch, logs=None):
        # logs defaults to None instead of the original mutable `{}`;
        # it is never read here, and Keras always passes its own dict.
        if epoch % self.interval == 0:
            y_pred = self.model.predict(self.X_val, verbose=0)
            score = roc_auc_score(self.y_val, y_pred)
            print("\n ROC-AUC - epoch: %d - score: %.6f \n" % (epoch+1, score))
# Log ROC-AUC on the validation split after every epoch (interval=1).
RocAuc = RocAucEvaluation(validation_data=(x_test, y_test), interval=1)
# train
# autofit at peak learning rate 0.001 for 2 epochs, with the AUC callback.
learner.autofit(0.001, 2, callbacks=[RocAuc])
# Our final ROC-AUC score is **0.9899**.
| examples/text/toxic_comments-bigru.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.2
# language: julia
# name: julia-1.6
# ---
# +
# Force UTF-8 text encoding for the GR plotting backend.
ENV["GKS_ENCODING"]="utf8"
using Rocket
using ReactiveMP
using GraphPPL
using Distributions
using Plots
using ColorSchemes
using Random
using StatsPlots
using LinearAlgebra
using BenchmarkTools
# +
# Synthetic data: n_samples points drawn from a 12-component mixture of 2-D
# Gaussians whose means are spaced evenly on a circle of radius L.
Random.seed!(125)
L = 50.0
nmixtures = 12
n_samples = 500
# Uniform mixture weights over the components.
probvec = ones(nmixtures)
probvec = probvec ./ sum(probvec)
switch = Categorical(probvec)
println("Switch distribution: ", Distributions.params(switch))
gaussians = map(1:nmixtures) do index
    # Rotate the base mean L*[1, 0] by this component's angle; the covariance
    # is diag(100, 1) rotated the same way, giving elongated ellipses.
    angle = 2π / nmixtures * (index - 1)
    basis_v = L * [ 1.0, 0.0 ]
    rotationm = [ cos(angle) -sin(angle); sin(angle) cos(angle) ]
    mean = rotationm * basis_v
    covariance = Matrix(Hermitian(rotationm * [ 100.0 0.0; 0.0 1.0 ] * transpose(rotationm)))
    return MvNormal(mean, covariance)
end
# Sample a component index per point, then a 2-D point from that component.
z = rand(switch, n_samples)
y = Vector{Vector{Float64}}(undef, n_samples)
for i in 1:n_samples
    y[i] = rand(gaussians[z[i]])
end
colors = palette(:tab10);
# -
# Return a closure that extracts the n-th coordinate from every element of a
# collection, e.g. `points |> sdim(1)` yields all x-coordinates.
function sdim(n)
    return collection -> [element[n] for element in collection]
end
# +
# Overlay the true mixture-component contours on the sampled points.
p = plot(xlim = (-1.5L, 1.5L), ylim = (-1.5L, 1.5L))
for (index, (color, gaussian)) in enumerate(zip(colors, gaussians))
    p = contour!(p, range(-2L, 2L, step = 0.25), range(-2L, 2L, step = 0.25), (x, y) -> pdf(gaussian, [ x, y ]), levels = 3, colorbar = false, color = color)
end
# Scatter the raw samples, splitting the vector-of-vectors into x and y.
p = scatter!(y |> sdim(1), y |> sdim(2), ms = 2, alpha = 0.4)
plot(p, size = (800, 400))
# -
# GraphPPL model: a mixture of `nmixtures` 2-D Gaussians observed `n` times.
# Returns the random variables needed to subscribe to posterior marginals.
@model function gaussian_mixture_model(nmixtures, n)
    # Latent assignment per observation, and per-component mean/precision.
    z = randomvar(n)
    m = randomvar(nmixtures)
    w = randomvar(nmixtures)
    L = 50.0
    basis_v = L * [ 1.0, 0.0 ]
    for i in 1:nmixtures
        # Prior means sit evenly on a circle of radius L — the same geometry
        # used to generate the synthetic data, which breaks label symmetry.
        angle_prior = (2π / nmixtures) * (i - 1)
        mean_mean_prior = [ cos(angle_prior) -sin(angle_prior); sin(angle_prior) cos(angle_prior) ] * basis_v
        mean_mean_cov = [ 1e6 0.0; 0.0 1e6 ]
        m[i] ~ MvNormalMeanCovariance(mean_mean_prior, mean_mean_cov)
        # Weak Wishart prior over each component's precision matrix.
        w[i] ~ Wishart(2, [ 1e5 0.0; 0.0 1e5 ])
    end
    # Symmetric Dirichlet prior over the mixture weights.
    s ~ Dirichlet(ones(nmixtures))
    y = datavar(Vector{Float64}, n)
    means = tuple(m...)
    precs = tuple(w...)
    for i in 1:n
        # Mean-field factorisation between assignments and observations.
        z[i] ~ Categorical(s) where { q = MeanField() }
        y[i] ~ NormalMixture(z[i], means, precs) where { q = MeanField() }
    end
    return s, z, m, w, y
end
# +
import ProgressMeter
# Run variational inference for `viters` passes over the data and collect the
# history of posterior marginals plus the Bethe free energy trace.
#
# nmixtures: number of mixture components.
# data: vector of 2-D observations (Vector{Vector{Float64}}).
# viters: number of inference iterations.
# Returns (switch_estimates, means_estimates, precs_estimates, fe_values).
function inference(nmixtures, data, viters)
    n = length(data)
    model, (s, z, m, w, y) = gaussian_mixture_model(nmixtures, n)
    means_estimates = Vector{Vector{Marginal}}()
    precs_estimates = Vector{Vector{Marginal}}()
    switch_estimates = Vector{Marginal}()
    fe_values = Vector{Float64}()
    # Record every marginal update so the caller can inspect convergence.
    switch_subscription = subscribe!(getmarginal(s), (m) -> push!(switch_estimates, m))
    means_subscription = subscribe!(getmarginals(m), (m) -> push!(means_estimates, m))
    precs_subscription = subscribe!(getmarginals(w), (m) -> push!(precs_estimates, m))
    fe_subscription = subscribe!(score(BetheFreeEnergy(), model), (fe) -> push!(fe_values, fe))
    # Initialise the variational posteriors; the means reuse the circular
    # layout from the priors, which avoids all components collapsing together.
    setmarginal!(s, vague(Dirichlet, nmixtures))
    basis_v = [ 1.0, 0.0 ]
    for i in 1:nmixtures
        angle_prior = (2π / nmixtures) * (i - 1)
        mean_mean_prior = [ cos(angle_prior) -sin(angle_prior); sin(angle_prior) cos(angle_prior) ] * basis_v
        mean_mean_cov = [ 1e6 0.0; 0.0 1e6 ]
        setmarginal!(m[i], MvNormalMeanCovariance(mean_mean_prior, mean_mean_cov))
        # setmarginal!(m[i], vague(MvNormalMeanCovariance, 2))
        setmarginal!(w[i], Wishart(2, [ 1e3 0.0; 0.0 1e3 ]))
    end
    # NOTE(review): each update! re-feeds the data to the datavars, driving one
    # reactive inference sweep per iteration — confirm against ReactiveMP docs.
    ProgressMeter.@showprogress for i in 1:viters
        update!(y, data)
    end
    # Tear down subscriptions so no further updates mutate the result vectors.
    unsubscribe!(means_subscription)
    unsubscribe!(precs_subscription)
    unsubscribe!(switch_subscription)
    unsubscribe!(fe_subscription)
    return switch_estimates, means_estimates, precs_estimates, fe_values
end
# -
# Run inference for 15 iterations on the synthetic data.
s, m, w, fe = inference(nmixtures, y, 15);
# Time the full inference procedure.
@benchmark inference($nmixtures, $y, 15)
# +
# Left: raw data. Middle: data with contours of the inferred components
# (posterior mean of each mean/precision). Right: free energy per iteration.
pe = plot(xlim = (-1.5L, 1.5L), ylim = (-1.5L, 1.5L))
rp = scatter(y |> sdim(1), y |> sdim(2))
pe = scatter!(pe, y |> sdim(1), y |> sdim(2))
e_means = mean.(m[end])
e_precs = mean.(w[end])
for (e_m, e_w, c) in zip(e_means, e_precs, colors)
    # Invert the posterior-mean precision to get a covariance for plotting.
    gaussian = MvNormal(e_m, Matrix(Hermitian(inv(e_w))))
    pe = contour!(pe, range(-2L, 2L, step = 0.25), range(-2L, 2L, step = 0.25), (x, y) -> pdf(gaussian, [ x, y ]), levels = 7, colorbar = false, color = c)
end
pfe = plot(fe[2:end], label = "Free Energy")
plot(rp, pe, pfe, size = (1400, 400), layout = @layout([ a b c ]))
# -
| demo/Gaussian Mixtures Multivariate.ipynb |